-rw-r--r--.pre-commit-config.yaml9
-rw-r--r--.zuul.yaml251
-rw-r--r--HACKING.rst6
-rw-r--r--README.rst2
-rw-r--r--api-guide/source/accelerator-support.rst4
-rw-r--r--api-guide/source/server_concepts.rst14
-rw-r--r--api-guide/source/users.rst2
-rw-r--r--api-ref/source/flavors.inc9
-rw-r--r--api-ref/source/os-keypairs.inc22
-rw-r--r--api-ref/source/parameters.yaml61
-rw-r--r--api-ref/source/servers-action-shelve.inc94
-rw-r--r--api-ref/source/servers-actions.inc19
-rw-r--r--api-ref/source/servers.inc7
-rw-r--r--bindep.txt1
-rw-r--r--devstack/nova-multi-cell-exclude-list.txt4
-rw-r--r--doc/api_samples/images/images-details-get-resp.json198
-rw-r--r--doc/api_samples/images/images-list-get-resp.json128
-rw-r--r--doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json4
-rw-r--r--doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json5
-rw-r--r--doc/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json8
-rw-r--r--doc/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json9
-rw-r--r--doc/api_samples/os-shelve/v2.77/os-unshelve-az.json (renamed from doc/api_samples/os-shelve/v2.77/os-unshelve.json)0
-rw-r--r--doc/api_samples/os-shelve/v2.91/os-unshelve-az-host.json6
-rw-r--r--doc/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json6
-rw-r--r--doc/api_samples/os-shelve/v2.91/os-unshelve-host.json5
-rw-r--r--doc/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json5
-rw-r--r--doc/api_samples/servers/v2.94/server-action-rebuild-resp.json80
-rw-r--r--doc/api_samples/servers/v2.94/server-action-rebuild.json15
-rw-r--r--doc/api_samples/servers/v2.94/server-create-req.json30
-rw-r--r--doc/api_samples/servers/v2.94/server-create-resp.json22
-rw-r--r--doc/api_samples/servers/v2.94/server-get-resp.json81
-rw-r--r--doc/api_samples/servers/v2.94/server-update-req.json8
-rw-r--r--doc/api_samples/servers/v2.94/server-update-resp.json78
-rw-r--r--doc/api_samples/servers/v2.94/servers-details-resp.json88
-rw-r--r--doc/api_samples/servers/v2.94/servers-list-resp.json24
-rw-r--r--doc/api_samples/versions/v21-version-get-resp.json2
-rw-r--r--doc/api_samples/versions/versions-get-resp.json2
-rw-r--r--doc/api_schemas/network_data.json2
-rw-r--r--doc/ext/extra_specs.py2
-rw-r--r--doc/ext/feature_matrix.py6
-rw-r--r--doc/notification_samples/common_payloads/ImageMetaPropsPayload.json2
-rw-r--r--doc/source/_extra/.htaccess3
-rw-r--r--doc/source/admin/architecture.rst66
-rw-r--r--doc/source/admin/availability-zones.rst81
-rw-r--r--doc/source/admin/cells.rst120
-rw-r--r--doc/source/admin/compute-node-identification.rst83
-rw-r--r--doc/source/admin/config-drive.rst9
-rw-r--r--doc/source/admin/configuration/hypervisor-hyper-v.rst6
-rw-r--r--doc/source/admin/configuration/hypervisor-powervm.rst75
-rw-r--r--doc/source/admin/configuration/hypervisors.rst7
-rw-r--r--doc/source/admin/configuring-migrations.rst6
-rw-r--r--doc/source/admin/cpu-topologies.rst154
-rw-r--r--doc/source/admin/evacuate.rst14
-rw-r--r--doc/source/admin/huge-pages.rst2
-rw-r--r--doc/source/admin/hw-emulation-architecture.rst133
-rw-r--r--doc/source/admin/hw-machine-type.rst32
-rw-r--r--doc/source/admin/index.rst11
-rw-r--r--doc/source/admin/libvirt-misc.rst30
-rw-r--r--doc/source/admin/live-migration-usage.rst2
-rw-r--r--doc/source/admin/manage-logs.rst10
-rw-r--r--doc/source/admin/manage-volumes.rst6
-rw-r--r--doc/source/admin/managing-resource-providers.rst2
-rw-r--r--doc/source/admin/networking.rst126
-rw-r--r--doc/source/admin/notifications.rst132
-rw-r--r--doc/source/admin/pci-passthrough.rst271
-rw-r--r--doc/source/admin/remote-console-access.rst16
-rw-r--r--doc/source/admin/resource-limits.rst3
-rw-r--r--doc/source/admin/scheduling.rst49
-rw-r--r--doc/source/admin/secure-live-migration-with-qemu-native-tls.rst4
-rw-r--r--doc/source/admin/soft-delete-shadow-tables.rst62
-rw-r--r--doc/source/admin/upgrades.rst20
-rw-r--r--doc/source/admin/vdpa.rst86
-rw-r--r--doc/source/cli/nova-compute.rst2
-rw-r--r--doc/source/cli/nova-manage.rst101
-rw-r--r--doc/source/cli/nova-rootwrap.rst2
-rw-r--r--doc/source/cli/nova-status.rst2
-rw-r--r--doc/source/configuration/extra-specs.rst10
-rw-r--r--doc/source/configuration/index.rst4
-rw-r--r--doc/source/configuration/policy-concepts.rst319
-rw-r--r--doc/source/contributor/api-2.rst56
-rw-r--r--doc/source/contributor/api-ref-guideline.rst2
-rw-r--r--doc/source/contributor/api.rst112
-rw-r--r--doc/source/contributor/development-environment.rst2
-rw-r--r--doc/source/contributor/how-to-get-involved.rst4
-rw-r--r--doc/source/contributor/index.rst21
-rw-r--r--doc/source/contributor/notifications.rst272
-rw-r--r--doc/source/contributor/process.rst10
-rw-r--r--doc/source/contributor/ptl-guide.rst78
-rw-r--r--doc/source/index.rst16
-rw-r--r--doc/source/install/overview.rst2
-rw-r--r--doc/source/install/verify.rst4
-rw-r--r--doc/source/reference/attach-volume.rst2
-rw-r--r--doc/source/reference/block-device-structs.rst9
-rw-r--r--doc/source/reference/database-migrations.rst12
-rw-r--r--doc/source/reference/glossary.rst8
-rw-r--r--doc/source/reference/index.rst11
-rw-r--r--doc/source/reference/isolate-aggregates.rst2
-rw-r--r--doc/source/reference/libvirt-distro-support-matrix.rst2
-rw-r--r--doc/source/reference/notifications.rst376
-rw-r--r--doc/source/reference/stable-api.rst2
-rw-r--r--doc/source/user/block-device-mapping.rst2
-rw-r--r--doc/source/user/certificate-validation.rst4
-rw-r--r--doc/source/user/feature-matrix-gp.ini21
-rw-r--r--doc/source/user/feature-matrix-hpc.ini6
-rw-r--r--doc/source/user/metadata.rst11
-rw-r--r--doc/source/user/support-matrix.ini97
-rw-r--r--doc/source/user/wsgi.rst14
-rw-r--r--doc/test/redirect-tests.txt3
-rw-r--r--etc/nova/api-paste.ini2
-rw-r--r--etc/nova/nova-config-generator.conf3
-rw-r--r--etc/nova/rootwrap.conf6
-rwxr-xr-xgate/post_test_hook.sh67
-rw-r--r--lower-constraints.txt165
-rw-r--r--mypy-files.txt7
-rw-r--r--nova/api/openstack/api_version_request.py12
-rw-r--r--nova/api/openstack/auth.py2
-rw-r--r--nova/api/openstack/compute/assisted_volume_snapshots.py10
-rw-r--r--nova/api/openstack/compute/attach_interfaces.py1
-rw-r--r--nova/api/openstack/compute/console_auth_tokens.py2
-rw-r--r--nova/api/openstack/compute/deferred_delete.py2
-rw-r--r--nova/api/openstack/compute/evacuate.py25
-rw-r--r--nova/api/openstack/compute/flavor_access.py9
-rw-r--r--nova/api/openstack/compute/keypairs.py16
-rw-r--r--nova/api/openstack/compute/limits.py3
-rw-r--r--nova/api/openstack/compute/migrate_server.py2
-rw-r--r--nova/api/openstack/compute/migrations.py2
-rw-r--r--nova/api/openstack/compute/quota_classes.py20
-rw-r--r--nova/api/openstack/compute/quota_sets.py23
-rw-r--r--nova/api/openstack/compute/remote_consoles.py3
-rw-r--r--nova/api/openstack/compute/rest_api_version_history.rst51
-rw-r--r--nova/api/openstack/compute/schemas/evacuate.py4
-rw-r--r--nova/api/openstack/compute/schemas/keypairs.py11
-rw-r--r--nova/api/openstack/compute/schemas/server_external_events.py4
-rw-r--r--nova/api/openstack/compute/schemas/servers.py14
-rw-r--r--nova/api/openstack/compute/schemas/shelve.py54
-rw-r--r--nova/api/openstack/compute/server_external_events.py8
-rw-r--r--nova/api/openstack/compute/server_groups.py17
-rw-r--r--nova/api/openstack/compute/server_metadata.py2
-rw-r--r--nova/api/openstack/compute/server_migrations.py2
-rw-r--r--nova/api/openstack/compute/server_topology.py3
-rw-r--r--nova/api/openstack/compute/servers.py23
-rw-r--r--nova/api/openstack/compute/services.py7
-rw-r--r--nova/api/openstack/compute/shelve.py62
-rw-r--r--nova/api/openstack/compute/views/servers.py10
-rw-r--r--nova/api/openstack/compute/volumes.py5
-rw-r--r--nova/api/openstack/identity.py22
-rw-r--r--nova/api/openstack/wsgi_app.py5
-rw-r--r--nova/api/validation/extra_specs/hw.py57
-rw-r--r--nova/api/validation/extra_specs/powervm.py271
-rw-r--r--nova/api/validation/parameter_types.py14
-rw-r--r--nova/api/validation/validators.py23
-rw-r--r--nova/block_device.py6
-rw-r--r--nova/cmd/manage.py181
-rw-r--r--nova/cmd/status.py75
-rw-r--r--nova/compute/api.py560
-rw-r--r--nova/compute/claims.py25
-rw-r--r--nova/compute/manager.py762
-rw-r--r--nova/compute/pci_placement_translator.py623
-rw-r--r--nova/compute/resource_tracker.py283
-rw-r--r--nova/compute/rpcapi.py32
-rw-r--r--nova/compute/utils.py29
-rw-r--r--nova/compute/vm_states.py3
-rw-r--r--nova/conductor/api.py7
-rw-r--r--nova/conductor/manager.py73
-rw-r--r--nova/conductor/rpcapi.py26
-rw-r--r--nova/conductor/tasks/cross_cell_migrate.py4
-rw-r--r--nova/conductor/tasks/live_migrate.py12
-rw-r--r--nova/conductor/tasks/migrate.py9
-rw-r--r--nova/conf/__init__.py2
-rw-r--r--nova/conf/api.py11
-rw-r--r--nova/conf/compute.py61
-rw-r--r--nova/conf/hyperv.py2
-rw-r--r--nova/conf/ironic.py1
-rw-r--r--nova/conf/keystone.py4
-rw-r--r--nova/conf/libvirt.py30
-rw-r--r--nova/conf/mks.py2
-rw-r--r--nova/conf/neutron.py2
-rw-r--r--nova/conf/pci.py165
-rw-r--r--nova/conf/powervm.py66
-rw-r--r--nova/conf/quota.py4
-rw-r--r--nova/conf/scheduler.py68
-rw-r--r--nova/conf/spice.py53
-rw-r--r--nova/conf/vmware.py5
-rw-r--r--nova/conf/workarounds.py81
-rw-r--r--nova/console/websocketproxy.py2
-rw-r--r--nova/context.py2
-rw-r--r--nova/db/api/legacy_migrations/README4
-rw-r--r--nova/db/api/legacy_migrations/migrate.cfg20
-rw-r--r--nova/db/api/legacy_migrations/versions/067_train.py602
-rw-r--r--nova/db/api/legacy_migrations/versions/068_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/069_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/070_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/071_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/072_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/073_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/074_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/075_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/076_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/077_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/078_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/079_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/080_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/081_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/082_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/083_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/084_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/085_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/086_placeholder.py22
-rw-r--r--nova/db/api/legacy_migrations/versions/087_placeholder.py22
-rw-r--r--nova/db/main/api.py368
-rw-r--r--nova/db/main/legacy_migrations/README4
-rw-r--r--nova/db/main/legacy_migrations/manage.py20
-rw-r--r--nova/db/main/legacy_migrations/migrate.cfg20
-rw-r--r--nova/db/main/legacy_migrations/versions/402_train.py1617
-rw-r--r--nova/db/main/legacy_migrations/versions/403_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/404_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/405_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/406_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/407_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/408_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/409_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/410_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/411_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/412_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/413_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/414_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/415_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/416_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/417_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/418_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/419_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/420_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/421_placeholder.py22
-rw-r--r--nova/db/main/legacy_migrations/versions/422_placeholder.py22
-rw-r--r--nova/db/main/migrations/versions/8f2f1571d55b_initial_version.py4
-rw-r--r--nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py35
-rw-r--r--nova/db/main/migrations/versions/ccb0fa1a2252_add_encryption_fields_to_.py59
-rw-r--r--nova/db/main/models.py14
-rw-r--r--nova/db/migration.py77
-rw-r--r--nova/exception.py182
-rw-r--r--nova/filesystem.py59
-rw-r--r--nova/hacking/checks.py69
-rw-r--r--nova/limit/__init__.py (renamed from nova/db/api/legacy_migrations/__init__.py)0
-rw-r--r--nova/limit/local.py234
-rw-r--r--nova/limit/placement.py217
-rw-r--r--nova/limit/utils.py (renamed from nova/db/api/legacy_migrations/manage.py)12
-rw-r--r--nova/locale/cs/LC_MESSAGES/nova.po383
-rw-r--r--nova/locale/de/LC_MESSAGES/nova.po457
-rw-r--r--nova/locale/es/LC_MESSAGES/nova.po414
-rw-r--r--nova/locale/fr/LC_MESSAGES/nova.po423
-rw-r--r--nova/locale/it/LC_MESSAGES/nova.po415
-rw-r--r--nova/locale/ja/LC_MESSAGES/nova.po415
-rw-r--r--nova/locale/ko_KR/LC_MESSAGES/nova.po406
-rw-r--r--nova/locale/pt_BR/LC_MESSAGES/nova.po409
-rw-r--r--nova/locale/ru/LC_MESSAGES/nova.po405
-rw-r--r--nova/locale/tr_TR/LC_MESSAGES/nova.po387
-rw-r--r--nova/locale/zh_CN/LC_MESSAGES/nova.po476
-rw-r--r--nova/locale/zh_TW/LC_MESSAGES/nova.po385
-rw-r--r--nova/manager.py7
-rw-r--r--nova/monkey_patch.py15
-rw-r--r--nova/network/constants.py46
-rw-r--r--nova/network/model.py16
-rw-r--r--nova/network/neutron.py384
-rw-r--r--nova/network/os_vif_util.py9
-rw-r--r--nova/notifications/objects/image.py7
-rw-r--r--nova/objects/aggregate.py43
-rw-r--r--nova/objects/block_device.py52
-rw-r--r--nova/objects/cell_mapping.py12
-rw-r--r--nova/objects/compute_node.py15
-rw-r--r--nova/objects/external_event.py6
-rw-r--r--nova/objects/fields.py34
-rw-r--r--nova/objects/flavor.py14
-rw-r--r--nova/objects/host_mapping.py19
-rw-r--r--nova/objects/image_meta.py33
-rw-r--r--nova/objects/instance.py41
-rw-r--r--nova/objects/instance_group.py26
-rw-r--r--nova/objects/instance_info_cache.py27
-rw-r--r--nova/objects/instance_mapping.py24
-rw-r--r--nova/objects/instance_pci_requests.py14
-rw-r--r--nova/objects/migrate_data.py41
-rw-r--r--nova/objects/migration.py23
-rw-r--r--nova/objects/pci_device.py92
-rw-r--r--nova/objects/request_spec.py120
-rw-r--r--nova/objects/service.py50
-rw-r--r--nova/pci/devspec.py148
-rw-r--r--nova/pci/manager.py49
-rw-r--r--nova/pci/request.py18
-rw-r--r--nova/pci/stats.py391
-rw-r--r--nova/pci/utils.py64
-rw-r--r--nova/pci/whitelist.py12
-rw-r--r--nova/policies/admin_actions.py8
-rw-r--r--nova/policies/admin_password.py4
-rw-r--r--nova/policies/aggregates.py36
-rw-r--r--nova/policies/assisted_volume_snapshots.py22
-rw-r--r--nova/policies/attach_interfaces.py16
-rw-r--r--nova/policies/availability_zone.py6
-rw-r--r--nova/policies/baremetal_nodes.py8
-rw-r--r--nova/policies/base.py78
-rw-r--r--nova/policies/console_auth_tokens.py4
-rw-r--r--nova/policies/console_output.py4
-rw-r--r--nova/policies/create_backup.py4
-rw-r--r--nova/policies/deferred_delete.py8
-rw-r--r--nova/policies/evacuate.py4
-rw-r--r--nova/policies/extended_server_attributes.py4
-rw-r--r--nova/policies/extensions.py2
-rw-r--r--nova/policies/flavor_access.py16
-rw-r--r--nova/policies/flavor_extra_specs.py45
-rw-r--r--nova/policies/flavor_manage.py12
-rw-r--r--nova/policies/floating_ip_pools.py2
-rw-r--r--nova/policies/floating_ips.py24
-rw-r--r--nova/policies/hosts.py24
-rw-r--r--nova/policies/hypervisors.py28
-rw-r--r--nova/policies/instance_actions.py16
-rw-r--r--nova/policies/instance_usage_audit_log.py8
-rw-r--r--nova/policies/ips.py8
-rw-r--r--nova/policies/keypairs.py16
-rw-r--r--nova/policies/limits.py6
-rw-r--r--nova/policies/lock_server.py12
-rw-r--r--nova/policies/migrate_server.py8
-rw-r--r--nova/policies/migrations.py4
-rw-r--r--nova/policies/multinic.py8
-rw-r--r--nova/policies/networks.py8
-rw-r--r--nova/policies/pause_server.py8
-rw-r--r--nova/policies/quota_class_sets.py8
-rw-r--r--nova/policies/quota_sets.py21
-rw-r--r--nova/policies/remote_consoles.py4
-rw-r--r--nova/policies/rescue.py8
-rw-r--r--nova/policies/security_groups.py40
-rw-r--r--nova/policies/server_diagnostics.py4
-rw-r--r--nova/policies/server_external_events.py12
-rw-r--r--nova/policies/server_groups.py27
-rw-r--r--nova/policies/server_metadata.py24
-rw-r--r--nova/policies/server_password.py8
-rw-r--r--nova/policies/server_tags.py24
-rw-r--r--nova/policies/server_topology.py8
-rw-r--r--nova/policies/servers.py173
-rw-r--r--nova/policies/servers_migrations.py16
-rw-r--r--nova/policies/services.py12
-rw-r--r--nova/policies/shelve.py24
-rw-r--r--nova/policies/simple_tenant_usage.py8
-rw-r--r--nova/policies/suspend_server.py8
-rw-r--r--nova/policies/tenant_networks.py8
-rw-r--r--nova/policies/volumes.py40
-rw-r--r--nova/policies/volumes_attachments.py31
-rw-r--r--nova/policy.py12
-rw-r--r--nova/quota.py171
-rw-r--r--nova/rpc.py16
-rw-r--r--nova/scheduler/client/report.py96
-rw-r--r--nova/scheduler/filters/__init__.py44
-rw-r--r--nova/scheduler/filters/numa_topology_filter.py24
-rw-r--r--nova/scheduler/filters/pci_passthrough_filter.py23
-rw-r--r--nova/scheduler/host_manager.py34
-rw-r--r--nova/scheduler/manager.py140
-rw-r--r--nova/scheduler/request_filter.py71
-rw-r--r--nova/scheduler/rpcapi.py12
-rw-r--r--nova/scheduler/utils.py20
-rw-r--r--nova/scheduler/weights/hypervisor_version.py39
-rw-r--r--nova/service.py4
-rw-r--r--nova/test.py45
-rw-r--r--nova/tests/fixtures/__init__.py2
-rw-r--r--nova/tests/fixtures/cinder.py30
-rw-r--r--nova/tests/fixtures/filesystem.py81
-rw-r--r--nova/tests/fixtures/glance.py31
-rw-r--r--nova/tests/fixtures/libvirt.py151
-rw-r--r--nova/tests/fixtures/libvirt_data.py204
-rw-r--r--nova/tests/fixtures/libvirt_imagebackend.py18
-rw-r--r--nova/tests/fixtures/neutron.py7
-rw-r--r--nova/tests/fixtures/notifications.py6
-rw-r--r--nova/tests/fixtures/nova.py375
-rw-r--r--nova/tests/fixtures/os_brick.py3
-rw-r--r--nova/tests/fixtures/policy.py4
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl118
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl76
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl5
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl5
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json.tpl8
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json.tpl9
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-post-req.json.tpl7
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-az.json.tpl5
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve.json.tpl4
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-shelve.json.tpl (renamed from nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-null.json.tpl)0
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az-host.json.tpl6
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az.json.tpl5
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json.tpl6
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host.json.tpl5
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json.tpl5
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve.json.tpl (renamed from doc/api_samples/os-shelve/v2.77/os-unshelve-null.json)2
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl80
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl15
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl21
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl22
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl81
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl8
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl78
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl88
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl24
-rw-r--r--nova/tests/functional/api_sample_tests/test_baremetal_nodes.py2
-rw-r--r--nova/tests/functional/api_sample_tests/test_compare_result.py2
-rw-r--r--nova/tests/functional/api_sample_tests/test_create_backup.py2
-rw-r--r--nova/tests/functional/api_sample_tests/test_evacuate.py61
-rw-r--r--nova/tests/functional/api_sample_tests/test_hypervisors.py2
-rw-r--r--nova/tests/functional/api_sample_tests/test_images.py30
-rw-r--r--nova/tests/functional/api_sample_tests/test_keypairs.py63
-rw-r--r--nova/tests/functional/api_sample_tests/test_migrate_server.py3
-rw-r--r--nova/tests/functional/api_sample_tests/test_networks.py2
-rw-r--r--nova/tests/functional/api_sample_tests/test_remote_consoles.py20
-rw-r--r--nova/tests/functional/api_sample_tests/test_server_migrations.py2
-rw-r--r--nova/tests/functional/api_sample_tests/test_server_password.py2
-rw-r--r--nova/tests/functional/api_sample_tests/test_servers.py45
-rw-r--r--nova/tests/functional/api_sample_tests/test_shelve.py297
-rw-r--r--nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py2
-rw-r--r--nova/tests/functional/compute/test_init_host.py2
-rw-r--r--nova/tests/functional/compute/test_live_migration.py3
-rw-r--r--nova/tests/functional/compute/test_migration_list.py6
-rw-r--r--nova/tests/functional/compute/test_resource_tracker.py13
-rw-r--r--nova/tests/functional/db/test_aggregate.py2
-rw-r--r--nova/tests/functional/db/test_compute_api.py3
-rw-r--r--nova/tests/functional/db/test_compute_node.py2
-rw-r--r--nova/tests/functional/db/test_host_mapping.py3
-rw-r--r--nova/tests/functional/db/test_instance_group.py3
-rw-r--r--nova/tests/functional/db/test_instance_mapping.py3
-rw-r--r--nova/tests/functional/db/test_quota.py3
-rw-r--r--nova/tests/functional/db/test_virtual_interface.py3
-rw-r--r--nova/tests/functional/integrated_helpers.py60
-rw-r--r--nova/tests/functional/libvirt/base.py189
-rw-r--r--nova/tests/functional/libvirt/test_device_bus_migration.py407
-rw-r--r--nova/tests/functional/libvirt/test_evacuate.py9
-rw-r--r--nova/tests/functional/libvirt/test_live_migration.py119
-rw-r--r--nova/tests/functional/libvirt/test_machine_type.py30
-rw-r--r--nova/tests/functional/libvirt/test_numa_live_migration.py12
-rw-r--r--nova/tests/functional/libvirt/test_numa_servers.py82
-rw-r--r--nova/tests/functional/libvirt/test_pci_in_placement.py1997
-rw-r--r--nova/tests/functional/libvirt/test_pci_sriov_servers.py2784
-rw-r--r--nova/tests/functional/libvirt/test_power_manage.py270
-rw-r--r--nova/tests/functional/libvirt/test_report_cpu_traits.py8
-rw-r--r--nova/tests/functional/libvirt/test_reshape.py23
-rw-r--r--nova/tests/functional/libvirt/test_uefi.py3
-rw-r--r--nova/tests/functional/libvirt/test_vgpu.py35
-rw-r--r--nova/tests/functional/libvirt/test_vpmem.py7
-rw-r--r--nova/tests/functional/libvirt/test_vtpm.py4
-rw-r--r--nova/tests/functional/notification_sample_tests/notification_sample_base.py2
-rw-r--r--nova/tests/functional/notification_sample_tests/test_compute_task.py7
-rw-r--r--nova/tests/functional/notification_sample_tests/test_instance.py32
-rw-r--r--nova/tests/functional/notification_sample_tests/test_keypair.py5
-rw-r--r--nova/tests/functional/notification_sample_tests/test_libvirt.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1554631.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1595962.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1628606.py60
-rw-r--r--nova/tests/functional/regressions/test_bug_1669054.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1713783.py6
-rw-r--r--nova/tests/functional/regressions/test_bug_1732947.py4
-rw-r--r--nova/tests/functional/regressions/test_bug_1764883.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1781286.py33
-rw-r--r--nova/tests/functional/regressions/test_bug_1823370.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1830747.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1831771.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1843090.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1843708.py6
-rw-r--r--nova/tests/functional/regressions/test_bug_1845291.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1849165.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1853009.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1862633.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1879878.py3
-rw-r--r--nova/tests/functional/regressions/test_bug_1888395.py42
-rw-r--r--nova/tests/functional/regressions/test_bug_1889108.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1890244.py96
-rw-r--r--nova/tests/functional/regressions/test_bug_1893284.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1896463.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1899835.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1902925.py5
-rw-r--r--nova/tests/functional/regressions/test_bug_1914777.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1922053.py6
-rw-r--r--nova/tests/functional/regressions/test_bug_1928063.py6
-rw-r--r--nova/tests/functional/regressions/test_bug_1937084.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1937375.py2
-rw-r--r--nova/tests/functional/regressions/test_bug_1944619.py76
-rw-r--r--nova/tests/functional/regressions/test_bug_1951656.py73
-rw-r--r--nova/tests/functional/regressions/test_bug_1978983.py71
-rw-r--r--nova/tests/functional/regressions/test_bug_1980720.py68
-rw-r--r--nova/tests/functional/regressions/test_bug_1983753.py177
-rw-r--r--nova/tests/functional/regressions/test_bug_1995153.py107
-rw-r--r--nova/tests/functional/test_aggregates.py24
-rw-r--r--nova/tests/functional/test_availability_zones.py448
-rw-r--r--nova/tests/functional/test_boot_from_volume.py42
-rw-r--r--nova/tests/functional/test_cold_migrate.py2
-rw-r--r--nova/tests/functional/test_compute_mgr.py3
-rw-r--r--nova/tests/functional/test_cross_cell_migrate.py2
-rw-r--r--nova/tests/functional/test_ephemeral_encryption.py381
-rw-r--r--nova/tests/functional/test_images.py8
-rw-r--r--nova/tests/functional/test_instance_actions.py12
-rw-r--r--nova/tests/functional/test_ip_allocation.py53
-rw-r--r--nova/tests/functional/test_monkey_patch.py45
-rw-r--r--nova/tests/functional/test_nova_manage.py2
-rw-r--r--nova/tests/functional/test_policy.py2
-rw-r--r--nova/tests/functional/test_report_client.py70
-rw-r--r--nova/tests/functional/test_routed_networks.py2
-rw-r--r--nova/tests/functional/test_server_faults.py2
-rw-r--r--nova/tests/functional/test_server_group.py80
-rw-r--r--nova/tests/functional/test_server_rescue.py86
-rw-r--r--nova/tests/functional/test_servers.py209
-rw-r--r--nova/tests/functional/test_servers_provider_tree.py6
-rw-r--r--nova/tests/functional/test_servers_resource_request.py33
-rw-r--r--nova/tests/functional/test_service.py85
-rw-r--r--nova/tests/functional/test_unified_limits.py217
-rw-r--r--nova/tests/unit/accelerator/test_cyborg.py8
-rw-r--r--nova/tests/unit/api/openstack/compute/admin_only_action_common.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_admin_password.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_aggregates.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_api.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_attach_interfaces.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_availability_zone.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py5
-rw-r--r--nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_console_output.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_create_backup.py7
-rw-r--r--nova/tests/unit/api/openstack/compute/test_deferred_delete.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_disk_config.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_evacuate.py32
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavor_access.py27
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavor_manage.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavors.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_floating_ips.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_hosts.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_hypervisors.py396
-rw-r--r--nova/tests/unit/api/openstack/compute/test_image_metadata.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_images.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_instance_actions.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_keypairs.py172
-rw-r--r--nova/tests/unit/api/openstack/compute/test_limits.py327
-rw-r--r--nova/tests/unit/api/openstack/compute/test_lock_server.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_microversions.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_migrate_server.py6
-rw-r--r--nova/tests/unit/api/openstack/compute/test_migrations.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_multinic.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_networks.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_quota_classes.py224
-rw-r--r--nova/tests/unit/api/openstack/compute/test_quotas.py480
-rw-r--r--nova/tests/unit/api/openstack/compute/test_remote_consoles.py17
-rw-r--r--nova/tests/unit/api/openstack/compute/test_rescue.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_security_groups.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_actions.py41
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_diagnostics.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_external_events.py5
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_group_quotas.py108
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_groups.py100
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_metadata.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_migrations.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_password.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_reset_state.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_start_stop.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_tags.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_topology.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_servers.py41
-rw-r--r--nova/tests/unit/api/openstack/compute/test_services.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_shelve.py258
-rw-r--r--nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_snapshots.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_suspend_server.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_tenant_networks.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_volumes.py12
-rw-r--r--nova/tests/unit/api/openstack/fakes.py14
-rw-r--r--nova/tests/unit/api/openstack/test_common.py3
-rw-r--r--nova/tests/unit/api/openstack/test_faults.py3
-rw-r--r--nova/tests/unit/api/openstack/test_requestlog.py2
-rw-r--r--nova/tests/unit/api/openstack/test_wsgi.py3
-rw-r--r--nova/tests/unit/api/openstack/test_wsgi_app.py17
-rw-r--r--nova/tests/unit/api/test_auth.py3
-rw-r--r--nova/tests/unit/api/test_wsgi.py2
-rw-r--r--nova/tests/unit/api/validation/extra_specs/test_validators.py11
-rw-r--r--nova/tests/unit/cmd/test_baseproxy.py2
-rw-r--r--nova/tests/unit/cmd/test_common.py2
-rw-r--r--nova/tests/unit/cmd/test_compute.py2
-rw-r--r--nova/tests/unit/cmd/test_manage.py318
-rw-r--r--nova/tests/unit/cmd/test_nova_api.py2
-rw-r--r--nova/tests/unit/cmd/test_policy.py19
-rw-r--r--nova/tests/unit/cmd/test_scheduler.py2
-rw-r--r--nova/tests/unit/cmd/test_status.py62
-rw-r--r--nova/tests/unit/compute/monitors/cpu/test_virt_driver.py2
-rw-r--r--nova/tests/unit/compute/monitors/test_monitors.py2
-rw-r--r--nova/tests/unit/compute/provider_config_data/v1/validation_error_test_data.yaml4
-rw-r--r--nova/tests/unit/compute/test_api.py622
-rw-r--r--nova/tests/unit/compute/test_claims.py8
-rw-r--r--nova/tests/unit/compute/test_compute.py346
-rw-r--r--nova/tests/unit/compute/test_compute_mgr.py908
-rw-r--r--nova/tests/unit/compute/test_flavors.py2
-rw-r--r--nova/tests/unit/compute/test_host_api.py3
-rw-r--r--nova/tests/unit/compute/test_instance_list.py3
-rw-r--r--nova/tests/unit/compute/test_keypairs.py111
-rw-r--r--nova/tests/unit/compute/test_multi_cell_list.py3
-rw-r--r--nova/tests/unit/compute/test_pci_placement_translator.py291
-rw-r--r--nova/tests/unit/compute/test_provider_config.py14
-rw-r--r--nova/tests/unit/compute/test_resource_tracker.py543
-rw-r--r--nova/tests/unit/compute/test_rpcapi.py88
-rw-r--r--nova/tests/unit/compute/test_shelve.py644
-rw-r--r--nova/tests/unit/compute/test_utils.py70
-rw-r--r--nova/tests/unit/compute/test_virtapi.py158
-rw-r--r--nova/tests/unit/conductor/tasks/test_base.py2
-rw-r--r--nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py6
-rw-r--r--nova/tests/unit/conductor/tasks/test_live_migrate.py41
-rw-r--r--nova/tests/unit/conductor/tasks/test_migrate.py3
-rw-r--r--nova/tests/unit/conductor/test_conductor.py176
-rw-r--r--nova/tests/unit/console/rfb/test_auth.py2
-rw-r--r--nova/tests/unit/console/rfb/test_authnone.py2
-rw-r--r--nova/tests/unit/console/rfb/test_authvencrypt.py2
-rw-r--r--nova/tests/unit/console/securityproxy/test_rfb.py2
-rw-r--r--nova/tests/unit/console/test_serial.py3
-rw-r--r--nova/tests/unit/console/test_websocketproxy.py67
-rw-r--r--nova/tests/unit/db/api/test_api.py2
-rw-r--r--nova/tests/unit/db/api/test_migrations.py47
-rw-r--r--nova/tests/unit/db/main/test_api.py557
-rw-r--r--nova/tests/unit/db/main/test_migrations.py89
-rw-r--r--nova/tests/unit/db/test_migration.py195
-rw-r--r--nova/tests/unit/fake_policy.py1
-rw-r--r--nova/tests/unit/fixtures/test_libvirt.py3
-rw-r--r--nova/tests/unit/image/test_glance.py2
-rw-r--r--nova/tests/unit/limit/__init__.py (renamed from nova/db/api/legacy_migrations/versions/__init__.py)0
-rw-r--r--nova/tests/unit/limit/test_local.py256
-rw-r--r--nova/tests/unit/limit/test_placement.py353
-rw-r--r--nova/tests/unit/network/test_network_info.py46
-rw-r--r--nova/tests/unit/network/test_neutron.py1072
-rw-r--r--nova/tests/unit/network/test_os_vif_util.py33
-rw-r--r--nova/tests/unit/network/test_security_group.py3
-rw-r--r--nova/tests/unit/notifications/objects/test_flavor.py2
-rw-r--r--nova/tests/unit/notifications/objects/test_instance.py3
-rw-r--r--nova/tests/unit/notifications/objects/test_notification.py4
-rw-r--r--nova/tests/unit/notifications/objects/test_service.py2
-rw-r--r--nova/tests/unit/notifications/test_base.py2
-rw-r--r--nova/tests/unit/objects/test_aggregate.py3
-rw-r--r--nova/tests/unit/objects/test_block_device.py16
-rw-r--r--nova/tests/unit/objects/test_build_request.py3
-rw-r--r--nova/tests/unit/objects/test_cell_mapping.py3
-rw-r--r--nova/tests/unit/objects/test_compute_node.py31
-rw-r--r--nova/tests/unit/objects/test_console_auth_token.py2
-rw-r--r--nova/tests/unit/objects/test_ec2.py3
-rw-r--r--nova/tests/unit/objects/test_external_event.py2
-rw-r--r--nova/tests/unit/objects/test_fields.py6
-rw-r--r--nova/tests/unit/objects/test_flavor.py2
-rw-r--r--nova/tests/unit/objects/test_host_mapping.py3
-rw-r--r--nova/tests/unit/objects/test_image_meta.py87
-rw-r--r--nova/tests/unit/objects/test_instance.py178
-rw-r--r--nova/tests/unit/objects/test_instance_action.py2
-rw-r--r--nova/tests/unit/objects/test_instance_device_metadata.py3
-rw-r--r--nova/tests/unit/objects/test_instance_fault.py3
-rw-r--r--nova/tests/unit/objects/test_instance_group.py4
-rw-r--r--nova/tests/unit/objects/test_instance_info_cache.py27
-rw-r--r--nova/tests/unit/objects/test_instance_mapping.py3
-rw-r--r--nova/tests/unit/objects/test_instance_numa.py3
-rw-r--r--nova/tests/unit/objects/test_instance_pci_requests.py20
-rw-r--r--nova/tests/unit/objects/test_keypair.py3
-rw-r--r--nova/tests/unit/objects/test_migrate_data.py65
-rw-r--r--nova/tests/unit/objects/test_migration.py3
-rw-r--r--nova/tests/unit/objects/test_migration_context.py3
-rw-r--r--nova/tests/unit/objects/test_objects.py28
-rw-r--r--nova/tests/unit/objects/test_pci_device.py22
-rw-r--r--nova/tests/unit/objects/test_quotas.py2
-rw-r--r--nova/tests/unit/objects/test_request_spec.py287
-rw-r--r--nova/tests/unit/objects/test_resource.py3
-rw-r--r--nova/tests/unit/objects/test_security_group.py3
-rw-r--r--nova/tests/unit/objects/test_service.py3
-rw-r--r--nova/tests/unit/objects/test_tag.py2
-rw-r--r--nova/tests/unit/objects/test_task_log.py2
-rw-r--r--nova/tests/unit/objects/test_trusted_certs.py2
-rw-r--r--nova/tests/unit/objects/test_virtual_interface.py3
-rw-r--r--nova/tests/unit/objects/test_volume_usage.py3
-rw-r--r--nova/tests/unit/pci/fakes.py2
-rw-r--r--nova/tests/unit/pci/test_devspec.py277
-rw-r--r--nova/tests/unit/pci/test_manager.py351
-rw-r--r--nova/tests/unit/pci/test_request.py22
-rw-r--r--nova/tests/unit/pci/test_stats.py1234
-rw-r--r--nova/tests/unit/pci/test_utils.py183
-rw-r--r--nova/tests/unit/policies/base.py165
-rw-r--r--nova/tests/unit/policies/test_admin_actions.py75
-rw-r--r--nova/tests/unit/policies/test_admin_password.py77
-rw-r--r--nova/tests/unit/policies/test_aggregates.py157
-rw-r--r--nova/tests/unit/policies/test_assisted_volume_snapshots.py68
-rw-r--r--nova/tests/unit/policies/test_attach_interfaces.py158
-rw-r--r--nova/tests/unit/policies/test_availability_zone.py85
-rw-r--r--nova/tests/unit/policies/test_baremetal_nodes.py79
-rw-r--r--nova/tests/unit/policies/test_console_auth_tokens.py61
-rw-r--r--nova/tests/unit/policies/test_console_output.py73
-rw-r--r--nova/tests/unit/policies/test_create_backup.py77
-rw-r--r--nova/tests/unit/policies/test_deferred_delete.py95
-rw-r--r--nova/tests/unit/policies/test_evacuate.py63
-rw-r--r--nova/tests/unit/policies/test_extensions.py13
-rw-r--r--nova/tests/unit/policies/test_flavor_access.py165
-rw-r--r--nova/tests/unit/policies/test_flavor_extra_specs.py329
-rw-r--r--nova/tests/unit/policies/test_flavor_manage.py70
-rw-r--r--nova/tests/unit/policies/test_floating_ip_pools.py12
-rw-r--r--nova/tests/unit/policies/test_floating_ips.py194
-rw-r--r--nova/tests/unit/policies/test_hosts.py160
-rw-r--r--nova/tests/unit/policies/test_hypervisors.py130
-rw-r--r--nova/tests/unit/policies/test_instance_actions.py141
-rw-r--r--nova/tests/unit/policies/test_instance_usage_audit_log.py78
-rw-r--r--nova/tests/unit/policies/test_keypairs.py159
-rw-r--r--nova/tests/unit/policies/test_limits.py113
-rw-r--r--nova/tests/unit/policies/test_lock_server.py163
-rw-r--r--nova/tests/unit/policies/test_migrate_server.py92
-rw-r--r--nova/tests/unit/policies/test_migrations.py55
-rw-r--r--nova/tests/unit/policies/test_multinic.py91
-rw-r--r--nova/tests/unit/policies/test_networks.py71
-rw-r--r--nova/tests/unit/policies/test_pause_server.py86
-rw-r--r--nova/tests/unit/policies/test_quota_class_sets.py104
-rw-r--r--nova/tests/unit/policies/test_quota_sets.py205
-rw-r--r--nova/tests/unit/policies/test_remote_consoles.py73
-rw-r--r--nova/tests/unit/policies/test_rescue.py94
-rw-r--r--nova/tests/unit/policies/test_security_groups.py336
-rw-r--r--nova/tests/unit/policies/test_server_diagnostics.py77
-rw-r--r--nova/tests/unit/policies/test_server_external_events.py52
-rw-r--r--nova/tests/unit/policies/test_server_groups.py277
-rw-r--r--nova/tests/unit/policies/test_server_ips.py77
-rw-r--r--nova/tests/unit/policies/test_server_metadata.py155
-rw-r--r--nova/tests/unit/policies/test_server_migrations.py150
-rw-r--r--nova/tests/unit/policies/test_server_password.py129
-rw-r--r--nova/tests/unit/policies/test_server_tags.py150
-rw-r--r--nova/tests/unit/policies/test_server_topology.py103
-rw-r--r--nova/tests/unit/policies/test_servers.py864
-rw-r--r--nova/tests/unit/policies/test_services.py171
-rw-r--r--nova/tests/unit/policies/test_shelve.py128
-rw-r--r--nova/tests/unit/policies/test_simple_tenant_usage.py101
-rw-r--r--nova/tests/unit/policies/test_suspend_server.py89
-rw-r--r--nova/tests/unit/policies/test_tenant_networks.py80
-rw-r--r--nova/tests/unit/policies/test_volumes.py436
-rw-r--r--nova/tests/unit/privsep/test_fs.py2
-rw-r--r--nova/tests/unit/privsep/test_idmapshift.py2
-rw-r--r--nova/tests/unit/privsep/test_libvirt.py3
-rw-r--r--nova/tests/unit/privsep/test_linux_net.py2
-rw-r--r--nova/tests/unit/privsep/test_path.py3
-rw-r--r--nova/tests/unit/privsep/test_qemu.py2
-rw-r--r--nova/tests/unit/privsep/test_utils.py2
-rw-r--r--nova/tests/unit/scheduler/client/test_query.py3
-rw-r--r--nova/tests/unit/scheduler/client/test_report.py96
-rw-r--r--nova/tests/unit/scheduler/fakes.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_affinity_filters.py3
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_availability_zone_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_compute_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_io_ops_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_num_instances_filters.py2
-rw-r--r--nova/tests/unit/scheduler/filters/test_numa_topology_filters.py97
-rw-r--r--nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py115
-rw-r--r--nova/tests/unit/scheduler/filters/test_type_filters.py2
-rw-r--r--nova/tests/unit/scheduler/test_filters.py2
-rw-r--r--nova/tests/unit/scheduler/test_host_manager.py14
-rw-r--r--nova/tests/unit/scheduler/test_manager.py910
-rw-r--r--nova/tests/unit/scheduler/test_request_filter.py125
-rw-r--r--nova/tests/unit/scheduler/test_rpcapi.py3
-rw-r--r--nova/tests/unit/scheduler/test_utils.py3
-rw-r--r--nova/tests/unit/scheduler/weights/test_weights_affinity.py2
-rw-r--r--nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py97
-rw-r--r--nova/tests/unit/scheduler/weights/test_weights_metrics.py2
-rw-r--r--nova/tests/unit/scheduler/weights/test_weights_pci.py2
-rw-r--r--nova/tests/unit/servicegroup/test_api.py2
-rw-r--r--nova/tests/unit/servicegroup/test_db_servicegroup.py3
-rw-r--r--nova/tests/unit/servicegroup/test_mc_servicegroup.py2
-rw-r--r--nova/tests/unit/storage/test_rbd.py4
-rw-r--r--nova/tests/unit/test_availability_zones.py3
-rw-r--r--nova/tests/unit/test_block_device.py3
-rw-r--r--nova/tests/unit/test_cache.py2
-rw-r--r--nova/tests/unit/test_cinder.py2
-rw-r--r--nova/tests/unit/test_conf.py2
-rw-r--r--nova/tests/unit/test_configdrive2.py2
-rw-r--r--nova/tests/unit/test_context.py4
-rw-r--r--nova/tests/unit/test_crypto.py2
-rw-r--r--nova/tests/unit/test_exception_wrapper.py2
-rw-r--r--nova/tests/unit/test_filesystem.py52
-rw-r--r--nova/tests/unit/test_fixtures.py16
-rw-r--r--nova/tests/unit/test_hacking.py46
-rw-r--r--nova/tests/unit/test_identity.py4
-rw-r--r--nova/tests/unit/test_json_ref.py2
-rw-r--r--nova/tests/unit/test_metadata.py27
-rw-r--r--nova/tests/unit/test_notifications.py4
-rw-r--r--nova/tests/unit/test_notifier.py2
-rw-r--r--nova/tests/unit/test_policy.py92
-rw-r--r--nova/tests/unit/test_quota.py266
-rw-r--r--nova/tests/unit/test_rpc.py47
-rw-r--r--nova/tests/unit/test_service.py14
-rw-r--r--nova/tests/unit/test_service_auth.py3
-rw-r--r--nova/tests/unit/test_test.py17
-rw-r--r--nova/tests/unit/test_utils.py2
-rw-r--r--nova/tests/unit/test_weights.py2
-rw-r--r--nova/tests/unit/test_wsgi.py2
-rw-r--r--nova/tests/unit/utils.py3
-rw-r--r--nova/tests/unit/virt/disk/mount/test_api.py5
-rw-r--r--nova/tests/unit/virt/disk/mount/test_loop.py3
-rw-r--r--nova/tests/unit/virt/disk/mount/test_nbd.py2
-rw-r--r--nova/tests/unit/virt/disk/test_api.py3
-rw-r--r--nova/tests/unit/virt/disk/vfs/test_guestfs.py2
-rw-r--r--nova/tests/unit/virt/hyperv/__init__.py20
-rw-r--r--nova/tests/unit/virt/hyperv/test_base.py3
-rw-r--r--nova/tests/unit/virt/hyperv/test_block_device_manager.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_driver.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_eventhandler.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_hostops.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_imagecache.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_livemigrationops.py3
-rw-r--r--nova/tests/unit/virt/hyperv/test_migrationops.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_pathutils.py3
-rw-r--r--nova/tests/unit/virt/hyperv/test_rdpconsoleops.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_serialconsolehandler.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_serialconsoleops.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_serialproxy.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_snapshotops.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_vif.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmops.py21
-rw-r--r--nova/tests/unit/virt/hyperv/test_volumeops.py3
-rw-r--r--nova/tests/unit/virt/ironic/test_client_wrapper.py3
-rw-r--r--nova/tests/unit/virt/ironic/test_driver.py76
-rw-r--r--nova/tests/unit/virt/libvirt/cpu/__init__.py (renamed from nova/db/main/legacy_migrations/__init__.py)0
-rw-r--r--nova/tests/unit/virt/libvirt/cpu/test_api.py194
-rw-r--r--nova/tests/unit/virt/libvirt/cpu/test_core.py122
-rw-r--r--nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py3
-rw-r--r--nova/tests/unit/virt/libvirt/storage/test_lvm.py3
-rw-r--r--nova/tests/unit/virt/libvirt/test_blockinfo.py102
-rw-r--r--nova/tests/unit/virt/libvirt/test_config.py217
-rw-r--r--nova/tests/unit/virt/libvirt/test_designer.py2
-rw-r--r--nova/tests/unit/virt/libvirt/test_driver.py1862
-rw-r--r--nova/tests/unit/virt/libvirt/test_guest.py39
-rw-r--r--nova/tests/unit/virt/libvirt/test_host.py320
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagebackend.py38
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagecache.py2
-rw-r--r--nova/tests/unit/virt/libvirt/test_machine_type_utils.py3
-rw-r--r--nova/tests/unit/virt/libvirt/test_migration.py44
-rw-r--r--nova/tests/unit/virt/libvirt/test_utils.py113
-rw-r--r--nova/tests/unit/virt/libvirt/test_vif.py41
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_fs.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_iscsi.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_lightos.py79
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_mount.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_net.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_nfs.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_nvme.py35
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_quobyte.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_remotefs.py3
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_scaleio.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_smbfs.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_storpool.py2
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_volume.py3
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_vzstorage.py2
-rw-r--r--nova/tests/unit/virt/powervm/__init__.py56
-rw-r--r--nova/tests/unit/virt/powervm/disk/__init__.py0
-rw-r--r--nova/tests/unit/virt/powervm/disk/fake_adapter.py52
-rw-r--r--nova/tests/unit/virt/powervm/disk/test_driver.py59
-rw-r--r--nova/tests/unit/virt/powervm/disk/test_localdisk.py312
-rw-r--r--nova/tests/unit/virt/powervm/disk/test_ssp.py424
-rw-r--r--nova/tests/unit/virt/powervm/tasks/__init__.py0
-rw-r--r--nova/tests/unit/virt/powervm/tasks/test_image.py68
-rw-r--r--nova/tests/unit/virt/powervm/tasks/test_network.py323
-rw-r--r--nova/tests/unit/virt/powervm/tasks/test_storage.py354
-rw-r--r--nova/tests/unit/virt/powervm/tasks/test_vm.py134
-rw-r--r--nova/tests/unit/virt/powervm/test_driver.py649
-rw-r--r--nova/tests/unit/virt/powervm/test_host.py62
-rw-r--r--nova/tests/unit/virt/powervm/test_image.py55
-rw-r--r--nova/tests/unit/virt/powervm/test_media.py203
-rw-r--r--nova/tests/unit/virt/powervm/test_mgmt.py193
-rw-r--r--nova/tests/unit/virt/powervm/test_vif.py327
-rw-r--r--nova/tests/unit/virt/powervm/test_vm.py563
-rw-r--r--nova/tests/unit/virt/powervm/volume/__init__.py0
-rw-r--r--nova/tests/unit/virt/powervm/volume/test_fcvscsi.py456
-rw-r--r--nova/tests/unit/virt/test_block_device.py319
-rw-r--r--nova/tests/unit/virt/test_hardware.py588
-rw-r--r--nova/tests/unit/virt/test_imagecache.py5
-rw-r--r--nova/tests/unit/virt/test_images.py48
-rw-r--r--nova/tests/unit/virt/test_netutils.py23
-rw-r--r--nova/tests/unit/virt/test_node.py142
-rw-r--r--nova/tests/unit/virt/test_osinfo.py3
-rw-r--r--nova/tests/unit/virt/test_virt.py29
-rw-r--r--nova/tests/unit/virt/test_virt_drivers.py17
-rw-r--r--nova/tests/unit/virt/vmwareapi/__init__.py20
-rw-r--r--nova/tests/unit/virt/vmwareapi/fake.py203
-rw-r--r--nova/tests/unit/virt/vmwareapi/stubs.py7
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_configdrive.py3
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_driver_api.py65
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_ds_util.py2
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_imagecache.py2
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_images.py10
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_network_util.py10
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_session.py208
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vif.py3
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vim_util.py6
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vm_util.py91
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vmops.py47
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_volumeops.py192
-rw-r--r--nova/tests/unit/virt/zvm/__init__.py20
-rw-r--r--nova/tests/unit/virt/zvm/test_driver.py3
-rw-r--r--nova/tests/unit/virt/zvm/test_guest.py2
-rw-r--r--nova/tests/unit/virt/zvm/test_hypervisor.py2
-rw-r--r--nova/tests/unit/virt/zvm/test_utils.py2
-rw-r--r--nova/tests/unit/volume/test_cinder.py54
-rw-r--r--nova/utils.py48
-rw-r--r--nova/virt/block_device.py89
-rw-r--r--nova/virt/driver.py62
-rw-r--r--nova/virt/fake.py105
-rw-r--r--nova/virt/hardware.py304
-rw-r--r--nova/virt/hyperv/driver.py9
-rw-r--r--nova/virt/hyperv/serialproxy.py4
-rw-r--r--nova/virt/images.py31
-rw-r--r--nova/virt/interfaces.template6
-rw-r--r--nova/virt/ironic/driver.py73
-rw-r--r--nova/virt/libvirt/blockinfo.py85
-rw-r--r--nova/virt/libvirt/config.py320
-rw-r--r--nova/virt/libvirt/cpu/__init__.py (renamed from nova/db/main/legacy_migrations/versions/__init__.py)0
-rw-r--r--nova/virt/libvirt/cpu/api.py157
-rw-r--r--nova/virt/libvirt/cpu/core.py78
-rw-r--r--nova/virt/libvirt/driver.py1175
-rw-r--r--nova/virt/libvirt/event.py7
-rw-r--r--nova/virt/libvirt/guest.py28
-rw-r--r--nova/virt/libvirt/host.py272
-rw-r--r--nova/virt/libvirt/imagebackend.py111
-rw-r--r--nova/virt/libvirt/migration.py13
-rw-r--r--nova/virt/libvirt/utils.py163
-rw-r--r--nova/virt/libvirt/vif.py2
-rw-r--r--nova/virt/libvirt/volume/fibrechannel.py3
-rw-r--r--nova/virt/libvirt/volume/lightos.py63
-rw-r--r--nova/virt/libvirt/volume/nvme.py1
-rw-r--r--nova/virt/netutils.py9
-rw-r--r--nova/virt/node.py108
-rw-r--r--nova/virt/powervm/__init__.py17
-rw-r--r--nova/virt/powervm/disk/__init__.py0
-rw-r--r--nova/virt/powervm/disk/driver.py268
-rw-r--r--nova/virt/powervm/disk/localdisk.py211
-rw-r--r--nova/virt/powervm/disk/ssp.py258
-rw-r--r--nova/virt/powervm/driver.py708
-rw-r--r--nova/virt/powervm/host.py66
-rw-r--r--nova/virt/powervm/image.py62
-rw-r--r--nova/virt/powervm/media.py237
-rw-r--r--nova/virt/powervm/mgmt.py175
-rw-r--r--nova/virt/powervm/tasks/__init__.py0
-rw-r--r--nova/virt/powervm/tasks/base.py38
-rw-r--r--nova/virt/powervm/tasks/image.py81
-rw-r--r--nova/virt/powervm/tasks/network.py259
-rw-r--r--nova/virt/powervm/tasks/storage.py429
-rw-r--r--nova/virt/powervm/tasks/vm.py154
-rw-r--r--nova/virt/powervm/vif.py373
-rw-r--r--nova/virt/powervm/vm.py543
-rw-r--r--nova/virt/powervm/volume/__init__.py28
-rw-r--r--nova/virt/powervm/volume/fcvscsi.py468
-rw-r--r--nova/virt/vmwareapi/constants.py3
-rw-r--r--nova/virt/vmwareapi/driver.py64
-rw-r--r--nova/virt/vmwareapi/session.py157
-rw-r--r--nova/virt/vmwareapi/vm_util.py104
-rw-r--r--nova/virt/vmwareapi/vmops.py10
-rw-r--r--nova/virt/vmwareapi/volumeops.py96
-rw-r--r--nova/virt/zvm/driver.py1
-rw-r--r--nova/virt/zvm/hypervisor.py2
-rw-r--r--nova/volume/cinder.py24
-rw-r--r--nova/weights.py40
-rw-r--r--playbooks/ceph/glance-copy-policy.yaml15
-rw-r--r--playbooks/ceph/glance-setup.yaml39
-rw-r--r--playbooks/nova-emulation/pre.yaml35
-rw-r--r--releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml5
-rw-r--r--releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml23
-rw-r--r--releasenotes/notes/add-vmware-fcd-support-822edccb0e38bc37.yaml5
-rw-r--r--releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml10
-rw-r--r--releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml13
-rw-r--r--releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml51
-rw-r--r--releasenotes/notes/bp-boot-vm-with-unaddressed-port-4cb05bb6dc859d98.yaml3
-rw-r--r--releasenotes/notes/bp-keypair-generation-removal-3004a8643dcd1fd9.yaml10
-rw-r--r--releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml18
-rw-r--r--releasenotes/notes/bp-pci-device-tracking-in-placement-75ee1d20a57662f2.yaml9
-rw-r--r--releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml8
-rw-r--r--releasenotes/notes/bp-pick-guest-arch-based-on-host-arch-in-libvirt-driver-f087c3799d388bb6.yaml10
-rw-r--r--releasenotes/notes/bp-policy-defaults-refresh-2-473c70f641f9f397.yaml30
-rw-r--r--releasenotes/notes/bp-unified-limits-656b55863df22e16.yaml43
-rw-r--r--releasenotes/notes/bp-unshelve_to_host-c9047d518eb67747.yaml10
-rw-r--r--releasenotes/notes/bug-1942329-22b08fa4b322881d.yaml9
-rw-r--r--releasenotes/notes/bug-1944619-fix-live-migration-rollback.yaml10
-rw-r--r--releasenotes/notes/bug-1958636-smm-check-and-enable.yaml7
-rw-r--r--releasenotes/notes/bug-1960230-cleanup-instances-dir-resize-56282e1b436a4908.yaml6
-rw-r--r--releasenotes/notes/bug-1960401-504eb255253d966a.yaml8
-rw-r--r--releasenotes/notes/bug-1967157-extend-encrypted.yaml9
-rw-r--r--releasenotes/notes/bug-1970383-segment-scheduling-permissions-92ba907b10a9eb1c.yaml7
-rw-r--r--releasenotes/notes/bug-1978372-optimized-numa-fitting-algorithm-5d5b922b0bdbf818.yaml9
-rw-r--r--releasenotes/notes/bug-1978444-db46df5f3d5ea19e.yaml7
-rw-r--r--releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml9
-rw-r--r--releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml11
-rw-r--r--releasenotes/notes/bug-1983753-update-requestspec-pci_request-for-resize-a3c6b0a979db723f.yaml6
-rw-r--r--releasenotes/notes/bug-1986838-pci-double-booking-1da71ea4399db65a.yaml8
-rw-r--r--releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml28
-rw-r--r--releasenotes/notes/default-host-numa-strategy-to-spread-18668c6d80154042.yaml12
-rw-r--r--releasenotes/notes/deprecate-passthrough_whitelist-config-name-0530d502c960d753.yaml6
-rw-r--r--releasenotes/notes/deprecate-use_forwarded_for-f7b24eaf130782b9.yaml12
-rw-r--r--releasenotes/notes/drop-python-3-6-and-3-7-cd3bf1e945f05fd3.yaml5
-rw-r--r--releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml23
-rw-r--r--releasenotes/notes/extra-sorting-for-host-cells-c03e37de1e57043b.yaml10
-rw-r--r--releasenotes/notes/fix-group-policy-validation-with-deleted-groups-4f685fd1d6b84192.yaml13
-rw-r--r--releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml11
-rw-r--r--releasenotes/notes/greendns-34df7f9fba952bcd.yaml14
-rw-r--r--releasenotes/notes/guest-iommu-device-4795c3a060aca424.yaml21
-rw-r--r--releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml6
-rw-r--r--releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml20
-rw-r--r--releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml11
-rw-r--r--releasenotes/notes/lightos-fcafefdfd0939316.yaml8
-rw-r--r--releasenotes/notes/microversion-2-94-59649401d5763286.yaml22
-rw-r--r--releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml14
-rw-r--r--releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml13
-rw-r--r--releasenotes/notes/nova-manage-image-property-26b2e3eaa2ef343b.yaml17
-rw-r--r--releasenotes/notes/pci-vpd-capability-0d8039629db4afb8.yaml20
-rw-r--r--releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml6
-rw-r--r--releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml36
-rw-r--r--releasenotes/notes/register-defaults-for-undefined-hw-image-properties-d86bcf99f4610239.yaml15
-rw-r--r--releasenotes/notes/remove-default-cputune-shares-values-85d5ddf4b8e24eaa.yaml15
-rw-r--r--releasenotes/notes/remove-powervm-6132cc10255ca205.yaml6
-rw-r--r--releasenotes/notes/remove-qos-queue-vmware-nsx-extension-208d72da23e7ae49.yaml7
-rw-r--r--releasenotes/notes/remove-sqlalchemy-migrate-907c200314884d81.yaml5
-rw-r--r--releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml6
-rw-r--r--releasenotes/notes/skip-compare-cpu-on-dest-6ae419ddd61fd0f8.yaml24
-rw-r--r--releasenotes/notes/skip-hypervisor-version-check-on-lm-a87f2dcb4f8bf0f2.yaml13
-rw-r--r--releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml19
-rw-r--r--releasenotes/notes/too-old-compute-check-code-7dbcde45cfd23394.yaml6
-rw-r--r--releasenotes/notes/update-initial-allocation-defaults-94106033b66b8fa0.yaml21
-rw-r--r--releasenotes/notes/update-libvirt-enlightenments-for-windows-23abea98cc1db667.yaml21
-rw-r--r--releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml12
-rw-r--r--releasenotes/notes/use-multipath-0a0aa2b479e02370.yaml7
-rw-r--r--releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml (renamed from nova/releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml)0
-rw-r--r--releasenotes/notes/validate-machine-type-0d5f3dbd1e2ace31.yaml10
-rw-r--r--releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml11
-rw-r--r--releasenotes/notes/vdpa-suspend-detach-and-live-migrate-e591e6a03a0c834d.yaml25
-rw-r--r--releasenotes/notes/vmware-add-ram-size-multiple-of-4-validation-9740bf60d59ce5e2.yaml7
-rw-r--r--releasenotes/notes/vnic-type-remote-managed-b90cacf1c91df22b.yaml27
-rw-r--r--releasenotes/notes/yoga-prelude-31dd83eb18c789f6.yaml49
-rw-r--r--releasenotes/notes/zed-prelude-a3cddb8b2ac8e293.yaml46
-rw-r--r--releasenotes/source/2023.1.rst6
-rw-r--r--releasenotes/source/index.rst3
-rw-r--r--releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po359
-rw-r--r--releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po2
-rw-r--r--releasenotes/source/yoga.rst6
-rw-r--r--releasenotes/source/zed.rst6
-rw-r--r--requirements.txt44
-rw-r--r--setup.cfg12
-rw-r--r--test-requirements.txt6
-rwxr-xr-xtools/test-setup.sh8
-rw-r--r--tox.ini153
1038 files changed, 45173 insertions, 29350 deletions
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 83fbb77920..d02bdbdfca 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -4,7 +4,7 @@ default_language_version:
python: python3
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v2.4.0
+ rev: v4.1.0
hooks:
- id: trailing-whitespace
- id: mixed-line-ending
@@ -19,7 +19,7 @@ repos:
- id: check-yaml
files: .*\.(yaml|yml)$
- repo: https://github.com/Lucas-C/pre-commit-hooks
- rev: v1.1.7
+ rev: v1.1.13
hooks:
- id: remove-tabs
exclude: '.*\.(svg)$'
@@ -28,14 +28,13 @@ repos:
- id: flake8
name: flake8
additional_dependencies:
- - hacking>=2.0,<3.0
+ - hacking>=3.1.0,<3.2.0
language: python
entry: flake8
files: '^.*\.py$'
exclude: '^(doc|releasenotes|tools)/.*$'
-
- repo: https://github.com/pre-commit/mirrors-autopep8
- rev: 'v1.5.7'
+ rev: v1.6.0
hooks:
- id: autopep8
files: '^.*\.py$'
diff --git a/.zuul.yaml b/.zuul.yaml
index 1c3d13d5dd..9c41476e68 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -2,13 +2,12 @@
# for job naming conventions.
- job:
- name: nova-tox-functional-centos8-py36
- parent: openstack-tox-functional-py36
- nodeset: devstack-single-node-centos-8-stream
+ name: nova-tox-functional-py38
+ parent: openstack-tox-functional-py38
description: |
Run tox-based functional tests for the OpenStack Nova project
- under cPython version 3.6 with Nova specific irrelevant-files list.
- Uses tox with the ``functional-py36`` environment.
+ under cPython version 3.8 with Nova specific irrelevant-files list.
+ Uses tox with the ``functional-py38`` environment.
This job also provides a parent for other projects to run the nova
functional tests on their own changes.
@@ -26,16 +25,16 @@
# explicitly stating the work dir makes this job reusable by other
# projects
zuul_work_dir: src/opendev.org/openstack/nova
- bindep_profile: test py36
+ bindep_profile: test py38
timeout: 3600
- job:
- name: nova-tox-functional-py38
- parent: openstack-tox-functional-py38
+ name: nova-tox-functional-py39
+ parent: openstack-tox-functional-py39
description: |
Run tox-based functional tests for the OpenStack Nova project
- under cPython version 3.8 with Nova specific irrelevant-files list.
- Uses tox with the ``functional-py38`` environment.
+ under cPython version 3.9 with Nova specific irrelevant-files list.
+ Uses tox with the ``functional-py39`` environment.
This job also provides a parent for other projects to run the nova
functional tests on their own changes.
@@ -48,16 +47,16 @@
# explicitly stating the work dir makes this job reusable by other
# projects
zuul_work_dir: src/opendev.org/openstack/nova
- bindep_profile: test py38
+ bindep_profile: test py39
timeout: 3600
- job:
- name: nova-tox-functional-py39
- parent: openstack-tox-functional-py39
+ name: nova-tox-functional-py310
+ parent: openstack-tox-functional-py310
description: |
Run tox-based functional tests for the OpenStack Nova project
- under cPython version 3.9 with Nova specific irrelevant-files list.
- Uses tox with the ``functional-py39`` environment.
+ under cPython version 3.10 with Nova specific irrelevant-files list.
+ Uses tox with the ``functional-py310`` environment.
This job also provides a parent for other projects to run the nova
functional tests on their own changes.
@@ -70,7 +69,7 @@
# explicitly stating the work dir makes this job reusable by other
# projects
zuul_work_dir: src/opendev.org/openstack/nova
- bindep_profile: test py39
+ bindep_profile: test py310
timeout: 3600
- job:
@@ -90,7 +89,7 @@
description: |
Run tempest live migration tests against local qcow2 ephemeral storage
and shared LVM/iSCSI cinder volumes.
- irrelevant-files: &nova-base-irrelevant-files
+ irrelevant-files:
- ^api-.*$
- ^(test-|)requirements.txt$
- ^.*\.rst$
@@ -101,6 +100,7 @@
- ^nova/policies/.*$
- ^nova/tests/.*$
- ^nova/test.py$
+ - ^nova/virt/ironic/.*$
- ^releasenotes/.*$
- ^setup.cfg$
- ^tools/.*$
@@ -117,8 +117,7 @@
compute-feature-enabled:
volume_backed_live_migration: true
block_migration_for_live_migration: true
- # NOTE(lyarwood): Skip until bug #1931702 is resolved.
- block_migrate_cinder_iscsi: false
+ block_migrate_cinder_iscsi: true
post-run: playbooks/nova-live-migration/post-run.yaml
- job:
@@ -130,14 +129,28 @@
the "iptables_hybrid" securitygroup firewall driver, aka "hybrid plug".
The external events interactions between Nova and Neutron in these
situations has historically been fragile. This job exercises them.
- irrelevant-files: *nova-base-irrelevant-files
+ irrelevant-files: &nova-base-irrelevant-files
+ - ^api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/policies/.*$
+ - ^nova/tests/.*$
+ - ^nova/test.py$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tools/.*$
+ - ^tox.ini$
vars:
tox_envlist: all
tempest_test_regex: (^tempest\..*compute\..*(migration|resize|reboot).*)
devstack_localrc:
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vxlan
- Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
ML2_L3_PLUGIN: router
devstack_services:
# Disable OVN services
@@ -156,12 +169,15 @@
"/$NEUTRON_CORE_PLUGIN_CONF":
securitygroup:
firewall_driver: iptables_hybrid
+ $NEUTRON_CONF:
+ nova:
+ live_migration_events: True
group-vars:
subnode:
devstack_localrc:
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vxlan
- Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
ML2_L3_PLUGIN: router
devstack_services:
# Disable OVN services
@@ -179,6 +195,9 @@
"/$NEUTRON_CORE_PLUGIN_CONF":
securitygroup:
firewall_driver: iptables_hybrid
+ $NEUTRON_CONF:
+ nova:
+ live_migration_events: True
post-run: playbooks/nova-live-migration/post-run.yaml
- job:
@@ -229,8 +248,9 @@
tox_envlist: all
# Only run compute API tests.
tempest_test_regex: ^tempest\.api\.compute
- # Skip slow tests.
- tempest_exclude_regex: .*\[.*\bslow\b.*\]
+ # Skip slow tests. Also, skip some volume detach tests until bug#1998148
+ # is fixed.
+ tempest_exclude_regex: (^tempest\.(api\.compute\.(volumes\.test_attach_volume\.AttachVolumeTestJSON\.test_attach_detach_volume|servers\.(test_server_rescue\.ServerStableDeviceRescueTest\.test_stable_device_rescue_disk_virtio_with_volume_attached|test_server_rescue_negative\.ServerRescueNegativeTestJSON\.test_rescued_vm_detach_volume)))|.*\[.*\bslow\b.*\])
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
@@ -255,6 +275,47 @@
# Disable non-essential services that we don't need for this job.
c-bak: false
+- job:
+ name: nova-emulation
+ parent: devstack-tempest
+ description: |
+ Run compute tests using emulated AARCH64 architecture.
+ # NOTE(chateaulav): due to constraints with no IDE support for aarch64,
+ # tests have been limited to eliminate any items that are incompatible.
+ # This is to be re-evaluated as greater support is added and defined.
+ irrelevant-files:
+ - ^(?!.zuul.yaml)(?!nova/virt/libvirt/)(?!nova/objects/)(?!nova/scheduler/).*$
+ - ^api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/policies/.*$
+ - ^nova/tests/.*$
+ - ^nova/test.py$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tools/.*$
+ - ^tox.ini$
+ vars:
+ tox_envlist: all
+ tempest_test_regex: ^tempest\.(api\.compute\.servers|scenario\.test_network_basic_ops)
+ tempest_exclude_regex: (^tempest\.(api\.compute\.servers\.(test_attach_interfaces.AttachInterfacesTestJSON.test_create_list_show_delete_interfaces_by_network_port|test_delete_server.DeleteServersTestJSON.test_delete_server_while_in_attached_volume.*|test_list_.*|test_disk_config|test_server_rescue.*|test_server_actions\.ServerActionsTestJSON\.test_resize.*|test_device_tag.*))|.*\[.*\bslow\b.*\])
+ devstack_localrc:
+ FORCE_CONFIG_DRIVE=False
+ ADMIN_PASSWORD=emulation
+ DATABASE_PASSWORD=$ADMIN_PASSWORD
+ RABBIT_PASSWORD=$ADMIN_PASSWORD
+ SERVICE_PASSWORD=$ADMIN_PASSWORD
+ SWIFT_HASH=1234abcd
+ DOWNLOAD_DEFAULT_IMAGES=False
+ IMAGE_URLS="http://download.cirros-cloud.net/0.5.2/cirros-0.5.2-aarch64-disk.img"
+ DEFAULT_INSTANCE_TYPE=m1.micro
+ pre-run:
+ - playbooks/nova-emulation/pre.yaml
+
# TODO(lucasagomes): Move this job to ML2/OVN when QoS Minimum Bandwidth
# support is implemented.
# See: https://docs.openstack.org/neutron/latest/ovn/gaps.html
@@ -294,7 +355,7 @@
# tempest_test_exclude_list.
# FIXME(lyarwood): The tempest.api.compute.admin.test_volume_swap tests
# are skipped until bug #1929710 is resolved.
- tempest_exclude_regex: ^tempest\.(scenario\.test_network_(?!qos)|api\.compute\.admin\.test_volume_swap)
+ tempest_exclude_regex: ^tempest\.(scenario\.test_network_(?!qos)|api\.compute\.admin\.test_volume_swap)|tempest.api.compute.servers.test_device_tagging.TaggedAttachmentsTest.test_tagged_attachment
devstack_local_conf:
post-config:
$NOVA_CPU_CONF:
@@ -339,12 +400,14 @@
compute-feature-enabled:
# The q35 machine type doesn't support an IDE bus
ide_bus: False
+ # Added in Yoga.
+ unified_limits: True
neutron_plugin_options:
available_type_drivers: flat,geneve,vlan,gre,local,vxlan
devstack_localrc:
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vxlan
- Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
ML2_L3_PLUGIN: router
# Enable TLS between the noVNC proxy & compute nodes; this requires
# the tls-proxy service to be enabled. Added in Queens.
@@ -355,6 +418,8 @@
FORCE_CONFIG_DRIVE: True
# Added in Yoga.
NOVNC_FROM_PACKAGE: False
+ NOVA_USE_UNIFIED_LIMITS: True
+ MYSQL_REDUCE_MEMORY: True
devstack_services:
# Disable OVN services
br-ex-tcpdump: false
@@ -382,9 +447,8 @@
devstack_localrc:
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vxlan
- Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
ML2_L3_PLUGIN: router
- NOVA_USE_SERVICE_TOKEN: True
NOVA_CONSOLE_PROXY_COMPUTE_TLS: True
FORCE_CONFIG_DRIVE: True
# Added in Yoga.
@@ -445,8 +509,7 @@
live_migration: true
volume_backed_live_migration: true
block_migration_for_live_migration: true
- # NOTE(lyarwood): Skip until bug #1931702 is resolved.
- block_migrate_cinder_iscsi: false
+ block_migrate_cinder_iscsi: true
tox_envlist: all
tempest_test_regex: ((tempest\.(api\.compute|scenario)\..*smoke.*)|(^tempest\.api\.compute\.admin\.(test_live_migration|test_migration)))
@@ -533,8 +596,11 @@
required-projects:
- openstack/nova
pre-run:
- - playbooks/ceph/glance-copy-policy.yaml
+ - playbooks/ceph/glance-setup.yaml
vars:
+ # NOTE(danms): Increase our swap size since we're dealing with
+ # larger images, which can trigger OOMs.
+ configure_swap_size: 4096
# NOTE(danms): These tests create an empty non-raw image, which nova
# will refuse because we set never_download_image_if_on_rbd in this job.
# Just skip these tests for this case.
@@ -542,7 +608,24 @@
GLANCE_STANDALONE: True
GLANCE_USE_IMPORT_WORKFLOW: True
DEVSTACK_PARALLEL: True
+ GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 2048
+ MYSQL_REDUCE_MEMORY: True
+ # NOTE(danms): This job is pretty heavy as it is, so we disable some
+ # services that are not relevant to the nova-glance-ceph scenario
+ # that this job is intended to validate.
+ devstack_services:
+ c-bak: false
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ image-feature-enabled:
+ manage_locations: true
+ volume:
+ volume_size: 1
post-config:
$NOVA_CONF:
libvirt:
@@ -551,7 +634,7 @@
never_download_image_if_on_rbd: True
$GLANCE_API_CONF:
DEFAULT:
- enabled_backends: "cheap:file, robust:rbd"
+ enabled_backends: "cheap:file, robust:rbd, web:http"
default_log_levels: "amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, oslo_messaging=INFO, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN, keystoneauth=WARN, oslo.cache=INFO, dogpile.core.dogpile=INFO, oslo_policy=DEBUG"
glance_store:
default_backend: cheap
@@ -563,6 +646,8 @@
rbd_store_ceph_conf: /etc/ceph/ceph.conf
cheap:
filesystem_store_datadir: /opt/stack/data/glance/images/
+ web:
+ https_insecure: false
os_glance_staging_store:
filesystem_store_datadir: /opt/stack/data/glance/os_glance_staging_store/
os_glance_tasks_store:
@@ -573,15 +658,44 @@
image_conversion:
output_format: raw
+# TODO(gmann): Remove this job once all the required services for the
+# integrated compute gate (Cinder, Glance, Neutron) enable scope and new
+# defaults by default, which means all the nova jobs will be tested with the
+# new RBAC in an integrated way and we will not need this separate job.
+- job:
+ name: tempest-integrated-compute-enforce-scope-new-defaults
+ parent: tempest-integrated-compute
+ description: |
+ This job runs the Tempest tests with scope and new defaults enabled
+ for Nova, Neutron, Glance, and Cinder services.
+ # TODO (gmann): There were a few fixes in neutron and neutron-lib for the
+ # RBAC but they are not yet released, so we need to add both projects as
+ # required-projects. These can be removed once new versions of neutron
+ # and neutron-lib are released.
+ required-projects:
+ - openstack/neutron
+ - openstack/neutron-lib
+ vars:
+ devstack_localrc:
+ # Enable scope and new defaults for the services that have implemented them.
+ # NOTE (gmann): We need to keep the keystone scope check disabled as
+ # services (except ironic) do not support the system scope and they
+ # need keystone to continue working with project scope. Until the
+ # keystone policies are changed to also work with project scope, we
+ # need to keep the scope check disabled for keystone.
+ NOVA_ENFORCE_SCOPE: true
+ CINDER_ENFORCE_SCOPE: true
+ GLANCE_ENFORCE_SCOPE: true
+ NEUTRON_ENFORCE_SCOPE: true
+
- project:
# Please try to keep the list of job names sorted alphabetically.
templates:
- check-requirements
- integrated-gate-compute
- openstack-cover-jobs
- - openstack-lower-constraints-jobs
- - openstack-python3-yoga-jobs
- - openstack-python3-yoga-jobs-arm64
+ - openstack-python3-jobs
+ - openstack-python3-jobs-arm64
- periodic-stable-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
@@ -608,28 +722,32 @@
- nova-ovs-hybrid-plug
- nova-tox-validate-backport:
voting: false
- - nova-tox-functional-centos8-py36
- nova-tox-functional-py38
- nova-tox-functional-py39
+ - nova-tox-functional-py310
- tempest-integrated-compute:
- # NOTE(gmann): Policies changes do not need to run all the
- # integration test jobs. Running only tempest and grenade
- # common jobs will be enough along with nova functional
- # and unit tests.
- irrelevant-files: &policies-irrelevant-files
- - ^api-.*$
- - ^(test-|)requirements.txt$
- - ^.*\.rst$
- - ^.git.*$
- - ^doc/.*$
- - ^nova/hacking/.*$
- - ^nova/locale/.*$
- - ^nova/tests/.*$
- - ^nova/test.py$
- - ^releasenotes/.*$
- - ^setup.cfg$
- - ^tools/.*$
- - ^tox.ini$
+ # NOTE(gmann): Policies changes do not need to run all the
+ # integration test jobs. Running only tempest and grenade
+ # common jobs will be enough along with nova functional
+ # and unit tests.
+ irrelevant-files: &policies-irrelevant-files
+ - ^api-.*$
+ - ^(test-|)requirements.txt$
+ - ^.*\.rst$
+ - ^.git.*$
+ - ^doc/.*$
+ - ^nova/hacking/.*$
+ - ^nova/locale/.*$
+ - ^nova/tests/.*$
+ - ^nova/test.py$
+ - ^releasenotes/.*$
+ - ^setup.cfg$
+ - ^tools/.*$
+ - ^tox.ini$
+ - tempest-integrated-compute-enforce-scope-new-defaults:
+ irrelevant-files: *policies-irrelevant-files
+ - grenade-skip-level-always:
+ irrelevant-files: *policies-irrelevant-files
- nova-grenade-multinode:
irrelevant-files: *policies-irrelevant-files
- tempest-ipv6-only:
@@ -642,16 +760,10 @@
- barbican-tempest-plugin-simple-crypto:
irrelevant-files: *nova-base-irrelevant-files
voting: false
- - tempest-integrated-compute-centos-8-stream:
- irrelevant-files: *nova-base-irrelevant-files
- - tempest-centos8-stream-fips:
- irrelevant-files: *nova-base-irrelevant-files
- voting: false
gate:
jobs:
- nova-live-migration
- nova-live-migration-ceph
- - nova-tox-functional-centos8-py36
- nova-tox-functional-py38
- nova-tox-functional-py39
- nova-multi-cell
@@ -666,15 +778,23 @@
# code; we don't need to run this on all changes.
- ^(?!nova/network/.*)(?!nova/virt/libvirt/vif.py).*$
- tempest-integrated-compute:
+ irrelevant-files: *policies-irrelevant-files
+ - tempest-integrated-compute-enforce-scope-new-defaults:
irrelevant-files: *policies-irrelevant-files
- - nova-grenade-multinode:
+ - grenade-skip-level-always:
irrelevant-files: *policies-irrelevant-files
+ - nova-grenade-multinode:
+ irrelevant-files: *policies-irrelevant-files
- tempest-ipv6-only:
irrelevant-files: *nova-base-irrelevant-files
- openstacksdk-functional-devstack:
irrelevant-files: *nova-base-irrelevant-files
- - tempest-integrated-compute-centos-8-stream:
- irrelevant-files: *nova-base-irrelevant-files
+ periodic-weekly:
+ jobs:
+ # Run the emulation feature functionality tests less frequently since
+ # this is the initial release and they are experimental in nature.
+ - nova-emulation
+ - tempest-centos9-stream-fips
experimental:
jobs:
- ironic-tempest-bfv:
@@ -704,3 +824,8 @@
irrelevant-files: *nova-base-irrelevant-files
- devstack-tobiko-nova:
irrelevant-files: *nova-base-irrelevant-files
+ - tempest-centos9-stream-fips:
+ irrelevant-files: *nova-base-irrelevant-files
+ - nova-emulation
+ - tempest-integrated-compute-centos-9-stream:
+ irrelevant-files: *nova-base-irrelevant-files
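The job definitions above share their irrelevant-files lists through YAML
anchors (``&nova-base-irrelevant-files``, ``&policies-irrelevant-files``) and
aliases (``*...``). A minimal sketch of how such an anchor expands when the
file is parsed, using PyYAML; the job names and file patterns here are
illustrative only, not taken from the change::

    # Illustrative only: demonstrates how the &anchor/*alias pairs used in
    # .zuul.yaml expand to one shared irrelevant-files list when parsed.
    import textwrap
    import yaml

    snippet = textwrap.dedent("""\
        - job:
            name: job-a
            irrelevant-files: &irrelevant-files
              - ^doc/.*$
              - ^releasenotes/.*$
        - job:
            name: job-b
            irrelevant-files: *irrelevant-files
    """)

    jobs = yaml.safe_load(snippet)
    assert (jobs[0]["job"]["irrelevant-files"] ==
            jobs[1]["job"]["irrelevant-files"])
    print(jobs[1]["job"]["irrelevant-files"])  # ['^doc/.*$', '^releasenotes/.*$']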
diff --git a/HACKING.rst b/HACKING.rst
index 0f98901864..c5a1ba4ae3 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -71,6 +71,12 @@ Nova Specific Commandments
- [N367] Disallow aliasing the mock.Mock and similar classes in tests.
- [N368] Reject if the mock.Mock class is used as a replacement value instead of an
instance of a mock.Mock during patching in tests.
+- [N369] oslo_concurrency.lockutils.ReaderWriterLock() or
+ fasteners.ReaderWriterLock() does not function correctly
+ with eventlet-patched code. Use nova.utils.ReaderWriterLock() instead.
+- [N370] Don't use or import six
+- [N371] You must explicitly import python's mock: ``from unittest import mock``
+- [N372] Don't use the setDaemon method. Use the daemon attribute instead.
Creating Unit Tests
-------------------
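To make the new checks concrete, here is a small sketch of code that satisfies
N368, N369, N371 and N372 as described above; the module-level lock, state
dict and helper functions are invented for illustration only::

    import threading
    from unittest import mock          # N371: explicit stdlib mock import

    from nova import utils             # N369: eventlet-safe ReaderWriterLock

    _lock = utils.ReaderWriterLock()   # not lockutils/fasteners directly
    _state = {}


    def get_value(key):
        with _lock.read_lock():        # shared access for concurrent readers
            return _state.get(key)


    def set_value(key, value):
        with _lock.write_lock():       # exclusive access for the writer
            _state[key] = value


    # N372: set the daemon attribute rather than calling setDaemon().
    worker = threading.Thread(target=set_value, args=('key', 'value'))
    worker.daemon = True
    worker.start()

    # N368: patch with a mock.Mock() instance, not the mock.Mock class itself.
    with mock.patch.object(utils, 'ReaderWriterLock', mock.Mock()):
        pass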
diff --git a/README.rst b/README.rst
index 2b7eda2a65..028448f0fc 100644
--- a/README.rst
+++ b/README.rst
@@ -10,7 +10,7 @@ OpenStack Nova
OpenStack Nova provides a cloud computing fabric controller, supporting a wide
variety of compute technologies, including: libvirt (KVM, Xen, LXC and more),
-Hyper-V, VMware, OpenStack Ironic and PowerVM.
+Hyper-V, VMware and OpenStack Ironic.
Use the following resources to learn more.
diff --git a/api-guide/source/accelerator-support.rst b/api-guide/source/accelerator-support.rst
index c71e899fd4..9d1b4d77b4 100644
--- a/api-guide/source/accelerator-support.rst
+++ b/api-guide/source/accelerator-support.rst
@@ -12,7 +12,7 @@ appropriate privileges) must do the following:
* Create a device profile in Cyborg, which specifies what accelerator
resources need to be provisioned. (See `Cyborg device profiles API`_.)
- .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+ .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/#device-profiles
* Set the device profile name as an extra spec in a chosen flavor,
with this syntax:
@@ -102,7 +102,7 @@ appropriate privileges) must do the following:
resources need to be provisioned. (See `Cyborg device profiles API`_,
`Cyborg SRIOV Test Report`_.)
- .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/v2/index.html#device-profiles
+ .. _`Cyborg device profiles API`: https://docs.openstack.org/api-ref/accelerator/#device-profiles
.. _`Cyborg SRIOV Test Report`: https://wiki.openstack.org/wiki/Cyborg/TestReport/IntelNic
* create a 'accelerator-direct' vnic type port with the device-profile name
diff --git a/api-guide/source/server_concepts.rst b/api-guide/source/server_concepts.rst
index 62d8331891..9341965140 100644
--- a/api-guide/source/server_concepts.rst
+++ b/api-guide/source/server_concepts.rst
@@ -518,12 +518,12 @@ Server actions
- **Suspend**, **Resume**
- Administrative users might want to suspend a server if it is
- infrequently used or to perform system maintenance. When you suspend
- a server, its VM state is stored on disk, all memory is written to
- disk, and the virtual machine is stopped. Suspending a server is
- similar to placing a device in hibernation; memory and vCPUs become
- available to create other servers.
+ Users might want to suspend a server if it is infrequently used or
+ to perform system maintenance. When you suspend a server, its VM state
+ is stored on disk, all memory is written to disk, and the virtual machine
+ is stopped. Suspending a server is similar to placing a device in
+ hibernation; its occupied resources are not freed but rather kept
+ for when the server is resumed.
Resume will resume a suspended server to an active state.
@@ -1048,7 +1048,7 @@ Nova is able to write metadata to a special configuration drive that attaches
to the server when it boots. The server can mount this drive and read files
from it to get information that is normally available through the metadata
service. For more details, refer to the :nova-doc:`user guide
-<user/metadata.html>`.
+<user/metadata.html#config-drives>`.
User data
---------
diff --git a/api-guide/source/users.rst b/api-guide/source/users.rst
index a0b74374a2..28a59201c0 100644
--- a/api-guide/source/users.rst
+++ b/api-guide/source/users.rst
@@ -28,7 +28,7 @@ The Compute API uses these roles, along with oslo.policy, to decide
what the user is authorized to do.
Refer to the to
-:nova-doc:`compute admin guide </admin/arch#projects-users-and-roles>`
+:nova-doc:`compute admin guide </admin/architecture#projects-users-and-roles>`
for details.
Personas used in this guide
diff --git a/api-ref/source/flavors.inc b/api-ref/source/flavors.inc
index 0216ce2983..52577667ec 100644
--- a/api-ref/source/flavors.inc
+++ b/api-ref/source/flavors.inc
@@ -60,6 +60,15 @@ Creates a flavor.
Creating a flavor is typically only available to administrators of a
cloud because this has implications for scheduling efficiently in the cloud.
+.. note::
+ Flavor IDs may contain only alphanumeric characters plus the hyphen '-',
+ underscore '_', spaces and dots '.'; other special characters are not
+ permitted.
+
+ Flavor IDs are meant to be UUIDs, i.e. serialized strings grouped by "-",
+ e.g. 01cc74d8-4816-4bef-835b-e36ff188c406.
+
+ Only for backward compatibility, an integer as a flavor ID is permitted.
+
Normal response codes: 200
Error response codes: badRequest(400), unauthorized(401), forbidden(403),
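A worked example of the ID rule above: a flavor-create request that supplies
an explicit UUID as the flavor ID. The endpoint, token and flavor values below
are placeholders, not taken from the change::

    # Hypothetical example: create a flavor with an explicit UUID id.
    import requests

    NOVA = "http://openstack.example.com/compute/v2.1"
    TOKEN = "<keystone-token>"

    body = {
        "flavor": {
            "name": "m1.tiny.example",
            # Allowed: alphanumerics plus '-', '_', spaces and '.'; omit the
            # id entirely to get an auto-generated UUID.
            "id": "01cc74d8-4816-4bef-835b-e36ff188c406",
            "ram": 512,
            "vcpus": 1,
            "disk": 1,
        }
    }

    resp = requests.post(
        f"{NOVA}/flavors",
        json=body,
        headers={"X-Auth-Token": TOKEN},
    )
    print(resp.status_code, resp.json()["flavor"]["id"])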
diff --git a/api-ref/source/os-keypairs.inc b/api-ref/source/os-keypairs.inc
index e03e7d91aa..76099fad16 100644
--- a/api-ref/source/os-keypairs.inc
+++ b/api-ref/source/os-keypairs.inc
@@ -44,12 +44,16 @@ Response
.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.35/keypairs-list-resp.json
:language: javascript
-Create Or Import Keypair
-========================
+Import (or create) Keypair
+==========================
.. rest_method:: POST /os-keypairs
-Generates or imports a keypair.
+Imports (or generates) a keypair.
+
+.. warning::
+
+ Generating a keypair is no longer possible starting from version 2.92.
Normal response codes: 200, 201
@@ -65,7 +69,7 @@ Request
.. rest_parameters:: parameters.yaml
- keypair: keypair
- - name: keypair_name
+ - name: keypair_name_in
- public_key: keypair_public_key_in
- type: keypair_type_in
- user_id: keypair_userid_in
@@ -75,6 +79,11 @@ Request
.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-import-post-req.json
:language: javascript
+**Example Import Keypair (v2.92): JSON request**
+
+.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json
+ :language: javascript
+
Response
--------
@@ -93,6 +102,11 @@ Response
.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.10/keypairs-import-post-resp.json
:language: javascript
+**Example Import Keypair (v2.92): JSON response**
+
+.. literalinclude:: ../../doc/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json
+ :language: javascript
+
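For readers of the 2.92 note above, a request built against the documented
schema could look as follows; the endpoint, token, key name and key material
are placeholders::

    # Hypothetical example: import (not generate) a keypair at microversion 2.92.
    import requests

    NOVA = "http://openstack.example.com/compute/v2.1"
    TOKEN = "<keystone-token>"

    body = {
        "keypair": {
            # 2.92 restricts names to ASCII letters, digits and [@._- ].
            "name": "deploy-key@staging.example",
            # public_key is mandatory from 2.92; omitting it returns 400.
            "public_key": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA... user@host",
            "type": "ssh",
        }
    }

    resp = requests.post(
        f"{NOVA}/os-keypairs",
        json=body,
        headers={
            "X-Auth-Token": TOKEN,
            "X-OpenStack-Nova-API-Version": "2.92",
        },
    )
    print(resp.status_code)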
Show Keypair Details
====================
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 5ea19faab9..e185dce29d 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -1858,8 +1858,11 @@ availability_zone_state:
availability_zone_unshelve:
description: |
The availability zone name. Specifying an availability zone is only
- allowed when the server status is ``SHELVED_OFFLOADED`` otherwise a
- 409 HTTPConflict response is returned.
+ allowed when the server status is ``SHELVED_OFFLOADED``, otherwise an
+ HTTP 409 Conflict response is returned.
+
+ Since microversion 2.91, ``"availability_zone": null`` allows unpinning the
+ instance from any availability zone it is pinned to.
in: body
required: false
type: string
@@ -3126,8 +3129,9 @@ flavor_id_body_2_46:
max_version: 2.46
flavor_id_body_create:
description: |
- The ID of the flavor. While people often make this look like an int, this
- is really a string. If not provided, this defaults to a uuid.
+ Only alphanumeric characters with hyphen '-', underscore '_', spaces
+ and dots '.' are permitted. If an ID is not provided, then a default UUID
+ will be assigned.
in: body
required: false
type: string
@@ -3690,6 +3694,15 @@ host_status_update_rebuild:
required: false
type: string
min_version: 2.75
+host_unshelve:
+ description: |
+ The destination host name. Specifying a destination host is by default
+ only allowed to project_admin; if that is not the case, an HTTP 403
+ Forbidden response is returned.
+ in: body
+ required: false
+ type: string
+ min_version: 2.91
host_zone:
description: |
The available zone of the host.
@@ -4007,14 +4020,15 @@ imageRef:
type: string
imageRef_rebuild:
description: |
- The UUID of the image to rebuild for your server instance.
- It must be a valid UUID otherwise API will return 400.
- If rebuilding a volume-backed server with a new image
- (an image different from the image used when creating the volume),
- the API will return 400.
- For non-volume-backed servers, specifying a new image will result
- in validating that the image is acceptable for the current compute host
- on which the server exists. If the new image is not valid,
+ The UUID of the image to rebuild for your server instance. It
+ must be a valid UUID, otherwise the API will return 400. To rebuild a
+ volume-backed server with a new image, at least microversion 2.93
+ must be used in the request, otherwise the request falls back to the
+ old behaviour, i.e. the API returns 400 for an image
+ different from the image used when creating the volume. For
+ non-volume-backed servers, specifying a new image will result in
+ validating that the image is acceptable for the current compute
+ host on which the server exists. If the new image is not valid,
the server will go into ``ERROR`` status.
in: body
required: true
@@ -4402,7 +4416,19 @@ keypair_name:
required: true
type: string
description: |
+ The name for the keypair.
+keypair_name_in:
+ in: body
+ required: true
+ type: string
+ description: |
A name for the keypair which will be used to reference it later.
+
+ .. note::
+
+ Since microversion 2.92, allowed characters are ASCII letters
+ ``[a-zA-Z]``, digits ``[0-9]`` and the following special
+ characters: ``[@._- ]``.
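A quick validation sketch derived from the character list in this note; the
regex is illustrative only and is not the actual server-side JSON schema::

    # Illustrative check mirroring the 2.92 keypair name rule:
    # ASCII letters, digits and the special characters @ . _ - and space.
    import re

    KEYPAIR_NAME_RE = re.compile(r"^[a-zA-Z0-9@._\- ]+$")

    assert KEYPAIR_NAME_RE.match("deploy-key@staging.example")
    assert KEYPAIR_NAME_RE.match("my key_1")
    assert KEYPAIR_NAME_RE.match("bad/name") is None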
keypair_private_key:
description: |
If you do not provide a public key on create, a new keypair will
@@ -4412,6 +4438,7 @@ keypair_private_key:
in: body
required: false
type: string
+ max_version: 2.91
keypair_public_key:
description: |
The keypair public key.
@@ -4420,10 +4447,11 @@ keypair_public_key:
type: string
keypair_public_key_in:
description: |
- The public ssh key to import. If you omit this value, a keypair is
- generated for you.
+ The public ssh key to import.
+ This was optional before microversion 2.92: if you omitted this value, a
+ keypair was generated for you.
in: body
- required: false
+ required: true
type: string
keypair_type:
in: body
@@ -6354,6 +6382,9 @@ server_hostname_req:
description: |
The hostname to configure for the instance in the metadata service.
+ Starting with microversion 2.94, this can be a Fully Qualified Domain Name
+ (FQDN) of up to 255 characters in length.
+
.. note::
This information is published via the metadata service and requires
diff --git a/api-ref/source/servers-action-shelve.inc b/api-ref/source/servers-action-shelve.inc
index 08ca65dadd..af8bc1969b 100644
--- a/api-ref/source/servers-action-shelve.inc
+++ b/api-ref/source/servers-action-shelve.inc
@@ -121,9 +121,65 @@ Policy defaults enable only users with the administrative role or the owner of t
**Preconditions**
-The server status must be ``SHELVED`` or ``SHELVED_OFFLOADED``.
+Unshelving a server without parameters requires its status to be ``SHELVED`` or ``SHELVED_OFFLOADED``.
+
+Unshelving a server with availability_zone and/or host parameters requires its status to be ``SHELVED_OFFLOADED``, otherwise an HTTP 409 Conflict response is returned.
+
+If a server is locked, you must have administrator privileges to unshelve the server.
+
+As of ``microversion 2.91``, you can unshelve to a specific compute node if you have PROJECT_ADMIN privileges.
+This microversion also gives the ability to pin a server to an availability_zone and to unpin a server
+from any availability_zone.
+
+When a server is pinned to an availability_zone, the server move operations will keep the server in that
+availability_zone. However, when the server is not pinned to any availability_zone, the move operations can
+move the server to nodes in different availability_zones.
+
+The behavior with respect to the unshelve parameters is summarized in the table below.
+
++----------+---------------------------+----------+--------------------------------+
+| Boot | AZ (1) | Host (1) | Result |
++==========+===========================+==========+================================+
+| No AZ | No AZ or AZ=null | No | Free scheduling (2) |
++----------+---------------------------+----------+--------------------------------+
+| No AZ | No AZ or AZ=null | Host1 | Schedule to Host1. |
+| | | | Server remains unpinned. |
++----------+---------------------------+----------+--------------------------------+
+| No AZ | AZ="AZ1" | No | Schedule to any host in "AZ1". |
+| | | | Server is pinned to "AZ1". |
++----------+---------------------------+----------+--------------------------------+
+| No AZ | AZ="AZ1" | Host1 | Verify Host1 is in "AZ1", |
+| | | | then schedule to Host1, |
+| | | | otherwise reject the request. |
+| | | | Server is pinned to "AZ1". |
++----------+---------------------------+----------+--------------------------------+
+| AZ1 | No AZ | No | Schedule to any host in "AZ1". |
+| | | | Server remains pinned to "AZ1". |
++----------+---------------------------+----------+--------------------------------+
+| AZ1 | AZ=null | No | Free scheduling (2). |
+| | | | Server is unpinned. |
++----------+---------------------------+----------+--------------------------------+
+| AZ1 | No AZ | Host1 | Verify Host1 is in "AZ1", |
+| | | | then schedule to Host1, |
+| | | | otherwise reject the request. |
+| | | | Server remains pinned to "AZ1". |
++----------+---------------------------+----------+--------------------------------+
+| AZ1 | AZ=null | Host1 | Schedule to Host1. |
+| | | | Server is unpinned. |
++----------+---------------------------+----------+--------------------------------+
+| AZ1 | AZ="AZ2" | No | Schedule to any host in "AZ2". |
+| | | | Server is pinned to "AZ2". |
++----------+---------------------------+----------+--------------------------------+
+| AZ1 | AZ="AZ2" | Host1 | Verify Host1 is in "AZ2" then |
+| | | | schedule to Host1, |
+| | | | otherwise reject the request. |
+| | | | Server is pinned to "AZ2". |
++----------+---------------------------+----------+--------------------------------+
+
+(1) Unshelve body parameters
+(2) Schedule to any host available.
+
-If the server is locked, you must have administrator privileges to unshelve the server.
**Asynchronous Postconditions**
@@ -147,11 +203,30 @@ Request
{"unshelve": null} or {"unshelve": {"availability_zone": <string>}}.
A request body of {"unshelve": {}} is not allowed.
+.. note:: Since microversion 2.91, the allowed request body schemas are
+
+ - {"unshelve": null} (Keep compatibility with previous microversions)
+
+ or
+
+ - {"unshelve": {"availability_zone": <string>}} (Unshelve and pin server to availability_zone)
+ - {"unshelve": {"availability_zone": null}} (Unshelve and unpin server from any availability zone)
+ - {"unshelve": {"host": <fqdn>}}
+ - {"unshelve": {"availability_zone": <string>, "host": <fqdn>}}
+ - {"unshelve": {"availability_zone": null, "host": <fqdn>}}
+
+ Everything else is not allowed, examples:
+
+ - {"unshelve": {}}
+ - {"unshelve": {"host": <fqdn>, "host": <fqdn>}}
+ - {"unshelve": {"foo": <string>}}
+
.. rest_parameters:: parameters.yaml
- server_id: server_id_path
- unshelve: unshelve
- availability_zone: availability_zone_unshelve
+ - host: host_unshelve
|
@@ -162,9 +237,22 @@ Request
**Example Unshelve server (unshelve Action) (v2.77)**
-.. literalinclude:: ../../doc/api_samples/os-shelve/v2.77/os-unshelve.json
+.. literalinclude:: ../../doc/api_samples/os-shelve/v2.77/os-unshelve-az.json
:language: javascript
+**Examples Unshelve server (unshelve Action) (v2.91)**
+
+.. literalinclude:: ../../doc/api_samples/os-shelve/v2.91/os-unshelve-host.json
+ :language: javascript
+
+.. literalinclude:: ../../doc/api_samples/os-shelve/v2.91/os-unshelve-az-host.json
+ :language: javascript
+
+.. literalinclude:: ../../doc/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json
+ :language: javascript
+
+.. literalinclude:: ../../doc/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json
+ :language: javascript
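The request bodies listed in the note above map onto the servers action
endpoint as sketched below; the endpoint, token, server UUID, host and
availability zone are placeholders::

    # Hypothetical example: unshelve to a specific host and pin to an AZ (2.91).
    import requests

    NOVA = "http://openstack.example.com/compute/v2.1"
    TOKEN = "<keystone-token>"
    SERVER = "9eb93a44-3e52-4e1a-bbf7-ea0ca1ba47e5"   # placeholder server UUID

    headers = {
        "X-Auth-Token": TOKEN,
        "X-OpenStack-Nova-API-Version": "2.91",
    }

    # Unshelve, pin to "az1" and target host1 (host1 must be in "az1").
    body = {"unshelve": {"availability_zone": "az1", "host": "host1.example.com"}}

    # Alternatively, unpin from any AZ while still targeting a host:
    # body = {"unshelve": {"availability_zone": None, "host": "host1.example.com"}}

    resp = requests.post(
        f"{NOVA}/servers/{SERVER}/action",
        json=body,
        headers=headers,
    )
    print(resp.status_code)   # 202 on success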
Response
--------
diff --git a/api-ref/source/servers-actions.inc b/api-ref/source/servers-actions.inc
index f480403a40..bb9953afa0 100644
--- a/api-ref/source/servers-actions.inc
+++ b/api-ref/source/servers-actions.inc
@@ -540,7 +540,13 @@ Rebuilds a server.
Specify the ``rebuild`` action in the request body.
This operation recreates the root disk of the server.
-For a volume-backed server, this operation keeps the contents of the volume.
+
+With microversion 2.93, we support rebuilding volume-backed
+instances, which will reimage the volume with the provided
+image. For microversions < 2.93, this operation keeps the
+contents of the volume only if the provided image is the same as
+the image with which the volume was created; otherwise the operation
+errors out.
**Preconditions**
@@ -552,8 +558,10 @@ If the server was in status ``SHUTOFF`` before the rebuild, it will be stopped
and in status ``SHUTOFF`` after the rebuild, otherwise it will be ``ACTIVE``
if the rebuild was successful or ``ERROR`` if the rebuild failed.
-.. note:: There is a `known limitation`_ where the root disk is not
- replaced for volume-backed instances during a rebuild.
+.. note:: With microversion 2.93, we support rebuilding volume-backed
+ instances. If any microversion < 2.93 is specified, there is a
+ `known limitation`_ where the root disk is not replaced for
+ volume-backed instances during a rebuild.
.. _known limitation: https://bugs.launchpad.net/nova/+bug/1482040
@@ -596,6 +604,11 @@ Request
.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-action-rebuild.json
:language: javascript
+**Example Rebuild Server (rebuild Action) (v2.94)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.94/server-action-rebuild.json
+ :language: javascript
+
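A sketch of the API call behind the volume-backed reimage behaviour described
above; the endpoint, token, server UUID and image UUID are placeholders::

    # Hypothetical example: rebuild (reimage) a volume-backed server with 2.93+.
    import requests

    NOVA = "http://openstack.example.com/compute/v2.1"
    TOKEN = "<keystone-token>"
    SERVER = "9eb93a44-3e52-4e1a-bbf7-ea0ca1ba47e5"   # placeholder server UUID

    body = {
        "rebuild": {
            # With microversion >= 2.93 a different image is accepted for
            # volume-backed servers and the root volume is reimaged.
            "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b",
        }
    }

    resp = requests.post(
        f"{NOVA}/servers/{SERVER}/action",
        json=body,
        headers={
            "X-Auth-Token": TOKEN,
            "X-OpenStack-Nova-API-Version": "2.93",
        },
    )
    print(resp.status_code)   # 202 on success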
Response
--------
diff --git a/api-ref/source/servers.inc b/api-ref/source/servers.inc
index 547a71e914..e72d0641b9 100644
--- a/api-ref/source/servers.inc
+++ b/api-ref/source/servers.inc
@@ -448,6 +448,11 @@ Request
.. literalinclude:: ../../doc/api_samples/servers/v2.90/server-create-req.json
:language: javascript
+**Example Create Server With FQDN in Hostname (v2.94)**
+
+.. literalinclude:: ../../doc/api_samples/servers/v2.94/server-create-req.json
+ :language: javascript
+
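A sketch of a create request using an FQDN hostname as allowed from 2.94; all
identifiers and the endpoint are placeholders::

    # Hypothetical example: create a server with an FQDN hostname (2.94+).
    import requests

    NOVA = "http://openstack.example.com/compute/v2.1"
    TOKEN = "<keystone-token>"

    body = {
        "server": {
            "name": "new-server-test",
            # From 2.94 the hostname may be a fully qualified domain name
            # of up to 255 characters.
            "hostname": "custom-hostname.example.com",
            "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b",
            "flavorRef": "1",
            "networks": "auto",
        }
    }

    resp = requests.post(
        f"{NOVA}/servers",
        json=body,
        headers={
            "X-Auth-Token": TOKEN,
            "X-OpenStack-Nova-API-Version": "2.94",
        },
    )
    print(resp.status_code)   # 202 on success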
Response
--------
@@ -610,7 +615,7 @@ Response
.. rest_parameters:: parameters.yaml
- - server: server
+ - servers: servers
- accessIPv4: accessIPv4
- accessIPv6: accessIPv6
- addresses: addresses
diff --git a/bindep.txt b/bindep.txt
index 3a4d7bef80..b129058d30 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -39,7 +39,6 @@ postgresql
postgresql-client [platform:dpkg]
postgresql-devel [platform:rpm test]
postgresql-server [platform:rpm]
-python-dev [platform:dpkg test]
python3-all [platform:dpkg]
python3-all-dev [platform:dpkg]
python3 [platform:rpm test]
diff --git a/devstack/nova-multi-cell-exclude-list.txt b/devstack/nova-multi-cell-exclude-list.txt
index a61229c906..0dbe383abf 100644
--- a/devstack/nova-multi-cell-exclude-list.txt
+++ b/devstack/nova-multi-cell-exclude-list.txt
@@ -10,3 +10,7 @@
# https://bugs.launchpad.net/nova/+bug/1907511 for details
test_migrate_with_qos_min_bw_allocation
test_resize_with_qos_min_bw_allocation
+
+# Also exclude unshelve to specific host test cases as unshelve cannot move VMs across cells
+# See https://bugs.launchpad.net/nova/+bug/1988316
+tempest.api.compute.admin.test_servers_on_multinodes.UnshelveToHostMultiNodesTest
diff --git a/doc/api_samples/images/images-details-get-resp.json b/doc/api_samples/images/images-details-get-resp.json
index 034c35f0c0..33cf667287 100644
--- a/doc/api_samples/images/images-details-get-resp.json
+++ b/doc/api_samples/images/images-details-get-resp.json
@@ -1,59 +1,56 @@
{
"images": [
{
- "OS-DCF:diskConfig": "AUTO",
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-EXT-IMG-SIZE:size": "25165824",
"created": "2011-01-01T01:02:03Z",
- "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
"architecture": "x86_64",
- "auto_disk_config": "True",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
"minDisk": 0,
"minRam": 0,
- "name": "fakeimage7",
+ "name": "fakeimage123456",
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"
},
{
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-EXT-IMG-SIZE:size": "58145823",
"created": "2011-01-01T01:02:03Z",
- "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
- "architecture": "x86_64",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
@@ -65,25 +62,26 @@
"updated": "2011-01-01T01:02:03Z"
},
{
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-EXT-IMG-SIZE:size": "83594576",
"created": "2011-01-01T01:02:03Z",
- "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
+ "architecture": "x86_64",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
@@ -95,40 +93,37 @@
"updated": "2011-01-01T01:02:03Z"
},
{
- "OS-DCF:diskConfig": "MANUAL",
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-EXT-IMG-SIZE:size": "84035174",
"created": "2011-01-01T01:02:03Z",
- "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379",
+ "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
- "architecture": "x86_64",
- "auto_disk_config": "False",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
"minDisk": 0,
"minRam": 0,
- "name": "fakeimage6",
+ "name": "fakeimage123456",
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"
},
{
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-EXT-IMG-SIZE:size": "26360814",
"created": "2011-01-01T01:02:03Z",
"id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
"links": [
@@ -158,65 +153,188 @@
"updated": "2011-01-01T01:02:03Z"
},
{
- "OS-EXT-IMG-SIZE:size": "74185822",
+ "OS-DCF:diskConfig": "MANUAL",
+ "OS-EXT-IMG-SIZE:size": "49163826",
"created": "2011-01-01T01:02:03Z",
- "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "id": "a440c04b-79fa-479c-bed1-0b816eaec379",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a440c04b-79fa-479c-bed1-0b816eaec379",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://glance.openstack.example.com/images/a440c04b-79fa-479c-bed1-0b816eaec379",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
+ "architecture": "x86_64",
+ "auto_disk_config": "False",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
"minDisk": 0,
"minRam": 0,
- "name": "fakeimage123456",
+ "name": "fakeimage6",
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"
},
{
+ "OS-DCF:diskConfig": "AUTO",
"OS-EXT-IMG-SIZE:size": "74185822",
"created": "2011-01-01T01:02:03Z",
- "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
"metadata": {
"architecture": "x86_64",
+ "auto_disk_config": "True",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
},
"minDisk": 0,
"minRam": 0,
+ "name": "fakeimage7",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "25165824",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "95fad737-9325-4855-b37e-20a62268ec88",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/95fad737-9325-4855-b37e-20a62268ec88",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/95fad737-9325-4855-b37e-20a62268ec88",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/95fad737-9325-4855-b37e-20a62268ec88",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "25165824",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "535426d4-5d75-44f4-9591-a2123d23c33f",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/535426d4-5d75-44f4-9591-a2123d23c33f",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/535426d4-5d75-44f4-9591-a2123d23c33f",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/535426d4-5d75-44f4-9591-a2123d23c33f",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "False"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "25165824",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "5f7d4f5b-3781-4a4e-9046-a2a800e807e5",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/5f7d4f5b-3781-4a4e-9046-a2a800e807e5",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/5f7d4f5b-3781-4a4e-9046-a2a800e807e5",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/5f7d4f5b-3781-4a4e-9046-a2a800e807e5",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True",
+ "hw_ephemeral_encryption_format": "luks"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "OS-EXT-IMG-SIZE:size": "25165824",
+ "created": "2011-01-01T01:02:03Z",
+ "id": "261b52ed-f693-4147-8f3b-d25df5efd968",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/261b52ed-f693-4147-8f3b-d25df5efd968",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/261b52ed-f693-4147-8f3b-d25df5efd968",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/261b52ed-f693-4147-8f3b-d25df5efd968",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True",
+ "hw_ephemeral_encryption_format": "plain"
+ },
+ "minDisk": 0,
+ "minRam": 0,
"name": "fakeimage123456",
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"
}
]
-}
+}
\ No newline at end of file
diff --git a/doc/api_samples/images/images-list-get-resp.json b/doc/api_samples/images/images-list-get-resp.json
index 00d06f96b3..e2207b9271 100644
--- a/doc/api_samples/images/images-list-get-resp.json
+++ b/doc/api_samples/images/images-list-get-resp.json
@@ -1,37 +1,37 @@
{
"images": [
{
- "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
],
- "name": "fakeimage7"
+ "name": "fakeimage123456"
},
{
- "id": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "id": "a2459075-d96c-40d5-893e-577ff92e721c",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
@@ -39,18 +39,56 @@
"name": "fakeimage123456"
},
{
- "id": "a2459075-d96c-40d5-893e-577ff92e721c",
+ "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/a2459075-d96c-40d5-893e-577ff92e721c",
+ "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
@@ -77,18 +115,37 @@
"name": "fakeimage6"
},
{
- "id": "c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/c905cedb-7281-47e4-8a62-f26bc5fc4c77",
+ "href": "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage7"
+ },
+ {
+ "id": "a2293931-dc33-45cc-85ef-232aa9491710",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/a2293931-dc33-45cc-85ef-232aa9491710",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/a2293931-dc33-45cc-85ef-232aa9491710",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/a2293931-dc33-45cc-85ef-232aa9491710",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
@@ -96,18 +153,18 @@
"name": "fakeimage123456"
},
{
- "id": "cedef40a-ed67-4d10-800e-17455edce175",
+ "id": "e78f0ee9-96ef-4ce7-accf-e816f273be45",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/e78f0ee9-96ef-4ce7-accf-e816f273be45",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/e78f0ee9-96ef-4ce7-accf-e816f273be45",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/cedef40a-ed67-4d10-800e-17455edce175",
+ "href": "http://glance.openstack.example.com/images/e78f0ee9-96ef-4ce7-accf-e816f273be45",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
@@ -115,18 +172,37 @@
"name": "fakeimage123456"
},
{
- "id": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "id": "54eadb78-eeb6-4b13-beed-20b9894eeadf",
"links": [
{
- "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/54eadb78-eeb6-4b13-beed-20b9894eeadf",
"rel": "self"
},
{
- "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/54eadb78-eeb6-4b13-beed-20b9894eeadf",
"rel": "bookmark"
},
{
- "href": "http://glance.openstack.example.com/images/76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ "href": "http://glance.openstack.example.com/images/54eadb78-eeb6-4b13-beed-20b9894eeadf",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "eb7458f3-d003-4187-8027-595591dc2723",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/images/eb7458f3-d003-4187-8027-595591dc2723",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/eb7458f3-d003-4187-8027-595591dc2723",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/eb7458f3-d003-4187-8027-595591dc2723",
"rel": "alternate",
"type": "application/vnd.openstack.image"
}
@@ -134,4 +210,4 @@
"name": "fakeimage123456"
}
]
-}
+} \ No newline at end of file
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
new file mode 100644
index 0000000000..8ad929226e
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json
@@ -0,0 +1,4 @@
+{
+ "evacuate": {
+ }
+}
diff --git a/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
new file mode 100644
index 0000000000..d192892cdc
--- /dev/null
+++ b/doc/api_samples/os-evacuate/v2.95/server-evacuate-req.json
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "host": "testHost"
+ }
+}
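These evacuate bodies are posted to the server's action endpoint. A minimal
sketch, assuming a valid token in ``$OS_TOKEN`` and the compute endpoint URL in
``$COMPUTE_URL`` (both placeholders), requesting the 2.95 microversion:

.. code-block:: console

   $ curl -s -X POST "$COMPUTE_URL/servers/<server-id>/action" \
       -H "Content-Type: application/json" \
       -H "X-Auth-Token: $OS_TOKEN" \
       -H "X-OpenStack-Nova-API-Version: 2.95" \
       -d '{"evacuate": {"host": "testHost"}}'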
diff --git a/doc/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json b/doc/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json
new file mode 100644
index 0000000000..72600f8368
--- /dev/null
+++ b/doc/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json
@@ -0,0 +1,8 @@
+{
+ "keypair": {
+ "name": "me.and.myself@this.nice.domain.com mooh.",
+ "type": "ssh",
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json b/doc/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json
new file mode 100644
index 0000000000..b828798c5e
--- /dev/null
+++ b/doc/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json
@@ -0,0 +1,9 @@
+{
+ "keypair": {
+ "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c",
+ "name": "me.and.myself@this.nice.domain.com mooh.",
+ "type": "ssh",
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova",
+ "user_id": "fake"
+ }
+}
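For illustration only: with a recent python-openstackclient, an equivalent
import can be done from the CLI by requesting microversion 2.92 and passing an
existing public key file (the key path here is a placeholder):

.. code-block:: console

   $ openstack --os-compute-api-version 2.92 keypair create \
       --public-key ~/.ssh/id_rsa.pub "me.and.myself@this.nice.domain.com mooh."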
diff --git a/doc/api_samples/os-shelve/v2.77/os-unshelve.json b/doc/api_samples/os-shelve/v2.77/os-unshelve-az.json
index 8ca146b593..8ca146b593 100644
--- a/doc/api_samples/os-shelve/v2.77/os-unshelve.json
+++ b/doc/api_samples/os-shelve/v2.77/os-unshelve-az.json
diff --git a/doc/api_samples/os-shelve/v2.91/os-unshelve-az-host.json b/doc/api_samples/os-shelve/v2.91/os-unshelve-az-host.json
new file mode 100644
index 0000000000..6d5e7b1a2e
--- /dev/null
+++ b/doc/api_samples/os-shelve/v2.91/os-unshelve-az-host.json
@@ -0,0 +1,6 @@
+{
+ "unshelve": {
+ "availability_zone": "nova",
+ "host": "host01"
+ }
+}
diff --git a/doc/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json b/doc/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json
new file mode 100644
index 0000000000..e04cc4e7f4
--- /dev/null
+++ b/doc/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json
@@ -0,0 +1,6 @@
+{
+ "unshelve": {
+ "availability_zone": null,
+ "host": "host01"
+ }
+}
diff --git a/doc/api_samples/os-shelve/v2.91/os-unshelve-host.json b/doc/api_samples/os-shelve/v2.91/os-unshelve-host.json
new file mode 100644
index 0000000000..bd68363d6e
--- /dev/null
+++ b/doc/api_samples/os-shelve/v2.91/os-unshelve-host.json
@@ -0,0 +1,5 @@
+{
+ "unshelve": {
+ "host": "host01"
+ }
+}
diff --git a/doc/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json b/doc/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json
new file mode 100644
index 0000000000..598710aed9
--- /dev/null
+++ b/doc/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json
@@ -0,0 +1,5 @@
+{
+ "unshelve": {
+ "availability_zone": null
+ }
+}
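The v2.91 unshelve bodies are likewise posted to the server action endpoint. A
rough sketch, with ``$OS_TOKEN`` and ``$COMPUTE_URL`` as placeholders, pinning
the unshelve to a specific host using one of the sample files above:

.. code-block:: console

   $ curl -s -X POST "$COMPUTE_URL/servers/<server-id>/action" \
       -H "Content-Type: application/json" \
       -H "X-Auth-Token: $OS_TOKEN" \
       -H "X-OpenStack-Nova-API-Version: 2.91" \
       -d @doc/api_samples/os-shelve/v2.91/os-unshelve-host.json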
diff --git a/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json
new file mode 100644
index 0000000000..7eeb568ea4
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-action-rebuild-resp.json
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "updated-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "2021-08-19T15:16:22.177882",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "2019-04-23T17:10:22Z",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0c37a84a-c757-4f22-8c7f-0bf8b6970886",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2019-04-23T17:10:24Z",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-action-rebuild.json b/doc/api_samples/servers/v2.94/server-action-rebuild.json
new file mode 100644
index 0000000000..b5401ad9ca
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-action-rebuild.json
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "1.2.3.4",
+ "accessIPv6" : "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "name" : "foobar",
+ "adminPass" : "seekr3t",
+ "hostname": "custom-hostname.example.com",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-create-req.json b/doc/api_samples/servers/v2.94/server-create-req.json
new file mode 100644
index 0000000000..c6d4ce5640
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-create-req.json
@@ -0,0 +1,30 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "1",
+ "availability_zone": "us-west",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "custom-hostname.example.com",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "IyEvYmluL2Jhc2gKL2Jpbi9zdQplY2hvICJJIGFtIGluIHlvdSEiCg=="
+ },
+ "OS-SCH-HNT:scheduler_hints": {
+ "same_host": "48e6a9f6-30af-47e0-bc04-acaed113bb4e"
+ }
+}
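A hedged sketch of submitting a trimmed-down version of the create request
above (only the required fields plus the FQDN hostname that microversion 2.94
allows; ``$OS_TOKEN`` and ``$COMPUTE_URL`` are placeholders):

.. code-block:: console

   $ curl -s -X POST "$COMPUTE_URL/servers" \
       -H "Content-Type: application/json" \
       -H "X-Auth-Token: $OS_TOKEN" \
       -H "X-OpenStack-Nova-API-Version: 2.94" \
       -d '{"server": {"name": "new-server-test",
                       "imageRef": "70a599e0-31e7-49b7-b260-868f441e862b",
                       "flavorRef": "1",
                       "hostname": "custom-hostname.example.com"}}'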
diff --git a/doc/api_samples/servers/v2.94/server-create-resp.json b/doc/api_samples/servers/v2.94/server-create-resp.json
new file mode 100644
index 0000000000..f50e29dd8b
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "6NpUwoz2QDRN",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-get-resp.json b/doc/api_samples/servers/v2.94/server-get-resp.json
new file mode 100644
index 0000000000..0a05b2f917
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-get-resp.json
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-update-req.json b/doc/api_samples/servers/v2.94/server-update-req.json
new file mode 100644
index 0000000000..1743f05fc7
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-update-req.json
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "4.3.2.1",
+ "accessIPv6": "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname" : "new-server-hostname.example.com"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/server-update-resp.json b/doc/api_samples/servers/v2.94/server-update-resp.json
new file mode 100644
index 0000000000..4aa834f9ec
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/server-update-resp.json
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:37:00.880302",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.94/servers-details-resp.json b/doc/api_samples/servers/v2.94/servers-details-resp.json
new file mode 100644
index 0000000000..54b63fa523
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/servers-details-resp.json
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.1.30",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "2013-09-23T13:53:12.774549",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "2013-09-03T04:01:32Z",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers/detail?limit=1&marker=f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/v2.94/servers-list-resp.json b/doc/api_samples/servers/v2.94/servers-list-resp.json
new file mode 100644
index 0000000000..742d54b170
--- /dev/null
+++ b/doc/api_samples/servers/v2.94/servers-list-resp.json
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "22c91117-08de-4894-9aa9-6ef382400985",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/6f70656e737461636b20342065766572/servers/22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/servers?limit=1&marker=22c91117-08de-4894-9aa9-6ef382400985",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json
index f976225f9c..3f285e6017 100644
--- a/doc/api_samples/versions/v21-version-get-resp.json
+++ b/doc/api_samples/versions/v21-version-get-resp.json
@@ -19,7 +19,7 @@
}
],
"status": "CURRENT",
- "version": "2.90",
+ "version": "2.95",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json
index 327dbd82d6..749fd4674f 100644
--- a/doc/api_samples/versions/versions-get-resp.json
+++ b/doc/api_samples/versions/versions-get-resp.json
@@ -22,7 +22,7 @@
}
],
"status": "CURRENT",
- "version": "2.90",
+ "version": "2.95",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
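To check which maximum microversion a given deployment actually exposes, the
version list can be fetched from the compute endpoint root; a quick sketch
(the host below is the placeholder used throughout the samples):

.. code-block:: console

   $ curl -s http://openstack.example.com/ | python3 -m json.tool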
diff --git a/doc/api_schemas/network_data.json b/doc/api_schemas/network_data.json
index f980973d75..c05f068d05 100644
--- a/doc/api_schemas/network_data.json
+++ b/doc/api_schemas/network_data.json
@@ -1,5 +1,5 @@
{
- "$schema": "http://openstack.org/nova/network_data.json#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"id": "http://openstack.org/nova/network_data.json",
"type": "object",
"title": "OpenStack Nova network metadata schema",
diff --git a/doc/ext/extra_specs.py b/doc/ext/extra_specs.py
index 534f5fa969..ddd233d503 100644
--- a/doc/ext/extra_specs.py
+++ b/doc/ext/extra_specs.py
@@ -103,7 +103,7 @@ def _format_validator_help(
validator: base.ExtraSpecValidator,
summary: bool,
):
- """Generate reStucturedText snippets for the provided validator.
+ """Generate reStructuredText snippets for the provided validator.
:param validator: A validator to document.
:type validator: nova.api.validation.extra_specs.base.ExtraSpecValidator
diff --git a/doc/ext/feature_matrix.py b/doc/ext/feature_matrix.py
index 2bb773a657..31725e311e 100644
--- a/doc/ext/feature_matrix.py
+++ b/doc/ext/feature_matrix.py
@@ -69,10 +69,10 @@ class MatrixImplementation(object):
STATUS_COMPLETE = "complete"
STATUS_PARTIAL = "partial"
STATUS_MISSING = "missing"
- STATUS_UKNOWN = "unknown"
+ STATUS_UNKNOWN = "unknown"
STATUS_ALL = [STATUS_COMPLETE, STATUS_PARTIAL, STATUS_MISSING,
- STATUS_UKNOWN]
+ STATUS_UNKNOWN]
def __init__(self, status=STATUS_MISSING, notes=None, release=None):
"""MatrixImplementation models a cell in the matrix
@@ -394,7 +394,7 @@ class FeatureMatrixDirective(rst.Directive):
impl_status = u"\u2716"
elif impl.status == MatrixImplementation.STATUS_PARTIAL:
impl_status = u"\u2714"
- elif impl.status == MatrixImplementation.STATUS_UKNOWN:
+ elif impl.status == MatrixImplementation.STATUS_UNKNOWN:
impl_status = u"?"
implref.append(nodes.literal(
diff --git a/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json b/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json
index ef9d49647d..cdde7d3097 100644
--- a/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json
+++ b/doc/notification_samples/common_payloads/ImageMetaPropsPayload.json
@@ -4,5 +4,5 @@
"hw_architecture": "x86_64"
},
"nova_object.name": "ImageMetaPropsPayload",
- "nova_object.version": "1.8"
+ "nova_object.version": "1.12"
}
diff --git a/doc/source/_extra/.htaccess b/doc/source/_extra/.htaccess
index d9292223bc..97056cd8e7 100644
--- a/doc/source/_extra/.htaccess
+++ b/doc/source/_extra/.htaccess
@@ -1,4 +1,4 @@
-redirectmatch 301 ^/nova/([^/]+)/addmethod.openstackapi.html$ /nova/$1/contributor/api-2.html
+redirectmatch 301 ^/nova/([^/]+)/addmethod.openstackapi.html$ /nova/$1/contributor/api.html
redirectmatch 301 ^/nova/([^/]+)/admin/flavors2.html$ /nova/$1/admin/flavors.html
redirectmatch 301 ^/nova/([^/]+)/admin/numa.html$ /nova/$1/admin/cpu-topologies.html
redirectmatch 301 ^/nova/([^/]+)/admin/quotas2.html$ /nova/$1/admin/quotas.html
@@ -85,3 +85,4 @@ redirectmatch 301 ^/nova/([^/]+)/admin/system-admin.html$ /nova/$1/admin/index.h
redirectmatch 301 ^/nova/([^/]+)/admin/port_with_resource_request.html$ /nova/$1/admin/ports-with-resource-requests.html
redirectmatch 301 ^/nova/([^/]+)/admin/manage-users.html$ /nova/$1/admin/architecture.html
redirectmatch 301 ^/nova/([^/]+)/admin/mitigation-for-Intel-MDS-security-flaws.html /nova/$1/admin/cpu-models.html
+redirectmatch 301 ^/nova/([^/]+)/contributor/api-2.html$ /nova/$1/contributor/api.html
diff --git a/doc/source/admin/architecture.rst b/doc/source/admin/architecture.rst
index e0194dd78d..f5e2b90dd9 100644
--- a/doc/source/admin/architecture.rst
+++ b/doc/source/admin/architecture.rst
@@ -11,23 +11,26 @@ reads/writes, optionally sending RPC messages to other Nova services,
and generating responses to the REST calls.
RPC messaging is done via the **oslo.messaging** library,
an abstraction on top of message queues.
-Nova uses a messaging-based, ``shared nothing`` architecture and most of the
+Nova uses a messaging-based, "shared nothing" architecture and most of the
major nova components can be run on multiple servers, and have a manager that
is listening for RPC messages.
-The one major exception is ``nova-compute``, where a single process runs on the
+The one major exception is the compute service, where a single process runs on the
hypervisor it is managing (except when using the VMware or Ironic drivers).
The manager also, optionally, has periodic tasks.
-For more details on our RPC system, please see: :doc:`/reference/rpc`
+For more details on our RPC system, refer to :doc:`/reference/rpc`.
-Nova also uses a central database that is (logically) shared between all
-components. However, to aid upgrade, the DB is accessed through an object
-layer that ensures an upgraded control plane can still communicate with
-a ``nova-compute`` running the previous release.
-To make this possible ``nova-compute`` proxies DB requests over RPC to a
-central manager called ``nova-conductor``.
+Nova uses traditional SQL databases to store information.
+These are (logically) shared between multiple components.
+To aid upgrade, the database is accessed through an object layer that ensures
+an upgraded control plane can still communicate with compute nodes running
+the previous release.
+To make this possible, services running on the compute node proxy database
+requests over RPC to a central manager called the conductor.
To horizontally expand Nova deployments, we have a deployment sharding
-concept called cells. For more information please see: :doc:`/admin/cells`
+concept called :term:`cells <cell>`.
+All deployments contain at least one cell.
+For more information, refer to :doc:`/admin/cells`.
Components
@@ -81,8 +84,6 @@ availability zones. Nova supports the following hypervisors:
- `Linux Containers (LXC) <https://linuxcontainers.org>`__
-- `PowerVM <https://www.ibm.com/us-en/marketplace/ibm-powervm>`__
-
- `Quick Emulator (QEMU) <https://wiki.qemu.org/Manual>`__
- `Virtuozzo <https://www.virtuozzo.com/products/vz7.html>`__
@@ -109,11 +110,9 @@ projects on a shared system, and role-based access assignments. Roles control
the actions that a user is allowed to perform.
Projects are isolated resource containers that form the principal
-organizational structure within the Nova service. They typically consist of an
-individual VLAN, and volumes, instances, images, keys, and users. A user can
-specify the project by appending ``project_id`` to their access key. If no
-project is specified in the API request, Nova attempts to use a project with
-the same ID as the user.
+organizational structure within the Nova service. They typically consist of
+networks, volumes, instances, images, keys, and users. A user can
+specify the project by appending ``project_id`` to their access key.
For projects, you can use quota controls to limit the number of processor cores
and the amount of RAM that can be allocated. Other projects also allow quotas
@@ -142,13 +141,14 @@ consumption across available hardware resources.
Block storage
-------------
-OpenStack provides two classes of block storage: ephemeral storage and
-persistent volume.
+OpenStack provides two classes of block storage: storage that is provisioned by
+Nova itself, and storage that is managed by the block storage service, Cinder.
-.. rubric:: Ephemeral storage
+.. rubric:: Nova-provisioned block storage
-Ephemeral storage includes a root ephemeral volume and an additional ephemeral
-volume. These are provided by nova itself.
+Nova provides the ability to create a root disk and an optional "ephemeral"
+volume. The root disk will always be present unless the instance is a
+:term:`Boot From Volume` instance.
The root disk is associated with an instance, and exists only for the life of
this very instance. Generally, it is used to store an instance's root file
@@ -156,7 +156,7 @@ system, persists across the guest operating system reboots, and is removed on
an instance deletion. The amount of the root ephemeral volume is defined by the
flavor of an instance.
-In addition to the ephemeral root volume, flavors can provide an additional
+In addition to the root volume, flavors can provide an additional
ephemeral block device. It is represented as a raw block device with no
partition table or file system. A cloud-aware operating system can discover,
format, and mount such a storage device. Nova defines the default file system
@@ -171,17 +171,17 @@ is possible to configure other filesystem types.
mounts it on ``/mnt``. This is a cloud-init feature, and is not an OpenStack
mechanism. OpenStack only provisions the raw storage.
-.. rubric:: Persistent volume
+.. rubric:: Cinder-provisioned block storage
-A persistent volume is represented by a persistent virtualized block device
-independent of any particular instance. These are provided by the OpenStack
-Block Storage service, cinder.
+The OpenStack Block Storage service, Cinder, provides persistent volumes that
+are represented by a persistent virtualized block device independent of any
+particular instance.
Persistent volumes can be accessed by a single instance or attached to multiple
instances. This type of configuration requires a traditional network file
system to allow multiple instances accessing the persistent volume. It also
requires a traditional network file system like NFS, CIFS, or a cluster file
-system such as GlusterFS. These systems can be built within an OpenStack
+system such as Ceph. These systems can be built within an OpenStack
cluster, or provisioned outside of it, but OpenStack software does not provide
these features.
@@ -194,14 +194,6 @@ if the instance is shut down. For more information about this type of
configuration, see :cinder-doc:`Introduction to the Block Storage service
<configuration/block-storage/block-storage-overview.html>`.
-.. note::
-
- A persistent volume does not provide concurrent access from multiple
- instances. That type of configuration requires a traditional network file
- system like NFS, or CIFS, or a cluster file system such as GlusterFS. These
- systems can be built within an OpenStack cluster, or provisioned outside of
- it, but OpenStack software does not provide these features.
-
Building blocks
---------------
@@ -245,7 +237,7 @@ The displayed image attributes are:
Virtual hardware templates are called ``flavors``. By default, these are
configurable by admin users, however, that behavior can be changed by redefining
-the access controls ``policy.yaml`` on the ``nova-compute`` server. For more
+the access controls ``policy.yaml`` on the ``nova-api`` server. For more
information, refer to :doc:`/configuration/policy`.
For a list of flavors that are available on your system:
diff --git a/doc/source/admin/availability-zones.rst b/doc/source/admin/availability-zones.rst
index 678aff2c5a..28c4451b60 100644
--- a/doc/source/admin/availability-zones.rst
+++ b/doc/source/admin/availability-zones.rst
@@ -9,15 +9,22 @@ Availability Zones
zones, refer to the :doc:`user guide </user/availability-zones>`.
Availability Zones are an end-user visible logical abstraction for partitioning
-a cloud without knowing the physical infrastructure. Availability zones are not
-modeled in the database; rather, they are defined by attaching specific
-metadata information to an :doc:`aggregate </admin/aggregates>` The addition of
-this specific metadata to an aggregate makes the aggregate visible from an
-end-user perspective and consequently allows users to schedule instances to a
-specific set of hosts, the ones belonging to the aggregate.
+a cloud without knowing the physical infrastructure. They can be used to
+partition a cloud on arbitrary factors, such as location (country, datacenter,
+rack), network layout and/or power source.
-However, despite their similarities, there are a few additional differences to
-note when comparing availability zones and host aggregates:
+.. note::
+
+ Availability Zones should not be assumed to map to fault domains and provide
+ no intrinsic HA benefit by themselves.
+
+Availability zones are not modeled in the database; rather, they are defined by
+attaching specific metadata information to an
+:doc:`aggregate </admin/aggregates>`. The addition of this specific metadata to
+an aggregate makes the aggregate visible from an end-user perspective and
+consequently allows users to schedule instances to a specific set of hosts, the
+ones belonging to the aggregate. There are a few additional differences to note
+when comparing availability zones and host aggregates:
- A host can be part of multiple aggregates but it can only be in one
availability zone.
@@ -32,7 +39,7 @@ note when comparing availability zones and host aggregates:
The use of the default availability zone name in requests can be very
error-prone. Since the user can see the list of availability zones, they
have no way to know whether the default availability zone name (currently
- ``nova``) is provided because an host belongs to an aggregate whose AZ
+ ``nova``) is provided because a host belongs to an aggregate whose AZ
metadata key is set to ``nova``, or because there is at least one host
not belonging to any aggregate. Consequently, it is highly recommended
for users to never ever ask for booting an instance by specifying an
@@ -111,11 +118,47 @@ Implications for moving servers
There are several ways to move a server to another host: evacuate, resize,
cold migrate, live migrate, and unshelve. Move operations typically go through
-the scheduler to pick the target host *unless* a target host is specified and
-the request forces the server to that host by bypassing the scheduler. Only
-evacuate and live migrate can forcefully bypass the scheduler and move a
-server to a specified host and even then it is highly recommended to *not*
-force and bypass the scheduler.
+the scheduler to pick the target host.
+
+Prior to API microversion 2.68, when using an older openstackclient (pre-5.5.0)
+or novaclient, it was possible to specify a target host and force the server to
+that host, bypassing the scheduler. Only evacuate and live migrate can
+forcefully bypass the scheduler and move a server to a specified host, and even
+then it is highly recommended to *not* force and bypass the scheduler.
+
+- live migrate with force host (works with older openstackclient (pre-5.5.0)):
+
+.. code-block:: console
+
+ $ openstack server migrate --live <host> <server>
+
+- live migrate without forcing:
+
+.. code-block:: console
+
+ $ openstack server migrate --live-migration --host <host> <server>
+
+Support for the 'server evacuate' command was added to openstackclient in
+5.5.3, but it never exposed the ability to force an evacuation; forcing was
+previously possible with novaclient.
+
+- evacuate with force host:
+
+.. code-block:: console
+
+ $ nova evacuate --force <server> <host>
+
+- evacuate without forcing using novaclient:
+
+.. code-block:: console
+
+ $ nova evacuate <server>
+
+- evacuate without forcing using openstackclient:
+
+.. code-block:: console
+
+ $ openstack server evacuate --host <host> <server>
With respect to availability zones, a server is restricted to a zone if:
@@ -143,16 +186,6 @@ If the server was not created in a specific zone then it is free to be moved
to other zones, i.e. the :ref:`AvailabilityZoneFilter <AvailabilityZoneFilter>`
is a no-op.
-Knowing this, it is dangerous to force a server to another host with evacuate
-or live migrate if the server is restricted to a zone and is then forced to
-move to a host in another zone, because that will create an inconsistency in
-the internal tracking of where that server should live and may require manually
-updating the database for that server. For example, if a user creates a server
-in zone A and then the admin force live migrates the server to zone B, and then
-the user resizes the server, the scheduler will try to move it back to zone A
-which may or may not work, e.g. if the admin deleted or renamed zone A in the
-interim.
-
Resource affinity
~~~~~~~~~~~~~~~~~
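As described above, an availability zone is simply AZ metadata on a host
aggregate. A minimal sketch of creating one and booting a server into it (the
aggregate, host, image, flavor and server names are placeholders):

.. code-block:: console

   $ openstack aggregate create --zone az-east rack-east
   $ openstack aggregate add host rack-east compute-east-01
   $ openstack server create --availability-zone az-east \
       --image cirros --flavor m1.tiny vm-in-east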
diff --git a/doc/source/admin/cells.rst b/doc/source/admin/cells.rst
index 87bf81a542..bb83e967f3 100644
--- a/doc/source/admin/cells.rst
+++ b/doc/source/admin/cells.rst
@@ -26,9 +26,13 @@ Laski gave at the Austin (Newton) summit which may be worth watching.
Overview
--------
-The purpose of the cells functionality in nova is specifically to
-allow larger deployments to shard their many compute nodes into cells.
-A basic Nova system consists of the following components:
+The purpose of the cells functionality in nova is to allow larger deployments
+to shard their many compute nodes into cells. All nova deployments are by
+definition cells deployments, even if most will only ever have a single cell.
+This means a multi-cell deployment will not be radically different from a
+"standard" nova deployment.
+
+Consider such a deployment. It will consist of the following components:
- The :program:`nova-api` service which provides the external REST API to
users.
@@ -43,7 +47,7 @@ A basic Nova system consists of the following components:
instances being built but not yet scheduled.
- The :program:`nova-conductor` service which offloads long-running tasks for
- the API-level service and insulates compute nodes from direct database access
+ the API-level services and insulates compute nodes from direct database access
- The :program:`nova-compute` service which manages the virt driver and
hypervisor host.
@@ -60,15 +64,19 @@ A basic Nova system consists of the following components:
- A message queue which allows the services to communicate with each
other via RPC.
-All deployments have at least the above components. Smaller deployments
-likely have a single message queue that all services share and a
-single database server which hosts the API database, a single cell
-database, as well as the required cell0 database. This is considered a
-"single-cell deployment" because it only has one "real" cell.
-However, while there will only ever be one global API database, a larger
-deployments can have many cell databases (where the bulk of the instance
-information lives), each with a portion of the instances for the entire
-deployment within, as well as per-cell message queues.
+In smaller deployments, there will typically be a single message queue that all
+services share and a single database server which hosts the API database, a
+single cell database, as well as the required cell0 database. Because we only
+have one "real" cell, we consider this a "single-cell deployment".
+
+In larger deployments, we can opt to shard the deployment using multiple cells.
+In this configuration there will still only be one global API database but
+there will be a cell database (where the bulk of the instance information
+lives) for each cell, each containing a portion of the instances for the entire
+deployment within, as well as per-cell message queues and per-cell
+:program:`nova-conductor` instances. There will also be an additional
+:program:`nova-conductor` instance, known as a *super conductor*, to handle
+API-level operations.
In these larger deployments, each of the nova services will use a cell-specific
configuration file, all of which will at a minimum specify a message queue
@@ -98,6 +106,9 @@ other cells in the API database, with records called *cell mappings*.
lower-level services. See the ``nova-manage`` :ref:`man-page-cells-v2`
commands for more information about how to create and examine these records.
+The following section goes into more detail about the difference between
+single-cell and multi-cell deployments.
+
Service layout
--------------
@@ -242,70 +253,42 @@ any other API-layer services via RPC, nor do they have access to the
API database for global visibility of resources across the cloud.
This is intentional and provides security and failure domain
isolation benefits, but also has impacts on some things that would
-otherwise require this any-to-any communication style. Check the
-release notes for the version of Nova you are using for the most
-up-to-date information about any caveats that may be present due to
-this limitation.
+otherwise require this any-to-any communication style. Check :ref:`upcall`
+below for the most up-to-date information about any caveats that may be present
+due to this limitation.
Database layout
---------------
As mentioned previously, there is a split between global data and data that is
-local to a cell.
+local to a cell. These database schemas are referred to as the *API* and *main*
+database schemas, respectively.
-The following is a breakdown of what data can uncontroversially considered
-global versus local to a cell. Missing data will be filled in as consensus is
-reached on the data that is more difficult to cleanly place. The missing data
-is mostly concerned with scheduling and networking.
-
-.. note::
+API database
+~~~~~~~~~~~~
- This list of tables is accurate as of the 15.0.0 (Pike) release. It's
- possible that schema changes may have added additional tables since.
+The API database is the database used for API-level services, such as
+:program:`nova-api` and, in a multi-cell deployment, the superconductor.
+The models and migrations related to this database can be found in
+``nova.db.api``, and the database can be managed using the
+:program:`nova-manage api_db` commands.
-Global (API-level) tables
-~~~~~~~~~~~~~~~~~~~~~~~~~
+Main (cell-level) database
+~~~~~~~~~~~~~~~~~~~~~~~~~~
-- ``instance_types``
-- ``instance_type_projects``
-- ``instance_type_extra_specs``
-- ``quotas``
-- ``project_user_quotas``
-- ``quota_classes``
-- ``quota_usages``
-- ``security_groups``
-- ``security_group_rules``
-- ``security_group_default_rules``
-- ``provider_fw_rules``
-- ``key_pairs``
-- ``migrations``
-- ``networks``
-- ``tags``
-
-Cell-level tables
-~~~~~~~~~~~~~~~~~
-
-- ``instances``
-- ``instance_info_caches``
-- ``instance_extra``
-- ``instance_metadata``
-- ``instance_system_metadata``
-- ``instance_faults``
-- ``instance_actions``
-- ``instance_actions_events``
-- ``instance_id_mappings``
-- ``pci_devices``
-- ``block_device_mapping``
-- ``virtual_interfaces``
+The main database is the database used for cell-level :program:`nova-conductor`
+instances. The models and migrations related to this database can be found in
+``nova.db.main``, and the database can be managed using the
+:program:`nova-manage db` commands.
Usage
-----
As noted previously, all deployments are in effect now cells v2 deployments. As
-a result, setup of a any nova deployment - even those that intend to only have
-once cell - will involve some level of cells configuration. These changes are
+a result, setup of any nova deployment - even those that intend to only have
+one cell - will involve some level of cells configuration. These changes are
configuration-related, both in the main nova configuration file as well as some
extra records in the databases.
@@ -345,11 +328,11 @@ Configuring a new deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you are installing Nova for the first time and have no compute hosts in the
-database yet then it will be necessary to configure cell0 and at least once
-additional "real" cell. To begin, ensure your API database has been created
-using the :program:`nova-manage api_db sync` command. Ensure the connection
-information for this database is stored in the ``nova.conf`` file using the
-:oslo.config:option:`api_database.connection` config option:
+database yet then it will be necessary to configure cell0 and at least one
+additional "real" cell. To begin, ensure your API database schema has been
+populated using the :program:`nova-manage api_db sync` command. Ensure the
+connection information for this database is stored in the ``nova.conf`` file
+using the :oslo.config:option:`api_database.connection` config option:
.. code-block:: ini
@@ -557,7 +540,6 @@ existing instances to the new cell(s). For example:
have been mapped. An exit code of 1 indicates that there are remaining
instances that need to be mapped.
-
Template URLs in Cell Mappings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -618,7 +600,7 @@ of ``rabbit://bob:s3kret@otherhost/nova`` when used with the above example.
The :oslo.config:option:`transport_url` option can contain an
extended syntax for the "netloc" part of the URL
(i.e. ``userA:passwordA@hostA:portA,userB:passwordB:hostB:portB``). In this
- case, substitions of the form ``username1``, ``username2``, etc will be
+ case, substitutions of the form ``username1``, ``username2``, etc will be
honored and can be used in the template URL.
The templating of these URLs may be helpful in order to provide each service host
@@ -1152,7 +1134,7 @@ real-world users of the feature.
- `Rocky Summit Video - Moving from CellsV1 to CellsV2 at CERN`__
- `Stein Summit Video - Scaling Nova with CellsV2: The Nova Developer and the
CERN Operator perspective`__
-- `Ussuri Summit Video - What's new in Nova Cellsv2?`__
+- `Train Summit Video - What's new in Nova Cellsv2?`__
.. __: https://www.openstack.org/videos/austin-2016/nova-cells-v2-whats-going-on
.. __: https://www.openstack.org/videos/boston-2017/scaling-nova-how-cellsv2-affects-your-deployment
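A hedged illustration of the fresh-deployment workflow described above:
populate the API schema, map cell0, create one real cell, sync the cell
database and map compute hosts (all connection URLs are placeholders):

.. code-block:: console

   $ nova-manage api_db sync
   $ nova-manage cell_v2 map_cell0 \
       --database_connection mysql+pymysql://nova:secret@db/nova_cell0
   $ nova-manage cell_v2 create_cell --name cell1 \
       --database_connection mysql+pymysql://nova:secret@db/nova_cell1 \
       --transport-url rabbit://nova:secret@mq:5672/
   $ nova-manage db sync
   $ nova-manage cell_v2 discover_hosts --verbose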
diff --git a/doc/source/admin/compute-node-identification.rst b/doc/source/admin/compute-node-identification.rst
new file mode 100644
index 0000000000..31d4802d0b
--- /dev/null
+++ b/doc/source/admin/compute-node-identification.rst
@@ -0,0 +1,83 @@
+===========================
+Compute Node Identification
+===========================
+
+Nova requires that compute nodes maintain a constant and consistent identity
+during their lifecycle. With the exception of the ironic driver, starting in
+the 2023.1 release, this is achieved by use of a file containing the node
+unique identifier that is persisted on disk. Prior to 2023.1, a combination of
+the compute node's hostname and the :oslo.config:option:`host` value in the
+configuration file were used.
+
+The 2023.1 and later compute node identification file must remain unchanged
+during the lifecycle of the compute node. Changing the value or removing the
+file will result in a failure to start and may require advanced techniques
+for recovery. The file is read once at ``nova-compute`` startup, at which point
+it is validated for formatting and the corresponding node is located or
+created in the database.
+
+.. note::
+
+ Even after 2023.1, the compute node's hostname may not be changed after
+   the initial registration with the controller nodes; it is just not used
+ as the primary method for identification.
+
+The behavior of ``nova-compute`` is different when using the ironic driver,
+as the (UUID-based) identity and mapping of compute nodes to compute manager
+service hosts is dynamic. In that case, no single node identity is maintained
+by the compute host and thus no identity file is read or written. Thus none
+of the sections below apply to hosts with :oslo.config:option:`compute_driver`
+set to `ironic`.
+
+Self-provisioning of the node identity
+--------------------------------------
+
+By default, ``nova-compute`` will automatically generate and write a UUID to
+disk the first time it starts up, and will use that going forward as its
+stable identity. Using the :oslo.config:option:`state_path`
+(which is ``/var/lib/nova`` on most systems), a ``compute_id`` file will be
+created with a generated UUID.
+
+Since this file (and its parent directory) is writable by nova, it may be
+desirable to move this to one of the other locations that nova looks for the
+identification file.
+
+Deployment provisioning of the node identity
+--------------------------------------------
+
+In addition to the location mentioned above, nova will also search the parent
+directories of any config file in use (either the defaults or provided on
+the command line) for a ``compute_id`` file. Thus, a deployment tool may, on
+most systems, pre-provision the node's UUID by writing one to
+``/etc/nova/compute_id``.
+
+The contents of the file should be a single UUID in canonical textual
+representation with no additional whitespace or other characters. The following
+should work on most Linux systems:
+
+.. code-block:: shell
+
+ $ uuidgen > /etc/nova/compute_id
+
+.. note::
+
+ **Do not** execute the above command blindly in every run of a deployment
+ tool, as that will result in overwriting the ``compute_id`` file each time,
+ which *will* prevent nova from working properly.
+
+Upgrading from pre-2023.1
+-------------------------
+
+Before release 2023.1, ``nova-compute`` only used the hostname (combined with
+:oslo.config:option:`host`, if set) to identify its compute node objects in
+the database. When upgrading from a prior release, the compute node will
+perform a one-time migration of the hostname-matched compute node UUID to the
+``compute_id`` file in the :oslo.config:option:`state_path` location.
+
+.. note::
+
+ It is imperative that you allow the above migration to run and complete on
+ compute nodes that are being upgraded. Skipping this step by
+ pre-provisioning a ``compute_id`` file before the upgrade will **not** work
+ and will be equivalent to changing the compute node UUID after it has
+ already been created once.
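Given that warning, a deployment tool should only seed the identity once. A
small sketch of an idempotent variant of the provisioning command shown
earlier:

.. code-block:: shell

   # Only generate a compute_id if one does not already exist.
   [ -f /etc/nova/compute_id ] || uuidgen > /etc/nova/compute_id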
diff --git a/doc/source/admin/config-drive.rst b/doc/source/admin/config-drive.rst
index 05f553478b..7111a2407b 100644
--- a/doc/source/admin/config-drive.rst
+++ b/doc/source/admin/config-drive.rst
@@ -39,9 +39,8 @@ compute host and image.
.. rubric:: Compute host requirements
-The following virt drivers support the config drive: libvirt,
-Hyper-V, VMware, and (since 17.0.0 Queens) PowerVM. The Bare Metal service also
-supports the config drive.
+The following virt drivers support the config drive: libvirt, Hyper-V and
+VMware. The Bare Metal service also supports the config drive.
- To use config drives with libvirt or VMware, you must first
install the :command:`genisoimage` package on each compute host. Use the
@@ -56,8 +55,8 @@ supports the config drive.
:oslo.config:option:`hyperv.qemu_img_cmd` config option to the full path to an
:command:`qemu-img` command installation.
-- To use config drives with PowerVM or the Bare Metal service, you do not need
- to prepare anything.
+- To use config drives with the Bare Metal service, you do not need to prepare
+ anything.
.. rubric:: Image requirements
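The config drive itself is requested per server at boot time; a minimal sketch
with placeholder image, flavor and server names (the cloud-wide alternative is
setting ``force_config_drive = true`` in the ``[DEFAULT]`` section of
nova.conf):

.. code-block:: console

   $ openstack server create --config-drive True \
       --image cirros --flavor m1.tiny config-drive-test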
diff --git a/doc/source/admin/configuration/hypervisor-hyper-v.rst b/doc/source/admin/configuration/hypervisor-hyper-v.rst
index 79d72cad05..8ce9c2ebb4 100644
--- a/doc/source/admin/configuration/hypervisor-hyper-v.rst
+++ b/doc/source/admin/configuration/hypervisor-hyper-v.rst
@@ -244,7 +244,7 @@ The following packages must be installed with pip:
* ``pywin32``
* ``pymysql``
* ``greenlet``
-* ``pycryto``
+* ``pycrypto``
* ``ecdsa``
* ``amqp``
* ``wmi``
@@ -311,7 +311,7 @@ To install ``nova-compute``, run:
Configure nova-compute
~~~~~~~~~~~~~~~~~~~~~~
-The ``nova.conf`` file must be placed in ``C:\etc\nova`` for running OpenStack
+The ``nova.conf`` file must be placed in ``C:\nova\etc\nova`` for running OpenStack
on Hyper-V. Below is a sample ``nova.conf`` for Windows:
.. code-block:: ini
@@ -425,7 +425,7 @@ Windows server:
.. code-block:: none
- PS C:\> C:\Python27\python.exe c:\Python27\Scripts\nova-compute --config-file c:\etc\nova\nova.conf
+ PS C:\> C:\Python38\python.exe c:\Python38\Scripts\nova-compute --config-file c:\nova\etc\nova\nova.conf
Troubleshooting
diff --git a/doc/source/admin/configuration/hypervisor-powervm.rst b/doc/source/admin/configuration/hypervisor-powervm.rst
deleted file mode 100644
index a2947ff608..0000000000
--- a/doc/source/admin/configuration/hypervisor-powervm.rst
+++ /dev/null
@@ -1,75 +0,0 @@
-=======
-PowerVM
-=======
-
-Introduction
-------------
-
-OpenStack Compute supports the PowerVM hypervisor through `NovaLink`_. In the
-NovaLink architecture, a thin NovaLink virtual machine running on the Power
-system manages virtualization for that system. The ``nova-compute`` service
-can be installed on the NovaLink virtual machine and configured to use the
-PowerVM compute driver. No external management element (e.g. Hardware
-Management Console) is needed.
-
-.. _NovaLink: https://www.ibm.com/support/knowledgecenter/en/POWER8/p8eig/p8eig_kickoff.htm
-
-
-Configuration
--------------
-
-In order to function properly, the ``nova-compute`` service must be executed
-by a member of the ``pvm_admin`` group. Use the ``usermod`` command to add the
-user. For example, to add the ``stacker`` user to the ``pvm_admin`` group, execute:
-
-.. code-block:: console
-
- # usermod -a -G pvm_admin stacker
-
-The user must re-login for the change to take effect.
-
-To enable the PowerVM compute driver, configure
-:oslo.config:option:`DEFAULT.compute_driver` = ``powervm.PowerVMDriver``. For
-example:
-
-.. code-block:: ini
-
- [DEFAULT]
- compute_driver = powervm.PowerVMDriver
-
-The PowerVM driver supports two types of storage for ephemeral disks:
-``localdisk`` or ``ssp``. If ``localdisk`` is selected, you must specify which
-volume group should be used. E.g.:
-
-.. code-block:: ini
-
- [powervm]
- disk_driver = localdisk
- volume_group_name = openstackvg
-
-.. note::
-
- Using the ``rootvg`` volume group is strongly discouraged since ``rootvg``
- is used by the management partition and filling this will cause failures.
-
-The PowerVM driver also supports configuring the default amount of physical
-processor compute power (known as "proc units") which will be given to each
-vCPU. This value will be used if the requested flavor does not specify the
-``powervm:proc_units`` extra-spec. A factor value of 1.0 means a whole physical
-processor, whereas 0.05 means 1/20th of a physical processor. E.g.:
-
-.. code-block:: ini
-
- [powervm]
- proc_units_factor = 0.1
-
-
-Volume Support
---------------
-
-Volume support is provided for the PowerVM virt driver via Cinder. Currently,
-the only supported volume protocol is `vSCSI`__ Fibre Channel. Attach, detach,
-and extend are the operations supported by the PowerVM vSCSI FC volume adapter.
-:term:`Boot From Volume` is not yet supported.
-
-.. __: https://www.ibm.com/support/knowledgecenter/en/POWER8/p8hat/p8hat_virtualscsi.htm
diff --git a/doc/source/admin/configuration/hypervisors.rst b/doc/source/admin/configuration/hypervisors.rst
index ed913b083f..26aa8f97cc 100644
--- a/doc/source/admin/configuration/hypervisors.rst
+++ b/doc/source/admin/configuration/hypervisors.rst
@@ -11,7 +11,6 @@ Hypervisors
hypervisor-vmware
hypervisor-hyper-v
hypervisor-virtuozzo
- hypervisor-powervm
hypervisor-zvm
hypervisor-ironic
@@ -44,9 +43,6 @@ The following hypervisors are supported:
* `Virtuozzo`_ 7.0.0 and newer - OS Containers and Kernel-based Virtual
Machines supported. The supported formats include ploop and qcow2 images.
-* `PowerVM`_ - Server virtualization with IBM PowerVM for AIX, IBM i, and Linux
- workloads on the Power Systems platform.
-
* `zVM`_ - Server virtualization on z Systems and IBM LinuxONE, it can run Linux,
z/OS and more.
@@ -68,8 +64,6 @@ virt drivers:
* :oslo.config:option:`compute_driver` = ``hyperv.HyperVDriver``
-* :oslo.config:option:`compute_driver` = ``powervm.PowerVMDriver``
-
* :oslo.config:option:`compute_driver` = ``zvm.ZVMDriver``
* :oslo.config:option:`compute_driver` = ``fake.FakeDriver``
@@ -83,6 +77,5 @@ virt drivers:
.. _VMware vSphere: https://www.vmware.com/support/vsphere-hypervisor.html
.. _Hyper-V: https://docs.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-technology-overview
.. _Virtuozzo: https://www.virtuozzo.com/products/vz7.html
-.. _PowerVM: https://www.ibm.com/us-en/marketplace/ibm-powervm
.. _zVM: https://www.ibm.com/it-infrastructure/z/zvm
.. _Ironic: https://docs.openstack.org/ironic/latest/
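Selecting a virt driver is a one-line nova.conf change on the compute node;
for example, for the libvirt driver (the most common case):

.. code-block:: ini

   [DEFAULT]
   compute_driver = libvirt.LibvirtDriver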
diff --git a/doc/source/admin/configuring-migrations.rst b/doc/source/admin/configuring-migrations.rst
index 4de7fe36aa..357189b3b1 100644
--- a/doc/source/admin/configuring-migrations.rst
+++ b/doc/source/admin/configuring-migrations.rst
@@ -263,8 +263,10 @@ memory-intensive instances succeed.
live_migration_downtime_steps = 10
live_migration_downtime_delay = 75
- ``live_migration_downtime`` sets the maximum permitted downtime for a live
- migration, in *milliseconds*. The default is 500.
+ ``live_migration_downtime`` sets the target maximum period of time Nova will
+ try to keep the instance paused during the last part of the memory copy, in
+ *milliseconds*. This value may be exceeded if there is any reduction in the
+ transfer rate after the VM is paused. The default is 500.
``live_migration_downtime_steps`` sets the total number of adjustment steps
until ``live_migration_downtime`` is reached. The default is 10 steps.
diff --git a/doc/source/admin/cpu-topologies.rst b/doc/source/admin/cpu-topologies.rst
index 179f7bd377..082c88f655 100644
--- a/doc/source/admin/cpu-topologies.rst
+++ b/doc/source/admin/cpu-topologies.rst
@@ -95,14 +95,64 @@ In all cases where NUMA awareness is used, the ``NUMATopologyFilter``
filter must be enabled. Details on this filter are provided in
:doc:`/admin/scheduling`.
+The host NUMA node(s) used are chosen according to a configurable strategy,
+controlled by the ``packing_host_numa_cells_allocation_strategy`` option in
+``nova.conf``. By default this option is set to ``True``, which selects the
+"pack" strategy: NUMA nodes with the **least** free resources (in other words,
+the **most used** nodes) are tried first, so load is packed onto an already
+used NUMA node until it is exhausted, and only then is the next most used node
+considered. Setting the option to ``False`` selects the "spread" strategy,
+which is the reverse: NUMA nodes with the **most** free resources are tried
+first, balancing load across all NUMA nodes and keeping the amount of free
+resources on each node as even as possible.
+
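+For example, a minimal sketch that switches to the "spread" strategy (assuming
+the option lives in the ``[compute]`` section of ``nova.conf``) could look
+like:
+
+.. code-block:: ini
+
+ [compute]
+ packing_host_numa_cells_allocation_strategy = False
+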
.. caution::
- The NUMA node(s) used are normally chosen at random. However, if a PCI
- passthrough or SR-IOV device is attached to the instance, then the NUMA
- node that the device is associated with will be used. This can provide
- important performance improvements. However, booting a large number of
- similar instances can result in unbalanced NUMA node usage. Care should
- be taken to mitigate this issue. See this `discussion`_ for more details.
+ The host's NUMA nodes are placed in a list, and that list is sorted based on
+ the chosen strategy and on the resources available in each NUMA node. The
+ sorts are performed on the same list one after another, so the last sort
+ applied has the highest priority.
+
+Python uses a so-called stable sort: each sort applied to the list reorders
+items only if the property being sorted on differs between them. If that
+property is equal for all items, their relative order is left unchanged.
+
+The sorts are applied to the list of the host's NUMA nodes in the following
+order:
+
+* sort based on the available memory on the node (first sort, lowest priority)
+* sort based on CPU usage (if the guest requests shared CPUs) or on the number
+  of free pinned CPUs otherwise
+* sort based on the number of free PCI devices on the node (last sort, highest
+  priority)
+
+The highest sorting priority therefore goes to host NUMA nodes with PCI devices
+attached. If the VM requests PCI device(s), the logic **always** puts host
+NUMA nodes with more PCI devices at the beginning of the list. If no PCI
+devices are requested, NUMA nodes with no (or fewer) PCI devices available are
+placed at the beginning of the list.
+
+.. caution::
+
+ The described logic for PCI devices is used **both** for "pack" and "spread"
+ strategies. It is done to keep backward compatibility with previous nova
+ versions.
+
+
+During "pack" logic implementation rest (two) sorts are performed with sort
+order to move NUMA nodes with more available resources (CPUs and memory) at the
+END of host's NUMA nodes list. Sort based on memory is the first sort
+implemented and has least priority.
+
+During "spread" logic implementation rest (two) sorts are performed with sort
+order to move NUMA nodes with more available resources (CPUs and memory) at the
+BEGINNING of host's NUMA nodes list. Sort based on memory is the first sort
+implemented and has least priority.
+
+Finally resulting list (after all sorts) is passed next and attempts to place
+VM's NUMA node to host's NUMA node are performed starting from the first
+host's NUMA node in list.
.. caution::
@@ -680,6 +730,97 @@ CPU policy, meanwhile, will consume ``VCPU`` inventory.
.. _configure-hyperv-numa:
+Configuring CPU power management for dedicated cores
+----------------------------------------------------
+
+.. versionadded:: 27.0.0
+
+ This feature was introduced in the 2023.1 Antelope release.
+
+.. important::
+
+ The functionality described below is currently only supported by the
+ libvirt/KVM driver.
+
+For power saving reasons, operators can decide to turn down the power usage of
+CPU cores whether they are in use or not. For obvious reasons, Nova only allows
+changing the power consumption of dedicated CPU cores and not shared ones.
+Accordingly, this feature relies on the
+:oslo.config:option:`compute.cpu_dedicated_set` config option to know which CPU
+cores to handle.
+The main action to enable the power management of dedicated cores is to set
+:oslo.config:option:`libvirt.cpu_power_management` config option to ``True``.
+
+By default, if this option is enabled, Nova will look up the dedicated cores
+and power them down at compute service startup. Then, when an instance is
+started and pinned to a dedicated core, that core is powered up right before
+the libvirt guest starts. Conversely, once an instance is stopped, migrated or
+deleted, the corresponding dedicated core is powered down again.
+
+There are two distinct strategies for powering cores up or down:
+
+- the default is to offline the CPU core and online it when needed.
+- an alternative strategy is to use two distinct CPU governors for the up state
+ and the down state.
+
+The strategy can be chosen using the
+:oslo.config:option:`libvirt.cpu_power_management_strategy` config option:
+``cpu_state`` selects the online/offline strategy, while ``governor`` selects
+the alternative strategy.
+We default to turning off the cores as it provides the best power savings,
+and other tools outside Nova, such as tuned, can be used to manage the
+governors. That being said, we also provide a way to automatically change the
+governors on the fly, as explained below.
+
+If the strategy is set to ``governor``, two config options are provided to
+define which CPU governor to use for each of the up and down states:
+
+- :oslo.config:option:`libvirt.cpu_power_governor_low` will define the governor
+ to use for the powerdown state (defaults to ``powersave``)
+- :oslo.config:option:`libvirt.cpu_power_governor_high` will define the
+ governor to use for the powerup state (defaults to ``performance``)
+
+.. important::
+ It is the responsibility of the operator to ensure that the governors
+ defined by the configuration options are supported by the kernel of the
+ OS that runs the compute service.
+
+ As a side note, we recommend the ``schedutil`` governor as an alternative for
+ the high-power state (if the kernel supports it) as the CPU frequency is
+ dynamically set based on CPU task states. Other governors may be worth
+ testing, including ``conservative`` and ``ondemand``, which consume quite a
+ bit more power than ``schedutil`` but are more efficient than
+ ``performance``. See the `Linux kernel docs`_ for further explanations.
+
+.. _`Linux kernel docs`: https://www.kernel.org/doc/Documentation/cpu-freq/governors.txt
+
+As an example, the relevant part of a ``nova.conf`` configuration would look like::
+
+ [compute]
+ cpu_dedicated_set=2-17
+
+ [libvirt]
+ cpu_power_management=True
+ cpu_power_management_strategy=cpu_state
+
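+If you opt for the ``governor`` strategy instead, a sketch using the governor
+options described above (with their documented defaults written out
+explicitly) could look like::
+
+ [compute]
+ cpu_dedicated_set=2-17
+
+ [libvirt]
+ cpu_power_management=True
+ cpu_power_management_strategy=governor
+ cpu_power_governor_low=powersave
+ cpu_power_governor_high=performance
+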
+.. warning::
+
+ CPU core #0 has a special meaning in most recent Linux kernels. Using it for
+ CPU pinning is already highly discouraged, but in particular refrain from
+ having it power managed, or you may get surprises if Nova turns it off!
+
+One last important note: you may change the CPU power management strategy
+during the compute node's lifecycle, or you may already be managing the CPU
+states yourself. To ensure that Nova can correctly manage the CPU power
+states, a couple of checks were added at startup that prevent the nova-compute
+service from starting if the following rules are not met:
+
+- if the operator opts for the ``cpu_state`` strategy, then all dedicated CPU
+ governors *MUST* be identical.
+- if the operator opts for the ``governor`` strategy, then all dedicated CPU
+ cores *MUST* be online.
+
Configuring Hyper-V compute nodes for instance NUMA policies
------------------------------------------------------------
@@ -724,5 +865,4 @@ instances with a NUMA topology.
.. Links
.. _`Image metadata`: https://docs.openstack.org/image-guide/introduction.html#image-metadata
-.. _`discussion`: http://lists.openstack.org/pipermail/openstack-dev/2016-March/090367.html
.. _`MTTCG project`: http://wiki.qemu.org/Features/tcg-multithread
diff --git a/doc/source/admin/evacuate.rst b/doc/source/admin/evacuate.rst
index ef9eccd931..18796d9c23 100644
--- a/doc/source/admin/evacuate.rst
+++ b/doc/source/admin/evacuate.rst
@@ -97,3 +97,17 @@ instances up and running.
using a pattern you might want to use the ``--strict`` flag which got introduced
in version 10.2.0 to make sure nova matches the ``FAILED_HOST``
exactly.
+
+.. note::
+ .. code-block:: bash
+
+ +------+--------+--------------+
+ | Name | Status | Task State |
+ +------+--------+--------------+
+ | vm_1 | ACTIVE | powering-off |
+ +------+--------+--------------+
+
+ If the instance task state is not None, evacuation will be possible. However,
+ depending on the ongoing operation, there may be clean up required in other
+ services which the instance was using, such as neutron, cinder, glance, or
+ the storage backend. \ No newline at end of file
diff --git a/doc/source/admin/huge-pages.rst b/doc/source/admin/huge-pages.rst
index 73f6c5dd2d..a451c6e3ab 100644
--- a/doc/source/admin/huge-pages.rst
+++ b/doc/source/admin/huge-pages.rst
@@ -96,7 +96,7 @@ pages at boot time, run:
.. code-block:: console
- # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' > /etc/default/grub
+ # echo 'GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"' >> /etc/default/grub
$ grep GRUB_CMDLINE_LINUX /etc/default/grub
GRUB_CMDLINE_LINUX="..."
GRUB_CMDLINE_LINUX="$GRUB_CMDLINE_LINUX hugepagesz=2M hugepages=2048 transparent_hugepage=never"
diff --git a/doc/source/admin/hw-emulation-architecture.rst b/doc/source/admin/hw-emulation-architecture.rst
new file mode 100644
index 0000000000..71222fa043
--- /dev/null
+++ b/doc/source/admin/hw-emulation-architecture.rst
@@ -0,0 +1,133 @@
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+============================================================================
+hw_emulation_architecture - Configuring QEMU instance emulation architecture
+============================================================================
+
+.. versionadded:: 25.0.0 (Yoga)
+
+ The libvirt driver now allows for handling of specific cpu architectures
+ when defined within the image metadata properties, to be emulated through
+ QEMU.
+
+ Added ``hw_emulation_architecture`` as an available image_meta property.
+
+.. note::
+
+ The following only applies to environments using libvirt compute hosts,
+ and should be considered experimental in its entirety during its first
+ release as a feature.
+
+Introduction
+------------
+
+This capability fills a need in environments that cannot support the various
+CPU architectures present today with physical hardware. A small subset of
+architectures that are supported both within libvirt and QEMU have been
+selected as prime candidates for emulation support.
+
+While support has been added for the below base architectures, this does
+not guarantee that every subset or custom operating system that leverages
+one of these architectures will function.
+
+Configure
+---------
+
+-------------------
+QEMU Binary Support
+-------------------
+
+To ensure that libvirt and QEMU can properly handle the level of cpu
+emulation desired by the end-user, you are required to install the specific
+``qemu-system-XXX``, ``qemu-efi-arm``, ``qemu-efi-aarch64`` binaries on the
+compute nodes that will be providing support.
+
+---------------
+Console Support
+---------------
+
+Consideration needs to be given to which architectures you want to
+support, as there are limitations on support through spice, novnc, and
+serial. All testing and validation has been done to ensure that spice and
+serial connections function as expected.
+
+- ``AARCH64`` - Spice & Serial
+- ``S390X`` - Serial
+- ``PPC64LE`` - Spice & Serial
+- ``MIPSEL`` - untested
+
+--------------------------------
+Supported Emulated Architectures
+--------------------------------
+
+The supported emulated architectures require specific image meta
+properties to be set in order to trigger the proper settings to be
+configured by libvirtd.
+
+For end users the emulation architecture of an instance is controlled by the
+selection of an image with the ``hw_emulation_architecture`` image metadata
+property set.
+
+
+AARCH64
+~~~~~~~
+
+``Tested and Validated as functional``
+
+.. code-block:: shell
+
+ $ openstack image set --property hw_emulation_architecture=aarch64 $IMAGE
+ $ openstack image set --property hw_machine_type=virt $IMAGE
+ $ openstack image set --property hw_firmware_type=uefi $IMAGE
+
+S390x
+~~~~~
+
+``Tested and Validated as functional``
+
+.. code-block:: shell
+
+ $ openstack image set --property hw_emulation_architecture=s390x $IMAGE
+ $ openstack image set --property hw_machine_type=s390-ccw-virtio $IMAGE
+ $ openstack image set --property hw_video_model=virtio $IMAGE
+
+PPC64LE
+~~~~~~~
+
+``Tested and Validated as functional``
+
+.. code-block:: shell
+
+ $ openstack image set --property hw_emulation_architecture=ppc64le $IMAGE
+ $ openstack image set --property hw_machine_type=pseries $IMAGE
+
+
+MIPSEL
+~~~~~~
+
+``Testing and validation is ongoing to overcome PCI issues``
+
+.. note::
+
+ Support is currently impacted; one current workaround is manually
+ patching and compiling as described in the libvirt bug
+ `XML error: No PCI buses available`_.
+
+.. _`XML error: No PCI buses available`: https://bugzilla.redhat.com/show_bug.cgi?id=1432101
+
+.. code-block:: shell
+
+ $ openstack image set --property hw_emulation_architecture=mipsel $IMAGE
+ $ openstack image set --property hw_machine_type=virt $IMAGE
diff --git a/doc/source/admin/hw-machine-type.rst b/doc/source/admin/hw-machine-type.rst
index e8a0df87e4..7b6af4e410 100644
--- a/doc/source/admin/hw-machine-type.rst
+++ b/doc/source/admin/hw-machine-type.rst
@@ -25,6 +25,10 @@ hw_machine_type - Configuring and updating QEMU instance machine types
Added ``nova-manage`` commands to control the machine_type of an instance.
+.. versionchanged:: 25.0.0 (Yoga)
+
+ Added ``nova-manage`` commands to set the image properties of an instance.
+
.. note::
The following only applies to environments using libvirt compute hosts.
@@ -135,3 +139,31 @@ Once it has been verified that all instances within the environment or specific
cell have had a machine type recorded then the
:oslo.config:option:`libvirt.hw_machine_type` can be updated without impacting
existing instances.
+
+Device bus and model image properties
+-------------------------------------
+
+.. versionadded:: 25.0.0 (Yoga)
+
+Device bus and model types defined as image properties associated with an
+instance are always used when launching instances with the libvirt driver.
+Support for each device bus and model is dependent on the machine type used and
+version of QEMU available on the underlying compute host. As such, any changes
+to the machine type of an instance or version of QEMU on a host might suddenly
+invalidate the stored device bus or model image properties.
+
+It is now possible to change the stored image properties of an instance without
+having to rebuild the instance.
+
+To show the stored image properties of an instance:
+
+.. code-block:: shell
+
+ $ nova-manage image_property show $instance_uuid $property
+
+To update the stored image properties of an instance:
+
+.. code-block:: shell
+
+ $ nova-manage image_property set \
+ $instance_uuid --property $property_name=$property_value
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index c8515f3ec1..8cb5bf7156 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -99,11 +99,9 @@ the defaults from the :doc:`install guide </install/index>` will be sufficient.
cells allow sharding of your compute environment. Upfront planning is key to
a successful cells v2 layout.
-* :doc:`Availablity Zones </admin/availability-zones>`: Availability Zones are
+* :doc:`Availability Zones </admin/availability-zones>`: Availability Zones are
an end-user visible logical abstraction for partitioning a cloud without
- knowing the physical infrastructure. They can be used to partition a cloud on
- arbitrary factors, such as location (country, datacenter, rack), network
- layout and/or power source.
+ knowing the physical infrastructure.
* :placement-doc:`Placement service <>`: Overview of the placement
service, including how it fits in with the rest of nova.
@@ -165,6 +163,7 @@ log management and live migration of instances.
security-groups
security
vendordata
+ notifications
Advanced configuration
@@ -200,12 +199,14 @@ instance for these kind of workloads.
virtual-gpu
file-backed-memory
ports-with-resource-requests
+ vdpa
virtual-persistent-memory
emulated-tpm
uefi
secure-boot
sev
managing-resource-providers
+ compute-node-identification
resource-limits
cpu-models
libvirt-misc
@@ -229,3 +230,5 @@ Once you are running nova, the following information is extremely useful.
upgrades
node-down
hw-machine-type
+ hw-emulation-architecture
+ soft-delete-shadow-tables
diff --git a/doc/source/admin/libvirt-misc.rst b/doc/source/admin/libvirt-misc.rst
index 87dbe18ea4..eb3d20b479 100644
--- a/doc/source/admin/libvirt-misc.rst
+++ b/doc/source/admin/libvirt-misc.rst
@@ -138,3 +138,33 @@ For example, to hide your signature from the guest OS, run:
.. code:: console
$ openstack flavor set $FLAVOR --property hw:hide_hypervisor_id=true
+
+
+.. _extra-spec-locked_memory:
+
+Locked memory allocation
+------------------------
+
+.. versionadded:: 26.0.0 (Zed)
+
+Locking memory marks the guest memory allocations as unmovable and
+unswappable. It is implicitly enabled in a number of cases such as SEV or
+realtime guests but can also be enabled explicitly using the
+``hw:locked_memory`` extra spec (or use ``hw_locked_memory`` image property).
+``hw:locked_memory`` (and the ``hw_locked_memory`` image property) accept
+boolean values in string format, such as 'true' or 'false'.
+A `FlavorImageLockedMemoryConflict` exception is raised if both the flavor and
+the image property are specified but with different boolean values.
+Locked memory is only allowed if you have also set ``hw:mem_page_size``,
+so that the scheduler can account for the memory correctly and prevent
+out-of-memory events. Otherwise, a `LockMemoryForbidden` exception is raised.
+
+.. code:: console
+
+ $ openstack flavor set FLAVOR-NAME \
+ --property hw:locked_memory=BOOLEAN_VALUE
+
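+The equivalent image metadata property can be set in a similar way, for
+example (``$IMAGE`` being a placeholder for your image name or UUID):
+
+.. code:: console
+
+ $ openstack image set --property hw_locked_memory=true $IMAGE
+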
+.. note::
+
+ This is currently only supported by the libvirt driver.
diff --git a/doc/source/admin/live-migration-usage.rst b/doc/source/admin/live-migration-usage.rst
index 783ab5e27c..32c67c2b0a 100644
--- a/doc/source/admin/live-migration-usage.rst
+++ b/doc/source/admin/live-migration-usage.rst
@@ -102,7 +102,7 @@ Manual selection of the destination host
.. code-block:: console
- $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live HostC
+ $ openstack server migrate d1df1b5a-70c4-4fed-98b7-423362f2c47c --live-migration --host HostC
#. Confirm that the instance has been migrated successfully:
diff --git a/doc/source/admin/manage-logs.rst b/doc/source/admin/manage-logs.rst
index f60a523852..3a1546d8f4 100644
--- a/doc/source/admin/manage-logs.rst
+++ b/doc/source/admin/manage-logs.rst
@@ -181,12 +181,18 @@ websocket client to access the serial console.
.. rubric:: Accessing the serial console on an instance
-#. Use the :command:`nova get-serial-proxy` command to retrieve the websocket
+#. Use the :command:`nova get-serial-console` command to retrieve the websocket
URL for the serial console on the instance:
.. code-block:: console
- $ nova get-serial-proxy INSTANCE_NAME
+ $ nova get-serial-console INSTANCE_NAME
+
+ Or use the :command:`openstack console url show` command.
+
+ .. code-block:: console
+
+ $ openstack console url show --serial INSTANCE_NAME
.. list-table::
:header-rows: 0
diff --git a/doc/source/admin/manage-volumes.rst b/doc/source/admin/manage-volumes.rst
index a9d705a47a..ef45d2c7aa 100644
--- a/doc/source/admin/manage-volumes.rst
+++ b/doc/source/admin/manage-volumes.rst
@@ -188,9 +188,9 @@ fetched using the `volume_attachment get_connector` subcommand:
.. note::
- Future work will remove this requirement and incorperate the gathering of
- the host connector into the main refresh command. Unfortunatley until then
- it must remain a seperate manual step.
+ Future work will remove this requirement and incorporate the gathering of
+ the host connector into the main refresh command. Unfortunately until then
+ it must remain a separate manual step.
We can then provide this connector to the `volume_attachment refresh`
subcommand. This command will connect to the compute, disconnect any host
diff --git a/doc/source/admin/managing-resource-providers.rst b/doc/source/admin/managing-resource-providers.rst
index 27bfe20140..6e4fbc2703 100644
--- a/doc/source/admin/managing-resource-providers.rst
+++ b/doc/source/admin/managing-resource-providers.rst
@@ -158,7 +158,7 @@ Schema Example
items:
patternProperties:
# Allows any key name matching the resource class pattern,
- # check to prevent conflicts with virt driver owned resouces classes
+ # check to prevent conflicts with virt driver owned resource classes
# will be done after schema validation.
^[A-Z0-9_]{1,255}$:
type: object
diff --git a/doc/source/admin/networking.rst b/doc/source/admin/networking.rst
index 667a5bf12f..c5b945b361 100644
--- a/doc/source/admin/networking.rst
+++ b/doc/source/admin/networking.rst
@@ -37,6 +37,42 @@ A full guide on configuring and using SR-IOV is provided in the
Nova will ignore PCI devices reported by the hypervisor if the address is
outside of these ranges.
+.. versionadded:: 25.0.0
+
+ For information on creating servers with remotely-managed SR-IOV network
+ interfaces of SmartNIC DPUs, refer to the relevant section in
+ :neutron-doc:`Networking Guide <admin/ovn/smartnic_dpu>`.
+
+ **Limitations**
+
+ * Only VFs are supported and they must be tagged in the Nova Compute
+ configuration in the :oslo.config:option:`pci.device_spec` option as
+ ``remote_managed: "true"``. There is no auto-discovery of this based
+ on vendor and product IDs;
+ * Either VF or its respective PF must expose a PCI VPD capability with a
+ unique card serial number according to the PCI/PCIe specifications
+ (see `the Libvirt docs <https://libvirt.org/drvnodedev.html#VPDCap>`_ to
+ get an example of how VPD data is represented and what to expect). If
+ this is not the case, those devices will not appear in allocation pools;
+ * Only the Libvirt driver is capable of supporting this feature at the
+ time of writing;
+ * The support for VPD capability handling in Libvirt was added in release
+ `7.9.0 <https://libvirt.org/news.html#v7-9-0-2021-11-01>`_ - older
+ versions are not supported by this feature;
+ * All compute nodes must be upgraded to the Yoga release in order for
+ scheduling of nodes with ``VNIC_TYPE_REMOTE_MANAGED`` ports to succeed;
+ * The same limitations apply to operations like live migration as with
+ `legacy SR-IOV ports <https://docs.openstack.org/neutron/latest/admin/config-sriov.html#known-limitations>`_;
+ * Clearing a VLAN by programming VLAN 0 must not result in errors in the
+ VF kernel driver at the compute host. Before v8.1.0 Libvirt clears
+ a VLAN by programming VLAN 0 before passing a VF through to the guest
+ which may result in an error depending on your driver and kernel version
+ (see, for example, `this bug <https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1957753>`_
+ which discusses a case relevant to one driver). As of Libvirt v8.1.0,
+ EPERM errors encountered while programming VLAN 0 are ignored if
+ VLAN clearing is not explicitly requested in the device XML (i.e.
+ VLAN 0 is not specified explicitly).
+
NUMA Affinity
-------------
@@ -199,3 +235,93 @@ As with the L2-type networks, this configuration will ensure instances using
one or more L3-type networks must be scheduled on host cores from NUMA node 0.
It is also possible to define more than one NUMA node, in which case the
instance must be split across these nodes.
+
+
+virtio-net Multiqueue
+---------------------
+
+.. versionadded:: 12.0.0 (Liberty)
+
+.. versionchanged:: 25.0.0 (Yoga)
+
+ Support for configuring multiqueue via the ``hw:vif_multiqueue_enabled``
+ flavor extra spec was introduced in the Yoga (25.0.0) release.
+
+.. important::
+
+ The functionality described below is currently only supported by the
+ libvirt/KVM driver.
+
+Virtual NICs using the virtio-net driver support the multiqueue feature. By
+default, these vNICs will only use a single virtio-net TX/RX queue pair,
+meaning guests will not transmit or receive packets in parallel. As a result,
+the scale of the protocol stack in a guest may be restricted as the network
+performance will not scale as the number of vCPUs increases and per-queue data
+processing limits in the underlying vSwitch are encountered. The solution to
+this issue is to enable virtio-net multiqueue, which can allow the guest
+instances to increase the total network throughput by scaling the number of
+receive and transmit queue pairs with CPU count.
+
+Multiqueue virtio-net isn't always necessary, but it can provide a significant
+performance benefit when:
+
+- Traffic packets are relatively large.
+- The guest is active on many connections at the same time, with traffic
+ running between guests, guest to host, or guest to an external system.
+- The number of queues is equal to the number of vCPUs. This is because
+ multi-queue support optimizes RX interrupt affinity and TX queue selection in
+ order to make a specific queue private to a specific vCPU.
+
+However, while the virtio-net multiqueue feature will often provide a welcome
+performance benefit, it has some limitations and therefore should not be
+unconditionally enabled:
+
+- Enabling virtio-net multiqueue increases the total network throughput, but in
+ parallel it also increases the CPU consumption.
+- Enabling virtio-net multiqueue in the host QEMU config does not enable the
+ functionality in the guest OS. The guest OS administrator needs to manually
+ turn it on for each guest NIC that requires this feature, using
+ :command:`ethtool`.
+- In case the number of vNICs in a guest instance is proportional to the number
+ of vCPUs, enabling the multiqueue feature is less important.
+
+Having considered these points, multiqueue can be enabled or explicitly
+disabled using either the :nova:extra-spec:`hw:vif_multiqueue_enabled` flavor
+extra spec or equivalent ``hw_vif_multiqueue_enabled`` image metadata property.
+For example, to enable virtio-net multiqueue for a chosen flavor:
+
+.. code-block:: bash
+
+ $ openstack flavor set --property hw:vif_multiqueue_enabled=true $FLAVOR
+
+Alternatively, to explicitly disable multiqueue for a chosen image:
+
+.. code-block:: bash
+
+ $ openstack image set --property hw_vif_multiqueue_enabled=false $IMAGE
+
+.. note::
+
+ If both the flavor extra spec and image metadata property are provided,
+ their values must match or an error will be raised.
+
+Once the guest has started, you must enable multiqueue using
+:command:`ethtool`. For example:
+
+.. code-block:: bash
+
+ $ ethtool -L $devname combined $N
+
+where ``$devname`` is the name of the network device, and ``$N`` is the number
+of TX/RX queue pairs to configure corresponding to the number of instance
+vCPUs. Alternatively, you can configure this persistently using udev. For
+example, to configure four TX/RX queue pairs for network device ``eth0``:
+
+.. code-block:: bash
+
+ # cat /etc/udev/rules.d/50-ethtool.rules
+ ACTION=="add", SUBSYSTEM=="net", NAME=="eth0", RUN+="/sbin/ethtool -L eth0 combined 4"
+
+For more information on this feature, refer to the `original spec`__.
+
+.. __: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/libvirt-virtiomq.html
diff --git a/doc/source/admin/notifications.rst b/doc/source/admin/notifications.rst
new file mode 100644
index 0000000000..3e9c126018
--- /dev/null
+++ b/doc/source/admin/notifications.rst
@@ -0,0 +1,132 @@
+=============
+Notifications
+=============
+
+Like many other OpenStack services, nova emits notifications to the message
+bus with the ``Notifier`` class provided by :oslo.messaging-doc:`oslo.messaging
+<reference/notifier.html>`. From the notification consumer point of view, a
+notification consists of two parts: an envelope with a fixed structure defined
+by oslo.messaging and a payload defined by the service emitting the
+notification. The envelope format is the following::
+
+ {
+ "priority": <string, selected from a predefined list by the sender>,
+ "event_type": <string, defined by the sender>,
+ "timestamp": <string, the isotime of when the notification emitted>,
+ "publisher_id": <string, defined by the sender>,
+ "message_id": <uuid, generated by oslo>,
+ "payload": <json serialized dict, defined by the sender>
+ }
+
+There are two types of notifications in nova: legacy notifications which have
+an unversioned payload and newer notifications which have a versioned payload.
+
+
+Legacy (unversioned) notifications
+----------------------------------
+
+The unversioned notifications exist from the early days of nova and have mostly
+grown organically. The structure of the payload of the unversioned
+notifications is defined in the code that emits the notification and no
+documentation or enforced backward compatibility contract exists for that
+format.
+
+Nova code uses the ``nova.rpc.get_notifier`` call to get a configured
+oslo.messaging ``Notifier`` object and it uses the oslo-provided functions on
+the ``Notifier`` object to emit notifications. The configuration of the
+returned ``Notifier`` object depends on the parameters of the ``get_notifier``
+call and the value of the oslo.messaging configuration options
+:oslo.config:option:`oslo_messaging_notifications.driver` and
+:oslo.config:option:`oslo_messaging_notifications.topics`.
+
+
+Versioned notifications
+-----------------------
+
+The versioned notification concept was created to fix the shortcomings of the
+unversioned notifications. The envelope structure of the emitted notification
+is the same as in the unversioned notification case as it is provided by
+oslo.messaging. However, the payload is not a free-form dictionary but a
+serialized :oslo.versionedobjects-doc:`oslo versionedobjects object <>`.
+
+.. _service.update:
+
+For example the wire format of the ``service.update`` notification looks like
+the following::
+
+ {
+ "priority": "INFO",
+ "payload": {
+ "nova_object.namespace": "nova",
+ "nova_object.name": "ServiceStatusPayload",
+ "nova_object.version": "1.0",
+ "nova_object.data": {
+ "host": "host1",
+ "disabled": false,
+ "last_seen_up": null,
+ "binary": "nova-compute",
+ "topic": "compute",
+ "disabled_reason": null,
+ "report_count": 1,
+ "forced_down": false,
+ "version": 2
+ }
+ },
+ "event_type": "service.update",
+ "publisher_id": "nova-compute:host1"
+ }
+
+The serialized oslo.versionedobject as a payload provides a version number to
+the consumer so the consumer can detect if the structure of the payload has
+changed. Nova provides the following contract regarding the versioned
+notification payload:
+
+* The payload version defined by the ``nova_object.version`` field of the
+ payload will be increased if and only if the syntax or the semantics of the
+ ``nova_object.data`` field of the payload is changed.
+
+* A minor version bump indicates a backward compatible change which means that
+ only new fields are added to the payload so a well written consumer can still
+ consume the new payload without any change.
+
+* A major version bump indicates a backward incompatible change of the payload
+ which can mean removed fields, type change, etc in the payload.
+
+* There is an additional field, ``nova_object.name``, for every payload
+ alongside the ``nova_object.data`` and ``nova_object.version`` fields. This
+ field contains the name of the nova internal representation of the payload
+ type. Client code should not depend on this name.
+
+A `presentation from the Train summit`__ goes over the background and usage of
+versioned notifications, and provides a demo.
+
+.. __: https://www.openstack.org/videos/summits/denver-2019/nova-versioned-notifications-the-result-of-a-3-year-journey
+
+
+Configuration
+-------------
+
+The :oslo.config:option:`notifications.notification_format` config option can
+be used to specify which notifications are emitted by nova.
+
+The versioned notifications are emitted to a different topic than the legacy
+notifications. By default they are emitted to ``versioned_notifications`` but
+this can be configured using the
+:oslo.config:option:`notifications.versioned_notifications_topics` config
+option.
+
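+As an illustrative sketch (not part of nova itself; the endpoint class and its
+behaviour are arbitrary), an external consumer could listen on that topic with
+oslo.messaging as follows:
+
+.. code-block:: python
+
+ import oslo_messaging
+ from oslo_config import cfg
+
+ class Endpoint(object):
+     # Called for notifications emitted with INFO priority.
+     def info(self, ctxt, publisher_id, event_type, payload, metadata):
+         # payload is the serialized versioned object shown above.
+         print(event_type, payload.get("nova_object.data"))
+
+ # Assumes transport_url is set in the consumer's own configuration.
+ transport = oslo_messaging.get_notification_transport(cfg.CONF)
+ targets = [oslo_messaging.Target(topic="versioned_notifications")]
+ listener = oslo_messaging.get_notification_listener(
+     transport, targets, [Endpoint()], executor="threading")
+ listener.start()
+ listener.wait()
+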
+There are notification configuration options in nova which are specific for
+certain notification types like
+:oslo.config:option:`notifications.notify_on_state_change`,
+:oslo.config:option:`notifications.default_level`, etc.
+
+Notifications can be disabled entirely by setting the
+:oslo.config:option:`oslo_messaging_notifications.driver` config option to
+``noop``.
+
+
+Reference
+---------
+
+A list of all currently supported versioned notifications can be found in
+:doc:`/reference/notifications`.
diff --git a/doc/source/admin/pci-passthrough.rst b/doc/source/admin/pci-passthrough.rst
index 727a63070d..09a963603d 100644
--- a/doc/source/admin/pci-passthrough.rst
+++ b/doc/source/admin/pci-passthrough.rst
@@ -51,6 +51,24 @@ capabilities.
Nova will ignore PCI devices reported by the hypervisor if the address is
outside of these ranges.
+.. versionchanged:: 26.0.0 (Zed):
+ PCI passthrough device inventories now can be tracked in Placement.
+ For more information, refer to :ref:`pci-tracking-in-placement`.
+
+.. versionchanged:: 26.0.0 (Zed):
+ The nova-compute service will refuse to start if both the parent PF and its
+ children VFs are configured in :oslo.config:option:`pci.device_spec`.
+ For more information, refer to :ref:`pci-tracking-in-placement`.
+
+.. versionchanged:: 26.0.0 (Zed):
+ The nova-compute service will refuse to start with
+ :oslo.config:option:`pci.device_spec` configuration that uses the
+ ``devname`` field.
+
+.. versionchanged:: 27.0.0 (2023.1 Antelope):
+ Nova provides Placement based scheduling support for servers with flavor
+ based PCI requests. This support is disabled by default.
+
Enabling PCI passthrough
------------------------
@@ -92,15 +110,15 @@ Configure ``nova-compute``
Once PCI passthrough has been configured for the host, :program:`nova-compute`
must be configured to allow the PCI device to pass through to VMs. This is done
-using the :oslo.config:option:`pci.passthrough_whitelist` option. For example,
+using the :oslo.config:option:`pci.device_spec` option. For example,
assuming our sample PCI device has a PCI address of ``41:00.0`` on each host:
.. code-block:: ini
[pci]
- passthrough_whitelist = { "address": "0000:41:00.0" }
+ device_spec = { "address": "0000:41:00.0" }
-Refer to :oslo.config:option:`pci.passthrough_whitelist` for syntax information.
+Refer to :oslo.config:option:`pci.device_spec` for syntax information.
Alternatively, to enable passthrough of all devices with the same product and
vendor ID:
@@ -108,7 +126,7 @@ vendor ID:
.. code-block:: ini
[pci]
- passthrough_whitelist = { "vendor_id": "8086", "product_id": "154d" }
+ device_spec = { "vendor_id": "8086", "product_id": "154d" }
If using vendor and product IDs, all PCI devices matching the ``vendor_id`` and
``product_id`` are added to the pool of PCI devices available for passthrough
@@ -156,6 +174,77 @@ Refer to :oslo.config:option:`pci.alias` for syntax information.
Once configured, restart the :program:`nova-compute` service.
+Special Tags
+^^^^^^^^^^^^
+
+When specified in :oslo.config:option:`pci.device_spec` some tags
+have special meaning:
+
+``physical_network``
+ Associates a device with a physical network label which corresponds to the
+ ``physical_network`` attribute of a network segment object in Neutron. For
+ virtual networks such as overlays a value of ``null`` should be specified
+ as follows: ``"physical_network": null``. In the case of physical networks,
+ this tag is used to supply the metadata necessary for identifying a switched
+ fabric to which a PCI device belongs and associate the port with the correct
+ network segment in the networking backend. Besides typical SR-IOV scenarios,
+ this tag can be used for remote-managed devices in conjunction with the
+ ``remote_managed`` tag.
+
+.. note::
+
+ The use of ``"physical_network": null`` is only supported in single segment
+ networks. This is due to Nova not supporting multisegment networks for
+ SR-IOV ports. See
+ `bug 1983570 <https://bugs.launchpad.net/nova/+bug/1983570>`_ for details.
+
+``remote_managed``
+ Used to specify whether a PCI device is managed remotely or not. By default,
+ devices are implicitly tagged as ``"remote_managed": "false"``, but they
+ must be tagged as ``"remote_managed": "true"`` if ports with
+ ``VNIC_TYPE_REMOTE_MANAGED`` are intended to be used. Once that is done,
+ those PCI devices will not be available for allocation for regular
+ PCI passthrough use. Specifying ``"remote_managed": "true"`` is only valid
+ for SR-IOV VFs and specifying it for PFs is prohibited.
+
+ .. important::
+ It is recommended that PCI VFs that are meant to be remote-managed
+ (e.g. the ones provided by SmartNIC DPUs) are tagged as remote-managed in
+ order to prevent them from being allocated for regular PCI passthrough since
+ they have to be programmed accordingly at the host that has access to the
+ NIC switch control plane. If this is not done, instances requesting regular
+ SR-IOV ports may get a device that will not be configured correctly and
+ will not be usable for sending network traffic.
+
+ .. important::
+ For the Libvirt virt driver, clearing a VLAN by programming VLAN 0 must not
+ result in errors in the VF kernel driver at the compute host. Before v8.1.0
+ Libvirt clears a VLAN before passing a VF through to the guest which may
+ result in an error depending on your driver and kernel version (see, for
+ example, `this bug <https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1957753>`_
+ which discusses a case relevant to one driver). As of Libvirt v8.1.0, EPERM
+ errors encountered while programming a VLAN are ignored if VLAN clearing is
+ not explicitly requested in the device XML.
+
+``trusted``
+ If a port is requested to be trusted by specifying an extra option during
+ port creation via ``--binding-profile trusted=true``, only devices tagged as
+ ``trusted: "true"`` will be allocated to instances. Nova will then configure
+ those devices as trusted by the network controller through its PF device driver.
+ The specific set of features allowed by the trusted mode of a VF will differ
+ depending on the network controller itself, its firmware version and what a PF
+ device driver version allows to pass to the NIC. Common features to be affected
+ by this tag are changing the VF MAC address, enabling promiscuous mode or
+ multicast promiscuous mode.
+
+ .. important::
+ While the ``trusted`` tag does not directly conflict with the
+ ``remote_managed`` tag, network controllers in SmartNIC DPUs may prohibit
+ setting the ``trusted`` mode on a VF via a PF device driver in the first
+ place. It is recommended to test specific devices, drivers and firmware
+ versions before assuming this feature can be used.
+
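+As an illustrative sketch combining these tags (the vendor ID, product ID and
+physical network name below are hypothetical and must be adjusted to your own
+device and Neutron configuration), a remote-managed VF could be exposed as:
+
+.. code-block:: ini
+
+ [pci]
+ device_spec = { "vendor_id": "15b3", "product_id": "101e", "physical_network": "physnet2", "remote_managed": "true" }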
+
Configure ``nova-scheduler``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -276,3 +365,177 @@ policy for any neutron SR-IOV interfaces attached by the user:
You can also configure this for PCI passthrough devices by specifying the
policy in the alias configuration via :oslo.config:option:`pci.alias`. For more
information, refer to :oslo.config:option:`the documentation <pci.alias>`.
+
+.. _pci-tracking-in-placement:
+
+PCI tracking in Placement
+-------------------------
+.. note::
+ The features described below are optional and disabled by default in nova
+ 26.0.0 (Zed). The legacy PCI tracker code path is still supported and
+ enabled. The Placement PCI tracking can be enabled via the
+ :oslo.config:option:`pci.report_in_placement` configuration option. But
+ please note that once it is enabled on a given compute host it cannot be
+ disabled there any more.
+
+Since nova 26.0.0 (Zed) PCI passthrough device inventories are tracked in
+Placement. If a PCI device exists on the hypervisor and
+matches one of the device specifications configured via
+:oslo.config:option:`pci.device_spec` then Placement will have a representation
+of the device. Each PCI device of type ``type-PCI`` and ``type-PF`` will be
+modeled as a Placement resource provider (RP) with the name
+``<hypervisor_hostname>_<pci_address>``. A device of type ``type-VF`` is
+represented by its parent PCI device, the PF, as the resource provider.
+
+By default nova will use ``CUSTOM_PCI_<vendor_id>_<product_id>`` as the
+resource class in PCI inventories in Placement. However the name of the
+resource class can be customized via the ``resource_class`` tag in the
+:oslo.config:option:`pci.device_spec` option. There is also a new ``traits``
+tag in that configuration that allows specifying a list of placement traits to
+be added to the resource provider representing the matching PCI devices.
+
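+A sketch of such a configuration, assuming a hypothetical device address and
+that the ``traits`` tag accepts a comma-separated list of trait names, could
+look like:
+
+.. code-block:: ini
+
+ [pci]
+ device_spec = { "address": "0000:81:00.0", "resource_class": "CUSTOM_MY_DEVICE", "traits": "CUSTOM_FAST,CUSTOM_BIG_VRAM" }
+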
+.. note::
+ In nova 26.0.0 (Zed) the Placement resource tracking of PCI devices does not
+ support SR-IOV devices intended to be consumed via Neutron ports and
+ therefore having ``physical_network`` tag in
+ :oslo.config:option:`pci.device_spec`. Such devices are supported via the
+ legacy PCI tracker code path in Nova.
+
+.. note::
+ Having different resource class or traits configuration for VFs under the
+ same parent PF is not supported and the nova-compute service will refuse to
+ start with such configuration.
+
+.. important::
+ While nova supported configuring both the PF and its children VFs for PCI
+ passthrough in the past, it only allowed consuming either the parent PF or
+ its children VFs. Since 26.0.0 (Zed) the nova-compute service will
+ enforce the same rule for the configuration as well and will refuse to
+ start if both the parent PF and its VFs are configured.
+
+.. important::
+ While nova supported configuring PCI devices by device name via the
+ ``devname`` parameter in :oslo.config:option:`pci.device_spec` in the past,
+ this proved to be problematic as the netdev name of a PCI device could
+ change for multiple reasons during hypervisor reboot. So since nova 26.0.0
+ (Zed) the nova-compute service will refuse to start with such configuration.
+ It is suggested to use the PCI address of the device instead.
+
+The nova-compute service makes sure that existing instances with PCI
+allocations in the nova DB will have a corresponding PCI allocation in
+placement. This allocation healing also acts on any new instances regardless of
+the status of the scheduling part of this feature to make sure that the nova
+DB and placement are in sync. There is one limitation of the healing logic.
+It assumes that there is no in-progress migration when the nova-compute service
+is upgraded. If there is an in-progress migration then the PCI allocation on
+the source host of the migration will not be healed. The placement view will be
+consistent after such migration is completed or reverted.
+
+Reconfiguring the PCI devices on the hypervisor or changing the
+:oslo.config:option:`pci.device_spec` configuration option and restarting the
+nova-compute service is supported in the following cases:
+
+* new devices are added
+* devices without allocation are removed
+
+Removing a device that has allocations is not supported. If a device having any
+allocation is removed, the nova-compute service will keep the device and
+the allocation in the nova DB and in placement, and will log a warning. If
+a device with any allocation is reconfigured in a way that an allocated PF is
+removed and VFs from the same PF are configured (or vice versa), then
+nova-compute will refuse to start as it would create a situation where both
+the PF and its VFs are made available for consumption.
+
+Since nova 27.0.0 (2023.1 Antelope) scheduling and allocation of PCI devices
+in Placement can also be enabled via
+:oslo.config:option:`filter_scheduler.pci_in_placement`. Please note that this
+should only be enabled after all the computes in the system are configured to
+report PCI inventory in Placement via
+:oslo.config:option:`pci.report_in_placement`. In Antelope, flavor-based
+PCI requests are supported but Neutron port-based PCI requests are not
+handled in Placement.
+
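+As a minimal sketch using the option groups referenced above, enabling the
+inventory reporting on the computes and the Placement based scheduling could
+look like:
+
+.. code-block:: ini
+
+ [pci]
+ # On every compute node: report PCI device inventories to Placement.
+ report_in_placement = True
+
+ [filter_scheduler]
+ # Scheduler side: schedule and allocate PCI devices in Placement.
+ pci_in_placement = True
+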
+If you are upgrading from an earlier version with already existing servers with
+PCI usage then you must enable :oslo.config:option:`pci.report_in_placement`
+first on all your computes having PCI allocations and then restart the
+nova-compute service, before you enable
+:oslo.config:option:`filter_scheduler.pci_in_placement`. The compute service
+will heal the missing PCI allocation in placement during startup and will
+continue healing missing allocations for future servers until the scheduling
+support is enabled.
+
+If a flavor requests multiple ``type-VF`` devices via
+:nova:extra-spec:`pci_passthrough:alias` then it is important to consider the
+value of :nova:extra-spec:`group_policy` as well. The value ``none``
+allows nova to select VFs from the same parent PF to fulfill the request. The
+value ``isolate`` restricts nova to select each VF from a different parent PF
+to fulfill the request. If :nova:extra-spec:`group_policy` is not provided in
+such a flavor then it defaults to ``none``.
+
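+For example, a sketch of a flavor that requests two VFs placed on different
+parent PFs, using a hypothetical alias name ``my-vf-alias``, could look like:
+
+.. code-block:: console
+
+ $ openstack flavor set $FLAVOR \
+     --property pci_passthrough:alias=my-vf-alias:2 \
+     --property group_policy=isolate
+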
+Symmetrically with the ``resource_class`` and ``traits`` fields of
+:oslo.config:option:`pci.device_spec` the :oslo.config:option:`pci.alias`
+configuration option supports requesting devices by Placement resource class
+name via the ``resource_class`` field and also supports requesting traits to
+be present on the selected devices via the ``traits`` field in the alias. If
+the ``resource_class`` field is not specified in the alias then it is defaulted
+by nova to ``CUSTOM_PCI_<vendor_id>_<product_id>``.
+
+For deeper technical details please read the `nova specification. <https://specs.openstack.org/openstack/nova-specs/specs/zed/approved/pci-device-tracking-in-placement.html>`_
+
+
+Virtual IOMMU support
+---------------------
+
+When the :nova:extra-spec:`hw:viommu_model` flavor extra spec or the equivalent
+``hw_viommu_model`` image metadata property is provided, and the guest CPU
+architecture and OS allow it, the libvirt driver will enable a vIOMMU.
+
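+For example, to let nova pick a suitable model automatically for a flavor
+(``$FLAVOR`` being a placeholder for your flavor name or UUID):
+
+.. code-block:: console
+
+ $ openstack flavor set --property hw:viommu_model=auto $FLAVOR
+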
+.. note::
+
+ Enabling vIOMMU might introduce significant performance overhead.
+ You can see a performance comparison table in the
+ `AMD vIOMMU session on KVM Forum 2021`_.
+ For the above reason, vIOMMU should only be enabled for workloads that
+ require it.
+
+.. _`AMD vIOMMU session on KVM Forum 2021`: https://static.sched.com/hosted_files/kvmforum2021/da/vIOMMU%20KVM%20Forum%202021%20-%20v4.pdf
+
+Here are four possible values allowed for ``hw:viommu_model``
+(and ``hw_viommu_model``):
+
+**virtio**
+ Supported on Libvirt since 8.3.0, for Q35 and ARM virt guests.
+
+**smmuv3**
+ Supported on Libvirt since 5.5.0, for ARM virt guests.
+
+**intel**
+ Supported for Q35 guests.
+
+**auto**
+ This option translates to ``virtio`` if Libvirt supports it,
+ else ``intel`` on X86 (Q35) and ``smmuv3`` on AArch64.
+
+For the viommu attributes:
+
+* The ``intremap``, ``caching_mode``, and ``iotlb``
+ options for the vIOMMU (these are driver attributes defined in the
+ `Libvirt IOMMU Domain`_ documentation) will be directly enabled.
+
+* ``eim`` will be directly enabled if the machine type is Q35.
+ ``eim`` is a driver attribute defined in the `Libvirt IOMMU Domain`_ documentation.
+
+.. note::
+
+ The eim (Extended Interrupt Mode) attribute (with possible values on and off)
+ can be used to configure Extended Interrupt Mode.
+ A q35 domain with split I/O APIC (as described in hypervisor features),
+ and both interrupt remapping and EIM turned on for the IOMMU, will be
+ able to use more than 255 vCPUs. Since 3.4.0 (QEMU/KVM only).
+
+* The ``aw_bits`` attribute can be used to set the address width to allow
+ mapping larger iova addresses in the guest. Since the currently supported
+ QEMU values are 39 and 48, we directly set this to the larger width (48)
+ if Libvirt supports it.
+ ``aw_bits`` is a driver attribute defined in the `Libvirt IOMMU Domain`_
+ documentation.
+
+.. _`Libvirt IOMMU Domain`: https://libvirt.org/formatdomain.html#iommu-devices
diff --git a/doc/source/admin/remote-console-access.rst b/doc/source/admin/remote-console-access.rst
index 01ef44810c..9b28646d27 100644
--- a/doc/source/admin/remote-console-access.rst
+++ b/doc/source/admin/remote-console-access.rst
@@ -221,6 +221,9 @@ server. In :file:`/etc/libvirt/qemu.conf`, uncomment the following settings:
service, this ensures that only approved VNC proxy servers can connect to the
Compute nodes.
+Make sure to set the correct permissions on the certificate files for the
+process which creates the instances. Please follow the libvirt wiki page [3]_
+for details.
+
After editing :file:`qemu.conf`, the ``libvirtd`` service must be restarted:
.. code-block:: shell
@@ -363,6 +366,16 @@ Replace ``IP_ADDRESS`` with the IP address from which the proxy is accessible
by the outside world. For example, this may be the management interface IP
address of the controller or the VIP.
+Optionally, the :program:`nova-compute` service supports the following
+additional options to configure compression settings (algorithms and modes)
+for SPICE consoles.
+
+- :oslo.config:option:`spice.image_compression`
+- :oslo.config:option:`spice.jpeg_compression`
+- :oslo.config:option:`spice.zlib_compression`
+- :oslo.config:option:`spice.playback_compression`
+- :oslo.config:option:`spice.streaming_mode`
+
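+A sketch of such a configuration (the values shown are illustrative and must
+be valid choices for the respective options) could look like:
+
+.. code-block:: ini
+
+ [spice]
+ image_compression = auto_glz
+ jpeg_compression = auto
+ zlib_compression = auto
+ playback_compression = true
+ streaming_mode = filter
+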
Serial
------
@@ -610,5 +623,6 @@ Frequently Asked Questions
References
----------
-.. [1] https://qemu.weilnetz.de/doc/qemu-doc.html#vnc_005fsec_005fcertificate_005fverify
+.. [1] https://qemu.weilnetz.de/doc/4.2/qemu-doc.html#vnc_005fsec_005fcertificate_005fverify
.. [2] https://tools.ietf.org/html/rfc3280#section-4.2.1.10
+.. [3] https://wiki.libvirt.org/page/VNCTLSSetup#Changes_to_be_made_on_the_virtualisation_host_server \ No newline at end of file
diff --git a/doc/source/admin/resource-limits.rst b/doc/source/admin/resource-limits.rst
index c74ad31c17..8ef248a9a1 100644
--- a/doc/source/admin/resource-limits.rst
+++ b/doc/source/admin/resource-limits.rst
@@ -38,7 +38,8 @@ CPU limits
Libvirt enforces CPU limits in terms of *shares* and *quotas*, configured
via :nova:extra-spec:`quota:cpu_shares` and :nova:extra-spec:`quota:cpu_period`
/ :nova:extra-spec:`quota:cpu_quota`, respectively. Both are implemented using
-the `cgroups v1 cpu controller`__.
+the `cgroups cpu controller`__. Note that allowed values for *shares* are
+platform dependent.
CPU shares are a proportional weighted share of total CPU resources relative to
other instances. It does not limit CPU usage if CPUs are not busy. There is no
diff --git a/doc/source/admin/scheduling.rst b/doc/source/admin/scheduling.rst
index 279062d240..353514ab55 100644
--- a/doc/source/admin/scheduling.rst
+++ b/doc/source/admin/scheduling.rst
@@ -108,8 +108,8 @@ The Filter Scheduler
.. versionchanged:: 23.0.0 (Wallaby)
- Support for custom filters was removed. Only the filter scheduler is now
- supported by nova.
+ Support for custom scheduler drivers was removed. Only the filter scheduler
+ is now supported by nova.
Nova's scheduler, known as the *filter scheduler*, supports filtering and
weighting to make informed decisions on where a new instance should be created.
@@ -406,7 +406,7 @@ Some of attributes that can be used as useful key and their values contains:
* ``free_ram_mb`` (compared with a number, values like ``>= 4096``)
* ``free_disk_mb`` (compared with a number, values like ``>= 10240``)
* ``host`` (compared with a string, values like ``<in> compute``, ``s== compute_01``)
-* ``hypervisor_type`` (compared with a string, values like ``s== QEMU``, ``s== powervm``)
+* ``hypervisor_type`` (compared with a string, values like ``s== QEMU``, ``s== ironic``)
* ``hypervisor_version`` (compared with a number, values like ``>= 1005003``, ``== 2000000``)
* ``num_instances`` (compared with a number, values like ``<= 10``)
* ``num_io_ops`` (compared with a number, values like ``<= 5``)
@@ -1049,6 +1049,37 @@ Otherwise, it will fall back to the
more than one value is found for a host in aggregate metadata, the minimum
value will be used.
+``HypervisorVersionWeigher``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 28.0.0 (Bobcat)
+
+Weigh hosts by their relative hypervisor version reported by the virt driver.
+
+While the hypervisor_version field for all virt drivers is an int,
+each nova virt driver uses a different algorithm to convert the hypervisor-specific
+version sequence into an int. As such the values are not directly comparable between
+hosts with different hypervisors.
+
+For example, the ironic virt driver uses the ironic API micro-version as the hypervisor
+version for a given node, while the libvirt driver uses the libvirt version:
+libvirt `7.1.123` becomes `7001123`, ironic `1.82` becomes `1`, and
+Hyper-V `6.3` becomes `6003`.
+
+If you have a mixed virt driver deployment in the ironic vs non-ironic
+case, nothing special needs to be done. Ironic nodes are scheduled using custom
+resource classes, so ironic flavors will never match non-ironic compute nodes.
+
+If a deployment has multiple non-ironic virt drivers, it is recommended to use
+aggregates to group hosts by virt driver. While this is not strictly required,
+it is desirable to avoid bias towards one virt driver. See
+:ref:`filtering_hosts_by_isolating_aggregates` and :ref:`AggregateImagePropertiesIsolation`
+for more information.
+
+The default behavior of the HypervisorVersionWeigher is to prefer newer hosts.
+If you prefer to invert the behavior, set the
+:oslo.config:option:`filter_scheduler.hypervisor_version_weight_multiplier` option
+to a negative number; the weighing will then have the opposite effect of the default.
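+
+For example, a minimal sketch that inverts the default and prefers hosts with
+older hypervisor versions:
+
+.. code-block:: ini
+
+ [filter_scheduler]
+ hypervisor_version_weight_multiplier = -1.0
+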
Utilization-aware scheduling
----------------------------
@@ -1100,6 +1131,16 @@ control the initial allocation ratio values for a compute node:
* :oslo.config:option:`initial_disk_allocation_ratio` the initial DISK_GB
inventory allocation ratio for a new compute node record, defaults to 1.0
+Starting with the 27.0.0 Antelope release, the following default values are used
+for the initial allocation ratio values for a compute node:
+
+* :oslo.config:option:`initial_cpu_allocation_ratio` the initial VCPU
+ inventory allocation ratio for a new compute node record, defaults to 4.0
+* :oslo.config:option:`initial_ram_allocation_ratio` the initial MEMORY_MB
+ inventory allocation ratio for a new compute node record, defaults to 1.0
+* :oslo.config:option:`initial_disk_allocation_ratio` the initial DISK_GB
+ inventory allocation ratio for a new compute node record, defaults to 1.0
+
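+As an illustrative check only (the resource provider UUID is a placeholder and
+the command requires the osc-placement plugin), the allocation ratios applied
+to a compute node's inventories can be inspected with:
+
+.. code-block:: bash
+
+   # List the VCPU/MEMORY_MB/DISK_GB inventories, including allocation_ratio.
+   openstack resource provider inventory list <compute-node-rp-uuid>
+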
Scheduling considerations
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -1162,7 +1203,7 @@ here.
:oslo.config:option:`DEFAULT.cpu_allocation_ratio`,
:oslo.config:option:`DEFAULT.ram_allocation_ratio` or
:oslo.config:option:`DEFAULT.disk_allocation_ratio` to a non-null value
- would ensure the user-configured value was always overriden.
+ would ensure the user-configured value was always overridden.
.. _osc-placement: https://docs.openstack.org/osc-placement/latest/index.html
diff --git a/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst b/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
index 0e6206d0b1..61a4e840cb 100644
--- a/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
+++ b/doc/source/admin/secure-live-migration-with-qemu-native-tls.rst
@@ -186,8 +186,8 @@ Related information
any other ``*_tls*`` parameters, _unless_ you need different
certificates for some services. The rationale for that is that some
services (e.g. migration / NBD) are only exposed to internal
- infrastructure; while some sevices (VNC, Spice) might be exposed
- publically, so might need different certificates. For OpenStack this
+ infrastructure; while some services (VNC, Spice) might be exposed
+ publicly, so might need different certificates. For OpenStack this
does not matter, though, we will stick with the defaults.
- If they are not already open, ensure you open up these TCP ports on
diff --git a/doc/source/admin/soft-delete-shadow-tables.rst b/doc/source/admin/soft-delete-shadow-tables.rst
new file mode 100644
index 0000000000..126279c4d0
--- /dev/null
+++ b/doc/source/admin/soft-delete-shadow-tables.rst
@@ -0,0 +1,62 @@
+=============================
+Soft Delete and Shadow Tables
+=============================
+
+Nova has two unrelated features which are called ``soft delete``:
+
+Soft delete instances that can be restored
+------------------------------------------
+
+After an instance delete request, the actual delete is
+delayed by a configurable amount of time (config option
+:oslo.config:option:`reclaim_instance_interval`). During the delay,
+the instance is marked to be in state ``SOFT_DELETED`` and can be
+restored (:command:`openstack server restore`) by an admin in order to
+gracefully handle human mistakes. If the instance is not restored during
+the configured delay, a periodic job actually deletes the instance.
+
+This feature is optional and by default off.
+
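+For illustration only (the server name is a placeholder and
+:oslo.config:option:`reclaim_instance_interval` must be set to a positive
+value for this to work), the delete and restore flow looks roughly like:
+
+.. code-block:: bash
+
+   openstack server delete my-server    # instance moves to SOFT_DELETED
+   openstack server restore my-server   # admin restores it within the window
+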
+See also:
+
+- "Delete, Restore" in `API Guide: Server Concepts
+ <https://docs.openstack.org/api-guide/compute/server_concepts.html#server-actions>`_
+- config reference: :oslo.config:option:`reclaim_instance_interval`
+
+Soft delete database rows to shadow tables
+------------------------------------------
+
+At an actual instance delete, no DB record is deleted. Instead the
+records are marked as deleted (for example ``instances.deleted``
+in Nova cell databases). This preserves historic information
+for debugging and audit uses. But it also leads to accumulation
+of data in Nova cell DB tables, which may have an effect on
+Nova DB performance as documented in `DB prune deleted rows
+<https://docs.openstack.org/nova/latest/admin/upgrades.html#concepts>`_.
+
+The records marked as deleted can be cleaned up in multiple stages.
+First you can move them to so-called shadow tables (tables with prefix
+``shadow_`` in Nova cell databases). This is called *archiving the
+deleted rows*. Nova does not query shadow tables, therefore data moved
+to the shadow tables no longer affects DB performance. However, storage
+space is still consumed. Then you can actually delete the information
+from the shadow tables. This is called *DB purge*.
+
+These operations can be performed by nova-manage:
+
+- https://docs.openstack.org/nova/latest/cli/nova-manage.html#db-archive-deleted-rows
+- https://docs.openstack.org/nova/latest/cli/nova-manage.html#db-purge
+
+This feature is not optional. Every long-running deployment should
+regularly archive and purge the deleted rows, for example via a cron
+job that regularly calls :program:`nova-manage db archive_deleted_rows` and
+:program:`nova-manage db purge`. The tradeoffs between data retention,
+DB performance and storage needs should be considered.
+
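+For illustration only (the retention windows are arbitrary placeholders, not
+recommendations), such a job might run:
+
+.. code-block:: bash
+
+   # Archive rows deleted more than 30 days ago, then purge old shadow rows.
+   nova-manage db archive_deleted_rows --before "$(date -d 'now - 30 days')" --until-complete
+   nova-manage db purge --before "$(date -d 'now - 90 days')"
+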
+In the Mitaka release there was an agreement between Nova developers that
+it's not desirable to provide shadow tables for every table in the Nova
+database, `documented in a spec
+<https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/no-more-soft-delete.html>`_.
+
+Therefore, not all information about an instance is preserved in the shadow
+tables and, since then, no new shadow tables have been introduced.
diff --git a/doc/source/admin/upgrades.rst b/doc/source/admin/upgrades.rst
index 00a714970b..61fd0cf258 100644
--- a/doc/source/admin/upgrades.rst
+++ b/doc/source/admin/upgrades.rst
@@ -41,21 +41,27 @@ Rolling upgrade process
To reduce downtime, the compute services can be upgraded in a rolling fashion.
It means upgrading a few services at a time. This results in a condition where
both old (N) and new (N+1) nova-compute services co-exist for a certain time
-period. Note that, there is no upgrade of the hypervisor here, this is just
+period (or even N with N+2 upgraded nova-compute services, see below).
+Note that there is no upgrade of the hypervisor here; this is just
upgrading the nova services. If reduced downtime is not a concern (or lower
complexity is desired), all services may be taken down and restarted at the
same time.
.. important::
- Nova does not currently support the coexistence of N and N+2 or greater
- :program:`nova-compute` or :program:`nova-conductor` services in the same
- deployment. The `nova-conductor`` service will fail to start when a
- ``nova-compute`` service that is older than the previous release (N-2 or
- greater) is detected. Similarly, in a :doc:`deployment with multiple cells
+ As of OpenStack 2023.1 (Antelope), Nova supports the coexistence of N and
+ N-2 (Yoga) :program:`nova-compute` or :program:`nova-conductor` services in
+ the same deployment. The ``nova-conductor`` service will fail to start when
+ a ``nova-compute`` service that is older than the support envelope is
+ detected. This varies by release and the support envelope will be explained
+ in the release notes. Similarly, in a :doc:`deployment with multiple cells
</admin/cells>`, neither the super conductor service nor any per-cell
conductor service will start if any other conductor service in the
- deployment is older than the previous release.
+ deployment is older than the N-2 release.
+
+ Releases older than 2023.1 will only support rolling upgrades for a single
+ release difference between :program:`nova-compute` and
+ :program:`nova-conductor` services.
#. Before maintenance window:
diff --git a/doc/source/admin/vdpa.rst b/doc/source/admin/vdpa.rst
new file mode 100644
index 0000000000..5d0408b0b3
--- /dev/null
+++ b/doc/source/admin/vdpa.rst
@@ -0,0 +1,86 @@
+============================
+Using ports vnic_type='vdpa'
+============================
+.. versionadded:: 23.0.0 (Wallaby)
+
+ Introduced support for vDPA.
+
+.. versionadded:: 26.0.0 (Zed)
+
+ Added support for all instance move operations,
+ and the interface attach/detach, and suspend/resume operations.
+
+.. important::
+ The functionality described below is only supported by the
+ libvirt/KVM virt driver.
+
+The kernel vDPA (virtio Data Path Acceleration) framework
+provides a vendor independent framework for offloading data-plane
+processing to software or hardware virtio device backends.
+While the kernel vDPA framework supports many types of vDPA devices,
+at this time nova only supports ``virtio-net`` devices
+using the ``vhost-vdpa`` front-end driver. Support for ``virtio-blk`` or
+``virtio-gpu`` may be added in the future but is not currently planned
+for any specific release.
+
+vDPA device tracking
+~~~~~~~~~~~~~~~~~~~~
+When implementing support for vDPA based neutron ports, one of the first
+decisions nova had to make was how to model the availability of vDPA devices
+and the capability to virtualize them. As the initial use case for this
+technology was to offload networking to hardware-offloaded OVS via neutron
+ports, the decision was made to extend the existing PCI tracker that is used
+for SR-IOV and PCI passthrough to support vDPA devices. As a result, a
+simplifying assumption was made that the parent device of a vDPA device is an
+SR-IOV Virtual Function (VF). Consequently, software-only vDPA devices such as
+those created by the kernel ``vdpa-sim`` sample module are not supported.
+
+To make a vDPA device available to be scheduled to guests, the operator should
+include the device in the PCI ``device_spec``, using either the PCI address or
+the vendor ID and product ID of the parent VF.
+See: :nova-doc:`pci-passthrough <admin/pci-passthrough>` for details.
+
+Nova will not create the VFs or vDPA devices automatically. It is expected
+that the operator will allocate them before starting the nova-compute agent.
+While no specific mechanism is prescribed to do this, udev rules or systemd
+service files are generally the recommended approach to ensure the devices
+are created consistently across reboots.
+
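+For illustration only, the following sketch shows one way the devices could be
+created manually (the interface name, VF count and PCI address are
+placeholders; the ``vdpa`` command is provided by iproute2 and some NICs
+require vendor-specific tooling instead):
+
+.. code-block:: bash
+
+   # Create SR-IOV VFs on the parent interface.
+   echo 4 > /sys/class/net/enp6s0f0/device/sriov_numvfs
+   # Create a vDPA device on top of one of the resulting VFs.
+   vdpa dev add name vdpa0 mgmtdev pci/0000:06:00.2
+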
+.. note::
+ As vDPA is an offload only for the data plane and not the control plane, a
+ vDPA control plane is required to properly support vDPA device passthrough.
+ At the time of writing, only hardware-offloaded OVS is supported when using
+ vDPA with nova. Because of this, vDPA devices cannot be requested using the
+ PCI alias. While nova could allow vDPA devices to be requested by the
+ flavor using a PCI alias, we would not be able to correctly configure the
+ device as there would be no suitable control plane. For this reason, vDPA
+ devices are currently only consumable via neutron ports.
+
+Virt driver support
+~~~~~~~~~~~~~~~~~~~
+
+Supporting neutron ports with ``vnic_type=vdpa`` depends on the capability
+of the virt driver. At this time only the ``libvirt`` virt driver with KVM
+is fully supported. QEMU may also work but is untested.
+
+vDPA support depends on kernel 5.7+, Libvirt 6.9.0+ and QEMU 5.1+.
+
+vDPA lifecycle operations
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To boot a VM with vDPA ports, they must first be created in neutron.
+To do this, the normal SR-IOV workflow is used, whereby the port is first
+created in neutron and passed into nova as part of the server create request.
+
+.. code-block:: bash
+
+ openstack port create --network <my network> --vnic-type vdpa vdpa-port
+ openstack server create --flavor <my-flavor> --image <my-image> --port <vdpa-port uuid> vdpa-vm
+
+vDPA live migration
+~~~~~~~~~~~~~~~~~~~
+
+At this time QEMU and the ``vhost-vdpa`` kernel module do not support
+transparent live migration of VMs with vDPA ports. To enable live migration of
+VMs with vDPA interfaces, the existing SR-IOV hotplug live migration procedure
+has been extended to include ``vnic_type='vdpa'`` interfaces.
diff --git a/doc/source/cli/nova-compute.rst b/doc/source/cli/nova-compute.rst
index f190949efa..1346dab92e 100644
--- a/doc/source/cli/nova-compute.rst
+++ b/doc/source/cli/nova-compute.rst
@@ -41,6 +41,8 @@ Files
* ``/etc/nova/policy.d/``
* ``/etc/nova/rootwrap.conf``
* ``/etc/nova/rootwrap.d/``
+* ``/etc/nova/compute_id``
+* ``/var/lib/nova/compute_id``
See Also
========
diff --git a/doc/source/cli/nova-manage.rst b/doc/source/cli/nova-manage.rst
index a13289b2e8..53152a0a6f 100644
--- a/doc/source/cli/nova-manage.rst
+++ b/doc/source/cli/nova-manage.rst
@@ -258,17 +258,17 @@ stopping at 0, or use the :option:`--until-complete` option.
``YYYY-MM-DD[HH:mm:ss]``. For example::
# Purge shadow table rows older than a specific date
- nova-manage db archive --before 2015-10-21
+ nova-manage db archive_deleted_rows --before 2015-10-21
# or
- nova-manage db archive --before "Oct 21 2015"
+ nova-manage db archive_deleted_rows --before "Oct 21 2015"
# Times are also accepted
- nova-manage db archive --before "2015-10-21 12:00"
+ nova-manage db archive_deleted_rows --before "2015-10-21 12:00"
Note that relative dates (such as ``yesterday``) are not supported
natively. The ``date`` command can be helpful here::
# Archive deleted rows more than one month old
- nova-manage db archive --before "$(date -d 'now - 1 month')"
+ nova-manage db archive_deleted_rows --before "$(date -d 'now - 1 month')"
.. option:: --verbose
@@ -521,6 +521,7 @@ This command should be run before ``nova-manage db sync``.
.. _man-page-cells-v2:
+
Cells v2 Commands
=================
@@ -1144,6 +1145,7 @@ Delete a host by the given host name and the given cell UUID.
* - 4
- The host with the specified name has instances (host not empty).
+
Placement Commands
==================
@@ -1531,6 +1533,7 @@ command.
* - 6
- Instance is not attached to volume
+
Libvirt Commands
================
@@ -1604,7 +1607,7 @@ instance changing when moving between machine types.
.. option:: --force
- Skip machine type compatability checks and force machine type update.
+ Skip machine type compatibility checks and force machine type update.
.. rubric:: Return codes
@@ -1669,12 +1672,100 @@ within an environment.
- Instances found without ``hw_machine_type`` set
+Image Property Commands
+=======================
+
+image_property show
+-------------------
+
+.. program:: nova-manage image_property show
+
+.. code-block:: shell
+
+ nova-manage image_property show [INSTANCE_UUID] [IMAGE_PROPERTY]
+
+Fetch and display the recorded image property ``IMAGE_PROPERTY`` of an
+instance identified by ``INSTANCE_UUID``.
+
+.. versionadded:: 25.0.0 (Yoga)
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Successfully completed
+ * - 1
+ - An unexpected error occurred
+ * - 2
+ - Unable to find instance or instance mapping
+ * - 3
+ - No image property found for instance
+
+image_property set
+------------------
+
+.. program:: nova-manage image_property set
+
+.. code-block:: shell
+
+ nova-manage image_property set \
+ [INSTANCE_UUID] [--property] [IMAGE_PROPERTY]=[VALUE]
+
+Set or update the recorded image property ``IMAGE_PROPERTY`` of instance
+``INSTANCE_UUID`` to value ``VALUE``.
+
+The following criteria must be met when using this command:
+
+* The instance must have a ``vm_state`` of ``STOPPED``, ``SHELVED`` or
+ ``SHELVED_OFFLOADED``.
+
+This command is useful for operators who need to update stored instance image
+properties that have become invalidated by a change of instance machine type,
+for example.
+
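+As an illustration only (the instance UUID and property value are
+placeholders), fixing up a stale disk bus property on a stopped instance might
+look like:
+
+.. code-block:: bash
+
+   nova-manage image_property set \
+       17d4b089-67a6-4ac7-b9e8-ddd27c8bd1d7 \
+       --property hw_disk_bus=virtio
+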
+.. versionadded:: 25.0.0 (Yoga)
+
+.. rubric:: Options
+
+.. option:: --property
+
+ Image property to set using the format name=value. For example:
+ ``--property hw_disk_bus=virtio --property hw_cdrom_bus=sata``.
+
+.. rubric:: Return codes
+
+.. list-table::
+ :widths: 20 80
+ :header-rows: 1
+
+ * - Return code
+ - Description
+ * - 0
+ - Update completed successfully
+ * - 1
+ - An unexpected error occurred
+ * - 2
+ - Unable to find instance or instance mapping
+ * - 3
+ - The instance has an invalid ``vm_state``
+ * - 4
+ - The provided image property name is invalid
+ * - 5
+ - The provided image property value is invalid
+
+
See Also
========
:doc:`nova-policy(1) <nova-policy>`,
:doc:`nova-status(1) <nova-status>`
+
Bugs
====
diff --git a/doc/source/cli/nova-rootwrap.rst b/doc/source/cli/nova-rootwrap.rst
index 4fcae829fc..47e3268b97 100644
--- a/doc/source/cli/nova-rootwrap.rst
+++ b/doc/source/cli/nova-rootwrap.rst
@@ -9,7 +9,7 @@ Synopsis
::
- nova-rootwrap CONFIG_FILE COMMMAND
+ nova-rootwrap CONFIG_FILE COMMAND
Description
===========
diff --git a/doc/source/cli/nova-status.rst b/doc/source/cli/nova-status.rst
index a198159e17..5fbb23f388 100644
--- a/doc/source/cli/nova-status.rst
+++ b/doc/source/cli/nova-status.rst
@@ -137,7 +137,7 @@ Upgrade
* Checks for the Placement API are modified to require version 1.35.
* Checks for the policy files are not automatically overwritten with
- new defaults.
+ new defaults. This check has been dropped in 26.0.0 (Zed) release.
**22.0.0 (Victoria)**
diff --git a/doc/source/configuration/extra-specs.rst b/doc/source/configuration/extra-specs.rst
index 45dbf2a94d..94233c5e94 100644
--- a/doc/source/configuration/extra-specs.rst
+++ b/doc/source/configuration/extra-specs.rst
@@ -183,16 +183,6 @@ They are only supported by the HyperV virt driver.
.. extra-specs:: os
-``powervm``
-~~~~~~~~~~~
-
-The following extra specs are used to configure various attributes of
-instances when using the PowerVM virt driver.
-
-They are only supported by the PowerVM virt driver.
-
-.. extra-specs:: powervm
-
``vmware``
~~~~~~~~~~
diff --git a/doc/source/configuration/index.rst b/doc/source/configuration/index.rst
index f7f40790f1..8d59d2dc27 100644
--- a/doc/source/configuration/index.rst
+++ b/doc/source/configuration/index.rst
@@ -48,7 +48,7 @@ services and what configuration options are available can be found below.
.. # NOTE(mriedem): This is the section where we hide things that we don't
# actually want in the table of contents but sphinx build would fail if
# they aren't in the toctree somewhere.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -100,7 +100,7 @@ policies are available can be found below.
.. # NOTE(mriedem): This is the section where we hide things that we don't
# actually want in the table of contents but sphinx build would fail if
# they aren't in the toctree somewhere.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
diff --git a/doc/source/configuration/policy-concepts.rst b/doc/source/configuration/policy-concepts.rst
index e3927b7fc4..383b27247f 100644
--- a/doc/source/configuration/policy-concepts.rst
+++ b/doc/source/configuration/policy-concepts.rst
@@ -41,7 +41,7 @@ resources from project or system level resources. Please refer to
:keystone-doc:`this document </admin/tokens-overview.html#authorization-scopes>`
and `system scope specification <https://specs.openstack.org/openstack/keystone-specs/specs/keystone/queens/system-scope.html>`_ to understand the scope concept.
-In the Nova 21.0.0 (Ussuri) release, Nova policies implemented
+In the Nova 25.0.0 (Yoga) release, Nova policies implemented
the scope concept and default roles provided by keystone (admin, member,
and reader). Using common roles from keystone reduces the likelihood of
similar, but different, roles implemented across projects or deployments
@@ -65,36 +65,13 @@ represent the layer of authorization required to access an API.
.. note::
- The ``scope_type`` of each policy is hardcoded and is not
- overridable via the policy file.
+ The ``scope_type`` of each policy is hardcoded to ``project`` scoped
+ and is not overridable via the policy file.
Nova policies have implemented the scope concept by defining the ``scope_type``
-in policies. To know each policy's ``scope_type``, please refer to the
-:doc:`Policy Reference </configuration/policy>` and look for ``Scope Types`` or
-``Intended scope(s)`` in :doc:`Policy Sample File </configuration/sample-policy>`
-as shown in below examples.
-
-.. rubric:: ``system`` scope
-
-Policies with a ``scope_type`` of ``system`` means a user with a
-``system-scoped`` token has permission to access the resource. This can be
-seen as a global role. All the system-level operation's policies
-have defaulted to ``scope_type`` of ``['system']``.
-
-For example, consider the ``GET /os-hypervisors`` API.
-
-.. code::
-
- # List all hypervisors.
- # GET /os-hypervisors
- # Intended scope(s): system
- #"os_compute_api:os-hypervisors:list": "rule:system_reader_api"
-
-.. rubric:: ``project`` scope
-
-Policies with a ``scope_type`` of ``project`` means a user with a
-``project-scoped`` token has permission to access the resource. Project-level
-only operation's policies are defaulted to ``scope_type`` of ``['project']``.
+for all the policies as ``project`` scoped. This means that if a user tries to
+access nova APIs with a ``system``-scoped token, they will get a 403 permission
+denied error.
For example, consider the ``POST /os-server-groups`` API.
@@ -105,28 +82,6 @@ For example, consider the ``POST /os-server-groups`` API.
# Intended scope(s): project
#"os_compute_api:os-server-groups:create": "rule:project_member_api"
-.. rubric:: ``system and project`` scope
-
-Policies with a ``scope_type`` of ``system and project`` means a user with a
-``system-scoped`` or ``project-scoped`` token has permission to access the
-resource. All the system and project level operation's policies have defaulted
-to ``scope_type`` of ``['system', 'project']``.
-
-For example, consider the ``POST /servers/{server_id}/action (os-migrateLive)``
-API.
-
-.. code::
-
- # Live migrate a server to a new host without a reboot
- # POST /servers/{server_id}/action (os-migrateLive)
- # Intended scope(s): system, project
- #"os_compute_api:os-migrate-server:migrate_live": "rule:system_admin_api"
-
-These scope types provide a way to differentiate between system-level and
-project-level access roles. You can control the information with scope of the
-users. This means you can control that none of the project level role can get
-the hypervisor information.
-
Policy scope is disabled by default to allow operators to migrate from
the old policy enforcement system in a graceful way. This can be
enabled by configuring the :oslo.config:option:`oslo_policy.enforce_scope`
@@ -149,62 +104,139 @@ defaults for each policy.
.. rubric:: ``reader``
-This provides read-only access to the resources within the ``system`` or
-``project``. Nova policies are defaulted to below rules:
-
-.. code::
-
- system_reader_api
- Default
- role:reader and system_scope:all
-
- system_or_project_reader
- Default
- (rule:system_reader_api) or (role:reader and project_id:%(project_id)s)
+This provides read-only access to the resources. Nova policies default to the
+rules below:
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="project_reader",
+ check_str="role:reader and project_id:%(project_id)s",
+ description="Default rule for Project level read only APIs."
+ )
+
+Using it in a policy rule (with admin + reader access, because we want to keep
+the legacy admin behavior the same, the admin role is also given access to the
+reader APIs):
+
+.. code-block:: python
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:show',
+ check_str='role:admin or (role:reader and project_id:%(project_id)s)',
+ description="Show a server",
+ operations=[
+ {
+ 'method': 'GET',
+ 'path': '/servers/{server_id}'
+ }
+ ],
+ scope_types=['project'],
+ )
+
+OR
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="admin_api",
+ check_str="role:admin",
+ description="Default rule for administrative APIs."
+ )
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:show',
+ check_str='rule:admin_api or rule:project_reader',
+ description='Show a server',
+ operations=[
+ {
+ 'method': 'GET',
+ 'path': '/servers/{server_id}'
+ }
+ ],
+ scope_types=['project'],
+ )
.. rubric:: ``member``
-This role is to perform the project level write operation with combination
-to the system admin. Nova policies are defaulted to below rules:
-
-.. code::
-
- project_member_api
- Default
- role:member and project_id:%(project_id)s
-
- system_admin_or_owner
- Default
- (role:admin and system_scope:all) or (role:member and project_id:%(project_id)s)
+project-member is denoted by someone with the member role on a project. It is
+intended to be used by end users who consume resources within a project and
+requires more permissions than the reader role but fewer than the admin role.
+It inherits all the permissions of a project-reader.
+
+project-member persona in the policy check string:
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="project_member",
+ check_str="role:member and project_id:%(project_id)s",
+ description="Default rule for Project level non admin APIs."
+ )
+
+Using it in a policy rule (with admin + member access, because we want to keep
+the legacy admin behavior, the admin role also gets access to the project
+level member APIs):
+
+.. code-block:: python
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:create',
+ check_str='role:admin or (role:member and project_id:%(project_id)s)',
+ description='Create a server',
+ operations=[
+ {
+ 'method': 'POST',
+ 'path': '/servers'
+ }
+ ],
+ scope_types=['project'],
+ )
+
+OR
+
+.. code-block:: python
+
+ policy.RuleDefault(
+ name="admin_api",
+ check_str="role:admin",
+ description="Default rule for administrative APIs."
+ )
+
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:servers:create',
+ check_str='rule:admin_api or rule:project_member',
+ description='Create a server',
+ operations=[
+ {
+ 'method': 'POST',
+ 'path': '/servers'
+ }
+ ],
+ scope_types=['project'],
+ )
+
+The ``project_id:%(project_id)s`` part of the check_str is important as it
+restricts access to the requested project.
.. rubric:: ``admin``
-This role is to perform the admin level write operation at system as well
-as at project-level operations. Nova policies are defaulted to below rules:
-
-.. code::
-
- system_admin_api
- Default
- role:admin and system_scope:all
+This role is used to perform admin-level write operations. Nova policies
+default to the rules below:
- project_admin_api
- Default
- role:admin and project_id:%(project_id)s
+.. code-block:: python
- system_admin_or_owner
- Default
- (role:admin and system_scope:all) or (role:member and project_id:%(project_id)s)
+ policy.DocumentedRuleDefault(
+ name='os_compute_api:os-hypervisors:list',
+ check_str='role:admin',
+ scope_types=['project']
+ )
With these new defaults, you can solve the problem of:
#. Providing the read-only access to the user. Polices are made more granular
- and defaulted to reader rules. For exmaple: If you need to let someone audit
+ and defaulted to reader rules. For example: If you need to let someone audit
your deployment for security purposes.
#. Customize the policy in better way. For example, you will be able
- to provide access to project level user to perform live migration for their
- server or any other project with their token.
+ to provide access to project level user to perform operations within
+ their project only.
Nova supported scope & Roles
-----------------------------
@@ -212,38 +244,21 @@ Nova supported scope & Roles
Nova supports the below combination of scopes and roles where roles can be
overridden in the policy.yaml file but scope is not override-able.
-#. SYSTEM_ADMIN: ``admin`` role on ``system`` scope
-
-#. SYSTEM_READER: ``reader`` role on ``system`` scope
-
-#. PROJECT_ADMIN: ``admin`` role on ``project`` scope
-
- .. note::
+#. ADMIN: ``admin`` role on ``project`` scope. This is an administrator who
+   can perform admin-level operations, for example enabling/disabling a
+   compute service or live migrating a server.
- PROJECT_ADMIN has the limitation for the below policies
+#. PROJECT_MEMBER: ``member`` role on ``project`` scope. This is used to
+   perform resource-owner level operations within a project. For example:
+   pause a server.
- * ``os_compute_api:servers:create:forced_host``
- * ``os_compute_api:servers:compute:servers:create:requested_destination``
+#. PROJECT_READER: ``reader`` role on ``project`` scope. This is used to
+   perform read-only operations within a project. For example: get a server.
- To create a server on specific host via force host or requested
- destination, you need to pass the hostname in ``POST /servers``
- API request but there is no way for PROJECT_ADMIN to get the hostname
- via API. This limitation will be addressed in a future release.
+#. PROJECT_MEMBER_OR_ADMIN: ``admin`` or ``member`` role on ``project`` scope.
+   Such policy rules are the default for most of the owner-level APIs and,
+   along with the ``member`` role, allow the legacy admin to continue to
+   access those APIs.
-
-#. PROJECT_MEMBER: ``member`` role on ``project`` scope
-
-#. PROJECT_READER: ``reader`` role on ``project`` scope
-
-#. PROJECT_MEMBER_OR_SYSTEM_ADMIN: ``admin`` role on ``system`` scope
- or ``member`` role on ``project`` scope. Such policy rules are scoped
- as both ``system`` as well as ``project``.
-
-#. PROJECT_READER_OR_SYSTEM_READER: ``reader`` role on ``system`` scope
- or ``project`` scope. Such policy rules are scoped as both ``system``
- as well as ``project``.
-
- .. note:: As of now, only ``system`` and ``project`` scopes are supported in Nova.
+#. PROJECT_READER_OR_ADMIN: ``admin`` or ``reader`` role on ``project`` scope.
+   Such policy rules are the default for most of the read-only APIs so that
+   the legacy admin can continue to access those APIs.
Backward Compatibility
----------------------
@@ -251,9 +266,10 @@ Backward Compatibility
Backward compatibility with versions prior to 21.0.0 (Ussuri) is maintained by
supporting the old defaults and disabling the ``scope_type`` feature by default.
This means the old defaults and deployments that use them will keep working
-as-is. However, we encourage every deployment to switch to new policy.
-``scope_type`` will be enabled by default and the old defaults will be removed
-starting in the 23.0.0 (W) release.
+as-is. However, we encourage every deployment to switch to the new policy. The
+new defaults will be enabled by default in OpenStack 2023.1 (Nova 27.0.0)
+release and old defaults will be removed starting in the OpenStack 2023.2
+(Nova 28.0.0) release.
To implement the new default reader roles, some policies needed to become
granular. They have been renamed, with the old names still supported for
@@ -272,7 +288,6 @@ Here is step wise guide for migration:
You need to create the new token with scope knowledge via below CLI:
- - :keystone-doc:`Create System Scoped Token </admin/tokens-overview.html#operation_create_system_token>`.
- :keystone-doc:`Create Project Scoped Token </admin/tokens-overview.html#operation_create_project_scoped_token>`.
#. Create new default roles in keystone if not done:
@@ -292,10 +307,6 @@ Here is step wise guide for migration:
(assuming the rest of the policy passes). The default value of this flag
is False.
- .. note:: Before you enable this flag, you need to audit your users and make
- sure everyone who needs system-level access has a system role
- assignment in keystone.
-
#. Enable new defaults
The :oslo.config:option:`oslo_policy.enforce_new_defaults` flag switches
@@ -308,7 +319,6 @@ Here is step wise guide for migration:
.. note:: Before you enable this flag, you need to educate users about the
different roles they need to use to continue using Nova APIs.
-
#. Check for deprecated policies
A few policies were made more granular to implement the reader roles. New
@@ -316,30 +326,31 @@ Here is step wise guide for migration:
are overwritten in policy file, then warning will be logged. Please migrate
those policies to new policy names.
+.. note::
+
+   We recommend enabling both the scope checks and the new defaults together,
+   otherwise you may experience some late failures with unclear error
+   messages. For example, if you enable the new defaults but disable the scope
+   check, system users will be allowed to access the APIs but will fail later
+   due to the project check, which can be difficult to debug.
+
+The below table shows how legacy rules are mapped to new rules:
-+--------------------+----------------------------------+-----------------+-------------------+
-| Legacy Rules | New Rules | | |
-+====================+==================================+=================+===================+
-| | | *Roles* | *Scope* |
-| +----------------------------------+-----------------+-------------------+
-| | SYSTEM_ADMIN | admin | system |
-| Project Admin +----------------------------------+-----------------+ |
-| Role | SYSTEM_READER | reader | |
-| | | | |
-+--------------------+----------------------------------+-----------------+-------------------+
-| | PROJECT_ADMIN | admin | project |
-| +----------------------------------+-----------------+ |
-| | PROJECT_MEMBER | member | |
-| +----------------------------------+-----------------+ |
-| Project admin or | PROJECT_READER | reader | |
-| owner role +----------------------------------+-----------------+-------------------+
-| | PROJECT_MEMBER_OR_SYSTEM_ADMIN | admin on system | system |
-| | | or member on | OR |
-| | | project | project |
-| +----------------------------------+-----------------+ |
-| | PROJECT_READER_OR_SYSTEM_READER | reader | |
-+--------------------+----------------------------------+-----------------+-------------------+
-
-We expect all deployments to migrate to new policy by 23.0.0 release so that
-we can remove the support of old policies.
++--------------------+---------------------------+----------------+-----------+
+| Legacy Rule        | New Rules                 | Operation      | scope_type|
++====================+===========================+================+===========+
+| RULE_ADMIN_API     | -> ADMIN                  | Global resource| [project] |
+|                    |                           | Write & Read   |           |
++--------------------+---------------------------+----------------+-----------+
+|                    | -> ADMIN                  | Project admin  | [project] |
+|                    |                           | level operation|           |
+|                    +---------------------------+----------------+-----------+
+| RULE_ADMIN_OR_OWNER| -> PROJECT_MEMBER_OR_ADMIN| Project        | [project] |
+|                    |                           | resource Write |           |
+|                    +---------------------------+----------------+-----------+
+|                    | -> PROJECT_READER_OR_ADMIN| Project        | [project] |
+|                    |                           | resource Read  |           |
++--------------------+---------------------------+----------------+-----------+
+
+We expect all deployments to migrate to the new policy by the OpenStack 2023.1
+(Nova 27.0.0) release so that we can remove support for the old policies.
diff --git a/doc/source/contributor/api-2.rst b/doc/source/contributor/api-2.rst
deleted file mode 100644
index a5bf639f81..0000000000
--- a/doc/source/contributor/api-2.rst
+++ /dev/null
@@ -1,56 +0,0 @@
-..
- Copyright 2010-2011 OpenStack Foundation
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-.. TODO::
-
- This should be merged into contributor/api
-
-Adding a Method to the OpenStack API
-====================================
-
-The interface is a mostly RESTful API. REST stands for Representational State Transfer and provides an architecture "style" for distributed systems using HTTP for transport. Figure out a way to express your request and response in terms of resources that are being created, modified, read, or destroyed.
-
-Routing
--------
-
-To map URLs to controllers+actions, OpenStack uses the Routes package, a clone of Rails routes for Python implementations. See http://routes.readthedocs.io/en/latest/ for more information.
-
-URLs are mapped to "action" methods on "controller" classes in ``nova/api/openstack/__init__/ApiRouter.__init__`` .
-
-See http://routes.readthedocs.io/en/latest/modules/mapper.html for all syntax, but you'll probably just need these two:
- - mapper.connect() lets you map a single URL to a single action on a controller.
- - mapper.resource() connects many standard URLs to actions on a controller.
-
-Controllers and actions
------------------------
-
-Controllers live in ``nova/api/openstack``, and inherit from nova.wsgi.Controller.
-
-See ``nova/api/openstack/compute/servers.py`` for an example.
-
-Action methods take parameters that are sucked out of the URL by mapper.connect() or .resource(). The first two parameters are self and the WebOb request, from which you can get the req.environ, req.body, req.headers, etc.
-
-Serialization
--------------
-
-Actions return a dictionary, and wsgi.Controller serializes that to JSON.
-
-Faults
-------
-
-If you need to return a non-200, you should
-return faults.Fault(webob.exc.HTTPNotFound())
-replacing the exception as appropriate.
diff --git a/doc/source/contributor/api-ref-guideline.rst b/doc/source/contributor/api-ref-guideline.rst
index cc5eab2538..0aec4eeeb4 100644
--- a/doc/source/contributor/api-ref-guideline.rst
+++ b/doc/source/contributor/api-ref-guideline.rst
@@ -351,7 +351,7 @@ In the parameter file, define the ``required`` field in each parameter.
but does not appear when non-admin users call.
If a parameter must be specified in the request or always appears
-in the response in the micoversion added or later,
+in the response in the microversion added or later,
the parameter must be defined as required (``true``).
Microversion
diff --git a/doc/source/contributor/api.rst b/doc/source/contributor/api.rst
index 2371990147..2e9cea3300 100644
--- a/doc/source/contributor/api.rst
+++ b/doc/source/contributor/api.rst
@@ -1,23 +1,38 @@
Extending the API
=================
-Background
-----------
+.. note::
-Nova has v2.1 API frameworks which supports microversions.
+ This document provides general background information on how one can extend
+ the REST API in nova. For information on the microversion support including
+ how to add new microversions, see :doc:`/contributor/microversions`. For
+ information on how to use the API, refer to the `API guide`__ and `API
+ reference guide`__.
+
+ .. __: https://docs.openstack.org/api-guide/compute/
+ .. __: https://docs.openstack.org/api-ref/compute/
+
+Nova's API is a mostly RESTful API. REST stands for *Representational State
+Transfer* and provides an architecture "style" for distributed systems using
+HTTP for transport. Figure out a way to express your request and response in
+terms of resources that are being created, modified, read, or destroyed.
+Nova has a v2.1 API framework which supports microversions.
This document covers how to add API for the v2.1 API framework. A
-:doc:`microversions specific document <microversions>` covers the details
+:doc:`microversions-specific document <microversions>` covers the details
around what is required for the microversions part.
The v2.1 API framework is under ``nova/api`` and each API is implemented in
``nova/api/openstack/compute``.
-Note that any change to the Nova API to be merged will first require a
-spec be approved first. See `here <https://opendev.org/openstack/nova-specs>`_
-for the appropriate repository. For guidance on the design of the API
-please refer to the `OpenStack API WG
-<https://wiki.openstack.org/wiki/API_Working_Group>`_
+.. note::
+
+ Any change to the Nova API to be merged will first require a spec to be
+ approved.
+ See `here <https://opendev.org/openstack/nova-specs>`_ for the appropriate
+ repository.
+ For guidance on the design of the API please refer to the `OpenStack API WG
+ <https://wiki.openstack.org/wiki/API_Working_Group>`_
Basic API Controller
@@ -25,7 +40,9 @@ Basic API Controller
API controller includes the implementation of API methods for a resource.
-A very basic controller of a v2.1 API::
+A very basic controller of a v2.1 API:
+
+.. code-block:: python
"""Basic Controller"""
@@ -51,6 +68,8 @@ A very basic controller of a v2.1 API::
# Defining support for other RESTFul methods based on resource.
+ # ...
+
See `servers.py <https://opendev.org/openstack/nova/src/branch/master/nova/api/openstack/compute/servers.py>`_ for ref.
@@ -63,7 +82,9 @@ The URL mapping is based on the plain list which routes the API request to
appropriate controller and method. Each API needs to add its route information
in ``nova/api/openstack/compute/routes.py``.
-A basic skeleton of URL mapping in routers.py::
+A basic skeleton of URL mapping in ``routers.py``:
+
+.. code-block:: python
"""URL Mapping Router List"""
@@ -74,7 +95,8 @@ A basic skeleton of URL mapping in routers.py::
# Create a controller object
basic_controller = functools.partial(
- _create_controller, basic_api.BasicController, [], [])
+ _create_controller, basic_api.BasicController, [], [],
+ )
# Routing list structure:
# (
@@ -88,20 +110,16 @@ A basic skeleton of URL mapping in routers.py::
# ...
# )
ROUTE_LIST = (
- .
- .
- .
+ # ...
('/basic', {
'GET': [basic_controller, 'index'],
'POST': [basic_controller, 'create']
}),
- .
- .
- .
+ # ...
)
-Complete routing list can be found in `routes.py <https://opendev.org/openstack/nova/src/branch/master/nova/api/openstack/compute/routes.py>`_.
-
+Complete routing list can be found in `routes.py
+<https://opendev.org/openstack/nova/src/branch/master/nova/api/openstack/compute/routes.py>`_.
Policy
~~~~~~
@@ -113,7 +131,7 @@ Modularity
~~~~~~~~~~
The Nova REST API is separated into different controllers in the directory
-'nova/api/openstack/compute/'
+``nova/api/openstack/compute/``.
Because microversions are supported in the Nova REST API, the API can be
extended without any new controller. But for code readability, the Nova REST API
@@ -140,39 +158,13 @@ JSON-Schema
The v2.1 API validates a REST request body with JSON-Schema library.
Valid body formats are defined with JSON-Schema in the directory
-'nova/api/openstack/compute/schemas'. Each definition is used at the
-corresponding method with the ``validation.schema`` decorator like::
-
- @validation.schema(schema.update_something)
- def update(self, req, id, body):
- ....
-
-Similarly to controller modularity, JSON-Schema definitions can be added
-in same or separate JSON-Schema module.
+``nova/api/openstack/compute/schemas``. Each definition is used at the
+corresponding method with the ``validation.schema`` decorator like:
-The following are the combinations of extensible API and method name
-which returns additional JSON-Schema parameters:
+.. code-block:: python
-* Create a server API - get_server_create_schema()
-
-For example, keypairs extension(Keypairs class) contains the method
-get_server_create_schema() which returns::
-
- {
- 'key_name': parameter_types.name,
- }
-
-then the parameter key_name is allowed on Create a server API.
-
-.. note:: Currently only create schema are implemented in modular way.
- Final goal is to merge them all and define the concluded
- process in this doc.
-
-These are essentially hooks into the servers controller which allow other
-controller to modify behaviour without having to modify servers.py. In
-the past not having this capability led to very large chunks of
-unrelated code being added to servers.py which was difficult to
-maintain.
+ @validation.schema(schema.update_something)
+ def update(self, req, id, body):
+     ...
Unit Tests
@@ -187,7 +179,7 @@ Negative tests would include such things as:
* Request schema validation failures, for both the request body and query
parameters
-* HTTPNotFound or other >=400 response code failures
+* ``HTTPNotFound`` or other >=400 response code failures
Functional tests and API Samples
@@ -203,6 +195,7 @@ The API samples tests are made of two parts:
``doc/api_samples/``. There is typically one directory per API controller
with subdirectories per microversion for that API controller. The unversioned
samples are used for the base v2.0 / v2.1 APIs.
+
* Corresponding API sample templates found under path
``nova/tests/functional/api_sample_tests/api_samples``. These have a similar
structure to the API reference docs samples, except the format of the sample
@@ -220,7 +213,7 @@ need to be made.
Note that it is possible to automatically generate the API reference doc
samples using the templates by simply running the tests using
-``tox -r -e api-samples``. This relies, of course, upon the test and templates
+``tox -e api-samples``. This relies, of course, upon the test and templates
being correct for the test to pass, which may take some iteration.
In general, if you are adding a new microversion to an existing API controller,
@@ -228,7 +221,7 @@ it is easiest to simply copy an existing test and modify it for the new
microversion and the new samples/templates.
The functional API samples tests are not the simplest thing in the world to
-get used to, and can be very frustrating at times when they fail in not
+get used to and it can be very frustrating at times when they fail in not
obvious ways. If you need help debugging a functional API sample test failure,
feel free to post your work-in-progress change for review and ask for help in
the ``openstack-nova`` OFTC IRC channel.
@@ -270,11 +263,13 @@ The general steps for deprecating a REST API are:
* Set a maximum allowed microversion for the route. Requests beyond that
microversion on that route will result in a ``404 HTTPNotFound`` error.
+
* Update the Compute API reference documentation to indicate the route is
deprecated and move it to the bottom of the list with the other deprecated
APIs.
+
* Deprecate, and eventually remove, related CLI / SDK functionality in other
- projects like python-novaclient.
+ projects like *python-novaclient*.
Removing deprecated APIs
@@ -294,15 +289,20 @@ The general steps for removing support for a deprecated REST API are:
microversion that does not support a deprecated API. 410 means the resource
is gone and not coming back, which is more appropriate when the API is
fully removed and will not work at any microversion.
+
* Related configuration options, policy rules, and schema validation are
removed.
+
* The API reference documentation should be updated to move the documentation
for the removed API to the `Obsolete APIs`_ section and mention in which
release the API was removed.
+
* Unit tests can be removed.
+
* API sample functional tests can be changed to assert the 410 response
behavior, but can otherwise be mostly gutted. Related \*.tpl files for the
API sample functional tests can be deleted since they will not be used.
+
* An "upgrade" :doc:`release note <releasenotes>` should be added to mention
the REST API routes that were removed along with any related configuration
options that were also removed.
diff --git a/doc/source/contributor/development-environment.rst b/doc/source/contributor/development-environment.rst
index 32b8f8334e..3e19ef1ca2 100644
--- a/doc/source/contributor/development-environment.rst
+++ b/doc/source/contributor/development-environment.rst
@@ -197,7 +197,7 @@ Using fake computes for tests
The number of instances supported by fake computes is not limited by physical
constraints. It allows you to perform stress tests on a deployment with few
resources (typically a laptop). Take care to avoid using scheduler filters
-that will limit the number of instances per compute, such as ``AggregateCoreFilter``.
+that will limit the number of instances per compute, such as ``NumInstancesFilter``.
Fake computes can also be used in multi hypervisor-type deployments in order to
take advantage of fake and "real" computes during tests:
diff --git a/doc/source/contributor/how-to-get-involved.rst b/doc/source/contributor/how-to-get-involved.rst
index dcf869bad7..28e75564b0 100644
--- a/doc/source/contributor/how-to-get-involved.rst
+++ b/doc/source/contributor/how-to-get-involved.rst
@@ -261,7 +261,7 @@ reviews:
- Where do I start? What should I review?
- There are various tools, but a good place to start is:
- https://etherpad.openstack.org/p/nova-runways-yoga
+ https://review.opendev.org/q/project:openstack/nova+status:open+label:Review-Priority%253DANY
- Depending on the time in the cycle, it's worth looking at
NeedsCodeReview blueprints:
https://blueprints.launchpad.net/nova/
@@ -323,7 +323,7 @@ becoming a member of nova-core.
How to do great nova-spec reviews?
==================================
-https://specs.openstack.org/openstack/nova-specs/specs/yoga/template.html
+https://specs.openstack.org/openstack/nova-specs/specs/2023.1/template.html
:doc:`/contributor/blueprints`.
diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst
index 2889199147..5b6e0b8f92 100644
--- a/doc/source/contributor/index.rst
+++ b/doc/source/contributor/index.rst
@@ -22,7 +22,7 @@ Getting Started
* :doc:`/contributor/development-environment`: Get your computer setup to
contribute
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -60,7 +60,7 @@ while keeping users happy and keeping developers productive.
* :doc:`/contributor/ptl-guide`: A chronological PTL reference guide
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -86,7 +86,7 @@ Reviewing
* :doc:`/contributor/documentation`: Guidelines for handling documentation
contributions
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -120,7 +120,7 @@ be Python code. All new code needs to be validated somehow.
* :doc:`/contributor/testing/eventlet-profiling`
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -140,8 +140,6 @@ changes done to the API, as the impact can be very wide.
* :doc:`/contributor/api`: How the code is structured inside the API layer
-* :doc:`/contributor/api-2`: (needs update)
-
* :doc:`/contributor/microversions`: How the API is (micro)versioned and what
you need to do when adding an API exposed feature that needs a new
microversion.
@@ -149,15 +147,20 @@ changes done to the API, as the impact can be very wide.
* :doc:`/contributor/api-ref-guideline`: The guideline to write the API
reference.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+Nova also provides notifications over the RPC API, which you may wish to
+extend.
+
+* :doc:`/contributor/notifications`: How to add your own notifications
+
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
api
- api-2
microversions
api-ref-guideline
+ notifications
Nova Major Subsystems
=====================
@@ -173,7 +176,7 @@ diving in.
* :doc:`/contributor/resize-and-cold-migrate`: Describes the differences and
similarities between resize and cold migrate operations.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
diff --git a/doc/source/contributor/notifications.rst b/doc/source/contributor/notifications.rst
new file mode 100644
index 0000000000..d94051a2e5
--- /dev/null
+++ b/doc/source/contributor/notifications.rst
@@ -0,0 +1,272 @@
+=============
+Notifications
+=============
+
+As discussed in :doc:`/admin/notifications`, nova emits notifications to the
+message bus. There are two types of notifications provided in nova: legacy
+(unversioned) notifications and versioned notifications. As a developer, you
+may choose to add additional notifications or extend existing notifications.
+
+.. note::
+
+ This section provides information on adding your own notifications in nova.
+ For background information on notifications including usage information,
+ refer to :doc:`/admin/notifications`.
+ For a list of available versioned notifications, refer to
+ :doc:`/reference/notifications`.
+
+
+How to add a new versioned notification
+---------------------------------------
+
+To provide the versioning for versioned notifications, each notification
+is modeled with oslo.versionedobjects. Every versioned notification class
+shall inherit from the ``nova.notifications.objects.base.NotificationBase``
+which already defines three mandatory fields of the notification
+``event_type``, ``publisher`` and ``priority``. The new notification class
+shall add a new field ``payload`` with an appropriate payload type. The payload
+object of the notifications shall inherit from the
+``nova.notifications.objects.base.NotificationPayloadBase`` class and shall
+define the fields of the payload as versionedobject fields. The base classes
+are described in the following section.
+
+.. rubric:: The ``nova.notifications.objects.base`` module
+
+.. automodule:: nova.notifications.objects.base
+ :noindex:
+ :members:
+ :show-inheritance:
+
+Note that the notification objects must not be registered to the
+``NovaObjectRegistry`` to avoid mixing nova-internal objects with the
+notification objects. Instead, use the ``register_notification`` decorator on
+every concrete notification object.
+
+The following code example defines the necessary model classes for a new
+notification ``myobject.update``.
+
+.. code-block:: python
+
+ @notification.notification_sample('myobject-update.json')
+ @object_base.NovaObjectRegistry.register_notification
+ class MyObjectNotification(notification.NotificationBase):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'payload': fields.ObjectField('MyObjectUpdatePayload')
+ }
+
+
+ @object_base.NovaObjectRegistry.register_notification
+ class MyObjectUpdatePayload(notification.NotificationPayloadBase):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+ fields = {
+ 'some_data': fields.StringField(),
+ 'another_data': fields.StringField(),
+ }
+
+
+After that the notification can be populated and emitted with the following
+code.
+
+.. code-block:: python
+
+ payload = MyObjectUpdatePayload(some_data="foo", another_data="bar")
+ MyObjectNotification(
+ publisher=notification.NotificationPublisher.from_service_obj(
+ <nova.objects.service.Service instance that emits the notification>),
+ event_type=notification.EventType(
+ object='myobject',
+ action=fields.NotificationAction.UPDATE),
+ priority=fields.NotificationPriority.INFO,
+ payload=payload).emit(context)
+
+The above code will generate the following notification on the wire.
+
+.. code-block:: json
+
+ {
+ "priority":"INFO",
+ "payload":{
+ "nova_object.namespace":"nova",
+ "nova_object.name":"MyObjectUpdatePayload",
+ "nova_object.version":"1.0",
+ "nova_object.data":{
+ "some_data":"foo",
+ "another_data":"bar",
+ }
+ },
+ "event_type":"myobject.update",
+ "publisher_id":"<the name of the service>:<the host where the service runs>"
+ }
+
+
+It is possible to reuse an existing versioned object as a notification
+payload by adding a ``SCHEMA`` field to the payload class that defines a
+mapping between the fields of existing objects and the fields of the new
+payload object. For example, the service.status notification reuses the
+existing ``nova.objects.service.Service`` object when defining the
+notification's payload.
+
+.. code-block:: python
+
+ @notification.notification_sample('service-update.json')
+ @object_base.NovaObjectRegistry.register_notification
+ class ServiceStatusNotification(notification.NotificationBase):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'payload': fields.ObjectField('ServiceStatusPayload')
+ }
+
+ @object_base.NovaObjectRegistry.register_notification
+ class ServiceStatusPayload(notification.NotificationPayloadBase):
+ SCHEMA = {
+ 'host': ('service', 'host'),
+ 'binary': ('service', 'binary'),
+ 'topic': ('service', 'topic'),
+ 'report_count': ('service', 'report_count'),
+ 'disabled': ('service', 'disabled'),
+ 'disabled_reason': ('service', 'disabled_reason'),
+ 'availability_zone': ('service', 'availability_zone'),
+ 'last_seen_up': ('service', 'last_seen_up'),
+ 'forced_down': ('service', 'forced_down'),
+ 'version': ('service', 'version')
+ }
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+ fields = {
+ 'host': fields.StringField(nullable=True),
+ 'binary': fields.StringField(nullable=True),
+ 'topic': fields.StringField(nullable=True),
+ 'report_count': fields.IntegerField(),
+ 'disabled': fields.BooleanField(),
+ 'disabled_reason': fields.StringField(nullable=True),
+ 'availability_zone': fields.StringField(nullable=True),
+ 'last_seen_up': fields.DateTimeField(nullable=True),
+ 'forced_down': fields.BooleanField(),
+ 'version': fields.IntegerField(),
+ }
+
+ def populate_schema(self, service):
+ super(ServiceStatusPayload, self).populate_schema(service=service)
+
+If the ``SCHEMA`` field is defined then the payload object needs to be
+populated with the ``populate_schema`` call before it can be emitted.
+
+.. code-block:: python
+
+    payload = ServiceStatusPayload()
+    payload.populate_schema(service=<nova.objects.service.Service object>)
+    ServiceStatusNotification(
+        publisher=notification.NotificationPublisher.from_service_obj(
+            <nova.objects.service.Service object>),
+        event_type=notification.EventType(
+            object='service',
+            action=fields.NotificationAction.UPDATE),
+        priority=fields.NotificationPriority.INFO,
+        payload=payload).emit(context)
+
+The above code will emit the :ref:`already shown notification <service.update>`
+on the wire.
+
+Every item in the ``SCHEMA`` has the syntax of::
+
+    <payload field name which needs to be filled>:
+        (<name of the parameter of the populate_schema call>,
+         <the name of a field of the parameter object>)
+
+The mapping defined in the ``SCHEMA`` field has the following semantics. When
+the ``populate_schema`` function is called, the content of the ``SCHEMA``
+field is enumerated and the value of the referenced field of the given
+parameter object is copied to the corresponding payload field. So in the above
+example the ``host`` field of the payload object is populated from the value
+of the ``host`` field of the ``service`` object that is passed as a parameter
+to the ``populate_schema`` call.
+
+A notification payload object can reuse fields from multiple existing
+objects, and a notification can have both new and reused fields in its
+payload.
+
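+For instance, a payload could combine fields reused from two different objects
+with a brand new field (an illustrative sketch only, not an existing nova
+payload):
+
+.. code-block:: python
+
+    @object_base.NovaObjectRegistry.register_notification
+    class MyMixedPayload(notification.NotificationPayloadBase):
+        # 'host' and 'uuid' are filled by populate_schema() from the passed
+        # objects; 'extra_info' is a new field set directly by the caller.
+        SCHEMA = {
+            'host': ('service', 'host'),
+            'uuid': ('instance', 'uuid'),
+        }
+        # Version 1.0: Initial version
+        VERSION = '1.0'
+        fields = {
+            'host': fields.StringField(nullable=True),
+            'uuid': fields.UUIDField(),
+            'extra_info': fields.StringField(),
+        }
+
+        def populate_schema(self, service, instance):
+            super(MyMixedPayload, self).populate_schema(
+                service=service, instance=instance)
+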
+Note that the notification's publisher instance can be created in two
+different ways. It can be created by instantiating the
+``NotificationPublisher`` object with a ``host`` and a ``source`` string
+parameter, or it can be generated from a ``Service`` object by calling the
+``NotificationPublisher.from_service_obj`` function.
+
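+As a minimal sketch (the ``host`` and ``source`` values below are just
+placeholders):
+
+.. code-block:: python
+
+    # Build the publisher directly from a host name and a source service name.
+    publisher = notification.NotificationPublisher(
+        host='compute-1.example.com', source='nova-compute')
+
+    # Or derive it from an existing Service object.
+    publisher = notification.NotificationPublisher.from_service_obj(service)
+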
+Versioned notifications shall have a sample file stored under the
+``doc/notification_samples`` directory and the notification object shall be
+decorated with the ``notification_sample`` decorator. For example the
+``service.update`` notification has a sample file stored in
+``doc/notification_samples/service-update.json`` and the
+``ServiceStatusNotification`` class is decorated accordingly.
+
+Notification payload classes can use inheritance to avoid duplicating common
+payload fragments in nova code. However the leaf classes used directly in a
+notification should be created with care to avoid a future need for an extra
+level of inheritance that changes the name of the leaf class, as that class
+name is present in the emitted payload. If this cannot be avoided and the only
+change is the renaming, then the version of the new payload shall be the same
+as the version of the old payload before the rename. See [1]_ as an example.
+If the renaming involves any other changes to the payload (e.g. adding new
+fields) then the version of the new payload shall be higher than that of the
+old payload. See [2]_ as an example.
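+
+As a purely hypothetical illustration of this rule, assume the
+``MyObjectUpdatePayload`` class from the earlier example is renamed while a
+common parent class is introduced, without changing the resulting set of
+fields; the renamed leaf then keeps its version:
+
+.. code-block:: python
+
+    @object_base.NovaObjectRegistry.register_notification
+    class MyObjectCommonPayload(notification.NotificationPayloadBase):
+        # Version 1.0: Initial version
+        VERSION = '1.0'
+        fields = {
+            'some_data': fields.StringField(),
+        }
+
+
+    @object_base.NovaObjectRegistry.register_notification
+    class MyObjectUpdatedPayload(MyObjectCommonPayload):
+        # Version 1.0: Kept from MyObjectUpdatePayload as this is only a
+        # rename; adding a new field at the same time would instead require
+        # bumping the version to 1.1.
+        VERSION = '1.0'
+        fields = {
+            'another_data': fields.StringField(),
+        }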
+
+
+What should be in the notification payload?
+-------------------------------------------
+
+This is just a guideline. You should always consider the actual use case that
+requires the notification.
+
+* Always include the identifier (e.g. uuid) of the entity that can be used to
+ query the whole entity over the REST API so that the consumer can get more
+ information about the entity.
+
+* You should consider including those fields that are related to the event
+ you are sending the notification about. For example if a change of a field of
+ the entity triggers an update notification then you should include that field
+ in the payload.
+
+* An update notification should contain information about what part of the
+ entity changed, either by filling the ``nova_object.changes`` part of the
+ payload (note that this is not currently supported by the notification
+ framework) or by sending both the old state and the new state of the entity
+ in the payload.
+
+* You should never include a nova internal object in the payload. Create a new
+ object and use the ``SCHEMA`` field to map the internal object to the
+ notification payload. This way the evolution of the internal object model
+ can be decoupled from the evolution of the notification payload.
+
+ .. important::
+
+ This does not mean that every field from internal objects
+ should be mirrored in the notification payload objects.
+ Think about what is actually needed by a consumer before
+ adding it to a payload. When in doubt, if no one is requesting
+ specific information in notifications, then leave it out until
+ someone asks for it.
+
+* The delete notification should contain the same information as the create or
+ update notifications. This makes it possible for the consumer to listen only to
+ the delete notifications but still filter on some fields of the entity
+ (e.g. project_id).
+
+
+What should **NOT** be in the notification payload
+--------------------------------------------------
+
+* Generally anything that contains sensitive information about the internals
+ of the nova deployment, for example fields that contain access credentials
+ to a cell database or message queue (see `bug 1823104`_).
+
+.. _bug 1823104: https://bugs.launchpad.net/nova/+bug/1823104
+
+.. references:
+
+.. [1] https://review.opendev.org/#/c/463001/
+.. [2] https://review.opendev.org/#/c/453077/
diff --git a/doc/source/contributor/process.rst b/doc/source/contributor/process.rst
index 2ee2e2fa6f..f1be1c1b4a 100644
--- a/doc/source/contributor/process.rst
+++ b/doc/source/contributor/process.rst
@@ -36,8 +36,8 @@ If you are new to Nova, please read this first: :ref:`getting_involved`.
Dates overview
==============
-For Yoga, please see:
-https://wiki.openstack.org/wiki/Nova/Yoga_Release_Schedule
+For 2023.1 Antelope, please see:
+https://wiki.openstack.org/wiki/Nova/2023.1_Release_Schedule
.. note:: Throughout this document any link which references the name of a
release cycle in the link can usually be changed to the name of the
@@ -102,9 +102,9 @@ Why we have a Spec Freeze:
By the freeze date, we expect all blueprints that will be approved for the
cycle to be listed on launchpad and all relevant specs to be merged.
-For Yoga, blueprints can be found at
-https://blueprints.launchpad.net/nova/yoga and specs at
-https://specs.openstack.org/openstack/nova-specs/specs/yoga/index.html
+For 2023.1 Antelope, blueprints can be found at
+https://blueprints.launchpad.net/nova/antelope and specs at
+https://specs.openstack.org/openstack/nova-specs/specs/2023.1/index.html
Starting with Liberty, we are keeping a backlog open for submission at all
times.
diff --git a/doc/source/contributor/ptl-guide.rst b/doc/source/contributor/ptl-guide.rst
index d12e1beeb0..b530b100bc 100644
--- a/doc/source/contributor/ptl-guide.rst
+++ b/doc/source/contributor/ptl-guide.rst
@@ -29,7 +29,11 @@ New PTL
* Get acquainted with the release schedule
- * Example: https://wiki.openstack.org/wiki/Nova/Stein_Release_Schedule
+ * Example: https://releases.openstack.org/antelope/schedule.html
+
+ * Also, note that we usually create a specific wiki page for each cycle like
+ https://wiki.openstack.org/wiki/Nova/2023.1_Release_Schedule but it's
+ preferred to use the main release schedule above.
Project Team Gathering
----------------------
@@ -37,30 +41,34 @@ Project Team Gathering
* Create PTG planning etherpad, retrospective etherpad and alert about it in
nova meeting and dev mailing list
- * Example: https://etherpad.openstack.org/p/nova-ptg-stein
+ * Example: https://etherpad.opendev.org/p/nova-antelope-ptg
* Run sessions at the PTG
-* Have a priorities discussion at the PTG
+* Do a retro of the previous cycle
- * Example: https://etherpad.openstack.org/p/nova-ptg-stein-priorities
+* Agree on the agenda for this release, including but not limited to:
-* Sign up for group photo at the PTG (if applicable)
+ * Number of review days, for either specs or implementation
+ * Define the Spec approval and Feature freeze dates
+ * Modify the release schedule if needed by adding the new dates.
+ As an example: https://review.opendev.org/c/openstack/releases/+/877094
+
+* Discuss the implications of the current release being `SLURP or non-SLURP`__
+
+.. __: https://governance.openstack.org/tc/resolutions/20220210-release-cadence-adjustment.html
-* Open review runways for the cycle
+* Sign up for group photo at the PTG (if applicable)
- * Example: https://etherpad.openstack.org/p/nova-runways-stein
After PTG
---------
* Send PTG session summaries to the dev mailing list
-* Make sure the cycle priorities spec gets reviewed and merged
-
- * Example: https://specs.openstack.org/openstack/nova-specs/priorities/stein-priorities.html
+* Add `RFE bugs`__ if you have action items that are simple to do but without an owner yet.
-* Run the count-blueprints script daily to gather data for the cycle burndown chart
+.. __: https://bugs.launchpad.net/nova/+bugs?field.tag=rfe
A few weeks before milestone 1
------------------------------
@@ -70,12 +78,13 @@ A few weeks before milestone 1
* Periodically check the series goals others have proposed in the “Set series
goals†link:
- * Example: https://blueprints.launchpad.net/nova/stein/+setgoals
+ * Example: https://blueprints.launchpad.net/nova/antelope/+setgoals
Milestone 1
-----------
-* Do milestone release of nova and python-novaclient (in launchpad only)
+* Do milestone release of nova and python-novaclient (in launchpad only; this
+ is optional)
* This is launchpad bookkeeping only. With the latest release team changes,
projects no longer do milestone releases. See: https://releases.openstack.org/reference/release_models.html#cycle-with-milestones-legacy
@@ -87,6 +96,8 @@ Milestone 1
the minor version to leave room for future stable branch releases
* os-vif
+ * placement
+ * os-traits / os-resource-classes
* Release stable branches of nova
@@ -117,28 +128,26 @@ Summit
* Prepare the on-boarding session materials. Enlist help of others
+* Prepare the operator meet-and-greet session. Enlist help of others
+
A few weeks before milestone 2
------------------------------
* Plan a spec review day (optional)
-* Periodically check the series goals others have proposed in the “Set series
- goals†link:
-
- * Example: https://blueprints.launchpad.net/nova/stein/+setgoals
-
Milestone 2
-----------
-* Spec freeze
+* Spec freeze (if agreed)
-* Release nova and python-novaclient
+* Release nova and python-novaclient (if new features were merged)
* Release other libraries as needed
* Stable branch releases of nova
* For nova, set the launchpad milestone release as “released†with the date
+ (this is optional)
Shortly after spec freeze
-------------------------
@@ -146,7 +155,7 @@ Shortly after spec freeze
* Create a blueprint status etherpad to help track, especially non-priority
blueprint work, to help things get done by Feature Freeze (FF). Example:
- * https://etherpad.openstack.org/p/nova-stein-blueprint-status
+ * https://etherpad.opendev.org/p/nova-antelope-blueprint-status
* Create or review a patch to add the next release’s specs directory so people
can propose specs for next release after spec freeze for current release
@@ -155,13 +164,15 @@ Non-client library release freeze
---------------------------------
* Final release for os-vif
+* Final release for os-traits
+* Final release for os-resource-classes
Milestone 3
-----------
* Feature freeze day
-* Client library freeze, release python-novaclient
+* Client library freeze, release python-novaclient and osc-placement
* Close out all blueprints, including “catch all†blueprints like mox,
versioned notifications
@@ -170,6 +181,9 @@ Milestone 3
* For nova, set the launchpad milestone release as “released†with the date
+* Start writing the `cycle highlights
+ <https://docs.openstack.org/project-team-guide/release-management.html#cycle-highlights>`__
+
Week following milestone 3
--------------------------
@@ -196,7 +210,7 @@ A few weeks before RC
* Make a RC1 todos etherpad and tag bugs as ``<release>-rc-potential`` and keep
track of them, example:
- * https://etherpad.openstack.org/p/nova-stein-rc-potential
+ * https://etherpad.opendev.org/p/nova-antelope-rc-potential
* Go through the bug list and identify any rc-potential bugs and tag them
@@ -239,7 +253,7 @@ RC
* Example: https://review.opendev.org/644412
-* Write the cycle-highlights in marketing-friendly sentences and propose to the
+* Push the cycle-highlights in marketing-friendly sentences and propose to the
openstack/releases repo. Usually based on reno prelude but made more readable
and friendly
@@ -254,15 +268,13 @@ Immediately after RC
* https://wiki.openstack.org/wiki/Nova/ReleaseChecklist
- * Add database migration placeholders
-
- * Example: https://review.opendev.org/650964
-
- * Drop old RPC compat code (if there was a RPC major version bump)
+ * Drop old RPC compat code (if there was an RPC major version bump and if
+ agreed on at the PTG)
* Example: https://review.opendev.org/543580
- * Bump the oldest supported compute service version
+ * Bump the oldest supported compute service version (if the master branch is now
+ on a non-SLURP version)
* https://review.opendev.org/#/c/738482/
@@ -276,7 +288,9 @@ Immediately after RC
* Set the previous to last series status to “supportedâ€
-* Repeat launchpad steps ^ for python-novaclient
+* Repeat launchpad steps ^ for python-novaclient (optional)
+
+* Repeat launchpad steps ^ for placement
* Register milestones in launchpad for the new cycle based on the new cycle
release schedule
@@ -294,7 +308,7 @@ Immediately after RC
* Create new release wiki:
- * Example: https://wiki.openstack.org/wiki/Nova/Train_Release_Schedule
+ * Example: https://wiki.openstack.org/wiki/Nova/2023.1_Release_Schedule
* Update the contributor guide for the new cycle
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 98da0106f8..8cd5ae9ceb 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -49,7 +49,7 @@ For End Users
As an end user of nova, you'll use nova to create and manage servers with
either tools or the API directly.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -96,8 +96,8 @@ resources will help you get started with consuming the API directly.
Nova can be configured to emit notifications over RPC.
-* :ref:`Versioned Notifications <versioned_notification_samples>`: This
- provides the list of existing versioned notifications with sample payloads.
+* :doc:`Versioned Notifications </admin/notifications>`: This
+ provides information on the notifications emitted by nova.
Other end-user guides can be found under :doc:`/user/index`.
@@ -110,7 +110,7 @@ Architecture Overview
* :doc:`Nova architecture </admin/architecture>`: An overview of how all the parts in
nova fit together.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -128,7 +128,7 @@ having installed :keystone-doc:`keystone <install/>`, :glance-doc:`glance
:placement-doc:`placement <install/>`. Ensure that you follow their install
guides first.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:maxdepth: 2
@@ -192,7 +192,7 @@ Once you are running nova, the following information is extremely useful.
instances (either via metadata server or config drive) for your specific
purposes.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -211,7 +211,7 @@ Reference Material
* :doc:`Configuration Guide <configuration/index>`: Information on configuring
the system, including role-based access control policy rules.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -230,7 +230,7 @@ For Contributors
both current and future looking parts of our architecture.
These are collected here.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
diff --git a/doc/source/install/overview.rst b/doc/source/install/overview.rst
index 9781973d9f..c8e72e4256 100644
--- a/doc/source/install/overview.rst
+++ b/doc/source/install/overview.rst
@@ -67,7 +67,7 @@ follows:
For more information on production architectures, see the `Architecture Design
Guide <https://docs.openstack.org/arch-design/>`_, `OpenStack Operations Guide
-<https://wiki.openstack.org/wiki/OpsGuide>`_, and `OpenStack Networking Guide
+<https://docs.openstack.org/operations-guide/>`_, and `OpenStack Networking Guide
<https://docs.openstack.org/ocata/networking-guide/>`_.
.. _figure-hwreqs:
diff --git a/doc/source/install/verify.rst b/doc/source/install/verify.rst
index 99936c1d9c..c4e0383af4 100644
--- a/doc/source/install/verify.rst
+++ b/doc/source/install/verify.rst
@@ -119,10 +119,6 @@ Verify operation of the Compute service.
| Result: Success |
| Details: None |
+--------------------------------------------------------------------+
- | Check: Policy Scope-based Defaults |
- | Result: Success |
- | Details: None |
- +--------------------------------------------------------------------+
| Check: Policy File JSON to YAML Migration |
| Result: Success |
| Details: None |
diff --git a/doc/source/reference/attach-volume.rst b/doc/source/reference/attach-volume.rst
index a38a32e533..c82c035a14 100644
--- a/doc/source/reference/attach-volume.rst
+++ b/doc/source/reference/attach-volume.rst
@@ -22,7 +22,7 @@ the optional interactions with the ``os-brick`` library on the compute hosts
during the request.
.. note:: ``os-brick`` is not always used to connect volumes to the host, most
- notibly when connecting an instance natively to ceph ``rbd`` volumes
+ notably when connecting an instance natively to ceph ``rbd`` volumes
The diagram also outlines the various locks taken on the compute during the
attach volume flow. In this example these include locks against the
diff --git a/doc/source/reference/block-device-structs.rst b/doc/source/reference/block-device-structs.rst
index 1b8636c537..8c2508539f 100644
--- a/doc/source/reference/block-device-structs.rst
+++ b/doc/source/reference/block-device-structs.rst
@@ -71,6 +71,8 @@ called ``block_device_info``, and is generated by
``root_device_name``
Hypervisor's notion of the root device's name
+``image``
+ The image-backed disk, if one is used
``ephemerals``
A list of all ephemeral disks
``block_device_mapping``
@@ -105,13 +107,6 @@ persist data to the BDM object in the DB.
In other contexts this filtering will not have happened, and
``block_device_mapping`` will contain all volumes.
-.. note::
-
- Unlike BDMs, ``block_device_info`` does not currently represent all
- disks that an instance might have. Significantly, it will not contain any
- representation of an image-backed local disk, i.e. the root disk of a
- typical instance which isn't boot-from-volume. Other representations used
- by the libvirt driver explicitly reconstruct this missing disk.
libvirt driver specific BDM data structures
===========================================
diff --git a/doc/source/reference/database-migrations.rst b/doc/source/reference/database-migrations.rst
index add7597e93..ea2b9050d9 100644
--- a/doc/source/reference/database-migrations.rst
+++ b/doc/source/reference/database-migrations.rst
@@ -24,6 +24,10 @@ Schema migrations
The database migration engine was changed from ``sqlalchemy-migrate`` to
``alembic``.
+.. versionchanged:: 27.0.0 (Antelope)
+
+ The legacy ``sqlalchemy-migrate``-based database migrations were removed.
+
The `alembic`__ database migration tool is used to manage schema migrations in
nova. The migration files and related metadata can be found in
``nova/db/api/migrations`` (for the API database) and
@@ -36,10 +40,10 @@ respectively.
.. note::
- There are also legacy migrations provided in the ``legacy_migrations``
- subdirectory for both the API and main databases. These are provided to
- facilitate upgrades from pre-Xena (24.0.0) deployments and will be removed
- in a future release. They should not be modified or extended.
+ There were also legacy migrations provided in the ``legacy_migrations``
+ subdirectory for both the API and main databases. These were provided to
+ facilitate upgrades from pre-Xena (24.0.0) deployments. They were removed
+ in the 27.0.0 (Antelope) release.
The best reference for alembic is the `alembic documentation`__, but a small
example is provided here. You can create the migration either manually or
diff --git a/doc/source/reference/glossary.rst b/doc/source/reference/glossary.rst
index 304467e908..a337699aca 100644
--- a/doc/source/reference/glossary.rst
+++ b/doc/source/reference/glossary.rst
@@ -23,6 +23,14 @@ Glossary
has an empty ("") ``image`` parameter in ``GET /servers/{server_id}``
responses.
+ Cell
+ A cell is a shard or horizontal partition in a nova deployment.
+ A cell mostly consists of a database, a message queue, and a set of compute
+ nodes. All deployments will have at least one real cell (plus the special
+ "fake" cell, ``cell0``). Larger deployments can have many.
+
+ For more information, refer to :doc:`/admin/cells`.
+
Cross-Cell Resize
A resize (or cold migrate) operation where the source and destination
compute hosts are mapped to different cells. By default, resize and
diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst
index d162157ac4..cb376ad53a 100644
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -24,8 +24,7 @@ The following is a dive into some of the internals in nova.
compute instances
* :doc:`/reference/threading`: The concurrency model used in nova, which is
based on eventlet, and may not be familiar to everyone.
-* :doc:`/reference/notifications`: How the notifications subsystem works in
- nova, and considerations when adding notifications.
+* :doc:`/reference/notifications`: The notifications available in nova.
* :doc:`/reference/update-provider-tree`: A detailed explanation of the
``ComputeDriver.update_provider_tree`` method.
* :doc:`/reference/upgrade-checks`: A guide to writing automated upgrade
@@ -45,7 +44,7 @@ The following is a dive into some of the internals in nova.
* :doc:`/reference/libvirt-distro-support-matrix`: Libvirt virt driver OS
distribution support matrix
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -74,7 +73,7 @@ Debugging
* :doc:`/reference/gmr`: Inspired by Amiga, a way to trigger a very
comprehensive dump of a running service for deep debugging.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -98,7 +97,7 @@ these are a great place to start reading up on the current plans.
* :doc:`/reference/scheduler-evolution`: Motivation behind the scheduler /
placement evolution
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
@@ -113,7 +112,7 @@ Additional Information
* :doc:`/reference/glossary`: A quick reference guide to some of the terms you
might encounter working on or using nova.
-.. # NOTE(amotoki): toctree needs to be placed at the end of the secion to
+.. # NOTE(amotoki): toctree needs to be placed at the end of the section to
# keep the document structure in the PDF doc.
.. toctree::
:hidden:
diff --git a/doc/source/reference/isolate-aggregates.rst b/doc/source/reference/isolate-aggregates.rst
index f5487df912..7b493f4db9 100644
--- a/doc/source/reference/isolate-aggregates.rst
+++ b/doc/source/reference/isolate-aggregates.rst
@@ -13,6 +13,8 @@
License for the specific language governing permissions and limitations
under the License.
+.. _filtering_hosts_by_isolating_aggregates:
+
Filtering hosts by isolating aggregates
=======================================
diff --git a/doc/source/reference/libvirt-distro-support-matrix.rst b/doc/source/reference/libvirt-distro-support-matrix.rst
index e1a31cd6cc..fd22fc5ba3 100644
--- a/doc/source/reference/libvirt-distro-support-matrix.rst
+++ b/doc/source/reference/libvirt-distro-support-matrix.rst
@@ -180,7 +180,7 @@ OS distribution versions
------------------------
This table provides information on a representative sample of OS distros and
-the version of libirt/QEMU/libguestfs that they ship. This is **NOT** intended
+the version of libvirt/QEMU/libguestfs that they ship. This is **NOT** intended
to be an exhaustive list of distros where OpenStack Nova can run - it is
intended to run on any Linux distro that can satisfy the minimum required
software versions. This table merely aims to help identify when minimum
diff --git a/doc/source/reference/notifications.rst b/doc/source/reference/notifications.rst
index 788b3bccde..24655345f2 100644
--- a/doc/source/reference/notifications.rst
+++ b/doc/source/reference/notifications.rst
@@ -1,375 +1,15 @@
-..
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
+=================================
+Available versioned notifications
+=================================
- http://www.apache.org/licenses/LICENSE-2.0
+.. note::
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-Notifications in Nova
-=====================
-
-Similarly to other OpenStack services Nova emits notifications to the message
-bus with the Notifier class provided by :oslo.messaging-doc:`oslo.messaging
-<reference/notifier.html>`. From the notification consumer point of view a
-notification consists of two parts: an envelope with a fixed structure defined
-by oslo.messaging and a payload defined by the service emitting the
-notification. The envelope format is the following::
-
- {
- "priority": <string, selected from a predefined list by the sender>,
- "event_type": <string, defined by the sender>,
- "timestamp": <string, the isotime of when the notification emitted>,
- "publisher_id": <string, defined by the sender>,
- "message_id": <uuid, generated by oslo>,
- "payload": <json serialized dict, defined by the sender>
- }
-
-Notifications can be completely disabled by setting the following in
-your nova configuration file:
-
-.. code-block:: ini
-
- [oslo_messaging_notifications]
- driver = noop
-
-There are two types of notifications in Nova: legacy notifications which have
-an unversioned payload and newer notifications which have a versioned payload.
-
-Unversioned notifications
--------------------------
-Nova code uses the nova.rpc.get_notifier call to get a configured
-oslo.messaging Notifier object and it uses the oslo provided functions on the
-Notifier object to emit notifications. The configuration of the returned
-Notifier object depends on the parameters of the get_notifier call and the
-value of the oslo.messaging configuration options ``driver`` and ``topics``.
-There are notification configuration options in Nova which are specific for
-certain notification types like
-:oslo.config:option:`notifications.notify_on_state_change`,
-:oslo.config:option:`notifications.default_level`, etc.
-
-The structure of the payload of the unversioned notifications is defined in the
-code that emits the notification and no documentation or enforced backward
-compatibility contract exists for that format.
-
-
-Versioned notifications
------------------------
-The versioned notification concept is created to fix the shortcomings of the
-unversioned notifications. The envelope structure of the emitted notification
-is the same as in the unversioned notification case as it is provided by
-oslo.messaging. However the payload is not a free form dictionary but a
-serialized :oslo.versionedobjects-doc:`oslo versionedobjects object <>`.
-
-.. _service.update:
-
-For example the wire format of the ``service.update`` notification looks like
-the following::
-
- {
- "priority":"INFO",
- "payload":{
- "nova_object.namespace":"nova",
- "nova_object.name":"ServiceStatusPayload",
- "nova_object.version":"1.0",
- "nova_object.data":{
- "host":"host1",
- "disabled":false,
- "last_seen_up":null,
- "binary":"nova-compute",
- "topic":"compute",
- "disabled_reason":null,
- "report_count":1,
- "forced_down":false,
- "version":2
- }
- },
- "event_type":"service.update",
- "publisher_id":"nova-compute:host1"
- }
-
-The serialized oslo versionedobject as a payload provides a version number to
-the consumer so the consumer can detect if the structure of the payload is
-changed. Nova provides the following contract regarding the versioned
-notification payload:
-
-* the payload version defined by the ``nova_object.version`` field of the
- payload will be increased if and only if the syntax or the semantics of the
- ``nova_object.data`` field of the payload is changed.
-* a minor version bump indicates a backward compatible change which means that
- only new fields are added to the payload so a well written consumer can still
- consume the new payload without any change.
-* a major version bump indicates a backward incompatible change of the payload
- which can mean removed fields, type change, etc in the payload.
-* there is an additional field 'nova_object.name' for every payload besides
- 'nova_object.data' and 'nova_object.version'. This field contains the name of
- the nova internal representation of the payload type. Client code should not
- depend on this name.
-
-There is a Nova configuration parameter
-:oslo.config:option:`notifications.notification_format`
-that can be used to specify which notifications are emitted by Nova.
-
-The versioned notifications are emitted to a different topic than the legacy
-notifications. By default they are emitted to 'versioned_notifications' but it
-is configurable in the nova.conf with the
-:oslo.config:option:`notifications.versioned_notifications_topics`
-config option.
-
-A `presentation from the Train summit`_ goes over the background and usage of
-versioned notifications, and provides a demo.
-
-.. _presentation from the Train summit: https://www.openstack.org/videos/summits/denver-2019/nova-versioned-notifications-the-result-of-a-3-year-journey
-
-How to add a new versioned notification
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To support the above contract from the Nova code every versioned notification
-is modeled with oslo versionedobjects. Every versioned notification class
-shall inherit from the ``nova.notifications.objects.base.NotificationBase``
-which already defines three mandatory fields of the notification
-``event_type``, ``publisher`` and ``priority``. The new notification class
-shall add a new field ``payload`` with an appropriate payload type. The payload
-object of the notifications shall inherit from the
-``nova.notifications.objects.base.NotificationPayloadBase`` class and shall
-define the fields of the payload as versionedobject fields. The base classes
-are described in the following section.
-
-The nova.notifications.objects.base module
-..........................................
-.. automodule:: nova.notifications.objects.base
- :noindex:
- :members:
- :show-inheritance:
-
-Please note that the notification objects shall not be registered to the
-NovaObjectRegistry to avoid mixing nova internal objects with the notification
-objects. Instead of that use the register_notification decorator on every
-concrete notification object.
-
-The following code example defines the necessary model classes for a new
-notification ``myobject.update``::
-
- @notification.notification_sample('myobject-update.json')
- @object_base.NovaObjectRegistry.register.register_notification
- class MyObjectNotification(notification.NotificationBase):
- # Version 1.0: Initial version
- VERSION = '1.0'
-
- fields = {
- 'payload': fields.ObjectField('MyObjectUpdatePayload')
- }
-
-
- @object_base.NovaObjectRegistry.register.register_notification
- class MyObjectUpdatePayload(notification.NotificationPayloadBase):
- # Version 1.0: Initial version
- VERSION = '1.0'
- fields = {
- 'some_data': fields.StringField(),
- 'another_data': fields.StringField(),
- }
-
-
-After that the notification can be populated and emitted with the following
-code::
-
- payload = MyObjectUpdatePayload(some_data="foo", another_data="bar")
- MyObjectNotification(
- publisher=notification.NotificationPublisher.from_service_obj(
- <nova.objects.service.Service instance that emits the notification>),
- event_type=notification.EventType(
- object='myobject',
- action=fields.NotificationAction.UPDATE),
- priority=fields.NotificationPriority.INFO,
- payload=payload).emit(context)
-
-The above code will generate the following notification on the wire::
-
- {
- "priority":"INFO",
- "payload":{
- "nova_object.namespace":"nova",
- "nova_object.name":"MyObjectUpdatePayload",
- "nova_object.version":"1.0",
- "nova_object.data":{
- "some_data":"foo",
- "another_data":"bar",
- }
- },
- "event_type":"myobject.update",
- "publisher_id":"<the name of the service>:<the host where the service runs>"
- }
-
-
-There is a possibility to reuse an existing versionedobject as notification
-payload by adding a ``SCHEMA`` field for the payload class that defines a
-mapping between the fields of existing objects and the fields of the new
-payload object. For example the service.status notification reuses the existing
-``nova.objects.service.Service`` object when defines the notification's
-payload::
-
- @notification.notification_sample('service-update.json')
- @object_base.NovaObjectRegistry.register.register_notification
- class ServiceStatusNotification(notification.NotificationBase):
- # Version 1.0: Initial version
- VERSION = '1.0'
-
- fields = {
- 'payload': fields.ObjectField('ServiceStatusPayload')
- }
-
- @object_base.NovaObjectRegistry.register.register_notification
- class ServiceStatusPayload(notification.NotificationPayloadBase):
- SCHEMA = {
- 'host': ('service', 'host'),
- 'binary': ('service', 'binary'),
- 'topic': ('service', 'topic'),
- 'report_count': ('service', 'report_count'),
- 'disabled': ('service', 'disabled'),
- 'disabled_reason': ('service', 'disabled_reason'),
- 'availability_zone': ('service', 'availability_zone'),
- 'last_seen_up': ('service', 'last_seen_up'),
- 'forced_down': ('service', 'forced_down'),
- 'version': ('service', 'version')
- }
- # Version 1.0: Initial version
- VERSION = '1.0'
- fields = {
- 'host': fields.StringField(nullable=True),
- 'binary': fields.StringField(nullable=True),
- 'topic': fields.StringField(nullable=True),
- 'report_count': fields.IntegerField(),
- 'disabled': fields.BooleanField(),
- 'disabled_reason': fields.StringField(nullable=True),
- 'availability_zone': fields.StringField(nullable=True),
- 'last_seen_up': fields.DateTimeField(nullable=True),
- 'forced_down': fields.BooleanField(),
- 'version': fields.IntegerField(),
- }
-
- def populate_schema(self, service):
- super(ServiceStatusPayload, self).populate_schema(service=service)
-
-If the ``SCHEMA`` field is defined then the payload object needs to be
-populated with the ``populate_schema`` call before it can be emitted::
-
- payload = ServiceStatusPayload()
- payload.populate_schema(service=<nova.object.service.Service object>)
- ServiceStatusNotification(
- publisher=notification.NotificationPublisher.from_service_obj(
- <nova.object.service.Service object>),
- event_type=notification.EventType(
- object='service',
- action=fields.NotificationAction.UPDATE),
- priority=fields.NotificationPriority.INFO,
- payload=payload).emit(context)
-
-The above code will emit the :ref:`already shown notification<service.update>`
-on the wire.
-
-Every item in the ``SCHEMA`` has the syntax of::
-
- <payload field name which needs to be filled>:
- (<name of the parameter of the populate_schema call>,
- <the name of a field of the parameter object>)
-
-The mapping defined in the ``SCHEMA`` field has the following semantics. When
-the ``populate_schema`` function is called the content of the ``SCHEMA`` field
-is enumerated and the value of the field of the pointed parameter object is
-copied to the requested payload field. So in the above example the ``host``
-field of the payload object is populated from the value of the ``host`` field
-of the ``service`` object that is passed as a parameter to the
-``populate_schema`` call.
-
-A notification payload object can reuse fields from multiple existing
-objects. Also a notification can have both new and reused fields in its
-payload.
-
-Note that the notification's publisher instance can be created two different
-ways. It can be created by instantiating the ``NotificationPublisher`` object
-with a ``host`` and a ``source`` string parameter or it can be generated from a
-``Service`` object by calling ``NotificationPublisher.from_service_obj``
-function.
-
-Versioned notifications shall have a sample file stored under
-``doc/sample_notifications`` directory and the notification object shall be
-decorated with the ``notification_sample`` decorator. For example the
-``service.update`` notification has a sample file stored in
-``doc/sample_notifications/service-update.json`` and the
-ServiceUpdateNotification class is decorated accordingly.
-
-Notification payload classes can use inheritance to avoid duplicating common
-payload fragments in nova code. However the leaf classes used directly in a
-notification should be created with care to avoid future needs of adding extra
-level of inheritance that changes the name of the leaf class as that name is
-present in the payload class. If this cannot be avoided and the only change is
-the renaming then the version of the new payload shall be the same as the old
-payload was before the rename. See [1]_ as an example. If the renaming
-involves any other changes on the payload (e.g. adding new fields) then the
-version of the new payload shall be higher than the old payload was. See [2]_
-as an example.
-
-What should be in the notification payload
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-This is just a guideline. You should always consider the actual use case that
-requires the notification.
-
-* Always include the identifier (e.g. uuid) of the entity that can be used to
- query the whole entity over the REST API so that the consumer can get more
- information about the entity.
-* You should consider including those fields that are related to the event
- you are sending the notification about. For example if a change of a field of
- the entity triggers an update notification then you should include the field
- to the payload.
-* An update notification should contain information about what part of the
- entity is changed. Either by filling the nova_object.changes part of the
- payload (note that it is not supported by the notification framework
- currently) or sending both the old state and the new state of the entity in
- the payload.
-* You should never include a nova internal object in the payload. Create a new
- object and use the SCHEMA field to map the internal object to the
- notification payload. This way the evolution of the internal object model
- can be decoupled from the evolution of the notification payload.
-
- .. important:: This does not mean that every field from internal objects
- should be mirrored in the notification payload objects.
- Think about what is actually needed by a consumer before
- adding it to a payload. When in doubt, if no one is requesting
- specific information in notifications, then leave it out until
- someone asks for it.
-
-* The delete notification should contain the same information as the create or
- update notifications. This makes it possible for the consumer to listen only to
- the delete notifications but still filter on some fields of the entity
- (e.g. project_id).
-
-What should **NOT** be in the notification payload
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-* Generally anything that contains sensitive information about the internals
- of the nova deployment, for example fields that contain access credentials
- to a cell database or message queue (see `bug 1823104`_).
-
-.. _bug 1823104: https://bugs.launchpad.net/nova/+bug/1823104
-
-Existing versioned notifications
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. note:: Versioned notifications are added in each release, so the samples
- represented below may not necessarily be in an older version of nova. Ensure
- you are looking at the correct version of the documentation for the release
- you are using.
+   Versioned notifications are added in each release, so some of the samples
+   shown below may not be available in an older version of nova. Ensure you
+   are looking at the correct version of the documentation for the release
+   you are using.
.. This is a reference anchor used in the main index page.
.. _versioned_notification_samples:
.. versioned_notifications::
-
-.. [1] https://review.opendev.org/#/c/463001/
-.. [2] https://review.opendev.org/#/c/453077/
diff --git a/doc/source/reference/stable-api.rst b/doc/source/reference/stable-api.rst
index 462e8e3feb..3a491150a5 100644
--- a/doc/source/reference/stable-api.rst
+++ b/doc/source/reference/stable-api.rst
@@ -29,7 +29,7 @@ Background
Nova used to include two distinct frameworks for exposing REST API
functionality. Older code is called the "v2 API" and existed in the
/nova/api/openstack/compute/legacy_v2/ directory. This code tree was totally
-removed during Netwon release time frame (14.0.0 and later).
+removed during Newton release time frame (14.0.0 and later).
Newer code is called the "v2.1 API" and exists in the
/nova/api/openstack/compute directory.
diff --git a/doc/source/user/block-device-mapping.rst b/doc/source/user/block-device-mapping.rst
index b43f01de8b..361f4bcf01 100644
--- a/doc/source/user/block-device-mapping.rst
+++ b/doc/source/user/block-device-mapping.rst
@@ -49,7 +49,7 @@ When we talk about block device mapping, we usually refer to one of two things
on.
For more details on this please refer to the :doc:`Driver BDM Data
- Structures <../reference/block-device-structs>` refernce document.
+ Structures <../reference/block-device-structs>` reference document.
.. note::
diff --git a/doc/source/user/certificate-validation.rst b/doc/source/user/certificate-validation.rst
index 1140712159..69219a67ed 100644
--- a/doc/source/user/certificate-validation.rst
+++ b/doc/source/user/certificate-validation.rst
@@ -309,7 +309,7 @@ Create the first intermediate certificate
"""""""""""""""""""""""""""""""""""""""""
Create a certificate request for the first intermediate certificate. For these
instructions, we will save the certificate request as
-``cert_intermeidate_a.csr`` and the private key as ``key_intermediate_a.pem``.
+``cert_intermediate_a.csr`` and the private key as ``key_intermediate_a.pem``.
.. code-block:: console
@@ -357,7 +357,7 @@ Create the second intermediate certificate
""""""""""""""""""""""""""""""""""""""""""
Create a certificate request for the second intermediate certificate. For these
instructions, we will save the certificate request as
-``cert_intermeidate_b.csr`` and the private key as ``key_intermediate_b.pem``.
+``cert_intermediate_b.csr`` and the private key as ``key_intermediate_b.pem``.
.. code-block:: console
diff --git a/doc/source/user/feature-matrix-gp.ini b/doc/source/user/feature-matrix-gp.ini
index 179974ddcb..c168adaeb4 100644
--- a/doc/source/user/feature-matrix-gp.ini
+++ b/doc/source/user/feature-matrix-gp.ini
@@ -34,10 +34,6 @@ link=https://wiki.openstack.org/wiki/ThirdPartySystems/IBM_z/VM_CI
title=Ironic CI
link=
-[target.powervm]
-title=IBM PowerVM CI
-link=https://wiki.openstack.org/wiki/ThirdPartySystems/IBM_PowerVM_CI
-
#
# Lists all features
#
@@ -70,7 +66,6 @@ driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is i
vmware=complete
hyperv=complete
ironic=unknown
-powervm=complete
zvm=complete
[operation.snapshot-server]
@@ -90,7 +85,6 @@ driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is i
vmware=unknown
hyperv=unknown
ironic=unknown
-powervm=complete
zvm=complete
[operation.power-ops]
@@ -109,7 +103,6 @@ driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is i
vmware=complete
hyperv=complete
ironic=unknown
-powervm=complete
zvm=complete
[operation.rebuild-server]
@@ -128,7 +121,6 @@ driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is i
vmware=complete
hyperv=complete
ironic=unknown
-powervm=missing
zvm=missing
[operation.resize-server]
@@ -147,7 +139,6 @@ driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is i
vmware=complete
hyperv=complete
ironic=unknown
-powervm=missing
zvm=missing
[operation.server-volume-ops]
@@ -165,9 +156,6 @@ libvirt-virtuozzo-vm=complete
vmware=complete
hyperv=complete
ironic=missing
-powervm=complete
-driver-notes-powervm=This is not tested for every CI run. Add a
- "powervm:volume-check" comment to trigger a CI job running volume tests.
zvm=missing
[operation.server-bdm]
@@ -189,7 +177,6 @@ vmware=partial
driver-notes-vmware=This is not tested in a CI system, but it is implemented.
hyperv=complete:n
ironic=missing
-powervm=missing
zvm=missing
[operation.server-neutron]
@@ -211,7 +198,6 @@ driver-notes-vmware=This is not tested in a CI system, but it is implemented.
hyperv=partial
driver-notes-hyperv=This is not tested in a CI system, but it is implemented.
ironic=missing
-powervm=complete
zvm=partial
driver-notes-zvm=This is not tested in a CI system, but it is implemented.
@@ -232,7 +218,6 @@ vmware=partial
driver-notes-vmware=This is not tested in a CI system, but it is implemented.
hyperv=complete
ironic=missing
-powervm=missing
zvm=complete
[operation.server-suspend]
@@ -252,7 +237,6 @@ driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is i
vmware=complete
hyperv=complete
ironic=missing
-powervm=missing
zvm=missing
[operation.server-consoleoutput]
@@ -272,7 +256,6 @@ driver-notes-vmware=This is not tested in a CI system, but it is implemented.
hyperv=partial
driver-notes-hyperv=This is not tested in a CI system, but it is implemented.
ironic=missing
-powervm=complete
zvm=complete
[operation.server-rescue]
@@ -293,7 +276,6 @@ vmware=complete
hyperv=partial
driver-notes-hyperv=This is not tested in a CI system, but it is implemented.
ironic=missing
-powervm=missing
zvm=missing
[operation.server-configdrive]
@@ -314,7 +296,6 @@ vmware=complete
hyperv=complete
ironic=partial
driver-notes-ironic=This is not tested in a CI system, but it is implemented.
-powervm=complete
zvm=complete
[operation.server-changepassword]
@@ -334,7 +315,6 @@ vmware=missing
hyperv=partial
driver-notes-hyperv=This is not tested in a CI system, but it is implemented.
ironic=missing
-powervm=missing
zvm=missing
[operation.server-shelve]
@@ -354,5 +334,4 @@ libvirt-virtuozzo-vm=complete
vmware=missing
hyperv=complete
ironic=missing
-powervm=complete
zvm=missing
diff --git a/doc/source/user/feature-matrix-hpc.ini b/doc/source/user/feature-matrix-hpc.ini
index e548d55b6e..d370e86066 100644
--- a/doc/source/user/feature-matrix-hpc.ini
+++ b/doc/source/user/feature-matrix-hpc.ini
@@ -26,10 +26,6 @@ link=https://wiki.openstack.org/wiki/ThirdPartySystems/Hyper-V_CI
title=Ironic
link=http://docs.openstack.org/infra/manual/developers.html#project-gating
-[target.powervm]
-title=PowerVM CI
-link=https://wiki.openstack.org/wiki/ThirdPartySystems/IBM_PowerVM_CI
-
[operation.gpu-passthrough]
title=GPU Passthrough
@@ -51,7 +47,6 @@ driver-notes-libvirt-virtuozzo-vm=This is not tested in a CI system, but it is i
vmware=missing
hyperv=missing
ironic=unknown
-powervm=missing
[operation.virtual-gpu]
@@ -67,4 +62,3 @@ libvirt-virtuozzo-vm=unknown
vmware=missing
hyperv=missing
ironic=missing
-powervm=missing
diff --git a/doc/source/user/metadata.rst b/doc/source/user/metadata.rst
index f5f39231ac..65f5bddc96 100644
--- a/doc/source/user/metadata.rst
+++ b/doc/source/user/metadata.rst
@@ -234,6 +234,17 @@ information about the format of the files and subdirectories within these
directories.
+Setting in image
+~~~~~~~~~~~~~~~~
+
+The ``img_config_drive`` image metadata property specifies whether the image
+needs a config drive. It can be used to force-enable the config drive for
+instances booted from the image:
+
+.. code-block:: console
+
+   $ openstack image set IMG-UUID --property img_config_drive=mandatory
+
+
Nova metadata
-------------
diff --git a/doc/source/user/support-matrix.ini b/doc/source/user/support-matrix.ini
index 7ed837787f..ae5bbde110 100644
--- a/doc/source/user/support-matrix.ini
+++ b/doc/source/user/support-matrix.ini
@@ -104,9 +104,6 @@ title=Hyper-V
[driver.ironic]
title=Ironic
-[driver.powervm]
-title=PowerVM
-
[driver.zvm]
title=zVM
@@ -133,9 +130,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=missing
-driver.powervm=complete
-driver-notes.powervm=This is not tested for every CI run. Add a
- "powervm:volume-check" comment to trigger a CI job running volume tests.
driver.zvm=missing
[operation.attach-tagged-volume]
@@ -155,7 +149,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.detach-volume]
@@ -174,9 +167,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=missing
-driver.powervm=complete
-driver-notes.powervm=This is not tested for every CI run. Add a
- "powervm:volume-check" comment to trigger a CI job running volume tests.
driver.zvm=missing
[operation.extend-volume]
@@ -202,9 +192,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=unknown
driver.libvirt-vz-ct=missing
-driver.powervm=complete
-driver-notes.powervm=This is not tested for every CI run. Add a
- "powervm:volume-check" comment to trigger a CI job running volume tests.
driver.zvm=missing
[operation.attach-interface]
@@ -232,7 +219,6 @@ driver-notes.hyperv=Works without issue if instance is off. When
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=complete
driver.zvm=missing
[operation.attach-tagged-interface]
@@ -252,7 +238,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.detach-interface]
@@ -274,7 +259,6 @@ driver-notes.hyperv=Works without issue if instance is off. When
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=complete
driver.zvm=missing
[operation.maintenance-mode]
@@ -299,7 +283,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.evacuate]
@@ -325,7 +308,6 @@ driver.hyperv=unknown
driver.ironic=unknown
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=unknown
[operation.rebuild]
@@ -348,9 +330,28 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=missing
driver.zvm=unknown
+[operation.rebuild-volume-backed]
+title=Rebuild volume backed instance
+status=optional
+notes=This will wipe out all existing data in the root volume
+ of a volume-backed instance. This is available from microversion
+ 2.93 onwards.
+cli=openstack server rebuild --reimage-boot-volume --image <image> <server>
+driver.libvirt-kvm-x86=complete
+driver.libvirt-kvm-aarch64=complete
+driver.libvirt-kvm-ppc64=complete
+driver.libvirt-kvm-s390x=complete
+driver.libvirt-qemu-x86=complete
+driver.libvirt-lxc=unknown
+driver.vmware=missing
+driver.hyperv=missing
+driver.ironic=missing
+driver.libvirt-vz-vm=missing
+driver.libvirt-vz-ct=missing
+driver.zvm=missing
+
[operation.get-guest-info]
title=Guest instance status
status=mandatory
@@ -370,7 +371,6 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=complete
driver.zvm=complete
[operation.get-host-uptime]
@@ -390,7 +390,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=complete
driver.zvm=complete
[operation.get-host-ip]
@@ -410,7 +409,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=complete
driver.zvm=complete
[operation.live-migrate]
@@ -439,7 +437,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=missing
driver.zvm=missing
[operation.force-live-migration-to-complete]
@@ -471,7 +468,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.abort-in-progress-live-migration]
@@ -500,7 +496,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=unknown
driver.libvirt-vz-ct=unknown
-driver.powervm=missing
driver.zvm=missing
[operation.launch]
@@ -521,7 +516,6 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=complete
driver.zvm=complete
[operation.pause]
@@ -548,7 +542,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=complete
[operation.reboot]
@@ -571,7 +564,6 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=complete
driver.zvm=complete
[operation.rescue]
@@ -597,7 +589,6 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=missing
driver.zvm=missing
[operation.resize]
@@ -625,7 +616,6 @@ driver.libvirt-vz-vm=complete
driver-notes.vz-vm=Resizing Virtuozzo instances implies guest filesystem resize also
driver.libvirt-vz-ct=complete
driver-notes.vz-ct=Resizing Virtuozzo instances implies guest filesystem resize also
-driver.powervm=missing
driver.zvm=missing
[operation.resume]
@@ -644,7 +634,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=missing
driver.zvm=missing
[operation.set-admin-password]
@@ -677,7 +666,6 @@ driver.libvirt-vz-vm=complete
driver-notes.libvirt-vz-vm=Requires libvirt>=2.0.0
driver.libvirt-vz-ct=complete
driver-notes.libvirt-vz-ct=Requires libvirt>=2.0.0
-driver.powervm=missing
driver.zvm=missing
[operation.snapshot]
@@ -705,11 +693,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=complete
-driver-notes.powervm=When using the localdisk disk driver, snapshot is only
- supported if I/O is being hosted by the management partition. If hosting I/O
- on traditional VIOS, we are limited by the fact that a VSCSI device can't be
- mapped to two partitions (the VIOS and the management) at once.
driver.zvm=complete
[operation.suspend]
@@ -742,7 +725,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=missing
driver.zvm=missing
[operation.swap-volume]
@@ -768,7 +750,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.terminate]
@@ -794,7 +775,6 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=complete
driver.zvm=complete
[operation.trigger-crash-dump]
@@ -817,7 +797,6 @@ driver.hyperv=missing
driver.ironic=complete
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.unpause]
@@ -836,7 +815,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=missing
driver.zvm=complete
[guest.disk.autoconfig]
@@ -857,7 +835,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=complete
[guest.disk.rate-limit]
@@ -881,7 +858,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[guest.setup.configdrive]
@@ -909,7 +885,6 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=missing
-driver.powervm=complete
driver.zvm=complete
[guest.setup.inject.file]
@@ -936,7 +911,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[guest.setup.inject.networking]
@@ -967,7 +941,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[console.rdp]
@@ -993,7 +966,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[console.serial.log]
@@ -1020,7 +992,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=complete
[console.serial.interactive]
@@ -1048,7 +1019,6 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[console.spice]
@@ -1074,7 +1044,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[console.vnc]
@@ -1100,7 +1069,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=complete
driver.zvm=missing
[storage.block]
@@ -1128,9 +1096,6 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=partial
driver.libvirt-vz-ct=missing
-driver.powervm=complete
-driver-notes.powervm=This is not tested for every CI run. Add a
- "powervm:volume-check" comment to trigger a CI job running volume tests.
driver.zvm=missing
[storage.block.backend.fibrechannel]
@@ -1152,9 +1117,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=missing
-driver.powervm=complete
-driver-notes.powervm=This is not tested for every CI run. Add a
- "powervm:volume-check" comment to trigger a CI job running volume tests.
driver.zvm=missing
[storage.block.backend.iscsi]
@@ -1179,7 +1141,6 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[storage.block.backend.iscsi.auth.chap]
@@ -1201,7 +1162,6 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[storage.image]
@@ -1225,7 +1185,6 @@ driver.hyperv=complete
driver.ironic=complete
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=complete
driver.zvm=complete
[operation.uefi-boot]
@@ -1247,7 +1206,6 @@ driver.ironic=partial
driver-notes.ironic=depends on hardware support
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.device-tags]
@@ -1277,7 +1235,6 @@ driver.hyperv=complete
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=unknown
-driver.powervm=missing
driver.zvm=missing
[operation.quiesce]
@@ -1298,7 +1255,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.unquiesce]
@@ -1317,7 +1273,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.multiattach-volume]
@@ -1341,7 +1296,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.encrypted-volume]
@@ -1371,7 +1325,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=unknown
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.trusted-certs]
@@ -1394,7 +1347,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=missing
driver.zvm=missing
[operation.file-backed-memory]
@@ -1417,7 +1369,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.report-cpu-traits]
@@ -1438,7 +1389,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.port-with-resource-request]
@@ -1461,7 +1411,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.boot-encrypted-vm]
@@ -1487,7 +1436,6 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
[operation.cache-images]
@@ -1510,10 +1458,6 @@ driver.hyperv=partial
driver.ironic=missing
driver.libvirt-vz-vm=complete
driver.libvirt-vz-ct=complete
-driver.powervm=partial
-driver-notes.powervm=The PowerVM driver does image caching natively when using
- the SSP disk driver. It does not use the config options in the [image_cache]
- group.
driver.zvm=missing
[operation.boot-emulated-tpm]
@@ -1537,5 +1481,4 @@ driver.hyperv=missing
driver.ironic=missing
driver.libvirt-vz-vm=missing
driver.libvirt-vz-ct=missing
-driver.powervm=missing
driver.zvm=missing
diff --git a/doc/source/user/wsgi.rst b/doc/source/user/wsgi.rst
index 6b314b4832..63f949df1a 100644
--- a/doc/source/user/wsgi.rst
+++ b/doc/source/user/wsgi.rst
@@ -8,10 +8,16 @@ as Apache_ or nginx_).
The nova project provides two automatically generated entry points that
support this: ``nova-api-wsgi`` and ``nova-metadata-wsgi``. These read
-``nova.conf`` and ``api-paste.ini`` and generate the required module-level
-``application`` that most WSGI servers require. If nova is installed using pip,
-these two scripts will be installed into whatever the expected ``bin``
-directory is for the environment.
+``nova.conf`` and ``api-paste.ini`` by default and generate the required
+module-level ``application`` that most WSGI servers require.
+If nova is installed using pip, these two scripts will be installed into
+whatever the expected ``bin`` directory is for the environment.
+
+The config files and config directory can be overridden via the
+``OS_NOVA_CONFIG_FILES`` and ``OS_NOVA_CONFIG_DIR`` environment variables.
+File paths listed in ``OS_NOVA_CONFIG_FILES`` are relative to
+``OS_NOVA_CONFIG_DIR`` and delimited by ``;``.
+
The new scripts replace older experimental scripts that could be found in the
``nova/wsgi`` directory of the code repository. The new scripts are *not*
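
As a hedged illustration of the documented lookup semantics (this is not the actual entry-point implementation, and the defaults shown are assumptions), paths listed in ``OS_NOVA_CONFIG_FILES`` are split on ``;`` and joined against ``OS_NOVA_CONFIG_DIR``::

    import os

    # Assumed defaults for illustration only; the generated entry points
    # normally read nova.conf and api-paste.ini.
    conf_dir = os.environ.get('OS_NOVA_CONFIG_DIR', '/etc/nova')
    conf_files = os.environ.get('OS_NOVA_CONFIG_FILES',
                                'nova.conf;api-paste.ini')

    # Relative paths are resolved against the config directory and the
    # list is delimited by ';'.
    paths = [os.path.join(conf_dir, name)
             for name in conf_files.split(';') if name]
    print(paths)
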
diff --git a/doc/test/redirect-tests.txt b/doc/test/redirect-tests.txt
index 90ae3fa6b3..2142ffbe94 100644
--- a/doc/test/redirect-tests.txt
+++ b/doc/test/redirect-tests.txt
@@ -1,4 +1,4 @@
-/nova/latest/addmethod.openstackapi.html 301 /nova/latest/contributor/api-2.html
+/nova/latest/addmethod.openstackapi.html 301 /nova/latest/contributor/api.html
/nova/latest/admin/arch.html 301 /nova/latest/admin/architecture.html
/nova/latest/admin/flavors2.html 301 /nova/latest/admin/flavors.html
/nova/latest/admin/quotas2.html 301 /nova/latest/admin/quotas.html
@@ -86,3 +86,4 @@
/nova/latest/admin/port_with_resource_request.html 301 /nova/latest/admin/ports-with-resource-requests.html
/nova/latest/admin/manage-users.html 301 /nova/latest/admin/architecture.html
/nova/latest/admin/mitigation-for-Intel-MDS-security-flaws.html 301 /nova/latest/admin/cpu-models.html
+/nova/latest/contributor/api-2.html 301 /nova/latest/contributor/api.html
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index 7e20eaa7e2..b73a9fea39 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -6,7 +6,7 @@ use = egg:Paste#urlmap
/: meta
[pipeline:meta]
-pipeline = cors metaapp
+pipeline = cors http_proxy_to_wsgi metaapp
[app:metaapp]
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
diff --git a/etc/nova/nova-config-generator.conf b/etc/nova/nova-config-generator.conf
index 3cf4da9f31..8b7fd3bec8 100644
--- a/etc/nova/nova-config-generator.conf
+++ b/etc/nova/nova-config-generator.conf
@@ -3,6 +3,8 @@ output_file = etc/nova/nova.conf.sample
wrap_width = 80
summarize = true
namespace = nova.conf
+# we intentionally exclude oslo.db since we manage these options ourselves
+namespace = oslo.limit
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy
@@ -14,3 +16,4 @@ namespace = oslo.concurrency
namespace = oslo.reports
namespace = keystonemiddleware.auth_token
namespace = osprofiler
+namespace = os_vif
diff --git a/etc/nova/rootwrap.conf b/etc/nova/rootwrap.conf
index c6fd5642b7..25eddbd032 100644
--- a/etc/nova/rootwrap.conf
+++ b/etc/nova/rootwrap.conf
@@ -25,3 +25,9 @@ syslog_log_facility=syslog
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
+
+# Rootwrap daemon exits after this many seconds of inactivity
+daemon_timeout=600
+
+# Rootwrap daemon limits itself to that many file descriptors (Linux only)
+rlimit_nofile=1024
diff --git a/gate/post_test_hook.sh b/gate/post_test_hook.sh
index 28ad9b939e..b788746cef 100755
--- a/gate/post_test_hook.sh
+++ b/gate/post_test_hook.sh
@@ -59,9 +59,7 @@ set -e
purge_db
# We need to get the admin credentials to run the OSC CLIs for Placement.
-set +x
-source $BASE/devstack/openrc admin
-set -x
+export OS_CLOUD=devstack-admin
# Verify whether instances were archived from all cells. Admin credentials are
# needed to list deleted instances across all projects.
@@ -271,3 +269,66 @@ set -e
# Verify whether online data migrations run after archiving will succeed.
# See for more details: https://bugs.launchpad.net/nova/+bug/1824435
$MANAGE db online_data_migrations
+
+
+# Test global registered unified limits by updating registered limits and
+# attempting to create resources. Because these quota limits are global, we
+# can't test them in tempest because modifying global limits can cause other
+# tests running in parallel to fail.
+echo "Testing unified limits registered limits"
+
+# Get the registered limits IDs.
+reglimit_ids_names=$(openstack registered limit list -f value -c "ID" -c "Resource Name")
+
+# Put them in a map to lookup ID from name for subsequent limit set commands.
+# Requires Bash 4.
+declare -A id_name_map
+while read id name
+ do id_name_map["$name"]="$id"
+done <<< "$reglimit_ids_names"
+
+# Server metadata items
+#
+# Set the quota to 1.
+metadata_items_id="${id_name_map["server_metadata_items"]}"
+
+bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME;
+ openstack --os-cloud devstack-system-admin registered limit set \
+ --default-limit 1 $metadata_items_id"
+
+# Create a server. Should succeed with one metadata item.
+openstack --os-compute-api-version 2.37 \
+ server create --image ${image_id} --flavor ${flavor_id} --nic none \
+ --property cool=true --wait metadata-items-test1
+
+# Try to create another server with two metadata items. This should fail.
+set +e
+output=$(openstack --os-compute-api-version 2.37 \
+ server create --image ${image_id} --flavor ${flavor_id} --nic none \
+ --property cool=true --property location=fridge \
+ --wait metadata-items-test2)
+rc=$?
+set -e
+# Return code should be 1 if server create failed.
+if [[ ${rc} -ne 1 ]]; then
+ echo "Expected return code 1 from server create with two metadata items"
+ exit 2
+fi
+# Verify it's a quota error.
+if [[ ! "$output" =~ "HTTP 403" ]]; then
+ echo "Expected HTTP 403 from server create with two metadata items"
+ exit 2
+fi
+
+# Increase the quota limit to two.
+bash -c "unset OS_USERNAME OS_TENANT_NAME OS_PROJECT_NAME;
+ openstack --os-cloud devstack-system-admin registered limit set \
+ --default-limit 2 $metadata_items_id"
+
+# Second server create should succeed now.
+openstack --os-compute-api-version 2.37 \
+ server create --image ${image_id} --flavor ${flavor_id} --nic none \
+ --property cool=true --property location=fridge --wait metadata-items-test2
+
+# Delete the servers.
+openstack server delete metadata-items-test1 metadata-items-test2
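
For readers less used to Bash associative arrays, the name-to-ID lookup built above can also be sketched in Python; the sample output below is invented and only mirrors the two-column ``-f value -c "ID" -c "Resource Name"`` format::

    # Invented sample of the "ID Resource Name" output captured above.
    reglimit_ids_names = """\
    a1b2c3d4 server_metadata_items
    e5f6a7b8 server_injected_files
    """

    id_name_map = {}
    for line in reglimit_ids_names.splitlines():
        if line.strip():
            limit_id, name = line.split(None, 1)
            id_name_map[name] = limit_id

    # Same lookup the script performs before updating the limit.
    metadata_items_id = id_name_map['server_metadata_items']
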
diff --git a/lower-constraints.txt b/lower-constraints.txt
deleted file mode 100644
index c01169ad8b..0000000000
--- a/lower-constraints.txt
+++ /dev/null
@@ -1,165 +0,0 @@
-alembic==1.5.0
-amqp==2.5.0
-appdirs==1.4.3
-asn1crypto==0.24.0
-attrs==17.4.0
-automaton==1.14.0
-bandit==1.1.0
-cachetools==2.0.1
-castellan==0.16.0
-cffi==1.14.0
-cliff==2.11.0
-cmd2==0.8.1
-colorama==0.3.9
-coverage==4.0
-cryptography==2.7
-cursive==0.2.1
-dataclasses==0.7
-ddt==1.2.1
-debtcollector==1.19.0
-decorator==4.1.0
-deprecation==2.0
-dogpile.cache==0.6.5
-enum-compat==0.0.2
-eventlet==0.30.1
-extras==1.0.0
-fasteners==0.14.1
-fixtures==3.0.0
-future==0.16.0
-futurist==1.8.0
-gabbi==1.35.0
-gitdb2==2.0.3
-GitPython==2.1.8
-greenlet==0.4.15
-idna==2.6
-iso8601==0.1.11
-Jinja2==2.10
-jmespath==0.9.3
-jsonpatch==1.21
-jsonpath-rw==1.4.0
-jsonpath-rw-ext==1.1.3
-jsonpointer==2.0
-jsonschema==3.2.0
-keystoneauth1==3.16.0
-keystonemiddleware==4.20.0
-kombu==4.6.1
-linecache2==1.0.0
-lxml==4.5.0
-Mako==1.0.7
-MarkupSafe==1.1.1
-microversion-parse==0.2.1
-mock==3.0.0
-msgpack==0.6.0
-msgpack-python==0.5.6
-munch==2.2.0
-mypy==0.761
-netaddr==0.7.18
-netifaces==0.10.4
-networkx==2.1.0
-numpy==1.19.0
-openstacksdk==0.35.0
-os-brick==4.3.1
-os-client-config==1.29.0
-os-resource-classes==1.1.0
-os-service-types==1.7.0
-os-traits==2.5.0
-os-vif==1.15.2
-os-win==5.4.0
-osc-lib==1.10.0
-oslo.cache==1.26.0
-oslo.concurrency==4.4.0
-oslo.config==8.6.0
-oslo.context==3.1.1
-oslo.db==10.0.0
-oslo.i18n==5.0.1
-oslo.log==4.4.0
-oslo.messaging==10.3.0
-oslo.middleware==3.31.0
-oslo.policy==3.7.0
-oslo.privsep==2.4.0
-oslo.reports==1.18.0
-oslo.rootwrap==5.8.0
-oslo.serialization==4.1.0
-oslo.service==2.5.0
-oslo.upgradecheck==1.3.0
-oslo.utils==4.8.0
-oslo.versionedobjects==1.35.0
-oslo.vmware==3.6.0
-oslotest==3.8.0
-osprofiler==1.4.0
-ovs==2.10.0
-ovsdbapp==0.15.0
-packaging==20.4
-paramiko==2.7.1
-Paste==2.0.2
-PasteDeploy==1.5.0
-pbr==5.5.1
-pluggy==0.6.0
-ply==3.11
-prettytable==0.7.1
-psutil==3.2.2
-psycopg2-binary==2.8
-py==1.5.2
-pyasn1==0.4.2
-pyasn1-modules==0.2.1
-pycadf==2.7.0
-pycparser==2.18
-pyinotify==0.9.6
-pyroute2==0.5.4
-PyJWT==1.7.0
-PyMySQL==0.8.0
-pyOpenSSL==17.5.0
-pyparsing==2.2.0
-pyperclip==1.6.0
-pypowervm==1.1.15
-pytest==3.4.2
-python-barbicanclient==4.5.2
-python-cinderclient==3.3.0
-python-dateutil==2.7.0
-python-editor==1.0.3
-python-glanceclient==2.8.0
-python-ironicclient==3.0.0
-python-keystoneclient==3.15.0
-python-mimeparse==1.6.0
-python-neutronclient==7.1.0
-python-subunit==1.4.0
-pytz==2018.3
-PyYAML==5.1
-repoze.lru==0.7
-requests==2.25.1
-requests-mock==1.2.0
-requestsexceptions==1.4.0
-retrying==1.3.3
-rfc3986==1.2.0
-Routes==2.3.1
-simplejson==3.13.2
-six==1.15.0
-smmap2==2.0.3
-sortedcontainers==2.1.0
-SQLAlchemy==1.4.13
-sqlalchemy-migrate==0.13.0
-sqlparse==0.2.4
-statsd==3.2.2
-stestr==2.0.0
-stevedore==1.20.0
-suds-jurko==0.6
-taskflow==3.8.0
-Tempita==0.5.2
-tenacity==6.3.1
-testrepository==0.0.20
-testresources==2.0.0
-testscenarios==0.4
-testtools==2.2.0
-tooz==1.58.0
-traceback2==1.4.0
-types-paramiko==0.1.3
-unittest2==1.1.0
-urllib3==1.22
-vine==1.1.4
-voluptuous==0.11.1
-warlock==1.3.1
-WebOb==1.8.2
-websockify==0.9.0
-wrapt==1.10.11
-wsgi-intercept==1.7.0
-zVMCloudConnector==1.3.0
diff --git a/mypy-files.txt b/mypy-files.txt
index 898eee25c7..391ed58d87 100644
--- a/mypy-files.txt
+++ b/mypy-files.txt
@@ -1,5 +1,9 @@
nova/compute/manager.py
+nova/compute/pci_placement_translator.py
nova/crypto.py
+nova/filesystem.py
+nova/limit/local.py
+nova/limit/placement.py
nova/network/neutron.py
nova/pci
nova/privsep/path.py
@@ -10,6 +14,9 @@ nova/virt/driver.py
nova/virt/hardware.py
nova/virt/libvirt/machine_type_utils.py
nova/virt/libvirt/__init__.py
+nova/virt/libvirt/cpu/__init__.py
+nova/virt/libvirt/cpu/api.py
+nova/virt/libvirt/cpu/core.py
nova/virt/libvirt/driver.py
nova/virt/libvirt/event.py
nova/virt/libvirt/guest.py
diff --git a/nova/api/openstack/api_version_request.py b/nova/api/openstack/api_version_request.py
index 6c205fbae9..718ac7e8e6 100644
--- a/nova/api/openstack/api_version_request.py
+++ b/nova/api/openstack/api_version_request.py
@@ -235,7 +235,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
``POST /flavors/{flavor_id}/os-extra_specs`` and
``PUT /flavors/{flavor_id}/os-extra_specs/{id}`` APIs.
* 2.87 - Adds support for rescuing boot from volume instances when the
- compute host reports the COMPUTE_BFV_RESCUE capability trait.
+ compute host reports the COMPUTE_RESCUE_BFV capability trait.
* 2.88 - Drop statistics-style fields from the ``/os-hypervisors/detail``
and ``/os-hypervisors/{hypervisor_id}`` APIs, and remove the
``/os-hypervisors/statistics`` and
@@ -247,6 +247,14 @@ REST_API_VERSION_HISTORY = """REST API Version History:
updating or rebuilding an instance. The
``OS-EXT-SRV-ATTR:hostname`` attribute is now returned in various
server responses regardless of policy configuration.
+    * 2.91 - Add support to unshelve an instance to a specific host and
+ to pin/unpin AZ.
+ * 2.92 - Drop generation of keypair, add keypair name validation on
+ ``POST /os-keypairs`` and allow including @ and dot (.) characters
+ in keypair name.
+ * 2.93 - Add support for volume backed server rebuild.
+ * 2.94 - Allow FQDN in server hostname.
+ * 2.95 - Evacuate will now stop instance at destination.
"""
# The minimum and maximum versions of the API supported
@@ -255,7 +263,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = '2.1'
-_MAX_API_VERSION = '2.90'
+_MAX_API_VERSION = '2.95'
DEFAULT_API_VERSION = _MIN_API_VERSION
# Almost all proxy APIs which are related to network, images and baremetal
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 36c0078ebe..f51078eb39 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -50,7 +50,7 @@ class NoAuthMiddlewareBase(base_wsgi.Middleware):
token = req.headers['X-Auth-Token']
user_id, _sep, project_id = token.partition(':')
project_id = project_id or user_id
- remote_address = getattr(req, 'remote_address', '127.0.0.1')
+ remote_address = getattr(req, 'remote_addr', '127.0.0.1')
if CONF.api.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
is_admin = always_admin or (user_id == 'admin')
diff --git a/nova/api/openstack/compute/assisted_volume_snapshots.py b/nova/api/openstack/compute/assisted_volume_snapshots.py
index ea6ebc8359..ae7213884b 100644
--- a/nova/api/openstack/compute/assisted_volume_snapshots.py
+++ b/nova/api/openstack/compute/assisted_volume_snapshots.py
@@ -39,6 +39,11 @@ class AssistedVolumeSnapshotsController(wsgi.Controller):
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['nova.context']
+ # NOTE(gmann) We pass empty target to policy enforcement. This API
+ # is called by cinder which does not have correct project_id.
+ # By passing the empty target, we make sure that we do not check
+ # the requester project_id and allow users with
+ # allowed role to create snapshot.
context.can(avs_policies.POLICY_ROOT % 'create', target={})
snapshot = body['snapshot']
@@ -69,6 +74,11 @@ class AssistedVolumeSnapshotsController(wsgi.Controller):
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['nova.context']
+ # NOTE(gmann) We pass empty target to policy enforcement. This API
+ # is called by cinder which does not have correct project_id.
+ # By passing the empty target, we make sure that we do not check
+ # the requester project_id and allow users with allowed role to
+ # delete snapshot.
context.can(avs_policies.POLICY_ROOT % 'delete', target={})
delete_metadata = {}
diff --git a/nova/api/openstack/compute/attach_interfaces.py b/nova/api/openstack/compute/attach_interfaces.py
index 6a24a60959..34edf30cb6 100644
--- a/nova/api/openstack/compute/attach_interfaces.py
+++ b/nova/api/openstack/compute/attach_interfaces.py
@@ -178,6 +178,7 @@ class InterfaceAttachmentController(wsgi.Controller):
exception.InterfaceAttachPciClaimFailed,
exception.InterfaceAttachResourceAllocationFailed,
exception.ForbiddenPortsWithAccelerator,
+ exception.ForbiddenWithRemoteManagedPorts,
exception.ExtendedResourceRequestOldCompute,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
diff --git a/nova/api/openstack/compute/console_auth_tokens.py b/nova/api/openstack/compute/console_auth_tokens.py
index ae838fe857..810eb00ca3 100644
--- a/nova/api/openstack/compute/console_auth_tokens.py
+++ b/nova/api/openstack/compute/console_auth_tokens.py
@@ -30,7 +30,7 @@ class ConsoleAuthTokensController(wsgi.Controller):
def _show(self, req, id, rdp_only):
"""Checks a console auth token and returns the related connect info."""
context = req.environ['nova.context']
- context.can(cat_policies.BASE_POLICY_NAME, target={})
+ context.can(cat_policies.BASE_POLICY_NAME)
token = id
if not token:
diff --git a/nova/api/openstack/compute/deferred_delete.py b/nova/api/openstack/compute/deferred_delete.py
index 55879267ff..b3f461cca4 100644
--- a/nova/api/openstack/compute/deferred_delete.py
+++ b/nova/api/openstack/compute/deferred_delete.py
@@ -40,7 +40,7 @@ class DeferredDeleteController(wsgi.Controller):
target={'project_id': instance.project_id})
try:
self.compute_api.restore(context, instance)
- except exception.QuotaError as error:
+ except exception.OverQuota as error:
raise webob.exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
diff --git a/nova/api/openstack/compute/evacuate.py b/nova/api/openstack/compute/evacuate.py
index aa35812759..a6602be079 100644
--- a/nova/api/openstack/compute/evacuate.py
+++ b/nova/api/openstack/compute/evacuate.py
@@ -23,9 +23,11 @@ from nova.api.openstack.compute.schemas import evacuate
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute
+from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
+from nova import objects
from nova.policies import evacuate as evac_policies
from nova import utils
@@ -33,6 +35,8 @@ CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
+MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED = 62
+
class EvacuateController(wsgi.Controller):
def __init__(self):
@@ -77,7 +81,8 @@ class EvacuateController(wsgi.Controller):
@validation.schema(evacuate.evacuate, "2.0", "2.13")
@validation.schema(evacuate.evacuate_v214, "2.14", "2.28")
@validation.schema(evacuate.evacuate_v2_29, "2.29", "2.67")
- @validation.schema(evacuate.evacuate_v2_68, "2.68")
+ @validation.schema(evacuate.evacuate_v2_68, "2.68", "2.94")
+ @validation.schema(evacuate.evacuate_v2_95, "2.95")
def _evacuate(self, req, id, body):
"""Permit admins to evacuate a server from a failed host
to a new one.
@@ -92,6 +97,19 @@ class EvacuateController(wsgi.Controller):
host = evacuate_body.get("host")
force = None
+ target_state = None
+ if api_version_request.is_supported(req, min_version='2.95'):
+ min_ver = objects.service.get_minimum_version_all_cells(
+ context, ['nova-compute'])
+ if min_ver < MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED:
+ raise exception.NotSupportedComputeForEvacuateV295(
+ {'currently': min_ver,
+ 'expected': MIN_VER_NOVA_COMPUTE_EVACUATE_STOPPED})
+            # Starting with 2.95 any evacuated instance will be stopped at
+            # the destination. Previously an active or stopped instance would
+            # have kept its state.
+ target_state = vm_states.STOPPED
+
on_shared_storage = self._get_on_shared_storage(req, evacuate_body)
if api_version_request.is_supported(req, min_version='2.29'):
@@ -120,7 +138,8 @@ class EvacuateController(wsgi.Controller):
try:
self.compute_api.evacuate(context, instance, host,
- on_shared_storage, password, force)
+ on_shared_storage, password, force,
+ target_state)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'evacuate', id)
@@ -130,6 +149,8 @@ class EvacuateController(wsgi.Controller):
exception.ExtendedResourceRequestOldCompute,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
+ except exception.UnsupportedRPCVersion as e:
+ raise exc.HTTPConflict(explanation=e.format_message())
if (not api_version_request.is_supported(req, min_version='2.14') and
CONF.api.enable_instance_password):
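
To make the behaviour change concrete, here is a rough sketch of an evacuate request pinned to microversion 2.95 (the endpoint, token, server id and destination host are placeholders, and the ``requests`` usage is illustrative rather than the recommended client); at this version the evacuated instance ends up stopped on the destination::

    import requests

    # Placeholder values for illustration only.
    compute_url = 'http://controller:8774/v2.1'
    server_id = '11111111-2222-3333-4444-555555555555'

    resp = requests.post(
        f'{compute_url}/servers/{server_id}/action',
        headers={
            'X-Auth-Token': '<admin-token>',
            # Opt in to the 2.95 behaviour; older microversions keep the
            # previous power state of the evacuated instance.
            'OpenStack-API-Version': 'compute 2.95',
        },
        json={'evacuate': {'host': 'dest-compute-01'}},
    )
    resp.raise_for_status()
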
diff --git a/nova/api/openstack/compute/flavor_access.py b/nova/api/openstack/compute/flavor_access.py
index e17e6f0ddc..fc8df15db5 100644
--- a/nova/api/openstack/compute/flavor_access.py
+++ b/nova/api/openstack/compute/flavor_access.py
@@ -93,7 +93,14 @@ class FlavorActionController(wsgi.Controller):
vals = body['removeTenantAccess']
tenant = vals['tenant']
- identity.verify_project_id(context, tenant)
+        # It doesn't really matter whether the project exists or not: we can
+        # delete it from the flavor's access list in either case.
+ try:
+ identity.verify_project_id(context, tenant)
+ except webob.exc.HTTPBadRequest as identity_exc:
+ msg = "Project ID %s is not a valid project." % tenant
+ if msg not in identity_exc.explanation:
+ raise
# NOTE(gibi): We have to load a flavor from the db here as
# flavor.remove_access() will try to emit a notification and that needs
diff --git a/nova/api/openstack/compute/keypairs.py b/nova/api/openstack/compute/keypairs.py
index 1fa1684322..40b702bdb5 100644
--- a/nova/api/openstack/compute/keypairs.py
+++ b/nova/api/openstack/compute/keypairs.py
@@ -26,7 +26,6 @@ from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
-from nova.i18n import _
from nova.objects import keypair as keypair_obj
from nova.policies import keypairs as kp_policies
@@ -44,15 +43,19 @@ class KeypairController(wsgi.Controller):
@wsgi.Controller.api_version("2.10")
@wsgi.response(201)
@wsgi.expected_errors((400, 403, 409))
- @validation.schema(keypairs.create_v210)
+ @validation.schema(keypairs.create_v210, "2.10", "2.91")
+ @validation.schema(keypairs.create_v292, "2.92")
def create(self, req, body):
"""Create or import keypair.
+        Keypair generation is allowed until version 2.91.
+ Afterwards, only imports are allowed.
+
A policy check restricts users from creating keys for other users
params: keypair object with:
name (required) - string
- public_key (optional) - string
+ public_key (optional or required if >=2.92) - string
type (optional) - string
user_id (optional) - string
"""
@@ -115,13 +118,14 @@ class KeypairController(wsgi.Controller):
context, user_id, name, params['public_key'],
key_type_value)
else:
+ # public_key is a required field starting with 2.92 so this
+ # generation should only happen with older versions.
keypair, private_key = self.api.create_key_pair(
context, user_id, name, key_type_value)
keypair['private_key'] = private_key
return_priv_key = True
- except exception.KeypairLimitExceeded:
- msg = _("Quota exceeded, too many key pairs.")
- raise webob.exc.HTTPForbidden(explanation=msg)
+ except exception.KeypairLimitExceeded as e:
+ raise webob.exc.HTTPForbidden(explanation=str(e))
except exception.InvalidKeypair as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.KeyPairExists as exc:
diff --git a/nova/api/openstack/compute/limits.py b/nova/api/openstack/compute/limits.py
index e6f901e09d..755a6c3656 100644
--- a/nova/api/openstack/compute/limits.py
+++ b/nova/api/openstack/compute/limits.py
@@ -78,8 +78,7 @@ class LimitsController(wsgi.Controller):
project_id = context.project_id
if 'tenant_id' in req.GET:
project_id = req.GET.get('tenant_id')
- context.can(limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME,
- target={'project_id': project_id})
+ context.can(limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME)
quotas = QUOTAS.get_project_quotas(context, project_id,
usages=True)
diff --git a/nova/api/openstack/compute/migrate_server.py b/nova/api/openstack/compute/migrate_server.py
index 855f51a7c6..59b9c384df 100644
--- a/nova/api/openstack/compute/migrate_server.py
+++ b/nova/api/openstack/compute/migrate_server.py
@@ -57,7 +57,7 @@ class MigrateServerController(wsgi.Controller):
try:
self.compute_api.resize(req.environ['nova.context'], instance,
host_name=host_name)
- except (exception.TooManyInstances, exception.QuotaError) as e:
+ except exception.OverQuota as e:
raise exc.HTTPForbidden(explanation=e.format_message())
except (
exception.InstanceIsLocked,
diff --git a/nova/api/openstack/compute/migrations.py b/nova/api/openstack/compute/migrations.py
index cb97a1498a..31287b7393 100644
--- a/nova/api/openstack/compute/migrations.py
+++ b/nova/api/openstack/compute/migrations.py
@@ -89,7 +89,7 @@ class MigrationsController(wsgi.Controller):
sort_dirs=None, sort_keys=None, limit=None, marker=None,
allow_changes_since=False, allow_changes_before=False):
context = req.environ['nova.context']
- context.can(migrations_policies.POLICY_ROOT % 'index', target={})
+ context.can(migrations_policies.POLICY_ROOT % 'index')
search_opts = {}
search_opts.update(req.GET)
if 'changes-since' in search_opts:
diff --git a/nova/api/openstack/compute/quota_classes.py b/nova/api/openstack/compute/quota_classes.py
index 741ce5f848..942fa71f32 100644
--- a/nova/api/openstack/compute/quota_classes.py
+++ b/nova/api/openstack/compute/quota_classes.py
@@ -20,6 +20,7 @@ from nova.api.openstack.compute.schemas import quota_classes
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
+from nova.limit import utils as limit_utils
from nova import objects
from nova.policies import quota_class_sets as qcs_policies
from nova import quota
@@ -129,11 +130,20 @@ class QuotaClassSetsController(wsgi.Controller):
quota_class = id
- for key, value in body['quota_class_set'].items():
- try:
- objects.Quotas.update_class(context, quota_class, key, value)
- except exception.QuotaClassNotFound:
- objects.Quotas.create_class(context, quota_class, key, value)
+ quota_updates = body['quota_class_set'].items()
+ # TODO(johngarbutt) eventually cores, ram and instances changes will
+ # get sent to keystone when using unified limits, but only when the
+ # quota_class == "default".
+ if not limit_utils.use_unified_limits():
+            # When not using unified limits, keep updating the database, even
+            # though the noop driver doesn't read these values.
+ for key, value in quota_updates:
+ try:
+ objects.Quotas.update_class(
+ context, quota_class, key, value)
+ except exception.QuotaClassNotFound:
+ objects.Quotas.create_class(
+ context, quota_class, key, value)
values = QUOTAS.get_class_quotas(context, quota_class)
return self._format_quota_set(None, values, filtered_quotas,
diff --git a/nova/api/openstack/compute/quota_sets.py b/nova/api/openstack/compute/quota_sets.py
index d955e1b156..fbd4d335b6 100644
--- a/nova/api/openstack/compute/quota_sets.py
+++ b/nova/api/openstack/compute/quota_sets.py
@@ -29,6 +29,7 @@ from nova.api import validation
import nova.conf
from nova import exception
from nova.i18n import _
+from nova.limit import utils as limit_utils
from nova import objects
from nova.policies import quota_sets as qs_policies
from nova import quota
@@ -205,10 +206,16 @@ class QuotaSetsController(wsgi.Controller):
settable_quotas = QUOTAS.get_settable_quotas(context, project_id,
user_id=user_id)
+ requested_quotas = body['quota_set'].items()
+ if limit_utils.use_unified_limits():
+ # NOTE(johngarbutt) currently all info comes from keystone
+ # we don't update the database.
+ requested_quotas = []
+
# NOTE(dims): Pass #1 - In this loop for quota_set.items(), we validate
# min/max values and bail out if any of the items in the set is bad.
valid_quotas = {}
- for key, value in body['quota_set'].items():
+ for key, value in requested_quotas:
if key == 'force' or (not value and value != 0):
continue
# validate whether already used and reserved exceeds the new
@@ -276,8 +283,12 @@ class QuotaSetsController(wsgi.Controller):
context.can(qs_policies.POLICY_ROOT % 'delete', {'project_id': id})
params = urlparse.parse_qs(req.environ.get('QUERY_STRING', ''))
user_id = params.get('user_id', [None])[0]
- if user_id:
- objects.Quotas.destroy_all_by_project_and_user(
- context, id, user_id)
- else:
- objects.Quotas.destroy_all_by_project(context, id)
+
+ # NOTE(johngarbutt) with unified limits we only use keystone, not the
+ # db
+ if not limit_utils.use_unified_limits():
+ if user_id:
+ objects.Quotas.destroy_all_by_project_and_user(
+ context, id, user_id)
+ else:
+ objects.Quotas.destroy_all_by_project(context, id)
diff --git a/nova/api/openstack/compute/remote_consoles.py b/nova/api/openstack/compute/remote_consoles.py
index 36015542aa..7d374ef432 100644
--- a/nova/api/openstack/compute/remote_consoles.py
+++ b/nova/api/openstack/compute/remote_consoles.py
@@ -56,6 +56,9 @@ class RemoteConsolesController(wsgi.Controller):
raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceNotReady as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
+ except exception.InstanceInvalidState as e:
+ common.raise_http_conflict_for_instance_invalid_state(
+ e, 'get_vnc_console', id)
except NotImplementedError:
common.raise_feature_not_supported()
diff --git a/nova/api/openstack/compute/rest_api_version_history.rst b/nova/api/openstack/compute/rest_api_version_history.rst
index d7a74870f9..c7a2777d3a 100644
--- a/nova/api/openstack/compute/rest_api_version_history.rst
+++ b/nova/api/openstack/compute/rest_api_version_history.rst
@@ -1141,7 +1141,7 @@ Validation is only used for recognized extra spec namespaces, currently:
-------------------------------------
Adds support for rescuing boot from volume instances when the compute host
-reports the ``COMPUTE_BFV_RESCUE`` capability trait.
+reports the ``COMPUTE_RESCUE_BFV`` capability trait.
.. _microversion 2.88:
@@ -1191,8 +1191,8 @@ field.
.. _microversion 2.90:
-2.90 (Maximum in Xena)
-----------------------
+2.90 (Maximum in Xena and Yoga)
+-------------------------------
The ``POST /servers`` (create server), ``PUT /servers/{id}`` (update server)
and ``POST /servers/{server_id}/action (rebuild)`` (rebuild server) APIs now
@@ -1202,3 +1202,48 @@ hostname based on the display name.
In addition, the ``OS-EXT-SRV-ATTR:hostname`` field for all server
responses is now visible to all users. Previously this was an admin-only field.
+
+.. _microversion 2.91:
+
+2.91
+----
+
+Add support to unshelve an instance to a specific host.
+
+Add support to pin a server to an availability zone or unpin a server from any availability zone.
+
+.. _microversion 2.92:
+
+2.92
+----
+
+The ``POST /os-keypairs`` API no longer allows generating a keypair and now
+accepts additional safe characters in keypair names, specifically '@' and
+'.' (dot character).
+
+.. _microversion 2.93:
+
+2.93 (Maximum in Zed)
+---------------------
+
+Add support for volume backed server rebuild. The end user provides the
+image with the rebuild command and the volume is reimaged with the new
+image, similar to the result of rebuilding an ephemeral disk.
+
+
+2.94
+----
+
+The ``hostname`` parameter to the ``POST /servers`` (create server), ``PUT
+/servers/{id}`` (update server) and ``POST /servers/{server_id}/action
+(rebuild)`` (rebuild server) APIs is now allowed to be a Fully Qualified Domain
+Name (FQDN).
+
+.. _microversion 2.95:
+
+2.95 (Maximum in 2023.1 Antelope)
+---------------------------------
+
+Any evacuated instance will now be stopped at the destination. This
+requires a minimum nova release of 27.0.0, OpenStack release 2023.1
+Antelope. Operators can still use a previous microversion for the older
+behavior.
diff --git a/nova/api/openstack/compute/schemas/evacuate.py b/nova/api/openstack/compute/schemas/evacuate.py
index a415a97f89..c7b84a655e 100644
--- a/nova/api/openstack/compute/schemas/evacuate.py
+++ b/nova/api/openstack/compute/schemas/evacuate.py
@@ -46,3 +46,7 @@ evacuate_v2_29['properties']['evacuate']['properties'][
# v2.68 removes the 'force' parameter added in v2.29, meaning it is identical
# to v2.14
evacuate_v2_68 = copy.deepcopy(evacuate_v214)
+
+# v2.95 keeps the same schema; evacuating an instance will now result in it
+# being stopped at the destination.
+evacuate_v2_95 = copy.deepcopy(evacuate_v2_68)
diff --git a/nova/api/openstack/compute/schemas/keypairs.py b/nova/api/openstack/compute/schemas/keypairs.py
index 7ebd3c7433..74b992c3e3 100644
--- a/nova/api/openstack/compute/schemas/keypairs.py
+++ b/nova/api/openstack/compute/schemas/keypairs.py
@@ -23,7 +23,7 @@ create = {
'keypair': {
'type': 'object',
'properties': {
- 'name': parameter_types.name,
+ 'name': parameter_types.keypair_name_special_chars,
'public_key': {'type': 'string'},
},
'required': ['name'],
@@ -46,7 +46,7 @@ create_v22 = {
'keypair': {
'type': 'object',
'properties': {
- 'name': parameter_types.name,
+ 'name': parameter_types.keypair_name_special_chars,
'type': {
'type': 'string',
'enum': ['ssh', 'x509']
@@ -67,7 +67,7 @@ create_v210 = {
'keypair': {
'type': 'object',
'properties': {
- 'name': parameter_types.name,
+ 'name': parameter_types.keypair_name_special_chars,
'type': {
'type': 'string',
'enum': ['ssh', 'x509']
@@ -83,6 +83,11 @@ create_v210 = {
'additionalProperties': False,
}
+create_v292 = copy.deepcopy(create_v210)
+create_v292['properties']['keypair']['properties']['name'] = (parameter_types.
+ keypair_name_special_chars_292)
+create_v292['properties']['keypair']['required'] = ['name', 'public_key']
+
index_query_schema_v20 = {
'type': 'object',
'properties': {},
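
As a quick illustration of what the new ``create_v292`` schema accepts (all values here are made up), a 2.92 request body must carry ``public_key`` and may use '@' and '.' in the keypair name::

    # Hypothetical body for POST /os-keypairs at microversion 2.92.
    keypair_import_body = {
        'keypair': {
            'name': 'alice@example.com',
            'type': 'ssh',
            # Key generation is gone in 2.92, so public_key is required.
            'public_key': 'ssh-ed25519 AAAAC3Nza... alice@example.com',
        }
    }
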
diff --git a/nova/api/openstack/compute/schemas/server_external_events.py b/nova/api/openstack/compute/schemas/server_external_events.py
index b8a89e047d..6ac3f009ec 100644
--- a/nova/api/openstack/compute/schemas/server_external_events.py
+++ b/nova/api/openstack/compute/schemas/server_external_events.py
@@ -63,3 +63,7 @@ name['enum'].append('power-update')
create_v282 = copy.deepcopy(create_v276)
name = create_v282['properties']['events']['items']['properties']['name']
name['enum'].append('accelerator-request-bound')
+
+create_v293 = copy.deepcopy(create_v282)
+name = create_v293['properties']['events']['items']['properties']['name']
+name['enum'].append('volume-reimaged')
diff --git a/nova/api/openstack/compute/schemas/servers.py b/nova/api/openstack/compute/schemas/servers.py
index 300411de40..0869f83434 100644
--- a/nova/api/openstack/compute/schemas/servers.py
+++ b/nova/api/openstack/compute/schemas/servers.py
@@ -360,6 +360,11 @@ create_v290 = copy.deepcopy(create_v274)
create_v290['properties']['server'][
'properties']['hostname'] = parameter_types.hostname
+# Support FQDN as hostname
+create_v294 = copy.deepcopy(create_v290)
+create_v294['properties']['server'][
+ 'properties']['hostname'] = parameter_types.fqdn
+
update = {
'type': 'object',
'properties': {
@@ -391,6 +396,11 @@ update_v290 = copy.deepcopy(update_v219)
update_v290['properties']['server'][
'properties']['hostname'] = parameter_types.hostname
+
+update_v294 = copy.deepcopy(update_v290)
+update_v294['properties']['server'][
+ 'properties']['hostname'] = parameter_types.fqdn
+
rebuild = {
'type': 'object',
'properties': {
@@ -449,6 +459,10 @@ rebuild_v290 = copy.deepcopy(rebuild_v263)
rebuild_v290['properties']['rebuild']['properties'][
'hostname'] = parameter_types.hostname
+rebuild_v294 = copy.deepcopy(rebuild_v290)
+rebuild_v294['properties']['rebuild']['properties'][
+ 'hostname'] = parameter_types.fqdn
+
resize = {
'type': 'object',
diff --git a/nova/api/openstack/compute/schemas/shelve.py b/nova/api/openstack/compute/schemas/shelve.py
index e8d2f1c240..4653338126 100644
--- a/nova/api/openstack/compute/schemas/shelve.py
+++ b/nova/api/openstack/compute/schemas/shelve.py
@@ -15,7 +15,7 @@
from nova.api.validation import parameter_types
# NOTE(brinzhang): For older microversion there will be no change as
-# schema is applied only for >2.77 with unshelve a server API.
+# the schema is applied only below version 2.91 of the unshelve server API.
# Anything working in old version keep working as it is.
unshelve_v277 = {
'type': 'object',
@@ -35,3 +35,55 @@ unshelve_v277 = {
'required': ['unshelve'],
'additionalProperties': False,
}
+
+# NOTE(rribaud):
+# this schema is applied only for version >= 2.91 of the unshelve server API.
+# Add the host parameter to unshelve to a specific host.
+#
+# Schema has been redefined for better clarity instead of extend 2.77.
+#
+# API can be called with the following body:
+#
+# - {"unshelve": null} (Keep compatibility with previous microversions)
+#
+# or
+#
+# - {"unshelve": {"availability_zone": <string>}}
+# - {"unshelve": {"availability_zone": null}} (Unpin availability zone)
+# - {"unshelve": {"host": <fqdn>}}
+# - {"unshelve": {"availability_zone": <string>, "host": <fqdn>}}
+# - {"unshelve": {"availability_zone": null, "host": <fqdn>}}
+#
+#
+# Everything else is not allowed, examples:
+#
+# - {"unshelve": {}}
+# - {"unshelve": {"host": <fqdn>, "host": <fqdn>}}
+# - {"unshelve": {"foo": <string>}}
+
+unshelve_v291 = {
+ "type": "object",
+ "properties": {
+ "unshelve": {
+ "oneOf": [
+ {
+ "type": ["object"],
+ "properties": {
+ "availability_zone": {
+ "oneOf": [
+ {"type": ["null"]},
+ {"type": "string"}]
+ },
+ "host": {
+ "type": "string"
+ }
+ },
+ "additionalProperties": False
+ },
+ {"type": ["null"]}
+ ]
+ }
+ },
+ "required": ["unshelve"],
+ "additionalProperties": False
+}
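
The accepted and rejected bodies enumerated in the comment block above can be exercised mechanically; a minimal sketch, assuming both the ``jsonschema`` package and nova itself are importable in the environment::

    import jsonschema

    from nova.api.openstack.compute.schemas.shelve import unshelve_v291

    # Shapes the 2.91 schema accepts.
    jsonschema.validate({'unshelve': None}, unshelve_v291)
    jsonschema.validate({'unshelve': {'host': 'host01.example.com'}},
                        unshelve_v291)
    jsonschema.validate({'unshelve': {'availability_zone': None,
                                      'host': 'host01.example.com'}},
                        unshelve_v291)

    # A shape the schema rejects (unknown property).
    try:
        jsonschema.validate({'unshelve': {'foo': 'bar'}}, unshelve_v291)
    except jsonschema.exceptions.ValidationError:
        pass
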
diff --git a/nova/api/openstack/compute/server_external_events.py b/nova/api/openstack/compute/server_external_events.py
index 162bd93697..23813d5790 100644
--- a/nova/api/openstack/compute/server_external_events.py
+++ b/nova/api/openstack/compute/server_external_events.py
@@ -69,10 +69,16 @@ class ServerExternalEventsController(wsgi.Controller):
@validation.schema(server_external_events.create, '2.0', '2.50')
@validation.schema(server_external_events.create_v251, '2.51', '2.75')
@validation.schema(server_external_events.create_v276, '2.76', '2.81')
- @validation.schema(server_external_events.create_v282, '2.82')
+ @validation.schema(server_external_events.create_v282, '2.82', '2.92')
+ @validation.schema(server_external_events.create_v293, '2.93')
def create(self, req, body):
"""Creates a new instance event."""
context = req.environ['nova.context']
+ # NOTE(gmann) We pass empty target to policy enforcement. This API
+ # is called by neutron which does not have correct project_id where
+ # server belongs to. By passing the empty target, we make sure that
+ # we do not check the requester project_id and allow users with
+ # allowed role to create external event.
context.can(see_policies.POLICY_ROOT % 'create', target={})
response_events = []
diff --git a/nova/api/openstack/compute/server_groups.py b/nova/api/openstack/compute/server_groups.py
index f490eaabb5..5d3ea97895 100644
--- a/nova/api/openstack/compute/server_groups.py
+++ b/nova/api/openstack/compute/server_groups.py
@@ -30,6 +30,7 @@ import nova.conf
from nova import context as nova_context
import nova.exception
from nova.i18n import _
+from nova.limit import local as local_limit
from nova import objects
from nova.objects import service
from nova.policies import server_groups as sg_policies
@@ -167,7 +168,7 @@ class ServerGroupController(wsgi.Controller):
# new defaults completly then we can remove the above check.
# Until then, let's keep the old behaviour.
context.can(sg_policies.POLICY_ROOT % 'index:all_projects',
- target={})
+ target={'project_id': project_id})
sgs = objects.InstanceGroupList.get_all(context)
else:
sgs = objects.InstanceGroupList.get_by_project_id(
@@ -191,6 +192,10 @@ class ServerGroupController(wsgi.Controller):
try:
objects.Quotas.check_deltas(context, {'server_groups': 1},
project_id, context.user_id)
+ local_limit.enforce_db_limit(context, local_limit.SERVER_GROUPS,
+ entity_scope=project_id, delta=1)
+ except nova.exception.ServerGroupLimitExceeded as e:
+ raise exc.HTTPForbidden(explanation=str(e))
except nova.exception.OverQuota:
msg = _("Quota exceeded, too many server groups.")
raise exc.HTTPForbidden(explanation=msg)
@@ -231,6 +236,16 @@ class ServerGroupController(wsgi.Controller):
objects.Quotas.check_deltas(context, {'server_groups': 0},
project_id,
context.user_id)
+ # TODO(johngarbutt): decide if we need this recheck
+ # The quota rechecking of limits is really just to protect
+ # against denial of service attacks that aim to fill up the
+ # database. Its usefulness could be debated.
+ local_limit.enforce_db_limit(context,
+ local_limit.SERVER_GROUPS,
+ project_id, delta=0)
+ except nova.exception.ServerGroupLimitExceeded as e:
+ sg.destroy()
+ raise exc.HTTPForbidden(explanation=str(e))
except nova.exception.OverQuota:
sg.destroy()
msg = _("Quota exceeded, too many server groups.")
diff --git a/nova/api/openstack/compute/server_metadata.py b/nova/api/openstack/compute/server_metadata.py
index 448441a346..e92becb582 100644
--- a/nova/api/openstack/compute/server_metadata.py
+++ b/nova/api/openstack/compute/server_metadata.py
@@ -114,7 +114,7 @@ class ServerMetadataController(wsgi.Controller):
server,
metadata,
delete)
- except exception.QuotaError as error:
+ except exception.OverQuota as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
diff --git a/nova/api/openstack/compute/server_migrations.py b/nova/api/openstack/compute/server_migrations.py
index 230d6088fe..4f404f8944 100644
--- a/nova/api/openstack/compute/server_migrations.py
+++ b/nova/api/openstack/compute/server_migrations.py
@@ -139,7 +139,7 @@ class ServerMigrationsController(wsgi.Controller):
# TODO(Shaohe Feng) we should share the in-progress list.
in_progress = ['queued', 'preparing', 'running', 'post-migrating']
- if migration.get("status") not in in_progress:
+ if migration.status not in in_progress:
msg = _("Live migration %(id)s for server %(uuid)s is not in"
" progress.") % {"id": id, "uuid": server_id}
raise exc.HTTPNotFound(explanation=msg)
diff --git a/nova/api/openstack/compute/server_topology.py b/nova/api/openstack/compute/server_topology.py
index 9d4cc4a5d6..a1695e29fc 100644
--- a/nova/api/openstack/compute/server_topology.py
+++ b/nova/api/openstack/compute/server_topology.py
@@ -35,7 +35,8 @@ class ServerTopologyController(wsgi.Controller):
target={'project_id': instance.project_id})
host_policy = (st_policies.BASE_POLICY_NAME % 'host:index')
- show_host_info = context.can(host_policy, fatal=False)
+ show_host_info = context.can(host_policy,
+ target={'project_id': instance.project_id}, fatal=False)
return self._get_numa_topology(context, instance, show_host_info)
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 1d6d29b45f..33e74456fd 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -68,7 +68,6 @@ INVALID_FLAVOR_IMAGE_EXCEPTIONS = (
exception.ImageNUMATopologyIncomplete,
exception.ImageNUMATopologyMemoryOutOfRange,
exception.ImageNUMATopologyRebuildConflict,
- exception.ImagePMUConflict,
exception.ImageSerialPortNumberExceedFlavorValue,
exception.ImageSerialPortNumberInvalid,
exception.ImageVCPULimitsRangeExceeded,
@@ -678,7 +677,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.create_v263, '2.63', '2.66')
@validation.schema(schema_servers.create_v267, '2.67', '2.73')
@validation.schema(schema_servers.create_v274, '2.74', '2.89')
- @validation.schema(schema_servers.create_v290, '2.90')
+ @validation.schema(schema_servers.create_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.create_v294, '2.94')
def create(self, req, body):
"""Creates a new server for a given user."""
context = req.environ['nova.context']
@@ -797,8 +797,7 @@ class ServersController(wsgi.Controller):
supports_multiattach=supports_multiattach,
supports_port_resource_request=supports_port_resource_request,
**create_kwargs)
- except (exception.QuotaError,
- exception.PortLimitExceeded) as error:
+ except exception.OverQuota as error:
raise exc.HTTPForbidden(
explanation=error.format_message())
except exception.ImageNotFound:
@@ -862,6 +861,7 @@ class ServersController(wsgi.Controller):
exception.DeviceProfileError,
exception.ComputeHostNotFound,
exception.ForbiddenPortsWithAccelerator,
+ exception.ForbiddenWithRemoteManagedPorts,
exception.ExtendedResourceRequestOldCompute,
) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
@@ -907,7 +907,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.update_v20, '2.0', '2.0')
@validation.schema(schema_servers.update, '2.1', '2.18')
@validation.schema(schema_servers.update_v219, '2.19', '2.89')
- @validation.schema(schema_servers.update_v290, '2.90')
+ @validation.schema(schema_servers.update_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.update_v294, '2.94')
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
@@ -1053,7 +1054,7 @@ class ServersController(wsgi.Controller):
try:
self.compute_api.resize(context, instance, flavor_id,
auto_disk_config=auto_disk_config)
- except exception.QuotaError as error:
+ except exception.OverQuota as error:
raise exc.HTTPForbidden(
explanation=error.format_message())
except (
@@ -1148,7 +1149,8 @@ class ServersController(wsgi.Controller):
@validation.schema(schema_servers.rebuild_v254, '2.54', '2.56')
@validation.schema(schema_servers.rebuild_v257, '2.57', '2.62')
@validation.schema(schema_servers.rebuild_v263, '2.63', '2.89')
- @validation.schema(schema_servers.rebuild_v290, '2.90')
+ @validation.schema(schema_servers.rebuild_v290, '2.90', '2.93')
+ @validation.schema(schema_servers.rebuild_v294, '2.94')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
rebuild_dict = body['rebuild']
@@ -1206,6 +1208,9 @@ class ServersController(wsgi.Controller):
):
kwargs['hostname'] = rebuild_dict['hostname']
+ if api_version_request.is_supported(req, min_version='2.93'):
+ kwargs['reimage_boot_volume'] = True
+
for request_attribute, instance_attribute in attr_map.items():
try:
if request_attribute == 'name':
@@ -1237,7 +1242,7 @@ class ServersController(wsgi.Controller):
except exception.KeypairNotFound:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
- except exception.QuotaError as error:
+ except exception.OverQuota as error:
raise exc.HTTPForbidden(explanation=error.format_message())
except (exception.AutoDiskConfigDisabledByImage,
exception.CertificateValidationFailed,
@@ -1351,6 +1356,8 @@ class ServersController(wsgi.Controller):
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage', id)
+ except exception.InstanceQuiesceFailed as err:
+ raise exc.HTTPConflict(explanation=err.format_message())
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
except exception.OverQuota as e:
diff --git a/nova/api/openstack/compute/services.py b/nova/api/openstack/compute/services.py
index 6deb84a7f1..e9d51d4d0c 100644
--- a/nova/api/openstack/compute/services.py
+++ b/nova/api/openstack/compute/services.py
@@ -48,13 +48,10 @@ class ServiceController(wsgi.Controller):
self.actions = {"enable": self._enable,
"disable": self._disable,
"disable-log-reason": self._disable_log_reason}
- self._placementclient = None # Lazy-load on first access.
@property
def placementclient(self):
- if self._placementclient is None:
- self._placementclient = report.SchedulerReportClient()
- return self._placementclient
+ return report.report_client_singleton()
def _get_services(self, req):
# The API services are filtered out since they are not RPC services
@@ -328,7 +325,7 @@ class ServiceController(wsgi.Controller):
"Failed to delete compute node resource provider "
"for compute node %s: %s",
compute_node.uuid, str(e))
- # remove the host_mapping of this host.
+ # Remove the host_mapping of this host.
try:
hm = objects.HostMapping.get_by_host(context, service.host)
hm.destroy()
diff --git a/nova/api/openstack/compute/shelve.py b/nova/api/openstack/compute/shelve.py
index 7e1601a2a8..abcb42ee8e 100644
--- a/nova/api/openstack/compute/shelve.py
+++ b/nova/api/openstack/compute/shelve.py
@@ -64,9 +64,10 @@ class ShelveController(wsgi.Controller):
def _shelve_offload(self, req, id, body):
"""Force removal of a shelved instance from the compute node."""
context = req.environ["nova.context"]
- context.can(shelve_policies.POLICY_ROOT % 'shelve_offload')
-
instance = common.get_instance(self.compute_api, context, id)
+ context.can(shelve_policies.POLICY_ROOT % 'shelve_offload',
+ target={'user_id': instance.user_id,
+ 'project_id': instance.project_id})
try:
self.compute_api.shelve_offload(context, instance)
except exception.InstanceIsLocked as e:
@@ -85,32 +86,59 @@ class ShelveController(wsgi.Controller):
# In microversion 2.77 we support specifying 'availability_zone' to
# unshelve a server. But before 2.77 there is no request body
# schema validation (because of body=null).
- @validation.schema(shelve_schemas.unshelve_v277, min_version='2.77')
+ @validation.schema(
+ shelve_schemas.unshelve_v277,
+ min_version='2.77',
+ max_version='2.90'
+ )
+    # In microversion 2.91 we support specifying 'host' to
+    # unshelve an instance to a specific host.
+    # 'availability_zone' = None is supported as well to unpin the
+    # availability zone of an instance pinned to that availability zone.
+ @validation.schema(shelve_schemas.unshelve_v291, min_version='2.91')
def _unshelve(self, req, id, body):
"""Restore an instance from shelved mode."""
context = req.environ["nova.context"]
instance = common.get_instance(self.compute_api, context, id)
- context.can(shelve_policies.POLICY_ROOT % 'unshelve',
- target={'project_id': instance.project_id})
+ context.can(
+ shelve_policies.POLICY_ROOT % 'unshelve',
+ target={'project_id': instance.project_id}
+ )
+
+ unshelve_args = {}
- new_az = None
- unshelve_dict = body['unshelve']
- support_az = api_version_request.is_supported(req, '2.77')
- if support_az and unshelve_dict:
- new_az = unshelve_dict['availability_zone']
+ unshelve_dict = body.get('unshelve')
+ support_az = api_version_request.is_supported(
+ req, '2.77')
+ support_host = api_version_request.is_supported(
+ req, '2.91')
+ if unshelve_dict:
+ if support_az and 'availability_zone' in unshelve_dict:
+ unshelve_args['new_az'] = (
+ unshelve_dict['availability_zone']
+ )
+ if support_host:
+ unshelve_args['host'] = unshelve_dict.get('host')
try:
- self.compute_api.unshelve(context, instance, new_az=new_az)
- except (exception.InstanceIsLocked,
- exception.UnshelveInstanceInvalidState,
- exception.MismatchVolumeAZException) as e:
+ self.compute_api.unshelve(
+ context,
+ instance,
+ **unshelve_args,
+ )
+ except (
+ exception.InstanceIsLocked,
+ exception.UnshelveInstanceInvalidState,
+ exception.UnshelveHostNotInAZ,
+ exception.MismatchVolumeAZException,
+ ) as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
- common.raise_http_conflict_for_instance_invalid_state(state_error,
- 'unshelve',
- id)
+ common.raise_http_conflict_for_instance_invalid_state(
+ state_error, 'unshelve', id)
except (
exception.InvalidRequest,
exception.ExtendedResourceRequestOldCompute,
+ exception.ComputeHostNotFound,
) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py
index c14eee5325..8e7b8d3019 100644
--- a/nova/api/openstack/compute/views/servers.py
+++ b/nova/api/openstack/compute/views/servers.py
@@ -32,7 +32,6 @@ from nova import objects
from nova.objects import fields
from nova.objects import virtual_interface
from nova.policies import extended_server_attributes as esa_policies
-from nova.policies import flavor_extra_specs as fes_policies
from nova.policies import servers as servers_policies
from nova import utils
@@ -234,7 +233,9 @@ class ViewBuilder(common.ViewBuilder):
if api_version_request.is_supported(request, min_version='2.47'):
context = request.environ['nova.context']
show_extra_specs = context.can(
- fes_policies.POLICY_ROOT % 'index', fatal=False)
+ servers_policies.SERVERS % 'show:flavor-extra-specs',
+ fatal=False,
+ target={'project_id': instance.project_id})
if cell_down_support and 'display_name' not in instance:
# NOTE(tssurya): If the microversion is >= 2.69, this boolean will
@@ -437,8 +438,9 @@ class ViewBuilder(common.ViewBuilder):
if api_version_request.is_supported(request, min_version='2.47'):
# Determine if we should show extra_specs in the inlined flavor
# once before we iterate the list of instances
- show_extra_specs = context.can(fes_policies.POLICY_ROOT % 'index',
- fatal=False)
+ show_extra_specs = context.can(
+ servers_policies.SERVERS % 'show:flavor-extra-specs',
+ fatal=False)
else:
show_extra_specs = False
show_extended_attr = context.can(
diff --git a/nova/api/openstack/compute/volumes.py b/nova/api/openstack/compute/volumes.py
index 003f96deab..e049bd586a 100644
--- a/nova/api/openstack/compute/volumes.py
+++ b/nova/api/openstack/compute/volumes.py
@@ -506,6 +506,11 @@ class VolumeAttachmentController(wsgi.Controller):
# different from the 'id' in the url path, or only swap is allowed by
# the microversion, we should check the swap volume policy.
# otherwise, check the volume update policy.
+ # NOTE(gmann) We pass empty target to policy enforcement. This API
+ # is called by cinder which does not have the project_id that the
+ # server belongs to. By passing the empty target, we make sure that
+ # we do not check the requester project_id and allow users with
+ # allowed role to perform the swap volume.
if only_swap or id != volume_id:
context.can(va_policies.POLICY_ROOT % 'swap', target={})
else:
diff --git a/nova/api/openstack/identity.py b/nova/api/openstack/identity.py
index 7ffc623fed..15ec884aea 100644
--- a/nova/api/openstack/identity.py
+++ b/nova/api/openstack/identity.py
@@ -27,24 +27,27 @@ def verify_project_id(context, project_id):
"""verify that a project_id exists.
This attempts to verify that a project id exists. If it does not,
- an HTTPBadRequest is emitted.
+ an HTTPBadRequest is emitted. An HTTPBadRequest is also emitted
+ if the Keystone identity service version 3.0 endpoint is not found.
"""
adap = utils.get_ksa_adapter(
'identity', ksa_auth=context.get_auth_plugin(),
min_version=(3, 0), max_version=(3, 'latest'))
- failure = webob.exc.HTTPBadRequest(
- explanation=_("Project ID %s is not a valid project.") %
- project_id)
try:
resp = adap.get('/projects/%s' % project_id)
except kse.EndpointNotFound:
LOG.error(
- "Keystone identity service version 3.0 was not found. This might "
- "be because your endpoint points to the v2.0 versioned endpoint "
- "which is not supported. Please fix this.")
- raise failure
+ "Keystone identity service version 3.0 was not found. This "
+ "might be caused by Nova misconfiguration or Keystone "
+ "problems.")
+ msg = _("Nova was unable to find Keystone service endpoint.")
+ # TODO(astupnik). It may be reasonable to switch to HTTP 503
+ # (HTTP Service Unavailable) instead of HTTP Bad Request here.
+ # If the proper Keystone service is inaccessible, then technically
+ # this is a server side error and not an error in Nova.
+ raise webob.exc.HTTPBadRequest(explanation=msg)
except kse.ClientException:
# something is wrong, like there isn't a keystone v3 endpoint,
# or nova isn't configured for the interface to talk to it;
@@ -57,7 +60,8 @@ def verify_project_id(context, project_id):
return True
elif resp.status_code == 404:
# we got access, and we know this project is not there
- raise failure
+ msg = _("Project ID %s is not a valid project.") % project_id
+ raise webob.exc.HTTPBadRequest(explanation=msg)
elif resp.status_code == 403:
# we don't have enough permission to verify this, so default
# to "it's ok".
diff --git a/nova/api/openstack/wsgi_app.py b/nova/api/openstack/wsgi_app.py
index d60069ce84..6a2b72a611 100644
--- a/nova/api/openstack/wsgi_app.py
+++ b/nova/api/openstack/wsgi_app.py
@@ -42,8 +42,11 @@ def _get_config_files(env=None):
if env is None:
env = os.environ
dirname = env.get('OS_NOVA_CONFIG_DIR', '/etc/nova').strip()
+ files = env.get('OS_NOVA_CONFIG_FILES', '').split(';')
+ if files == ['']:
+ files = CONFIG_FILES
return [os.path.join(dirname, config_file)
- for config_file in CONFIG_FILES]
+ for config_file in files]
def _setup_service(host, name):
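As a hedged sketch of the new OS_NOVA_CONFIG_FILES handling, the helper below reproduces the semicolon splitting and the fallback to the default file list; CONFIG_FILES here is a stand-in for the module-level default in wsgi_app.py:

    import os

    CONFIG_FILES = ['api-paste.ini', 'nova.conf']  # stand-in for the default

    def get_config_files(env=None):
        """Resolve config file paths the way the patched helper does."""
        env = os.environ if env is None else env
        dirname = env.get('OS_NOVA_CONFIG_DIR', '/etc/nova').strip()
        files = env.get('OS_NOVA_CONFIG_FILES', '').split(';')
        if files == ['']:
            # Variable unset or empty: keep the packaged defaults.
            files = CONFIG_FILES
        return [os.path.join(dirname, f) for f in files]

    # Example: point the API at a single alternate file.
    print(get_config_files({'OS_NOVA_CONFIG_DIR': '/etc/nova',
                            'OS_NOVA_CONFIG_FILES': 'nova-api.conf'}))
    # -> ['/etc/nova/nova-api.conf']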
diff --git a/nova/api/validation/extra_specs/hw.py b/nova/api/validation/extra_specs/hw.py
index 4aaccf639a..c0c8f02809 100644
--- a/nova/api/validation/extra_specs/hw.py
+++ b/nova/api/validation/extra_specs/hw.py
@@ -15,6 +15,7 @@
"""Validators for ``hw`` namespaced extra specs."""
from nova.api.validation.extra_specs import base
+from nova.objects import fields
realtime_validators = [
@@ -162,6 +163,18 @@ hugepage_validators = [
'pattern': r'(large|small|any|\d+([kKMGT]i?)?(b|bit|B)?)',
},
),
+ base.ExtraSpecValidator(
+ name='hw:locked_memory',
+ description=(
+ 'Determine if **guest** (instance) memory should be locked '
+ 'preventing swaping. This is required in rare cases for device '
+ 'DMA transfers. Only supported by the libvirt virt driver.'
+ ),
+ value={
+ 'type': bool,
+ 'description': 'Whether to lock **guest** (instance) memory.',
+ },
+ ),
]
numa_validators = [
@@ -498,6 +511,47 @@ feature_flag_validators = [
],
},
),
+ base.ExtraSpecValidator(
+ name='hw:viommu_model',
+ description=(
+ 'This can be used to set the model for the virtual IOMMU device.'
+ ),
+ value={
+ 'type': str,
+ 'enum': [
+ 'intel',
+ 'smmuv3',
+ 'virtio',
+ 'auto'
+ ],
+ 'description': 'model for vIOMMU',
+ },
+ ),
+]
+
+ephemeral_encryption_validators = [
+ base.ExtraSpecValidator(
+ name='hw:ephemeral_encryption',
+ description=(
+ 'Whether to enable ephemeral storage encryption.'
+ ),
+ value={
+ 'type': bool,
+ 'description': 'Whether to enable ephemeral storage encryption.',
+ },
+ ),
+ base.ExtraSpecValidator(
+ name='hw:ephemeral_encryption_format',
+ description=(
+ 'The encryption format to be used if ephemeral storage '
+ 'encryption is enabled via hw:ephemeral_encryption.'
+ ),
+ value={
+ 'type': str,
+ 'description': 'The encryption format to be used if enabled.',
+ 'enum': fields.BlockDeviceEncryptionFormatType.ALL,
+ },
+ ),
]
@@ -509,5 +563,6 @@ def register():
hugepage_validators +
numa_validators +
cpu_topology_validators +
- feature_flag_validators
+ feature_flag_validators +
+ ephemeral_encryption_validators
)
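The validators added above surface to operators as flavor extra specs. A toy check, assuming illustrative values (the 'luks'/'plain' entries for the encryption format enum are assumptions, not taken from this diff), might look like:

    # Toy illustration only; this is not the nova extra-spec validation code.
    NEW_SPECS = {
        'hw:locked_memory': {'type': bool},
        'hw:viommu_model': {
            'type': str, 'enum': ['intel', 'smmuv3', 'virtio', 'auto']},
        'hw:ephemeral_encryption': {'type': bool},
        # Enum values below are assumed for illustration.
        'hw:ephemeral_encryption_format': {'type': str,
                                           'enum': ['luks', 'plain']},
    }

    def toy_validate(name, value):
        spec = NEW_SPECS[name]
        if spec['type'] is bool:
            return str(value).lower() in ('true', 'false')
        return value in spec.get('enum', [value])

    assert toy_validate('hw:viommu_model', 'virtio')
    assert not toy_validate('hw:viommu_model', 'bogus')
    assert toy_validate('hw:locked_memory', 'True')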
diff --git a/nova/api/validation/extra_specs/powervm.py b/nova/api/validation/extra_specs/powervm.py
deleted file mode 100644
index 58ef793777..0000000000
--- a/nova/api/validation/extra_specs/powervm.py
+++ /dev/null
@@ -1,271 +0,0 @@
-# Copyright 2020 Red Hat, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Validators for ``powervm`` namespaced extra specs.
-
-These were all taken from the IBM documentation.
-
-https://www.ibm.com/support/knowledgecenter/SSXK2N_1.4.4/com.ibm.powervc.standard.help.doc/powervc_pg_flavorsextraspecs_hmc.html
-"""
-
-from nova.api.validation.extra_specs import base
-
-
-# TODO(stephenfin): A lot of these seem to overlap with existing 'hw:' extra
-# specs and could be deprecated in favour of those.
-EXTRA_SPEC_VALIDATORS = [
- base.ExtraSpecValidator(
- name='powervm:min_mem',
- description=(
- 'Minimum memory (MB). If you do not specify the value, the value '
- 'is defaulted to the value for ``memory_mb``.'
- ),
- value={
- 'type': int,
- 'min': 256,
- 'description': 'Integer >=256 divisible by LMB size of the target',
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:max_mem',
- description=(
- 'Maximum memory (MB). If you do not specify the value, the value '
- 'is defaulted to the value for ``memory_mb``.'
- ),
- value={
- 'type': int,
- 'min': 256,
- 'description': 'Integer >=256 divisible by LMB size of the target',
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:min_vcpu',
- description=(
- 'Minimum virtual processors. Minimum resource that is required '
- 'for LPAR to boot is 1. The maximum value can be equal to the '
- 'value, which is set to vCPUs. If you specify the value of the '
- 'attribute, you must also specify value of powervm:max_vcpu. '
- 'Defaults to value set for vCPUs.'
- ),
- value={
- 'type': int,
- 'min': 1,
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:max_vcpu',
- description=(
- 'Minimum virtual processors. Minimum resource that is required '
- 'for LPAR to boot is 1. The maximum value can be equal to the '
- 'value, which is set to vCPUs. If you specify the value of the '
- 'attribute, you must also specify value of powervm:max_vcpu. '
- 'Defaults to value set for vCPUs.'
- ),
- value={
- 'type': int,
- 'min': 1,
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:proc_units',
- description=(
- 'The wanted ``proc_units``. The value for the attribute cannot be '
- 'less than 1/10 of the value that is specified for Virtual '
- 'CPUs (vCPUs) for hosts with firmware level 7.5 or earlier and '
- '1/20 of the value that is specified for vCPUs for hosts with '
- 'firmware level 7.6 or later. If the value is not specified '
- 'during deployment, it is defaulted to vCPUs * 0.5.'
- ),
- value={
- 'type': str,
- 'pattern': r'\d+\.\d+',
- 'description': (
- 'Float (divisible by 0.1 for hosts with firmware level 7.5 or '
- 'earlier and 0.05 for hosts with firmware level 7.6 or later)'
- ),
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:min_proc_units',
- description=(
- 'Minimum ``proc_units``. The minimum value for the attribute is '
- '0.1 for hosts with firmware level 7.5 or earlier and 0.05 for '
- 'hosts with firmware level 7.6 or later. The maximum value must '
- 'be equal to the maximum value of ``powervm:proc_units``. If you '
- 'specify the attribute, you must also specify '
- '``powervm:proc_units``, ``powervm:max_proc_units``, '
- '``powervm:min_vcpu``, `powervm:max_vcpu``, and '
- '``powervm:dedicated_proc``. Set the ``powervm:dedicated_proc`` '
- 'to false.'
- '\n'
- 'The value for the attribute cannot be less than 1/10 of the '
- 'value that is specified for powervm:min_vcpu for hosts with '
- 'firmware level 7.5 or earlier and 1/20 of the value that is '
- 'specified for ``powervm:min_vcpu`` for hosts with firmware '
- 'level 7.6 or later. If you do not specify the value of the '
- 'attribute during deployment, it is defaulted to equal the value '
- 'of ``powervm:proc_units``.'
- ),
- value={
- 'type': str,
- 'pattern': r'\d+\.\d+',
- 'description': (
- 'Float (divisible by 0.1 for hosts with firmware level 7.5 or '
- 'earlier and 0.05 for hosts with firmware level 7.6 or later)'
- ),
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:max_proc_units',
- description=(
- 'Maximum ``proc_units``. The minimum value can be equal to `` '
- '``powervm:proc_units``. The maximum value for the attribute '
- 'cannot be more than the value of the host for maximum allowed '
- 'processors per partition. If you specify this attribute, you '
- 'must also specify ``powervm:proc_units``, '
- '``powervm:min_proc_units``, ``powervm:min_vcpu``, '
- '``powervm:max_vcpu``, and ``powervm:dedicated_proc``. Set the '
- '``powervm:dedicated_proc`` to false.'
- '\n'
- 'The value for the attribute cannot be less than 1/10 of the '
- 'value that is specified for powervm:max_vcpu for hosts with '
- 'firmware level 7.5 or earlier and 1/20 of the value that is '
- 'specified for ``powervm:max_vcpu`` for hosts with firmware '
- 'level 7.6 or later. If you do not specify the value of the '
- 'attribute during deployment, the value is defaulted to equal the '
- 'value of ``powervm:proc_units``.'
- ),
- value={
- 'type': str,
- 'pattern': r'\d+\.\d+',
- 'description': (
- 'Float (divisible by 0.1 for hosts with firmware level 7.5 or '
- 'earlier and 0.05 for hosts with firmware level 7.6 or later)'
- ),
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:dedicated_proc',
- description=(
- 'Use dedicated processors. The attribute defaults to false.'
- ),
- value={
- 'type': bool,
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:shared_weight',
- description=(
- 'Shared processor weight. When ``powervm:dedicated_proc`` is set '
- 'to true and ``powervm:uncapped`` is also set to true, the value '
- 'of the attribute defaults to 128.'
- ),
- value={
- 'type': int,
- 'min': 0,
- 'max': 255,
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:availability_priority',
- description=(
- 'Availability priority. The attribute priority of the server if '
- 'there is a processor failure and there are not enough resources '
- 'for all servers. VIOS and i5 need to remain high priority '
- 'default of 191. The value of the attribute defaults to 128.'
- ),
- value={
- 'type': int,
- 'min': 0,
- 'max': 255,
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:uncapped',
- description=(
- 'LPAR can use unused processor cycles that are beyond or exceed '
- 'the wanted setting of the attribute. This attribute is '
- 'supported only when ``powervm:dedicated_proc`` is set to false. '
- 'When ``powervm:dedicated_proc`` is set to false, '
- '``powervm:uncapped`` defaults to true.'
- ),
- value={
- 'type': bool,
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:dedicated_sharing_mode',
- description=(
- 'Sharing mode for dedicated processors. The attribute is '
- 'supported only when ``powervm:dedicated_proc`` is set to true.'
- ),
- value={
- 'type': str,
- 'enum': (
- 'share_idle_procs',
- 'keep_idle_procs',
- 'share_idle_procs_active',
- 'share_idle_procs_always',
- )
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:processor_compatibility',
- description=(
- 'A processor compatibility mode is a value that is assigned to a '
- 'logical partition by the hypervisor that specifies the processor '
- 'environment in which the logical partition can successfully '
- 'operate.'
- ),
- value={
- 'type': str,
- 'enum': (
- 'default',
- 'POWER6',
- 'POWER6+',
- 'POWER6_Enhanced',
- 'POWER6+_Enhanced',
- 'POWER7',
- 'POWER8'
- ),
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:shared_proc_pool_name',
- description=(
- 'Specifies the shared processor pool to be targeted during '
- 'deployment of a virtual machine.'
- ),
- value={
- 'type': str,
- 'description': 'String with upper limit of 14 characters',
- },
- ),
- base.ExtraSpecValidator(
- name='powervm:srr_capability',
- description=(
- 'If the value of simplified remote restart capability is set to '
- 'true for the LPAR, you can remote restart the LPAR to supported '
- 'CEC or host when the source CEC or host is down. The attribute '
- 'defaults to false.'
- ),
- value={
- 'type': bool,
- },
- ),
-]
-
-
-def register():
- return EXTRA_SPEC_VALIDATORS
diff --git a/nova/api/validation/parameter_types.py b/nova/api/validation/parameter_types.py
index 79badb7d14..bdb3ad3c83 100644
--- a/nova/api/validation/parameter_types.py
+++ b/nova/api/validation/parameter_types.py
@@ -290,7 +290,7 @@ fqdn = {
name = {
# NOTE: Nova v2.1 API contains some 'name' parameters such
- # as keypair, server, flavor, aggregate and so on. They are
+ # as server, flavor, aggregate and so on. They are
# stored in the DB and Nova specific parameters.
# This definition is used for all their parameters.
'type': 'string', 'minLength': 1, 'maxLength': 255,
@@ -304,6 +304,18 @@ az_name = {
}
+keypair_name_special_chars = {'allOf': [name, {
+ 'type': 'string', 'minLength': 1, 'maxLength': 255,
+ 'format': 'keypair_name_20'
+}]}
+
+
+keypair_name_special_chars_292 = {'allOf': [name, {
+ 'type': 'string', 'minLength': 1, 'maxLength': 255,
+ 'format': 'keypair_name_292'
+}]}
+
+
az_name_with_leading_trailing_spaces = {
'type': 'string', 'minLength': 1, 'maxLength': 255,
'format': 'az_name_with_leading_trailing_spaces'
diff --git a/nova/api/validation/validators.py b/nova/api/validation/validators.py
index ed2f211eee..b0e9478d35 100644
--- a/nova/api/validation/validators.py
+++ b/nova/api/validation/validators.py
@@ -17,6 +17,7 @@ Internal implementation of request Body validating middleware.
"""
import re
+import string
import jsonschema
from jsonschema import exceptions as jsonschema_exc
@@ -153,6 +154,28 @@ def _validate_az_name(instance):
raise exception.InvalidName(reason=regex.reason)
+@jsonschema.FormatChecker.cls_checks('keypair_name_20',
+ exception.InvalidName)
+def _validate_keypair_name_20(keypair_name):
+ safe_chars = "_- " + string.digits + string.ascii_letters
+ return _validate_keypair_name(keypair_name, safe_chars)
+
+
+@jsonschema.FormatChecker.cls_checks('keypair_name_292',
+ exception.InvalidName)
+def _validate_keypair_name_292(keypair_name):
+ safe_chars = "@._- " + string.digits + string.ascii_letters
+ return _validate_keypair_name(keypair_name, safe_chars)
+
+
+def _validate_keypair_name(keypair_name, safe_chars):
+ clean_value = "".join(x for x in keypair_name if x in safe_chars)
+ if clean_value != keypair_name:
+ reason = _("Only expected characters: [%s]") % safe_chars
+ raise exception.InvalidName(reason=reason)
+ return True
+
+
def _soft_validate_additional_properties(validator,
additional_properties_value,
instance,
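A self-contained sketch of the whitelist check behind the two new formats: keypair_name_20 allows letters, digits, underscore, hyphen and space, while keypair_name_292 (microversion 2.92) additionally allows '@' and '.':

    import string

    # Not the nova module; the character sets mirror the validators above.
    SAFE_20 = "_- " + string.digits + string.ascii_letters
    SAFE_292 = "@._- " + string.digits + string.ascii_letters

    def is_valid_keypair_name(name, safe_chars):
        """True if every character of the name is in the allowed set."""
        return all(ch in safe_chars for ch in name)

    assert is_valid_keypair_name('my-key 01', SAFE_20)
    assert not is_valid_keypair_name('user@host.key', SAFE_20)
    assert is_valid_keypair_name('user@host.key', SAFE_292)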
diff --git a/nova/block_device.py b/nova/block_device.py
index a026078317..31d163f811 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -52,7 +52,9 @@ bdm_new_fields = set(['source_type', 'destination_type',
'guest_format', 'device_type', 'disk_bus', 'boot_index',
'device_name', 'delete_on_termination', 'snapshot_id',
'volume_id', 'volume_size', 'image_id', 'no_device',
- 'connection_info', 'tag', 'volume_type'])
+ 'connection_info', 'tag', 'volume_type', 'encrypted',
+ 'encryption_secret_uuid', 'encryption_format',
+ 'encryption_options'])
bdm_db_only_fields = set(['id', 'instance_uuid', 'attachment_id', 'uuid'])
@@ -612,7 +614,7 @@ def get_bdm_local_disk_num(block_device_mappings):
def get_bdm_image_metadata(context, image_api, volume_api,
block_device_mapping, legacy_bdm=True):
- """Attempt to retrive image metadata from a given block_device_mapping.
+ """Attempt to retrieve image metadata from a given block_device_mapping.
If we are booting from a volume, we need to get the volume details from
Cinder and make sure we pass the metadata back accordingly.
diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py
index fd6a499b78..45ae678ab4 100644
--- a/nova/cmd/manage.py
+++ b/nova/cmd/manage.py
@@ -122,6 +122,10 @@ def format_dict(dct, dict_property="Property", dict_value='Value',
"""
pt = prettytable.PrettyTable([dict_property, dict_value])
pt.align = 'l'
+ # starting in PrettyTable 3.4.0 we need to also set the header
+ # as align now only applies to the data.
+ if hasattr(pt, 'header_align'):
+ pt.header_align = 'l'
for k, v in sorted(dct.items(), key=sort_key):
# convert dict to str to check length
if isinstance(v, dict):
@@ -752,15 +756,7 @@ class CellV2Commands(object):
# worry about parsing and splitting a URL which could have special
# characters in the password, which makes parsing a nightmare.
url = sqla_url.make_url(connection)
-
- # TODO(gibi): remove hasattr() conditional in favor of "url.set()"
- # when SQLAlchemy 1.4 is the minimum version in requirements
- if hasattr(url, "set"):
- url = url.set(database=url.database + '_cell0')
- else:
- # TODO(zzzeek): remove when SQLAlchemy 1.4
- # is the minimum version in requirements
- url.database = url.database + '_cell0'
+ url = url.set(database=url.database + '_cell0')
return urlparse.unquote(str(url))
@@ -2217,7 +2213,7 @@ class PlacementCommands(object):
output(_('No cells to process.'))
return 4
- placement = report.SchedulerReportClient()
+ placement = report.report_client_singleton()
neutron = None
if heal_port_allocations:
@@ -2718,7 +2714,7 @@ class PlacementCommands(object):
if verbose:
output = lambda msg: print(msg)
- placement = report.SchedulerReportClient()
+ placement = report.report_client_singleton()
# Resets two in-memory dicts for knowing instances per compute node
self.cn_uuid_mapping = collections.defaultdict(tuple)
self.instances_mapping = collections.defaultdict(list)
@@ -3192,6 +3188,168 @@ class VolumeAttachmentCommands(object):
return 1
+class ImagePropertyCommands:
+
+ @action_description(_("Show the value of an instance image property."))
+ @args(
+ 'instance_uuid', metavar='<instance_uuid>',
+ help='UUID of the instance')
+ @args(
+ 'property', metavar='<image_property>',
+ help='Image property to show')
+ def show(self, instance_uuid=None, image_property=None):
+ """Show value of a given instance image property.
+
+ Return codes:
+ * 0: Command completed successfully.
+ * 1: An unexpected error happened.
+ * 2: Instance not found.
+ * 3: Image property not found.
+ """
+ try:
+ ctxt = context.get_admin_context()
+ im = objects.InstanceMapping.get_by_instance_uuid(
+ ctxt, instance_uuid)
+ with context.target_cell(ctxt, im.cell_mapping) as cctxt:
+ instance = objects.Instance.get_by_uuid(
+ cctxt, instance_uuid, expected_attrs=['system_metadata'])
+ image_property = instance.system_metadata.get(
+ f'image_{image_property}')
+ if image_property:
+ print(image_property)
+ return 0
+ else:
+ print(f'Image property {image_property} not found '
+ f'for instance {instance_uuid}.')
+ return 3
+ except (
+ exception.InstanceNotFound,
+ exception.InstanceMappingNotFound,
+ ) as e:
+ print(str(e))
+ return 2
+ except Exception as e:
+ print(f'Unexpected error, see nova-manage.log for the full '
+ f'trace: {str(e)}')
+ LOG.exception('Unexpected error')
+ return 1
+
+ def _validate_image_properties(self, image_properties):
+ """Validate the provided image property names and values
+
+ :param image_properties: List of image property names and values
+ """
+ # Sanity check the format of the provided properties; these should
+ # be in the format name=value.
+ if any(x for x in image_properties if '=' not in x):
+ raise exception.InvalidInput(
+ "--property should use the format key=value")
+
+ # Transform the list of delimited properties to a dict
+ image_properties = dict(prop.split('=') for prop in image_properties)
+
+ # Validate the names of each property by checking against the o.vo
+ # fields currently listed by ImageProps. We can't use from_dict to
+ # do this as it silently ignores invalid property keys.
+ for image_property_name in image_properties.keys():
+ if image_property_name not in objects.ImageMetaProps.fields:
+ raise exception.InvalidImagePropertyName(
+ image_property_name=image_property_name)
+
+ # Validate the values by creating an object from the provided dict.
+ objects.ImageMetaProps.from_dict(image_properties)
+
+ # Return the dict so we can update the instance system_metadata
+ return image_properties
+
+ def _update_image_properties(self, instance, image_properties):
+ """Update instance image properties
+
+ :param instance: The instance to update
+ :param image_properties: List of image properties and values to update
+ """
+ # Check the state of the instance
+ allowed_states = [
+ obj_fields.InstanceState.STOPPED,
+ obj_fields.InstanceState.SHELVED,
+ obj_fields.InstanceState.SHELVED_OFFLOADED,
+ ]
+ if instance.vm_state not in allowed_states:
+ raise exception.InstanceInvalidState(
+ instance_uuid=instance.uuid, attr='vm_state',
+ state=instance.vm_state,
+ method='image_property set (must be STOPPED, SHELVED, OR '
+ 'SHELVED_OFFLOADED).')
+
+ # Validate the property names and values
+ image_properties = self._validate_image_properties(image_properties)
+
+ # Update the image properties and save the instance record
+ for image_property, value in image_properties.items():
+ instance.system_metadata[f'image_{image_property}'] = value
+
+ # Save and return 0
+ instance.save()
+ return 0
+
+ @action_description(_(
+ "Set the values of instance image properties stored in the database. "
+ "This is only allowed for " "instances with a STOPPED, SHELVED or "
+ "SHELVED_OFFLOADED vm_state."))
+ @args(
+ 'instance_uuid', metavar='<instance_uuid>',
+ help='UUID of the instance')
+ @args(
+ '--property', metavar='<image_property>', action='append',
+ dest='image_properties',
+ help='Image property to set using the format name=value. For example: '
+ '--property hw_disk_bus=virtio --property hw_cdrom_bus=sata')
+ def set(self, instance_uuid=None, image_properties=None):
+ """Set instance image property values
+
+ Return codes:
+ * 0: Command completed successfully.
+ * 1: An unexpected error happened.
+ * 2: Unable to find instance.
+ * 3: Instance is in an invalid state.
+ * 4: Invalid input format.
+ * 5: Invalid image property name.
+ * 6: Invalid image property value.
+ """
+ try:
+ ctxt = context.get_admin_context()
+ im = objects.InstanceMapping.get_by_instance_uuid(
+ ctxt, instance_uuid)
+ with context.target_cell(ctxt, im.cell_mapping) as cctxt:
+ instance = objects.Instance.get_by_uuid(
+ cctxt, instance_uuid, expected_attrs=['system_metadata'])
+ return self._update_image_properties(
+ instance, image_properties)
+ except ValueError as e:
+ print(str(e))
+ return 6
+ except exception.InvalidImagePropertyName as e:
+ print(str(e))
+ return 5
+ except exception.InvalidInput as e:
+ print(str(e))
+ return 4
+ except exception.InstanceInvalidState as e:
+ print(str(e))
+ return 3
+ except (
+ exception.InstanceNotFound,
+ exception.InstanceMappingNotFound,
+ ) as e:
+ print(str(e))
+ return 2
+ except Exception as e:
+ print('Unexpected error, see nova-manage.log for the full '
+ 'trace: %s ' % str(e))
+ LOG.exception('Unexpected error')
+ return 1
+
+
CATEGORIES = {
'api_db': ApiDbCommands,
'cell_v2': CellV2Commands,
@@ -3199,6 +3357,7 @@ CATEGORIES = {
'placement': PlacementCommands,
'libvirt': LibvirtCommands,
'volume_attachment': VolumeAttachmentCommands,
+ 'image_property': ImagePropertyCommands,
}
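As a sketch of the "--property name=value" handling performed by the new image_property commands (the ImageMetaProps name and value validation is omitted here), the parsing step is roughly:

    # Sketch only; nova-manage additionally validates the names and values
    # against objects.ImageMetaProps.
    def parse_image_properties(pairs):
        """Turn ['hw_disk_bus=virtio', ...] into a dict, rejecting bad input."""
        if any('=' not in p for p in pairs):
            raise ValueError('--property should use the format key=value')
        return dict(p.split('=', 1) for p in pairs)

    print(parse_image_properties(['hw_disk_bus=virtio', 'hw_cdrom_bus=sata']))
    # -> {'hw_disk_bus': 'virtio', 'hw_cdrom_bus': 'sata'}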
diff --git a/nova/cmd/status.py b/nova/cmd/status.py
index 082116201b..29e4a5d01e 100644
--- a/nova/cmd/status.py
+++ b/nova/cmd/status.py
@@ -21,10 +21,10 @@ import sys
import traceback
from keystoneauth1 import exceptions as ks_exc
+import microversion_parse
from oslo_config import cfg
from oslo_upgradecheck import common_checks
from oslo_upgradecheck import upgradecheck
-import pkg_resources
import sqlalchemy as sa
from sqlalchemy import func as sqlfunc
@@ -41,7 +41,6 @@ from nova.objects import cell_mapping as cell_mapping_obj
# to be registered under nova.objects when called via _check_machine_type_set
from nova.objects import image_meta as image_meta_obj # noqa: F401
from nova.objects import instance as instance_obj # noqa: F401
-from nova import policy
from nova import utils
from nova import version
from nova.virt.libvirt import machine_type_utils
@@ -175,9 +174,9 @@ class UpgradeCommands(upgradecheck.UpgradeCommands):
try:
# TODO(efried): Use ksa's version filtering in _placement_get
versions = self._placement_get("/")
- max_version = pkg_resources.parse_version(
+ max_version = microversion_parse.parse_version_string(
versions["versions"][0]["max_version"])
- needs_version = pkg_resources.parse_version(
+ needs_version = microversion_parse.parse_version_string(
MIN_PLACEMENT_MICROVERSION)
if max_version < needs_version:
msg = (_('Placement API version %(needed)s needed, '
@@ -249,77 +248,13 @@ class UpgradeCommands(upgradecheck.UpgradeCommands):
str(ex))
return upgradecheck.Result(upgradecheck.Code.SUCCESS)
- def _check_policy(self):
- """Checks to see if policy file is overwritten with the new
- defaults.
- """
- msg = _("Your policy file contains rules which examine token scope, "
- "which may be due to generation with the new defaults. "
- "If that is done intentionally to migrate to the new rule "
- "format, then you are required to enable the flag "
- "'oslo_policy.enforce_scope=True' and educate end users on "
- "how to request scoped tokens from Keystone. Another easy "
- "and recommended way for you to achieve the same is via two "
- "flags, 'oslo_policy.enforce_scope=True' and "
- "'oslo_policy.enforce_new_defaults=True' and avoid "
- "overwriting the file. Please refer to this document to "
- "know the complete migration steps: "
- "https://docs.openstack.org/nova/latest/configuration"
- "/policy-concepts.html. If you did not intend to migrate "
- "to new defaults in this upgrade, then with your current "
- "policy file the scope checking rule will fail. A possible "
- "reason for such a policy file is that you generated it with "
- "'oslopolicy-sample-generator' in json format. "
- "Three ways to fix this until you are ready to migrate to "
- "scoped policies: 1. Generate the policy file with "
- "'oslopolicy-sample-generator' in yaml format, keep "
- "the generated content commented out, and update "
- "the generated policy.yaml location in "
- "``oslo_policy.policy_file``. "
- "2. Use a pre-existing sample config file from the Train "
- "release. 3. Use an empty or non-existent file to take all "
- "the defaults.")
- rule = "system_admin_api"
- rule_new_default = "role:admin and system_scope:all"
- status = upgradecheck.Result(upgradecheck.Code.SUCCESS)
- # NOTE(gmann): Initialise the policy if it not initialized.
- # We need policy enforcer with all the rules loaded to check
- # their value with defaults.
- try:
- if policy._ENFORCER is None:
- policy.init(suppress_deprecation_warnings=True)
-
- # For safer side, recheck that the enforcer is available before
- # upgrade checks. If something is wrong on oslo side and enforcer
- # is still not available the return warning to avoid any false
- # result.
- if policy._ENFORCER is not None:
- current_rule = str(policy._ENFORCER.rules[rule]).strip("()")
- if (current_rule == rule_new_default and
- not CONF.oslo_policy.enforce_scope):
- status = upgradecheck.Result(upgradecheck.Code.WARNING,
- msg)
- else:
- status = upgradecheck.Result(
- upgradecheck.Code.WARNING,
- _('Policy is not initialized to check the policy rules'))
- except Exception as ex:
- status = upgradecheck.Result(
- upgradecheck.Code.WARNING,
- _('Unable to perform policy checks due to error: %s') %
- str(ex))
- # reset the policy state so that it can be initialized from fresh if
- # operator changes policy file after running this upgrade checks.
- policy.reset()
- return status
-
def _check_old_computes(self):
# warn if there are computes in the system older than the previous
# major release
try:
utils.raise_if_old_compute()
except exception.TooOldComputeService as e:
- return upgradecheck.Result(upgradecheck.Code.WARNING, str(e))
+ return upgradecheck.Result(upgradecheck.Code.FAILURE, str(e))
return upgradecheck.Result(upgradecheck.Code.SUCCESS)
@@ -350,8 +285,6 @@ https://docs.openstack.org/latest/nova/admin/hw_machine_type.html"""))
(_('Placement API'), _check_placement),
# Added in Train
(_('Cinder API'), _check_cinder),
- # Added in Ussuri
- (_('Policy Scope-based Defaults'), _check_policy),
# Added in Victoria
(
_('Policy File JSON to YAML Migration'),
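The pkg_resources to microversion_parse switch above boils down to a different parser producing comparable version objects. A minimal sketch, assuming the microversion_parse library is installed and using a stand-in minimum version:

    import microversion_parse

    MIN_PLACEMENT_MICROVERSION = '1.35'   # stand-in value for illustration

    max_version = microversion_parse.parse_version_string('1.39')
    needs_version = microversion_parse.parse_version_string(
        MIN_PLACEMENT_MICROVERSION)
    # Parsed versions compare like (major, minor) tuples.
    print(max_version < needs_version)    # False: 1.39 satisfies the minimum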
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 28368d910f..6b2023c19f 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -22,7 +22,6 @@ networking and storage of VMs, and compute hosts on which they run)."""
import collections
import functools
import re
-import string
import typing as ty
from castellan import key_manager
@@ -59,6 +58,8 @@ from nova import exception
from nova import exception_wrapper
from nova.i18n import _
from nova.image import glance
+from nova.limit import local as local_limit
+from nova.limit import placement as placement_limits
from nova.network import constants
from nova.network import model as network_model
from nova.network import neutron
@@ -73,6 +74,7 @@ from nova.objects import quotas as quotas_obj
from nova.objects import service as service_obj
from nova.pci import request as pci_request
from nova.policies import servers as servers_policies
+from nova.policies import shelve as shelve_policies
import nova.policy
from nova import profiler
from nova import rpc
@@ -115,6 +117,11 @@ MIN_COMPUTE_BOOT_WITH_EXTENDED_RESOURCE_REQUEST = 58
MIN_COMPUTE_MOVE_WITH_EXTENDED_RESOURCE_REQUEST = 59
MIN_COMPUTE_INT_ATTACH_WITH_EXTENDED_RES_REQ = 60
+SUPPORT_VNIC_TYPE_REMOTE_MANAGED = 61
+MIN_COMPUTE_VDPA_ATTACH_DETACH = 62
+MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION = 63
+
+
# FIXME(danms): Keep a global cache of the cells we find the
# first time we look. This needs to be refreshed on a timer or
# trigger.
@@ -273,7 +280,7 @@ def reject_vtpm_instances(operation):
return outer
-def reject_vdpa_instances(operation):
+def reject_vdpa_instances(operation, until=None):
"""Reject requests to decorated function if instance has vDPA interfaces.
Raise OperationNotSupportedForVDPAInterfaces if operations involves one or
@@ -287,8 +294,18 @@ def reject_vdpa_instances(operation):
vif['vnic_type'] == network_model.VNIC_TYPE_VDPA
for vif in instance.get_network_info()
):
- raise exception.OperationNotSupportedForVDPAInterface(
- instance_uuid=instance.uuid, operation=operation)
+ reject = True
+ if until is not None:
+ min_ver = objects.service.get_minimum_version_all_cells(
+ nova_context.get_admin_context(), ['nova-compute']
+ )
+ if min_ver >= until:
+ reject = False
+
+ if reject:
+ raise exception.OperationNotSupportedForVDPAInterface(
+ instance_uuid=instance.uuid, operation=operation
+ )
return f(self, context, instance, *args, **kw)
return inner
return outer
@@ -376,11 +393,12 @@ def block_extended_resource_request(function):
class API:
"""API for interacting with the compute manager."""
+ _sentinel = object()
+
def __init__(self, image_api=None, network_api=None, volume_api=None):
self.image_api = image_api or glance.API()
self.network_api = network_api or neutron.API()
self.volume_api = volume_api or cinder.API()
- self._placementclient = None # Lazy-load on first access.
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.compute_task_api = conductor.ComputeTaskAPI()
self.servicegroup_api = servicegroup.API()
@@ -398,7 +416,7 @@ class API:
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
- Raises a QuotaError if any limit is exceeded.
+ Raises an OverQuota if any limit is exceeded.
"""
if not injected_files:
return
@@ -407,6 +425,10 @@ class API:
try:
objects.Quotas.limit_check(context,
injected_files=len(injected_files))
+ local_limit.enforce_api_limit(local_limit.INJECTED_FILES,
+ len(injected_files))
+ except exception.OnsetFileLimitExceeded:
+ raise
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
@@ -422,6 +444,16 @@ class API:
objects.Quotas.limit_check(context,
injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
+ # TODO(johngarbutt) we can simplify the except clause when
+ # the above legacy quota check is removed.
+ local_limit.enforce_api_limit(
+ local_limit.INJECTED_FILES_PATH, max_path)
+ local_limit.enforce_api_limit(
+ local_limit.INJECTED_FILES_CONTENT, max_content)
+ except exception.OnsetFilePathLimitExceeded:
+ raise
+ except exception.OnsetFileContentLimitExceeded:
+ raise
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
@@ -442,6 +474,10 @@ class API:
num_metadata = len(metadata)
try:
objects.Quotas.limit_check(context, metadata_items=num_metadata)
+ local_limit.enforce_api_limit(
+ local_limit.SERVER_METADATA_ITEMS, num_metadata)
+ except exception.MetadataLimitExceeded:
+ raise
except exception.OverQuota as exc:
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
@@ -838,17 +874,10 @@ class API:
"""
image_meta = _get_image_meta_obj(image)
- API._validate_flavor_image_mem_encryption(flavor, image_meta)
-
- # validate PMU extra spec and image metadata
- flavor_pmu = flavor.extra_specs.get('hw:pmu')
- image_pmu = image_meta.properties.get('hw_pmu')
- if (flavor_pmu is not None and image_pmu is not None and
- image_pmu != strutils.bool_from_string(flavor_pmu)):
- raise exception.ImagePMUConflict()
-
# Only validate values of flavor/image so the return results of
# following 'get' functions are not used.
+ hardware.get_mem_encryption_constraint(flavor, image_meta)
+ hardware.get_pmu_constraint(flavor, image_meta)
hardware.get_number_of_serial_ports(flavor, image_meta)
hardware.get_realtime_cpu_constraint(flavor, image_meta)
hardware.get_cpu_topology_constraints(flavor, image_meta)
@@ -858,19 +887,6 @@ class API:
if validate_pci:
pci_request.get_pci_requests_from_flavor(flavor)
- @staticmethod
- def _validate_flavor_image_mem_encryption(flavor, image):
- """Validate that the flavor and image don't make contradictory
- requests regarding memory encryption.
-
- :param flavor: Flavor object
- :param image: an ImageMeta object
- :raises: nova.exception.FlavorImageConflict
- """
- # This library function will raise the exception for us if
- # necessary; if not, we can ignore the result returned.
- hardware.get_mem_encryption_constraint(flavor, image)
-
def _get_image_defined_bdms(self, flavor, image_meta, root_device_name):
image_properties = image_meta.get('properties', {})
@@ -1017,6 +1033,22 @@ class API:
" until upgrade finished.")
raise exception.ForbiddenPortsWithAccelerator(msg)
+ def _check_vnic_remote_managed_min_version(self, context):
+ min_version = (objects.service.get_minimum_version_all_cells(
+ context, ['nova-compute']))
+ if min_version < SUPPORT_VNIC_TYPE_REMOTE_MANAGED:
+ msg = ("Remote-managed ports are not supported"
+ " until an upgrade is fully finished.")
+ raise exception.ForbiddenWithRemoteManagedPorts(msg)
+
+ def _check_support_vnic_remote_managed(self, context, requested_networks):
+ if requested_networks:
+ for request_net in requested_networks:
+ if (request_net.port_id and
+ self.network_api.is_remote_managed_port(
+ context, request_net.port_id)):
+ self._check_vnic_remote_managed_min_version(context)
+
def _validate_and_build_base_options(
self, context, flavor, boot_meta, image_href, image_id, kernel_id,
ramdisk_id, display_name, display_description, hostname, key_name,
@@ -1087,6 +1119,7 @@ class API:
network_metadata, port_resource_requests, req_lvl_params = result
self._check_support_vnic_accelerator(context, requested_networks)
+ self._check_support_vnic_remote_managed(context, requested_networks)
# Creating servers with ports that have resource requests, like QoS
# minimum bandwidth rules, is only supported in a requested minimum
@@ -1328,6 +1361,25 @@ class API:
# Check quotas
num_instances = compute_utils.check_num_instances_quota(
context, flavor, min_count, max_count)
+
+ # Find out whether or not we are a BFV instance
+ if block_device_mapping:
+ root = block_device_mapping.root_bdm()
+ is_bfv = bool(root and root.is_volume)
+ else:
+ # If we have no BDMs, we're clearly not BFV
+ is_bfv = False
+
+ # NOTE(johngarbutt) when unified limits not used, this just
+ # returns num_instances back again
+ # NOTE: If we want to enforce quota on port or cyborg resources in the
+ # future, this enforce call will need to move after we have populated
+ # the RequestSpec with all of the requested resources and use the real
+ # RequestSpec to get the overall resource usage of the instance.
+ num_instances = placement_limits.enforce_num_instances_and_flavor(
+ context, context.project_id, flavor,
+ is_bfv, min_count, num_instances)
+
security_groups = security_group_api.populate_security_groups(
security_groups)
port_resource_requests = base_options.pop('port_resource_requests')
@@ -1370,14 +1422,7 @@ class API:
security_groups=security_groups,
port_resource_requests=port_resource_requests,
request_level_params=req_lvl_params)
-
- if block_device_mapping:
- # Record whether or not we are a BFV instance
- root = block_device_mapping.root_bdm()
- req_spec.is_bfv = bool(root and root.is_volume)
- else:
- # If we have no BDMs, we're clearly not BFV
- req_spec.is_bfv = False
+ req_spec.is_bfv = is_bfv
# NOTE(danms): We need to record num_instances on the request
# spec as this is how the conductor knows how many were in this
@@ -1452,10 +1497,15 @@ class API:
objects.Quotas.check_deltas(
context, {'server_group_members': 1},
instance_group, context.user_id)
+ local_limit.enforce_db_limit(
+ context, local_limit.SERVER_GROUP_MEMBERS,
+ entity_scope=instance_group.uuid, delta=1)
+ except exception.GroupMemberLimitExceeded:
+ raise
except exception.OverQuota:
msg = _("Quota exceeded, too many servers in "
"group")
- raise exception.QuotaError(msg)
+ raise exception.OverQuota(msg)
members = objects.InstanceGroup.add_members(
context, instance_group.uuid, [instance.uuid])
@@ -1470,12 +1520,25 @@ class API:
objects.Quotas.check_deltas(
context, {'server_group_members': 0},
instance_group, context.user_id)
+ # TODO(johngarbutt): decide if we need this check
+ # The quota rechecking of limits is really just to
+ # protect against denial of service attacks that
+ # aim to fill up the database. Its usefulness could
+ # be debated.
+ local_limit.enforce_db_limit(
+ context, local_limit.SERVER_GROUP_MEMBERS,
+ entity_scope=instance_group.uuid, delta=0)
+ except exception.GroupMemberLimitExceeded:
+ with excutils.save_and_reraise_exception():
+ objects.InstanceGroup._remove_members_in_db(
+ context, instance_group.id,
+ [instance.uuid])
except exception.OverQuota:
objects.InstanceGroup._remove_members_in_db(
context, instance_group.id, [instance.uuid])
msg = _("Quota exceeded, too many servers in "
"group")
- raise exception.QuotaError(msg)
+ raise exception.OverQuota(msg)
# list of members added to servers group in this iteration
# is needed to check quota of server group during add next
# instance
@@ -1530,6 +1593,42 @@ class API:
return objects.InstanceGroup.get_by_uuid(context, group_hint)
+ def _update_ephemeral_encryption_bdms(
+ self,
+ flavor: 'objects.Flavor',
+ image_meta_dict: ty.Dict[str, ty.Any],
+ block_device_mapping: 'objects.BlockDeviceMappingList',
+ ) -> None:
+ """Update local BlockDeviceMappings when ephemeral encryption requested
+
+ Enable ephemeral encryption in all local BlockDeviceMappings
+ when requested in the flavor or image. Also optionally set the format
+ and options if also provided.
+
+ :param flavor: The instance flavor for the request
+ :param image_meta_dict: The image metadata for the request
+ :param block_device_mapping: The current block_device_mapping for the request
+ """
+ image_meta = _get_image_meta_obj(image_meta_dict)
+ if not hardware.get_ephemeral_encryption_constraint(
+ flavor, image_meta):
+ return
+
+ # NOTE(lyarwood): Attempt to find the format in the flavor and image,
+ # if one isn't found then the compute will need to provide and save a
+ # default format during the initial build.
+ eph_format = hardware.get_ephemeral_encryption_format(
+ flavor, image_meta)
+
+ # NOTE(lyarwood): The term ephemeral is overloaded in the codebase,
+ # what it actually means in the context of ephemeral encryption is
+ # anything local to the compute host so use the is_local property.
+ # TODO(lyarwood): Add .get_local_devices() to BlockDeviceMappingList
+ for bdm in [b for b in block_device_mapping if b.is_local]:
+ bdm.encrypted = True
+ if eph_format:
+ bdm.encryption_format = eph_format
+
def _create_instance(self, context, flavor,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
@@ -1607,10 +1706,17 @@ class API:
'max_net_count': max_net_count})
max_count = max_net_count
+ # _check_and_transform_bdm transforms block_device_mapping from API
+ # bdms (dicts) to a BlockDeviceMappingList.
block_device_mapping = self._check_and_transform_bdm(context,
base_options, flavor, boot_meta, min_count, max_count,
block_device_mapping, legacy_bdm)
+ # Update any local BlockDeviceMapping objects if ephemeral encryption
+ # has been requested through flavor extra specs or image properties
+ self._update_ephemeral_encryption_bdms(
+ flavor, boot_meta, block_device_mapping)
+
# We can't do this check earlier because we need bdms from all sources
# to have been merged in order to get the root bdm.
# Set validate_numa=False since numa validation is already done by
@@ -2441,6 +2547,8 @@ class API:
instance=instance)
with nova_context.target_cell(context, cell) as cctxt:
self._local_delete(cctxt, instance, bdms, delete_type, cb)
+ self._record_action_start(context, instance,
+ instance_actions.DELETE)
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
@@ -2524,9 +2632,7 @@ class API:
@property
def placementclient(self):
- if self._placementclient is None:
- self._placementclient = report.SchedulerReportClient()
- return self._placementclient
+ return report.report_client_singleton()
def _local_delete(self, context, instance, bdms, delete_type, cb):
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
@@ -2624,6 +2730,9 @@ class API:
project_id, user_id = quotas_obj.ids_from_instance(context, instance)
compute_utils.check_num_instances_quota(context, flavor, 1, 1,
project_id=project_id, user_id=user_id)
+ is_bfv = compute_utils.is_volume_backed_instance(context, instance)
+ placement_limits.enforce_num_instances_and_flavor(context, project_id,
+ flavor, is_bfv, 1, 1)
self._record_action_start(context, instance, instance_actions.RESTORE)
@@ -2805,9 +2914,11 @@ class API:
# spec has been archived is being queried.
raise exception.InstanceNotFound(instance_id=uuid)
else:
+ if isinstance(result[cell_uuid], exception.NovaException):
+ LOG.exception(result[cell_uuid])
raise exception.NovaException(
- _("Cell %s is not responding and hence instance "
- "info is not available.") % cell_uuid)
+ _("Cell %s is not responding or returned an exception, "
+ "hence instance info is not available.") % cell_uuid)
def _get_instance(self, context, instance_uuid, expected_attrs,
cell_down_support=False):
@@ -3480,7 +3591,7 @@ class API:
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rebuild(self, context, instance, image_href, admin_password,
- files_to_inject=None, **kwargs):
+ files_to_inject=None, reimage_boot_volume=False, **kwargs):
"""Rebuild the given instance with the provided attributes."""
files_to_inject = files_to_inject or []
metadata = kwargs.get('metadata', {})
@@ -3561,15 +3672,16 @@ class API:
orig_image_ref = volume_image_metadata.get('image_id')
if orig_image_ref != image_href:
- # Leave a breadcrumb.
- LOG.debug('Requested to rebuild instance with a new image %s '
- 'for a volume-backed server with image %s in its '
- 'root volume which is not supported.', image_href,
- orig_image_ref, instance=instance)
- msg = _('Unable to rebuild with a different image for a '
- 'volume-backed server.')
- raise exception.ImageUnacceptable(
- image_id=image_href, reason=msg)
+ if not reimage_boot_volume:
+ # Leave a breadcrumb.
+ LOG.debug('Requested to rebuild instance with a new image '
+ '%s for a volume-backed server with image %s in '
+ 'its root volume which is not supported.',
+ image_href, orig_image_ref, instance=instance)
+ msg = _('Unable to rebuild with a different image for a '
+ 'volume-backed server.')
+ raise exception.ImageUnacceptable(
+ image_id=image_href, reason=msg)
else:
orig_image_ref = instance.image_ref
@@ -3684,7 +3796,9 @@ class API:
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
preserve_ephemeral=preserve_ephemeral, host=host,
- request_spec=request_spec)
+ request_spec=request_spec,
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=None)
def _check_volume_status(self, context, bdms):
"""Check whether the status of the volume is "in-use".
@@ -3741,9 +3855,22 @@ class API:
# TODO(sean-k-mooney): add PCI NUMA affinity policy check.
@staticmethod
- def _check_quota_for_upsize(context, instance, current_flavor, new_flavor):
+ def _check_quota_for_upsize(context, instance, current_flavor,
+ new_flavor, is_bfv, is_revert):
project_id, user_id = quotas_obj.ids_from_instance(context,
instance)
+ # NOTE(johngarbutt) for resize, check for sum of existing usage
+ # plus the usage from new flavor, as it will be claimed in
+ # placement that way, even if there is no change in flavor
+ # But for revert resize, we are just removing claims in placement
+ # so we can ignore the quota check
+ if not is_revert:
+ placement_limits.enforce_num_instances_and_flavor(context,
+ project_id,
+ new_flavor,
+ is_bfv, 1, 1)
+
+ # Old quota system only looks at the change in size.
# Deltas will be empty if the resize is not an upsize.
deltas = compute_utils.upsize_quota_delta(new_flavor,
current_flavor)
@@ -3785,8 +3912,11 @@ class API:
elevated, instance.uuid, 'finished')
# If this is a resize down, a revert might go over quota.
+ reqspec = objects.RequestSpec.get_by_instance_uuid(
+ context, instance.uuid)
self._check_quota_for_upsize(context, instance, instance.flavor,
- instance.old_flavor)
+ instance.old_flavor, reqspec.is_bfv,
+ is_revert=True)
# The AZ for the server may have changed when it was migrated so while
# we are in the API and have access to the API DB, update the
@@ -3810,8 +3940,6 @@ class API:
# the scheduler will be using the wrong values. There's no need to do
# this if the flavor hasn't changed though and we're migrating rather
# than resizing.
- reqspec = objects.RequestSpec.get_by_instance_uuid(
- context, instance.uuid)
if reqspec.flavor['id'] != instance.old_flavor['id']:
reqspec.flavor = instance.old_flavor
reqspec.numa_topology = hardware.numa_get_constraints(
@@ -4030,9 +4158,6 @@ class API:
# finally split resize and cold migration into separate code paths
@block_extended_resource_request
@block_port_accelerators()
- # FIXME(sean-k-mooney): Cold migrate and resize to different hosts
- # probably works but they have not been tested so block them for now
- @reject_vdpa_instances(instance_actions.RESIZE)
@block_accelerators()
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
@@ -4113,9 +4238,16 @@ class API:
# ensure there is sufficient headroom for upsizes
if flavor_id:
+ # Figure out if the instance is volume-backed but only if we didn't
+ # already figure that out above (avoid the extra db hit).
+ if volume_backed is None:
+ # TODO(johngarbutt) should we just use the request spec?
+ volume_backed = compute_utils.is_volume_backed_instance(
+ context, instance)
self._check_quota_for_upsize(context, instance,
current_flavor,
- new_flavor)
+ new_flavor, volume_backed,
+ is_revert=False)
if not same_flavor:
image = utils.get_image_from_system_metadata(
@@ -4150,6 +4282,19 @@ class API:
if not same_flavor:
request_spec.numa_topology = hardware.numa_get_constraints(
new_flavor, instance.image_meta)
+ # if the flavor is changed then we need to recalculate the
+ # pci_requests as well because the new flavor might request
+ # different pci_aliases
+ new_pci_requests = pci_request.get_pci_requests_from_flavor(
+ new_flavor)
+ new_pci_requests.instance_uuid = instance.uuid
+ # The neutron based InstancePCIRequest cannot change during resize,
+ # so we just need to copy them from the old request
+ for request in request_spec.pci_requests.requests or []:
+ if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
+ new_pci_requests.requests.append(request)
+ request_spec.pci_requests = new_pci_requests
+
# TODO(huaqiang): Remove in Wallaby
# check nova-compute nodes have been updated to Victoria to resize
# instance to a new mixed instance from a dedicated or shared
@@ -4251,10 +4396,7 @@ class API:
allow_same_host = CONF.allow_resize_to_same_host
return allow_same_host
- # FIXME(sean-k-mooney): Shelve works but unshelve does not due to bug
- # #1851545, so block it for now
@block_port_accelerators()
- @reject_vdpa_instances(instance_actions.SHELVE)
@reject_vtpm_instances(instance_actions.SHELVE)
@block_accelerators(until_service=54)
@check_instance_lock
@@ -4316,31 +4458,45 @@ class API:
context, instance=instance,
clean_shutdown=clean_shutdown, accel_uuids=accel_uuids)
+ def _check_offloaded(self, context, instance):
+ """Check if the status of an instance is SHELVE_OFFLOADED,
+ if not raise an exception.
+ """
+ if instance.vm_state != vm_states.SHELVED_OFFLOADED:
+ # NOTE(brinzhang): If the server status is 'SHELVED', it still
+ # belongs to a host, the availability_zone should not change.
+ # Unshelving a shelved offloaded server will go through the
+ # scheduler to find a new host.
+ raise exception.UnshelveInstanceInvalidState(
+ state=instance.vm_state, instance_uuid=instance.uuid)
+
+ def _ensure_host_in_az(self, context, host, availability_zone):
+ """Ensure the host provided belongs to the availability zone,
+ if not raise an exception.
+ """
+ if availability_zone is not None:
+ host_az = availability_zones.get_host_availability_zone(
+ context,
+ host
+ )
+ if host_az != availability_zone:
+ raise exception.UnshelveHostNotInAZ(
+ host=host, availability_zone=availability_zone)
+
def _validate_unshelve_az(self, context, instance, availability_zone):
"""Verify the specified availability_zone during unshelve.
- Verifies that the server is shelved offloaded, the AZ exists and
- if [cinder]/cross_az_attach=False, that any attached volumes are in
- the same AZ.
+ Verifies the AZ exists and if [cinder]/cross_az_attach=False, that
+ any attached volumes are in the same AZ.
:param context: nova auth RequestContext for the unshelve action
:param instance: Instance object for the server being unshelved
:param availability_zone: The user-requested availability zone in
which to unshelve the server.
- :raises: UnshelveInstanceInvalidState if the server is not shelved
- offloaded
:raises: InvalidRequest if the requested AZ does not exist
:raises: MismatchVolumeAZException if [cinder]/cross_az_attach=False
and any attached volumes are not in the requested AZ
"""
- if instance.vm_state != vm_states.SHELVED_OFFLOADED:
- # NOTE(brinzhang): If the server status is 'SHELVED', it still
- # belongs to a host, the availability_zone has not changed.
- # Unshelving a shelved offloaded server will go through the
- # scheduler to find a new host.
- raise exception.UnshelveInstanceInvalidState(
- state=instance.vm_state, instance_uuid=instance.uuid)
-
available_zones = availability_zones.get_availability_zones(
context, self.host_api, get_only_available=True)
if availability_zone not in available_zones:
@@ -4368,31 +4524,96 @@ class API:
@block_extended_resource_request
@check_instance_lock
- @check_instance_state(vm_state=[vm_states.SHELVED,
- vm_states.SHELVED_OFFLOADED])
- def unshelve(self, context, instance, new_az=None):
- """Restore a shelved instance."""
+ @check_instance_state(
+ vm_state=[vm_states.SHELVED, vm_states.SHELVED_OFFLOADED])
+ def unshelve(
+ self, context, instance, new_az=_sentinel, host=None):
+ """Restore a shelved instance.
+
+ :param context: the nova request context
+ :param instance: nova.objects.instance.Instance object
+ :param new_az: (optional) target AZ.
+ If None is provided then the current AZ restriction
+ will be removed from the instance.
+ If the parameter is not provided then the current
+ AZ restriction will not be changed.
+ :param host: (optional) a host to target
+ """
+ # Unshelving a shelved offloaded server will go through the
+ # scheduler to pick a new host, so we update the
+ # RequestSpec.availability_zone here. Note that if scheduling
+ # fails the RequestSpec will remain updated, which is not great.
+ # Bug open to track this https://bugs.launchpad.net/nova/+bug/1978573
+
+ az_passed = new_az is not self._sentinel
+
request_spec = objects.RequestSpec.get_by_instance_uuid(
context, instance.uuid)
- if new_az:
+ # We need to check a list of preconditions and validate inputs first
+
+ # Ensure instance is shelve offloaded
+ if az_passed or host:
+ self._check_offloaded(context, instance)
+
+ if az_passed and new_az:
+ # we have to ensure that new AZ is valid
self._validate_unshelve_az(context, instance, new_az)
- LOG.debug("Replace the old AZ %(old_az)s in RequestSpec "
- "with a new AZ %(new_az)s of the instance.",
- {"old_az": request_spec.availability_zone,
- "new_az": new_az}, instance=instance)
- # Unshelving a shelved offloaded server will go through the
- # scheduler to pick a new host, so we update the
- # RequestSpec.availability_zone here. Note that if scheduling
- # fails the RequestSpec will remain updated, which is not great,
- # but if we want to change that we need to defer updating the
- # RequestSpec until conductor which probably means RPC changes to
- # pass the new_az variable to conductor. This is likely low
- # priority since the RequestSpec.availability_zone on a shelved
- # offloaded server does not mean much anyway and clearly the user
- # is trying to put the server in the target AZ.
- request_spec.availability_zone = new_az
- request_spec.save()
+ # This will be the AZ of the instance after the unshelve. It can be
+ # None indicating that the instance is not pinned to any AZ after the
+ # unshelve
+ expected_az_after_unshelve = (
+ request_spec.availability_zone
+ if not az_passed else new_az
+ )
+        # A host is requested, so we have to see if it exists and does not
+        # contradict the AZ of the instance
+ if host:
+ # Make sure only admin can unshelve to a specific host.
+ context.can(
+ shelve_policies.POLICY_ROOT % 'unshelve_to_host',
+ target={
+ 'user_id': instance.user_id,
+ 'project_id': instance.project_id
+ }
+ )
+            # Ensure that the requested host exists, otherwise raise
+            # a ComputeHostNotFound exception
+ objects.ComputeNode.get_first_node_by_host_for_old_compat(
+ context, host, use_slave=True)
+            # A specific host is requested so we need to make sure that it
+            # does not contradict the AZ of the instance
+ self._ensure_host_in_az(
+ context, host, expected_az_after_unshelve)
+
+ if new_az is None:
+ LOG.debug(
+ 'Unpin instance from AZ "%(old_az)s".',
+ {'old_az': request_spec.availability_zone},
+ instance=instance
+ )
+
+ LOG.debug(
+ 'Unshelving instance with old availability_zone "%(old_az)s" to '
+ 'new availability_zone "%(new_az)s" and host "%(host)s".',
+ {
+ 'old_az': request_spec.availability_zone,
+                'new_az': '%s' % new_az if az_passed else 'not provided',
+ 'host': host,
+ },
+ instance=instance,
+ )
+        # OK, every precondition checks out; we just need to tell the
+        # scheduler where to put the instance.
+        # The expected AZ is already calculated, so we just need to set it
+        # in the request_spec to drive the scheduling.
+ request_spec.availability_zone = expected_az_after_unshelve
+ # if host is requested we also need to tell the scheduler that
+ if host:
+ request_spec.requested_destination = objects.Destination(host=host)
+ request_spec.save()
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=[None])
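
The new unshelve() signature above relies on a sentinel default so that an
omitted new_az can be distinguished from an explicit new_az=None (which
unpins the instance from its AZ). A minimal sketch of that sentinel-default
pattern, using illustrative names only:

class ExampleAPI:
    # A private, unique object that callers can never pass by accident.
    _sentinel = object()

    def __init__(self):
        self.az = 'az-1'

    def unshelve(self, new_az=_sentinel):
        if new_az is self._sentinel:
            return 'AZ unchanged: %s' % self.az      # parameter omitted
        if new_az is None:
            self.az = None
            return 'AZ restriction removed'          # explicit None
        self.az = new_az
        return 'pinned to %s' % new_az               # explicit value


api = ExampleAPI()
print(api.unshelve())                 # AZ unchanged: az-1
print(api.unshelve(new_az=None))      # AZ restriction removed
print(api.unshelve(new_az='az-2'))    # pinned to az-2
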
@@ -4443,11 +4664,10 @@ class API:
return self.compute_rpcapi.get_instance_diagnostics(context,
instance=instance)
- # FIXME(sean-k-mooney): Suspend does not work because we do not unplug
- # the vDPA devices before calling managed save as we do with SR-IOV
- # devices
@block_port_accelerators()
- @reject_vdpa_instances(instance_actions.SUSPEND)
+ @reject_vdpa_instances(
+ instance_actions.SUSPEND, until=MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION
+ )
@block_accelerators()
@reject_sev_instances(instance_actions.SUSPEND)
@check_instance_lock
@@ -4460,6 +4680,9 @@ class API:
self.compute_rpcapi.suspend_instance(context, instance)
@check_instance_lock
+ @reject_vdpa_instances(
+ instance_actions.RESUME, until=MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION
+ )
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
@@ -4477,6 +4700,7 @@ class API:
allow_bfv_rescue=False):
"""Rescue the given instance."""
+ image_meta = None
if rescue_image_ref:
try:
image_meta = image_meta_obj.ImageMeta.from_image_ref(
@@ -4497,6 +4721,8 @@ class API:
"image properties set")
raise exception.UnsupportedRescueImage(
image=rescue_image_ref)
+ else:
+ image_meta = instance.image_meta
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
@@ -4505,6 +4731,9 @@ class API:
volume_backed = compute_utils.is_volume_backed_instance(
context, instance, bdms)
+ allow_bfv_rescue &= 'hw_rescue_bus' in image_meta.properties and \
+ 'hw_rescue_device' in image_meta.properties
+
if volume_backed and allow_bfv_rescue:
cn = objects.ComputeNode.get_by_host_and_nodename(
context, instance.host, instance.node)
@@ -4803,10 +5032,24 @@ class API:
This method is separated to make it possible for cells version
to override it.
"""
- volume_bdm = self._create_volume_bdm(
- context, instance, device, volume, disk_bus=disk_bus,
- device_type=device_type, tag=tag,
- delete_on_termination=delete_on_termination)
+ try:
+ volume_bdm = self._create_volume_bdm(
+ context, instance, device, volume, disk_bus=disk_bus,
+ device_type=device_type, tag=tag,
+ delete_on_termination=delete_on_termination)
+ except oslo_exceptions.MessagingTimeout:
+ # The compute node might have already created the attachment but
+ # we never received the answer. In this case it is safe to delete
+ # the attachment as nobody will ever pick it up again.
+ with excutils.save_and_reraise_exception():
+ try:
+ objects.BlockDeviceMapping.get_by_volume_and_instance(
+ context, volume['id'], instance.uuid).destroy()
+ LOG.debug("Delete BDM after compute did not respond to "
+ f"attachment request for volume {volume['id']}")
+ except exception.VolumeBDMNotFound:
+ LOG.debug("BDM not found, ignoring removal. "
+ f"Error attaching volume {volume['id']}")
try:
self._check_attach_and_reserve_volume(context, volume, instance,
volume_bdm,
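
The MessagingTimeout handler above deletes the possibly half-created BDM and
then lets the original timeout propagate to the caller. A rough sketch of
that "clean up, then re-raise" shape, using a simplified stand-in for oslo's
save_and_reraise_exception (the real helper also logs and can abort the
re-raise):

import contextlib
import sys


@contextlib.contextmanager
def save_and_reraise():
    """Capture the active exception and re-raise it after the body runs."""
    exc = sys.exc_info()[1]
    try:
        yield
    finally:
        if exc is not None:
            raise exc


def create_record():
    raise TimeoutError('compute did not answer')


def delete_record():
    print('deleting possibly half-created record')


try:
    try:
        create_record()
    except TimeoutError:
        # Best-effort cleanup; the original TimeoutError still propagates.
        with save_and_reraise():
            delete_record()
except TimeoutError as exc:
    print('caller still sees:', exc)
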
@@ -5134,9 +5377,14 @@ class API:
instance_uuid=instance.uuid)
@check_instance_lock
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
- vm_states.STOPPED],
- task_state=[None])
+ @reject_vdpa_instances(
+ instance_actions.ATTACH_INTERFACE, until=MIN_COMPUTE_VDPA_ATTACH_DETACH
+ )
+ @check_instance_state(
+ vm_state=[
+ vm_states.ACTIVE, vm_states.PAUSED, vm_states.STOPPED
+ ], task_state=[None]
+ )
def attach_interface(self, context, instance, network_id, port_id,
requested_ip, tag=None):
"""Use hotplug to add an network adapter to an instance."""
@@ -5149,18 +5397,16 @@ class API:
# port.resource_request field which only returned for admins
port = self.network_api.show_port(
context.elevated(), port_id)['port']
- if port.get('binding:vnic_type', "normal") == "vdpa":
- # FIXME(sean-k-mooney): Attach works but detach results in a
- # QEMU error; blocked until this is resolved
- raise exception.OperationNotSupportedForVDPAInterface(
- instance_uuid=instance.uuid,
- operation=instance_actions.ATTACH_INTERFACE)
if port.get('binding:vnic_type', 'normal') in (
network_model.VNIC_TYPE_ACCELERATOR_DIRECT,
network_model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL):
raise exception.ForbiddenPortsWithAccelerator()
+ if port.get('binding:vnic_type',
+ 'normal') == network_model.VNIC_TYPE_REMOTE_MANAGED:
+ self._check_vnic_remote_managed_min_version(context)
+
self.ensure_compute_version_for_resource_request(
context, instance, port)
@@ -5169,37 +5415,23 @@ class API:
requested_ip=requested_ip, tag=tag)
@check_instance_lock
- @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
- vm_states.STOPPED],
- task_state=[None])
+ @reject_vdpa_instances(
+ instance_actions.DETACH_INTERFACE, until=MIN_COMPUTE_VDPA_ATTACH_DETACH
+ )
+ @check_instance_state(
+ vm_state=[
+ vm_states.ACTIVE, vm_states.PAUSED, vm_states.STOPPED
+ ], task_state=[None]
+ )
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
-
- # FIXME(sean-k-mooney): Detach currently results in a failure to remove
- # the interface from the live libvirt domain, so while the networking
- # is torn down on the host the vDPA device is still attached to the VM.
- # This is likely a libvirt/qemu bug so block detach until that is
- # resolved.
for vif in instance.get_network_info():
if vif['id'] == port_id:
- if vif['vnic_type'] == 'vdpa':
- raise exception.OperationNotSupportedForVDPAInterface(
- instance_uuid=instance.uuid,
- operation=instance_actions.DETACH_INTERFACE)
if vif['vnic_type'] in (
network_model.VNIC_TYPE_ACCELERATOR_DIRECT,
network_model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL):
raise exception.ForbiddenPortsWithAccelerator()
break
- else:
- # NOTE(sean-k-mooney) This should never happen but just in case the
- # info cache does not have the port we are detaching we can fall
- # back to neutron.
- port = self.network_api.show_port(context, port_id)['port']
- if port.get('binding:vnic_type', 'normal') == 'vdpa':
- raise exception.OperationNotSupportedForVDPAInterface(
- instance_uuid=instance.uuid,
- operation=instance_actions.DETACH_INTERFACE)
self._record_action_start(
context, instance, instance_actions.DETACH_INTERFACE)
@@ -5244,7 +5476,10 @@ class API:
@block_extended_resource_request
@block_port_accelerators()
- @reject_vdpa_instances(instance_actions.LIVE_MIGRATION)
+ @reject_vdpa_instances(
+ instance_actions.LIVE_MIGRATION,
+ until=MIN_COMPUTE_VDPA_HOTPLUG_LIVE_MIGRATION
+ )
@block_accelerators()
@reject_vtpm_instances(instance_actions.LIVE_MIGRATION)
@reject_sev_instances(instance_actions.LIVE_MIGRATION)
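
The reject_vdpa_instances decorators above now take an until= service
version, so the block is lifted once every compute service is new enough to
support the operation. A simplified, hypothetical sketch of such a
version-gated rejection decorator; the names and version number below are
made up, and the real nova decorator checks the deployment's minimum
nova-compute service version:

import functools

# Hypothetical service version at which the operation becomes safe.
MIN_VERSION_FOR_OPERATION = 63


def reject_flagged_instances(operation, until=None):
    """Reject `operation` for flagged instances unless every service in the
    deployment already reports a version of at least `until`."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, context, instance, *args, **kwargs):
            flagged = instance.get('has_special_device', False)
            new_enough = (
                until is not None and self.min_service_version >= until)
            if flagged and not new_enough:
                raise RuntimeError(
                    '%s is not supported for this instance yet' % operation)
            return func(self, context, instance, *args, **kwargs)
        return wrapper
    return decorator


class MiniAPI:
    min_service_version = 61   # pretend one old compute is still around

    @reject_flagged_instances('suspend', until=MIN_VERSION_FOR_OPERATION)
    def suspend(self, context, instance):
        return 'suspended'


api = MiniAPI()
print(api.suspend({}, {'has_special_device': False}))
try:
    api.suspend({}, {'has_special_device': True})
except RuntimeError as exc:
    print(exc)
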
@@ -5378,14 +5613,12 @@ class API:
@block_extended_resource_request
@block_port_accelerators()
- # FIXME(sean-k-mooney): rebuild works but we have not tested evacuate yet
- @reject_vdpa_instances(instance_actions.EVACUATE)
@reject_vtpm_instances(instance_actions.EVACUATE)
@block_accelerators(until_service=SUPPORT_ACCELERATOR_SERVICE_FOR_REBUILD)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
- vm_states.ERROR])
+ vm_states.ERROR], task_state=None)
def evacuate(self, context, instance, host, on_shared_storage,
- admin_password=None, force=None):
+ admin_password=None, force=None, target_state=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
@@ -5396,6 +5629,7 @@ class API:
:param on_shared_storage: True if instance files on shared storage
:param admin_password: password to set on rebuilt instance
:param force: Force the evacuation to the specific host target
+ :param target_state: Set a target state for the evacuated instance
"""
LOG.debug('vm evacuation scheduled', instance=instance)
@@ -5410,7 +5644,7 @@ class API:
context, instance.uuid)
instance.task_state = task_states.REBUILDING
- instance.save(expected_task_state=[None])
+ instance.save(expected_task_state=None)
self._record_action_start(context, instance, instance_actions.EVACUATE)
# NOTE(danms): Create this as a tombstone for the source compute
@@ -5459,7 +5693,7 @@ class API:
on_shared_storage=on_shared_storage,
host=host,
request_spec=request_spec,
- )
+ target_state=target_state)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
@@ -6218,13 +6452,10 @@ class AggregateAPI:
def __init__(self):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.query_client = query.SchedulerQueryClient()
- self._placement_client = None # Lazy-load on first access.
@property
def placement_client(self):
- if self._placement_client is None:
- self._placement_client = report.SchedulerReportClient()
- return self._placement_client
+ return report.report_client_singleton()
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
@@ -6519,21 +6750,13 @@ class KeypairAPI:
}
self.notifier.info(context, 'keypair.%s' % event_suffix, payload)
- def _validate_new_key_pair(self, context, user_id, key_name, key_type):
- safe_chars = "_- " + string.digits + string.ascii_letters
- clean_value = "".join(x for x in key_name if x in safe_chars)
- if clean_value != key_name:
- raise exception.InvalidKeypair(
- reason=_("Keypair name contains unsafe characters"))
-
- try:
- utils.check_string_length(key_name, min_length=1, max_length=255)
- except exception.InvalidInput:
- raise exception.InvalidKeypair(
- reason=_('Keypair name must be string and between '
- '1 and 255 characters long'))
+ def _check_key_pair_quotas(self, context, user_id, key_name, key_type):
try:
objects.Quotas.check_deltas(context, {'key_pairs': 1}, user_id)
+ local_limit.enforce_db_limit(context, local_limit.KEY_PAIRS,
+ entity_scope=user_id, delta=1)
+ except exception.KeypairLimitExceeded:
+ raise
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
@@ -6541,7 +6764,7 @@ class KeypairAPI:
def import_key_pair(self, context, user_id, key_name, public_key,
key_type=keypair_obj.KEYPAIR_TYPE_SSH):
"""Import a key pair using an existing public key."""
- self._validate_new_key_pair(context, user_id, key_name, key_type)
+ self._check_key_pair_quotas(context, user_id, key_name, key_type)
self._notify(context, 'import.start', key_name)
@@ -6576,7 +6799,7 @@ class KeypairAPI:
def create_key_pair(self, context, user_id, key_name,
key_type=keypair_obj.KEYPAIR_TYPE_SSH):
"""Create a new key pair."""
- self._validate_new_key_pair(context, user_id, key_name, key_type)
+ self._check_key_pair_quotas(context, user_id, key_name, key_type)
keypair = objects.KeyPair(context)
keypair.user_id = user_id
@@ -6606,6 +6829,15 @@ class KeypairAPI:
if CONF.quota.recheck_quota:
try:
objects.Quotas.check_deltas(context, {'key_pairs': 0}, user_id)
+ # TODO(johngarbutt) do we really need this recheck?
+ # The quota rechecking of limits is really just to protect
+ # against denial of service attacks that aim to fill up the
+ # database. Its usefulness could be debated.
+ local_limit.enforce_db_limit(context, local_limit.KEY_PAIRS,
+ entity_scope=user_id, delta=0)
+ except exception.KeypairLimitExceeded:
+ with excutils.save_and_reraise_exception():
+ keypair.destroy()
except exception.OverQuota:
keypair.destroy()
raise exception.KeypairLimitExceeded()
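
The keypair changes above layer the new per-user unified limit check on top
of the legacy quota check, and then re-check with a delta of 0 after
creation, destroying the record if the recheck fails. A toy sketch of that
check / create / recheck flow, with made-up limit values:

class LimitExceeded(Exception):
    pass


class ToyQuota:
    def __init__(self, limit):
        self.limit = limit
        self.used = 0

    def check(self, delta):
        # delta=1 before creating, delta=0 to re-check after racing creates
        if self.used + delta > self.limit:
            raise LimitExceeded('%d > %d' % (self.used + delta, self.limit))


quota = ToyQuota(limit=2)
created = []
for name in ('a', 'b', 'c'):
    try:
        quota.check(delta=1)            # pre-create check
        created.append(name)
        quota.used += 1
        quota.check(delta=0)            # recheck to catch races
    except LimitExceeded as exc:
        if name in created:
            created.remove(name)        # roll back, like keypair.destroy()
            quota.used -= 1
        print('%s rejected: %s' % (name, exc))
print('kept:', created)
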
diff --git a/nova/compute/claims.py b/nova/compute/claims.py
index 79e8f2f012..490b418081 100644
--- a/nova/compute/claims.py
+++ b/nova/compute/claims.py
@@ -124,7 +124,13 @@ class Claim(NopClaim):
pci_requests = self._pci_requests
if pci_requests.requests:
stats = self.tracker.pci_tracker.stats
- if not stats.support_requests(pci_requests.requests):
+ if not stats.support_requests(
+ pci_requests.requests,
+ # We explicitly signal that we are _after_ the scheduler made
+ # allocations in placement and therefore pci_requests.requests
+                # carry their own placement provider mapping information
+ provider_mapping=None,
+ ):
return _('Claim pci failed')
def _test_numa_topology(self, compute_node, limit):
@@ -139,12 +145,17 @@ class Claim(NopClaim):
if pci_requests.requests:
pci_stats = self.tracker.pci_tracker.stats
- instance_topology = (
- hardware.numa_fit_instance_to_host(
- host_topology, requested_topology,
- limits=limit,
- pci_requests=pci_requests.requests,
- pci_stats=pci_stats))
+ instance_topology = hardware.numa_fit_instance_to_host(
+ host_topology,
+ requested_topology,
+ limits=limit,
+ pci_requests=pci_requests.requests,
+ pci_stats=pci_stats,
+ # We explicitly signal that we are _after_ the scheduler made
+ # allocations in placement and therefore pci_requests.requests
+                # carry their own placement provider mapping information
+ provider_mapping=None,
+ )
if requested_topology and not instance_topology:
if pci_requests.requests:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index b066b6cc01..5c42aa4d89 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -31,6 +31,7 @@ import contextlib
import copy
import functools
import inspect
+import math
import sys
import time
import traceback
@@ -83,6 +84,7 @@ from nova.objects import external_event as external_event_obj
from nova.objects import fields
from nova.objects import instance as obj_instance
from nova.objects import migrate_data as migrate_data_obj
+from nova.objects import service as service_obj
from nova.pci import request as pci_req_module
from nova.pci import whitelist
from nova import safe_utils
@@ -95,6 +97,7 @@ from nova.virt import configdrive
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import hardware
+import nova.virt.node
from nova.virt import storage_users
from nova.virt import virtapi
from nova.volume import cinder
@@ -402,6 +405,79 @@ class ComputeVirtAPI(virtapi.VirtAPI):
def _default_error_callback(self, event_name, instance):
raise exception.NovaException(_('Instance event failed'))
+ class _InstanceEvent:
+ EXPECTED = "expected"
+ WAITING = "waiting"
+ RECEIVED = "received"
+ RECEIVED_EARLY = "received early"
+ TIMED_OUT = "timed out"
+ RECEIVED_NOT_PROCESSED = "received but not processed"
+
+ def __init__(self, name: str, event: eventlet.event.Event) -> None:
+ self.name = name
+ self.event = event
+ self.status = self.EXPECTED
+ self.wait_time = None
+
+ def mark_as_received_early(self) -> None:
+ self.status = self.RECEIVED_EARLY
+
+ def is_received_early(self) -> bool:
+ return self.status == self.RECEIVED_EARLY
+
+ def _update_status_no_wait(self):
+ if self.status == self.EXPECTED and self.event.ready():
+ self.status = self.RECEIVED_NOT_PROCESSED
+
+ def wait(self) -> 'objects.InstanceExternalEvent':
+ self.status = self.WAITING
+ try:
+ with timeutils.StopWatch() as sw:
+ instance_event = self.event.wait()
+ except eventlet.timeout.Timeout:
+ self.status = self.TIMED_OUT
+ self.wait_time = sw.elapsed()
+
+ raise
+
+ self.status = self.RECEIVED
+ self.wait_time = sw.elapsed()
+ return instance_event
+
+ def __str__(self) -> str:
+ self._update_status_no_wait()
+ if self.status == self.EXPECTED:
+ return f"{self.name}: expected but not received"
+ if self.status == self.RECEIVED:
+ return (
+ f"{self.name}: received after waiting "
+ f"{self.wait_time:.2f} seconds")
+ if self.status == self.TIMED_OUT:
+ return (
+ f"{self.name}: timed out after "
+ f"{self.wait_time:.2f} seconds")
+ return f"{self.name}: {self.status}"
+
+ @staticmethod
+ def _wait_for_instance_events(
+ instance: 'objects.Instance',
+ events: dict,
+ error_callback: ty.Callable,
+ ) -> None:
+ for event_name, event in events.items():
+ if event.is_received_early():
+ continue
+ else:
+ actual_event = event.wait()
+ if actual_event.status == 'completed':
+ continue
+ # If we get here, we have an event that was not completed,
+ # nor skipped via exit_wait_early(). Decide whether to
+ # keep waiting by calling the error_callback() hook.
+ decision = error_callback(event_name, instance)
+ if decision is False:
+ break
+
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
@@ -454,9 +530,10 @@ class ComputeVirtAPI(virtapi.VirtAPI):
name, tag = event_name
event_name = objects.InstanceExternalEvent.make_key(name, tag)
try:
- events[event_name] = (
+ event = (
self._compute.instance_events.prepare_for_instance_event(
instance, name, tag))
+ events[event_name] = self._InstanceEvent(event_name, event)
except exception.NovaException:
error_callback(event_name, instance)
# NOTE(danms): Don't wait for any of the events. They
@@ -468,25 +545,35 @@ class ComputeVirtAPI(virtapi.VirtAPI):
except self._exit_early_exc as e:
early_events = set([objects.InstanceExternalEvent.make_key(n, t)
for n, t in e.events])
- else:
- early_events = set([])
+
+        # If there are expected events that were received early, mark them
+        # so they won't be waited for later
+ for early_event_name in early_events:
+ if early_event_name in events:
+ events[early_event_name].mark_as_received_early()
sw = timeutils.StopWatch()
sw.start()
- with eventlet.timeout.Timeout(deadline):
- for event_name, event in events.items():
- if event_name in early_events:
- continue
- else:
- actual_event = event.wait()
- if actual_event.status == 'completed':
- continue
- # If we get here, we have an event that was not completed,
- # nor skipped via exit_wait_early(). Decide whether to
- # keep waiting by calling the error_callback() hook.
- decision = error_callback(event_name, instance)
- if decision is False:
- break
+ try:
+ with eventlet.timeout.Timeout(deadline):
+ self._wait_for_instance_events(
+ instance, events, error_callback)
+ except eventlet.timeout.Timeout:
+ LOG.warning(
+ 'Timeout waiting for %(events)s for instance with '
+ 'vm_state %(vm_state)s and task_state %(task_state)s. '
+ 'Event states are: %(event_states)s',
+ {
+ 'events': list(events.keys()),
+ 'vm_state': instance.vm_state,
+ 'task_state': instance.task_state,
+ 'event_states':
+ ', '.join([str(event) for event in events.values()]),
+ },
+ instance=instance)
+
+ raise
+
LOG.debug('Instance event wait completed in %i seconds for %s',
sw.elapsed(),
','.join(x[0] for x in event_names),
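
The refactor above wraps each expected external event in a small
state-tracking object so that a timeout can be logged with the precise state
of every event (expected, waiting, received, timed out). A condensed sketch
of the same idea using threading primitives instead of eventlet; the event
names and timeouts are illustrative only:

import threading
import time


class TrackedEvent:
    def __init__(self, name):
        self.name = name
        self.event = threading.Event()
        self.status = 'expected'
        self.wait_time = None

    def wait(self, timeout):
        self.status = 'waiting'
        start = time.monotonic()
        received = self.event.wait(timeout)
        self.wait_time = time.monotonic() - start
        self.status = 'received' if received else 'timed out'
        return self.status

    def __str__(self):
        return '%s: %s' % (self.name, self.status)


events = {name: TrackedEvent(name)
          for name in ('network-vif-plugged', 'volume-reimaged')}
# Simulate only one of the two expected events arriving in time.
threading.Timer(0.1, events['network-vif-plugged'].event.set).start()

for ev in events.values():
    ev.wait(timeout=0.3)
print('Event states:', ', '.join(str(e) for e in events.values()))
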
@@ -531,13 +618,18 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
- target = messaging.Target(version='6.0')
+ target = messaging.Target(version='6.2')
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
# We want the ComputeManager, ResourceTracker and ComputeVirtAPI all
# using the same instance of SchedulerReportClient which has the
# ProviderTree cache for this compute service.
+ # NOTE(danms): We do not use the global placement client
+ # singleton here, because the above-mentioned stack of objects
+        # maintains local state in the client. Thus, keeping our own
+ # private object for that stack avoids any potential conflict
+ # with other users in our process outside of the above.
self.reportclient = report.SchedulerReportClient()
self.virtapi = ComputeVirtAPI(self)
self.network_api = neutron.API()
@@ -1157,6 +1249,20 @@ class ComputeManager(manager.Manager):
'updated.', instance=instance)
self._set_instance_obj_error_state(instance)
return
+ except exception.PciDeviceNotFoundById:
+            # This is bug 1981813 where the bound port vnic_type has changed
+            # from direct to macvtap. Nova does not support that change and
+            # an ERROR was already logged when it was detected during
+            # _heal_instance_info_cache. Now we log an ERROR again and skip
+            # plugging the vifs, but let the service startup continue to
+            # init the other instances.
+ LOG.exception(
+ 'Virtual interface plugging failed for instance. Probably the '
+ 'vnic_type of the bound port has been changed. Nova does not '
+                'support such a change.',
+ instance=instance
+ )
+ return
if instance.task_state == task_states.RESIZE_MIGRATING:
# We crashed during resize/migration, so roll back for safety
@@ -1366,40 +1472,120 @@ class ComputeManager(manager.Manager):
:return: a dict of ComputeNode objects keyed by the UUID of the given
node.
"""
- nodes_by_uuid = {}
try:
- node_names = self.driver.get_available_nodes()
+ node_ids = self.driver.get_nodenames_by_uuid()
except exception.VirtDriverNotReady:
LOG.warning(
"Virt driver is not ready. If this is the first time this "
- "service is starting on this host, then you can ignore this "
- "warning.")
+ "service is starting on this host, then you can ignore "
+ "this warning.")
return {}
- for node_name in node_names:
- try:
- node = objects.ComputeNode.get_by_host_and_nodename(
- context, self.host, node_name)
- nodes_by_uuid[node.uuid] = node
- except exception.ComputeHostNotFound:
- LOG.warning(
- "Compute node %s not found in the database. If this is "
- "the first time this service is starting on this host, "
- "then you can ignore this warning.", node_name)
- return nodes_by_uuid
+ nodes = objects.ComputeNodeList.get_all_by_uuids(context,
+ list(node_ids.keys()))
+ if not nodes:
+ # NOTE(danms): This should only happen if the compute_id is
+ # pre-provisioned on a host that has never started.
+ LOG.warning('Compute nodes %s for host %s were not found in the '
+ 'database. If this is the first time this service is '
+ 'starting on this host, then you can ignore this '
+ 'warning.',
+ list(node_ids.keys()), self.host)
+ return {}
+
+ for node in nodes:
+ if node.hypervisor_hostname != node_ids.get(node.uuid):
+ raise exception.InvalidConfiguration(
+ ('My compute node %s has hypervisor_hostname %s '
+ 'but virt driver reports it should be %s. Possible '
+ 'rename detected, refusing to start!') % (
+ node.uuid, node.hypervisor_hostname,
+ node_ids.get(node.uuid)))
+
+ return {n.uuid: n for n in nodes}
+
+ def _ensure_existing_node_identity(self, service_ref):
+ """If we are upgrading from an older service version, we need
+ to write our node identity uuid (if not already done) based on
+ nodes assigned to us in the database.
+ """
+ if 'ironic' in CONF.compute_driver.lower():
+ # We do not persist a single local node identity for
+ # ironic
+ return
- def init_host(self):
+ if service_ref.version >= service_obj.NODE_IDENTITY_VERSION:
+ # Already new enough, nothing to do here, but make sure that we
+ # have a UUID file already, as this is not our first time starting.
+ if nova.virt.node.read_local_node_uuid() is None:
+ raise exception.InvalidConfiguration(
+ ('No local node identity found, but this is not our '
+ 'first startup on this host. Refusing to start after '
+ 'potentially having lost that state!'))
+ return
+
+ if nova.virt.node.read_local_node_uuid():
+ # We already have a local node identity, no migration needed
+ return
+
+ context = nova.context.get_admin_context()
+ db_nodes = objects.ComputeNodeList.get_all_by_host(context, self.host)
+ if not db_nodes:
+ # This means we have no nodes in the database (that we
+ # know of) and thus have no need to record an existing
+            # UUID. That is probably strange, so refuse to start.
+ raise exception.InvalidConfiguration(
+ ('Upgrading from service version %i but found no '
+ 'nodes in the database for host %s to persist '
+ 'locally; Possible rename detected, '
+ 'refusing to start!') % (
+ service_ref.version, self.host))
+
+ if len(db_nodes) > 1:
+ # If this happens we can't do the right thing, so raise an
+ # exception to abort host startup
+            LOG.warning('Multiple nodes found in the database for host %s; '
+                        'unable to persist local node identity automatically',
+                        self.host)
+ raise exception.InvalidConfiguration(
+ 'Multiple nodes found in database, manual node uuid '
+ 'configuration required')
+
+ nova.virt.node.write_local_node_uuid(db_nodes[0].uuid)
+
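
The upgrade helper above persists the compute node's UUID locally so that
later startups can detect a host rename instead of silently creating a
duplicate node. A hedged sketch of what reading and writing such an identity
file can look like; the path and helper names below are made up (the real
helpers live in nova.virt.node):

import uuid

IDENTITY_FILE = '/tmp/example-compute-id'   # illustrative path only


def read_local_node_uuid():
    try:
        with open(IDENTITY_FILE) as f:
            return f.read().strip() or None
    except FileNotFoundError:
        return None


def write_local_node_uuid(node_uuid):
    # Refuse to silently replace an existing, different identity.
    existing = read_local_node_uuid()
    if existing not in (None, node_uuid):
        raise RuntimeError('node identity already recorded: %s' % existing)
    with open(IDENTITY_FILE, 'w') as f:
        f.write(node_uuid)


node = read_local_node_uuid() or str(uuid.uuid4())
write_local_node_uuid(node)
print('local node identity:', read_local_node_uuid())
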
+ def _check_for_host_rename(self, nodes_by_uuid):
+ if 'ironic' in CONF.compute_driver.lower():
+ # Ironic (currently) rebalances nodes at various times, and as
+ # such, nodes being discovered as assigned to this host with a
+ # different hostname is not surprising. Skip this check for
+ # ironic.
+ return
+ for node in nodes_by_uuid.values():
+ if node.host != self.host:
+ raise exception.InvalidConfiguration(
+ 'My node %s has host %r but my host is %r; '
+ 'Possible rename detected, refusing to start!' % (
+ node.uuid, node.host, self.host))
+ LOG.debug('Verified node %s matches my host %s',
+ node.uuid, self.host)
+
+ def init_host(self, service_ref):
"""Initialization for a standalone compute service."""
- if CONF.pci.passthrough_whitelist:
- # Simply loading the PCI passthrough whitelist will do a bunch of
+ if service_ref:
+ # If we are an existing service, check to see if we need
+ # to record a locally-persistent node identity because
+ # we have upgraded from a previous version.
+ self._ensure_existing_node_identity(service_ref)
+
+ if CONF.pci.device_spec:
+ # Simply loading the PCI passthrough spec will do a bunch of
# validation that would otherwise wait until the PciDevTracker is
# constructed when updating available resources for the compute
# node(s) in the resource tracker, effectively killing that task.
- # So load up the whitelist when starting the compute service to
- # flush any invalid configuration early so we can kill the service
+ # So load up the spec when starting the compute service to
+ # flush any invalid configuration early, so we can kill the service
# if the configuration is wrong.
- whitelist.Whitelist(CONF.pci.passthrough_whitelist)
+ whitelist.Whitelist(CONF.pci.device_spec)
nova.conf.neutron.register_dynamic_opts(CONF)
# Even if only libvirt uses them, make it available for all drivers
@@ -1420,7 +1606,18 @@ class ComputeManager(manager.Manager):
raise exception.InvalidConfiguration(msg)
self.driver.init_host(host=self.host)
+
+ # NOTE(gibi): At this point the compute_nodes of the resource tracker
+ # has not been populated yet so we cannot rely on the resource tracker
+ # here.
context = nova.context.get_admin_context()
+ nodes_by_uuid = self._get_nodes(context)
+
+ # NOTE(danms): Check for a possible host rename and abort
+ # startup before we start mucking with instances we think are
+ # ours.
+ self._check_for_host_rename(nodes_by_uuid)
+
instances = objects.InstanceList.get_by_host(
context, self.host,
expected_attrs=['info_cache', 'metadata', 'numa_topology'])
@@ -1430,17 +1627,12 @@ class ComputeManager(manager.Manager):
self._validate_pinning_configuration(instances)
self._validate_vtpm_configuration(instances)
- # NOTE(gibi): At this point the compute_nodes of the resource tracker
- # has not been populated yet so we cannot rely on the resource tracker
- # here.
# NOTE(gibi): If ironic and vcenter virt driver slow start time
# becomes problematic here then we should consider adding a config
# option or a driver flag to tell us if we should thread
# _destroy_evacuated_instances and
# _error_out_instances_whose_build_was_interrupted out in the
# background on startup
- nodes_by_uuid = self._get_nodes(context)
-
try:
# checking that instance was not already evacuated to other host
evacuated_instances = self._destroy_evacuated_instances(
@@ -1633,27 +1825,32 @@ class ComputeManager(manager.Manager):
# hosts. This is a validation step to make sure that starting the
# instance here doesn't violate the policy.
if scheduler_hints is not None:
- # only go through here if scheduler_hints is provided, even if it
- # is empty.
+ # only go through here if scheduler_hints is provided,
+ # even if it is empty.
group_hint = scheduler_hints.get('group')
if not group_hint:
return
else:
- # The RequestSpec stores scheduler_hints as key=list pairs so
- # we need to check the type on the value and pull the single
- # entry out. The API request schema validates that
+ # The RequestSpec stores scheduler_hints as key=list pairs
+ # so we need to check the type on the value and pull the
+ # single entry out. The API request schema validates that
# the 'group' hint is a single value.
if isinstance(group_hint, list):
group_hint = group_hint[0]
-
- group = objects.InstanceGroup.get_by_hint(context, group_hint)
+ try:
+ group = objects.InstanceGroup.get_by_hint(
+ context, group_hint
+ )
+ except exception.InstanceGroupNotFound:
+ return
else:
# TODO(ganso): a call to DB can be saved by adding request_spec
# to rpcapi payload of live_migration, pre_live_migration and
# check_can_live_migrate_destination
try:
group = objects.InstanceGroup.get_by_instance_uuid(
- context, instance.uuid)
+ context, instance.uuid
+ )
except exception.InstanceGroupNotFound:
return
@@ -1674,8 +1871,7 @@ class ComputeManager(manager.Manager):
migrations = (
objects.MigrationList.get_in_progress_by_host_and_node(
context, self.host, nodename))
- migration_vm_uuids = set([mig['instance_uuid']
- for mig in migrations])
+ migration_vm_uuids = {mig.instance_uuid for mig in migrations}
total_instances = migration_vm_uuids | ins_on_host
@@ -1937,6 +2133,7 @@ class ComputeManager(manager.Manager):
ephemerals = []
swap = []
block_device_mapping = []
+ image = []
for device in block_devices:
if block_device.new_format_is_ephemeral(device):
@@ -1948,8 +2145,12 @@ class ComputeManager(manager.Manager):
if driver_block_device.is_block_device_mapping(device):
block_device_mapping.append(device)
+ if driver_block_device.is_local_image(device):
+ image.append(device)
+
self._default_device_names_for_instance(instance,
root_device_name,
+ image,
ephemerals,
swap,
block_device_mapping)
@@ -2358,10 +2559,12 @@ class ComputeManager(manager.Manager):
if provider_mapping:
try:
- compute_utils\
- .update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
except (exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
) as e:
@@ -2623,10 +2826,12 @@ class ComputeManager(manager.Manager):
block_device_mapping)
resources['block_device_info'] = block_device_info
except (exception.InstanceNotFound,
- exception.UnexpectedDeletingTaskStateError):
+ exception.UnexpectedDeletingTaskStateError,
+ exception.ComputeResourcesUnavailable):
with excutils.save_and_reraise_exception():
self._build_resources_cleanup(instance, network_info)
except (exception.UnexpectedTaskStateError,
+ exception.InstanceUnacceptable,
exception.OverQuota, exception.InvalidBDM) as e:
self._build_resources_cleanup(instance, network_info)
raise exception.BuildAbortException(instance_uuid=instance.uuid,
@@ -2920,6 +3125,7 @@ class ComputeManager(manager.Manager):
self._try_deallocate_network(context, instance, requested_networks)
timer.restart()
+ connector = None
for bdm in vol_bdms:
try:
if bdm.attachment_id:
@@ -2928,7 +3134,8 @@ class ComputeManager(manager.Manager):
else:
# NOTE(vish): actual driver detach done in driver.destroy,
# so just tell cinder that we are done with it.
- connector = self.driver.get_volume_connector(instance)
+ if connector is None:
+ connector = self.driver.get_volume_connector(instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
@@ -3297,18 +3504,124 @@ class ComputeManager(manager.Manager):
migration.status = status
migration.save()
+ @staticmethod
+ def _reimage_failed_callback(event_name, instance):
+ msg = ('Cinder reported failure during reimaging '
+ 'with %(event)s for instance %(uuid)s')
+ msg_args = {'event': event_name, 'uuid': instance.uuid}
+ LOG.error(msg, msg_args)
+ raise exception.ReimageException(msg % msg_args)
+
+ def _detach_root_volume(self, context, instance, root_bdm):
+ volume_id = root_bdm.volume_id
+ mp = root_bdm.device_name
+ old_connection_info = jsonutils.loads(root_bdm.connection_info)
+ try:
+ self.driver.detach_volume(context, old_connection_info,
+ instance, root_bdm.device_name)
+ except exception.DiskNotFound as err:
+ LOG.warning('Ignoring DiskNotFound exception while '
+ 'detaching volume %(volume_id)s from '
+ '%(mp)s : %(err)s',
+ {'volume_id': volume_id, 'mp': mp,
+ 'err': err}, instance=instance)
+ except exception.DeviceDetachFailed:
+ with excutils.save_and_reraise_exception():
+ LOG.warning('Guest refused to detach volume %(vol)s',
+ {'vol': volume_id}, instance=instance)
+ self.volume_api.roll_detaching(context, volume_id)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception('Failed to detach volume '
+ '%(volume_id)s from %(mp)s',
+ {'volume_id': volume_id, 'mp': mp},
+ instance=instance)
+ self.volume_api.roll_detaching(context, volume_id)
+
+ def _rebuild_volume_backed_instance(self, context, instance, bdms,
+ image_id):
+ # Get root bdm and attachment ID associated to it
+ root_bdm = compute_utils.get_root_bdm(context, instance, bdms)
+ old_attachment_id = root_bdm.attachment_id
+
+        # Create a new attachment and delete the previous attachment.
+        # We create the new attachment first to keep the volume in a
+        # reserved state after the old attachment is deleted and to avoid
+        # any races between the attachment create and delete.
+ attachment_id = None
+ try:
+ attachment_id = self.volume_api.attachment_create(
+ context, root_bdm.volume_id, instance.uuid)['id']
+ self._detach_root_volume(context, instance, root_bdm)
+ root_bdm.attachment_id = attachment_id
+ root_bdm.save()
+ self.volume_api.attachment_delete(context,
+ old_attachment_id)
+ except exception.InstanceNotFound:
+ # This means we failed to save the new attachment because
+ # the instance is deleted, so (try to) delete it and abort.
+ try:
+ self.volume_api.attachment_delete(context,
+ attachment_id)
+ except cinder_exception.ClientException:
+ LOG.error('Failed to delete new attachment %s',
+ attachment_id)
+ msg = _('Failed to rebuild volume backed instance.')
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+ except cinder_exception.ClientException:
+ if attachment_id:
+ LOG.error('Failed to delete old attachment %s',
+ old_attachment_id)
+ else:
+ LOG.error('Failed to create new attachment')
+ msg = _('Failed to rebuild volume backed instance.')
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+ events = [('volume-reimaged', root_bdm.volume_id)]
+
+ # Get the image requested for rebuild
+ try:
+ image = self.image_api.get(context, image_id)
+ except exception.ImageNotFound:
+ msg = _('Image %s not found.') % image_id
+ LOG.error(msg)
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+ image_size = int(math.ceil(float(image.get('size')) / units.Gi))
+ deadline = CONF.reimage_timeout_per_gb * image_size
+ error_cb = self._reimage_failed_callback
+
+ # Call cinder to perform reimage operation and wait until an
+ # external event is triggered.
+ try:
+ with self.virtapi.wait_for_instance_event(instance, events,
+ deadline=deadline,
+ error_callback=error_cb):
+ self.volume_api.reimage_volume(
+ context, root_bdm.volume_id, image_id,
+ reimage_reserved=True)
+
+ except Exception as ex:
+ LOG.error('Failed to rebuild volume backed instance: %s',
+ str(ex), instance=instance)
+ msg = _('Failed to rebuild volume backed instance.')
+ raise exception.BuildAbortException(
+ instance_uuid=instance.uuid, reason=msg)
+
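
_rebuild_volume_backed_instance above deliberately creates the replacement
Cinder attachment before deleting the old one, so the root volume never
leaves its reserved state in between, and it rolls the new attachment back
if the swap fails. A small generic sketch of that ordering with toy objects;
all names here are illustrative:

class ToyVolumeAPI:
    def __init__(self):
        self.attachments = {'old-attach'}

    def attachment_create(self, volume_id, instance_id):
        new = 'attach-%d' % len(self.attachments)
        self.attachments.add(new)
        return new

    def attachment_delete(self, attachment_id):
        self.attachments.discard(attachment_id)


def swap_root_attachment(api, bdm):
    new_att = api.attachment_create('vol-1', 'inst-1')  # stays reserved
    try:
        old_att = bdm['attachment_id']
        bdm['attachment_id'] = new_att          # persist the BDM here
        api.attachment_delete(old_att)          # only now drop the old one
    except Exception:
        api.attachment_delete(new_att)          # roll back the new one
        raise
    return new_att


api = ToyVolumeAPI()
bdm = {'attachment_id': 'old-attach'}
print(swap_root_attachment(api, bdm), api.attachments)
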
def _rebuild_default_impl(
self, context, instance, image_meta, injected_files,
admin_password, allocations, bdms, detach_block_devices,
attach_block_devices, network_info=None, evacuate=False,
block_device_info=None, preserve_ephemeral=False,
- accel_uuids=None):
+ accel_uuids=None, reimage_boot_volume=False):
if preserve_ephemeral:
# The default code path does not support preserving ephemeral
# partitions.
raise exception.PreserveEphemeralNotSupported()
accel_info = []
+ detach_root_bdm = not reimage_boot_volume
if evacuate:
if instance.flavor.extra_specs.get('accel:device_profile'):
try:
@@ -3320,13 +3633,36 @@ class ComputeManager(manager.Manager):
msg = _('Failure getting accelerator resources.')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
- detach_block_devices(context, bdms)
+ detach_block_devices(context, bdms,
+ detach_root_bdm=detach_root_bdm)
else:
self._power_off_instance(instance, clean_shutdown=True)
- detach_block_devices(context, bdms)
- self.driver.destroy(context, instance,
- network_info=network_info,
- block_device_info=block_device_info)
+ detach_block_devices(context, bdms,
+ detach_root_bdm=detach_root_bdm)
+ if reimage_boot_volume:
+                # Previously, the calls reaching here were for image
+                # backed instance rebuilds and didn't have a root BDM,
+                # so now we need to handle the root BDM case as well.
+                # For the root BDM, we are doing attach/detach operations
+                # manually as we want to maintain a 'reserved' state
+                # throughout the reimage process on the cinder side, so
+                # we are excluding the root BDM from certain operations
+                # here, i.e. deleting its mapping before the destroy call.
+ block_device_info_copy = copy.deepcopy(block_device_info)
+ root_bdm = compute_utils.get_root_bdm(context, instance, bdms)
+ mapping = block_device_info_copy["block_device_mapping"]
+ # drop root bdm from the mapping
+ mapping = [
+ bdm for bdm in mapping
+ if bdm["volume_id"] != root_bdm.volume_id
+ ]
+ self.driver.destroy(context, instance,
+ network_info=network_info,
+ block_device_info=block_device_info_copy)
+ else:
+ self.driver.destroy(context, instance,
+ network_info=network_info,
+ block_device_info=block_device_info)
try:
accel_info = self._get_accel_info(context, instance)
except Exception as exc:
@@ -3335,6 +3671,12 @@ class ComputeManager(manager.Manager):
msg = _('Failure getting accelerator resources.')
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=msg)
+ if reimage_boot_volume:
+ is_volume_backed = compute_utils.is_volume_backed_instance(
+ context, instance, bdms)
+ if is_volume_backed:
+ self._rebuild_volume_backed_instance(
+ context, instance, bdms, image_meta.id)
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
@@ -3369,7 +3711,8 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
- scheduled_node, limits, request_spec, accel_uuids):
+ scheduled_node, limits, request_spec, accel_uuids,
+ reimage_boot_volume, target_state):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -3401,6 +3744,10 @@ class ComputeManager(manager.Manager):
specified by the user, this will be None
:param request_spec: a RequestSpec object used to schedule the instance
:param accel_uuids: a list of cyborg ARQ uuids
+ :param reimage_boot_volume: Boolean to specify whether the user has
+ explicitly requested to rebuild a boot
+ volume
+ :param target_state: Set a target state for the evacuated instance.
"""
# recreate=True means the instance is being evacuated from a failed
@@ -3444,9 +3791,21 @@ class ComputeManager(manager.Manager):
try:
compute_node = self._get_compute_info(context, self.host)
scheduled_node = compute_node.hypervisor_hostname
- except exception.ComputeHostNotFound:
+ except exception.ComputeHostNotFound as e:
+ # This means we were asked to rebuild one of our own
+ # instances, or another instance as a target of an
+ # evacuation, but we are unable to find a matching compute
+ # node.
LOG.exception('Failed to get compute_info for %s',
self.host)
+ self._set_migration_status(migration, 'failed')
+ self._notify_instance_rebuild_error(context, instance, e,
+ bdms)
+ raise exception.InstanceFaultRollback(
+ inner_exception=exception.BuildAbortException(
+ instance_uuid=instance.uuid,
+ reason=e.format_message()))
+
else:
scheduled_node = instance.node
@@ -3465,7 +3824,8 @@ class ComputeManager(manager.Manager):
image_meta, injected_files, new_pass, orig_sys_metadata,
bdms, evacuate, on_shared_storage, preserve_ephemeral,
migration, request_spec, allocs, rebuild_claim,
- scheduled_node, limits, accel_uuids)
+ scheduled_node, limits, accel_uuids, reimage_boot_volume,
+ target_state)
except (exception.ComputeResourcesUnavailable,
exception.RescheduledException) as e:
if isinstance(e, exception.ComputeResourcesUnavailable):
@@ -3524,7 +3884,8 @@ class ComputeManager(manager.Manager):
self, context, instance, orig_image_ref, image_meta,
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
- allocations, rebuild_claim, scheduled_node, limits, accel_uuids):
+ allocations, rebuild_claim, scheduled_node, limits, accel_uuids,
+ reimage_boot_volume, target_state):
"""Helper to avoid deep nesting in the top-level method."""
provider_mapping = None
@@ -3532,10 +3893,12 @@ class ComputeManager(manager.Manager):
provider_mapping = self._get_request_group_mapping(request_spec)
if provider_mapping:
- compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
claim_context = rebuild_claim(
context, instance, scheduled_node, allocations,
@@ -3546,7 +3909,8 @@ class ComputeManager(manager.Manager):
context, instance, orig_image_ref, image_meta, injected_files,
new_pass, orig_sys_metadata, bdms, evacuate, on_shared_storage,
preserve_ephemeral, migration, request_spec, allocations,
- provider_mapping, accel_uuids)
+ provider_mapping, accel_uuids, reimage_boot_volume,
+ target_state)
@staticmethod
def _get_image_name(image_meta):
@@ -3560,10 +3924,18 @@ class ComputeManager(manager.Manager):
injected_files, new_pass, orig_sys_metadata, bdms, evacuate,
on_shared_storage, preserve_ephemeral, migration, request_spec,
allocations, request_group_resource_providers_mapping,
- accel_uuids):
+ accel_uuids, reimage_boot_volume, target_state):
orig_vm_state = instance.vm_state
if evacuate:
+ if target_state and orig_vm_state != vm_states.ERROR:
+ # This will ensure that at destination the instance will have
+ # the desired state.
+ if target_state not in vm_states.ALLOW_TARGET_STATES:
+ raise exception.InstanceEvacuateNotSupportedTargetState(
+ target_state=target_state)
+ orig_vm_state = target_state
+
if request_spec:
# NOTE(gibi): Do a late check of server group policy as
# parallel scheduling could violate such policy. This will
@@ -3665,8 +4037,23 @@ class ComputeManager(manager.Manager):
self._get_instance_block_device_info(
context, instance, bdms=bdms)
- def detach_block_devices(context, bdms):
+ def detach_block_devices(context, bdms, detach_root_bdm=True):
for bdm in bdms:
+                # Previously, the calls made to this method by the rebuild
+                # instance operation were for image backed instances, which
+                # assumed we only had attached volumes and no root BDM.
+                # Now we need to handle the root BDM case, which we are
+                # doing manually, so we skip the attachment create/delete
+                # calls here.
+                # The detach_root_bdm parameter is only passed while
+                # rebuilding a volume backed instance, so we don't have
+                # to worry about other callers as they won't satisfy this
+                # condition.
+                # For the evacuate case, detach_root_bdm is always True
+                # since there is no reimage_boot_volume parameter, so this
+                # will not be executed.
+ if not detach_root_bdm and bdm.is_root:
+ continue
if bdm.is_volume:
# NOTE (ildikov): Having the attachment_id set in the BDM
# means that it's the new Cinder attach/detach flow
@@ -3702,7 +4089,8 @@ class ComputeManager(manager.Manager):
network_info=network_info,
preserve_ephemeral=preserve_ephemeral,
evacuate=evacuate,
- accel_uuids=accel_uuids)
+ accel_uuids=accel_uuids,
+ reimage_boot_volume=reimage_boot_volume)
try:
with instance.mutated_migration_context():
self.driver.rebuild(**kwargs)
@@ -5142,10 +5530,12 @@ class ComputeManager(manager.Manager):
if provider_mapping:
try:
- compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
- context, self.reportclient,
- instance.pci_requests.requests, provider_mapping)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mapping,
+ )
except (exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
) as e:
@@ -5248,7 +5638,7 @@ class ComputeManager(manager.Manager):
clean_shutdown)
except exception.BuildAbortException:
# NOTE(gibi): We failed
- # update_pci_request_spec_with_allocated_interface_name so
+ # update_pci_request_with_placement_allocations so
# there is no reason to re-schedule. Just revert the allocation
# and fail the migration.
with excutils.save_and_reraise_exception():
@@ -5379,7 +5769,7 @@ class ComputeManager(manager.Manager):
'host (%s).', self.host, instance=instance)
self._send_prep_resize_notifications(
ctxt, instance, fields.NotificationPhase.START, flavor)
- # TODO(mriedem): update_pci_request_spec_with_allocated_interface_name
+ # TODO(mriedem): update_pci_request_with_placement_allocations
# should be called here if the request spec has request group mappings,
# e.g. for things like QoS ports with resource requests. Do it outside
# the try/except so if it raises BuildAbortException we do not attempt
@@ -5729,8 +6119,8 @@ class ComputeManager(manager.Manager):
def _finish_resize(self, context, instance, migration, disk_info,
image_meta, bdms, request_spec):
resize_instance = False # indicates disks have been resized
- old_instance_type_id = migration['old_instance_type_id']
- new_instance_type_id = migration['new_instance_type_id']
+ old_instance_type_id = migration.old_instance_type_id
+ new_instance_type_id = migration.new_instance_type_id
old_flavor = instance.flavor # the current flavor is now old
# NOTE(mriedem): Get the old_vm_state so we know if we should
# power on the instance. If old_vm_state is not set we need to default
@@ -6496,6 +6886,9 @@ class ComputeManager(manager.Manager):
current_power_state = self._get_power_state(instance)
network_info = self.network_api.get_instance_nw_info(context, instance)
+ ports_id = [vif['id'] for vif in network_info]
+ self.network_api.unbind_ports(context, ports_id, detach=False)
+
block_device_info = self._get_instance_block_device_info(context,
instance,
bdms=bdms)
@@ -6626,12 +7019,12 @@ class ComputeManager(manager.Manager):
try:
if provider_mappings:
- update = (
- compute_utils.
- update_pci_request_spec_with_allocated_interface_name)
- update(
- context, self.reportclient, instance.pci_requests.requests,
- provider_mappings)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ instance.pci_requests.requests,
+ provider_mappings,
+ )
accel_info = []
if accel_uuids:
@@ -7611,10 +8004,10 @@ class ComputeManager(manager.Manager):
if not pci_reqs.requests:
return None
- devices = self.rt.claim_pci_devices(
- context, pci_reqs, instance.numa_topology)
-
- if not devices:
+ try:
+ devices = self.rt.claim_pci_devices(
+ context, pci_reqs, instance.numa_topology)
+ except exception.PciDeviceRequestFailed:
LOG.info('Failed to claim PCI devices during interface attach '
'for PCI request %s', pci_reqs, instance=instance)
raise exception.InterfaceAttachPciClaimFailed(
@@ -7711,12 +8104,12 @@ class ComputeManager(manager.Manager):
instance_uuid=instance.uuid) from e
try:
- update = (
- compute_utils.
- update_pci_request_spec_with_allocated_interface_name)
- update(
- context, self.reportclient, pci_reqs.requests,
- provider_mappings)
+ compute_utils.update_pci_request_with_placement_allocations(
+ context,
+ self.reportclient,
+ pci_reqs.requests,
+ provider_mappings,
+ )
except (
exception.AmbiguousResourceProviderForPCIRequest,
exception.UnexpectedResourceProviderNameForPCIRequest
@@ -8046,7 +8439,7 @@ class ComputeManager(manager.Manager):
LOG.info('Destination was ready for NUMA live migration, '
'but source is either too old, or is set to an '
'older upgrade level.', instance=instance)
- if self.network_api.supports_port_binding_extension(ctxt):
+ if self.network_api.has_port_binding_extension(ctxt):
# Create migrate_data vifs if not provided by driver.
if 'vifs' not in migrate_data:
migrate_data.vifs = (
@@ -8181,7 +8574,7 @@ class ComputeManager(manager.Manager):
action=fields.NotificationAction.LIVE_MIGRATION_PRE,
phase=fields.NotificationPhase.START, bdms=bdms)
- connector = self.driver.get_volume_connector(instance)
+ connector = None
try:
for bdm in bdms:
if bdm.is_volume and bdm.attachment_id is not None:
@@ -8195,6 +8588,8 @@ class ComputeManager(manager.Manager):
#
# Also note that attachment_update is not needed as we
# are providing the connector in the create call.
+ if connector is None:
+ connector = self.driver.get_volume_connector(instance)
attach_ref = self.volume_api.attachment_create(
context, bdm.volume_id, bdm.instance_uuid,
connector=connector, mountpoint=bdm.device_name)
@@ -8328,7 +8723,8 @@ class ComputeManager(manager.Manager):
migrate_data.migration = migration
self._rollback_live_migration(context, instance, dest,
migrate_data=migrate_data,
- source_bdms=source_bdms)
+ source_bdms=source_bdms,
+ pre_live_migration=True)
def _do_pre_live_migration_from_source(self, context, dest, instance,
block_migration, migration,
@@ -8486,8 +8882,9 @@ class ComputeManager(manager.Manager):
# host attachment. We fetch BDMs before that to retain connection_info
# and attachment_id relating to the source host for post migration
# cleanup.
- post_live_migration = functools.partial(self._post_live_migration,
- source_bdms=source_bdms)
+ post_live_migration = functools.partial(
+ self._post_live_migration_update_host, source_bdms=source_bdms
+ )
rollback_live_migration = functools.partial(
self._rollback_live_migration, source_bdms=source_bdms)
@@ -8531,7 +8928,8 @@ class ComputeManager(manager.Manager):
# in order to be able to track and abort it in the future.
self._waiting_live_migrations[instance.uuid] = (None, None)
try:
- future = self._live_migration_executor.submit(
+ future = nova.utils.pass_context(
+ self._live_migration_executor.submit,
self._do_live_migration, context, dest, instance,
block_migration, migration, migrate_data)
self._waiting_live_migrations[instance.uuid] = (migration, future)
@@ -8614,15 +9012,41 @@ class ComputeManager(manager.Manager):
migration, future = (
self._waiting_live_migrations.pop(instance.uuid))
if future and future.cancel():
- # If we got here, we've successfully aborted the queued
- # migration and _do_live_migration won't run so we need
- # to set the migration status to cancelled and send the
- # notification. If Future.cancel() fails, it means
- # _do_live_migration is running and the migration status
- # is preparing, and _do_live_migration() itself will attempt
- # to pop the queued migration, hit a KeyError, and rollback,
- # set the migration to cancelled and send the
- # live.migration.abort.end notification.
+ # If we got here, we've successfully dropped a queued
+ # migration from the queue, so _do_live_migration won't run
+            # and we only need to revert minor changes introduced by the
+            # Nova control plane (port bindings, resource allocations and
+            # the instance's PCI devices), restore the VM's state, set the
+ # migration's status to cancelled and send the notification.
+ # If Future.cancel() fails, it means _do_live_migration is
+ # running and the migration status is preparing, and
+ # _do_live_migration() itself will attempt to pop the queued
+ # migration, hit a KeyError, and rollback, set the migration
+ # to cancelled and send the live.migration.abort.end
+ # notification.
+ self._revert_allocation(context, instance, migration)
+ try:
+ # This call will delete any inactive destination host
+ # port bindings.
+ self.network_api.setup_networks_on_host(
+ context, instance, host=migration.dest_compute,
+ teardown=True)
+ except exception.PortBindingDeletionFailed as e:
+ # Removing the inactive port bindings from the destination
+ # host is not critical so just log an error but don't fail.
+ LOG.error(
+ 'Network cleanup failed for destination host %s '
+ 'during live migration rollback. You may need to '
+ 'manually clean up resources in the network service. '
+ 'Error: %s', migration.dest_compute, str(e))
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(
+ 'An error occurred while cleaning up networking '
+ 'during live migration rollback.',
+ instance=instance)
+ instance.task_state = None
+ instance.save(expected_task_state=[task_states.MIGRATING])
self._set_migration_status(migration, 'cancelled')
except KeyError:
migration = objects.Migration.get_by_id(context, migration_id)
@@ -8671,7 +9095,7 @@ class ComputeManager(manager.Manager):
# must be deleted for preparing next block migration
# must be deleted for preparing next live migration w/o shared
# storage
- # vpmem must be cleanped
+ # vpmem must be cleaned
do_cleanup = not migrate_data.is_shared_instance_path or has_vpmem
destroy_disks = not migrate_data.is_shared_block_storage
elif isinstance(migrate_data, migrate_data_obj.HyperVLiveMigrateData):
@@ -8692,7 +9116,7 @@ class ComputeManager(manager.Manager):
volumes with connection_info set for the source host
"""
# Detaching volumes.
- connector = self.driver.get_volume_connector(instance)
+ connector = None
for bdm in source_bdms:
if bdm.is_volume:
# Detaching volumes is a call to an external API that can fail.
@@ -8712,6 +9136,9 @@ class ComputeManager(manager.Manager):
# remove the volume connection without detaching from
# hypervisor because the instance is not running
# anymore on the current host
+ if connector is None:
+ connector = self.driver.get_volume_connector(
+ instance)
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
@@ -8733,6 +9160,42 @@ class ComputeManager(manager.Manager):
bdm.attachment_id, self.host,
str(e), instance=instance)
+ # TODO(sean-k-mooney): add typing
+ def _post_live_migration_update_host(
+ self, ctxt, instance, dest, block_migration=False,
+ migrate_data=None, source_bdms=None
+ ):
+ try:
+ self._post_live_migration(
+ ctxt, instance, dest, block_migration, migrate_data,
+ source_bdms)
+ except Exception:
+ # Restore the instance object
+ node_name = None
+ try:
+                # get the node name of the compute where the instance will
+                # be running after the migration, i.e. the destination host
+ compute_node = self._get_compute_info(ctxt, dest)
+ node_name = compute_node.hypervisor_hostname
+ except exception.ComputeHostNotFound:
+ LOG.exception('Failed to get compute_info for %s', dest)
+
+ # We can never roll back from post live migration, and we can
+ # only get here if the instance is already running on the dest,
+ # so ensure instance.host is set correctly and re-raise the
+ # original exception unmodified.
+ if instance.host != dest:
+ # apply() saves the new fields while drop() actually removes the
+ # migration context from the instance, so the applied values persist.
+ instance.apply_migration_context()
+ instance.drop_migration_context()
+ instance.host = dest
+ instance.task_state = None
+ instance.node = node_name
+ instance.progress = 0
+ instance.save()
+ raise
+
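The fallback in _post_live_migration_update_host boils down to "persist the fact that the guest now lives on the destination, then re-raise"; a small sketch of that shape, with a plain dict standing in for the Instance object:

    def post_migration_with_fallback(instance, dest, post_step):
        try:
            post_step()
        except Exception:
            # We cannot roll back at this point, so at least record the
            # destination host before letting the original error propagate.
            if instance.get("host") != dest:
                instance["host"] = dest
                instance["task_state"] = None
                instance["progress"] = 0
            raise

    inst = {"host": "src", "task_state": "migrating", "progress": 90}
    try:
        post_migration_with_fallback(inst, "dest", lambda: 1 / 0)
    except ZeroDivisionError:
        pass
    print(inst)  # {'host': 'dest', 'task_state': None, 'progress': 0}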
@wrap_exception()
@wrap_instance_fault
def _post_live_migration(self, ctxt, instance, dest,
@@ -8744,7 +9207,7 @@ class ComputeManager(manager.Manager):
and mainly updating database record.
:param ctxt: security context
- :param instance: instance dict
+ :param instance: instance object
:param dest: destination host
:param block_migration: if true, prepare for block migration
:param migrate_data: if not None, it is a dict which has data
@@ -8784,8 +9247,9 @@ class ComputeManager(manager.Manager):
action=fields.NotificationAction.LIVE_MIGRATION_POST,
phase=fields.NotificationPhase.START)
- migration = {'source_compute': self.host,
- 'dest_compute': dest, }
+ migration = objects.Migration(
+ source_compute=self.host, dest_compute=dest,
+ )
# For neutron, migrate_instance_start will activate the destination
# host port bindings, if there are any created by conductor before live
# migration started.
@@ -9056,7 +9520,8 @@ class ComputeManager(manager.Manager):
def _rollback_live_migration(self, context, instance,
dest, migrate_data=None,
migration_status='failed',
- source_bdms=None):
+ source_bdms=None,
+ pre_live_migration=False):
"""Recovers Instance/volume state from migrating -> running.
:param context: security context
@@ -9106,8 +9571,14 @@ class ComputeManager(manager.Manager):
# for nova-network)
# NOTE(mriedem): This is a no-op for neutron.
self.network_api.setup_networks_on_host(context, instance, self.host)
- self.driver.rollback_live_migration_at_source(context, instance,
- migrate_data)
+
+ # NOTE(erlon): We should make sure that
+ # rollback_live_migration_at_source is not called in the
+ # pre_live_migration rollback as that would trigger the source
+ # host to re-attach interfaces which were never detached.
+ if not pre_live_migration:
+ self.driver.rollback_live_migration_at_source(context, instance,
+ migrate_data)
# NOTE(lyarwood): Fetch the current list of BDMs, disconnect any
# connected volumes from the dest and delete any volume attachments
@@ -9476,7 +9947,7 @@ class ComputeManager(manager.Manager):
def _set_migration_to_error(migration, reason, **kwargs):
LOG.warning("Setting migration %(migration_id)s to error: "
"%(reason)s",
- {'migration_id': migration['id'], 'reason': reason},
+ {'migration_id': migration.id, 'reason': reason},
**kwargs)
migration.status = 'error'
migration.save()
@@ -9742,7 +10213,9 @@ class ComputeManager(manager.Manager):
else:
LOG.debug('Triggering sync for uuid %s', uuid)
self._syncs_in_progress[uuid] = True
- self._sync_power_pool.spawn_n(_sync, db_instance)
+ nova.utils.pass_context(self._sync_power_pool.spawn_n,
+ _sync,
+ db_instance)
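nova.utils.pass_context exists so the spawned worker runs with the caller's context rather than whatever the pool thread happens to have; the contextvars-based sketch below is only an analogy for that behaviour, not Nova's implementation:

    import contextvars
    import threading

    request_context = contextvars.ContextVar("request_context", default=None)

    def pass_context(spawn, func, *args):
        # Copy the caller's context and run the target inside that copy.
        ctx = contextvars.copy_context()
        return spawn(ctx.run, func, *args)

    def sync_power_state(uuid):
        print(uuid, "sees context:", request_context.get())

    request_context.set("req-abc123")
    worker = pass_context(
        lambda fn, *a: threading.Thread(target=fn, args=a),
        sync_power_state, "instance-1")
    worker.start()
    worker.join()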
def _query_driver_power_state_and_sync(self, context, db_instance):
if db_instance.task_state is not None:
@@ -10012,6 +10485,27 @@ class ComputeManager(manager.Manager):
# (e.g. disable the service).
with excutils.save_and_reraise_exception():
LOG.exception("ReshapeNeeded exception is unexpected here!")
+ except exception.PlacementPciException:
+ # If we are at startup and the Placement PCI inventory handling
+ # failed then probably there is a configuration error. Propagate
+ # the error up to kill the service.
+ if startup:
+ raise
+ # If we are not at startup then we can assume that the
+ # configuration was correct at startup so the error is probably
+ # transient. Anyhow we cannot kill the service any more so just
+ # log the error and continue.
+ LOG.exception(
+ "Error updating PCI resources for node %(node)s.",
+ {'node': nodename})
+ except exception.InvalidConfiguration as e:
+ if startup:
+ # If this happens during startup, we need to let it raise to
+ # abort our service startup.
+ raise
+ else:
+ LOG.error("Error updating resources for node %s: %s",
+ nodename, e)
except Exception:
LOG.exception("Error updating resources for node %(node)s.",
{'node': nodename})
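The new exception branches follow one policy: configuration-type errors kill the service at startup but are only logged once it is running, since the next periodic run will retry; roughly:

    def handle_update_error(exc, startup, nodename, log):
        # Stand-in for the InvalidConfiguration / PlacementPciException
        # branches above: fatal at startup, logged (and retried by the next
        # periodic task) afterwards.
        if startup:
            raise exc
        log("Error updating resources for node %s: %s" % (nodename, exc))

    handle_update_error(ValueError("bad [pci]device_spec"), startup=False,
                        nodename="node1", log=print)   # logged only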
@@ -10801,6 +11295,18 @@ class ComputeManager(manager.Manager):
profile['pci_slot'] = pci_dev.address
profile['pci_vendor_info'] = ':'.join([pci_dev.vendor_id,
pci_dev.product_id])
+ if profile.get('card_serial_number'):
+ # Assume it is there since Nova makes sure that PCI devices
+ # tagged as remote-managed have a serial in PCI VPD.
+ profile['card_serial_number'] = pci_dev.card_serial_number
+ if profile.get('pf_mac_address'):
+ profile['pf_mac_address'] = pci_dev.sriov_cap['pf_mac_address']
+ if profile.get('vf_num'):
+ profile['vf_num'] = pci_dev.sriov_cap['vf_num']
+
+ if pci_dev.mac_address:
+ profile['device_mac_address'] = pci_dev.mac_address
+
mig_vif.profile = profile
LOG.debug("Updating migrate VIF profile for port %(port_id)s:"
"%(profile)s", {'port_id': port_id,
@@ -10916,7 +11422,7 @@ class _ComputeV5Proxy(object):
bdms, recreate, on_shared_storage,
preserve_ephemeral, migration,
scheduled_node, limits, request_spec,
- accel_uuids)
+ accel_uuids, False, None)
# 5.13 support for optional accel_uuids argument
def shelve_instance(self, context, instance, image_id,
diff --git a/nova/compute/pci_placement_translator.py b/nova/compute/pci_placement_translator.py
new file mode 100644
index 0000000000..016efd9122
--- /dev/null
+++ b/nova/compute/pci_placement_translator.py
@@ -0,0 +1,623 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import collections
+import copy
+import typing as ty
+
+import os_resource_classes
+import os_traits
+from oslo_log import log as logging
+from oslo_utils import uuidutils
+
+from nova.compute import provider_tree
+import nova.conf
+from nova import exception
+from nova.i18n import _
+from nova.objects import fields
+from nova.objects import pci_device
+from nova.pci import devspec
+from nova.pci import manager as pci_manager
+
+
+CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+
+
+# Devs with these types are in a one-to-one mapping with an RP in placement
+PARENT_TYPES = (
+ fields.PciDeviceType.STANDARD, fields.PciDeviceType.SRIOV_PF)
+# Devs with these types need to have a parent, and that parent is the one
+# that maps to a placement RP
+CHILD_TYPES = (
+ fields.PciDeviceType.SRIOV_VF, fields.PciDeviceType.VDPA)
+
+
+def _is_placement_tracking_enabled() -> bool:
+ return CONF.pci.report_in_placement
+
+
+def _normalize_traits(traits: ty.List[str]) -> ty.List[str]:
+ """Make the trait names acceptable for placement.
+
+ It keeps the already valid standard or custom traits but normalizes trait
+ names that are not already normalized.
+ """
+ standard_traits, rest = os_traits.check_traits(traits)
+ custom_traits = []
+ for name in rest:
+ name = name.upper()
+ if os_traits.is_custom(name):
+ custom_traits.append(name)
+ else:
+ custom_traits.append(os_traits.normalize_name(name))
+
+ return list(standard_traits) + custom_traits
+
+
+def get_traits(traits_str: str) -> ty.Set[str]:
+ """Return a normalized set of placement standard and custom traits from
+ a string of comma separated trait names.
+ """
+ # traits is a comma separated list of placement trait names
+ if not traits_str:
+ return set()
+ return set(_normalize_traits(traits_str.split(',')))
+
+
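For trait names that are neither standard os-traits nor already CUSTOM_ prefixed, get_traits() effectively produces a sanitized CUSTOM_ name; a dependency-free approximation of that behaviour (the real code delegates to os_traits.normalize_name):

    import re

    def normalize_trait(name):
        name = name.strip().upper()
        if name.startswith("CUSTOM_"):
            return name
        return "CUSTOM_" + re.sub(r"[^A-Z0-9_]", "_", name)

    print(sorted(normalize_trait(t) for t in "foo,bar-baz,CUSTOM_GPU".split(",")))
    # ['CUSTOM_BAR_BAZ', 'CUSTOM_FOO', 'CUSTOM_GPU']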
+def _get_traits_for_dev(
+ dev_spec_tags: ty.Dict[str, str],
+) -> ty.Set[str]:
+ return get_traits(dev_spec_tags.get("traits", "")) | {
+ os_traits.COMPUTE_MANAGED_PCI_DEVICE
+ }
+
+
+def _normalize_resource_class(rc: str) -> str:
+ rc = rc.upper()
+ if (
+ rc not in os_resource_classes.STANDARDS and
+ not os_resource_classes.is_custom(rc)
+ ):
+ rc = os_resource_classes.normalize_name(rc)
+ # mypy: normalize_name will return non None for non None input
+ assert rc
+
+ return rc
+
+
+def get_resource_class(
+ requested_name: ty.Optional[str], vendor_id: str, product_id: str
+) -> str:
+ """Return the normalized resource class name based on what is requested
+ or if nothing is requested then generated from the vendor_id and product_id
+ """
+ if requested_name:
+ rc = _normalize_resource_class(requested_name)
+ else:
+ rc = f"CUSTOM_PCI_{vendor_id}_{product_id}".upper()
+ return rc
+
+
+def _get_rc_for_dev(
+ dev: pci_device.PciDevice,
+ dev_spec_tags: ty.Dict[str, str],
+) -> str:
+ """Return the resource class to represent the device.
+
+ It is either provided by the user in the configuration as the
+ resource_class tag, or one is generated from the vendor_id and product_id.
+
+ The user specified resource class is normalized if it is not already an
+ acceptable standard or custom resource class.
+ """
+ rc = dev_spec_tags.get("resource_class")
+ return get_resource_class(rc, dev.vendor_id, dev.product_id)
+
+
+class PciResourceProvider:
+ """A PCI Resource Provider"""
+
+ def __init__(self, name: str) -> None:
+ self.name = name
+ self.parent_dev = None
+ self.children_devs: ty.List[pci_device.PciDevice] = []
+ self.resource_class: ty.Optional[str] = None
+ self.traits: ty.Optional[ty.Set[str]] = None
+
+ @property
+ def devs(self) -> ty.List[pci_device.PciDevice]:
+ return [self.parent_dev] if self.parent_dev else self.children_devs
+
+ @property
+ def to_be_deleted(self):
+ return not bool(self.devs)
+
+ def add_child(self, dev, dev_spec_tags: ty.Dict[str, str]) -> None:
+ if self.parent_dev:
+ raise exception.PlacementPciDependentDeviceException(
+ parent_dev=dev.address,
+ children_devs=",".join(dev.address for dev in self.devs)
+ )
+
+ rc = _get_rc_for_dev(dev, dev_spec_tags)
+ if self.resource_class and rc != self.resource_class:
+ raise exception.PlacementPciMixedResourceClassException(
+ new_rc=rc,
+ new_dev=dev.address,
+ current_rc=self.resource_class,
+ current_devs=",".join(
+ dev.address for dev in self.children_devs)
+ )
+
+ traits = _get_traits_for_dev(dev_spec_tags)
+ if self.traits is not None and self.traits != traits:
+ raise exception.PlacementPciMixedTraitsException(
+ new_traits=",".join(sorted(traits)),
+ new_dev=dev.address,
+ current_traits=",".join(sorted(self.traits)),
+ current_devs=",".join(
+ dev.address for dev in self.children_devs),
+ )
+
+ self.children_devs.append(dev)
+ self.resource_class = rc
+ self.traits = traits
+
+ def add_parent(self, dev, dev_spec_tags: ty.Dict[str, str]) -> None:
+ if self.parent_dev or self.children_devs:
+ raise exception.PlacementPciDependentDeviceException(
+ parent_dev=dev.address,
+ children_devs=",".join(dev.address for dev in self.devs)
+ )
+
+ self.parent_dev = dev
+ self.resource_class = _get_rc_for_dev(dev, dev_spec_tags)
+ self.traits = _get_traits_for_dev(dev_spec_tags)
+
+ def remove_child(self, dev: pci_device.PciDevice) -> None:
+ # Nothing to do here. The update_provider_tree will handle the
+ # inventory decrease or the full RP removal
+ pass
+
+ def remove_parent(self, dev: pci_device.PciDevice) -> None:
+ # Nothing to do here. The update_provider_tree will handle the
+ # full RP removal.
+ pass
+
+ def _get_allocations(self) -> ty.Mapping[str, int]:
+ """Return a dict of used resources keyed by consumer UUID.
+
+ Note that:
+ 1) a single consumer can consume more than one resource from a single
+ RP. I.e. A VM with two VFs from the same parent PF
+ 2) multiple consumers can consume resources from a single RP. I.e. two
+ VMs consuming one VF from the same PF each
+ 3) regardless of how many consumers we have on a single PCI RP, they
+ are always consuming resources from the same resource class as
+ we are not supporting dependent devices modelled by the same RP but
+ different resource classes.
+ """
+ return collections.Counter(
+ [
+ dev.instance_uuid
+ for dev in self.devs
+ if "instance_uuid" in dev and dev.instance_uuid
+ ]
+ )
+
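Because several devices on one RP can belong to the same consumer, the per-consumer usage is just a Counter over the allocated devices; for example:

    import collections

    devs = [
        {"address": "0000:81:00.2", "instance_uuid": "inst-1"},  # two VFs ...
        {"address": "0000:81:00.3", "instance_uuid": "inst-1"},  # ... one VM
        {"address": "0000:81:00.4", "instance_uuid": "inst-2"},
        {"address": "0000:81:00.5", "instance_uuid": None},      # unallocated
    ]
    usage = collections.Counter(
        d["instance_uuid"] for d in devs if d["instance_uuid"])
    print(usage)  # Counter({'inst-1': 2, 'inst-2': 1})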
+ def update_provider_tree(
+ self,
+ provider_tree: provider_tree.ProviderTree,
+ parent_rp_name: str,
+ ) -> None:
+
+ if self.to_be_deleted:
+ # This means we need to delete the RP from placement if exists
+ if provider_tree.exists(self.name):
+ # NOTE(gibi): If there are allocations on this RP then
+ # Placement will reject the update when the provider_tree is
+ # synced up.
+ provider_tree.remove(self.name)
+
+ return
+
+ if not provider_tree.exists(self.name):
+ # NOTE(gibi): We need to generate UUID for the new provider in Nova
+ # instead of letting Placement assign one. We are potentially
+ # healing a missing RP along with missing allocations on that RP.
+ # The allocation healing happens with POST /reshape, and that API
+ # only takes RP UUIDs.
+ provider_tree.new_child(
+ self.name,
+ parent_rp_name,
+ uuid=uuidutils.generate_uuid(dashed=True)
+ )
+
+ provider_tree.update_inventory(
+ self.name,
+ # NOTE(gibi): The rest of the inventory fields (reserved,
+ # allocation_ratio, etc.) are defaulted by placement and the
+ # default values make sense for PCI devices, i.e. no overallocation
+ # and PCI devices are allocated one by one.
+ # Also, this way, if the operator sets a reserved value in placement
+ # for the PCI inventories directly then nova will not override that
+ # value periodically.
+ {
+ self.resource_class: {
+ "total": len(self.devs),
+ "max_unit": len(self.devs),
+ }
+ },
+ )
+ provider_tree.update_traits(self.name, self.traits)
+
+ # Here we are sure the RP exists in the provider_tree. So, we can
+ # record the RP UUID in each PciDevice this RP represents
+ rp_uuid = provider_tree.data(self.name).uuid
+ for dev in self.devs:
+ dev.extra_info['rp_uuid'] = rp_uuid
+
+ def update_allocations(
+ self,
+ allocations: dict,
+ provider_tree: provider_tree.ProviderTree,
+ same_host_instances: ty.List[str],
+ ) -> bool:
+ updated = False
+
+ if self.to_be_deleted:
+ # the RP is going away because either removed from the hypervisor
+ # or the compute's config is changed to ignore the device.
+ return updated
+
+ # we assume here that if this RP has been created in the current round
+ # of healing then it already has a UUID assigned.
+ rp_uuid = provider_tree.data(self.name).uuid
+
+ for consumer, amount in self._get_allocations().items():
+ if consumer not in allocations:
+ # We have PCI device(s) allocated to an instance, but we don't
+ # see any instance allocation in placement. This
+ # happens for two reasons:
+ # 1) The instance is being migrated and therefore the
+ # allocation is held by the migration UUID in placement. In
+ # this case the PciDevice is still allocated to the instance
+ # UUID in the nova DB hence our lookup for the instance
+ # allocation here. We can ignore this case as: i) We healed
+ # the PCI allocation for the instance before the migration
+ # was started. ii) Nova simply moves the allocation from the
+ # instance UUID to the migration UUID in placement. So we
+ # assume the migration allocation is correct without
+ # healing. One limitation of this is that if there is an
+ # in-progress migration when nova is upgraded, then the PCI
+ # allocation of that migration will be missing from
+ # placement on the source host. But it is temporary and the
+ # allocation will be fixed as soon as the migration is
+ # completed or reverted.
+ # 2) We have a bug in the scheduler or placement and the whole
+ # instance allocation is lost. We cannot handle that here.
+ # It is expected to be healed via nova-manage placement
+ # heal_allocation CLI instead.
+ continue
+
+ if consumer in same_host_instances:
+ # This is a nasty special case. This instance is undergoing
+ # a same host resize. So in Placement the source host
+ # allocation is held by the migration UUID *but* the
+ # PciDevice.instance_uuid is set for the instance UUID both
+ # on the source and on the destination host. As the source and
+ # dest are the same for migration we will see PciDevice
+ # objects assigned to this instance that should not be
+ # allocated to the instance UUID in placement.
+ # As noted above we don't want to handle in-progress
+ # migrations during healing. So we simply ignore this instance.
+ # If the instance needs healing then it will be healed after
+ # the migration is confirmed or reverted.
+ continue
+
+ current_allocs = allocations[consumer]['allocations']
+ current_rp_allocs = current_allocs.get(rp_uuid)
+
+ if current_rp_allocs:
+ # update an existing allocation if the current one differs
+ current_rc_allocs = current_rp_allocs["resources"].get(
+ self.resource_class, 0)
+ if current_rc_allocs != amount:
+ current_rp_allocs[
+ "resources"][self.resource_class] = amount
+ updated = True
+ else:
+ # insert a new allocation as it is missing
+ current_allocs[rp_uuid] = {
+ "resources": {self.resource_class: amount}
+ }
+ updated = True
+
+ return updated
+
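A stripped-down version of the healing step for a single consumer and RP shows the three outcomes (skip consumers unknown to placement, correct a differing amount, insert a missing entry); the dict shapes follow the allocations format used here, everything else is illustrative:

    def heal_pci_allocation(allocations, consumer, rp_uuid, rc, amount):
        if consumer not in allocations:
            return False          # placement does not know this consumer
        allocs = allocations[consumer]["allocations"]
        rp_allocs = allocs.get(rp_uuid)
        if rp_allocs is None:
            allocs[rp_uuid] = {"resources": {rc: amount}}
            return True           # inserted a missing allocation
        if rp_allocs["resources"].get(rc, 0) == amount:
            return False          # already correct
        rp_allocs["resources"][rc] = amount
        return True               # corrected the amount

    allocations = {
        "inst-1": {"allocations": {"rp-compute": {"resources": {"VCPU": 2}}}}}
    print(heal_pci_allocation(
        allocations, "inst-1", "rp-pci", "CUSTOM_PCI_8086_1563", 1))  # True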
+ def __str__(self) -> str:
+ if self.devs:
+ return (
+ f"RP({self.name}, {self.resource_class}={len(self.devs)}, "
+ f"traits={','.join(sorted(self.traits or set()))})"
+ )
+ else:
+ return f"RP({self.name}, <EMPTY>)"
+
+
+class PlacementView:
+ """The PCI Placement view"""
+
+ def __init__(
+ self,
+ hypervisor_hostname: str,
+ instances_under_same_host_resize: ty.List[str],
+ ) -> None:
+ self.rps: ty.Dict[str, PciResourceProvider] = {}
+ self.root_rp_name = hypervisor_hostname
+ self.same_host_instances = instances_under_same_host_resize
+
+ def _get_rp_name_for_address(self, addr: str) -> str:
+ return f"{self.root_rp_name}_{addr.upper()}"
+
+ def _ensure_rp(self, rp_name: str) -> PciResourceProvider:
+ return self.rps.setdefault(rp_name, PciResourceProvider(rp_name))
+
+ def _get_rp_name_for_child(self, dev: pci_device.PciDevice) -> str:
+ if not dev.parent_addr:
+ msg = _(
+ "Missing parent address for PCI device s(dev)% with "
+ "type s(type)s"
+ ) % {
+ "dev": dev.address,
+ "type": dev.dev_type,
+ }
+ raise exception.PlacementPciException(error=msg)
+
+ return self._get_rp_name_for_address(dev.parent_addr)
+
+ def _add_child(
+ self, dev: pci_device.PciDevice, dev_spec_tags: ty.Dict[str, str]
+ ) -> None:
+ rp_name = self._get_rp_name_for_child(dev)
+ self._ensure_rp(rp_name).add_child(dev, dev_spec_tags)
+
+ def _add_parent(
+ self, dev: pci_device.PciDevice, dev_spec_tags: ty.Dict[str, str]
+ ) -> None:
+ rp_name = self._get_rp_name_for_address(dev.address)
+ self._ensure_rp(rp_name).add_parent(dev, dev_spec_tags)
+
+ def _add_dev(
+ self, dev: pci_device.PciDevice, dev_spec_tags: ty.Dict[str, str]
+ ) -> None:
+ if dev_spec_tags.get("physical_network"):
+ # NOTE(gibi): We ignore devices that have a physnet configured as
+ # those are there for Neutron based SRIOV and that is out of scope
+ # for now. Later these devices will be tracked as PCI_NETDEV
+ # devices in placement.
+ return
+
+ if dev.dev_type in PARENT_TYPES:
+ self._add_parent(dev, dev_spec_tags)
+ elif dev.dev_type in CHILD_TYPES:
+ self._add_child(dev, dev_spec_tags)
+ else:
+ msg = _(
+ "Unhandled PCI device type %(type)s for %(dev)s. Please "
+ "report a bug."
+ ) % {
+ "type": dev.dev_type,
+ "dev": dev.address,
+ }
+ raise exception.PlacementPciException(error=msg)
+
+ def _remove_child(self, dev: pci_device.PciDevice) -> None:
+ rp_name = self._get_rp_name_for_child(dev)
+ self._ensure_rp(rp_name).remove_child(dev)
+
+ def _remove_parent(self, dev: pci_device.PciDevice) -> None:
+ rp_name = self._get_rp_name_for_address(dev.address)
+ self._ensure_rp(rp_name).remove_parent(dev)
+
+ def _remove_dev(self, dev: pci_device.PciDevice) -> None:
+ """Remove PCI devices from Placement that existed before but now
+ deleted from the hypervisor or unlisted from [pci]device_spec
+ """
+ if dev.dev_type in PARENT_TYPES:
+ self._remove_parent(dev)
+ elif dev.dev_type in CHILD_TYPES:
+ self._remove_child(dev)
+
+ def process_dev(
+ self,
+ dev: pci_device.PciDevice,
+ dev_spec: ty.Optional[devspec.PciDeviceSpec],
+ ) -> None:
+
+ if dev.status in (
+ fields.PciDeviceStatus.DELETED,
+ fields.PciDeviceStatus.REMOVED,
+ ):
+ # If the PCI tracker marked the device DELETED or REMOVED then
+ # such device is not allocated, so we are free to drop it from
+ # placement too.
+ self._remove_dev(dev)
+ else:
+ if not dev_spec:
+ if dev.instance_uuid:
+ LOG.warning(
+ "Device spec is not found for device %s in "
+ "[pci]device_spec. We are skipping this devices "
+ "during Placement update. The device is allocated by "
+ "%s. You should not remove an allocated device from "
+ "the configuration. Please restore the configuration "
+ "or cold migrate the instance to resolve the "
+ "inconsistency.",
+ dev.address,
+ dev.instance_uuid
+ )
+ else:
+ LOG.warning(
+ "Device spec is not found for device %s in "
+ "[pci]device_spec. Ignoring device in Placement "
+ "resource view. This should not happen. Please file a "
+ "bug.",
+ dev.address
+ )
+
+ return
+
+ self._add_dev(dev, dev_spec.get_tags())
+
+ def __str__(self) -> str:
+ return (
+ f"Placement PCI view on {self.root_rp_name}: "
+ f"{', '.join(str(rp) for rp in self.rps.values())}"
+ )
+
+ def update_provider_tree(
+ self, provider_tree: provider_tree.ProviderTree
+ ) -> None:
+ for rp_name, rp in self.rps.items():
+ rp.update_provider_tree(provider_tree, self.root_rp_name)
+
+ def update_allocations(
+ self,
+ allocations: dict,
+ provider_tree: provider_tree.ProviderTree
+ ) -> bool:
+ """Updates the passed in allocations dict inplace with any PCI
+ allocations that is inferred from the PciDevice objects already added
+ to the view. It returns True if the allocations dict has been changed,
+ False otherwise.
+ """
+ updated = False
+ for rp in self.rps.values():
+ updated |= rp.update_allocations(
+ allocations,
+ provider_tree,
+ self.same_host_instances,
+ )
+ return updated
+
+
+def ensure_no_dev_spec_with_devname(dev_specs: ty.List[devspec.PciDeviceSpec]):
+ for dev_spec in dev_specs:
+ if dev_spec.dev_spec_conf.get("devname"):
+ msg = _(
+ "Invalid [pci]device_spec configuration. PCI Placement "
+ "reporting does not support 'devname' based device "
+ "specification but we got %(dev_spec)s. "
+ "Please use PCI address in the configuration instead."
+ ) % {"dev_spec": dev_spec.dev_spec_conf}
+ raise exception.PlacementPciException(error=msg)
+
+
+def ensure_tracking_was_not_enabled_before(
+ provider_tree: provider_tree.ProviderTree
+) -> None:
+ # If placement tracking was enabled before then we do not support
+ # disabling it later. To check for that we can look for RPs with
+ # the COMPUTE_MANAGED_PCI_DEVICE trait. If any are found then we
+ # raise to kill the service.
+ for rp_uuid in provider_tree.get_provider_uuids():
+ if (
+ os_traits.COMPUTE_MANAGED_PCI_DEVICE
+ in provider_tree.data(rp_uuid).traits
+ ):
+ msg = _(
+ "The [pci]report_in_placement is False but it was enabled "
+ "before on this compute. Nova does not support disabling "
+ "it after it is enabled."
+ )
+ raise exception.PlacementPciException(error=msg)
+
+
+def update_provider_tree_for_pci(
+ provider_tree: provider_tree.ProviderTree,
+ nodename: str,
+ pci_tracker: pci_manager.PciDevTracker,
+ allocations: dict,
+ instances_under_same_host_resize: ty.List[str],
+) -> bool:
+ """Based on the PciDevice objects in the pci_tracker it calculates what
+ inventories and allocations needs to exist in placement and create the
+ missing peaces.
+
+ It returns True if not just the provider_tree but also allocations needed
+ to be changed.
+
+ :param allocations:
+ Dict of allocation data of the form:
+ { $CONSUMER_UUID: {
+ # The shape of each "allocations" dict below is identical
+ # to the return from GET /allocations/{consumer_uuid}
+ "allocations": {
+ $RP_UUID: {
+ "generation": $RP_GEN,
+ "resources": {
+ $RESOURCE_CLASS: $AMOUNT,
+ ...
+ },
+ },
+ ...
+ },
+ "project_id": $PROJ_ID,
+ "user_id": $USER_ID,
+ "consumer_generation": $CONSUMER_GEN,
+ },
+ ...
+ }
+ :param instances_under_same_host_resize: A list of instance UUIDs that
+ are undergoing same host resize on this host.
+ """
+ if not _is_placement_tracking_enabled():
+ ensure_tracking_was_not_enabled_before(provider_tree)
+ # If tracking is not enabled we just return without touching anything
+ return False
+
+ ensure_no_dev_spec_with_devname(pci_tracker.dev_filter.specs)
+
+ LOG.debug(
+ 'Collecting PCI inventories and allocations to track them in Placement'
+ )
+
+ pv = PlacementView(nodename, instances_under_same_host_resize)
+ for dev in pci_tracker.pci_devs:
+ # match the PCI device with the [pci]dev_spec config to access
+ # the configuration metadata tags
+ dev_spec = pci_tracker.dev_filter.get_devspec(dev)
+ pv.process_dev(dev, dev_spec)
+
+ LOG.info("Placement PCI resource view: %s", pv)
+
+ pv.update_provider_tree(provider_tree)
+ old_alloc = copy.deepcopy(allocations)
+ # update_provider_tree correlated the PciDevice objects with RPs in
+ # placement and recorded the RP UUID in the PciDevice object. We need to
+ # trigger an update on the device pools in the tracker to get the device
+ # RP UUID mapped to the device pools
+ pci_tracker.stats.populate_pools_metadata_from_assigned_devices()
+ updated = pv.update_allocations(allocations, provider_tree)
+
+ if updated:
+ LOG.debug(
+ "Placement PCI view needs allocation healing. This should only "
+ "happen if [filter_scheduler]pci_in_placement is still disabled. "
+ "Original allocations: %s New allocations: %s",
+ old_alloc,
+ allocations,
+ )
+
+ return updated
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 1c492bcb27..9ee6670c17 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -30,6 +30,7 @@ import retrying
from nova.compute import claims
from nova.compute import monitors
+from nova.compute import pci_placement_translator
from nova.compute import provider_config
from nova.compute import stats as compute_stats
from nova.compute import task_states
@@ -48,6 +49,7 @@ from nova import rpc
from nova.scheduler.client import report
from nova import utils
from nova.virt import hardware
+from nova.virt import node
CONF = nova.conf.CONF
@@ -103,7 +105,7 @@ class ResourceTracker(object):
monitor_handler = monitors.MonitorHandler(self)
self.monitors = monitor_handler.monitors
self.old_resources = collections.defaultdict(objects.ComputeNode)
- self.reportclient = reportclient or report.SchedulerReportClient()
+ self.reportclient = reportclient or report.report_client_singleton()
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
self.disk_allocation_ratio = CONF.disk_allocation_ratio
@@ -144,16 +146,20 @@ class ResourceTracker(object):
during the instance build.
"""
if self.disabled(nodename):
- # instance_claim() was called before update_available_resource()
- # (which ensures that a compute node exists for nodename). We
- # shouldn't get here but in case we do, just set the instance's
- # host and nodename attribute (probably incorrect) and return a
- # NoopClaim.
- # TODO(jaypipes): Remove all the disabled junk from the resource
- # tracker. Servicegroup API-level active-checking belongs in the
- # nova-compute manager.
- self._set_instance_host_and_node(instance, nodename)
- return claims.NopClaim()
+ # If we get here, it means we are trying to claim for an instance
+ # that was scheduled to a node that we do not have in our list,
+ # or is in some other way unmanageable by this node. This would
+ # mean that we are unable to account for resources, create
+ # allocations in placement, or do any of the other accounting
+ # necessary for this to work. In the past, this situation was
+ # effectively ignored silently, but in a world where we track
+ # resources with placement and instance assignment to compute nodes
+ # by service, we can no longer be leaky.
+ raise exception.ComputeResourcesUnavailable(
+ ('Attempt to claim resources for instance %(inst)s '
+ 'on unknown node %(node)s failed') % {
+ 'inst': instance.uuid,
+ 'node': nodename})
# sanity checks:
if instance.host:
@@ -278,9 +284,17 @@ class ResourceTracker(object):
context, instance, new_flavor, nodename, move_type)
if self.disabled(nodename):
- # compute_driver doesn't support resource tracking, just
- # generate the migration record and continue the resize:
- return claims.NopClaim(migration=migration)
+ # This means we were asked to accept an incoming migration to a
+ # node that we do not own or track. We really should not get here,
+ # but if we do, we must refuse to continue with the migration
+ # process, since we cannot account for those resources, create
+ # allocations in placement, etc. This has been a silent resource
+ # leak in the past, but it must be a hard failure now.
+ raise exception.ComputeResourcesUnavailable(
+ ('Attempt to claim move resources for instance %(inst)s on '
+ 'unknown node %(node)s failed') % {
+ 'inst': instance.uuid,
+ 'node': nodename})
cn = self.compute_nodes[nodename]
@@ -328,7 +342,12 @@ class ResourceTracker(object):
migration_id=migration.id,
old_numa_topology=instance.numa_topology,
new_numa_topology=claim.claimed_numa_topology,
- old_pci_devices=instance.pci_devices,
+ # NOTE(gibi): the _update_usage_from_migration call below appends
+ # the newly claimed pci devices to the instance.pci_devices list.
+ # To keep the migration context independent we need to make a copy
+ # of that list here. We need a deep copy as we need to duplicate
+ # the instance.pci_devices.objects list.
+ old_pci_devices=copy.deepcopy(instance.pci_devices),
new_pci_devices=claimed_pci_devices,
old_pci_requests=instance.pci_requests,
new_pci_requests=new_pci_requests,
@@ -613,18 +632,11 @@ class ResourceTracker(object):
:param prefix: Prefix to use when accessing migration context
attributes. 'old_' or 'new_', with 'new_' being the default.
"""
- # Remove usage for an instance that is tracked in migrations, such as
- # on the dest node during revert resize.
- if instance['uuid'] in self.tracked_migrations:
- migration = self.tracked_migrations.pop(instance['uuid'])
+ if instance["uuid"] in self.tracked_migrations:
if not flavor:
- flavor = self._get_flavor(instance, prefix, migration)
- # Remove usage for an instance that is not tracked in migrations (such
- # as on the source node after a migration).
- # NOTE(lbeliveau): On resize on the same node, the instance is
- # included in both tracked_migrations and tracked_instances.
- elif instance['uuid'] in self.tracked_instances:
- self.tracked_instances.remove(instance['uuid'])
+ flavor = self._get_flavor(
+ instance, prefix, self.tracked_migrations[instance["uuid"]]
+ )
if flavor is not None:
numa_topology = self._get_migration_context_resource(
@@ -640,6 +652,15 @@ class ResourceTracker(object):
ctxt = context.elevated()
self._update(ctxt, self.compute_nodes[nodename])
+ # Remove usage for an instance that is tracked in migrations, such as
+ # on the dest node during revert resize.
+ self.tracked_migrations.pop(instance['uuid'], None)
+ # Remove usage for an instance that is not tracked in migrations (such
+ # as on the source node after a migration).
+ # NOTE(lbeliveau): On resize on the same node, the instance is
+ # included in both tracked_migrations and tracked_instances.
+ self.tracked_instances.discard(instance['uuid'])
+
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, fair=True)
def update_usage(self, context, instance, nodename):
"""Update the resource usage and stats after a change in an
@@ -660,50 +681,6 @@ class ResourceTracker(object):
return (nodename not in self.compute_nodes or
not self.driver.node_is_available(nodename))
- def _check_for_nodes_rebalance(self, context, resources, nodename):
- """Check if nodes rebalance has happened.
-
- The ironic driver maintains a hash ring mapping bare metal nodes
- to compute nodes. If a compute dies, the hash ring is rebuilt, and
- some of its bare metal nodes (more precisely, those not in ACTIVE
- state) are assigned to other computes.
-
- This method checks for this condition and adjusts the database
- accordingly.
-
- :param context: security context
- :param resources: initial values
- :param nodename: node name
- :returns: True if a suitable compute node record was found, else False
- """
- if not self.driver.rebalances_nodes:
- return False
-
- # Its possible ironic just did a node re-balance, so let's
- # check if there is a compute node that already has the correct
- # hypervisor_hostname. We can re-use that rather than create a
- # new one and have to move existing placement allocations
- cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
- context, nodename)
-
- if len(cn_candidates) == 1:
- cn = cn_candidates[0]
- LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
- {"name": nodename, "old": cn.host, "new": self.host})
- cn.host = self.host
- self.compute_nodes[nodename] = cn
- self._copy_resources(cn, resources)
- self._setup_pci_tracker(context, cn, resources)
- self._update(context, cn)
- return True
- elif len(cn_candidates) > 1:
- LOG.error(
- "Found more than one ComputeNode for nodename %s. "
- "Please clean up the orphaned ComputeNode records in your DB.",
- nodename)
-
- return False
-
def _init_compute_node(self, context, resources):
"""Initialize the compute node if it does not already exist.
@@ -721,6 +698,7 @@ class ResourceTracker(object):
False otherwise
"""
nodename = resources['hypervisor_hostname']
+ node_uuid = resources['uuid']
# if there is already a compute node just use resources
# to initialize
@@ -732,23 +710,43 @@ class ResourceTracker(object):
# now try to get the compute node record from the
# database. If we get one we use resources to initialize
- cn = self._get_compute_node(context, nodename)
+
+ # We use read_deleted=True so that we will find and recover a deleted
+ # node object, if necessary.
+ with utils.temporary_mutation(context, read_deleted='yes'):
+ cn = self._get_compute_node(context, node_uuid)
+ if cn and cn.deleted:
+ # Undelete and save this right now so that everything below
+ # can continue without read_deleted=yes
+ LOG.info('Undeleting compute node %s', cn.uuid)
+ cn.deleted = False
+ cn.deleted_at = None
+ cn.save()
if cn:
+ if cn.host != self.host:
+ LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
+ {"name": nodename, "old": cn.host, "new": self.host})
+ cn.host = self.host
+ self._update(context, cn)
+
self.compute_nodes[nodename] = cn
self._copy_resources(cn, resources)
self._setup_pci_tracker(context, cn, resources)
return False
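The reworked lookup means a compute node record found by UUID is reused: undeleted if necessary and re-homed to this host, instead of creating a second record for the same node; sketched with a plain dict standing in for the ComputeNode object:

    def adopt_compute_node(cn, my_host, log):
        if cn.get("deleted"):
            log("Undeleting compute node %s" % cn["uuid"])
            cn["deleted"] = False
            cn["deleted_at"] = None
        if cn["host"] != my_host:
            log("ComputeNode %s moving from %s to %s"
                % (cn["hypervisor_hostname"], cn["host"], my_host))
            cn["host"] = my_host
        return cn

    print(adopt_compute_node(
        {"uuid": "cn-uuid", "hypervisor_hostname": "node1",
         "host": "old-compute", "deleted": True, "deleted_at": "2022-01-01"},
        "new-compute", print))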
- if self._check_for_nodes_rebalance(context, resources, nodename):
- return False
-
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# to be initialized with resource values.
cn = objects.ComputeNode(context)
cn.host = self.host
self._copy_resources(cn, resources, initial=True)
- cn.create()
+ try:
+ cn.create()
+ except exception.DuplicateRecord:
+ raise exception.InvalidConfiguration(
+ 'Duplicate compute node record found for host %s node %s' % (
+ cn.host, cn.hypervisor_hostname))
+
# Only map the ComputeNode into compute_nodes if create() was OK
# because if create() fails, on the next run through here nodename
# would be in compute_nodes and we won't try to create again (because
@@ -881,6 +879,14 @@ class ResourceTracker(object):
# contains a non-None value, even for non-Ironic nova-compute hosts. It
# is this value that will be populated in the compute_nodes table.
resources['host_ip'] = CONF.my_ip
+ if 'uuid' not in resources:
+ # NOTE(danms): Any driver that does not provide a uuid per
+ # node gets the locally-persistent compute_id. Only ironic
+ # should be setting the per-node uuid (and returning
+ # multiple nodes in general). If this is the first time we
+ # are creating a compute node on this host, we will
+ # generate and persist this uuid for the future.
+ resources['uuid'] = node.get_local_node_uuid()
# We want the 'cpu_info' to be None from the POV of the
# virt driver, but the DB requires it to be non-null so
@@ -985,8 +991,6 @@ class ResourceTracker(object):
# notified when instances are deleted, we need remove all usages
# from deleted instances.
self.pci_tracker.clean_usage(instances, migrations)
- dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
- cn.pci_device_pools = dev_pools_obj
self._report_final_resource_view(nodename)
@@ -1008,14 +1012,13 @@ class ResourceTracker(object):
if startup:
self._check_resources(context)
- def _get_compute_node(self, context, nodename):
+ def _get_compute_node(self, context, node_uuid):
"""Returns compute node for the host and nodename."""
try:
- return objects.ComputeNode.get_by_host_and_nodename(
- context, self.host, nodename)
+ return objects.ComputeNode.get_by_uuid(context, node_uuid)
except exception.NotFound:
LOG.warning("No compute node record for %(host)s:%(node)s",
- {'host': self.host, 'node': nodename})
+ {'host': self.host, 'node': node_uuid})
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free resources.
@@ -1126,6 +1129,28 @@ class ResourceTracker(object):
LOG.error('Unable to find services table record for nova-compute '
'host %s', self.host)
+ def _should_expose_remote_managed_ports_trait(self,
+ is_supported: bool):
+ """Determine whether COMPUTE_REMOTE_MANAGED_PORTS should be exposed.
+
+ Determines if the COMPUTE_REMOTE_MANAGED_PORTS trait needs to be
+ exposed based on the respective compute driver capability and
+ the presence of remote managed devices on a given host. Whether such
+ devices are present or not depends on the Whitelist configuration
+ (presence of a remote_managed tag association with some PCI devices)
+ and their physical presence (plugged in, enumerated by the OS).
+
+ The aim of having this check is to optimize host lookup by prefiltering
+ hosts that have compute driver support but no hardware. The check
+ does not consider free device count - just the presence of device
+ pools since device availability may change between a prefilter check
+ and a later check in PciPassthroughFilter.
+
+ :param bool is_supported: Is the trait supported by the compute driver
+ """
+ return (is_supported and
+ self.pci_tracker.pci_stats.has_remote_managed_device_pools())
+
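Gating COMPUTE_REMOTE_MANAGED_PORTS on both driver support and the presence of remote-managed device pools keeps hosts without the hardware out of scheduling; the trait name is real, the rest of this sketch is illustrative:

    REMOTE_MANAGED = "COMPUTE_REMOTE_MANAGED_PORTS"

    def traits_for_host(capabilities, has_remote_managed_pools):
        traits = set()
        for trait, supported in capabilities.items():
            if trait == REMOTE_MANAGED:
                supported = supported and has_remote_managed_pools
            if supported:
                traits.add(trait)
        return traits

    print(traits_for_host(
        {REMOTE_MANAGED: True, "COMPUTE_TRUSTED_CERTS": True},
        has_remote_managed_pools=False))
    # {'COMPUTE_TRUSTED_CERTS'}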
def _get_traits(self, context, nodename, provider_tree):
"""Synchronizes internal and external traits for the node provider.
@@ -1149,7 +1174,11 @@ class ResourceTracker(object):
# traits that are missing, and remove any existing set traits
# that are not currently supported.
for trait, supported in self.driver.capabilities_as_traits().items():
- if supported:
+ add_trait = supported
+ if trait == os_traits.COMPUTE_REMOTE_MANAGED_PORTS:
+ add_trait &= self._should_expose_remote_managed_ports_trait(
+ supported)
+ if add_trait:
traits.add(trait)
elif trait in traits:
traits.remove(trait)
@@ -1163,9 +1192,16 @@ class ResourceTracker(object):
return list(traits)
- @retrying.retry(stop_max_attempt_number=4,
- retry_on_exception=lambda e: isinstance(
- e, exception.ResourceProviderUpdateConflict))
+ @retrying.retry(
+ stop_max_attempt_number=4,
+ retry_on_exception=lambda e: isinstance(
+ e,
+ (
+ exception.ResourceProviderUpdateConflict,
+ exception.PlacementReshapeConflict,
+ ),
+ ),
+ )
def _update_to_placement(self, context, compute_node, startup):
"""Send resource and inventory changes to placement."""
# NOTE(jianghuaw): Some resources(e.g. VGPU) are not saved in the
@@ -1185,7 +1221,9 @@ class ResourceTracker(object):
context, compute_node.uuid, name=compute_node.hypervisor_hostname)
# Let the virt driver rearrange the provider tree and set/update
# the inventory, traits, and aggregates throughout.
- allocs = None
+ allocs = self.reportclient.get_allocations_for_provider_tree(
+ context, nodename)
+ driver_reshaped = False
try:
self.driver.update_provider_tree(prov_tree, nodename)
except exception.ReshapeNeeded:
@@ -1196,10 +1234,9 @@ class ResourceTracker(object):
LOG.info("Performing resource provider inventory and "
"allocation data migration during compute service "
"startup or fast-forward upgrade.")
- allocs = self.reportclient.get_allocations_for_provider_tree(
- context, nodename)
- self.driver.update_provider_tree(prov_tree, nodename,
- allocations=allocs)
+ self.driver.update_provider_tree(
+ prov_tree, nodename, allocations=allocs)
+ driver_reshaped = True
# Inject driver capabilities traits into the provider
# tree. We need to determine the traits that the virt
@@ -1220,25 +1257,77 @@ class ResourceTracker(object):
context, nodename, provider_tree=prov_tree)
prov_tree.update_traits(nodename, traits)
+ instances_under_same_host_resize = [
+ migration.instance_uuid
+ for migration in self.tracked_migrations.values()
+ if migration.is_same_host_resize
+ ]
+ # NOTE(gibi): Tracking PCI in placement is different from tracking
+ # other resources.
+ #
+ # While driver.update_provider_tree is used to let the virt driver
+ # create any kind of placement model for a resource, the PCI data
+ # modelling is done independently of the virt driver by the PCI
+ # tracker. So the placement reporting also needs to be done here in
+ # the resource tracker, independently of the virt driver.
+ #
+ # Additionally, when PCI tracking in placement was introduced there
+ # were already PCI allocations in nova. So both the PCI inventories
+ # and allocations need to be healed. Moreover, to support rolling
+ # upgrade the placement prefilter for PCI devices was not turned on
+ # by default at the first release of this feature. Therefore, there
+ # could be new PCI allocations without placement being involved until
+ # the prefilter is enabled. So we need to be ready to heal PCI
+ # allocations at every call, not just at startup.
+ pci_reshaped = pci_placement_translator.update_provider_tree_for_pci(
+ prov_tree,
+ nodename,
+ self.pci_tracker,
+ allocs,
+ instances_under_same_host_resize,
+ )
+
self.provider_tree = prov_tree
# This merges in changes from the provider config files loaded in init
self._merge_provider_configs(self.provider_configs, prov_tree)
- # Flush any changes. If we processed ReshapeNeeded above, allocs is not
- # None, and this will hit placement's POST /reshaper route.
- self.reportclient.update_from_provider_tree(context, prov_tree,
- allocations=allocs)
+ try:
+ # Flush any changes. If we either processed ReshapeNeeded above or
+ # update_provider_tree_for_pci did reshape, then we need to pass
+ # allocs to update_from_provider_tree to hit placement's POST
+ # /reshaper route.
+ self.reportclient.update_from_provider_tree(
+ context,
+ prov_tree,
+ allocations=allocs if driver_reshaped or pci_reshaped else None
+ )
+ except exception.InventoryInUse as e:
+ # This means an inventory reconfiguration (e.g.: removing a parent
+ # PF and adding a VF under that parent) was not possible due to
+ # existing allocations. Translate the exception to prevent the
+ # compute service from starting.
+ raise exception.PlacementPciException(error=str(e))
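Allocations now only accompany the provider tree update when something actually reshaped, either the virt driver's ReshapeNeeded path or the PCI translator; a compact sketch of that decision (the callable stands in for update_from_provider_tree):

    def flush_provider_tree(update_from_provider_tree, prov_tree, allocs,
                            driver_reshaped, pci_reshaped):
        update_from_provider_tree(
            prov_tree,
            allocations=allocs if (driver_reshaped or pci_reshaped) else None)

    flush_provider_tree(
        lambda tree, allocations=None: print("allocations sent:", allocations),
        prov_tree={}, allocs={"inst-1": {}},
        driver_reshaped=False, pci_reshaped=True)
    # allocations sent: {'inst-1': {}}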
def _update(self, context, compute_node, startup=False):
"""Update partial stats locally and populate them to Scheduler."""
+
+ self._update_to_placement(context, compute_node, startup)
+
+ if self.pci_tracker:
+ # sync PCI device pool state stored in the compute node with
+ # the actual state from the PCI tracker as we commit changes in
+ # the DB and in the PCI tracker below
+ dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
+ compute_node.pci_device_pools = dev_pools_obj
+
# _resource_change will update self.old_resources if it detects changes
# but we want to restore those if compute_node.save() fails.
nodename = compute_node.hypervisor_hostname
old_compute = self.old_resources[nodename]
if self._resource_change(compute_node):
# If the compute_node's resource changed, update to DB. Note that
- # _update_to_placement below does not supersede the need to do this
+ # _update_to_placement above does not supersede the need to do this
# because there are stats-related fields in the ComputeNode object
# which could have changed and still need to be reported to the
# scheduler filters/weighers (which could be out of tree as well).
@@ -1251,8 +1340,6 @@ class ResourceTracker(object):
with excutils.save_and_reraise_exception(logger=LOG):
self.old_resources[nodename] = old_compute
- self._update_to_placement(context, compute_node, startup)
-
if self.pci_tracker:
self.pci_tracker.save(context)
@@ -1825,7 +1912,7 @@ class ResourceTracker(object):
raise ValueError(_(
"Provider config '%(source_file_name)s' attempts "
"to define a trait that is owned by the "
- "virt driver or specified via the placment api. "
+ "virt driver or specified via the placement api. "
"Invalid traits '%(invalid)s' must be removed "
"from '%(source_file_name)s'.") % {
'source_file_name': source_file_name,
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 22afd79109..efc06300db 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -402,6 +402,8 @@ class ComputeAPI(object):
* ... - Rename the instance_type argument of prep_resize() to flavor
* ... - Rename the instance_type argument of resize_instance() to
flavor
+ * 6.1 - Add reimage_boot_volume parameter to rebuild_instance()
+ * 6.2 - Add target_state parameter to rebuild_instance()
'''
VERSION_ALIASES = {
@@ -421,6 +423,9 @@ class ComputeAPI(object):
'victoria': '5.12',
'wallaby': '6.0',
'xena': '6.0',
+ 'yoga': '6.0',
+ 'zed': '6.1',
+ 'antelope': '6.2',
}
@property
@@ -1079,7 +1084,8 @@ class ComputeAPI(object):
self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate, on_shared_storage, host, node,
- preserve_ephemeral, migration, limits, request_spec, accel_uuids):
+ preserve_ephemeral, migration, limits, request_spec, accel_uuids,
+ reimage_boot_volume, target_state):
# NOTE(edleafe): compute nodes can only use the dict form of limits.
if isinstance(limits, objects.SchedulerLimits):
@@ -1091,11 +1097,29 @@ class ComputeAPI(object):
'scheduled_node': node,
'limits': limits,
'request_spec': request_spec,
- 'accel_uuids': accel_uuids
+ 'accel_uuids': accel_uuids,
+ 'reimage_boot_volume': reimage_boot_volume,
+ 'target_state': target_state,
}
- version = self._ver(ctxt, '5.12')
+ version = '6.2'
client = self.router.client(ctxt)
if not client.can_send_version(version):
+ if msg_args['target_state']:
+ raise exception.UnsupportedRPCVersion(
+ api="rebuild_instance",
+ required="6.2")
+ else:
+ del msg_args['target_state']
+ version = '6.1'
+ if not client.can_send_version(version):
+ if msg_args['reimage_boot_volume']:
+ raise exception.NovaException(
+ 'Compute RPC version does not support '
+ 'reimage_boot_volume parameter.')
+ else:
+ del msg_args['reimage_boot_volume']
+ version = self._ver(ctxt, '5.12')
+ if not client.can_send_version(version):
del msg_args['accel_uuids']
version = '5.0'
cctxt = client.prepare(server=_compute_host(host, instance),
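The rebuild_instance RPC call walks a downgrade ladder: it drops the newest optional arguments for older peers, or refuses outright if the caller actually needs them; a self-contained imitation of that negotiation (FakeClient and the error types are invented for the example):

    class FakeClient:
        def __init__(self, max_version):
            self.max = tuple(int(p) for p in max_version.split("."))

        def can_send_version(self, version):
            return tuple(int(p) for p in version.split(".")) <= self.max

    def negotiate_rebuild_args(client, msg_args):
        version = "6.2"
        if not client.can_send_version(version):
            if msg_args.get("target_state"):
                raise RuntimeError("rebuild_instance requires RPC 6.2")
            msg_args.pop("target_state", None)
            version = "6.1"
        if not client.can_send_version(version):
            if msg_args.get("reimage_boot_volume"):
                raise RuntimeError("reimage_boot_volume requires RPC 6.1")
            msg_args.pop("reimage_boot_volume", None)
            version = "6.0"
        return version, msg_args

    print(negotiate_rebuild_args(
        FakeClient("6.0"),
        {"target_state": None, "reimage_boot_volume": False, "image_ref": "x"}))
    # ('6.0', {'image_ref': 'x'})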
@@ -1502,7 +1526,7 @@ class ComputeAPI(object):
client = self.router.client(ctxt)
cctxt = client.prepare(server=_compute_host(None, instance),
version=version)
- return cctxt.cast(ctxt, "trigger_crash_dump", instance=instance)
+ cctxt.cast(ctxt, "trigger_crash_dump", instance=instance)
def cache_images(self, ctxt, host, image_ids):
version = self._ver(ctxt, '5.4')
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 61a6b40fb1..30efc24fc7 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -1110,6 +1110,8 @@ def check_num_instances_quota(
deltas = {'instances': max_count, 'cores': req_cores, 'ram': req_ram}
try:
+ # NOTE(johngarbutt) when using unified limits, this call
+ # is a no-op, and as such, this function always returns max_count
objects.Quotas.check_deltas(context, deltas,
project_id, user_id=user_id,
check_project_id=project_id,
@@ -1489,7 +1491,7 @@ def notify_about_instance_delete(notifier, context, instance,
phase=fields.NotificationPhase.END)
-def update_pci_request_spec_with_allocated_interface_name(
+def update_pci_request_with_placement_allocations(
context, report_client, pci_requests, provider_mapping):
"""Update the instance's PCI request based on the request group -
resource provider mapping and the device RP name from placement.
@@ -1510,12 +1512,33 @@ def update_pci_request_spec_with_allocated_interface_name(
if not pci_requests:
return
- def needs_update(pci_request, mapping):
+ def needs_update_due_to_qos(pci_request, mapping):
return (pci_request.requester_id and
pci_request.requester_id in mapping)
+ def get_group_mapping_for_flavor_based_pci_request(pci_request, mapping):
+ # NOTE(gibi): for flavor based PCI requests nova generates RequestGroup
+ # suffixes from InstancePCIRequests in the form of
+ # {request_id}-{count_index}
+ # NOTE(gibi): a suffixed request group is always fulfilled from a
+ # single RP
+ return {
+ group_id: rp_uuids[0]
+ for group_id, rp_uuids in mapping.items()
+ if group_id.startswith(pci_request.request_id)
+ }
+
for pci_request in pci_requests:
- if needs_update(pci_request, provider_mapping):
+ mapping = get_group_mapping_for_flavor_based_pci_request(
+ pci_request, provider_mapping)
+
+ if mapping:
+ for spec in pci_request.spec:
+ # FIXME(gibi): this is baaad but spec is a dict of strings so
+ # we need to serialize
+ spec['rp_uuids'] = ','.join(mapping.values())
+
+ elif needs_update_due_to_qos(pci_request, provider_mapping):
provider_uuids = provider_mapping[pci_request.requester_id]
if len(provider_uuids) != 1:
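Flavor-based PCI request groups are suffixed "<request_id>-<index>" and each resolves to exactly one RP, so the mapping can be filtered by prefix; for instance:

    def group_mapping_for_pci_request(request_id, provider_mapping):
        return {
            group_id: rp_uuids[0]
            for group_id, rp_uuids in provider_mapping.items()
            if group_id.startswith(request_id)
        }

    mapping = group_mapping_for_pci_request(
        "req-a", {"req-a-0": ["rp-1"], "req-a-1": ["rp-2"], "port-x": ["rp-3"]})
    print(mapping)                      # {'req-a-0': 'rp-1', 'req-a-1': 'rp-2'}
    print(",".join(mapping.values()))   # serialized form stored in the spec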
diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py
index 633894c1ea..1a916ea59a 100644
--- a/nova/compute/vm_states.py
+++ b/nova/compute/vm_states.py
@@ -76,3 +76,6 @@ ALLOW_TRIGGER_CRASH_DUMP = [ACTIVE, PAUSED, RESCUED, RESIZED, ERROR]
# states we allow resources to be freed in
ALLOW_RESOURCE_REMOVAL = [DELETED, SHELVED_OFFLOADED]
+
+# states we allow for evacuate instance
+ALLOW_TARGET_STATES = [STOPPED]
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 4d94b680a4..843c8ce3a3 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -144,7 +144,8 @@ class ComputeTaskAPI(object):
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
preserve_ephemeral=False, host=None,
- request_spec=None):
+ request_spec=None, reimage_boot_volume=False,
+ target_state=None):
self.conductor_compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
@@ -157,7 +158,9 @@ class ComputeTaskAPI(object):
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
host=host,
- request_spec=request_spec)
+ request_spec=request_spec,
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=target_state)
def cache_images(self, context, aggregate, image_ids):
"""Request images be pre-cached on hosts within an aggregate.
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 3b43644d9a..4b34b8339c 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -21,8 +21,10 @@ import eventlet
import functools
import sys
+from keystoneauth1 import exceptions as ks_exc
from oslo_config import cfg
from oslo_db import exception as db_exc
+from oslo_limit import exception as limit_exceptions
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
@@ -45,6 +47,7 @@ from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.image import glance
+from nova.limit import placement as placement_limits
from nova import manager
from nova.network import neutron
from nova import notifications
@@ -232,7 +235,7 @@ class ComputeTaskManager:
may involve coordinating activities on multiple compute nodes.
"""
- target = messaging.Target(namespace='compute_task', version='1.23')
+ target = messaging.Target(namespace='compute_task', version='1.25')
def __init__(self):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
@@ -241,11 +244,42 @@ class ComputeTaskManager:
self.network_api = neutron.API()
self.servicegroup_api = servicegroup.API()
self.query_client = query.SchedulerQueryClient()
- self.report_client = report.SchedulerReportClient()
self.notifier = rpc.get_notifier('compute')
# Help us to record host in EventReporter
self.host = CONF.host
+ try:
+ # Test our placement client during initialization
+ self.report_client
+ except (ks_exc.EndpointNotFound,
+ ks_exc.DiscoveryFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ ks_exc.ConnectFailure) as e:
+ # Non-fatal, likely transient (although not definitely);
+ # continue startup but log the warning so that when things
+ # fail later, it will be clear why we can not do certain
+ # things.
+ LOG.warning('Unable to initialize placement client (%s); '
+ 'Continuing with startup, but some operations '
+ 'will not be possible.', e)
+ except (ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized) as e:
+ # This is almost definitely a fatal misconfiguration. The
+ # Unauthorized error might be transient, but it is
+ # probably reasonable to consider it fatal.
+ LOG.error('Fatal error initializing placement client; '
+ 'config is incorrect or incomplete: %s', e)
+ raise
+ except Exception as e:
+ # Unknown/unexpected errors here are fatal
+ LOG.error('Fatal error initializing placement client: %s', e)
+ raise
+
+ @property
+ def report_client(self):
+ return report.report_client_singleton()
+
def reset(self):
LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
@@ -1003,6 +1037,12 @@ class ComputeTaskManager:
request_spec.requested_resources = res_req
request_spec.request_level_params = req_lvl_params
+ # NOTE(gibi): as PCI devices is tracked in placement we
+ # need to generate request groups from InstancePCIRequests.
+ # This will append new RequestGroup objects to the
+ # request_spec.requested_resources list if needed
+ request_spec.generate_request_groups_from_pci_requests()
+
# NOTE(cfriesen): Ensure that we restrict the scheduler to
# the cell specified by the instance mapping.
self._restrict_request_spec_to_cell(
@@ -1020,6 +1060,12 @@ class ComputeTaskManager:
scheduler_utils.populate_filter_properties(
filter_properties, selection)
(host, node) = (selection.service_host, selection.nodename)
+ LOG.debug(
+ "Scheduler selected host: %s, node:%s",
+ host,
+ node,
+ instance=instance
+ )
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
@@ -1106,7 +1152,8 @@ class ComputeTaskManager:
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
- request_spec=None):
+ request_spec=None, reimage_boot_volume=False,
+ target_state=None):
# recreate=True means the instance is being evacuated from a failed
# host to a new destination host. The 'recreate' variable name is
# confusing, so rename it to evacuate here at the top, which is simpler
@@ -1202,6 +1249,12 @@ class ComputeTaskManager:
request_spec.requested_resources = res_req
request_spec.request_level_params = req_lvl_params
+ # NOTE(gibi): as PCI devices is tracked in placement we
+ # need to generate request groups from InstancePCIRequests.
+ # This will append new RequestGroup objects to the
+ # request_spec.requested_resources list if needed
+ request_spec.generate_request_groups_from_pci_requests()
+
try:
# if this is a rebuild of instance on the same host with
# new image.
@@ -1303,7 +1356,9 @@ class ComputeTaskManager:
node=node,
limits=limits,
request_spec=request_spec,
- accel_uuids=accel_uuids)
+ accel_uuids=accel_uuids,
+ reimage_boot_volume=reimage_boot_volume,
+ target_state=target_state)
def _validate_image_traits_for_rebuild(self, context, instance, image_ref):
"""Validates that the traits specified in the image can be satisfied
@@ -1632,7 +1687,11 @@ class ComputeTaskManager:
compute_utils.check_num_instances_quota(
context, instance.flavor, 0, 0,
orig_num_req=len(build_requests))
- except exception.TooManyInstances as exc:
+ placement_limits.enforce_num_instances_and_flavor(
+ context, context.project_id, instance.flavor,
+ request_specs[0].is_bfv, 0, 0)
+ except (exception.TooManyInstances,
+ limit_exceptions.ProjectOverLimit) as exc:
with excutils.save_and_reraise_exception():
self._cleanup_build_artifacts(context, exc, instances,
build_requests,
@@ -2037,8 +2096,8 @@ class ComputeTaskManager:
skipped_host(target_ctxt, host, image_ids)
continue
- fetch_pool.spawn_n(wrap_cache_images, target_ctxt, host,
- image_ids)
+ utils.pass_context(fetch_pool.spawn_n, wrap_cache_images,
+ target_ctxt, host, image_ids)
# Wait until all those things finish
fetch_pool.waitall()
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 28311af31b..a5f0cf0094 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -286,6 +286,8 @@ class ComputeTaskAPI(object):
1.21 - Added cache_images()
1.22 - Added confirm_snapshot_based_resize()
1.23 - Added revert_snapshot_based_resize()
+ 1.24 - Add reimage_boot_volume parameter to rebuild_instance()
+ 1.25 - Add target_state parameter to rebuild_instance()
"""
def __init__(self):
@@ -426,8 +428,9 @@ class ComputeTaskAPI(object):
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
- preserve_ephemeral=False, request_spec=None):
- version = '1.12'
+ preserve_ephemeral=False, request_spec=None,
+ reimage_boot_volume=False, target_state=None):
+ version = '1.25'
kw = {'instance': instance,
'new_pass': new_pass,
'injected_files': injected_files,
@@ -440,8 +443,25 @@ class ComputeTaskAPI(object):
'preserve_ephemeral': preserve_ephemeral,
'host': host,
'request_spec': request_spec,
+ 'reimage_boot_volume': reimage_boot_volume,
+ 'target_state': target_state,
}
if not self.client.can_send_version(version):
+ if kw['target_state']:
+ raise exception.UnsupportedRPCVersion(
+ api="rebuild_instance", required="1.25")
+ else:
+ del kw['target_state']
+ version = '1.24'
+ if not self.client.can_send_version(version):
+ if kw['reimage_boot_volume']:
+ raise exception.NovaException(
+ 'Conductor RPC version does not support '
+ 'reimage_boot_volume parameter.')
+ else:
+ del kw['reimage_boot_volume']
+ version = '1.12'
+ if not self.client.can_send_version(version):
version = '1.8'
del kw['request_spec']
cctxt = self.client.prepare(version=version)
@@ -473,4 +493,4 @@ class ComputeTaskAPI(object):
raise exception.ServiceTooOld(_('nova-conductor too old'))
kw = {'instance': instance, 'migration': migration}
cctxt = self.client.prepare(version=version)
- return cctxt.cast(ctxt, 'revert_snapshot_based_resize', **kw)
+ cctxt.cast(ctxt, 'revert_snapshot_based_resize', **kw)
diff --git a/nova/conductor/tasks/cross_cell_migrate.py b/nova/conductor/tasks/cross_cell_migrate.py
index d66394cb6e..ab391bc485 100644
--- a/nova/conductor/tasks/cross_cell_migrate.py
+++ b/nova/conductor/tasks/cross_cell_migrate.py
@@ -698,7 +698,7 @@ class CrossCellMigrationTask(base.TaskBase):
LOG.debug('Making sure neutron is new enough for cross-cell resize.')
# Check that the port binding-extended API extension is available in
# neutron because if it's not we can just fail fast.
- if not self.network_api.supports_port_binding_extension(self.context):
+ if not self.network_api.has_port_binding_extension(self.context):
raise exception.MigrationPreCheckError(
reason=_("Required networking service API extension '%s' "
"not found.") %
@@ -1163,7 +1163,7 @@ class RevertResizeTask(base.TaskBase):
setattr(source_obj, field, getattr(target_obj, field))
def _update_bdms_in_source_cell(self, source_cell_context):
- """Update BlockDeviceMapppings in the source cell database.
+ """Update BlockDeviceMappings in the source cell database.
It is possible to attach/detach volumes to/from a resized instance,
which would create/delete BDM records in the target cell, so we have
diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py
index 294abfe4e3..cca97c53f7 100644
--- a/nova/conductor/tasks/live_migrate.py
+++ b/nova/conductor/tasks/live_migrate.py
@@ -245,8 +245,7 @@ class LiveMigrationTask(base.TaskBase):
"are not allowed for live migration.")
# All PCI requests are VIF related, now check neutron,
# source and destination compute nodes.
- if not self.network_api.supports_port_binding_extension(
- self.context):
+ if not self.network_api.has_port_binding_extension(self.context):
raise exception.MigrationPreCheckError(
reason="Cannot live migrate VIF with related PCI, Neutron "
"does not support required port binding extension.")
@@ -348,8 +347,9 @@ class LiveMigrationTask(base.TaskBase):
source_version = source_info.hypervisor_version
destination_version = destination_info.hypervisor_version
- if source_version > destination_version:
- raise exception.DestinationHypervisorTooOld()
+ if not CONF.workarounds.skip_hypervisor_version_check_on_lm:
+ if source_version > destination_version:
+ raise exception.DestinationHypervisorTooOld()
return source_info, destination_info
def _call_livem_checks_on_host(self, destination, provider_mapping):
@@ -366,7 +366,7 @@ class LiveMigrationTask(base.TaskBase):
raise exception.MigrationPreCheckError(msg)
# Check to see that neutron supports the binding-extended API.
- if self.network_api.supports_port_binding_extension(self.context):
+ if self.network_api.has_port_binding_extension(self.context):
bindings = self._bind_ports_on_destination(
destination, provider_mapping)
self._update_migrate_vifs_from_bindings(self.migrate_data.vifs,
@@ -542,7 +542,7 @@ class LiveMigrationTask(base.TaskBase):
# will be persisted when post_live_migration_at_destination
# runs.
compute_utils.\
- update_pci_request_spec_with_allocated_interface_name(
+ update_pci_request_with_placement_allocations(
self.context, self.report_client,
self.instance.pci_requests.requests, provider_mapping)
try:
diff --git a/nova/conductor/tasks/migrate.py b/nova/conductor/tasks/migrate.py
index 6ff6206f65..754f9e5ba7 100644
--- a/nova/conductor/tasks/migrate.py
+++ b/nova/conductor/tasks/migrate.py
@@ -54,7 +54,7 @@ def replace_allocation_with_migration(context, instance, migration):
# and do any rollback required
raise
- reportclient = report.SchedulerReportClient()
+ reportclient = report.report_client_singleton()
orig_alloc = reportclient.get_allocs_for_consumer(
context, instance.uuid)['allocations']
@@ -94,7 +94,7 @@ def replace_allocation_with_migration(context, instance, migration):
def revert_allocation_for_migration(context, source_cn, instance, migration):
"""Revert an allocation made for a migration back to the instance."""
- reportclient = report.SchedulerReportClient()
+ reportclient = report.report_client_singleton()
# FIXME(gibi): This method is flawed in that it does not handle allocations
# against sharing providers in any special way. This leads to duplicate
@@ -258,6 +258,11 @@ class MigrationTask(base.TaskBase):
# resource requests in a single list and add them to the RequestSpec.
self.request_spec.requested_resources = port_res_req
self.request_spec.request_level_params = req_lvl_params
+ # NOTE(gibi): as PCI devices are tracked in placement we need to
+ # generate request groups from InstancePCIRequests. This will append
+ # new RequestGroup objects to the request_spec.requested_resources list
+ # if needed
+ self.request_spec.generate_request_groups_from_pci_requests()
self._set_requested_destination_cell(legacy_props)
diff --git a/nova/conf/__init__.py b/nova/conf/__init__.py
index b8b4d4906a..9e5a57afba 100644
--- a/nova/conf/__init__.py
+++ b/nova/conf/__init__.py
@@ -49,7 +49,6 @@ from nova.conf import novnc
from nova.conf import paths
from nova.conf import pci
from nova.conf import placement
-from nova.conf import powervm
from nova.conf import quota
from nova.conf import rdp
from nova.conf import rpc
@@ -99,7 +98,6 @@ novnc.register_opts(CONF)
paths.register_opts(CONF)
pci.register_opts(CONF)
placement.register_opts(CONF)
-powervm.register_opts(CONF)
quota.register_opts(CONF)
rdp.register_opts(CONF)
rpc.register_opts(CONF)
diff --git a/nova/conf/api.py b/nova/conf/api.py
index 898741c7f5..58cbc4931e 100644
--- a/nova/conf/api.py
+++ b/nova/conf/api.py
@@ -42,7 +42,11 @@ Determine the strategy to use for authentication.
"""),
cfg.BoolOpt("use_forwarded_for",
default=False,
+ deprecated_for_removal=True,
+ deprecated_reason='This feature is a duplicate of the HTTPProxyToWSGI '
+ 'middleware in oslo.middleware',
deprecated_group="DEFAULT",
+ deprecated_since='26.0.0',
help="""
When True, the 'X-Forwarded-For' header is treated as the canonical remote
address. When False (the default), the 'remote_address' header is used.
@@ -221,8 +225,11 @@ service.
help="""
Domain name used to configure FQDN for instances.
-Configure a fully-qualified domain name for instance hostnames. If unset, only
-the hostname without a domain will be configured.
+Configure a fully-qualified domain name for instance hostnames. The value is
+suffixed to the instance hostname from the database to construct the hostname
+that appears in the metadata API. To disable this behavior (for example in
+order to correctly support microversion 2.94's FQDN hostnames), set this to the
+empty string.
Possible values:
diff --git a/nova/conf/compute.py b/nova/conf/compute.py
index 5cf8c31714..de2743d850 100644
--- a/nova/conf/compute.py
+++ b/nova/conf/compute.py
@@ -41,7 +41,6 @@ Possible values:
* ``ironic.IronicDriver``
* ``vmwareapi.VMwareVCDriver``
* ``hyperv.HyperVDriver``
-* ``powervm.PowerVMDriver``
* ``zvm.ZVMDriver``
"""),
cfg.BoolOpt('allow_resize_to_same_host',
@@ -200,7 +199,7 @@ The template will be rendered using Jinja2 template engine, and receive a
top-level key called ``interfaces``. This key will contain a list of
dictionaries, one for each interface.
-Refer to the cloudinit documentaion for more information:
+Refer to the cloudinit documentation for more information:
https://cloudinit.readthedocs.io/en/latest/topics/datasources.html
@@ -306,6 +305,21 @@ Related options:
agent disabled. When used with libvirt the instance mode should be
configured as HVM.
"""),
+ cfg.IntOpt('reimage_timeout_per_gb',
+ default=20,
+ min=1,
+ help="""
+Timeout for reimaging a volume.
+
+Number of seconds to wait for volume-reimaged events to arrive before
+continuing or failing.
+
+This is a per-gigabyte time which has a default value of 20 seconds and
+will be multiplied by the size of the image in GB. E.g. an image of 6 GB will
+have a timeout of 20 * 6 = 120 seconds.
+Try increasing the timeout if the image copy per GB takes more time and you
+are hitting timeout failures.
+"""),
]
resource_tracker_opts = [
@@ -426,9 +440,7 @@ allocation_ratio_opts = [
Virtual CPU to physical CPU allocation ratio.
This option is used to influence the hosts selected by the Placement API by
-configuring the allocation ratio for ``VCPU`` inventory. In addition, the
-``AggregateCoreFilter`` (deprecated) will fall back to this configuration value
-if no per-aggregate setting is found.
+configuring the allocation ratio for ``VCPU`` inventory.
.. note::
@@ -459,9 +471,7 @@ Related options:
Virtual RAM to physical RAM allocation ratio.
This option is used to influence the hosts selected by the Placement API by
-configuring the allocation ratio for ``MEMORY_MB`` inventory. In addition, the
-``AggregateRamFilter`` (deprecated) will fall back to this configuration value
-if no per-aggregate setting is found.
+configuring the allocation ratio for ``MEMORY_MB`` inventory.
.. note::
@@ -487,9 +497,7 @@ Related options:
Virtual disk to physical disk allocation ratio.
This option is used to influence the hosts selected by the Placement API by
-configuring the allocation ratio for ``DISK_GB`` inventory. In addition, the
-``AggregateDiskFilter`` (deprecated) will fall back to this configuration value
-if no per-aggregate setting is found.
+configuring the allocation ratio for ``DISK_GB`` inventory.
When configured, a ratio greater than 1.0 will result in over-subscription of
the available physical disk, which can be useful for more efficiently packing
@@ -521,7 +529,7 @@ Related options:
* ``initial_disk_allocation_ratio``
"""),
cfg.FloatOpt('initial_cpu_allocation_ratio',
- default=16.0,
+ default=4.0,
min=0.0,
help="""
Initial virtual CPU to physical CPU allocation ratio.
@@ -537,7 +545,7 @@ Related options:
* ``cpu_allocation_ratio``
"""),
cfg.FloatOpt('initial_ram_allocation_ratio',
- default=1.5,
+ default=1.0,
min=0.0,
help="""
Initial virtual RAM to physical RAM allocation ratio.
@@ -1008,6 +1016,33 @@ Related options:
filtering computes based on supported image types, which is required
to be enabled for this to take effect.
"""),
+ cfg.ListOpt('vmdk_allowed_types',
+ default=['streamOptimized', 'monolithicSparse'],
+ help="""
+A list of strings describing the VMDK "create-type" subformats that will be
+allowed. It is recommended to only include
+single-file-with-sparse-header variants to avoid potential host file
+exposure due to processing named extents. If this list is empty, then no
+form of VMDK image will be allowed.
+"""),
+ cfg.BoolOpt('packing_host_numa_cells_allocation_strategy',
+ default=False,
+ help="""
+This option controls the allocation strategy used to choose the host NUMA
+cells on which to place a VM's NUMA cells (for VMs with a defined NUMA
+topology). By default, the host NUMA cell with the most resources already
+consumed is chosen last for a placement attempt. When
+packing_host_numa_cells_allocation_strategy is set to ``False``, the host NUMA
+cell with the most resources available will be used. When set to ``True``,
+cells that already have some usage will be packed with the VM's cells until
+they are completely exhausted, before a new, free host cell is used.
+
+Possible values:
+
+* ``True``: Pack the VM's NUMA cells onto the most used host NUMA cells.
+* ``False``: Spread the VM's NUMA cells across the host NUMA cells with the
+ most resources available.
+"""),
]
interval_opts = [
diff --git a/nova/conf/hyperv.py b/nova/conf/hyperv.py
index caa7a8702b..cce3cdc3e2 100644
--- a/nova/conf/hyperv.py
+++ b/nova/conf/hyperv.py
@@ -320,7 +320,7 @@ configured to claim such devices.
cfg.ListOpt('iscsi_initiator_list',
default=[],
help="""
-List of iSCSI initiators that will be used for estabilishing iSCSI sessions.
+List of iSCSI initiators that will be used for establishing iSCSI sessions.
If none are specified, the Microsoft iSCSI initiator service will choose the
initiator.
diff --git a/nova/conf/ironic.py b/nova/conf/ironic.py
index dc5d2412c4..2734f2b78a 100644
--- a/nova/conf/ironic.py
+++ b/nova/conf/ironic.py
@@ -27,6 +27,7 @@ ironic_group = cfg.OptGroup(
help="""
Configuration options for Ironic driver (Bare Metal).
If using the Ironic driver following options must be set:
+
* auth_type
* auth_url
* project_name
diff --git a/nova/conf/keystone.py b/nova/conf/keystone.py
index d3acdbc58c..73769a8a68 100644
--- a/nova/conf/keystone.py
+++ b/nova/conf/keystone.py
@@ -35,6 +35,10 @@ def list_opts():
return {
keystone_group: (
ks_loading.get_session_conf_options() +
+ ks_loading.get_auth_common_conf_options() +
+ ks_loading.get_auth_plugin_conf_options('password') +
+ ks_loading.get_auth_plugin_conf_options('v2password') +
+ ks_loading.get_auth_plugin_conf_options('v3password') +
confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE)
)
}
diff --git a/nova/conf/libvirt.py b/nova/conf/libvirt.py
index 199cc6cf5e..204fe5c4b8 100644
--- a/nova/conf/libvirt.py
+++ b/nova/conf/libvirt.py
@@ -321,13 +321,14 @@ Please refer to the libvirt documentation for further details.
default=500,
min=100,
help="""
-Maximum permitted downtime, in milliseconds, for live migration
-switchover.
+Target maximum period of time Nova will try to keep the instance paused
+during the last part of the memory copy, in *milliseconds*.
Will be rounded up to a minimum of 100ms. You can increase this value
if you want to allow live-migrations to complete faster, or avoid
live-migration timeout errors by allowing the guest to be paused for
-longer during the live-migration switch over.
+longer during the live-migration switch over. This value may be exceeded
+if there is any reduction in the transfer rate after the VM is paused.
Related options:
@@ -452,7 +453,7 @@ support built into QEMU.
Prerequisite: TLS environment is configured correctly on all relevant
Compute nodes. This means, Certificate Authority (CA), server, client
-certificates, their corresponding keys, and their file permisssions are
+certificates, their corresponding keys, and their file permissions are
in place, and are validated.
Notes:
@@ -704,7 +705,7 @@ the source of entropy on the host. Since libvirt 1.3.4, any path (that
returns random numbers when read) is accepted. The recommended source
of entropy is ``/dev/urandom`` -- it is non-blocking, therefore
relatively fast; and avoids the limitations of ``/dev/random``, which is
-a legacy interface. For more details (and comparision between different
+a legacy interface. For more details (and comparison between different
RNG sources), refer to the "Usage" section in the Linux kernel API
documentation for ``[u]random``:
http://man7.org/linux/man-pages/man4/urandom.4.html and
@@ -986,6 +987,7 @@ slowly to be useful. Actual errors will be reported by Glance and noticed
according to the poll interval.
Related options:
+
* images_type - must be set to ``rbd``
* images_rbd_glance_store_name - must be set to a store name
* images_rbd_glance_copy_poll_interval - controls the failure time-to-notice
@@ -1476,6 +1478,23 @@ Related options:
"""),
]
+libvirt_cpu_mgmt_opts = [
+ cfg.BoolOpt('cpu_power_management',
+ default=False,
+ help='Use libvirt to manage CPU cores performance.'),
+ cfg.StrOpt('cpu_power_management_strategy',
+ choices=['cpu_state', 'governor'],
+ default='cpu_state',
+ help='Tuning strategy to reduce CPU power consumption when '
+ 'unused'),
+ cfg.StrOpt('cpu_power_governor_low',
+ default='powersave',
+ help='Governor to use in order '
+ 'to reduce CPU power consumption'),
+ cfg.StrOpt('cpu_power_governor_high',
+ default='performance',
+ help='Governor to use in order to have best CPU performance'),
+]
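A hypothetical example of turning on the new libvirt CPU power management; the strategy shown is simply the default spelled out:

.. code-block:: ini

    [libvirt]
    # Let the libvirt driver reduce power on cores while they are unused.
    cpu_power_management = True
    # 'cpu_state' toggles cores online/offline; 'governor' instead switches
    # between cpu_power_governor_low and cpu_power_governor_high.
    cpu_power_management_strategy = cpu_state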
ALL_OPTS = list(itertools.chain(
libvirt_general_opts,
@@ -1497,6 +1516,7 @@ ALL_OPTS = list(itertools.chain(
libvirt_volume_nvmeof_opts,
libvirt_pmem_opts,
libvirt_vtpm_opts,
+ libvirt_cpu_mgmt_opts,
))
diff --git a/nova/conf/mks.py b/nova/conf/mks.py
index 1703f5f240..ec403a1a4f 100644
--- a/nova/conf/mks.py
+++ b/nova/conf/mks.py
@@ -23,7 +23,9 @@ Nova compute node uses WebMKS, a desktop sharing protocol to provide
instance console access to VM's created by VMware hypervisors.
Related options:
+
Following options must be set to provide console access.
+
* mksproxy_base_url
* enabled
""")
diff --git a/nova/conf/neutron.py b/nova/conf/neutron.py
index dc391a268e..e6774ced55 100644
--- a/nova/conf/neutron.py
+++ b/nova/conf/neutron.py
@@ -46,7 +46,7 @@ Default name for the floating IP pool.
Specifies the name of floating IP pool used for allocating floating IPs. This
option is only used if Neutron does not specify the floating IP pool name in
-port binding reponses.
+port binding responses.
"""),
cfg.IntOpt('extension_sync_interval',
default=600,
diff --git a/nova/conf/pci.py b/nova/conf/pci.py
index b383d0a69f..533bf52ead 100644
--- a/nova/conf/pci.py
+++ b/nova/conf/pci.py
@@ -67,6 +67,36 @@ Possible Values:
Required NUMA affinity of device. Valid values are: ``legacy``,
``preferred`` and ``required``.
+ ``resource_class``
+ The optional Placement resource class name that is used
+ to track the requested PCI devices in Placement. It can be a standard
+ resource class from the ``os-resource-classes`` lib. Or it can be an
+ arbitrary string. If it is a non-standard resource class then Nova will
+ normalize it to a proper Placement resource class by
+ making it upper case, replacing any consecutive character outside of
+ ``[A-Z0-9_]`` with a single '_', and prefixing the name with ``CUSTOM_`` if
+ not yet prefixed. The maximum allowed length is 255 characters including the
+ prefix. If ``resource_class`` is not provided Nova will generate it from
+ ``vendor_id`` and ``product_id`` values of the alias in the form of
+ ``CUSTOM_PCI_{vendor_id}_{product_id}``. The ``resource_class`` requested
+ in the alias is matched against the ``resource_class`` defined in the
+ ``[pci]device_spec``. This field can only be used only if
+ ``[filter_scheduler]pci_in_placement`` is enabled.
+
+ ``traits``
+ An optional comma separated list of Placement trait names requested to be
+ present on the resource provider that fulfills this alias. Each trait can
+ be a standard trait from ``os-traits`` lib or it can be an arbitrary
+ string. If it is a non-standard trait then Nova will normalize the
+ trait name by making it upper case, replacing any consecutive character
+ outside of ``[A-Z0-9_]`` with a single '_', and prefixing the name
+ with ``CUSTOM_`` if not yet prefixed. The maximum allowed length of a
+ trait name is 255 characters including the prefix. Every trait in
+ ``traits`` requested in the alias is ensured to be in the list of traits
+ provided in the ``traits`` field of the ``[pci]device_spec`` when
+ scheduling the request. This field can only be used if
+ ``[filter_scheduler]pci_in_placement`` is enabled.
+
* Supports multiple aliases by repeating the option (not by specifying
a list value)::
@@ -85,16 +115,18 @@ Possible Values:
"numa_policy": "required"
}
"""),
- cfg.MultiStrOpt('passthrough_whitelist',
+ cfg.MultiStrOpt('device_spec',
default=[],
- deprecated_name='pci_passthrough_whitelist',
- deprecated_group='DEFAULT',
+ deprecated_opts=[
+ cfg.DeprecatedOpt('passthrough_whitelist', group='pci'),
+ cfg.DeprecatedOpt('pci_passthrough_whitelist', group='DEFAULT'),
+ ],
help="""
-White list of PCI devices available to VMs.
+Specify the PCI devices available to VMs.
Possible values:
-* A JSON dictionary which describe a whitelisted PCI device. It should take
+* A JSON dictionary which describes a PCI device. It should take
the following format::
["vendor_id": "<id>",] ["product_id": "<id>",]
@@ -129,48 +161,115 @@ Possible values:
have a name.
``<tag>``
- Additional ``<tag>`` and ``<tag_value>`` used for matching PCI devices.
+ Additional ``<tag>`` and ``<tag_value>`` used for specifying PCI devices.
Supported ``<tag>`` values are :
- ``physical_network``
- ``trusted``
+ - ``remote_managed`` - a VF is managed remotely by an off-path networking
+ backend. May have case-insensitive boolean-like string values:
+ "true" or "false". By default, "false" is assumed for all devices.
+ Using this option requires a networking service backend capable of
+ handling those devices. PCI devices are also required to have a PCI
+ VPD capability with a card serial number (either on the VF itself or on
+ its corresponding PF), otherwise they will be ignored and not
+ available for allocation.
+ - ``resource_class`` - optional Placement resource class name to be used
+ to track the matching PCI devices in Placement when
+ ``[pci]report_in_placement`` is True. It can be a standard resource class
+ from the ``os-resource-classes`` lib, or any string. In that case Nova will
+ normalize it to a proper Placement resource class by making it upper
+ case, replacing any consecutive character outside of ``[A-Z0-9_]`` with a
+ single '_', and prefixing the name with ``CUSTOM_`` if not yet prefixed.
+ The maximum allowed length is 255 characters including the prefix.
+ If ``resource_class`` is not provided Nova will generate it from the PCI
+ device's ``vendor_id`` and ``product_id`` in the form of
+ ``CUSTOM_PCI_{vendor_id}_{product_id}``.
+ The ``resource_class`` can be requested from a ``[pci]alias``
+ - ``traits`` - optional comma separated list of Placement trait names to
+ report on the resource provider that will represent the matching PCI
+ device. Each trait can be a standard trait from ``os-traits`` lib or can
+ be any string. If it is not a standard trait then Nova will normalize the
+ trait name by making it upper case, replacing any consecutive character
+ outside of ``[A-Z0-9_]`` with a single '_', and prefixing the name with
+ ``CUSTOM_`` if not yet prefixed. The maximum allowed length of a trait
+ name is 255 characters including the prefix.
+ Any trait from ``traits`` can be requested from a ``[pci]alias``.
+
Valid examples are::
- passthrough_whitelist = {"devname":"eth0",
- "physical_network":"physnet"}
- passthrough_whitelist = {"address":"*:0a:00.*"}
- passthrough_whitelist = {"address":":0a:00.",
- "physical_network":"physnet1"}
- passthrough_whitelist = {"vendor_id":"1137",
- "product_id":"0071"}
- passthrough_whitelist = {"vendor_id":"1137",
- "product_id":"0071",
- "address": "0000:0a:00.1",
- "physical_network":"physnet1"}
- passthrough_whitelist = {"address":{"domain": ".*",
- "bus": "02", "slot": "01",
- "function": "[2-7]"},
- "physical_network":"physnet1"}
- passthrough_whitelist = {"address":{"domain": ".*",
- "bus": "02", "slot": "0[1-2]",
- "function": ".*"},
- "physical_network":"physnet1"}
- passthrough_whitelist = {"devname": "eth0", "physical_network":"physnet1",
- "trusted": "true"}
+ device_spec = {"devname":"eth0",
+ "physical_network":"physnet"}
+ device_spec = {"address":"*:0a:00.*"}
+ device_spec = {"address":":0a:00.",
+ "physical_network":"physnet1"}
+ device_spec = {"vendor_id":"1137",
+ "product_id":"0071"}
+ device_spec = {"vendor_id":"1137",
+ "product_id":"0071",
+ "address": "0000:0a:00.1",
+ "physical_network":"physnet1"}
+ device_spec = {"address":{"domain": ".*",
+ "bus": "02", "slot": "01",
+ "function": "[2-7]"},
+ "physical_network":"physnet1"}
+ device_spec = {"address":{"domain": ".*",
+ "bus": "02", "slot": "0[1-2]",
+ "function": ".*"},
+ "physical_network":"physnet1"}
+ device_spec = {"devname": "eth0", "physical_network":"physnet1",
+ "trusted": "true"}
+ device_spec = {"vendor_id":"a2d6",
+ "product_id":"15b3",
+ "remote_managed": "true"}
+ device_spec = {"vendor_id":"a2d6",
+ "product_id":"15b3",
+ "address": "0000:82:00.0",
+ "physical_network":"physnet1",
+ "remote_managed": "true"}
+ device_spec = {"vendor_id":"1002",
+ "product_id":"6929",
+ "address": "0000:82:00.0",
+ "resource_class": "PGPU",
+ "traits": "HW_GPU_API_VULKAN,my-awesome-gpu"}
The following are invalid, as they specify mutually exclusive options::
- passthrough_whitelist = {"devname":"eth0",
- "physical_network":"physnet",
- "address":"*:0a:00.*"}
+ device_spec = {"devname":"eth0",
+ "physical_network":"physnet",
+ "address":"*:0a:00.*"}
+
+ The following example is invalid because it specifies the ``remote_managed``
+ tag for a PF - it will result in an error during config validation at the
+ Nova Compute service startup::
+
+ device_spec = {"address": "0000:82:00.0",
+ "product_id": "a2d6",
+ "vendor_id": "15b3",
+ "physical_network": null,
+ "remote_managed": "true"}
* A JSON list of JSON dictionaries corresponding to the above format. For
example::
- passthrough_whitelist = [{"product_id":"0001", "vendor_id":"8086"},
- {"product_id":"0002", "vendor_id":"8086"}]
-""")
+ device_spec = [{"product_id":"0001", "vendor_id":"8086"},
+ {"product_id":"0002", "vendor_id":"8086"}]
+"""),
+ cfg.BoolOpt('report_in_placement',
+ default=False,
+ help="""
+Enable PCI resource inventory reporting to Placement. If it is enabled then the
+nova-compute service will report PCI resource inventories to Placement
+according to the [pci]device_spec configuration and the PCI devices reported
+by the hypervisor. Once it is enabled it cannot be disabled any more. In a
+future release the default of this config will be changed to True.
+
+Related options:
+
+* [pci]device_spec: to define which PCI devices nova is allowed to track and
+ assign to guests.
+"""),
]
diff --git a/nova/conf/powervm.py b/nova/conf/powervm.py
deleted file mode 100644
index 7efdbedfb6..0000000000
--- a/nova/conf/powervm.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2018 IBM Corporation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-
-
-powervm_group = cfg.OptGroup(
- name="powervm",
- title="PowerVM Options",
- help="""
-PowerVM options allow cloud administrators to configure how OpenStack will work
-with the PowerVM hypervisor.
-""")
-
-powervm_opts = [
- cfg.FloatOpt(
- 'proc_units_factor',
- default=0.1,
- min=0.05,
- max=1,
- help="""
-Factor used to calculate the amount of physical processor compute power given
-to each vCPU. E.g. A value of 1.0 means a whole physical processor, whereas
-0.05 means 1/20th of a physical processor.
-"""),
- cfg.StrOpt('disk_driver',
- choices=['localdisk', 'ssp'], ignore_case=True,
- default='localdisk',
- help="""
-The disk driver to use for PowerVM disks. PowerVM provides support for
-localdisk and PowerVM Shared Storage Pool disk drivers.
-
-Related options:
-
-* volume_group_name - required when using localdisk
-
-"""),
- cfg.StrOpt('volume_group_name',
- default='',
- help="""
-Volume Group to use for block device operations. If disk_driver is localdisk,
-then this attribute must be specified. It is strongly recommended NOT to use
-rootvg since that is used by the management partition and filling it will cause
-failures.
-"""),
-]
-
-
-def register_opts(conf):
- conf.register_group(powervm_group)
- conf.register_opts(powervm_opts, group=powervm_group)
-
-
-def list_opts():
- return {powervm_group: powervm_opts}
diff --git a/nova/conf/quota.py b/nova/conf/quota.py
index bb37c0c28b..e5b4b8dc73 100644
--- a/nova/conf/quota.py
+++ b/nova/conf/quota.py
@@ -147,7 +147,7 @@ Possible values:
deprecated_group='DEFAULT',
deprecated_name='quota_server_groups',
help="""
-The maxiumum number of server groups per project.
+The maximum number of server groups per project.
Server groups are used to control the affinity and anti-affinity scheduling
policy for a group of servers or instances. Reducing the quota will not affect
@@ -181,6 +181,8 @@ Possible values:
'on-demand.'),
('nova.quota.NoopQuotaDriver', 'Ignores quota and treats all '
'resources as unlimited.'),
+ ('nova.quota.UnifiedLimitsDriver', 'Do not use. Still being '
+ 'developed.')
],
help="""
Provides abstraction for quota checks. Users can configure a specific
diff --git a/nova/conf/scheduler.py b/nova/conf/scheduler.py
index 8b3b616987..c7aa2ad76d 100644
--- a/nova/conf/scheduler.py
+++ b/nova/conf/scheduler.py
@@ -464,6 +464,49 @@ Possible values:
* An integer or float value, where the value corresponds to the multipler
ratio for this weigher.
"""),
+ cfg.FloatOpt("hypervisor_version_weight_multiplier",
+ default=1.0,
+ help="""
+Hypervisor Version weight multiplier ratio.
+
+The multiplier is used for weighting hosts based on the reported
+hypervisor version.
+Negative numbers indicate preferring older hosts; the default is to prefer
+newer hosts to aid with upgrades.
+
+Possible values:
+
+* An integer or float value, where the value corresponds to the multiplier
+ ratio for this weigher.
+
+Example:
+
+* Strongly prefer older hosts
+
+ .. code-block:: ini
+
+ [filter_scheduler]
+ hypervisor_version_weight_multiplier=-1000
+
+
+* Moderately prefer new hosts
+
+ .. code-block:: ini
+
+ [filter_scheduler]
+ hypervisor_version_weight_multiplier=2.5
+
+* Disable weigher influence
+
+ .. code-block:: ini
+
+ [filter_scheduler]
+ hypervisor_version_weight_multiplier=0
+
+Related options:
+
+* ``[filter_scheduler] weight_classes``
+"""),
cfg.FloatOpt("io_ops_weight_multiplier",
default=-1.0,
help="""
@@ -745,7 +788,26 @@ Possible values:
Related options:
* ``[filter_scheduler] aggregate_image_properties_isolation_namespace``
-""")]
+"""),
+ cfg.BoolOpt(
+ "pci_in_placement",
+ default=False,
+ help="""
+Enable scheduling and claiming PCI devices in Placement.
+
+This can be enabled after ``[pci]report_in_placement`` is enabled on all
+compute hosts.
+
+When enabled the scheduler queries Placement about the PCI device
+availability to select a destination for a server with a PCI request. The
+scheduler also allocates the selected PCI devices in Placement. Note that this
+logic does not replace the PCIPassthroughFilter but extends it.
+
+Related options:
+
+* ``[pci] report_in_placement``
+* ``[pci] alias``
+* ``[pci] device_spec``
+"""),
+]
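The matching scheduler-side switch, which per the help text should only be turned on once every compute host already reports PCI inventory (a sketch, not a recommendation):

.. code-block:: ini

    [filter_scheduler]
    # Query and claim PCI devices in Placement; the PCIPassthroughFilter
    # still applies on top of this.
    pci_in_placement = True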
metrics_group = cfg.OptGroup(
name="metrics",
@@ -780,7 +842,7 @@ follows:
Possible values:
-* An integer or float value, where the value corresponds to the multipler
+* An integer or float value, where the value corresponds to the multiplier
ratio for this weigher.
Related options:
@@ -857,7 +919,7 @@ of any actual metric value:
Possible values:
-* An integer or float value, where the value corresponds to the multipler
+* An integer or float value, where the value corresponds to the multiplier
ratio for this weigher.
Related options:
diff --git a/nova/conf/spice.py b/nova/conf/spice.py
index 59ed4e80a0..e5854946f1 100644
--- a/nova/conf/spice.py
+++ b/nova/conf/spice.py
@@ -85,6 +85,59 @@ Agent. With the Spice agent installed the following features are enabled:
needing to click inside the console or press keys to release it. The
performance of mouse movement is also improved.
"""),
+ cfg.StrOpt('image_compression',
+ advanced=True,
+ choices=[
+ ('auto_glz', 'enable image compression mode to choose between glz '
+ 'and quic algorithm, based on image properties'),
+ ('auto_lz', 'enable image compression mode to choose between lz '
+ 'and quic algorithm, based on image properties'),
+ ('quic', 'enable image compression based on the SFALIC algorithm'),
+ ('glz', 'enable image compression using lz with history based '
+ 'global dictionary'),
+ ('lz', 'enable image compression with the Lempel-Ziv algorithm'),
+ ('off', 'disable image compression')
+ ],
+ help="""
+Configure the SPICE image compression (lossless).
+"""),
+ cfg.StrOpt('jpeg_compression',
+ advanced=True,
+ choices=[
+ ('auto', 'enable JPEG image compression automatically'),
+ ('never', 'disable JPEG image compression'),
+ ('always', 'enable JPEG image compression')
+ ],
+ help="""
+Configure the SPICE wan image compression (lossy for slow links).
+"""),
+ cfg.StrOpt('zlib_compression',
+ advanced=True,
+ choices=[
+ ('auto', 'enable zlib image compression automatically'),
+ ('never', 'disable zlib image compression'),
+ ('always', 'enable zlib image compression')
+ ],
+ help="""
+Configure the SPICE wan image compression (lossless for slow links).
+"""),
+ cfg.BoolOpt('playback_compression',
+ advanced=True,
+ help="""
+Enable the SPICE audio stream compression (using celt).
+"""),
+ cfg.StrOpt('streaming_mode',
+ advanced=True,
+ choices=[
+ ('filter', 'SPICE server adds additional filters to decide if '
+ 'video streaming should be activated'),
+ ('all', 'any fast-refreshing window can be encoded into a video '
+ 'stream'),
+ ('off', 'no video detection and (lossy) compression is performed')
+ ],
+ help="""
+Configure the SPICE video stream detection and (lossy) compression.
+"""),
cfg.URIOpt('html5proxy_base_url',
default='http://127.0.0.1:6082/spice_auto.html',
help="""
diff --git a/nova/conf/vmware.py b/nova/conf/vmware.py
index 63a5f04ea4..17a2676b64 100644
--- a/nova/conf/vmware.py
+++ b/nova/conf/vmware.py
@@ -76,7 +76,9 @@ Possible values:
* Any valid URI (The scheme is 'telnet' or 'telnets'.)
Related options:
+
This option is ignored if serial_port_service_uri is not specified.
+
* serial_port_service_uri
"""),
cfg.StrOpt('serial_log_dir',
@@ -112,6 +114,7 @@ If true, the vCenter server certificate is not verified. If false,
then the default CA truststore is used for verification.
Related options:
+
* ca_file: This option is ignored if "ca_file" is set.
"""),
cfg.StrOpt('cluster_name',
@@ -158,7 +161,9 @@ Possible values:
* Any valid port number within 5900 -(5900 + vnc_port_total)
Related options:
+
Below options should be set to enable VNC client.
+
* vnc.enabled = True
* vnc_port_total
"""),
diff --git a/nova/conf/workarounds.py b/nova/conf/workarounds.py
index 6d6e1d0adf..943ec74885 100644
--- a/nova/conf/workarounds.py
+++ b/nova/conf/workarounds.py
@@ -313,6 +313,7 @@ use outside of a CI or developer cloud.
"vdpa",
"accelerator-direct",
"accelerator-direct-physical",
+ "remote-managed",
]),
default=[],
help="""
@@ -336,6 +337,7 @@ during hard reboot. The possible values are neutron port vnic types:
* vdpa
* accelerator-direct
* accelerator-direct-physical
+* remote-managed
Adding a ``vnic_type`` to this configuration makes Nova wait for a
network-vif-plugged event for each of the instance's vifs having the specific
@@ -366,10 +368,34 @@ If it is set to True the libvirt driver will try as a best effort to send
the announce-self command to the QEMU monitor so that it generates RARP frames
to update network switches in the post live migration phase on the destination.
+Please note that this causes the domain to be considered tainted by libvirt.
+
Related options:
* :oslo.config:option:`DEFAULT.compute_driver` (libvirt)
"""),
+ cfg.IntOpt('qemu_monitor_announce_self_count',
+ default=3,
+ min=1,
+ help="""
+The total number of times to send the announce_self command to the QEMU
+monitor when enable_qemu_monitor_announce_self is enabled.
+
+Related options:
+
+* :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt)
+"""),
+ cfg.IntOpt('qemu_monitor_announce_self_interval',
+ default=1,
+ min=1,
+ help="""
+The number of seconds to wait before re-sending the announce_self
+command to the QEMU monitor.
+
+Related options:
+
+* :oslo.config:option:`WORKAROUNDS.enable_qemu_monitor_announce_self` (libvirt)
+"""),
cfg.BoolOpt('disable_compute_service_check_for_ffu',
default=False,
help="""
@@ -380,6 +406,61 @@ before compute nodes have been able to update their service record. In an FFU,
the service records in the database will be more than one version old until
the compute nodes start up, but control services need to be online first.
"""),
+ cfg.BoolOpt('unified_limits_count_pcpu_as_vcpu',
+ default=False,
+ help="""
+When using unified limits, use VCPU + PCPU for VCPU quota usage.
+
+If the deployment is configured to use unified limits via
+``[quota]driver=nova.quota.UnifiedLimitsDriver``, by default VCPU resources are
+counted independently from PCPU resources, consistent with how they are
+represented in the placement service.
+
+Legacy quota behavior counts PCPU as VCPU and returns the sum of VCPU + PCPU
+usage as the usage count for VCPU. Operators relying on the aggregation of
+VCPU and PCPU resource usage counts should set this option to True.
+
+Related options:
+
+* :oslo.config:option:`quota.driver`
+"""),
+ cfg.BoolOpt('skip_cpu_compare_on_dest',
+ default=False,
+ help="""
+With the libvirt driver, during live migration, skip comparing guest CPU
+with the destination host. When using QEMU >= 2.9 and libvirt >=
+4.4.0, libvirt will do the correct thing with respect to checking CPU
+compatibility on the destination host during live migration.
+"""),
+ cfg.BoolOpt('skip_cpu_compare_at_startup',
+ default=False,
+ help="""
+This will skip the CPU comparison call at the startup of the Compute
+service and let libvirt handle it.
+"""),
+
+ cfg.BoolOpt(
+ 'skip_hypervisor_version_check_on_lm',
+ default=False,
+ help="""
+When this is enabled, it will skip version-checking of hypervisors
+during live migration.
+"""),
+ cfg.BoolOpt(
+ 'skip_reserve_in_use_ironic_nodes',
+ default=False,
+ help="""
+This may be useful if you use the Ironic driver, but don't have
+automatic cleaning enabled in Ironic. Nova, by default, will mark
+Ironic nodes as reserved as soon as they are in use. When you free
+the Ironic node (by deleting the nova instance) it takes a while
+for Nova to un-reserve that Ironic node in placement. Usually this
+is a good idea, because it avoids placement providing an Ironic
+node as a valid candidate when it is still being cleaned.
+However, if you don't use automatic cleaning, it can cause an
+extra delay before an Ironic node is available for building a
+new Nova instance.
+"""),
]
diff --git a/nova/console/websocketproxy.py b/nova/console/websocketproxy.py
index a5797adff5..88979dd047 100644
--- a/nova/console/websocketproxy.py
+++ b/nova/console/websocketproxy.py
@@ -305,7 +305,7 @@ class NovaWebSocketProxy(websockify.WebSocketProxy):
# ssl_options unset to default to the underlying system defaults.
# We do this to avoid using websockify's behaviour for 'default'
# in select_ssl_version(), which hardcodes the versions to be
- # quite relaxed and prevents us from using sytem crypto policies.
+ # quite relaxed and prevents us from using system crypto policies.
ssl_min_version = kwargs.pop('ssl_minimum_version', None)
if ssl_min_version and ssl_min_version != 'default':
kwargs['ssl_options'] = websockify.websocketproxy. \
diff --git a/nova/context.py b/nova/context.py
index 619c39e212..dc42b38b5b 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -424,7 +424,7 @@ def scatter_gather_cells(context, cell_mappings, timeout, fn, *args, **kwargs):
# Only log the exception traceback for non-nova exceptions.
if not isinstance(e, exception.NovaException):
LOG.exception('Error gathering result from cell %s', cell_uuid)
- result = e.__class__(e.args)
+ result = e
# The queue is already synchronized.
queue.put((cell_uuid, result))
diff --git a/nova/db/api/legacy_migrations/README b/nova/db/api/legacy_migrations/README
deleted file mode 100644
index 6218f8cac4..0000000000
--- a/nova/db/api/legacy_migrations/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This is a database migration repository.
-
-More information at
-http://code.google.com/p/sqlalchemy-migrate/
diff --git a/nova/db/api/legacy_migrations/migrate.cfg b/nova/db/api/legacy_migrations/migrate.cfg
deleted file mode 100644
index 3e2ccef016..0000000000
--- a/nova/db/api/legacy_migrations/migrate.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-[db_settings]
-# Used to identify which repository this database is versioned under.
-# You can use the name of your project.
-repository_id=nova_api
-
-# The name of the database table used to track the schema version.
-# This name shouldn't already be used by your project.
-# If this is changed once a database is under version control, you'll need to
-# change the table name in each database too.
-version_table=migrate_version
-
-# When committing a change script, Migrate will attempt to generate the
-# sql for all supported databases; normally, if one of them fails - probably
-# because you don't have that database installed - it is ignored and the
-# commit continues, perhaps ending successfully.
-# Databases in this list MUST compile successfully during a commit, or the
-# entire commit will fail. List the databases your application will actually
-# be using to ensure your updates to that database work properly.
-# This must be a list; example: ['postgres','sqlite']
-required_dbs=[]
diff --git a/nova/db/api/legacy_migrations/versions/067_train.py b/nova/db/api/legacy_migrations/versions/067_train.py
deleted file mode 100644
index 6b82b17e4b..0000000000
--- a/nova/db/api/legacy_migrations/versions/067_train.py
+++ /dev/null
@@ -1,602 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate.changeset.constraint import ForeignKeyConstraint
-from migrate import UniqueConstraint
-import sqlalchemy as sa
-from sqlalchemy import dialects
-
-from nova.db import types
-from nova.objects import keypair
-
-
-def InetSmall():
- return sa.String(length=39).with_variant(
- dialects.postgresql.INET(), 'postgresql'
- )
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData()
- # NOTE(stephenfin): This is not compatible with SQLAlchemy 2.0 but neither
- # is sqlalchemy-migrate which requires this. We'll remove these migrations
- # when dropping SQLAlchemy < 2.x support
- meta.bind = migrate_engine
-
- cell_mappings = sa.Table('cell_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('name', sa.String(length=255)),
- sa.Column('transport_url', sa.Text()),
- sa.Column('database_connection', sa.Text()),
- # NOTE(stephenfin): These were originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'disabled', sa.Boolean(create_constraint=False), default=False),
- UniqueConstraint('uuid', name='uniq_cell_mappings0uuid'),
- sa.Index('uuid_idx', 'uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- host_mappings = sa.Table('host_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('cell_id', sa.Integer, nullable=False),
- sa.Column('host', sa.String(length=255), nullable=False),
- UniqueConstraint(
- 'host', name='uniq_host_mappings0host'),
- sa.Index('host_idx', 'host'),
- ForeignKeyConstraint(
- columns=['cell_id'], refcolumns=[cell_mappings.c.id]),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_mappings = sa.Table('instance_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('instance_uuid', sa.String(length=36), nullable=False),
- sa.Column('cell_id', sa.Integer, nullable=True),
- sa.Column('project_id', sa.String(length=255), nullable=False),
- # NOTE(stephenfin): These were originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'queued_for_delete', sa.Boolean(create_constraint=False),
- default=False),
- sa.Column('user_id', sa.String(length=255), nullable=True),
- UniqueConstraint(
- 'instance_uuid', name='uniq_instance_mappings0instance_uuid'),
- sa.Index('instance_uuid_idx', 'instance_uuid'),
- sa.Index('project_id_idx', 'project_id'),
- sa.Index(
- 'instance_mappings_user_id_project_id_idx', 'user_id',
- 'project_id'),
- ForeignKeyConstraint(
- columns=['cell_id'], refcolumns=[cell_mappings.c.id]),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- flavors = sa.Table('flavors', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('memory_mb', sa.Integer, nullable=False),
- sa.Column('vcpus', sa.Integer, nullable=False),
- sa.Column('swap', sa.Integer, nullable=False),
- sa.Column('vcpu_weight', sa.Integer),
- sa.Column('flavorid', sa.String(length=255), nullable=False),
- sa.Column('rxtx_factor', sa.Float),
- sa.Column('root_gb', sa.Integer),
- sa.Column('ephemeral_gb', sa.Integer),
- sa.Column('disabled', sa.Boolean),
- sa.Column('is_public', sa.Boolean),
- sa.Column('description', sa.Text()),
- UniqueConstraint('flavorid', name='uniq_flavors0flavorid'),
- UniqueConstraint('name', name='uniq_flavors0name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- flavor_extra_specs = sa.Table('flavor_extra_specs', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('flavor_id', sa.Integer, nullable=False),
- sa.Column('key', sa.String(length=255), nullable=False),
- sa.Column('value', sa.String(length=255)),
- UniqueConstraint(
- 'flavor_id', 'key', name='uniq_flavor_extra_specs0flavor_id0key'),
- sa.Index('flavor_extra_specs_flavor_id_key_idx', 'flavor_id', 'key'),
- ForeignKeyConstraint(columns=['flavor_id'], refcolumns=[flavors.c.id]),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- flavor_projects = sa.Table('flavor_projects', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('flavor_id', sa.Integer, nullable=False),
- sa.Column('project_id', sa.String(length=255), nullable=False),
- UniqueConstraint(
- 'flavor_id', 'project_id',
- name='uniq_flavor_projects0flavor_id0project_id'),
- ForeignKeyConstraint(
- columns=['flavor_id'], refcolumns=[flavors.c.id]),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- request_specs = sa.Table('request_specs', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('instance_uuid', sa.String(36), nullable=False),
- sa.Column('spec', types.MediumText(), nullable=False),
- UniqueConstraint(
- 'instance_uuid', name='uniq_request_specs0instance_uuid'),
- sa.Index('request_spec_instance_uuid_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- build_requests = sa.Table('build_requests', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('request_spec_id', sa.Integer, nullable=True),
- sa.Column('project_id', sa.String(length=255), nullable=False),
- sa.Column('user_id', sa.String(length=255), nullable=True),
- sa.Column('display_name', sa.String(length=255)),
- sa.Column('instance_metadata', sa.Text),
- sa.Column('progress', sa.Integer),
- sa.Column('vm_state', sa.String(length=255)),
- sa.Column('task_state', sa.String(length=255)),
- sa.Column('image_ref', sa.String(length=255)),
- sa.Column('access_ip_v4', InetSmall()),
- sa.Column('access_ip_v6', InetSmall()),
- sa.Column('info_cache', sa.Text),
- sa.Column('security_groups', sa.Text, nullable=True),
- sa.Column('config_drive', sa.Boolean, default=False, nullable=True),
- sa.Column('key_name', sa.String(length=255)),
- sa.Column(
- 'locked_by',
- sa.Enum('owner', 'admin', name='build_requests0locked_by')),
- sa.Column('instance_uuid', sa.String(length=36)),
- sa.Column('instance', types.MediumText()),
- sa.Column('block_device_mappings', types.MediumText()),
- sa.Column('tags', sa.Text()),
- UniqueConstraint(
- 'instance_uuid', name='uniq_build_requests0instance_uuid'),
- sa.Index('build_requests_project_id_idx', 'project_id'),
- sa.Index('build_requests_instance_uuid_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- keypairs = sa.Table('key_pairs', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('name', sa.String(255), nullable=False),
- sa.Column('user_id', sa.String(255), nullable=False),
- sa.Column('fingerprint', sa.String(255)),
- sa.Column('public_key', sa.Text()),
- sa.Column(
- 'type',
- sa.Enum('ssh', 'x509', metadata=meta, name='keypair_types'),
- nullable=False, server_default=keypair.KEYPAIR_TYPE_SSH),
- UniqueConstraint(
- 'user_id', 'name', name='uniq_key_pairs0user_id0name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- projects = sa.Table('projects', meta,
- sa.Column(
- 'id', sa.Integer, primary_key=True, nullable=False,
- autoincrement=True),
- sa.Column('external_id', sa.String(length=255), nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- UniqueConstraint('external_id', name='uniq_projects0external_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- users = sa.Table('users', meta,
- sa.Column(
- 'id', sa.Integer, primary_key=True, nullable=False,
- autoincrement=True),
- sa.Column('external_id', sa.String(length=255), nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- UniqueConstraint('external_id', name='uniq_users0external_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- resource_classes = sa.Table('resource_classes', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- UniqueConstraint('name', name='uniq_resource_classes0name'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- nameargs = {}
- if migrate_engine.name == 'mysql':
- nameargs['collation'] = 'utf8_bin'
-
- resource_providers = sa.Table(
- 'resource_providers', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(36), nullable=False),
- sa.Column('name', sa.Unicode(200, **nameargs), nullable=True),
- sa.Column('generation', sa.Integer, default=0),
- sa.Column('can_host', sa.Integer, default=0),
- sa.Column(
- 'root_provider_id', sa.Integer,
- sa.ForeignKey('resource_providers.id')),
- sa.Column(
- 'parent_provider_id', sa.Integer,
- sa.ForeignKey('resource_providers.id')),
- UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
- UniqueConstraint('name', name='uniq_resource_providers0name'),
- sa.Index('resource_providers_name_idx', 'name'),
- sa.Index('resource_providers_uuid_idx', 'uuid'),
- sa.Index(
- 'resource_providers_root_provider_id_idx', 'root_provider_id'),
- sa.Index(
- 'resource_providers_parent_provider_id_idx', 'parent_provider_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- inventories = sa.Table(
- 'inventories', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('resource_provider_id', sa.Integer, nullable=False),
- sa.Column('resource_class_id', sa.Integer, nullable=False),
- sa.Column('total', sa.Integer, nullable=False),
- sa.Column('reserved', sa.Integer, nullable=False),
- sa.Column('min_unit', sa.Integer, nullable=False),
- sa.Column('max_unit', sa.Integer, nullable=False),
- sa.Column('step_size', sa.Integer, nullable=False),
- sa.Column('allocation_ratio', sa.Float, nullable=False),
- sa.Index(
- 'inventories_resource_provider_id_idx', 'resource_provider_id'),
- sa.Index(
- 'inventories_resource_provider_resource_class_idx',
- 'resource_provider_id', 'resource_class_id'),
- sa.Index(
- 'inventories_resource_class_id_idx', 'resource_class_id'),
- UniqueConstraint(
- 'resource_provider_id', 'resource_class_id',
- name='uniq_inventories0resource_provider_resource_class'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- traits = sa.Table(
- 'traits', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column(
- 'id', sa.Integer, primary_key=True, nullable=False,
- autoincrement=True),
- sa.Column('name', sa.Unicode(255, **nameargs), nullable=False),
- UniqueConstraint('name', name='uniq_traits0name'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- allocations = sa.Table(
- 'allocations', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('resource_provider_id', sa.Integer, nullable=False),
- sa.Column('consumer_id', sa.String(36), nullable=False),
- sa.Column('resource_class_id', sa.Integer, nullable=False),
- sa.Column('used', sa.Integer, nullable=False),
- sa.Index(
- 'allocations_resource_provider_class_used_idx',
- 'resource_provider_id', 'resource_class_id', 'used'),
- sa.Index(
- 'allocations_resource_class_id_idx', 'resource_class_id'),
- sa.Index('allocations_consumer_id_idx', 'consumer_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- consumers = sa.Table(
- 'consumers', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column(
- 'id', sa.Integer, primary_key=True, nullable=False,
- autoincrement=True),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('project_id', sa.Integer, nullable=False),
- sa.Column('user_id', sa.Integer, nullable=False),
- sa.Column(
- 'generation', sa.Integer, default=0, server_default=sa.text('0'),
- nullable=False),
- sa.Index('consumers_project_id_uuid_idx', 'project_id', 'uuid'),
- sa.Index(
- 'consumers_project_id_user_id_uuid_idx', 'project_id', 'user_id',
- 'uuid'),
- UniqueConstraint('uuid', name='uniq_consumers0uuid'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- resource_provider_aggregates = sa.Table(
- 'resource_provider_aggregates', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column(
- 'resource_provider_id', sa.Integer, primary_key=True,
- nullable=False),
- sa.Column(
- 'aggregate_id', sa.Integer, primary_key=True, nullable=False),
- sa.Index(
- 'resource_provider_aggregates_aggregate_id_idx', 'aggregate_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- resource_provider_traits = sa.Table(
- 'resource_provider_traits', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column(
- 'trait_id', sa.Integer, sa.ForeignKey('traits.id'),
- primary_key=True, nullable=False),
- sa.Column(
- 'resource_provider_id', sa.Integer, primary_key=True,
- nullable=False),
- sa.Index(
- 'resource_provider_traits_resource_provider_trait_idx',
- 'resource_provider_id', 'trait_id'),
- ForeignKeyConstraint(
- columns=['resource_provider_id'],
- refcolumns=[resource_providers.c.id]),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- placement_aggregates = sa.Table('placement_aggregates', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), index=True),
- UniqueConstraint('uuid', name='uniq_placement_aggregates0uuid'),
- mysql_engine='InnoDB',
- mysql_charset='latin1'
- )
-
- aggregates = sa.Table('aggregates', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36)),
- sa.Column('name', sa.String(length=255)),
- sa.Index('aggregate_uuid_idx', 'uuid'),
- UniqueConstraint('name', name='uniq_aggregate0name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- aggregate_hosts = sa.Table('aggregate_hosts', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('host', sa.String(length=255)),
- sa.Column(
- 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'),
- nullable=False),
- UniqueConstraint(
- 'host', 'aggregate_id',
- name='uniq_aggregate_hosts0host0aggregate_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- aggregate_metadata = sa.Table('aggregate_metadata', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'),
- nullable=False),
- sa.Column('key', sa.String(length=255), nullable=False),
- sa.Column('value', sa.String(length=255), nullable=False),
- UniqueConstraint(
- 'aggregate_id', 'key',
- name='uniq_aggregate_metadata0aggregate_id0key'),
- sa.Index('aggregate_metadata_key_idx', 'key'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- groups = sa.Table('instance_groups', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('name', sa.String(length=255)),
- UniqueConstraint(
- 'uuid', name='uniq_instance_groups0uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- group_policy = sa.Table('instance_group_policy', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('policy', sa.String(length=255)),
- sa.Column(
- 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'),
- nullable=False),
- sa.Column('rules', sa.Text),
- sa.Index('instance_group_policy_policy_idx', 'policy'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- group_member = sa.Table('instance_group_member', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('instance_uuid', sa.String(length=255)),
- sa.Column(
- 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'),
- nullable=False),
- sa.Index('instance_group_member_instance_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- quota_classes = sa.Table('quota_classes', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('class_name', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255)),
- sa.Column('hard_limit', sa.Integer),
- sa.Index('quota_classes_class_name_idx', 'class_name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- quota_usages = sa.Table('quota_usages', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('in_use', sa.Integer, nullable=False),
- sa.Column('reserved', sa.Integer, nullable=False),
- sa.Column('until_refresh', sa.Integer),
- sa.Column('user_id', sa.String(length=255)),
- sa.Index('quota_usages_project_id_idx', 'project_id'),
- sa.Index('quota_usages_user_id_idx', 'user_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- quotas = sa.Table('quotas', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('hard_limit', sa.Integer),
- UniqueConstraint(
- 'project_id', 'resource', name='uniq_quotas0project_id0resource'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- project_user_quotas = sa.Table('project_user_quotas', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('user_id', sa.String(length=255), nullable=False),
- sa.Column('project_id', sa.String(length=255), nullable=False),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('hard_limit', sa.Integer, nullable=True),
- UniqueConstraint(
- 'user_id', 'project_id', 'resource',
- name='uniq_project_user_quotas0user_id0project_id0resource'),
- sa.Index(
- 'project_user_quotas_project_id_idx', 'project_id'),
- sa.Index(
- 'project_user_quotas_user_id_idx', 'user_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- reservations = sa.Table('reservations', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column(
- 'usage_id', sa.Integer, sa.ForeignKey('quota_usages.id'),
- nullable=False),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255)),
- sa.Column('delta', sa.Integer, nullable=False),
- sa.Column('expire', sa.DateTime),
- sa.Column('user_id', sa.String(length=255)),
- sa.Index('reservations_project_id_idx', 'project_id'),
- sa.Index('reservations_uuid_idx', 'uuid'),
- sa.Index('reservations_expire_idx', 'expire'),
- sa.Index('reservations_user_id_idx', 'user_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- tables = [
- cell_mappings,
- host_mappings,
- instance_mappings,
- flavors,
- flavor_extra_specs,
- flavor_projects,
- request_specs,
- build_requests,
- keypairs,
- projects,
- users,
- resource_classes,
- resource_providers,
- inventories,
- traits,
- allocations,
- consumers,
- resource_provider_aggregates,
- resource_provider_traits,
- placement_aggregates,
- aggregates,
- aggregate_hosts,
- aggregate_metadata,
- groups,
- group_policy,
- group_member,
- quota_classes,
- quota_usages,
- quotas,
- project_user_quotas,
- reservations,
- ]
- for table in tables:
- table.create(checkfirst=True)
diff --git a/nova/db/api/legacy_migrations/versions/068_placeholder.py b/nova/db/api/legacy_migrations/versions/068_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/068_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/069_placeholder.py b/nova/db/api/legacy_migrations/versions/069_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/069_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/070_placeholder.py b/nova/db/api/legacy_migrations/versions/070_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/070_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/071_placeholder.py b/nova/db/api/legacy_migrations/versions/071_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/071_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/072_placeholder.py b/nova/db/api/legacy_migrations/versions/072_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/072_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/073_placeholder.py b/nova/db/api/legacy_migrations/versions/073_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/073_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/074_placeholder.py b/nova/db/api/legacy_migrations/versions/074_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/074_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/075_placeholder.py b/nova/db/api/legacy_migrations/versions/075_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/075_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/076_placeholder.py b/nova/db/api/legacy_migrations/versions/076_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/076_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/077_placeholder.py b/nova/db/api/legacy_migrations/versions/077_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/077_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/078_placeholder.py b/nova/db/api/legacy_migrations/versions/078_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/078_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/079_placeholder.py b/nova/db/api/legacy_migrations/versions/079_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/079_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/080_placeholder.py b/nova/db/api/legacy_migrations/versions/080_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/080_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/081_placeholder.py b/nova/db/api/legacy_migrations/versions/081_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/081_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/082_placeholder.py b/nova/db/api/legacy_migrations/versions/082_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/082_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/083_placeholder.py b/nova/db/api/legacy_migrations/versions/083_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/083_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/084_placeholder.py b/nova/db/api/legacy_migrations/versions/084_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/084_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/085_placeholder.py b/nova/db/api/legacy_migrations/versions/085_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/085_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/086_placeholder.py b/nova/db/api/legacy_migrations/versions/086_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/086_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/api/legacy_migrations/versions/087_placeholder.py b/nova/db/api/legacy_migrations/versions/087_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/api/legacy_migrations/versions/087_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/api.py b/nova/db/main/api.py
index 4c40be905e..7d24f974f9 100644
--- a/nova/db/main/api.py
+++ b/nova/db/main/api.py
@@ -79,11 +79,28 @@ def _context_manager_from_context(context):
pass
-def _joinedload_all(column):
+def _joinedload_all(lead_entity, column):
+ """Do a nested load.
+
+ For example, resolve the following::
+
+ _joinedload_all(models.SecurityGroup, 'instances.info_cache')
+
+    to::
+
+ orm.joinedload(
+ models.SecurityGroup.instances
+ ).joinedload(
+ Instance.info_cache
+ )
+ """
elements = column.split('.')
- joined = orm.joinedload(elements.pop(0))
+ relationship_attr = getattr(lead_entity, elements.pop(0))
+ joined = orm.joinedload(relationship_attr)
for element in elements:
- joined = joined.joinedload(element)
+ relationship_entity = relationship_attr.entity.class_
+ relationship_attr = getattr(relationship_entity, element)
+ joined = joined.joinedload(relationship_attr)
return joined
@@ -562,7 +579,7 @@ def _compute_node_select(context, filters=None, limit=None, marker=None):
if filters is None:
filters = {}
- cn_tbl = sa.alias(models.ComputeNode.__table__, name='cn')
+ cn_tbl = models.ComputeNode.__table__.alias('cn')
select = sa.select(cn_tbl)
if context.read_deleted == "no":
@@ -595,9 +612,9 @@ def _compute_node_select(context, filters=None, limit=None, marker=None):
def _compute_node_fetchall(context, filters=None, limit=None, marker=None):
select = _compute_node_select(context, filters, limit=limit, marker=marker)
engine = get_engine(context=context)
- conn = engine.connect()
- results = conn.execute(select).fetchall()
+ with engine.connect() as conn, conn.begin():
+ results = conn.execute(select).fetchall()
# Callers expect dict-like objects, not SQLAlchemy RowProxy objects...
results = [dict(r._mapping) for r in results]
@@ -909,7 +926,7 @@ def compute_node_statistics(context):
engine = get_engine(context=context)
services_tbl = models.Service.__table__
- inner_sel = sa.alias(_compute_node_select(context), name='inner_sel')
+ inner_sel = _compute_node_select(context).alias('inner_sel')
# TODO(sbauza): Remove the service_id filter in a later release
# once we are sure that all compute nodes report the host field
@@ -966,9 +983,9 @@ def compute_node_statistics(context):
).label('disk_available_least'),
]
select = sql.select(*agg_cols).select_from(j)
- conn = engine.connect()
- results = conn.execute(select).fetchone()
+ with engine.connect() as conn, conn.begin():
+ results = conn.execute(select).fetchone()
# Build a dict of the info--making no assumptions about result
fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
@@ -976,7 +993,6 @@ def compute_node_statistics(context):
'current_workload', 'running_vms', 'disk_available_least')
results = {field: int(results[idx] or 0)
for idx, field in enumerate(fields)}
- conn.close()
return results
@@ -1381,9 +1397,9 @@ def instance_get_by_uuid(context, uuid, columns_to_join=None):
def _instance_get_by_uuid(context, uuid, columns_to_join=None):
- result = _build_instance_get(context, columns_to_join=columns_to_join).\
- filter_by(uuid=uuid).\
- first()
+ result = _build_instance_get(
+ context, columns_to_join=columns_to_join
+ ).filter_by(uuid=uuid).first()
if not result:
raise exception.InstanceNotFound(instance_id=uuid)
@@ -1411,9 +1427,13 @@ def instance_get(context, instance_id, columns_to_join=None):
def _build_instance_get(context, columns_to_join=None):
- query = model_query(context, models.Instance, project_only=True).\
- options(_joinedload_all('security_groups.rules')).\
- options(orm.joinedload('info_cache'))
+ query = model_query(
+ context, models.Instance, project_only=True,
+ ).options(
+ orm.joinedload(
+ models.Instance.security_groups
+ ).joinedload(models.SecurityGroup.rules)
+ ).options(orm.joinedload(models.Instance.info_cache))
if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join:
@@ -1421,7 +1441,10 @@ def _build_instance_get(context, columns_to_join=None):
# Already always joined above
continue
if 'extra.' in column:
- query = query.options(orm.undefer(column))
+ column_ref = getattr(models.InstanceExtra, column.split('.')[1])
+ query = query.options(
+ orm.joinedload(models.Instance.extra).undefer(column_ref)
+ )
elif column in ['metadata', 'system_metadata']:
# NOTE(melwitt): We use subqueryload() instead of joinedload() for
# metadata and system_metadata because of the one-to-many
@@ -1431,13 +1454,16 @@ def _build_instance_get(context, columns_to_join=None):
# in a large data transfer. Instead, the subqueryload() will
# perform additional queries to obtain metadata and system_metadata
# for the instance.
- query = query.options(orm.subqueryload(column))
+ column_ref = getattr(models.Instance, column)
+ query = query.options(orm.subqueryload(column_ref))
else:
- query = query.options(orm.joinedload(column))
+ column_ref = getattr(models.Instance, column)
+ query = query.options(orm.joinedload(column_ref))
# NOTE(alaski) Stop lazy loading of columns not needed.
- for col in ['metadata', 'system_metadata']:
- if col not in columns_to_join:
- query = query.options(orm.noload(col))
+ for column in ['metadata', 'system_metadata']:
+ if column not in columns_to_join:
+ column_ref = getattr(models.Instance, column)
+ query = query.options(orm.noload(column_ref))
# NOTE(melwitt): We need to use order_by(<unique column>) so that the
# additional queries emitted by subqueryload() include the same ordering as
# used by the parent query.
@@ -1530,7 +1556,8 @@ def instance_get_all(context, columns_to_join=None):
_manual_join_columns(columns_to_join))
query = model_query(context, models.Instance)
for column in columns_to_join_new:
- query = query.options(orm.joinedload(column))
+ column_ref = getattr(models.Instance, column)
+ query = query.options(orm.joinedload(column_ref))
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
@@ -1671,9 +1698,13 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
query_prefix = context.session.query(models.Instance)
for column in columns_to_join_new:
if 'extra.' in column:
- query_prefix = query_prefix.options(orm.undefer(column))
+ column_ref = getattr(models.InstanceExtra, column.split('.')[1])
+ query_prefix = query_prefix.options(
+ orm.joinedload(models.Instance.extra).undefer(column_ref)
+ )
else:
- query_prefix = query_prefix.options(orm.joinedload(column))
+ column_ref = getattr(models.Instance, column)
+ query_prefix = query_prefix.options(orm.joinedload(column_ref))
# Note: order_by is done in the sqlalchemy.utils.py paginate_query(),
# no need to do it here as well
@@ -1683,9 +1714,9 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
filters = copy.deepcopy(filters)
model_object = models.Instance
- query_prefix = _get_query_nova_resource_by_changes_time(query_prefix,
- filters,
- model_object)
+ query_prefix = _get_query_nova_resource_by_changes_time(
+ query_prefix, filters, model_object,
+ )
if 'deleted' in filters:
# Instances can be soft or hard deleted and the query needs to
@@ -1697,14 +1728,12 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED
)
- query_prefix = query_prefix.\
- filter(delete)
+ query_prefix = query_prefix.filter(delete)
else:
query_prefix = query_prefix.\
filter(models.Instance.deleted == models.Instance.id)
else:
- query_prefix = query_prefix.\
- filter_by(deleted=0)
+ query_prefix = query_prefix.filter_by(deleted=0)
if not filters.pop('soft_deleted', False):
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
@@ -1794,19 +1823,25 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
if marker is not None:
try:
marker = _instance_get_by_uuid(
- context.elevated(read_deleted='yes'), marker)
+ context.elevated(read_deleted='yes'), marker,
+ )
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker=marker)
try:
- query_prefix = sqlalchemyutils.paginate_query(query_prefix,
- models.Instance, limit,
- sort_keys,
- marker=marker,
- sort_dirs=sort_dirs)
+ query_prefix = sqlalchemyutils.paginate_query(
+ query_prefix,
+ models.Instance,
+ limit,
+ sort_keys,
+ marker=marker,
+ sort_dirs=sort_dirs,
+ )
except db_exc.InvalidSortKey:
raise exception.InvalidSortKey()
- return _instances_fill_metadata(context, query_prefix.all(), manual_joins)
+ instances = query_prefix.all()
+
+ return _instances_fill_metadata(context, instances, manual_joins)
@require_context
@@ -2059,9 +2094,13 @@ def instance_get_active_by_window_joined(context, begin, end=None,
for column in columns_to_join_new:
if 'extra.' in column:
- query = query.options(orm.undefer(column))
+ column_ref = getattr(models.InstanceExtra, column.split('.')[1])
+ query = query.options(
+ orm.joinedload(models.Instance.extra).undefer(column_ref)
+ )
else:
- query = query.options(orm.joinedload(column))
+ column_ref = getattr(models.Instance, column)
+ query = query.options(orm.joinedload(column_ref))
query = query.filter(sql.or_(
models.Instance.terminated_at == sql.null(),
@@ -2081,23 +2120,31 @@ def instance_get_active_by_window_joined(context, begin, end=None,
raise exception.MarkerNotFound(marker=marker)
query = sqlalchemyutils.paginate_query(
- query, models.Instance, limit, ['project_id', 'uuid'], marker=marker)
+ query, models.Instance, limit, ['project_id', 'uuid'], marker=marker,
+ )
+ instances = query.all()
- return _instances_fill_metadata(context, query.all(), manual_joins)
+ return _instances_fill_metadata(context, instances, manual_joins)
def _instance_get_all_query(context, project_only=False, joins=None):
if joins is None:
joins = ['info_cache', 'security_groups']
- query = model_query(context,
- models.Instance,
- project_only=project_only)
+ query = model_query(
+ context,
+ models.Instance,
+ project_only=project_only,
+ )
for column in joins:
if 'extra.' in column:
- query = query.options(orm.undefer(column))
+ column_ref = getattr(models.InstanceExtra, column.split('.')[1])
+ query = query.options(
+ orm.joinedload(models.Instance.extra).undefer(column_ref)
+ )
else:
- query = query.options(orm.joinedload(column))
+ column_ref = getattr(models.Instance, column)
+ query = query.options(orm.joinedload(column_ref))
return query
@@ -2105,9 +2152,12 @@ def _instance_get_all_query(context, project_only=False, joins=None):
def instance_get_all_by_host(context, host, columns_to_join=None):
"""Get all instances belonging to a host."""
query = _instance_get_all_query(context, joins=columns_to_join)
- return _instances_fill_metadata(context,
- query.filter_by(host=host).all(),
- manual_joins=columns_to_join)
+ instances = query.filter_by(host=host).all()
+ return _instances_fill_metadata(
+ context,
+ instances,
+ manual_joins=columns_to_join,
+ )
def _instance_get_all_uuids_by_hosts(context, hosts):
@@ -2147,19 +2197,26 @@ def instance_get_all_by_host_and_node(
candidates = ['system_metadata', 'metadata']
manual_joins = [x for x in columns_to_join if x in candidates]
columns_to_join = list(set(columns_to_join) - set(candidates))
- return _instances_fill_metadata(context,
- _instance_get_all_query(
- context,
- joins=columns_to_join).filter_by(host=host).
- filter_by(node=node).all(), manual_joins=manual_joins)
+ instances = _instance_get_all_query(
+ context,
+ joins=columns_to_join,
+ ).filter_by(host=host).filter_by(node=node).all()
+ return _instances_fill_metadata(
+ context,
+ instances,
+ manual_joins=manual_joins,
+ )
@pick_context_manager_reader
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
"""Get all instances belonging to a host with a different type_id."""
- return _instances_fill_metadata(context,
- _instance_get_all_query(context).filter_by(host=host).
- filter(models.Instance.instance_type_id != type_id).all())
+ instances = _instance_get_all_query(context).filter_by(
+ host=host,
+ ).filter(
+ models.Instance.instance_type_id != type_id
+ ).all()
+ return _instances_fill_metadata(context, instances)
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
@@ -2172,11 +2229,14 @@ def instance_get_all_hung_in_rebooting(context, reboot_window):
# NOTE(danms): this is only used in the _poll_rebooting_instances()
# call in compute/manager, so we can avoid the metadata lookups
# explicitly
- return _instances_fill_metadata(context,
- model_query(context, models.Instance).
- filter(models.Instance.updated_at <= reboot_window).
- filter_by(task_state=task_states.REBOOTING).all(),
- manual_joins=[])
+ instances = model_query(context, models.Instance).filter(
+ models.Instance.updated_at <= reboot_window
+ ).filter_by(task_state=task_states.REBOOTING).all()
+ return _instances_fill_metadata(
+ context,
+ instances,
+ manual_joins=[],
+ )
def _retry_instance_update():
@@ -2505,13 +2565,15 @@ def instance_extra_get_by_instance_uuid(
:param instance_uuid: UUID of the instance tied to the topology record
:param columns: A list of the columns to load, or None for 'all of them'
"""
- query = model_query(context, models.InstanceExtra).\
- filter_by(instance_uuid=instance_uuid)
+ query = model_query(context, models.InstanceExtra).filter_by(
+ instance_uuid=instance_uuid,
+ )
if columns is None:
columns = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model',
'trusted_certs', 'resources', 'migration_context']
for column in columns:
- query = query.options(orm.undefer(column))
+ column_ref = getattr(models.InstanceExtra, column)
+ query = query.options(orm.undefer(column_ref))
instance_extra = query.first()
return instance_extra
@@ -2733,7 +2795,8 @@ def _block_device_mapping_get_query(context, columns_to_join=None):
query = model_query(context, models.BlockDeviceMapping)
for column in columns_to_join:
- query = query.options(orm.joinedload(column))
+ column_ref = getattr(models.BlockDeviceMapping, column)
+ query = query.options(orm.joinedload(column_ref))
return query
@@ -2950,10 +3013,18 @@ def security_group_create(context, values):
def _security_group_get_query(context, read_deleted=None,
project_only=False, join_rules=True):
- query = model_query(context, models.SecurityGroup,
- read_deleted=read_deleted, project_only=project_only)
+ query = model_query(
+ context,
+ models.SecurityGroup,
+ read_deleted=read_deleted,
+ project_only=project_only,
+ )
if join_rules:
- query = query.options(_joinedload_all('rules.grantee_group'))
+ query = query.options(
+ orm.joinedload(
+ models.SecurityGroup.rules
+ ).joinedload(models.SecurityGroupIngressRule.grantee_group)
+ )
return query
@@ -2998,8 +3069,7 @@ def security_group_get(context, security_group_id, columns_to_join=None):
if columns_to_join is None:
columns_to_join = []
for column in columns_to_join:
- if column.startswith('instances'):
- query = query.options(_joinedload_all(column))
+ query = query.options(_joinedload_all(models.SecurityGroup, column))
result = query.first()
if not result:
@@ -3011,25 +3081,27 @@ def security_group_get(context, security_group_id, columns_to_join=None):
@require_context
@pick_context_manager_reader
-def security_group_get_by_name(
- context, project_id, group_name, columns_to_join=None,
-):
+def security_group_get_by_name(context, project_id, group_name):
"""Returns a security group with the specified name from a project."""
- query = _security_group_get_query(context,
- read_deleted="no", join_rules=False).\
- filter_by(project_id=project_id).\
- filter_by(name=group_name)
-
- if columns_to_join is None:
- columns_to_join = ['instances', 'rules.grantee_group']
-
- for column in columns_to_join:
- query = query.options(_joinedload_all(column))
+ query = _security_group_get_query(
+ context, read_deleted="no", join_rules=False,
+ ).filter_by(
+ project_id=project_id,
+ ).filter_by(
+ name=group_name,
+ ).options(
+ orm.joinedload(models.SecurityGroup.instances)
+ ).options(
+ orm.joinedload(
+ models.SecurityGroup.rules
+ ).joinedload(models.SecurityGroupIngressRule.grantee_group)
+ )
result = query.first()
if not result:
raise exception.SecurityGroupNotFoundForProject(
- project_id=project_id, security_group_id=group_name)
+ project_id=project_id, security_group_id=group_name,
+ )
return result
@@ -3077,14 +3149,11 @@ def security_group_in_use(context, group_id):
@require_context
@pick_context_manager_writer
-def security_group_update(context, security_group_id, values,
- columns_to_join=None):
+def security_group_update(context, security_group_id, values):
"""Update a security group."""
query = model_query(context, models.SecurityGroup).filter_by(
- id=security_group_id)
- if columns_to_join:
- for column in columns_to_join:
- query = query.options(_joinedload_all(column))
+ id=security_group_id,
+ )
security_group_ref = query.first()
if not security_group_ref:
@@ -3265,20 +3334,36 @@ def migration_get_in_progress_by_host_and_node(context, host, node):
# 'finished' means a resize is finished on the destination host
# and the instance is in VERIFY_RESIZE state, so the end state
# for a resize is actually 'confirmed' or 'reverted'.
- return model_query(context, models.Migration).\
- filter(sql.or_(
- sql.and_(
- models.Migration.source_compute == host,
- models.Migration.source_node == node),
- sql.and_(
- models.Migration.dest_compute == host,
- models.Migration.dest_node == node))).\
- filter(~models.Migration.status.in_(['confirmed', 'reverted',
- 'error', 'failed',
- 'completed', 'cancelled',
- 'done'])).\
- options(_joinedload_all('instance.system_metadata')).\
- all()
+ return model_query(
+ context, models.Migration,
+ ).filter(
+ sql.or_(
+ sql.and_(
+ models.Migration.source_compute == host,
+ models.Migration.source_node == node,
+ ),
+ sql.and_(
+ models.Migration.dest_compute == host,
+ models.Migration.dest_node == node,
+ ),
+ )
+ ).filter(
+ ~models.Migration.status.in_(
+ [
+ 'confirmed',
+ 'reverted',
+ 'error',
+ 'failed',
+ 'completed',
+ 'cancelled',
+ 'done',
+ ]
+ )
+ ).options(
+ orm.joinedload(
+ models.Migration.instance
+ ).joinedload(models.Instance.system_metadata)
+ ).all()
@pick_context_manager_reader
@@ -3413,19 +3498,32 @@ def migration_get_in_progress_and_error_by_host_and_node(context, host, node):
"""Finds all in progress migrations and error migrations for the given
host and node.
"""
- return model_query(context, models.Migration).\
- filter(sql.or_(
- sql.and_(
- models.Migration.source_compute == host,
- models.Migration.source_node == node),
- sql.and_(
- models.Migration.dest_compute == host,
- models.Migration.dest_node == node))).\
- filter(~models.Migration.status.in_(['confirmed', 'reverted',
- 'failed', 'completed',
- 'cancelled', 'done'])).\
- options(_joinedload_all('instance.system_metadata')).\
- all()
+ return model_query(
+ context, models.Migration,
+ ).filter(
+ sql.or_(
+ sql.and_(
+ models.Migration.source_compute == host,
+ models.Migration.source_node == node),
+ sql.and_(
+ models.Migration.dest_compute == host,
+ models.Migration.dest_node == node,
+ ),
+ )
+ ).filter(
+ ~models.Migration.status.in_([
+ 'confirmed',
+ 'reverted',
+ 'failed',
+ 'completed',
+ 'cancelled',
+ 'done',
+ ])
+ ).options(
+ orm.joinedload(
+ models.Migration.instance
+ ).joinedload(models.Instance.system_metadata)
+ ).all()
########################
@@ -4176,6 +4274,12 @@ def _get_fk_stmts(metadata, conn, table, column, records):
fk_column = fk_table.c.id
for fk in fk_table.foreign_keys:
+ if table != fk.column.table:
+ # if the foreign key doesn't actually point to the table we're
+ # archiving entries from then it's not relevant; trying to
+ # resolve this would result in a cartesian product
+ continue
+
# We need to find the records in the referring (child) table that
# correspond to the records in our (parent) table so we can archive
# them.
@@ -4194,7 +4298,8 @@ def _get_fk_stmts(metadata, conn, table, column, records):
select = sql.select(fk.column).where(
sql.and_(fk.parent == fk.column, column.in_(records))
)
- rows = conn.execute(select).fetchall()
+ with conn.begin():
+ rows = conn.execute(select).fetchall()
p_records = [r[0] for r in rows]
# Then, select rows in the child table that correspond to the
# parent table records that were passed in.
@@ -4209,7 +4314,8 @@ def _get_fk_stmts(metadata, conn, table, column, records):
fk_select = sql.select(fk_column).where(
sql.and_(fk.parent == fk.column, fk.column.in_(p_records))
)
- fk_rows = conn.execute(fk_select).fetchall()
+ with conn.begin():
+ fk_rows = conn.execute(fk_select).fetchall()
fk_records = [r[0] for r in fk_rows]
if fk_records:
# If we found any records in the child table, create shadow
@@ -4225,6 +4331,7 @@ def _get_fk_stmts(metadata, conn, table, column, records):
# deque.
fk_delete = fk_table.delete().where(fk_column.in_(fk_records))
deletes.appendleft(fk_delete)
+
# Repeat for any possible nested child tables.
i, d = _get_fk_stmts(metadata, conn, fk_table, fk_column, fk_records)
inserts.extendleft(i)
@@ -4262,9 +4369,11 @@ def _archive_deleted_rows_for_table(
deleted_instance_uuids = []
try:
shadow_table = schema.Table(
- shadow_tablename, metadata, autoload_with=conn)
+ shadow_tablename, metadata, autoload_with=conn,
+ )
except sqla_exc.NoSuchTableError:
# No corresponding shadow table; skip it.
+ conn.close()
return rows_archived, deleted_instance_uuids, {}
# TODO(stephenfin): Drop this when we drop the table
@@ -4296,7 +4405,8 @@ def _archive_deleted_rows_for_table(
select = select.where(table.c.updated_at < before)
select = select.order_by(column).limit(max_rows)
- rows = conn.execute(select).fetchall()
+ with conn.begin():
+ rows = conn.execute(select).fetchall()
records = [r[0] for r in rows]
# We will archive deleted rows for this table and also generate insert and
@@ -4332,7 +4442,8 @@ def _archive_deleted_rows_for_table(
query_select = sql.select(table.c.uuid).where(
table.c.id.in_(records)
)
- rows = conn.execute(query_select).fetchall()
+ with conn.begin():
+ rows = conn.execute(query_select).fetchall()
deleted_instance_uuids = [r[0] for r in rows]
try:
@@ -4354,6 +4465,8 @@ def _archive_deleted_rows_for_table(
"%(tablename)s: %(error)s",
{'tablename': tablename, 'error': str(ex)})
+ conn.close()
+
return rows_archived, deleted_instance_uuids, extras
@@ -4476,7 +4589,8 @@ def purge_shadow_tables(context, before_date, status_fn=None):
else:
delete = table.delete()
- deleted = conn.execute(delete)
+ with conn.begin():
+ deleted = conn.execute(delete)
if deleted.rowcount > 0:
status_fn(_('Deleted %(rows)i rows from %(table)s based on '
'timestamp column %(col)s') % {
@@ -4485,6 +4599,8 @@ def purge_shadow_tables(context, before_date, status_fn=None):
'col': col is None and '(n/a)' or col.name})
total_deleted += deleted.rowcount
+ conn.close()
+
return total_deleted
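The api.py hunks above repeatedly apply two patterns needed for SQLAlchemy 2.0 compatibility: ORM loader options (joinedload, subqueryload, undefer, noload) now receive mapped attributes rather than string paths, and Core connections are used as context managers with explicit transactions instead of bare connect()/close(). The standalone sketch below illustrates both patterns against toy Parent/Child models (illustrative only, not Nova's schema or helpers):

    import sqlalchemy as sa
    from sqlalchemy import orm

    Base = orm.declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = sa.Column(sa.Integer, primary_key=True)
        children = orm.relationship('Child', back_populates='parent')

    class Child(Base):
        __tablename__ = 'child'
        id = sa.Column(sa.Integer, primary_key=True)
        parent_id = sa.Column(sa.ForeignKey('parent.id'))
        parent = orm.relationship('Parent', back_populates='children')

    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)

    with orm.Session(engine) as session:
        # 2.0-style loader option: pass the mapped attribute, not the string
        # 'children'; nested loads are expressed by chaining joinedload().
        parents = session.query(Parent).options(
            orm.joinedload(Parent.children)
        ).all()

    # 2.0-style Core usage: the connection is a context manager and the
    # transaction is explicit, mirroring the
    # "with engine.connect() as conn, conn.begin():" changes above.
    with engine.connect() as conn, conn.begin():
        rows = conn.execute(sa.select(Parent.__table__)).fetchall()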
diff --git a/nova/db/main/legacy_migrations/README b/nova/db/main/legacy_migrations/README
deleted file mode 100644
index c5f51f2280..0000000000
--- a/nova/db/main/legacy_migrations/README
+++ /dev/null
@@ -1,4 +0,0 @@
-This is a database migration repository.
-
-More information at
-https://sqlalchemy-migrate.readthedocs.io/en/latest/
diff --git a/nova/db/main/legacy_migrations/manage.py b/nova/db/main/legacy_migrations/manage.py
deleted file mode 100644
index 6c2b3842ba..0000000000
--- a/nova/db/main/legacy_migrations/manage.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate.versioning.shell import main
-
-
-if __name__ == '__main__':
- main(debug='False', repository='.')
diff --git a/nova/db/main/legacy_migrations/migrate.cfg b/nova/db/main/legacy_migrations/migrate.cfg
deleted file mode 100644
index 006e01e406..0000000000
--- a/nova/db/main/legacy_migrations/migrate.cfg
+++ /dev/null
@@ -1,20 +0,0 @@
-[db_settings]
-# Used to identify which repository this database is versioned under.
-# You can use the name of your project.
-repository_id=nova
-
-# The name of the database table used to track the schema version.
-# This name shouldn't already be used by your project.
-# If this is changed once a database is under version control, you'll need to
-# change the table name in each database too.
-version_table=migrate_version
-
-# When committing a change script, Migrate will attempt to generate the
-# sql for all supported databases; normally, if one of them fails - probably
-# because you don't have that database installed - it is ignored and the
-# commit continues, perhaps ending successfully.
-# Databases in this list MUST compile successfully during a commit, or the
-# entire commit will fail. List the databases your application will actually
-# be using to ensure your updates to that database work properly.
-# This must be a list; example: ['postgres','sqlite']
-required_dbs=[]
diff --git a/nova/db/main/legacy_migrations/versions/402_train.py b/nova/db/main/legacy_migrations/versions/402_train.py
deleted file mode 100644
index ad6e65d011..0000000000
--- a/nova/db/main/legacy_migrations/versions/402_train.py
+++ /dev/null
@@ -1,1617 +0,0 @@
-# Copyright 2012 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate.changeset import UniqueConstraint
-from oslo_log import log as logging
-import sqlalchemy as sa
-from sqlalchemy import dialects
-from sqlalchemy.ext import compiler
-from sqlalchemy import types as sqla_types
-
-from nova.db import types
-from nova.objects import keypair
-
-LOG = logging.getLogger(__name__)
-
-
-def Inet():
- return sa.String(length=43).with_variant(
- dialects.postgresql.INET(), 'postgresql',
- )
-
-
-def InetSmall():
- return sa.String(length=39).with_variant(
- dialects.postgresql.INET(), 'postgresql',
- )
-
-
-# We explicitly name many of our foreign keys for MySQL so they match Havana
-@compiler.compiles(sa.ForeignKeyConstraint, 'postgresql')
-def process(element, compiler, **kw):
- element.name = None
- return compiler.visit_foreign_key_constraint(element, **kw)
-
-
-def _create_shadow_tables(migrate_engine):
- meta = sa.MetaData()
- meta.reflect(migrate_engine)
- table_names = list(meta.tables.keys())
-
- # NOTE(stephenfin): This is not compatible with SQLAlchemy 2.0 but neither
- # is sqlalchemy-migrate which requires this. We'll remove these migrations
- # when dropping SQLAlchemy < 2.x support
- meta.bind = migrate_engine
-
- for table_name in table_names:
- # Skip tables that are not soft-deletable
- if table_name in (
- 'tags',
- 'resource_providers',
- 'inventories',
- 'allocations',
- 'resource_provider_aggregates',
- 'console_auth_tokens',
- ):
- continue
-
- table = sa.Table(table_name, meta, autoload_with=migrate_engine)
-
- columns = []
- for column in table.columns:
- column_copy = None
-
- # NOTE(boris-42): BigInteger is not supported by sqlite, so after
- # copying it will have NullType. The other types used in Nova are
- # supported by sqlite
- if isinstance(column.type, sqla_types.NullType):
- column_copy = sa.Column(
- column.name, sa.BigInteger(), default=0,
- )
-
- if table_name == 'instances' and column.name == 'locked_by':
- enum = sa.Enum(
- 'owner', 'admin', name='shadow_instances0locked_by',
- )
- column_copy = sa.Column(column.name, enum)
-
- # TODO(stephenfin): Fix these various bugs in a follow-up
-
- # 244_increase_user_id_length_volume_usage_cache; this
- # alteration should apply to shadow tables also
-
- if table_name == 'volume_usage_cache' and column.name == 'user_id':
- # nullable should be True
- column_copy = sa.Column('user_id', sa.String(36))
-
- # 247_nullable_mismatch; these alterations should apply to shadow
- # tables also
-
- if table_name == 'quota_usages' and column.name == 'resources':
- # nullable should be False
- column_copy = sa.Column('resource', sa.String(length=255))
-
- if table_name == 'pci_devices':
- if column.name == 'deleted':
- # nullable should be True
- column_copy = sa.Column(
- 'deleted', sa.Integer, default=0, nullable=False,
- )
-
- if column.name == 'product_id':
- # nullable should be False
- column_copy = sa.Column('product_id', sa.String(4))
-
- if column.name == 'vendor_id':
- # nullable should be False
- column_copy = sa.Column('vendor_id', sa.String(4))
-
- if column.name == 'dev_type':
- # nullable should be False
- column_copy = sa.Column('dev_type', sa.String(8))
-
- # 280_add_nullable_false_to_keypairs_name; this should apply to the
- # shadow table also
-
- if table_name == 'key_pairs' and column.name == 'name':
- # nullable should be False
- column_copy = sa.Column('name', sa.String(length=255))
-
- # NOTE(stephenfin): By default, 'sqlalchemy.Enum' will issue a
- # 'CREATE TYPE' command on PostgreSQL, even if the type already
- # exists. We work around this by using the PostgreSQL-specific
- # 'sqlalchemy.dialects.postgresql.ENUM' type and setting
- # 'create_type' to 'False'. See [1] for more information.
- #
- # [1] https://stackoverflow.com/a/28894354/613428
- if migrate_engine.name == 'postgresql':
- if table_name == 'key_pairs' and column.name == 'type':
- enum = dialects.postgresql.ENUM(
- 'ssh', 'x509', name='keypair_types', create_type=False)
- column_copy = sa.Column(
- column.name, enum, nullable=False,
- server_default=keypair.KEYPAIR_TYPE_SSH)
- elif (
- table_name == 'migrations' and
- column.name == 'migration_type'
- ):
- enum = dialects.postgresql.ENUM(
- 'migration', 'resize', 'live-migration', 'evacuation',
- name='migration_type', create_type=False)
- column_copy = sa.Column(column.name, enum, nullable=True)
-
- if column_copy is None:
- column_copy = column.copy()
-
- columns.append(column_copy)
-
- shadow_table = sa.Table(
- 'shadow_' + table_name, meta, *columns, mysql_engine='InnoDB',
- )
-
- try:
- shadow_table.create()
- except Exception:
- LOG.info(repr(shadow_table))
- LOG.exception('Exception while creating table.')
- raise
-
- # TODO(stephenfin): Fix these various bugs in a follow-up
-
- # 252_add_instance_extra_table; we don't create indexes for shadow tables
- # in general and these should be removed
-
- table = sa.Table(
- 'shadow_instance_extra', meta, autoload_with=migrate_engine,
- )
- idx = sa.Index('shadow_instance_extra_idx', table.c.instance_uuid)
- idx.create(migrate_engine)
-
- # 373_migration_uuid; we shouldn't create indexes for shadow tables
-
- table = sa.Table('shadow_migrations', meta, autoload_with=migrate_engine)
- idx = sa.Index('shadow_migrations_uuid', table.c.uuid, unique=True)
- idx.create(migrate_engine)
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData()
- # NOTE(stephenfin): This is not compatible with SQLAlchemy 2.0 but neither
- # is sqlalchemy-migrate which requires this. We'll remove these migrations
- # when dropping SQLAlchemy < 2.x support
- meta.bind = migrate_engine
-
- agent_builds = sa.Table('agent_builds', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('hypervisor', sa.String(length=255)),
- sa.Column('os', sa.String(length=255)),
- sa.Column('architecture', sa.String(length=255)),
- sa.Column('version', sa.String(length=255)),
- sa.Column('url', sa.String(length=255)),
- sa.Column('md5hash', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index(
- 'agent_builds_hypervisor_os_arch_idx',
- 'hypervisor', 'os', 'architecture'),
- UniqueConstraint(
- 'hypervisor', 'os', 'architecture', 'deleted',
- name='uniq_agent_builds0hypervisor0os0architecture0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- aggregate_hosts = sa.Table('aggregate_hosts', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('host', sa.String(length=255)),
- sa.Column(
- 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'),
- nullable=False),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'host', 'aggregate_id', 'deleted',
- name='uniq_aggregate_hosts0host0aggregate_id0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- aggregate_metadata = sa.Table('aggregate_metadata', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id'),
- nullable=False),
- sa.Column('key', sa.String(length=255), nullable=False),
- sa.Column('value', sa.String(length=255), nullable=False),
- sa.Column('deleted', sa.Integer),
- sa.Index('aggregate_metadata_key_idx', 'key'),
- sa.Index('aggregate_metadata_value_idx', 'value'),
- UniqueConstraint(
- 'aggregate_id', 'key', 'deleted',
- name='uniq_aggregate_metadata0aggregate_id0key0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- aggregates = sa.Table('aggregates', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('name', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Column('uuid', sa.String(36)),
- sa.Index('aggregate_uuid_idx', 'uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- allocations = sa.Table('allocations', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('resource_provider_id', sa.Integer, nullable=False),
- sa.Column('consumer_id', sa.String(36), nullable=False),
- sa.Column('resource_class_id', sa.Integer, nullable=False),
- sa.Column('used', sa.Integer, nullable=False),
- sa.Index(
- 'allocations_resource_provider_class_used_idx',
- 'resource_provider_id', 'resource_class_id', 'used'),
- sa.Index('allocations_consumer_id_idx', 'consumer_id'),
- sa.Index('allocations_resource_class_id_idx', 'resource_class_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- block_device_mapping = sa.Table('block_device_mapping', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('device_name', sa.String(length=255), nullable=True),
- sa.Column('delete_on_termination', sa.Boolean),
- sa.Column('snapshot_id', sa.String(length=36), nullable=True),
- sa.Column('volume_id', sa.String(length=36), nullable=True),
- sa.Column('volume_size', sa.Integer),
- sa.Column('no_device', sa.Boolean),
- sa.Column('connection_info', types.MediumText()),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid',
- name='block_device_mapping_instance_uuid_fkey')),
- sa.Column('deleted', sa.Integer),
- sa.Column('source_type', sa.String(length=255), nullable=True),
- sa.Column('destination_type', sa.String(length=255), nullable=True),
- sa.Column('guest_format', sa.String(length=255), nullable=True),
- sa.Column('device_type', sa.String(length=255), nullable=True),
- sa.Column('disk_bus', sa.String(length=255), nullable=True),
- sa.Column('boot_index', sa.Integer),
- sa.Column('image_id', sa.String(length=36), nullable=True),
- sa.Column('tag', sa.String(255)),
- sa.Column('attachment_id', sa.String(36), nullable=True),
- sa.Column('uuid', sa.String(36), nullable=True),
- sa.Column('volume_type', sa.String(255), nullable=True),
- sa.Index('snapshot_id', 'snapshot_id'),
- sa.Index('volume_id', 'volume_id'),
- sa.Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'),
- sa.Index(
- 'block_device_mapping_instance_uuid_device_name_idx',
- 'instance_uuid', 'device_name'),
- sa.Index(
- 'block_device_mapping_instance_uuid_volume_id_idx',
- 'instance_uuid', 'volume_id'),
- UniqueConstraint('uuid', name='uniq_block_device_mapping0uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- bw_usage_cache = sa.Table('bw_usage_cache', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('start_period', sa.DateTime, nullable=False),
- sa.Column('last_refreshed', sa.DateTime),
- sa.Column('bw_in', sa.BigInteger),
- sa.Column('bw_out', sa.BigInteger),
- sa.Column('mac', sa.String(length=255)),
- sa.Column('uuid', sa.String(length=36)),
- sa.Column('last_ctr_in', sa.BigInteger()),
- sa.Column('last_ctr_out', sa.BigInteger()),
- sa.Column('deleted', sa.Integer),
- sa.Index(
- 'bw_usage_cache_uuid_start_period_idx',
- 'uuid', 'start_period'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- cells = sa.Table('cells', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('api_url', sa.String(length=255)),
- sa.Column('weight_offset', sa.Float),
- sa.Column('weight_scale', sa.Float),
- sa.Column('name', sa.String(length=255)),
- sa.Column('is_parent', sa.Boolean),
- sa.Column('deleted', sa.Integer),
- sa.Column('transport_url', sa.String(length=255), nullable=False),
- UniqueConstraint(
- 'name', 'deleted',
- name='uniq_cells0name0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- certificates = sa.Table('certificates', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('file_name', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index(
- 'certificates_project_id_deleted_idx',
- 'project_id', 'deleted'),
- sa.Index('certificates_user_id_deleted_idx', 'user_id', 'deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- compute_nodes = sa.Table('compute_nodes', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('service_id', sa.Integer, nullable=True),
- sa.Column('vcpus', sa.Integer, nullable=False),
- sa.Column('memory_mb', sa.Integer, nullable=False),
- sa.Column('local_gb', sa.Integer, nullable=False),
- sa.Column('vcpus_used', sa.Integer, nullable=False),
- sa.Column('memory_mb_used', sa.Integer, nullable=False),
- sa.Column('local_gb_used', sa.Integer, nullable=False),
- sa.Column('hypervisor_type', types.MediumText(), nullable=False),
- sa.Column('hypervisor_version', sa.Integer, nullable=False),
- sa.Column('cpu_info', types.MediumText(), nullable=False),
- sa.Column('disk_available_least', sa.Integer),
- sa.Column('free_ram_mb', sa.Integer),
- sa.Column('free_disk_gb', sa.Integer),
- sa.Column('current_workload', sa.Integer),
- sa.Column('running_vms', sa.Integer),
- sa.Column('hypervisor_hostname', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Column('host_ip', InetSmall()),
- sa.Column('supported_instances', sa.Text),
- sa.Column('pci_stats', sa.Text, nullable=True),
- sa.Column('metrics', sa.Text, nullable=True),
- sa.Column('extra_resources', sa.Text, nullable=True),
- sa.Column('stats', sa.Text, default='{}'),
- sa.Column('numa_topology', sa.Text, nullable=True),
- sa.Column('host', sa.String(255), nullable=True),
- sa.Column('ram_allocation_ratio', sa.Float, nullable=True),
- sa.Column('cpu_allocation_ratio', sa.Float, nullable=True),
- sa.Column('uuid', sa.String(36), nullable=True),
- sa.Column('disk_allocation_ratio', sa.Float, nullable=True),
- sa.Column('mapped', sa.Integer, default=0, nullable=True),
- sa.Index('compute_nodes_uuid_idx', 'uuid', unique=True),
- UniqueConstraint(
- 'host', 'hypervisor_hostname', 'deleted',
- name='uniq_compute_nodes0host0hypervisor_hostname0deleted',
- ),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- console_auth_tokens = sa.Table('console_auth_tokens', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('token_hash', sa.String(255), nullable=False),
- sa.Column('console_type', sa.String(255), nullable=False),
- sa.Column('host', sa.String(255), nullable=False),
- sa.Column('port', sa.Integer, nullable=False),
- sa.Column('internal_access_path', sa.String(255)),
- sa.Column('instance_uuid', sa.String(36), nullable=False),
- sa.Column('expires', sa.Integer, nullable=False),
- sa.Column('access_url_base', sa.String(255), nullable=True),
- sa.Index('console_auth_tokens_instance_uuid_idx', 'instance_uuid'),
- sa.Index('console_auth_tokens_host_expires_idx', 'host', 'expires'),
- sa.Index('console_auth_tokens_token_hash_idx', 'token_hash'),
- sa.Index(
- 'console_auth_tokens_token_hash_instance_uuid_idx',
- 'token_hash', 'instance_uuid'),
- UniqueConstraint(
- 'token_hash', name='uniq_console_auth_tokens0token_hash'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- console_pools = sa.Table('console_pools', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('address', InetSmall()),
- sa.Column('username', sa.String(length=255)),
- sa.Column('password', sa.String(length=255)),
- sa.Column('console_type', sa.String(length=255)),
- sa.Column('public_hostname', sa.String(length=255)),
- sa.Column('host', sa.String(length=255)),
- sa.Column('compute_host', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'host', 'console_type', 'compute_host', 'deleted',
- name='uniq_console_pools0host0console_type0compute_host0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- consoles = sa.Table('consoles', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('instance_name', sa.String(length=255)),
- sa.Column('password', sa.String(length=255)),
- sa.Column('port', sa.Integer),
- sa.Column('pool_id', sa.Integer, sa.ForeignKey('console_pools.id')),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='consoles_instance_uuid_fkey')),
- sa.Column('deleted', sa.Integer),
- sa.Index('consoles_instance_uuid_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- dns_domains = sa.Table('dns_domains', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Boolean),
- sa.Column(
- 'domain', sa.String(length=255), primary_key=True, nullable=False),
- sa.Column('scope', sa.String(length=255)),
- sa.Column('availability_zone', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
- sa.Index('dns_domains_project_id_idx', 'project_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- fixed_ips = sa.Table('fixed_ips', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('address', InetSmall()),
- sa.Column('network_id', sa.Integer),
- sa.Column('allocated', sa.Boolean),
- sa.Column('leased', sa.Boolean),
- sa.Column('reserved', sa.Boolean),
- sa.Column('virtual_interface_id', sa.Integer),
- sa.Column('host', sa.String(length=255)),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='fixed_ips_instance_uuid_fkey'),
- ),
- sa.Column('deleted', sa.Integer),
- sa.Index('network_id', 'network_id'),
- sa.Index('address', 'address'),
- sa.Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
- sa.Index(
- 'fixed_ips_virtual_interface_id_fkey',
- 'virtual_interface_id'),
- sa.Index('fixed_ips_host_idx', 'host'),
- sa.Index(
- 'fixed_ips_network_id_host_deleted_idx', 'network_id',
- 'host', 'deleted'),
- sa.Index(
- 'fixed_ips_address_reserved_network_id_deleted_idx',
- 'address', 'reserved',
- 'network_id', 'deleted'),
- sa.Index(
- 'fixed_ips_deleted_allocated_idx',
- 'address', 'deleted', 'allocated'),
- sa.Index(
- 'fixed_ips_deleted_allocated_updated_at_idx',
- 'deleted', 'allocated', 'updated_at'),
- UniqueConstraint(
- 'address', 'deleted',
- name='uniq_fixed_ips0address0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- floating_ips = sa.Table('floating_ips', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('address', InetSmall()),
- sa.Column('fixed_ip_id', sa.Integer),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('host', sa.String(length=255)),
- sa.Column('auto_assigned', sa.Boolean),
- sa.Column('pool', sa.String(length=255)),
- sa.Column('interface', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index('fixed_ip_id', 'fixed_ip_id'),
- sa.Index('floating_ips_host_idx', 'host'),
- sa.Index('floating_ips_project_id_idx', 'project_id'),
- sa.Index(
- 'floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
- 'pool', 'deleted', 'fixed_ip_id', 'project_id'),
- UniqueConstraint(
- 'address', 'deleted',
- name='uniq_floating_ips0address0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_faults = sa.Table('instance_faults', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='fk_instance_faults_instance_uuid')),
- sa.Column('code', sa.Integer, nullable=False),
- sa.Column('message', sa.String(length=255)),
- sa.Column('details', types.MediumText()),
- sa.Column('host', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index('instance_faults_host_idx', 'host'),
- sa.Index(
- 'instance_faults_instance_uuid_deleted_created_at_idx',
- 'instance_uuid', 'deleted', 'created_at'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_id_mappings = sa.Table('instance_id_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(36), nullable=False),
- sa.Column('deleted', sa.Integer),
- sa.Index('ix_instance_id_mappings_uuid', 'uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_info_caches = sa.Table('instance_info_caches', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('network_info', types.MediumText()),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid',
- name='instance_info_caches_instance_uuid_fkey'),
- nullable=False),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'instance_uuid',
- name='uniq_instance_info_caches0instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- groups = sa.Table('instance_groups', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('name', sa.String(length=255)),
- UniqueConstraint(
- 'uuid', 'deleted',
- name='uniq_instance_groups0uuid0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- group_policy = sa.Table('instance_group_policy', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('policy', sa.String(length=255)),
- sa.Column(
- 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'),
- nullable=False),
- sa.Index('instance_group_policy_policy_idx', 'policy'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- group_member = sa.Table('instance_group_member', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('instance_id', sa.String(length=255)),
- sa.Column(
- 'group_id', sa.Integer, sa.ForeignKey('instance_groups.id'),
- nullable=False),
- sa.Index(
- 'instance_group_member_instance_idx',
- 'instance_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- instance_metadata = sa.Table('instance_metadata', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('key', sa.String(length=255)),
- sa.Column('value', sa.String(length=255)),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='instance_metadata_instance_uuid_fkey'),
- nullable=True),
- sa.Column('deleted', sa.Integer),
- sa.Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_system_metadata = sa.Table('instance_system_metadata', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='instance_system_metadata_ibfk_1'),
- nullable=False),
- sa.Column('key', sa.String(length=255), nullable=False),
- sa.Column('value', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index('instance_uuid', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- # TODO(stephenfin): Remove this table since it has been moved to the API DB
- instance_type_extra_specs = sa.Table('instance_type_extra_specs', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'instance_type_id', sa.Integer, sa.ForeignKey('instance_types.id'),
- nullable=False),
- sa.Column('key', sa.String(length=255)),
- sa.Column('value', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index(
- 'instance_type_extra_specs_instance_type_id_key_idx',
- 'instance_type_id', 'key'),
- UniqueConstraint(
- 'instance_type_id', 'key', 'deleted',
- name='uniq_instance_type_extra_specs0instance_type_id0key0deleted'
- ),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- # TODO(stephenfin): Remove this table since it has been moved to the API DB
- instance_type_projects = sa.Table('instance_type_projects', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'instance_type_id', sa.Integer,
- sa.ForeignKey(
- 'instance_types.id', name='instance_type_projects_ibfk_1'),
- nullable=False),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'instance_type_id', 'project_id', 'deleted',
- name='uniq_instance_type_projects0instance_type_id0project_id'
- '0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- # TODO(stephenfin): Remove this table since it has been moved to the API DB
- instance_types = sa.Table('instance_types', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('name', sa.String(length=255)),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('memory_mb', sa.Integer, nullable=False),
- sa.Column('vcpus', sa.Integer, nullable=False),
- sa.Column('swap', sa.Integer, nullable=False),
- sa.Column('vcpu_weight', sa.Integer),
- sa.Column('flavorid', sa.String(length=255)),
- sa.Column('rxtx_factor', sa.Float),
- sa.Column('root_gb', sa.Integer),
- sa.Column('ephemeral_gb', sa.Integer),
- sa.Column('disabled', sa.Boolean),
- sa.Column('is_public', sa.Boolean),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'name', 'deleted',
- name='uniq_instance_types0name0deleted'),
- UniqueConstraint(
- 'flavorid', 'deleted',
- name='uniq_instance_types0flavorid0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instances = sa.Table('instances', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('internal_id', sa.Integer),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('image_ref', sa.String(length=255)),
- sa.Column('kernel_id', sa.String(length=255)),
- sa.Column('ramdisk_id', sa.String(length=255)),
- sa.Column('launch_index', sa.Integer),
- sa.Column('key_name', sa.String(length=255)),
- sa.Column('key_data', types.MediumText()),
- sa.Column('power_state', sa.Integer),
- sa.Column('vm_state', sa.String(length=255)),
- sa.Column('memory_mb', sa.Integer),
- sa.Column('vcpus', sa.Integer),
- sa.Column('hostname', sa.String(length=255)),
- sa.Column('host', sa.String(length=255)),
- sa.Column('user_data', types.MediumText()),
- sa.Column('reservation_id', sa.String(length=255)),
- sa.Column('launched_at', sa.DateTime),
- sa.Column('terminated_at', sa.DateTime),
- sa.Column('display_name', sa.String(length=255)),
- sa.Column('display_description', sa.String(length=255)),
- sa.Column('availability_zone', sa.String(length=255)),
- sa.Column('locked', sa.Boolean),
- sa.Column('os_type', sa.String(length=255)),
- sa.Column('launched_on', types.MediumText()),
- sa.Column('instance_type_id', sa.Integer),
- sa.Column('vm_mode', sa.String(length=255)),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('architecture', sa.String(length=255)),
- sa.Column('root_device_name', sa.String(length=255)),
- sa.Column('access_ip_v4', InetSmall()),
- sa.Column('access_ip_v6', InetSmall()),
- sa.Column('config_drive', sa.String(length=255)),
- sa.Column('task_state', sa.String(length=255)),
- sa.Column('default_ephemeral_device', sa.String(length=255)),
- sa.Column('default_swap_device', sa.String(length=255)),
- sa.Column('progress', sa.Integer),
- sa.Column('auto_disk_config', sa.Boolean),
- sa.Column('shutdown_terminate', sa.Boolean),
- sa.Column('disable_terminate', sa.Boolean),
- sa.Column('root_gb', sa.Integer),
- sa.Column('ephemeral_gb', sa.Integer),
- sa.Column('cell_name', sa.String(length=255)),
- sa.Column('node', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Column(
- 'locked_by',
- sa.Enum('owner', 'admin', name='instances0locked_by')),
- sa.Column('cleaned', sa.Integer, default=0),
- sa.Column('ephemeral_key_uuid', sa.String(36)),
- # NOTE(danms): This column originally included default=False. We
- # discovered in bug #1862205 that this will attempt to rewrite
- # the entire instances table with that value, which can time out
- # for large data sets (and does not even abort).
- # NOTE(stephenfin): This was originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column('hidden', sa.Boolean(create_constraint=False)),
- sa.Index('uuid', 'uuid', unique=True),
- sa.Index('instances_reservation_id_idx', 'reservation_id'),
- sa.Index(
- 'instances_terminated_at_launched_at_idx',
- 'terminated_at', 'launched_at'),
- sa.Index(
- 'instances_task_state_updated_at_idx',
- 'task_state', 'updated_at'),
- sa.Index('instances_uuid_deleted_idx', 'uuid', 'deleted'),
- sa.Index('instances_host_node_deleted_idx', 'host', 'node', 'deleted'),
- sa.Index(
- 'instances_host_deleted_cleaned_idx',
- 'host', 'deleted', 'cleaned'),
- sa.Index('instances_project_id_deleted_idx', 'project_id', 'deleted'),
- sa.Index('instances_deleted_created_at_idx', 'deleted', 'created_at'),
- sa.Index('instances_project_id_idx', 'project_id'),
- sa.Index(
- 'instances_updated_at_project_id_idx',
- 'updated_at', 'project_id'),
- UniqueConstraint('uuid', name='uniq_instances0uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- instance_actions = sa.Table('instance_actions', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('action', sa.String(length=255)),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='fk_instance_actions_instance_uuid')),
- sa.Column('request_id', sa.String(length=255)),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('start_time', sa.DateTime),
- sa.Column('finish_time', sa.DateTime),
- sa.Column('message', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Index('instance_uuid_idx', 'instance_uuid'),
- sa.Index('request_id_idx', 'request_id'),
- sa.Index(
- 'instance_actions_instance_uuid_updated_at_idx',
- 'instance_uuid', 'updated_at'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- instance_actions_events = sa.Table('instance_actions_events', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('event', sa.String(length=255)),
- sa.Column(
- 'action_id', sa.Integer, sa.ForeignKey('instance_actions.id')),
- sa.Column('start_time', sa.DateTime),
- sa.Column('finish_time', sa.DateTime),
- sa.Column('result', sa.String(length=255)),
- sa.Column('traceback', sa.Text),
- sa.Column('deleted', sa.Integer),
- sa.Column('host', sa.String(255)),
- sa.Column('details', sa.Text),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- instance_extra = sa.Table('instance_extra', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='instance_extra_instance_uuid_fkey'),
- nullable=False),
- sa.Column('numa_topology', sa.Text, nullable=True),
- sa.Column('pci_requests', sa.Text, nullable=True),
- sa.Column('flavor', sa.Text, nullable=True),
- sa.Column('vcpu_model', sa.Text, nullable=True),
- sa.Column('migration_context', sa.Text, nullable=True),
- sa.Column('keypairs', sa.Text, nullable=True),
- sa.Column('device_metadata', sa.Text, nullable=True),
- sa.Column('trusted_certs', sa.Text, nullable=True),
- sa.Column('vpmems', sa.Text, nullable=True),
- sa.Column('resources', sa.Text, nullable=True),
- sa.Index('instance_extra_idx', 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- inventories = sa.Table('inventories', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('resource_provider_id', sa.Integer, nullable=False),
- sa.Column('resource_class_id', sa.Integer, nullable=False),
- sa.Column('total', sa.Integer, nullable=False),
- sa.Column('reserved', sa.Integer, nullable=False),
- sa.Column('min_unit', sa.Integer, nullable=False),
- sa.Column('max_unit', sa.Integer, nullable=False),
- sa.Column('step_size', sa.Integer, nullable=False),
- sa.Column('allocation_ratio', sa.Float, nullable=False),
- sa.Index(
- 'inventories_resource_provider_id_idx', 'resource_provider_id'),
- sa.Index(
- 'inventories_resource_class_id_idx', 'resource_class_id'),
- sa.Index(
- 'inventories_resource_provider_resource_class_idx',
- 'resource_provider_id', 'resource_class_id'),
- UniqueConstraint(
- 'resource_provider_id', 'resource_class_id',
- name='uniq_inventories0resource_provider_resource_class'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- key_pairs = sa.Table('key_pairs', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('name', sa.String(length=255), nullable=False),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('fingerprint', sa.String(length=255)),
- sa.Column('public_key', types.MediumText()),
- sa.Column('deleted', sa.Integer),
- sa.Column(
- 'type', sa.Enum('ssh', 'x509', name='keypair_types'),
- nullable=False, server_default=keypair.KEYPAIR_TYPE_SSH),
- UniqueConstraint(
- 'user_id', 'name', 'deleted',
- name='uniq_key_pairs0user_id0name0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- migrations = sa.Table('migrations', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('source_compute', sa.String(length=255)),
- sa.Column('dest_compute', sa.String(length=255)),
- sa.Column('dest_host', sa.String(length=255)),
- sa.Column('status', sa.String(length=255)),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid', name='fk_migrations_instance_uuid')),
- sa.Column('old_instance_type_id', sa.Integer),
- sa.Column('new_instance_type_id', sa.Integer),
- sa.Column('source_node', sa.String(length=255)),
- sa.Column('dest_node', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- sa.Column(
- 'migration_type',
- sa.Enum(
- 'migration', 'resize', 'live-migration', 'evacuation',
- name='migration_type'),
- nullable=True),
- # NOTE(stephenfin): This was originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'hidden', sa.Boolean(create_constraint=False), default=False),
- sa.Column('memory_total', sa.BigInteger, nullable=True),
- sa.Column('memory_processed', sa.BigInteger, nullable=True),
- sa.Column('memory_remaining', sa.BigInteger, nullable=True),
- sa.Column('disk_total', sa.BigInteger, nullable=True),
- sa.Column('disk_processed', sa.BigInteger, nullable=True),
- sa.Column('disk_remaining', sa.BigInteger, nullable=True),
- sa.Column('uuid', sa.String(36)),
- # NOTE(stephenfin): This was originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'cross_cell_move', sa.Boolean(create_constraint=False),
- default=False),
- sa.Column('user_id', sa.String(255), nullable=True),
- sa.Column('project_id', sa.String(255), nullable=True),
- sa.Index('migrations_uuid', 'uuid', unique=True),
- sa.Index(
- 'migrations_instance_uuid_and_status_idx',
- 'deleted', 'instance_uuid', 'status'),
- sa.Index('migrations_updated_at_idx', 'updated_at'),
- # mysql-specific index by leftmost 100 chars. (mysql gets angry if the
- # index key length is too long.)
- sa.Index(
- 'migrations_by_host_nodes_and_status_idx',
- 'deleted', 'source_compute', 'dest_compute', 'source_node',
- 'dest_node', 'status',
- mysql_length={
- 'source_compute': 100,
- 'dest_compute': 100,
- 'source_node': 100,
- 'dest_node': 100,
- }),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- networks = sa.Table('networks', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('injected', sa.Boolean),
- sa.Column('cidr', Inet()),
- sa.Column('netmask', InetSmall()),
- sa.Column('bridge', sa.String(length=255)),
- sa.Column('gateway', InetSmall()),
- sa.Column('broadcast', InetSmall()),
- sa.Column('dns1', InetSmall()),
- sa.Column('vlan', sa.Integer),
- sa.Column('vpn_public_address', InetSmall()),
- sa.Column('vpn_public_port', sa.Integer),
- sa.Column('vpn_private_address', InetSmall()),
- sa.Column('dhcp_start', InetSmall()),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('host', sa.String(length=255)),
- sa.Column('cidr_v6', Inet()),
- sa.Column('gateway_v6', InetSmall()),
- sa.Column('label', sa.String(length=255)),
- sa.Column('netmask_v6', InetSmall()),
- sa.Column('bridge_interface', sa.String(length=255)),
- sa.Column('multi_host', sa.Boolean),
- sa.Column('dns2', InetSmall()),
- sa.Column('uuid', sa.String(length=36)),
- sa.Column('priority', sa.Integer),
- sa.Column('rxtx_base', sa.Integer),
- sa.Column('deleted', sa.Integer),
- sa.Column('mtu', sa.Integer),
- sa.Column('dhcp_server', types.IPAddress),
- # NOTE(stephenfin): These were originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'enable_dhcp', sa.Boolean(create_constraint=False), default=True),
- sa.Column(
- 'share_address', sa.Boolean(create_constraint=False),
- default=False),
- sa.Index('networks_host_idx', 'host'),
- sa.Index('networks_cidr_v6_idx', 'cidr_v6'),
- sa.Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
- sa.Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
- sa.Index(
- 'networks_uuid_project_id_deleted_idx',
- 'uuid', 'project_id', 'deleted'),
- sa.Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
- UniqueConstraint('vlan', 'deleted', name='uniq_networks0vlan0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- pci_devices = sa.Table('pci_devices', meta,
- sa.Column('created_at', sa.DateTime(timezone=False)),
- sa.Column('updated_at', sa.DateTime(timezone=False)),
- sa.Column('deleted_at', sa.DateTime(timezone=False)),
- sa.Column('deleted', sa.Integer, default=0, nullable=True),
- sa.Column('id', sa.Integer, primary_key=True),
- sa.Column(
- 'compute_node_id', sa.Integer,
- sa.ForeignKey(
- 'compute_nodes.id', name='pci_devices_compute_node_id_fkey'),
- nullable=False),
- sa.Column('address', sa.String(12), nullable=False),
- sa.Column('product_id', sa.String(4), nullable=False),
- sa.Column('vendor_id', sa.String(4), nullable=False),
- sa.Column('dev_type', sa.String(8), nullable=False),
- sa.Column('dev_id', sa.String(255)),
- sa.Column('label', sa.String(255), nullable=False),
- sa.Column('status', sa.String(36), nullable=False),
- sa.Column('extra_info', sa.Text, nullable=True),
- sa.Column('instance_uuid', sa.String(36), nullable=True),
- sa.Column('request_id', sa.String(36), nullable=True),
- sa.Column('numa_node', sa.Integer, default=None),
- sa.Column('parent_addr', sa.String(12), nullable=True),
- sa.Column('uuid', sa.String(36)),
- sa.Index(
- 'ix_pci_devices_instance_uuid_deleted',
- 'instance_uuid', 'deleted'),
- sa.Index(
- 'ix_pci_devices_compute_node_id_deleted',
- 'compute_node_id', 'deleted'),
- sa.Index(
- 'ix_pci_devices_compute_node_id_parent_addr_deleted',
- 'compute_node_id', 'parent_addr', 'deleted'),
- UniqueConstraint(
- 'compute_node_id', 'address', 'deleted',
- name='uniq_pci_devices0compute_node_id0address0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- provider_fw_rules = sa.Table('provider_fw_rules', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('protocol', sa.String(length=5)),
- sa.Column('from_port', sa.Integer),
- sa.Column('to_port', sa.Integer),
- sa.Column('cidr', Inet()),
- sa.Column('deleted', sa.Integer),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- quota_classes = sa.Table('quota_classes', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('class_name', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255)),
- sa.Column('hard_limit', sa.Integer),
- sa.Column('deleted', sa.Integer),
- sa.Index('ix_quota_classes_class_name', 'class_name'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- quota_usages = sa.Table('quota_usages', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('in_use', sa.Integer, nullable=False),
- sa.Column('reserved', sa.Integer, nullable=False),
- sa.Column('until_refresh', sa.Integer),
- sa.Column('deleted', sa.Integer),
- sa.Column('user_id', sa.String(length=255)),
- sa.Index('ix_quota_usages_project_id', 'project_id'),
- sa.Index('ix_quota_usages_user_id_deleted', 'user_id', 'deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- quotas = sa.Table('quotas', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('hard_limit', sa.Integer),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'project_id', 'resource', 'deleted',
- name='uniq_quotas0project_id0resource0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- project_user_quotas = sa.Table('project_user_quotas', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('user_id', sa.String(length=255), nullable=False),
- sa.Column('project_id', sa.String(length=255), nullable=False),
- sa.Column('resource', sa.String(length=255), nullable=False),
- sa.Column('hard_limit', sa.Integer, nullable=True),
- sa.Index(
- 'project_user_quotas_project_id_deleted_idx',
- 'project_id', 'deleted'),
- sa.Index(
- 'project_user_quotas_user_id_deleted_idx',
- 'user_id', 'deleted'),
- UniqueConstraint(
- 'user_id', 'project_id', 'resource', 'deleted',
- name='uniq_project_user_quotas0user_id0project_id0resource0'
- 'deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- reservations = sa.Table('reservations', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column(
- 'usage_id', sa.Integer,
- sa.ForeignKey('quota_usages.id', name='reservations_ibfk_1'),
- nullable=False),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('resource', sa.String(length=255)),
- sa.Column('delta', sa.Integer, nullable=False),
- sa.Column('expire', sa.DateTime),
- sa.Column('deleted', sa.Integer),
- sa.Column('user_id', sa.String(length=255)),
- sa.Index('ix_reservations_project_id', 'project_id'),
- sa.Index('ix_reservations_user_id_deleted', 'user_id', 'deleted'),
- sa.Index('reservations_uuid_idx', 'uuid'),
- sa.Index('reservations_deleted_expire_idx', 'deleted', 'expire'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- resource_providers = sa.Table('resource_providers', meta,
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(36), nullable=False),
- sa.Column('name', sa.Unicode(200), nullable=True),
- sa.Column('generation', sa.Integer, default=0),
- sa.Column('can_host', sa.Integer, default=0),
- UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
- UniqueConstraint('name', name='uniq_resource_providers0name'),
- sa.Index('resource_providers_name_idx', 'name'),
- sa.Index('resource_providers_uuid_idx', 'uuid'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- resource_provider_aggregates = sa.Table(
- 'resource_provider_aggregates', meta,
- sa.Column(
- 'resource_provider_id', sa.Integer, primary_key=True,
- nullable=False),
- sa.Column(
- 'aggregate_id', sa.Integer, primary_key=True, nullable=False),
- sa.Index(
- 'resource_provider_aggregates_aggregate_id_idx', 'aggregate_id'),
- mysql_engine='InnoDB',
- mysql_charset='latin1',
- )
-
- s3_images = sa.Table('s3_images', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('deleted', sa.Integer),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- security_group_instance_association = sa.Table(
- 'security_group_instance_association', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'security_group_id', sa.Integer,
- sa.ForeignKey(
- 'security_groups.id',
- name='security_group_instance_association_ibfk_1'),
- ),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid',
- name='security_group_instance_association_instance_uuid_fkey'),
- ),
- sa.Column('deleted', sa.Integer),
- sa.Index(
- 'security_group_instance_association_instance_uuid_idx',
- 'instance_uuid'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- security_group_rules = sa.Table('security_group_rules', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column(
- 'parent_group_id', sa.Integer,
- sa.ForeignKey('security_groups.id')),
- sa.Column('protocol', sa.String(length=255)),
- sa.Column('from_port', sa.Integer),
- sa.Column('to_port', sa.Integer),
- sa.Column('cidr', Inet()),
- sa.Column('group_id', sa.Integer, sa.ForeignKey('security_groups.id')),
- sa.Column('deleted', sa.Integer),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- security_groups = sa.Table('security_groups', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('name', sa.String(length=255)),
- sa.Column('description', sa.String(length=255)),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('deleted', sa.Integer),
- UniqueConstraint(
- 'project_id', 'name', 'deleted',
- name='uniq_security_groups0project_id0name0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- security_group_default_rules = sa.Table(
- 'security_group_default_rules', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('deleted', sa.Integer, default=0),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('protocol', sa.String(length=5)),
- sa.Column('from_port', sa.Integer),
- sa.Column('to_port', sa.Integer),
- sa.Column('cidr', Inet()),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- services = sa.Table('services', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('host', sa.String(length=255)),
- sa.Column('binary', sa.String(length=255)),
- sa.Column('topic', sa.String(length=255)),
- sa.Column('report_count', sa.Integer, nullable=False),
- sa.Column('disabled', sa.Boolean),
- sa.Column('deleted', sa.Integer),
- sa.Column('disabled_reason', sa.String(length=255)),
- sa.Column('last_seen_up', sa.DateTime, nullable=True),
- # NOTE(stephenfin): This was originally added by sqlalchemy-migrate
- # which did not generate the constraints
- sa.Column(
- 'forced_down', sa.Boolean(create_constraint=False), default=False),
- sa.Column('version', sa.Integer, default=0),
- sa.Column('uuid', sa.String(36), nullable=True),
- sa.Index('services_uuid_idx', 'uuid', unique=True),
- UniqueConstraint(
- 'host', 'topic', 'deleted',
- name='uniq_services0host0topic0deleted'),
- UniqueConstraint(
- 'host', 'binary', 'deleted',
- name='uniq_services0host0binary0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- snapshot_id_mappings = sa.Table('snapshot_id_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('deleted', sa.Integer),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- snapshots = sa.Table('snapshots', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column(
- 'id', sa.String(length=36), primary_key=True, nullable=False),
- sa.Column('volume_id', sa.String(length=36), nullable=False),
- sa.Column('user_id', sa.String(length=255)),
- sa.Column('project_id', sa.String(length=255)),
- sa.Column('status', sa.String(length=255)),
- sa.Column('progress', sa.String(length=255)),
- sa.Column('volume_size', sa.Integer),
- sa.Column('scheduled_at', sa.DateTime),
- sa.Column('display_name', sa.String(length=255)),
- sa.Column('display_description', sa.String(length=255)),
- sa.Column('deleted', sa.String(length=36)),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- tags = sa.Table('tags', meta,
- sa.Column(
- 'resource_id', sa.String(36), primary_key=True, nullable=False),
- sa.Column('tag', sa.Unicode(80), primary_key=True, nullable=False),
- sa.Index('tags_tag_idx', 'tag'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- task_log = sa.Table('task_log', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('task_name', sa.String(length=255), nullable=False),
- sa.Column('state', sa.String(length=255), nullable=False),
- sa.Column('host', sa.String(length=255), nullable=False),
- sa.Column('period_beginning', sa.DateTime, nullable=False),
- sa.Column('period_ending', sa.DateTime, nullable=False),
- sa.Column('message', sa.String(length=255), nullable=False),
- sa.Column('task_items', sa.Integer),
- sa.Column('errors', sa.Integer),
- sa.Column('deleted', sa.Integer),
- sa.Index('ix_task_log_period_beginning', 'period_beginning'),
- sa.Index('ix_task_log_host', 'host'),
- sa.Index('ix_task_log_period_ending', 'period_ending'),
- UniqueConstraint(
- 'task_name', 'host', 'period_beginning', 'period_ending',
- name='uniq_task_log0task_name0host0period_beginning0period_ending',
- ),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- virtual_interfaces = sa.Table('virtual_interfaces', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('address', sa.String(length=255)),
- sa.Column('network_id', sa.Integer),
- sa.Column('uuid', sa.String(length=36)),
- sa.Column(
- 'instance_uuid', sa.String(length=36),
- sa.ForeignKey(
- 'instances.uuid',
- name='virtual_interfaces_instance_uuid_fkey'),
- nullable=True),
- sa.Column('deleted', sa.Integer),
- sa.Column('tag', sa.String(255)),
- sa.Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
- sa.Index('virtual_interfaces_network_id_idx', 'network_id'),
- sa.Index('virtual_interfaces_uuid_idx', 'uuid'),
- UniqueConstraint(
- 'address', 'deleted',
- name='uniq_virtual_interfaces0address0deleted'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- volume_id_mappings = sa.Table('volume_id_mappings', meta,
- sa.Column('created_at', sa.DateTime),
- sa.Column('updated_at', sa.DateTime),
- sa.Column('deleted_at', sa.DateTime),
- sa.Column('id', sa.Integer, primary_key=True, nullable=False),
- sa.Column('uuid', sa.String(length=36), nullable=False),
- sa.Column('deleted', sa.Integer),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- volume_usage_cache = sa.Table('volume_usage_cache', meta,
- sa.Column('created_at', sa.DateTime(timezone=False)),
- sa.Column('updated_at', sa.DateTime(timezone=False)),
- sa.Column('deleted_at', sa.DateTime(timezone=False)),
- sa.Column('id', sa.Integer(), primary_key=True, nullable=False),
- sa.Column('volume_id', sa.String(36), nullable=False),
- sa.Column('tot_last_refreshed', sa.DateTime(timezone=False)),
- sa.Column('tot_reads', sa.BigInteger(), default=0),
- sa.Column('tot_read_bytes', sa.BigInteger(), default=0),
- sa.Column('tot_writes', sa.BigInteger(), default=0),
- sa.Column('tot_write_bytes', sa.BigInteger(), default=0),
- sa.Column('curr_last_refreshed', sa.DateTime(timezone=False)),
- sa.Column('curr_reads', sa.BigInteger(), default=0),
- sa.Column('curr_read_bytes', sa.BigInteger(), default=0),
- sa.Column('curr_writes', sa.BigInteger(), default=0),
- sa.Column('curr_write_bytes', sa.BigInteger(), default=0),
- sa.Column('deleted', sa.Integer),
- sa.Column('instance_uuid', sa.String(length=36)),
- sa.Column('project_id', sa.String(length=36)),
- sa.Column('user_id', sa.String(length=64)),
- sa.Column('availability_zone', sa.String(length=255)),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
-
- # create all tables
- tables = [instances, aggregates, console_auth_tokens,
- console_pools, instance_types,
- security_groups, snapshots,
- # those that are children and others later
- agent_builds, aggregate_hosts, aggregate_metadata,
- block_device_mapping, bw_usage_cache, cells,
- certificates, compute_nodes, consoles,
- dns_domains, fixed_ips, floating_ips,
- instance_faults, instance_id_mappings, instance_info_caches,
- instance_metadata, instance_system_metadata,
- instance_type_extra_specs, instance_type_projects,
- instance_actions, instance_actions_events, instance_extra,
- groups, group_policy, group_member,
- key_pairs, migrations, networks,
- pci_devices, provider_fw_rules, quota_classes, quota_usages,
- quotas, project_user_quotas,
- reservations, s3_images, security_group_instance_association,
- security_group_rules, security_group_default_rules,
- services, snapshot_id_mappings, tags, task_log,
- virtual_interfaces,
- volume_id_mappings,
- volume_usage_cache,
- resource_providers, inventories, allocations,
- resource_provider_aggregates]
-
- for table in tables:
- try:
- table.create()
- except Exception:
- LOG.info(repr(table))
- LOG.exception('Exception while creating table.')
- raise
-
- # MySQL specific indexes
- if migrate_engine.name == 'mysql':
- # NOTE(stephenfin): For some reason, we have to put this within the if
- # statement to avoid it being evaluated for the sqlite case. Even
- # though we don't call create except in the MySQL case... Failure to do
- # this will result in the following ugly error message:
- #
- # sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) no such
- # index: instance_type_id
- #
- # Yeah, I don't get it either...
- mysql_specific_indexes = [
- sa.Index(
- 'instance_type_id',
- instance_type_projects.c.instance_type_id),
- sa.Index('usage_id', reservations.c.usage_id),
- sa.Index(
- 'security_group_id',
- security_group_instance_association.c.security_group_id),
- ]
-
- for index in mysql_specific_indexes:
- index.create(migrate_engine)
-
- if migrate_engine.name == 'mysql':
- # In Folsom we explicitly converted migrate_version to UTF8.
- with migrate_engine.connect() as conn:
- conn.exec_driver_sql(
- 'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8'
- )
- # Set default DB charset to UTF8.
- conn.exec_driver_sql(
- 'ALTER DATABASE `%s` DEFAULT CHARACTER SET utf8' % (
- migrate_engine.url.database,
- )
- )
-
- # NOTE(cdent): The resource_providers table is defined as latin1 to
- # be more efficient. Now we need the name column to be UTF8. We
- # modify it here otherwise the declarative handling in sqlalchemy
- # gets confused.
- conn.exec_driver_sql(
- 'ALTER TABLE resource_providers MODIFY name '
- 'VARCHAR(200) CHARACTER SET utf8'
- )
-
- _create_shadow_tables(migrate_engine)
-
- # TODO(stephenfin): Fix these various bugs in a follow-up
-
- # 298_mysql_extra_specs_binary_collation; we should update the shadow table
- # also
-
- if migrate_engine.name == 'mysql':
- with migrate_engine.connect() as conn:
- # Use binary collation for extra specs table
- conn.exec_driver_sql(
- 'ALTER TABLE instance_type_extra_specs '
- 'CONVERT TO CHARACTER SET utf8 '
- 'COLLATE utf8_bin'
- )
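
With the legacy sqlalchemy-migrate tree removed, the main database schema is created solely by the alembic initial revision 8f2f1571d55b. A minimal sketch, mirroring what db_sync() now does internally (the SQLite URL is illustrative):

    from alembic import command as alembic_api
    from alembic import config as alembic_config

    # Load the per-database alembic config shipped in the tree, point it
    # at the target database, then upgrade straight to head.
    config = alembic_config.Config('nova/db/main/alembic.ini')
    config.set_main_option('sqlalchemy.url', 'sqlite:///nova.db')
    alembic_api.upgrade(config, 'head')
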
diff --git a/nova/db/main/legacy_migrations/versions/403_placeholder.py b/nova/db/main/legacy_migrations/versions/403_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/403_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/404_placeholder.py b/nova/db/main/legacy_migrations/versions/404_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/404_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/405_placeholder.py b/nova/db/main/legacy_migrations/versions/405_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/405_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/406_placeholder.py b/nova/db/main/legacy_migrations/versions/406_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/406_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/407_placeholder.py b/nova/db/main/legacy_migrations/versions/407_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/407_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/408_placeholder.py b/nova/db/main/legacy_migrations/versions/408_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/408_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/409_placeholder.py b/nova/db/main/legacy_migrations/versions/409_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/409_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/410_placeholder.py b/nova/db/main/legacy_migrations/versions/410_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/410_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/411_placeholder.py b/nova/db/main/legacy_migrations/versions/411_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/411_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/412_placeholder.py b/nova/db/main/legacy_migrations/versions/412_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/412_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/413_placeholder.py b/nova/db/main/legacy_migrations/versions/413_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/413_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/414_placeholder.py b/nova/db/main/legacy_migrations/versions/414_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/414_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/415_placeholder.py b/nova/db/main/legacy_migrations/versions/415_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/415_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/416_placeholder.py b/nova/db/main/legacy_migrations/versions/416_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/416_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/417_placeholder.py b/nova/db/main/legacy_migrations/versions/417_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/417_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/418_placeholder.py b/nova/db/main/legacy_migrations/versions/418_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/418_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/419_placeholder.py b/nova/db/main/legacy_migrations/versions/419_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/419_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/420_placeholder.py b/nova/db/main/legacy_migrations/versions/420_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/420_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/421_placeholder.py b/nova/db/main/legacy_migrations/versions/421_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/421_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/legacy_migrations/versions/422_placeholder.py b/nova/db/main/legacy_migrations/versions/422_placeholder.py
deleted file mode 100644
index 7a93224504..0000000000
--- a/nova/db/main/legacy_migrations/versions/422_placeholder.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is a placeholder for backports.
-# Do not use this number for new work. New work starts after
-# all the placeholders.
-#
-# See this for more information:
-# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
-
-
-def upgrade(migrate_engine):
- pass
diff --git a/nova/db/main/migrations/versions/8f2f1571d55b_initial_version.py b/nova/db/main/migrations/versions/8f2f1571d55b_initial_version.py
index 33bd840f25..da1cf3bac3 100644
--- a/nova/db/main/migrations/versions/8f2f1571d55b_initial_version.py
+++ b/nova/db/main/migrations/versions/8f2f1571d55b_initial_version.py
@@ -159,7 +159,9 @@ def _create_shadow_tables(connection):
column_copy = sa.Column(column.name, enum, nullable=True)
if column_copy is None:
- column_copy = column.copy()
+ # NOTE(stephenfin): Yes, this is private. Yes, this is what we
+ # were told to use. Blame zzzeek!
+ column_copy = column._copy()
columns.append(column_copy)
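
For context, the hunk above only swaps the removed public Column.copy() for the private Column._copy() that newer SQLAlchemy still provides. A minimal, hypothetical sketch of the column-copy pattern the surrounding shadow-table helper relies on (simplified, not the actual migration code):

    import sqlalchemy as sa

    def copy_columns(table: sa.Table) -> list:
        # _copy() returns a detached clone of each column so it can be
        # attached to a new shadow_<table> Table object.
        return [column._copy() for column in table.columns]
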
diff --git a/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py b/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py
new file mode 100644
index 0000000000..f4666a2b00
--- /dev/null
+++ b/nova/db/main/migrations/versions/960aac0e09ea_de_duplicate_indexes_in_instances__.py
@@ -0,0 +1,35 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""de-duplicate_indexes_in_instances__console_auth_tokens
+
+Revision ID: 960aac0e09ea
+Revises: ccb0fa1a2252
+Create Date: 2022-09-15 17:00:23.175991
+"""
+
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision = '960aac0e09ea'
+down_revision = 'ccb0fa1a2252'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ with op.batch_alter_table('console_auth_tokens', schema=None) as batch_op:
+ batch_op.drop_index('console_auth_tokens_token_hash_idx')
+
+ with op.batch_alter_table('instances', schema=None) as batch_op:
+ batch_op.drop_index('uuid')
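
The migration above drops two redundant indexes: the unique uuid index on instances (which duplicates the table's unique constraint on uuid) and console_auth_tokens_token_hash_idx (whose column is already the leading column of the token_hash/instance_uuid composite index that remains in models.py). A hedged verification sketch, assuming an engine pointed at the schema, listing whatever indexes remain after the upgrade:

    import sqlalchemy as sa

    def remaining_indexes(engine, table_name):
        # Inspect the live schema; after 'nova-manage db sync' the dropped
        # index names should no longer appear in this list.
        inspector = sa.inspect(engine)
        return [index['name'] for index in inspector.get_indexes(table_name)]

    # e.g. remaining_indexes(engine, 'instances') should not include 'uuid'.
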
diff --git a/nova/db/main/migrations/versions/ccb0fa1a2252_add_encryption_fields_to_.py b/nova/db/main/migrations/versions/ccb0fa1a2252_add_encryption_fields_to_.py
new file mode 100644
index 0000000000..1fd3fb4780
--- /dev/null
+++ b/nova/db/main/migrations/versions/ccb0fa1a2252_add_encryption_fields_to_.py
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add encryption fields to BlockDeviceMapping
+
+Revision ID: ccb0fa1a2252
+Revises: 16f1fbcab42b
+Create Date: 2022-01-12 15:22:47.524285
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = 'ccb0fa1a2252'
+down_revision = '16f1fbcab42b'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ for prefix in ('', 'shadow_'):
+ table_name = prefix + 'block_device_mapping'
+ with op.batch_alter_table(table_name, schema=None) as batch_op:
+ batch_op.add_column(
+ sa.Column(
+ 'encrypted',
+ sa.Boolean(),
+ nullable=True,
+ )
+ )
+ batch_op.add_column(
+ sa.Column(
+ 'encryption_secret_uuid',
+ sa.String(length=36),
+ nullable=True,
+ )
+ )
+ batch_op.add_column(
+ sa.Column('encryption_format',
+ sa.String(length=128),
+ nullable=True,
+ )
+ )
+ batch_op.add_column(
+ sa.Column('encryption_options',
+ sa.String(length=4096),
+ nullable=True,
+ )
+ )
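
The migration only defines upgrade(); Nova has not supported schema downgrades for many releases. Purely to illustrate the batch_alter_table pattern, a hypothetical inverse (not part of the actual file) would drop the same four columns from both the main and shadow tables:

    from alembic import op

    def downgrade():
        # Hypothetical mirror of the upgrade above.
        for prefix in ('', 'shadow_'):
            table_name = prefix + 'block_device_mapping'
            with op.batch_alter_table(table_name, schema=None) as batch_op:
                batch_op.drop_column('encryption_options')
                batch_op.drop_column('encryption_format')
                batch_op.drop_column('encryption_secret_uuid')
                batch_op.drop_column('encrypted')
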
diff --git a/nova/db/main/models.py b/nova/db/main/models.py
index f2f58b2db1..f8363a89c0 100644
--- a/nova/db/main/models.py
+++ b/nova/db/main/models.py
@@ -266,7 +266,6 @@ class Instance(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a guest VM."""
__tablename__ = 'instances'
__table_args__ = (
- sa.Index('uuid', 'uuid', unique=True),
sa.Index('instances_project_id_idx', 'project_id'),
sa.Index('instances_project_id_deleted_idx',
'project_id', 'deleted'),
@@ -654,9 +653,15 @@ class BlockDeviceMapping(BASE, NovaBase, models.SoftDeleteMixin):
attachment_id = sa.Column(sa.String(36))
+ encrypted = sa.Column(sa.Boolean, default=False)
+ encryption_secret_uuid = sa.Column(sa.String(36))
+ encryption_format = sa.Column(sa.String(128))
+ encryption_options = sa.Column(sa.String(4096))
# TODO(stephenfin): Remove once we drop the security_groups field from the
# Instance table. Until then, this is tied to the SecurityGroup table
+
+
class SecurityGroupInstanceAssociation(BASE, NovaBase, models.SoftDeleteMixin):
__tablename__ = 'security_group_instance_association'
__table_args__ = (
@@ -679,7 +684,7 @@ class SecurityGroup(BASE, NovaBase, models.SoftDeleteMixin):
name='uniq_security_groups0project_id0'
'name0deleted'),
)
-    id = sa.Column(sa.Integer, primary_key = True)
+    id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String(255))
description = sa.Column(sa.String(255))
@@ -687,8 +692,8 @@ class SecurityGroup(BASE, NovaBase, models.SoftDeleteMixin):
project_id = sa.Column(sa.String(255))
instances = orm.relationship(Instance,
-        secondary = "security_group_instance_association",
-        primaryjoin = 'and_('
+        secondary="security_group_instance_association",
+        primaryjoin='and_('
'SecurityGroup.id == '
'SecurityGroupInstanceAssociation.security_group_id,'
'SecurityGroupInstanceAssociation.deleted == 0,'
@@ -1040,7 +1045,6 @@ class ConsoleAuthToken(BASE, NovaBase):
__table_args__ = (
sa.Index('console_auth_tokens_instance_uuid_idx', 'instance_uuid'),
sa.Index('console_auth_tokens_host_expires_idx', 'host', 'expires'),
- sa.Index('console_auth_tokens_token_hash_idx', 'token_hash'),
sa.Index(
'console_auth_tokens_token_hash_instance_uuid_idx', 'token_hash',
'instance_uuid',
diff --git a/nova/db/migration.py b/nova/db/migration.py
index 80410c3192..2b185af1a6 100644
--- a/nova/db/migration.py
+++ b/nova/db/migration.py
@@ -19,24 +19,12 @@ import os
from alembic import command as alembic_api
from alembic import config as alembic_config
from alembic.runtime import migration as alembic_migration
-from migrate import exceptions as migrate_exceptions
-from migrate.versioning import api as migrate_api
-from migrate.versioning import repository as migrate_repository
from oslo_log import log as logging
from nova.db.api import api as api_db_api
from nova.db.main import api as main_db_api
from nova import exception
-MIGRATE_INIT_VERSION = {
- 'main': 401,
- 'api': 66,
-}
-ALEMBIC_INIT_VERSION = {
- 'main': '8f2f1571d55b',
- 'api': 'd67eeaabee36',
-}
-
LOG = logging.getLogger(__name__)
@@ -48,16 +36,6 @@ def _get_engine(database='main', context=None):
return api_db_api.get_engine()
-def _find_migrate_repo(database='main'):
- """Get the path for the migrate repository."""
-
- path = os.path.join(
- os.path.abspath(os.path.dirname(__file__)),
- database, 'legacy_migrations')
-
- return migrate_repository.Repository(path)
-
-
def _find_alembic_conf(database='main'):
"""Get the path for the alembic repository."""
@@ -73,35 +51,6 @@ def _find_alembic_conf(database='main'):
return config
-def _is_database_under_migrate_control(engine, repository):
- try:
- migrate_api.db_version(engine, repository)
- return True
- except migrate_exceptions.DatabaseNotControlledError:
- return False
-
-
-def _is_database_under_alembic_control(engine):
- with engine.connect() as conn:
- context = alembic_migration.MigrationContext.configure(conn)
- return bool(context.get_current_revision())
-
-
-def _init_alembic_on_legacy_database(engine, database, repository, config):
- """Init alembic in an existing environment with sqlalchemy-migrate."""
- LOG.info(
- 'The database is still under sqlalchemy-migrate control; '
- 'applying any remaining sqlalchemy-migrate-based migrations '
- 'and fake applying the initial alembic migration'
- )
- migrate_api.upgrade(engine, repository)
-
- # re-use the connection rather than creating a new one
- with engine.begin() as connection:
- config.attributes['connection'] = connection
- alembic_api.stamp(config, ALEMBIC_INIT_VERSION[database])
-
-
def _upgrade_alembic(engine, config, version):
# re-use the connection rather than creating a new one
with engine.begin() as connection:
@@ -126,7 +75,6 @@ def db_sync(version=None, database='main', context=None):
engine = _get_engine(database, context=context)
- repository = _find_migrate_repo(database)
config = _find_alembic_conf(database)
# discard the URL stored in alembic.ini in favour of the URL configured
# for the engine, casting from 'sqlalchemy.engine.url.URL' to str in the
@@ -138,16 +86,6 @@ def db_sync(version=None, database='main', context=None):
url = str(engine.url).replace('%', '%%')
config.set_main_option('sqlalchemy.url', url)
- # if we're in a deployment where sqlalchemy-migrate is already present,
- # then apply all the updates for that and fake apply the initial alembic
- # migration; if we're not then 'upgrade' will take care of everything
- # this should be a one-time operation
- if (
- _is_database_under_migrate_control(engine, repository) and
- not _is_database_under_alembic_control(engine)
- ):
- _init_alembic_on_legacy_database(engine, database, repository, config)
-
# apply anything later
LOG.info('Applying migration(s)')
@@ -161,17 +99,10 @@ def db_version(database='main', context=None):
if database not in ('main', 'api'):
raise exception.Invalid('%s is not a valid database' % database)
- repository = _find_migrate_repo(database)
engine = _get_engine(database, context=context)
- migrate_version = None
- if _is_database_under_migrate_control(engine, repository):
- migrate_version = migrate_api.db_version(engine, repository)
-
- alembic_version = None
- if _is_database_under_alembic_control(engine):
- with engine.connect() as conn:
- m_context = alembic_migration.MigrationContext.configure(conn)
- alembic_version = m_context.get_current_revision()
+ with engine.connect() as conn:
+ m_context = alembic_migration.MigrationContext.configure(conn)
+ version = m_context.get_current_revision()
- return alembic_version or migrate_version
+ return version
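
After this change db_version() is a thin wrapper over alembic's runtime API. A standalone sketch of the same query, assuming a SQLAlchemy engine pointed at the nova database (the URL is illustrative):

    import sqlalchemy as sa
    from alembic.runtime import migration as alembic_migration

    def current_revision(url='sqlite:///nova.db'):
        # Returns the revision currently stamped on the database, or None
        # if the database has never been migrated with alembic.
        engine = sa.create_engine(url)
        with engine.connect() as conn:
            m_context = alembic_migration.MigrationContext.configure(conn)
            return m_context.get_current_revision()
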
diff --git a/nova/exception.py b/nova/exception.py
index c0f25bd260..0c0ffa85a1 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -135,6 +135,10 @@ class GlanceConnectionFailed(NovaException):
"%(reason)s")
+class KeystoneConnectionFailed(NovaException):
+ msg_fmt = _("Connection to keystone host failed: %(reason)s")
+
+
class CinderConnectionFailed(NovaException):
msg_fmt = _("Connection to cinder host failed: %(reason)s")
@@ -175,6 +179,11 @@ class ForbiddenPortsWithAccelerator(NotSupported):
msg_fmt = _("Feature not supported with Ports that have accelerators.")
+class ForbiddenWithRemoteManagedPorts(NotSupported):
+ msg_fmt = _("This feature is not supported when remote-managed ports"
+ " are in use.")
+
+
class AdminRequired(Forbidden):
msg_fmt = _("User does not have admin privileges")
@@ -198,6 +207,21 @@ class Invalid(NovaException):
code = 400
+class InvalidVIOMMUMachineType(Invalid):
+ msg_fmt = _("vIOMMU is not supported by Current machine type %(mtype)s "
+ "(Architecture: %(arch)s).")
+
+
+class InvalidVIOMMUArchitecture(Invalid):
+ msg_fmt = _("vIOMMU required either x86 or AArch64 architecture, "
+ "but given architecture %(arch)s.")
+
+
+class InstanceQuiesceFailed(Invalid):
+ msg_fmt = _("Failed to quiesce instance: %(reason)s")
+ code = 409
+
+
class InvalidConfiguration(Invalid):
msg_fmt = _("Configuration is Invalid.")
@@ -731,6 +755,10 @@ class InvalidImageRef(Invalid):
msg_fmt = _("Invalid image href %(image_href)s.")
+class InvalidImagePropertyName(Invalid):
+ msg_fmt = _("Invalid image property name %(image_property_name)s.")
+
+
class AutoDiskConfigDisabledByImage(Invalid):
msg_fmt = _("Requested image %(image)s "
"has automatic disk resize disabled.")
@@ -993,10 +1021,6 @@ class QuotaClassExists(NovaException):
msg_fmt = _("Quota class %(class_name)s exists for resource %(resource)s")
-class OverQuota(NovaException):
- msg_fmt = _("Quota exceeded for resources: %(overs)s")
-
-
class SecurityGroupNotFound(NotFound):
msg_fmt = _("Security group %(security_group_id)s not found.")
@@ -1233,29 +1257,26 @@ class MaxRetriesExceeded(NoValidHost):
msg_fmt = _("Exceeded maximum number of retries. %(reason)s")
-class QuotaError(NovaException):
- msg_fmt = _("Quota exceeded: code=%(code)s")
- # NOTE(cyeoh): 413 should only be used for the ec2 API
- # The error status code for out of quota for the nova api should be
- # 403 Forbidden.
+class OverQuota(NovaException):
+ msg_fmt = _("Quota exceeded for resources: %(overs)s")
code = 413
safe = True
-class TooManyInstances(QuotaError):
+class TooManyInstances(OverQuota):
msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)s of %(allowed)s %(overs)s")
-class FloatingIpLimitExceeded(QuotaError):
+class FloatingIpLimitExceeded(OverQuota):
msg_fmt = _("Maximum number of floating IPs exceeded")
-class MetadataLimitExceeded(QuotaError):
+class MetadataLimitExceeded(OverQuota):
msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d")
-class OnsetFileLimitExceeded(QuotaError):
+class OnsetFileLimitExceeded(OverQuota):
msg_fmt = _("Personality file limit exceeded")
@@ -1267,18 +1288,26 @@ class OnsetFileContentLimitExceeded(OnsetFileLimitExceeded):
msg_fmt = _("Personality file content exceeds maximum %(allowed)s")
-class KeypairLimitExceeded(QuotaError):
- msg_fmt = _("Maximum number of key pairs exceeded")
+class KeypairLimitExceeded(OverQuota):
+ msg_fmt = _("Quota exceeded, too many key pairs.")
-class SecurityGroupLimitExceeded(QuotaError):
+class SecurityGroupLimitExceeded(OverQuota):
msg_fmt = _("Maximum number of security groups or rules exceeded")
-class PortLimitExceeded(QuotaError):
+class PortLimitExceeded(OverQuota):
msg_fmt = _("Maximum number of ports exceeded")
+class ServerGroupLimitExceeded(OverQuota):
+ msg_fmt = _("Quota exceeded, too many server groups.")
+
+
+class GroupMemberLimitExceeded(OverQuota):
+ msg_fmt = _("Quota exceeded, too many servers in group")
+
+
class AggregateNotFound(NotFound):
msg_fmt = _("Aggregate %(aggregate_id)s could not be found.")
@@ -1422,6 +1451,11 @@ class InstanceEvacuateNotSupported(Invalid):
msg_fmt = _('Instance evacuate is not supported.')
+class InstanceEvacuateNotSupportedTargetState(Invalid):
+ msg_fmt = _("Target state '%(target_state)s' for instance evacuate "
+ "is not supported.")
+
+
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
@@ -1450,6 +1484,11 @@ class UnsupportedRescueImage(Invalid):
msg_fmt = _("Requested rescue image '%(image)s' is not supported")
+class UnsupportedRPCVersion(Invalid):
+ msg_fmt = _("Unsupported RPC version for %(api)s. "
+ "Required >= %(required)s")
+
+
class Base64Exception(NovaException):
msg_fmt = _("Invalid Base 64 data for file %(path)s")
@@ -1556,8 +1595,8 @@ class PciRequestAliasNotDefined(NovaException):
msg_fmt = _("PCI alias %(alias)s is not defined")
-class PciConfigInvalidWhitelist(Invalid):
- msg_fmt = _("Invalid PCI devices Whitelist config: %(reason)s")
+class PciConfigInvalidSpec(Invalid):
+ msg_fmt = _("Invalid [pci]device_spec config: %(reason)s")
class PciRequestFromVIFNotFound(NotFound):
@@ -1565,6 +1604,16 @@ class PciRequestFromVIFNotFound(NotFound):
"PCI address: %(pci_slot)s on compute node: %(node_id)s")
+class PciDeviceRemoteManagedNotPresent(NovaException):
+ msg_fmt = _('Invalid PCI Whitelist: A device specified as "remote_managed"'
+ ' is not actually present on the host')
+
+
+class PciDeviceInvalidPFRemoteManaged(NovaException):
+ msg_fmt = _('Invalid PCI Whitelist: PFs must not have the "remote_managed"'
+                ' tag, device address: %(address)s')
+
+
# Cannot be templated, msg needs to be constructed when raised.
class InternalError(NovaException):
"""Generic hypervisor errors.
@@ -1652,9 +1701,15 @@ class MismatchVolumeAZException(Invalid):
class UnshelveInstanceInvalidState(InstanceInvalidState):
- msg_fmt = _('Specifying an availability zone when unshelving server '
- '%(instance_uuid)s with status "%(state)s" is not supported. '
- 'The server status must be SHELVED_OFFLOADED.')
+ msg_fmt = _('Specifying an availability zone or a host when unshelving '
+ 'server "%(instance_uuid)s" with status "%(state)s" is not '
+ 'supported. The server status must be SHELVED_OFFLOADED.')
+ code = 409
+
+
+class UnshelveHostNotInAZ(Invalid):
+ msg_fmt = _('Host "%(host)s" is not in the availability zone '
+ '"%(availability_zone)s".')
code = 409
@@ -1831,6 +1886,17 @@ class MemoryPageSizeNotSupported(Invalid):
msg_fmt = _("Page size %(pagesize)s is not supported by the host.")
+class LockMemoryForbidden(Forbidden):
+ msg_fmt = _("locked_memory value in image or flavor is forbidden when "
+ "mem_page_size is not set.")
+
+
+class FlavorImageLockedMemoryConflict(NovaException):
+ msg_fmt = _("locked_memory value in image (%(image)s) and flavor "
+ "(%(flavor)s) conflict. A consistent value is expected if "
+ "both specified.")
+
+
class CPUPinningInvalid(Invalid):
msg_fmt = _("CPU set to pin %(requested)s must be a subset of "
"free CPU set %(available)s")
@@ -1861,11 +1927,6 @@ class ImageCPUThreadPolicyForbidden(Forbidden):
"override CPU thread pinning policy set against the flavor")
-class ImagePMUConflict(Forbidden):
- msg_fmt = _("Image property 'hw_pmu' is not permitted to "
- "override the PMU policy set in the flavor")
-
-
class UnsupportedPolicyException(Invalid):
msg_fmt = _("ServerGroup policy is not supported: %(reason)s")
@@ -1920,6 +1981,10 @@ class SecureBootNotSupported(Invalid):
msg_fmt = _("Secure Boot is not supported by host")
+class FirmwareSMMNotSupported(Invalid):
+ msg_fmt = _("This firmware doesn't require (support) SMM")
+
+
class TriggerCrashDumpNotSupported(Invalid):
msg_fmt = _("Triggering crash dump is not supported")
@@ -2048,6 +2113,16 @@ class ResourceProviderUpdateConflict(PlacementAPIConflict):
"provider %(uuid)s (generation %(generation)d): %(error)s")
+class PlacementReshapeConflict(PlacementAPIConflict):
+ """A 409 caused by generation mismatch from attempting to reshape a
+ provider tree.
+ """
+ msg_fmt = _(
+ "A conflict was encountered attempting to reshape a provider tree: "
+ "$(error)s"
+ )
+
+
class InvalidResourceClass(Invalid):
msg_fmt = _("Resource class '%(resource_class)s' invalid.")
@@ -2111,11 +2186,6 @@ class InvalidPCINUMAAffinity(Invalid):
msg_fmt = _("Invalid PCI NUMA affinity configured: %(policy)s")
-class PowerVMAPIFailed(NovaException):
- msg_fmt = _("PowerVM API failed to complete for instance=%(inst_name)s. "
- "%(reason)s")
-
-
class TraitRetrievalFailed(NovaException):
msg_fmt = _("Failed to retrieve traits from the placement API: %(error)s")
@@ -2403,3 +2473,51 @@ class ProviderConfigException(NovaException):
"""
msg_fmt = _("An error occurred while processing "
"a provider config file: %(error)s")
+
+
+class PlacementPciException(NovaException):
+ msg_fmt = _(
+ "Failed to gather or report PCI resources to Placement: %(error)s")
+
+
+class PlacementPciDependentDeviceException(PlacementPciException):
+ msg_fmt = _(
+ "Configuring both %(parent_dev)s and %(children_devs)s in "
+ "[pci]device_spec is not supported. Either the parent PF or its "
+ "children VFs can be configured."
+ )
+
+
+class PlacementPciMixedResourceClassException(PlacementPciException):
+ msg_fmt = _(
+ "VFs from the same PF cannot be configured with different "
+ "'resource_class' values in [pci]device_spec. We got %(new_rc)s "
+ "for %(new_dev)s and %(current_rc)s for %(current_devs)s."
+ )
+
+
+class PlacementPciMixedTraitsException(PlacementPciException):
+ msg_fmt = _(
+ "VFs from the same PF cannot be configured with different set "
+ "of 'traits' in [pci]device_spec. We got %(new_traits)s for "
+ "%(new_dev)s and %(current_traits)s for %(current_devs)s."
+ )
+
+
+class ReimageException(NovaException):
+ msg_fmt = _("Reimaging volume failed.")
+
+
+class InvalidNodeConfiguration(NovaException):
+ msg_fmt = _('Invalid node identity configuration: %(reason)s')
+
+
+class DuplicateRecord(NovaException):
+ msg_fmt = _('Unable to create duplicate record for %(target)s')
+
+
+class NotSupportedComputeForEvacuateV295(NotSupported):
+ msg_fmt = _("Starting with microversion 2.95, evacuate API will stop "
+ "instance on destination. To evacuate before upgrades are "
+ "complete please use an older microversion. Required version "
+ "for compute %(expected), current version %(currently)s")
diff --git a/nova/filesystem.py b/nova/filesystem.py
new file mode 100644
index 0000000000..5394d2d835
--- /dev/null
+++ b/nova/filesystem.py
@@ -0,0 +1,59 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Functions to address filesystem calls, particularly sysfs."""
+
+import os
+
+from oslo_log import log as logging
+
+from nova import exception
+
+LOG = logging.getLogger(__name__)
+
+
+SYS = '/sys'
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+def read_sys(path: str) -> str:
+ """Reads the content of a file in the sys filesystem.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :returns: contents of that file.
+ :raises: nova.exception.FileNotFound if we can't read that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='r') as data:
+ return data.read()
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
+
+
+# NOTE(bauzas): this method is deliberately not wrapped in a privsep entrypoint
+# In order to correctly use it, you need to decorate the caller with a specific
+# privsep entrypoint.
+def write_sys(path: str, data: str) -> None:
+ """Writes the content of a file in the sys filesystem with data.
+
+ :param path: relative or absolute. If relative, will be prefixed by /sys.
+ :param data: the data to write.
+    :returns: None.
+ :raises: nova.exception.FileNotFound if we can't write that file.
+ """
+ try:
+ # The path can be absolute with a /sys prefix but that's fine.
+ with open(os.path.join(SYS, path), mode='w') as fd:
+ fd.write(data)
+ except (OSError, ValueError) as exc:
+ raise exception.FileNotFound(file_path=path) from exc
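
A usage sketch for the new helpers; the sysfs attribute below is a common read-only example on Linux, and the commented-out write only shows the call shape (writes need privileges and, per the notes above, a privsep-decorated caller):

    from nova import filesystem

    # Relative paths are joined onto /sys by the helper.
    online = filesystem.read_sys('devices/system/cpu/online').strip()
    print('online CPUs:', online)

    # Write path, illustrative only (requires privileges):
    # filesystem.write_sys('devices/system/cpu/cpu1/online', '0')
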
diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py
index abe6c0ab17..704538250f 100644
--- a/nova/hacking/checks.py
+++ b/nova/hacking/checks.py
@@ -140,6 +140,9 @@ mock_class_as_new_value_in_patching_re = re.compile(
rwlock_re = re.compile(
r"(?P<module_part>(oslo_concurrency\.)?(lockutils|fasteners))"
r"\.ReaderWriterLock\(.*\)")
+six_re = re.compile(r"^(import six(\..*)?|from six(\..*)? import .*)$")
+# Regex for catching the setDaemon method
+set_daemon_re = re.compile(r"\.setDaemon\(")
class BaseASTChecker(ast.NodeVisitor):
@@ -1030,3 +1033,69 @@ def check_lockutils_rwlocks(logical_line):
0,
msg % {'module': match.group('module_part')}
)
+
+
+@core.flake8ext
+def check_six(logical_line):
+ """Check for use of six
+
+ nova is now Python 3-only so we don't want six. However, people might use
+ it out of habit and it will likely work since six is a transitive
+ dependency.
+
+ N370
+ """
+ match = re.match(six_re, logical_line)
+ if match:
+ yield (0, "N370: Don't use or import six")
+
+
+@core.flake8ext
+def import_stock_mock(logical_line):
+ """Use python's mock, not the mock library.
+
+ Since we `dropped support for python 2`__, we no longer need to use the
+ mock library, which existed to backport py3 functionality into py2. Change
+ Ib44b5bff657c8e76c4f701e14d51a4efda3f6d32 cut over to importing the stock
+ mock, which must be done by saying::
+
+ from unittest import mock
+
+ ...because if you say::
+
+ import mock
+
+ ...you may be getting the stock mock; or, due to transitive dependencies in
+ the environment, the library mock. This check can be removed in the future
+ (and we can start saying ``import mock`` again) if we manage to purge these
+ transitive dependencies.
+
+ .. __: https://review.opendev.org/#/c/687954/
+
+ N371
+ """
+ if logical_line == 'import mock' or logical_line.startswith('from mock'):
+ yield (
+ 0,
+ "N371: You must explicitly import python's mock: "
+ "``from unittest import mock``"
+ )
+
+
+@core.flake8ext
+def check_set_daemon(logical_line):
+ """Check for use of the setDaemon method of the threading.Thread class
+
+ The setDaemon method of the threading.Thread class has been deprecated
+ since Python 3.10. Use the daemon attribute instead.
+
+ See
+ https://docs.python.org/3.10/library/threading.html#threading.Thread.setDaemon
+ for details.
+
+ N372
+ """
+ res = set_daemon_re.search(logical_line)
+ if res:
+ yield (0, "N372: Don't use the setDaemon method. "
+ "Use the daemon attribute instead.")
diff --git a/nova/db/api/legacy_migrations/__init__.py b/nova/limit/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/db/api/legacy_migrations/__init__.py
+++ b/nova/limit/__init__.py
diff --git a/nova/limit/local.py b/nova/limit/local.py
new file mode 100644
index 0000000000..f4e02c8020
--- /dev/null
+++ b/nova/limit/local.py
@@ -0,0 +1,234 @@
+# Copyright 2022 StackHPC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import typing as ty
+
+from oslo_limit import exception as limit_exceptions
+from oslo_limit import limit
+from oslo_log import log as logging
+
+import nova.conf
+from nova import exception
+from nova.limit import utils as nova_limit_utils
+from nova import objects
+
+LOG = logging.getLogger(__name__)
+CONF = nova.conf.CONF
+
+# Entity types for API Limits, same as names of config options prefixed with
+# "server_" to disambiguate them in keystone
+SERVER_METADATA_ITEMS = "server_metadata_items"
+INJECTED_FILES = "server_injected_files"
+INJECTED_FILES_CONTENT = "server_injected_file_content_bytes"
+INJECTED_FILES_PATH = "server_injected_file_path_bytes"
+API_LIMITS = set([
+ SERVER_METADATA_ITEMS,
+ INJECTED_FILES,
+ INJECTED_FILES_CONTENT,
+ INJECTED_FILES_PATH,
+])
+
+# Entity types for all DB limits, same as names of config options prefixed with
+# "server_" to disambiguate them in keystone
+KEY_PAIRS = "server_key_pairs"
+SERVER_GROUPS = "server_groups"
+SERVER_GROUP_MEMBERS = "server_group_members"
+DB_LIMITS = set([
+ KEY_PAIRS,
+ SERVER_GROUPS,
+ SERVER_GROUP_MEMBERS,
+])
+
+# Checks only happen when we are using the unified limits driver
+UNIFIED_LIMITS_DRIVER = "nova.quota.UnifiedLimitsDriver"
+
+# Map entity types to the exception we raise in the case that the resource is
+# over the allowed limit. Each of these should be a subclass of
+# exception.OverQuota.
+EXCEPTIONS = {
+ KEY_PAIRS: exception.KeypairLimitExceeded,
+ INJECTED_FILES_CONTENT: exception.OnsetFileContentLimitExceeded,
+ INJECTED_FILES_PATH: exception.OnsetFilePathLimitExceeded,
+ INJECTED_FILES: exception.OnsetFileLimitExceeded,
+ SERVER_METADATA_ITEMS: exception.MetadataLimitExceeded,
+ SERVER_GROUPS: exception.ServerGroupLimitExceeded,
+ SERVER_GROUP_MEMBERS: exception.GroupMemberLimitExceeded,
+}
+
+# Map new limit-based quota names to the legacy ones.
+LEGACY_LIMITS = {
+ SERVER_METADATA_ITEMS: "metadata_items",
+ INJECTED_FILES: "injected_files",
+ INJECTED_FILES_CONTENT: "injected_file_content_bytes",
+ INJECTED_FILES_PATH: "injected_file_path_bytes",
+ KEY_PAIRS: "key_pairs",
+ SERVER_GROUPS: SERVER_GROUPS,
+ SERVER_GROUP_MEMBERS: SERVER_GROUP_MEMBERS,
+}
+
+
+def get_in_use(
+ context: 'nova.context.RequestContext', project_id: str
+) -> ty.Dict[str, int]:
+ """Returns in use counts for each resource, for given project.
+
+ This sounds simple but many resources can't be counted per project,
+ so the only sensible value is 0. For example, key pairs are counted
+ per user, and server group members are counted per server group,
+ and metadata items are counted per server.
+ This behaviour is consistent with what is returned today by the
+ DB based quota driver.
+ """
+ count = _server_group_count(context, project_id)['server_groups']
+ usages = {
+ # DB limits
+ SERVER_GROUPS: count,
+ SERVER_GROUP_MEMBERS: 0,
+ KEY_PAIRS: 0,
+ # API limits
+ SERVER_METADATA_ITEMS: 0,
+ INJECTED_FILES: 0,
+ INJECTED_FILES_CONTENT: 0,
+ INJECTED_FILES_PATH: 0,
+ }
+ return _convert_keys_to_legacy_name(usages)
+
+
+def always_zero_usage(
+ project_id: str, resource_names: ty.List[str]
+) -> ty.Dict[str, int]:
+ """Called by oslo_limit's enforcer"""
+ # Return usage of 0 for API limits. Values in API requests will be used as
+ # the deltas.
+ return {resource_name: 0 for resource_name in resource_names}
+
+
+def enforce_api_limit(entity_type: str, count: int) -> None:
+ """Check if the values given are over the limit for that key.
+
+ This is generally used for limiting the size of certain API requests
+ that eventually get stored in the database.
+ """
+ if not nova_limit_utils.use_unified_limits():
+ return
+
+ if entity_type not in API_LIMITS:
+ fmt = "%s is not a valid API limit: %s"
+ raise ValueError(fmt % (entity_type, API_LIMITS))
+
+ try:
+ enforcer = limit.Enforcer(always_zero_usage)
+ except limit_exceptions.SessionInitError as e:
+ msg = ("Failed to connect to keystone while enforcing %s quota limit."
+ % entity_type)
+ LOG.error(msg + " Error: " + str(e))
+ raise exception.KeystoneConnectionFailed(msg)
+
+ try:
+ enforcer.enforce(None, {entity_type: count})
+ except limit_exceptions.ProjectOverLimit as e:
+ # Copy the exception message to a OverQuota to propagate to the
+ # API layer.
+ raise EXCEPTIONS.get(entity_type, exception.OverQuota)(str(e))
+
+
+def enforce_db_limit(
+ context: 'nova.context.RequestContext',
+ entity_type: str,
+ entity_scope: ty.Any,
+ delta: int
+) -> None:
+ """Check provided delta does not put resource over limit.
+
+ Firstly we count the current usage given the specified scope.
+ We then add that count to the specified delta to see if we
+ are over the limit for that kind of entity.
+
+ Note previously we used to recheck these limits.
+ However these are really soft DDoS protections,
+ not hard resource limits, so we don't do the recheck for these.
+
+ The scope is specific to the limit type:
+ * key_pairs scope is context.user_id
+ * server_groups scope is context.project_id
+ * server_group_members scope is server_group_uuid
+ """
+ if not nova_limit_utils.use_unified_limits():
+ return
+
+ if entity_type not in DB_COUNT_FUNCTION.keys():
+ fmt = "%s does not have a DB count function defined: %s"
+ raise ValueError(fmt % (entity_type, DB_COUNT_FUNCTION.keys()))
+ if delta < 0:
+ raise ValueError("delta must be a positive integer")
+
+ count_function = DB_COUNT_FUNCTION[entity_type]
+
+ try:
+ enforcer = limit.Enforcer(
+ functools.partial(count_function, context, entity_scope))
+ except limit_exceptions.SessionInitError as e:
+ msg = ("Failed to connect to keystone while enforcing %s quota limit."
+ % entity_type)
+ LOG.error(msg + " Error: " + str(e))
+ raise exception.KeystoneConnectionFailed(msg)
+
+ try:
+ enforcer.enforce(None, {entity_type: delta})
+ except limit_exceptions.ProjectOverLimit as e:
+ # Copy the exception message to an OverQuota to propagate to the
+ # API layer.
+ raise EXCEPTIONS.get(entity_type, exception.OverQuota)(str(e))
+
+
+def _convert_keys_to_legacy_name(
+ new_dict: ty.Dict[str, int]
+) -> ty.Dict[str, int]:
+ legacy = {}
+ for new_name, old_name in LEGACY_LIMITS.items():
+ # Defensive: in case oslo or keystone doesn't give us an answer.
+ legacy[old_name] = new_dict.get(new_name) or 0
+ return legacy
+
+
+def get_legacy_default_limits() -> ty.Dict[str, int]:
+ # TODO(johngarbutt): need oslo.limit API for this, it should do caching
+ enforcer = limit.Enforcer(lambda: None)
+ new_limits = enforcer.get_registered_limits(LEGACY_LIMITS.keys())
+ return _convert_keys_to_legacy_name(dict(new_limits))
+
+
+def _keypair_count(context, user_id, *args):
+ count = objects.KeyPairList.get_count_by_user(context, user_id)
+ return {'server_key_pairs': count}
+
+
+def _server_group_count(context, project_id, *args):
+ raw_counts = objects.InstanceGroupList.get_counts(context, project_id)
+ return {'server_groups': raw_counts['project']['server_groups']}
+
+
+def _server_group_members_count(context, server_group_uuid, *args):
+ # NOTE(johngarbutt) we used to count members added per user
+ server_group = objects.InstanceGroup.get_by_uuid(context,
+ server_group_uuid)
+ return {'server_group_members': len(server_group.members)}
+
+
+DB_COUNT_FUNCTION = {
+ KEY_PAIRS: _keypair_count,
+ SERVER_GROUPS: _server_group_count,
+ SERVER_GROUP_MEMBERS: _server_group_members_count
+}
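
For context, here is a minimal usage sketch of the local-limit helpers above. The call sites and values are illustrative assumptions rather than the actual API-layer wiring, and both checks are no-ops unless the unified limits quota driver is configured (see nova/limit/utils.py below)::

    from nova import context as nova_context
    from nova import exception
    from nova.limit import local as local_limit

    ctxt = nova_context.get_admin_context()

    # API-style check: the size of the incoming request is passed as the
    # count, while always_zero_usage() reports usage as zero.
    try:
        local_limit.enforce_api_limit(local_limit.SERVER_METADATA_ITEMS, 129)
    except exception.MetadataLimitExceeded:
        pass  # the API layer would translate this into an over-quota error

    # DB-style check: the scope depends on the entity type (key_pairs ->
    # user_id, server_groups -> project_id, server_group_members ->
    # server group uuid), as described in the enforce_db_limit docstring.
    local_limit.enforce_db_limit(
        ctxt, local_limit.KEY_PAIRS, entity_scope=ctxt.user_id, delta=1)
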
diff --git a/nova/limit/placement.py b/nova/limit/placement.py
new file mode 100644
index 0000000000..eedf7d69e1
--- /dev/null
+++ b/nova/limit/placement.py
@@ -0,0 +1,217 @@
+# Copyright 2022 StackHPC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import typing as ty
+
+import os_resource_classes as orc
+from oslo_limit import exception as limit_exceptions
+from oslo_limit import limit
+from oslo_log import log as logging
+
+import nova.conf
+from nova import exception
+from nova.limit import utils as limit_utils
+from nova import objects
+from nova import quota
+from nova.scheduler.client import report
+from nova.scheduler import utils
+
+LOG = logging.getLogger(__name__)
+CONF = nova.conf.CONF
+
+# Cache to avoid repopulating ksa state
+PLACEMENT_CLIENT = None
+
+LEGACY_LIMITS = {
+ "servers": "instances",
+ "class:VCPU": "cores",
+ "class:MEMORY_MB": "ram",
+}
+
+
+def _get_placement_usages(
+ context: 'nova.context.RequestContext', project_id: str
+) -> ty.Dict[str, int]:
+ return report.report_client_singleton().get_usages_counts_for_limits(
+ context, project_id)
+
+
+def _get_usage(
+ context: 'nova.context.RequestContext',
+ project_id: str,
+ resource_names: ty.List[str],
+) -> ty.Dict[str, int]:
+ """Called by oslo_limit's enforcer"""
+ if not limit_utils.use_unified_limits():
+ raise NotImplementedError("Unified limits support is disabled")
+
+ count_servers = False
+ resource_classes = []
+
+ for resource in resource_names:
+ if resource == "servers":
+ count_servers = True
+ continue
+
+ if not resource.startswith("class:"):
+ raise ValueError("Unknown resource type: %s" % resource)
+
+ # Temporarily strip resource class prefix as placement does not use it.
+ # Example: limit resource 'class:VCPU' will be returned as 'VCPU' from
+ # placement.
+ r_class = resource.lstrip("class:")
+ if r_class in orc.STANDARDS or orc.is_custom(r_class):
+ resource_classes.append(r_class)
+ else:
+ raise ValueError("Unknown resource class: %s" % r_class)
+
+ if not count_servers and len(resource_classes) == 0:
+ raise ValueError("no resources to check")
+
+ resource_counts = {}
+ if count_servers:
+ # TODO(melwitt): Change this to count servers from placement once nova
+ # is using placement consumer types and is able to differentiate
+ # between "instance" allocations vs "migration" allocations.
+ if not quota.is_qfd_populated(context):
+ LOG.error('Must migrate all instance mappings before using '
+ 'unified limits')
+ raise ValueError("must first migrate instance mappings")
+ mappings = objects.InstanceMappingList.get_counts(context, project_id)
+ resource_counts['servers'] = mappings['project']['instances']
+
+ try:
+ usages = _get_placement_usages(context, project_id)
+ except exception.UsagesRetrievalFailed as e:
+ msg = ("Failed to retrieve usages from placement while enforcing "
+ "%s quota limits." % ", ".join(resource_names))
+ LOG.error(msg + " Error: " + str(e))
+ raise exception.UsagesRetrievalFailed(msg)
+
+ # Use legacy behavior VCPU = VCPU + PCPU if configured.
+ if CONF.workarounds.unified_limits_count_pcpu_as_vcpu:
+ # If PCPU is in resource_classes, that means it was specified in the
+ # flavor explicitly. In that case, we expect it to have its own limit
+ # registered and we should not fold it into VCPU.
+ if orc.PCPU in usages and orc.PCPU not in resource_classes:
+ usages[orc.VCPU] = (usages.get(orc.VCPU, 0) +
+ usages.get(orc.PCPU, 0))
+
+ for resource_class in resource_classes:
+ # Need to add back resource class prefix that was stripped earlier
+ resource_name = 'class:' + resource_class
+ # Placement doesn't know about classes with zero usage
+ # so default to zero to tell oslo.limit usage is zero
+ resource_counts[resource_name] = usages.get(resource_class, 0)
+
+ return resource_counts
+
+
+def _get_deltas_by_flavor(
+ flavor: 'objects.Flavor', is_bfv: bool, count: int
+) -> ty.Dict[str, int]:
+ if flavor is None:
+ raise ValueError("flavor is required")
+ if count < 0:
+ raise ValueError("count must not be negative")
+
+ # NOTE(johngarbutt): this skips bfv, port, and cyborg resources,
+ # but it still gives us better checks than before unified limits.
+ # We need an instance in the DB to use the current is_bfv logic,
+ # which doesn't work well for instances that don't yet have a uuid.
+ deltas_from_flavor = utils.resources_for_limits(flavor, is_bfv)
+
+ deltas = {"servers": count}
+ for resource, amount in deltas_from_flavor.items():
+ if amount != 0:
+ deltas["class:%s" % resource] = amount * count
+ return deltas
+
+
+def _get_enforcer(
+ context: 'nova.context.RequestContext', project_id: str
+) -> limit.Enforcer:
+ # NOTE(johngarbutt) should we move context arg into oslo.limit?
+ def callback(project_id, resource_names):
+ return _get_usage(context, project_id, resource_names)
+
+ return limit.Enforcer(callback)
+
+
+def enforce_num_instances_and_flavor(
+ context: 'nova.context.RequestContext',
+ project_id: str,
+ flavor: 'objects.Flavor',
+ is_bfvm: bool,
+ min_count: int,
+ max_count: int,
+ enforcer: ty.Optional[limit.Enforcer] = None
+) -> int:
+ """Return max instances possible, else raise TooManyInstances exception."""
+ if not limit_utils.use_unified_limits():
+ return max_count
+
+ # Ensure the recursion will always complete
+ if min_count < 0 or min_count > max_count:
+ raise ValueError("invalid min_count")
+ if max_count < 0:
+ raise ValueError("invalid max_count")
+
+ deltas = _get_deltas_by_flavor(flavor, is_bfvm, max_count)
+ enforcer = _get_enforcer(context, project_id)
+ try:
+ enforcer.enforce(project_id, deltas)
+ except limit_exceptions.ProjectOverLimit as e:
+ # NOTE(johngarbutt) we can do better, but this is very simple
+ LOG.debug("Limit check failed with count %s retrying with count %s",
+ max_count, max_count - 1)
+ try:
+ return enforce_num_instances_and_flavor(context, project_id,
+ flavor, is_bfvm, min_count,
+ max_count - 1,
+ enforcer=enforcer)
+ except ValueError:
+ # Copy the *original* exception message to an OverQuota to
+ # propagate to the API layer.
+ raise exception.TooManyInstances(str(e))
+
+ # no problems with max_count, so we return max count
+ return max_count
+
+
+def _convert_keys_to_legacy_name(new_dict):
+ legacy = {}
+ for new_name, old_name in LEGACY_LIMITS.items():
+ # Defensive: in case oslo or keystone doesn't give us an answer.
+ legacy[old_name] = new_dict.get(new_name) or 0
+ return legacy
+
+
+def get_legacy_default_limits():
+ enforcer = limit.Enforcer(lambda: None)
+ new_limits = enforcer.get_registered_limits(LEGACY_LIMITS.keys())
+ return _convert_keys_to_legacy_name(dict(new_limits))
+
+
+def get_legacy_project_limits(project_id):
+ enforcer = limit.Enforcer(lambda: None)
+ new_limits = enforcer.get_project_limits(project_id, LEGACY_LIMITS.keys())
+ return _convert_keys_to_legacy_name(dict(new_limits))
+
+
+def get_legacy_counts(context, project_id):
+ resource_names = list(LEGACY_LIMITS.keys())
+ resource_names.sort()
+ new_usage = _get_usage(context, project_id, resource_names)
+ return _convert_keys_to_legacy_name(new_usage)
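
For context, a rough sketch of how the placement-backed check above composes. The flavor, counts and resulting deltas are illustrative assumptions, not values from this change::

    from nova import context as nova_context
    from nova import objects
    from nova.limit import placement as placement_limit

    ctxt = nova_context.get_admin_context()
    # Assume a flavor with vcpus=2, memory_mb=2048, root_gb=20.
    flavor = objects.Flavor.get_by_name(ctxt, 'm1.small')

    # For max_count=3, _get_deltas_by_flavor() would produce roughly:
    #   {"servers": 3, "class:VCPU": 6, "class:MEMORY_MB": 6144,
    #    "class:DISK_GB": 60}
    # enforce_num_instances_and_flavor() asks oslo.limit to enforce those
    # deltas against the project's placement usage and, on
    # ProjectOverLimit, retries with max_count - 1 until the count would
    # drop below min_count, at which point TooManyInstances is raised.
    allowed = placement_limit.enforce_num_instances_and_flavor(
        ctxt, ctxt.project_id, flavor, is_bfvm=False,
        min_count=1, max_count=3)
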
diff --git a/nova/db/api/legacy_migrations/manage.py b/nova/limit/utils.py
index 6c2b3842ba..bced2452c7 100644
--- a/nova/db/api/legacy_migrations/manage.py
+++ b/nova/limit/utils.py
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-# Copyright 2012 OpenStack Foundation
+# Copyright 2022 StackHPC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -13,8 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from migrate.versioning.shell import main
+import nova.conf
+CONF = nova.conf.CONF
+UNIFIED_LIMITS_DRIVER = "nova.quota.UnifiedLimitsDriver"
-if __name__ == '__main__':
- main(debug='False', repository='.')
+
+def use_unified_limits():
+ return CONF.quota.driver == UNIFIED_LIMITS_DRIVER
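
For reference, the helper above is the single switch for all of the new enforcement paths: they only activate when the operator opts in to the unified limits quota driver. A minimal nova.conf sketch follows; the note about [oslo_limit] reflects a typical Keystone-backed setup and is an assumption, not part of this change::

    [quota]
    driver = nova.quota.UnifiedLimitsDriver

    # The oslo.limit enforcer additionally needs the [oslo_limit] section
    # configured with Keystone credentials and the endpoint against which
    # registered limits are looked up.
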
diff --git a/nova/locale/cs/LC_MESSAGES/nova.po b/nova/locale/cs/LC_MESSAGES/nova.po
index df7433d4c0..16baa8e1bb 100644
--- a/nova/locale/cs/LC_MESSAGES/nova.po
+++ b/nova/locale/cs/LC_MESSAGES/nova.po
@@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -132,18 +132,6 @@ msgid "Affinity instance group policy was violated."
msgstr "Zásada skupiny sluÄivosti instance byla poruÅ¡ena."
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "Agent volání nepodporuje: %(method)s"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"Sestavení agenta existuje s hypervizorem %(hypervisor)s, operaÄním systémem "
-"%(os)s a architekturou %(architecture)s"
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "Agregát %(aggregate_id)s již má hostitele %(host)s."
@@ -161,19 +149,9 @@ msgstr ""
"Agregát %(aggregate_id)s nemá žádná metadata s klíÄem %(metadata_key)s."
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr ""
-"Agregát %(aggregate_id)s: Äinnost '%(action)s' způsobila chybu: %(reason)s."
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "Agregát %(aggregate_name)s již existuje."
-#, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "Agregát pro poÄet hostitelů %(host)s nelze nalézt."
-
msgid "An unknown error has occurred. Please try your request again."
msgstr "Vyskytla se neznámá chyba. Prosím zopakujte Váš požadavek."
@@ -331,12 +309,6 @@ msgstr "Nelze najít požadovaný obraz"
msgid "Can not handle authentication request for %d credentials"
msgstr "Nelze zpracovat žádost o ověření přihlašovacích údajů %d"
-msgid "Can't resize a disk to 0 GB."
-msgstr "Nelze změnit velikost disku na 0 GB."
-
-msgid "Can't resize down ephemeral disks."
-msgstr "Nelze zmenšit efemerní disky."
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr "Nelze získat cestu kořenového zařízení z nastavení libvirt instance"
@@ -370,12 +342,6 @@ msgstr "Nelze připojit jeden nebo více svazků k mnoha instancím"
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr "Nelze volat %(method)s na osiřelém objektu %(objtype)s"
-msgid "Cannot find SR of content-type ISO"
-msgstr "Nelze najít SR typu obsahu ISO"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "Nelze najít SR pro Ätení/zápis VDI."
-
msgid "Cannot find image for rebuild"
msgstr "Nelze najít obraz ke znovu sestavení"
@@ -494,10 +460,6 @@ msgid "Connection to libvirt lost: %s"
msgstr "Připojování k libvirt ztraceno: %s"
#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "Připojení k hypervizoru je rozbité na hostiteli: %(host)s"
-
-#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
@@ -569,19 +531,9 @@ msgid "Default PBM policy is required if PBM is enabled."
msgstr "Výchozí zásada PVM je pro povolení PBM vyžadována."
#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "Smazáno %(records)d záznamů z tabulky '%(table_name)s'."
-
-#, python-format
msgid "Device '%(device)s' not found."
msgstr "Zařízení '%(device)s' nenalezeno."
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr ""
-"ID zařízení %(id)s zadáno, ale není podporováno verzí hypervizoru %(version)s"
-
msgid "Device name contains spaces."
msgstr "Název zařízení obsahuje mezery."
@@ -589,18 +541,6 @@ msgid "Device name empty or too long."
msgstr "Název zařízení je prázdný nebo příliš dlouhý."
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-"Různé typy v %(table)s.%(column)s a stínové tabulce: %(c_type)s "
-"%(shadow_c_type)s"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr "Disk obsahuje souborový systém, jehož velikost nelze změnit: %s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "Formát disku %(disk_format)s není přijatelný"
@@ -608,13 +548,6 @@ msgstr "Formát disku %(disk_format)s není přijatelný"
msgid "Disk info file is invalid: %(reason)s"
msgstr "Soubor informací o disku je neplatný: %(reason)s"
-msgid "Disk must have only one partition."
-msgstr "Disk musí mít pouze jeden oddíl."
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr "Disk s id: %s nebylo připojeno k instanci."
-
#, python-format
msgid "Driver Error: %s"
msgstr "Chyba ovladaÄe: %s"
@@ -628,10 +561,6 @@ msgstr ""
"'%(state)s'."
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "Chyba během následujícího volání agenta: %(method)s"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr "Chyba během vyskladňování instance %(instance_id)s: %(reason)s"
@@ -683,9 +612,6 @@ msgstr "Chyba při připojování %(image)s pomocí libguestfs (%(e)s)"
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Chyba při vytváření monitoru zdroje: %(monitor)s"
-msgid "Error: Agent is disabled"
-msgstr "Chyba: Agent je zakázán"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "Událost %(event)s nenalezena pro id žádosti %(action_id)s"
@@ -717,10 +643,6 @@ msgstr "PÅ™ekroÄen maximální poÄet pokusů. %(reason)s"
msgid "Expected a uuid but received %(uuid)s."
msgstr "OÄekáváno uuid ale obdrženo %(uuid)s."
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "Sloupec %(table)s.%(column)s je ve stínové tabulce navíc"
-
msgid "Extracting vmdk from OVA failed."
msgstr "Extrahování vmdk z OVA selhalo."
@@ -775,9 +697,6 @@ msgstr "Nelze mapovat oddíly: %s"
msgid "Failed to mount filesystem: %s"
msgstr "Nelze připojit souborový systém: %s"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr "Nelze zpracovat informace o zařízení pci pro průchod"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "Nelze vypnout instanci: %(reason)s"
@@ -787,14 +706,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "Nelze zapnout instanci: %(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr ""
-"Nelze připravit PCI zařízení %(id)s pro instanci %(instance_uuid)s: "
-"%(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "Poskytnutí instance %(inst)s selhalo: %(reason)s"
@@ -826,9 +737,6 @@ msgstr "Nelze spustit qemu-img info na %(path)s : %(error)s"
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr "Nelze nastavit heslo správce v %(instance)s z důvodu %(reason)s"
-msgid "Failed to spawn, rolling back"
-msgstr "Nelze vytvořit, vráceno zpět"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "Nelze pozastavit instanci: %(reason)s"
@@ -845,10 +753,6 @@ msgid "File %(file_path)s could not be found."
msgstr "Soubor %(file_path)s nemohl být nalezen."
#, python-format
-msgid "File path %s not valid"
-msgstr "Cesta souboru %s není paltná"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr "Pevná IP %(ip)s není platnou ip adresou pro síť %(network_id)s."
@@ -970,18 +874,6 @@ msgid "Found no disk to snapshot."
msgstr "Nenalezen žádný disk k pořízení snímku."
#, python-format
-msgid "Found no network for bridge %s"
-msgstr "Žádná síť pro most %s nenalezena"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "Nalezena sít mostu %s, která není jedineÄná"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "Nalezena síť s názvem Å¡títku %s, který není jedineÄný"
-
-#, python-format
msgid "Host %(host)s could not be found."
msgstr "Hostitel %(host)s nemohl být nalezen."
@@ -1001,9 +893,6 @@ msgstr "Hostitel nepodporuje hosty s nastavenou topologií NUMA"
msgid "Host does not support guests with custom memory page sizes"
msgstr "Hostitel nepodporuje hosty s vlastní velikostí stránek paměti"
-msgid "Host startup on XenServer is not supported."
-msgstr "Spuštění hostitele na XenServer není podporováno."
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
"OvladaÄ hypervizoru nepodporuje metodu po pÅ™esunutí za provozu ve zdroji"
@@ -1207,10 +1096,6 @@ msgstr "Instance nemá žádného zdrojového hostitele"
msgid "Instance has not been resized."
msgstr "Instanci nebyla změněna velikost."
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "Instance již je v záchranném režimu: %s"
-
msgid "Instance is not a member of specified network"
msgstr "Instance není Älenem zadané sítÄ›"
@@ -1231,10 +1116,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "NedostateÄné výpoÄetní zdroje: %(reason)s."
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr "Pro spuÅ¡tÄ›ní %(uuid)s je ve výpoÄetním uzlu nedostatek volné pamÄ›ti."
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "Rozhraní %(interface)s nenalezeno."
@@ -1425,13 +1306,6 @@ msgid ""
"It is not allowed to create an interface on external network %(network_uuid)s"
msgstr "Na vnější síti %(network_uuid)s není povoleno vytvářet rozhraní"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr ""
-"Obraz Kernel/Ramdisk je příliš velký: %(vdi_size)d bajtů, max %(max_size)d "
-"bajtů"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1455,12 +1329,6 @@ msgstr "Pár klíÄů %(name)s nenalezena pro uživatele %(user_id)s"
msgid "Keypair data is invalid: %(reason)s"
msgstr "Data páru klíÄů jsou neplatná: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Název páru klíÄů obsahuje nebezpeÄné znaky"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr "Název páru klíÄe musí být Å™etÄ›zec dlouhý 1 až 255 znaků"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Limity jsou podporovány pouze ve vCenter verze 6.0 a vyšší"
@@ -1489,9 +1357,6 @@ msgstr "Indikátor %(marker)s nemohl být nalezen."
msgid "Maximum number of floating IPs exceeded"
msgstr "PÅ™ekroÄen maximálních poÄet plovoucích IP adres"
-msgid "Maximum number of key pairs exceeded"
-msgstr "PÅ™ekroÄen maximálních poÄet párů klíÄů"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "Maximální poÄet popisných položek pÅ™ekraÄuje %(allowed)d"
@@ -1522,12 +1387,6 @@ msgstr ""
"Metrika %(name)s nemohla být nalezena v uzlu výpoÄetního hostitele %(host)s."
"%(node)s."
-msgid "Migrate Receive failed"
-msgstr "Přijetí přesunu selhalo"
-
-msgid "Migrate Send failed"
-msgstr "Odeslání přesunu selhalo"
-
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr "Přesun %(migration_id)s nemohl být nalezen."
@@ -1552,10 +1411,6 @@ msgstr "Chyba kontroly před přesunem: %(reason)s"
msgid "Missing arguments: %s"
msgstr "Chybí argumenty: %s"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "Ve stínové tabulce chybí sloupec %(table)s.%(column)s"
-
msgid "Missing device UUID."
msgstr "UUID zařízení chybí."
@@ -1638,13 +1493,6 @@ msgid "Must not input both network_id and port_id"
msgstr "Id sítě a id portu nesmí být zadány najednou"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"Pro použití compute_driver=xenapi.XenAPIDriver musíte zadat url připojení a "
-"volitelně uživatelské jméno a heslo připojení"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1694,10 +1542,6 @@ msgstr "Žádné mapování blokového zařízení s id %(id)s."
msgid "No Unique Match Found."
msgstr "Nenalezena žádná jedineÄná shoda."
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "Žádné sestavení agenta není přidruženo k id %(id)s."
-
msgid "No compute host specified"
msgstr "Nezadán žádný výpoÄetní hostitel"
@@ -1748,16 +1592,9 @@ msgstr "V %(root)s z %(image)s nenalezeny žádné body připojení"
msgid "No operating system found in %s"
msgstr "V %s nenalezen žádný operaÄní systém"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "Nenalezen žádný hlavní VDI pro %s"
-
msgid "No root disk defined."
msgstr "Nezadán žádný kořenový disk."
-msgid "No suitable network for migrate"
-msgstr "Žádné vhodné sítě pro přesun"
-
msgid "No valid host found for cold migrate"
msgstr "Nebyl nalezen žádný platný hostitel pro přesun při nepoužívání"
@@ -1831,14 +1668,6 @@ msgstr "Jeden nebo více hostitelů již jsou v zónách dostupnosti %s"
msgid "Only administrators may list deleted instances"
msgstr "Pouze správci mohou vypsat smazané instance"
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr ""
-"Pouze souborové SR (ext/NFS) jsou touto funkcí podporovány. SR %(uuid)s má "
-"typ %(type)s"
-
msgid "Origin header does not match this host."
msgstr "HlaviÄka původu neodpovídá tomuto hostiteli."
@@ -1881,10 +1710,6 @@ msgid "PCI device request %(requests)s failed"
msgstr "Žádost zařízení PCI %(requests)s selhala"
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "Fyz. rozhraní %s neobsahuje IP adresu"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "Velikost stránky %(pagesize)s je zakázána v '%(against)s'"
@@ -1983,10 +1808,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "Kvóta pÅ™ekroÄena, příliÅ¡ mnoho serverů ve skupinÄ›"
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "Kvóta pÅ™ekroÄena: kód=%(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr "Kvóta existuje pro projekt %(project_id)s, zdroj %(resource)s"
@@ -2016,10 +1837,6 @@ msgid ""
msgstr ""
"Limit kvóty %(limit)s pro %(resource)s musí být menší nebo rovno %(maximum)s."
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr "Dosaženo maximálního poÄtu nových pokusů o odpojení VBD %s"
-
msgid "Request body and URI mismatch"
msgstr "Neshoda s tělem požadavku a URI"
@@ -2178,10 +1995,6 @@ msgid "Set admin password is not supported"
msgstr "Nastavení hesla správce není podporováno"
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "Stínová tabulka s názvem %(name)s již existuje."
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "Sdílení '%s' není podporováno"
@@ -2189,13 +2002,6 @@ msgstr "Sdílení '%s' není podporováno"
msgid "Share level '%s' cannot have share configured"
msgstr "Úroveň sdílení '%s' nemůže mít sdílení nastavena"
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"Zmenšení souborového systému pomocí resize2fs selhalo, prosím zkontrolujte, "
-"zda máte na svém disku dostatek volného místa."
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "Snímek %(snapshot_id)s nemohl být nalezen."
@@ -2220,12 +2026,6 @@ msgstr "Zadaný klÃ­Ä Å™azení byl neplatný."
msgid "Specified fixed address not assigned to instance"
msgstr "Zadaná pevná adresa není k instanci přidělena"
-msgid "Specify `table_name` or `table` param"
-msgstr "Zadejte parametr `table_name` nebo `table`"
-
-msgid "Specify only one param `table_name` `table`"
-msgstr "Zadejte pouze jeden parametr `table_name` `table`"
-
msgid "Started"
msgstr "Spuštěno"
@@ -2291,9 +2091,6 @@ msgstr "Instance vyžaduje novější verzi hypervizoru, než byla poskytnuta."
msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
msgstr "PoÄet zadaných portů: %(ports)d pÅ™esahuje limit: %(quota)d"
-msgid "The only partition should be partition 1."
-msgstr "Jediným oddílem by měl být oddíl 1."
-
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr "Zadaná cesta zařízení RNG: (%(path)s) se nevyskytuje na hostiteli."
@@ -2356,48 +2153,13 @@ msgstr ""
"Svazek nemůže být přidělen ke stejnému názvu zařízení jako kořenové zařízení "
"%s"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"V tabulce '%(table_name)s' existuje %(records)d záznamů, kde uuid nebo uuid "
-"instance je prázdné. Poté, co jste provedli zálohu všech důležitých dat, "
-"spusťte tento příkaz znovu s volbou --delete."
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"V tabulce '%(table_name)s' existuje %(records)d záznamů, kde uuid nebo uuid "
-"instance je prázdné. Tyto musí být ruÄnÄ› vyÄiÅ¡tÄ›ny pÅ™edtím, než bude "
-"pÅ™esunutí úspěšnÄ› dokonÄeno. Zvažte spustit příkaz 'nova-manage db "
-"null_instance_uuid_scan'."
-
msgid "There are not enough hosts available."
msgstr "Není dostatek dostupných hostitelů."
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"Stále existuje %(count)i nepřesunutých záznamů konfigurace. Přesun nemůže "
-"pokraÄovat, dokud nebudou vÅ¡echny záznamy konfigurace instance pÅ™esunuty do "
-"nového formátu. Prosím nejdříve spusťte `nova-manage db migrate_flavor_data'."
-
-#, python-format
msgid "There is no such action: %s"
msgstr "Žádná taková Äinnost: %s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr "Nebyly nalezeny žádné záznamy, kde uuid instance bylo prázdné."
-
#, python-format
msgid ""
"This compute node's hypervisor is older than the minimum supported version: "
@@ -2406,9 +2168,6 @@ msgstr ""
"Hypervizor tohoto uzlu výpoÄtu je starší než minimální podporovaná verze: "
"%(version)s."
-msgid "This domU must be running on the host specified by connection_url"
-msgstr "Tento domU musí být spuštěn na hostiteli zadaném v url připojení."
-
msgid ""
"This method needs to be called with either networks=None and port_ids=None "
"or port_ids and networks as not none."
@@ -2428,10 +2187,6 @@ msgstr ""
"Tato služba je starší (v%(thisver)i) než minimální verze (v%(minver)i) ve "
"zbytku nasazení. Nelze pokraÄovat."
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "VyprÅ¡el Äasový limit pÅ™i Äekání na vytvoÅ™ení zařízení %s"
-
msgid "Timeout waiting for response from cell"
msgstr "PÅ™i Äekání na odpovÄ›Ä od buňky vyprÅ¡el Äas"
@@ -2470,19 +2225,6 @@ msgid "Unable to authenticate Ironic client."
msgstr "Nelze ověřit klienta ironic."
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr ""
-"Nelze kontaktovat agenta hosta. Následujícímu volání vyprÅ¡el Äas: %(method)s"
-
-#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "Nelze zniÄit VBD %s"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "Nelze zniÄit VDI %s"
-
-#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "Nelze zjistit Å™adiÄ disku pro '%s'"
@@ -2491,22 +2233,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "Nelze zjistit předponu disku pro %s"
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr "Nelze vyjmout %s ze zásoby; Nenalezen žádný správce zásoby"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "Nelze vyjmout %s ze zásoby; zásoba není prázdná"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "Nelze najít SR z VBD %s"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "Nelze najít SR z VDI %s"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "Nelze najít soubor certifikaÄní autority : %s"
@@ -2525,9 +2251,6 @@ msgstr "Nelze najít cíl ISCSI"
msgid "Unable to find key_file : %s"
msgstr "Nelze najít soubor s klíÄem : %s"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "Nelze najít kořen VBD/VDI pro VM"
-
msgid "Unable to find volume"
msgstr "Nelze najít svazek"
@@ -2537,22 +2260,6 @@ msgstr "Nelze získat UUID hostitele: /etc/machine-id neexistuje"
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr "Nelze získat UUID hostitele: /etc/machine-id je prázdné"
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "Nelze získat záznam VDI %s na"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "Nelze zavést VDI pro SR %s"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "Nelze zavést VDI na SR %s"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "Nelze připojit %s do zásoby"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2583,10 +2290,6 @@ msgstr ""
"Nelze pÅ™esunout instanci (%(instance_id)s) na souÄasného hostitele "
"(%(host)s)."
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "Nelze získat informace o cíli %s"
-
msgid "Unable to resize disk down."
msgstr "Nelze zmenšit velikost disku."
@@ -2596,13 +2299,6 @@ msgstr "Nelze nastavit heslo instance"
msgid "Unable to shrink disk."
msgstr "Nelze zmenšit disk."
-msgid "Unable to terminate instance."
-msgstr "Nelze ukonÄit instanci."
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "Nelze odpojit VBD %s"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "Nepřijatelné informace o CPU: %(reason)s"
@@ -2622,16 +2318,6 @@ msgstr ""
"mapování blokového zařízení z více instancí."
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"NeoÄekávaná chyba API. Prosím nahlaste ji na http://bugs.launchpad.net/nova/ "
-"a pokud možno připojte k ní záznam Nova API.\n"
-"%s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "NeoÄekávaná Äinnost agregátu %s"
@@ -2691,9 +2377,6 @@ msgstr "Pokus o vyskladnění, ale obraz %s nemůže být nalezen."
msgid "Unsupported Content-Type"
msgstr "Nepodporovaný Content-Type"
-msgid "Upgrade DB using Essex release first."
-msgstr "Nejdříve aktualizujte DB pomocí verze z Essex."
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr "Uživatel %(username)s nenalezen v souboru hesel."
@@ -2717,24 +2400,6 @@ msgstr ""
#, python-format
msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"VDI %(vdi_ref)s má %(virtual_size)d bajtů, což jje více než velikost "
-"konfigurace mající %(new_disk_size)d bajtů."
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"VDi nenalezeno v SR %(sr)s (uuid vid %(vdi_uuid)s,cílový lun %(target_lun)s)"
-
-#, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr "PÅ™ekroÄen poÄet pokusů (%d) o splynutí VHD, operace zruÅ¡ena..."
-
-#, python-format
-msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
msgstr ""
@@ -2803,13 +2468,6 @@ msgstr ""
"Svazek nastavuje velikost bloku, ale souÄasný hypervizor libvirt '%s' "
"nepodporuje vlastní velikost bloku"
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr ""
-"Schéma '%s' není podporováno v Python s verzí < 2.7.4, prosím použijte http "
-"nebo https"
-
msgid "When resizing, instances must change flavor!"
msgstr "Při změně velikosti musí instance změnit konfiguraci!"
@@ -2824,9 +2482,6 @@ msgstr ""
msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr "Špatná metoda kvóty %(method)s použita na zdroj %(res)s"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr "Špatný typ metody háku. Jsou povoleny pouze typy 'pre¨a 'post'"
-
msgid "X-Forwarded-For is missing from request."
msgstr "X-Forwarded-For v žádosti chybí"
@@ -2842,9 +2497,6 @@ msgstr "X-Metadata-Provider v žádosti chybí."
msgid "X-Tenant-ID header is missing from request."
msgstr "HlaviÄka X-Tenant-ID v žádosti chybí."
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "Vyžadováno XAPI podporující relax-xsm-sr-check=true"
-
msgid "You are not allowed to delete the image."
msgstr "Nemáte oprávnění smazat tento obraz."
@@ -2868,16 +2520,6 @@ msgstr "Je dostupných nula plovoucích IP adres."
msgid "admin password can't be changed on existing disk"
msgstr "heslo správce nelze měnit na existujícím disku"
-msgid "aggregate deleted"
-msgstr "agregát smazán"
-
-msgid "aggregate in error"
-msgstr "agregát má chybu"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "assert_can_migrate selhalo protože: %s"
-
msgid "cannot understand JSON"
msgstr "JSON nelze porozumět"
@@ -2938,9 +2580,6 @@ msgstr "obraz již je připojen"
msgid "instance %s is not running"
msgstr "Instance %s není spuštěna"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr "Instance má kernel nebo ramdisk, ale ne oba"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr "instance je povinný argument pro použití @refresh_cache"
@@ -2993,9 +2632,6 @@ msgstr "zařízení nbd %s se nezobrazilo"
msgid "nbd unavailable: module not loaded"
msgstr "nbd nedostupné: modul nenaÄten"
-msgid "no hosts to remove"
-msgstr "žádní hostitelé pro odstranění"
-
#, python-format
msgid "no match found for %s"
msgstr "nebyla nalezena shoda pro %s"
@@ -3044,9 +2680,6 @@ msgstr ""
msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr "set_admin_password není tímto ovladaÄem nebo hostem instance zavedeno."
-msgid "setup in progress"
-msgstr "probíhá nastavování"
-
#, python-format
msgid "snapshot for %s"
msgstr "snímek pro %s"
@@ -3063,9 +2696,6 @@ msgstr "příliÅ¡ mnoho klíÄů tÄ›la"
msgid "unpause not supported for vmwareapi"
msgstr "zrušení pozastavení není v vmwareapi podporováno"
-msgid "version should be an integer"
-msgstr "verze by mÄ›la být celé Äíslo"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "vg %s musí být skupina svazku LVM"
@@ -3092,14 +2722,3 @@ msgid ""
"volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status"
msgstr ""
"stav svazku '%(vol)s' musí být 'in-use'. Nyní je ve stavu '%(status)s'."
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake nemá zavedeno %s"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr ""
-"xenapi.fake nemá zavedeno %s, nebo byl zavolán se Å¡patným poÄtem argumentů"
diff --git a/nova/locale/de/LC_MESSAGES/nova.po b/nova/locale/de/LC_MESSAGES/nova.po
index 6da448a4ca..32e7c52060 100644
--- a/nova/locale/de/LC_MESSAGES/nova.po
+++ b/nova/locale/de/LC_MESSAGES/nova.po
@@ -17,7 +17,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -147,18 +147,6 @@ msgid "Affinity instance group policy was violated."
msgstr "Gruppenrichtlinie von Affinitätsinstanz wurde nicht eingehalten."
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "Agent unterstützt den Aufruf nicht: %(method)s"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"Agentenbuild mit Hypervisor %(hypervisor)s, Betriebssystem %(os)s, "
-"Architektur %(architecture)s ist bereits vorhanden."
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "Aggregate %(aggregate_id)s hat bereits einen Host %(host)s."
@@ -177,13 +165,6 @@ msgstr ""
"%(metadata_key)s."
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr ""
-"Aggregat %(aggregate_id)s: Aktion '%(action)s' hat einen Fehler verursacht: "
-"%(reason)s."
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "Aggregat %(aggregate_name)s ist bereits vorhanden."
@@ -192,10 +173,6 @@ msgid "Aggregate %s does not support empty named availability zone"
msgstr "Aggregat %s unterstützt keine leeren, bezeichneten Verfügbarkeitszonen"
#, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "Aggregat für Host %(host)s konnte nicht gefunden werden. "
-
-#, python-format
msgid "An invalid 'name' value was provided. The name must be: %(reason)s"
msgstr ""
"Es wurde ein ungültiger 'name'-Wert angegeben. Der Name muss lauten: "
@@ -383,12 +360,6 @@ msgstr ""
"Authentifizierungsanforderung für %d-Berechtigungsnachweis kann nicht "
"verarbeitet werden"
-msgid "Can't resize a disk to 0 GB."
-msgstr "Die Größe einer Festplatte kann nicht auf 0 GB geändert werden."
-
-msgid "Can't resize down ephemeral disks."
-msgstr "Größe von inaktiven ephemeren Platten kann nicht geändert werden."
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
"Stammdatenträgerpfad kann nicht aus libvirt-Konfiguration der Instanz "
@@ -438,12 +409,6 @@ msgstr ""
"Übergeordneter Speicherpool für %s wurde nicht erkannt. Der Speicherort für "
"Abbilder kann nicht ermittelt werden."
-msgid "Cannot find SR of content-type ISO"
-msgstr "SR mit 'content-type' = ISO kann nicht gefunden werden"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "SR zum Lesen/Schreiben von VDI kann nicht gefunden werden."
-
msgid "Cannot find image for rebuild"
msgstr "Image für Wiederherstellung kann nicht gefunden werden"
@@ -580,10 +545,6 @@ msgid "Connection to libvirt lost: %s"
msgstr "Verbindung zu libvirt verloren: %s"
#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "Verbindung zum Hypervisor ist unterbrochen auf Host: %(host)s"
-
-#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
@@ -660,13 +621,6 @@ msgid "Default PBM policy is required if PBM is enabled."
msgstr "Standard-PBM-Richtlinie ist erforderlich, wenn PBM aktiviert ist."
#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "%(records)d Datensätze aus Tabelle '%(table_name)s' gelöscht."
-
-msgid "Destroy instance failed"
-msgstr "Zerstören der Instanz fehlgeschlagen"
-
-#, python-format
msgid "Device '%(device)s' not found."
msgstr "Das Gerät '%(device)s' wurde nicht gefunden."
@@ -674,13 +628,6 @@ msgstr "Das Gerät '%(device)s' wurde nicht gefunden."
msgid "Device detach failed for %(device)s: %(reason)s"
msgstr "Abhängen des Geräts fehlgeschlagen für %(device)s: %(reason)s"
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr ""
-"Angegebene Einheiten-ID %(id)s wird nicht unterstützt von Hypervisorversion "
-"%(version)s"
-
msgid "Device name contains spaces."
msgstr "Gerätename enthält Leerzeichen."
@@ -692,19 +639,6 @@ msgid "Device type mismatch for alias '%s'"
msgstr "Gerätetypabweichung für Alias '%s'"
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-"Verschiedenen Typen in %(table)s.%(column)s und der Spiegeltabelle: "
-"%(c_type)s %(shadow_c_type)s"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr ""
-"Platte enthält ein Dateisystem, dessen Größe nicht geändert werden kann: %s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "Datenträgerformat %(disk_format)s ist nicht zulässig"
@@ -712,13 +646,6 @@ msgstr "Datenträgerformat %(disk_format)s ist nicht zulässig"
msgid "Disk info file is invalid: %(reason)s"
msgstr "Datei mit Datenträgerinformationen ist ungültig: %(reason)s"
-msgid "Disk must have only one partition."
-msgstr "Festplatte darf nur eine Partition haben."
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr "Platte mit ID %s nicht an Instanz angehängt gefunden."
-
#, python-format
msgid "Driver Error: %s"
msgstr "Treiberfehler: %s"
@@ -736,10 +663,6 @@ msgstr ""
"ist noch '%(state)s'."
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "Fehler bei folgendem Aufruf an Agenten: %(method)s"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr "Fehler beim Aufnehmen von Instanz %(instance_id)s: %(reason)s"
@@ -791,9 +714,6 @@ msgstr "Fehler beim Einhängen %(image)s mit libguestfs (%(e)s)"
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Fehler beim Erstellen von Ressourcenüberwachung: %(monitor)s"
-msgid "Error: Agent is disabled"
-msgstr "Fehler: Agent ist deaktiviert"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "Ereignis %(event)s nicht gefunden für Aktions-ID '%(action_id)s'"
@@ -825,10 +745,6 @@ msgstr "Maximale Anzahl der WIederholungen überschritten. %(reason)s"
msgid "Expected a uuid but received %(uuid)s."
msgstr "UUID erwartet, aber %(uuid)s erhalten."
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "Zusatzspalte %(table)s.%(column)s in Spiegeltabelle"
-
msgid "Extracting vmdk from OVA failed."
msgstr "Extraktion von vmdk aus OVA fehlgeschlagen."
@@ -858,10 +774,6 @@ msgstr ""
"Anhängen des Netzwerkgeräteadapters an %(instance_uuid)s fehlgeschlagen"
#, python-format
-msgid "Failed to create vif %s"
-msgstr "Vif %s konnte nicht erstellt werden."
-
-#, python-format
msgid "Failed to deploy instance: %(reason)s"
msgstr "Instanz konnte nicht implementiert werden: %(reason)s"
@@ -879,10 +791,6 @@ msgid "Failed to encrypt text: %(reason)s"
msgstr "Fehler beim Verschlüsseln des Textes: %(reason)s"
#, python-format
-msgid "Failed to forget the SR for volume %s"
-msgstr "Fehler beim Vergessen des SR für Datenträger %s"
-
-#, python-format
msgid "Failed to launch instances: %(reason)s"
msgstr "Fehler beim Starten der Instanz: %(reason)s"
@@ -894,11 +802,6 @@ msgstr "Partitionen konnten nicht zugeordnet werden: %s"
msgid "Failed to mount filesystem: %s"
msgstr "Fehler beim Anhängen von Dateisystem: %s"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr ""
-"Fehler beim Analysieren von Informationen zu einer PCI-Einheit für "
-"Passthrough"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "Instanz konnte nicht ausgeschaltet werden: %(reason)s"
@@ -908,14 +811,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "Instanz konnte nicht eingeschaltet werden: %(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr ""
-"Vorbereitung des PCI Gerätes %(id)s für Instanz %(instance_uuid)s "
-"fehlgeschlagen: %(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "Fehler beim Bereitstellen der Instanz %(inst)s: %(reason)s"
@@ -953,9 +848,6 @@ msgstr ""
"Administratorkennwort für %(instance)s konnte nicht festgelegt werden "
"aufgrund von %(reason)s"
-msgid "Failed to spawn, rolling back"
-msgstr "Generierung nicht möglich, Rollback wird durchgeführt"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "Instanz konnte nicht ausgesetzt werden: %(reason)s"
@@ -965,10 +857,6 @@ msgid "Failed to terminate instance: %(reason)s"
msgstr "Instanz konnte nicht beendet werden: %(reason)s"
#, python-format
-msgid "Failed to unplug vif %s"
-msgstr "Vif %s konnte nicht entfernt werden."
-
-#, python-format
msgid "Failed to unplug virtual interface: %(reason)s"
msgstr "Virtuelle Schnittstelle konnte nicht entfernt werden: %(reason)s"
@@ -980,10 +868,6 @@ msgid "File %(file_path)s could not be found."
msgstr "Datei %(file_path)s konnte nicht gefunden werden."
#, python-format
-msgid "File path %s not valid"
-msgstr "Dateipfad %s nicht gültig"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr ""
"Feste IP %(ip)s ist keine gültige IP Adresse für Netzwerk %(network_id)s."
@@ -1116,21 +1000,6 @@ msgstr ""
msgid "Found no disk to snapshot."
msgstr "Es wurde keine Platte für eine Momentaufnahme gefunden."
-#, python-format
-msgid "Found no network for bridge %s"
-msgstr "Kein Netz für Brücke %s gefunden"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "Nicht eindeutiges Netz für Brücke %s gefunden"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "Nicht eindeutiges Netz für name_label %s gefunden"
-
-msgid "Guest does not have a console available"
-msgstr "Gast hat keine Konsole verfügbar"
-
msgid "Guest does not have a console available."
msgstr "Für Gast ist keine Konsole verfügbar."
@@ -1159,9 +1028,6 @@ msgid "Host does not support guests with custom memory page sizes"
msgstr ""
"Host unterstützt keine Gäste mit benutzerdefinierter Speicherseitengröße"
-msgid "Host startup on XenServer is not supported."
-msgstr "Hoststart auf XenServer wird nicht unterstützt."
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
"Hypervisortreiber unterstützt die Methode post_live_migration_at_source nicht"
@@ -1394,10 +1260,6 @@ msgstr "Instanzgröße wurde nicht angepasst."
msgid "Instance hostname %(hostname)s is not a valid DNS name"
msgstr "Instanzhostname %(hostname)s ist kein gültiger DNS-Name."
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "Instanz ist bereits im Rettungsmodus: %s"
-
msgid "Instance is not a member of specified network"
msgstr "Instanz ist nicht Mitglied des angegebenen Netzes"
@@ -1419,12 +1281,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "Nicht genug Compute Ressourcen: %(reason)s"
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr ""
-"Nicht genügend freier Speicherplatz auf Rechenknoten zum Starten von "
-"%(uuid)s."
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "Schnittstelle %(interface)s nicht gefunden."
@@ -1624,13 +1480,6 @@ msgstr ""
"Das Erstellen einer Schnittstelle auf externem Netz %(network_uuid)s ist "
"nicht zulässig"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr ""
-"Kernel-/RAM-Plattenimage ist zu groß: %(vdi_size)d Byte, maximal "
-"%(max_size)d Byte"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1654,14 +1503,6 @@ msgstr "Schlüsselpaar %(name)s für Benutzer %(user_id)s nicht gefunden"
msgid "Keypair data is invalid: %(reason)s"
msgstr "Schlüsselpaardaten ungültig: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Name von Schlüsselpaar enthält unsichere Zeichen"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"Name des Schlüsselpares muss eine Zeichenkette und zwischen 1 und 255 "
-"Zeichen lang sein."
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Grenzwerte werden nur von vCenter ab Version 6.0 unterstützt."
@@ -1695,9 +1536,6 @@ msgstr "Marker %(marker)s konnte nicht gefunden werden. "
msgid "Maximum number of floating IPs exceeded"
msgstr "Maximale Anzahl an Floating IPs überschritten"
-msgid "Maximum number of key pairs exceeded"
-msgstr "Maximale Anzahl an Schlüsselpaaren überschritten"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "Maximale Anzahl an Metadatenelementen überschreitet %(allowed)d"
@@ -1728,12 +1566,6 @@ msgstr ""
"Messwert %(name)s konnte auf dem Rechenhostknoten %(host)s.%(node)s nicht "
"gefunden werden."
-msgid "Migrate Receive failed"
-msgstr "Empfangen der Migration fehlgeschlagen"
-
-msgid "Migrate Send failed"
-msgstr "Senden der Migration fehlgeschlagen"
-
#, python-format
msgid "Migration %(id)s for server %(uuid)s is not live-migration."
msgstr "Die Migration %(id)s für den Server %(uuid)s ist keine Livermigration."
@@ -1780,10 +1612,6 @@ msgstr "Fehler für ausgewählte Migrationsziele: %(reason)s"
msgid "Missing arguments: %s"
msgstr "Fehlende Argumente: %s"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "Fehlende Spalte %(table)s.%(column)s in Spiegeltabelle"
-
msgid "Missing device UUID."
msgstr "Fehlende Gerät-UUID."
@@ -1867,14 +1695,6 @@ msgid "Must not input both network_id and port_id"
msgstr "Es dürfen nicht sowohl network_id als auch port_id eingegeben werden"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"Angabe von connection_url, connection_username (optional) und "
-"connection_password erforderlich zum Verwenden von compute_driver=xenapi."
-"XenAPIDriver"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1932,10 +1752,6 @@ msgstr "Keine Block-Geräte-Zuordnung mit ID %(id)s."
msgid "No Unique Match Found."
msgstr "Keine eindeutige Ãœbereinstimmung gefunden."
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "Kein Agenten-Build ist ID '%(id)s' zugeordnet."
-
msgid "No compute host specified"
msgstr "Kein Rechenhost angegeben "
@@ -1970,10 +1786,6 @@ msgstr "Keine freien nbd-Geräte"
msgid "No host available on cluster"
msgstr "Kein Host verfügbar auf Cluster"
-#, python-format
-msgid "No host with name %s found"
-msgstr "Kein Host mit dem Namen %s gefunden"
-
msgid "No hosts found to map to cell, exiting."
msgstr "Keine Host für die Zuordnung zur Zelle gefunden. Wird beendet."
@@ -2007,10 +1819,6 @@ msgstr "Kein Einhängepunkt gefunden in %(root)s von %(image)s"
msgid "No operating system found in %s"
msgstr "Kein Betriebssystem gefunden in %s"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "Kein primäres VDI für %s gefunden"
-
msgid "No root disk defined."
msgstr "Keine Root-Festplatte bestimmt."
@@ -2022,9 +1830,6 @@ msgstr ""
"Es wurde kein bestimmtes Netzwerk angefordert und für das Projekt "
"'%(project_id)s' ist kein Netzwerk verfügbar."
-msgid "No suitable network for migrate"
-msgstr "Kein geeignetes Netzwerk zum Migrieren"
-
msgid "No valid host found for cold migrate"
msgstr "Keinen gültigen Host gefunden für Migration ohne Daten"
@@ -2113,14 +1918,6 @@ msgstr "Nur %d SCSI-Controller dürfen in dieser Instanz erstellt werden."
msgid "Only administrators may list deleted instances"
msgstr "Nur Administratoren können gelöschte Instanzen auflisten"
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr ""
-"Nur dateibasierte SRs (ext/NFS) werden von dieser Funktion unterstützt. SR "
-"%(uuid)s weist den Typ %(type)s auf"
-
msgid "Origin header does not match this host."
msgstr "Ursprungsheader stimmt nicht mit diesem Host überein."
@@ -2163,10 +1960,6 @@ msgid "PCI device request %(requests)s failed"
msgstr "PCI-Geräteanforderung %(requests)s fehlgeschlagen"
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "PIF %s enthält keine IP-Adresse"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "Seitengröße %(pagesize)s nicht zulässig für '%(against)s'"
@@ -2286,10 +2079,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "Quote überschritten, zu viele Server in Gruppe"
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "Quote überschritten: code=%(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr ""
"Für Projekt %(project_id)s, Ressource %(resource)s ist eine Quote vorhanden"
@@ -2321,18 +2110,6 @@ msgstr ""
"Quotengrenzwert %(limit)s für %(resource)s muss kleiner-gleich %(maximum)s "
"sein."
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr "Maximale Anzahl an Wiederholungen für das Trennen von VBD %s erreicht"
-
-msgid ""
-"Realtime policy needs vCPU(s) mask configured with at least 1 RT vCPU and 1 "
-"ordinary vCPU. See hw:cpu_realtime_mask or hw_cpu_realtime_mask"
-msgstr ""
-"Die Echtzeitrichtlinie erfordert eine mit 1 RT-vCPU und 1 normalen vCPU "
-"konfigurierte vCPU(s)-Maske. Weitere Informationen finden Sie unter 'hw:"
-"cpu_realtime_mask' bzw. 'hw_cpu_realtime_mask'."
-
msgid "Request body and URI mismatch"
msgstr "Abweichung zwischen Anforderungshauptteil und URI"
@@ -2509,10 +2286,6 @@ msgid "Set admin password is not supported"
msgstr "Das Setzen des Admin-Passwortes wird nicht unterstützt"
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "Spiegeltabelle mit dem Namen '%(name)s' ist bereits vorhanden."
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "Das freigegebene Verzeichnis '%s' wird nicht unterstützt"
@@ -2520,13 +2293,6 @@ msgstr "Das freigegebene Verzeichnis '%s' wird nicht unterstützt"
msgid "Share level '%s' cannot have share configured"
msgstr "Geteilte Ebene '%s' kann keine geteilte Konfigration haben"
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"Verkleinern des Dateisystems mit resize2fs ist fehlgeschlagen; überprüfen "
-"Sie, ob auf Ihrer Platte noch genügend freier Speicherplatz vorhanden ist."
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "Momentaufnahme %(snapshot_id)s konnte nicht gefunden werden."
@@ -2552,12 +2318,6 @@ msgstr "Der angegebene Sortierschlüssel war nicht gültig. "
msgid "Specified fixed address not assigned to instance"
msgstr "Angegebene statische Adresse ist nicht der Instanz zugeordnet"
-msgid "Specify `table_name` or `table` param"
-msgstr "Geben Sie den Parameter `table_name` oder `table` an"
-
-msgid "Specify only one param `table_name` `table`"
-msgstr "Geben Sie nur einen Parameter an, `table_name` oder `table`"
-
msgid "Started"
msgstr "Gestartet"
@@ -2637,9 +2397,6 @@ msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
msgstr ""
"Die Anzahl der definierten Ports (%(ports)d) ist über dem Limit: %(quota)d"
-msgid "The only partition should be partition 1."
-msgstr "Die einzige Partition sollte Partition 1 sein."
-
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr ""
@@ -2705,50 +2462,13 @@ msgstr ""
"Der Datenträger kann nicht zum delben Gerätenamen wir das Root Gerät %s "
"zugewiesen werden"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"Es gibt %(records)d Datensätze in der Tabelle '%(table_name)s', bei denen "
-"die Spalte 'uuid' oder 'instance_uuid' NULL ist. Führen Sie diesen Befehl "
-"erneut mit der Option --delete aus, nachdem Sie alle erforderlichen Daten "
-"gesichert haben."
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"Es gibt %(records)d Datensätze in der Tabelle '%(table_name)s', bei denen "
-"die Spalte 'uuid' oder' instance_uuid' NULL ist. Diese müssen manuell "
-"bereinigt werden, bevor die Migration übergeben wird. Möglicherweise sollten "
-"Sie den Befehl 'nova-manage db null_instance_uuid_scan' ausführen."
-
msgid "There are not enough hosts available."
msgstr "Es sind nicht genügend Hosts verfügbar."
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"Es gibt immer noch %(count)i nicht migrierte Versionseinträge. Migration "
-"kann nicht fortgesetzt werden, solange nicht alle Instanz-Versionseinträge "
-"zum neuen Format migriert sind. Bitte starten Sie zuerst `nova-manage db "
-"migrate_flavor_data'."
-
-#, python-format
msgid "There is no such action: %s"
msgstr "Aktion existiert nicht: %s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr "Es wurden keine Datensätze gefunden, in denen instance_uuid NULL war."
-
#, python-format
msgid ""
"This compute node's hypervisor is older than the minimum supported version: "
@@ -2757,11 +2477,6 @@ msgstr ""
"Die Hypervisorversion des Compute-Knotens ist älter als die Version, die für "
"die Mindestunterstützung erforderlich ist: %(version)s."
-msgid "This domU must be running on the host specified by connection_url"
-msgstr ""
-"Diese domU muss auf dem von 'connection_url' angegebenen Host ausgeführt "
-"werden"
-
msgid ""
"This method needs to be called with either networks=None and port_ids=None "
"or port_ids and networks as not none."
@@ -2782,10 +2497,6 @@ msgstr ""
"Dieser Dienst ist älter (v%(thisver)i) als die Mindestversion (v%(minver)i) "
"der übrigen Implementierung. Fortfahren nicht möglich. "
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "Zeitüberschreitung bei der Erstellung des Gerätes: %s"
-
msgid "Timeout waiting for response from cell"
msgstr "Zeitüberschreitung beim Warten auf Antwort von der Zelle"
@@ -2828,12 +2539,6 @@ msgid "Unable to authenticate Ironic client."
msgstr "Ironic-Client kann nicht authentifiziert werden. "
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr ""
-"Gastagent konnte nicht kontaktiert werden. Der folgende Aufruf ist "
-"abgelaufen: %(method)s"
-
-#, python-format
msgid "Unable to convert image to %(format)s: %(exp)s"
msgstr "Abbild kann nicht konvertiert werden in %(format)s: %(exp)s"
@@ -2842,14 +2547,6 @@ msgid "Unable to convert image to raw: %(exp)s"
msgstr "Abbild kann nicht in ein Rohformat konvertiert werden: %(exp)s"
#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "VBD %s kann nicht gelöscht werden"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "VDI %s kann nicht gelöscht werden"
-
-#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "Plattenbus für '%s' kann nicht bestimmt werden"
@@ -2858,29 +2555,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "Plattenpräfix für %s kann nicht bestimmt werden"
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr ""
-"%s kann nicht aus Pool entnommen werden; keine übergeordnete Einheit gefunden"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "%s kann nicht aus Pool entnommen werden; Pool ist nicht leer"
-
-msgid "Unable to find SR from VBD"
-msgstr "Konnte kein SR finden für VBD"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "SR kann nicht ausgehend von VBD '%s' gefunden werden"
-
-msgid "Unable to find SR from VDI"
-msgstr "Konnte kein SR finden für VDI"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "SR von VDI %s kann nicht gefunden werden"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "'ca_file' konnte nicht gefunden werden: %s"
@@ -2899,9 +2573,6 @@ msgstr "iSCSI-Ziel konnte nicht gefunden worden"
msgid "Unable to find key_file : %s"
msgstr "'key_file' konnte nicht gefunden werden: %s"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "Root-VBD/VDI für VM kann nicht gefunden werden"
-
msgid "Unable to find volume"
msgstr "Datenträger kann nicht gefunden werden"
@@ -2911,31 +2582,6 @@ msgstr "Host UUID kann nicht abgerufen werden: /etc/machine-id existiert nicht"
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr "Host UUID kann nicht abgerufen werden: /etc/machine-id ist leer"
-msgid "Unable to get record of VDI"
-msgstr "Konnte keinen EIntrag für VDI beziehen"
-
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "Datensatz von VDI %s kann nicht abgerufen werden auf"
-
-msgid "Unable to introduce VDI for SR"
-msgstr "Bekanntmachung VDI an SR nicht möglich"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "VDI kann für SR %s nicht eingeführt werden"
-
-msgid "Unable to introduce VDI on SR"
-msgstr "Bekanntmachung VDI an SR nicht möglich"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "VDI kann nicht in SR '%s' eingeführt werden"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "Verknüpfung von %s im Pool nicht möglich"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2967,10 +2613,6 @@ msgstr ""
"Instanz (%(instance_id)s) kann nicht auf aktuellen Host (%(host)s) migriert "
"werden."
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "Zielinformationen zu %s können nicht abgerufen werden"
-
msgid "Unable to resize disk down."
msgstr "Größe der inaktiven Platte kann nicht geändert werden."
@@ -2980,13 +2622,6 @@ msgstr "Es kann kein Kennwort für die Instanz festgelegt werden"
msgid "Unable to shrink disk."
msgstr "Platte kann nicht verkleinert werden."
-msgid "Unable to terminate instance."
-msgstr "Instanz kann nicht beendet werden."
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "Verbindung zu VBD %s kann nicht getrennt werden"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "Unzulässige CPU Information: %(reason)s"
@@ -3006,16 +2641,6 @@ msgstr ""
"BlockDeviceMappingList enthält Blockgerätezuordnungen aus mehreren Instanzen."
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"Unerwarteter API-Fehler. Melden Sie ihn unter http://bugs.launchpad.net/"
-"nova/ und hängen Sie das Nova-API-Protokoll an, falls möglich.\n"
-"%s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "Unerwartete Aggregataktion %s"
@@ -3081,11 +2706,6 @@ msgstr "Aufnahme wurde versucht, aber das Image %s kann nicht gefunden werden."
msgid "Unsupported Content-Type"
msgstr "Nicht unterstützter Inhaltstyp"
-msgid "Upgrade DB using Essex release first."
-msgstr ""
-"Führen Sie zuerst ein Upgrade für die Datenbank unter Verwendung des Essex-"
-"Release durch."
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr "Benutzer %(username)s in Kennwortdatei nicht gefunden."
@@ -3108,25 +2728,6 @@ msgstr ""
"erlaubt in der selben Anfrage."
#, python-format
-msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"VDI %(vdi_ref)s hat eine Größe von %(virtual_size)d Bytes und ist damit "
-"größer als die Versionsgröße von %(new_disk_size)d Bytes."
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"VDI nicht gefunden auf SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun "
-"'%(target_lun)s')"
-
-#, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr "Mehr als (%d) VHD-Verbindungsversuche unternommen, Abbruch ..."
-
-#, python-format
msgid "Value must match %s"
msgstr "Wert muss %s entsprechen"
@@ -3213,13 +2814,6 @@ msgstr ""
msgid "Volume type %(id_or_name)s could not be found."
msgstr "Datenträgertyp %(id_or_name)s wurde nicht gefunden."
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr ""
-"Wir unterstützen Schema '%s' unter Python bis Version 2.7.4 nicht, verwenden "
-"Sie bitte HTTP oder HTTPS"
-
msgid "When resizing, instances must change flavor!"
msgstr "Beim Ändern der Größe muss die Version der Instanzen geändert werden!"
@@ -3235,10 +2829,6 @@ msgstr ""
msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr "Falsche Quotenmethode %(method)s für Ressource %(res)s verwendet"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr ""
-"Falscher Typ von Hookmethode. Nur die Typen 'pre' und 'post' sind zulässig"
-
msgid "X-Forwarded-For is missing from request."
msgstr "X-Forwarded-For wird in der Anfrage vermisst."
@@ -3254,9 +2844,6 @@ msgstr "X-Metadata-Provider wird in der Anfrage vermisst."
msgid "X-Tenant-ID header is missing from request."
msgstr "X-Tenant-ID-Header fehlt in Anforderung."
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "XAPI mit Unterstützung für 'relax-xsm-sr-check=true' erforderlich"
-
msgid "You are not allowed to delete the image."
msgstr "Sie sind nicht berechtigt, dieses Image zu löschen."
@@ -3286,20 +2873,6 @@ msgstr ""
"Das Administrator Passwort kann nicht auf der bestehenden Festplatte "
"geändert werden"
-msgid "aggregate deleted"
-msgstr "Aggregat gelöscht"
-
-msgid "aggregate in error"
-msgstr "Aggregat fehlerhaft"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "assert_can_migrate fehlgeschlagen. Ursache: %s"
-
-#, python-format
-msgid "attach network interface %s failed."
-msgstr "Anhängen von Netzwerkschnittstelle %s fehlgeschlagen."
-
msgid "cannot understand JSON"
msgstr "kann JSON nicht verstehen"
@@ -3314,10 +2887,6 @@ msgstr "Verbindungsinfo: %s"
msgid "connecting to: %(host)s:%(port)s"
msgstr "verbinden mit: %(host)s:%(port)s"
-#, python-format
-msgid "detach network interface %s failed."
-msgstr "Abtrennen von Netzwerkschnittstelle %s fehlgeschlagen."
-
msgid "direct_snapshot() is not implemented"
msgstr "direct_snapshot() ist nicht implementiert"
@@ -3367,9 +2936,6 @@ msgstr "Abbild bereits eingehängt"
msgid "instance %s is not running"
msgstr "Instanz %s wird nicht ausgeführt"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr "Instanz weist Kernel oder RAM-Platte auf, aber nicht beides"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr ""
"Instanz ist ein erforderliches Argument für die Verwendung von "
@@ -3427,9 +2993,6 @@ msgstr "NBD-Einheit %s wurde nicht angezeigt"
msgid "nbd unavailable: module not loaded"
msgstr "nbd nicht verfügbar: Modul nicht geladen"
-msgid "no hosts to remove"
-msgstr "Keine Hosts zum Entfernen vorhanden"
-
#, python-format
msgid "no match found for %s"
msgstr "keine Übereinstimmung gefunden für %s"
@@ -3505,9 +3068,6 @@ msgstr ""
"'set_admin_password' wird von diesem Treiber oder dieser Gastinstanz nicht "
"implementiert"
-msgid "setup in progress"
-msgstr "Konfiguration in Bearbeitung"
-
#, python-format
msgid "snapshot for %s"
msgstr "Momentaufnahme für %s"
@@ -3524,9 +3084,6 @@ msgstr "zu viele Textschlüssel"
msgid "unpause not supported for vmwareapi"
msgstr "'unpause' nicht unterstützt für 'vmwareapi'"
-msgid "version should be an integer"
-msgstr "Version sollte eine Ganzzahl sein"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "Datenträgergruppe '%s' muss sich in LVM-Datenträgergruppe befinden"
@@ -3553,15 +3110,3 @@ msgid ""
msgstr ""
"Status von Datenträger '%(vol)s' muss 'in-use' lauten. Weist derzeit den "
"Status '%(status)s' auf"
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake hat keine Implementierung für %s"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr ""
-"xenapi.fake weist keine Implementierung für %s auf oder wurde mit einer "
-"falschen Anzahl von Argumenten aufgerufen"
diff --git a/nova/locale/es/LC_MESSAGES/nova.po b/nova/locale/es/LC_MESSAGES/nova.po
index 3eea84f917..4edd7b0ae3 100644
--- a/nova/locale/es/LC_MESSAGES/nova.po
+++ b/nova/locale/es/LC_MESSAGES/nova.po
@@ -16,7 +16,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -145,18 +145,6 @@ msgid "Affinity instance group policy was violated."
msgstr "Se ha infringido la política de afinidad de instancia de grupo "
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "El agente no soporta la llamada %(method)s"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"Compilación agente con hipervisor %(hypervisor)s S.O. %(os)s arquitectura "
-"%(architecture)s existe."
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "El agregado %(aggregate_id)s ya tiene el host %(host)s."
@@ -175,13 +163,6 @@ msgstr ""
"%(metadata_key)s."
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr ""
-"Agregado %(aggregate_id)s: la acción '%(action)s' ha producido un error: "
-"%(reason)s."
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "El agregado %(aggregate_name)s ya existe."
@@ -191,10 +172,6 @@ msgstr ""
"El agregado %s no admite una zona de disponibilidad con el nombre vacío"
#, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "No se ha podido encontrar el agregado para el host %(host)s. "
-
-#, python-format
msgid "An invalid 'name' value was provided. The name must be: %(reason)s"
msgstr ""
"Se ha proporcionado un valor no válido en el campo 'name'. El nombre debe "
@@ -385,12 +362,6 @@ msgid "Can not handle authentication request for %d credentials"
msgstr ""
"No se puede manejar la solicitud de autenticación para las credenciales %d"
-msgid "Can't resize a disk to 0 GB."
-msgstr "No se puede cambiar el tamaño de archivo a 0 GB."
-
-msgid "Can't resize down ephemeral disks."
-msgstr "No se puede reducir el tamaño de los discos efímeros."
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
"No se puede recuperar la vía de acceso ed dispositivo raíz de la "
@@ -435,12 +406,6 @@ msgstr ""
"No se puede determinar la agrupación de almacenamiento padre para %s; no se "
"puede determinar dónde se deben almacenar las imágenes"
-msgid "Cannot find SR of content-type ISO"
-msgstr "No se puede encontrar SR de content-type ISO"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "No se puede encontrar SR para leer/grabar VDI."
-
msgid "Cannot find image for rebuild"
msgstr "No se puede encontrar la imagen para reconstrucción "
@@ -566,10 +531,6 @@ msgid "Connection to libvirt lost: %s"
msgstr "Conexión hacia libvirt perdida: %s"
#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "La conexión al hipervisor está perdida en el anfitrión: %(host)s"
-
-#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
@@ -646,20 +607,9 @@ msgid "Default PBM policy is required if PBM is enabled."
msgstr "Se requiere una política PBM por defecto si se habilita PBM."
#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "Se han eliminado %(records)d registros de la tabla '%(table_name)s'."
-
-#, python-format
msgid "Device '%(device)s' not found."
msgstr "No se ha encontrado el disposisitvo'%(device)s'."
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr ""
-"El dispositivo con identificador %(id)s especificado no está soportado por "
-"la versión del hipervisor %(version)s"
-
msgid "Device name contains spaces."
msgstr "El nombre del dispositivo contiene espacios."
@@ -671,19 +621,6 @@ msgid "Device type mismatch for alias '%s'"
msgstr "Discrepancia de tipo de dispositivo para el alias '%s'"
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-"Diferentes tipos en %(table)s.%(column)s y la tabla shadow: %(c_type)s "
-"%(shadow_c_type)s"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr ""
-"El disco contiene un sistema de archivos incapaz de modificar su tamaño: %s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "Formato de disco %(disk_format)s no es aceptable"
@@ -691,13 +628,6 @@ msgstr "Formato de disco %(disk_format)s no es aceptable"
msgid "Disk info file is invalid: %(reason)s"
msgstr "El archivo de información de disco es inválido: %(reason)s"
-msgid "Disk must have only one partition."
-msgstr "el disco debe tener una sola partición."
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr "Disco identificado como: %s no se ha encontrado adjunto a la instancia"
-
#, python-format
msgid "Driver Error: %s"
msgstr "Error de dispositivo: %s"
@@ -715,10 +645,6 @@ msgstr ""
"es '%(state)s'."
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "Error durante la siguiente llamada al agente: %(method)s"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr ""
"Error durante la extracción de la instancia %(instance_id)s: %(reason)s"
@@ -771,9 +697,6 @@ msgstr "Error al montar %(image)s con libguestfs (%(e)s)"
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Error al crear monitor de recursos: %(monitor)s"
-msgid "Error: Agent is disabled"
-msgstr "Error: El agente está inhabilitado"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr ""
@@ -807,10 +730,6 @@ msgstr "Se ha excedido el número máximo de intentos. %(reason)s"
msgid "Expected a uuid but received %(uuid)s."
msgstr "Se esperaba un uuid pero se ha recibido %(uuid)s."
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "Columna extra %(table)s.%(column)s en la tabla shadow"
-
msgid "Extracting vmdk from OVA failed."
msgstr "Error al extraer vmdk de OVA."
@@ -838,10 +757,6 @@ msgid "Failed to attach network adapter device to %(instance_uuid)s"
msgstr "Error al conectar el dispositivo adaptador de red a %(instance_uuid)s"
#, python-format
-msgid "Failed to create vif %s"
-msgstr "Error al crear la VIF %s"
-
-#, python-format
msgid "Failed to deploy instance: %(reason)s"
msgstr "Fallo al desplegar instancia: %(reason)s"
@@ -870,9 +785,6 @@ msgstr "No se han podido correlacionar particiones: %s"
msgid "Failed to mount filesystem: %s"
msgstr "Fallo al montar el sistema de ficheros: %s"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr "Fallo al pasar información sobre el dispositivo pci para el traspaso"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "Fallo al apagar la instancia: %(reason)s"
@@ -882,14 +794,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "Fallo al arrancar la instancia: %(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr ""
-"Fallo al preparar el dispositivo PCI %(id)s para la instancia "
-"%(instance_uuid)s: %(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "Fallo al proporcionar instancia la instancia %(inst)s: %(reason)s"
@@ -925,9 +829,6 @@ msgstr ""
"No se ha podido establecer la contraseña de administrador en %(instance)s "
"debido a %(reason)s"
-msgid "Failed to spawn, rolling back"
-msgstr "No se ha podido generar, retrotrayendo"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "Fallo al suspender instancia: %(reason)s"
@@ -936,10 +837,6 @@ msgstr "Fallo al suspender instancia: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "Fallo al terminar la instancia: %(reason)s"
-#, python-format
-msgid "Failed to unplug vif %s"
-msgstr "No se ha podido desconectar la VIF %s"
-
msgid "Failure prepping block device."
msgstr "Fallo al preparar el dispositivo de bloque."
@@ -948,10 +845,6 @@ msgid "File %(file_path)s could not be found."
msgstr "No se ha podido encontrar el archivo %(file_path)s."
#, python-format
-msgid "File path %s not valid"
-msgstr "La vía de acceso de archivo %s no es válida"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr ""
"La IP fija %(ip)s no es una direccion IP valida para la red %(network_id)s."
@@ -1083,18 +976,6 @@ msgstr ""
msgid "Found no disk to snapshot."
msgstr "No se ha encontrado disco relacionado a instantánea."
-#, python-format
-msgid "Found no network for bridge %s"
-msgstr "No se ha encontrado red para el puente %s"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "Encontrada una red no única para el puente %s"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "Se ha encontrado una red no exclusiva para name_label %s"
-
msgid "Guest does not have a console available."
msgstr "El invitado no tiene una consola disponible."
@@ -1123,9 +1004,6 @@ msgid "Host does not support guests with custom memory page sizes"
msgstr ""
"Host no soporta invitados con tamaños de página de memoria perzonalizados"
-msgid "Host startup on XenServer is not supported."
-msgstr "No se soporta el arranque de host en XenServer."
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
"El controlador del hipervisor no soporta método post_live_migration_at_source"
@@ -1357,10 +1235,6 @@ msgstr "La instancia no se ha redimensionado."
msgid "Instance hostname %(hostname)s is not a valid DNS name"
msgstr "El nombre de host de instancia %(hostname)s no es un nombre DNS válido"
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "La instancia ya está en modalidad de rescate: %s "
-
msgid "Instance is not a member of specified network"
msgstr "La instancia no es miembro de la red especificada"
@@ -1381,11 +1255,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "Recursos de cómputo insuficientes: %(reason)s."
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr ""
-"No hay suficiente memoria libre en el nodo de cálculo para iniciar %(uuid)s."
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "No se ha encontrado la interfaz %(interface)s."
@@ -1582,13 +1451,6 @@ msgid ""
msgstr ""
"No está permitido crear una interfaz en una red externa %(network_uuid)s"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr ""
-"La imagen de kernel/disco RAM es demasiado grande: %(vdi_size)d bytes, máx. "
-"%(max_size)d bytes"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1613,13 +1475,6 @@ msgstr ""
msgid "Keypair data is invalid: %(reason)s"
msgstr "El conjunto de claves son inválidos: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "El nombre de par de claves contiene caracteres no seguros"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"El nombre de par de claves debe ser serial y contener de 1 a 255 caracteres"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Sólo se admiten límites a partir de vCenter 6.0 "
@@ -1653,9 +1508,6 @@ msgstr "No se ha podido encontrar el marcador %(marker)s."
msgid "Maximum number of floating IPs exceeded"
msgstr "Se ha superado el número máximo de IP flotantes"
-msgid "Maximum number of key pairs exceeded"
-msgstr "Se ha superado el número máximo de pares de claves"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "El número máximo de elementos de metadatos supera %(allowed)d"
@@ -1686,12 +1538,6 @@ msgstr ""
"La métrica %(name)s no se puede encontrar en el nodo de cómputo anfitrión "
"%(host)s:%(node)s."
-msgid "Migrate Receive failed"
-msgstr "Ha fallado la recepción de migración"
-
-msgid "Migrate Send failed"
-msgstr "Ha fallado el envío de migración"
-
#, python-format
msgid "Migration %(id)s for server %(uuid)s is not live-migration."
msgstr ""
@@ -1742,10 +1588,6 @@ msgstr "Error de selección de destinos de migración: %(reason)s"
msgid "Missing arguments: %s"
msgstr "Faltan argumentos: %s"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "Columna omitida %(table)s.%(column)s en la tabla de shadow"
-
msgid "Missing device UUID."
msgstr "Dispositivo UUID perdido."
@@ -1833,13 +1675,6 @@ msgid "Must not input both network_id and port_id"
msgstr "No se debe ingresar ni a network_id ni a port_id"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"Se debe especificar connection_url, connection_username (opcionalmente, y "
-"connection_password para utilizar compute_driver=xenapi.XenAPIDriver"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1895,10 +1730,6 @@ msgstr "No hay mapeo de dispositivo de bloque identificado como %(id)s."
msgid "No Unique Match Found."
msgstr "No se ha encontrado una sola coincidencia."
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "No hay ninguna compilación de agente asociada con el id %(id)s."
-
msgid "No compute host specified"
msgstr "No se ha especificado ningún host de cálculo"
@@ -1968,10 +1799,6 @@ msgstr "No se han encontrado puntos de montaje en %(root)s de %(image)s"
msgid "No operating system found in %s"
msgstr "No se ha encontrado ningún sistema operativo en %s"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "No se ha encontrado VDI primario para %s"
-
msgid "No root disk defined."
msgstr "No se ha definido un disco raíz."
@@ -1983,9 +1810,6 @@ msgstr ""
"No se ha solicitado ninguna red específica y no hay ninguna disponible para "
"el proyecto '%(project_id)s'."
-msgid "No suitable network for migrate"
-msgstr "No hay red adecuada para migrar"
-
msgid "No valid host found for cold migrate"
msgstr "No se ha encontrado anfitrión para migración en frío"
@@ -2067,14 +1891,6 @@ msgstr "Uno o más hosts ya se encuentran en zona(s) de disponibilidad %s"
msgid "Only administrators may list deleted instances"
msgstr "Sólo los administradores pueden listar instancias suprimidas "
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr ""
-"Solo los SRs basados en archivo (ext/NFS) están soportados por esta "
-"característica. SR %(uuid)s es del tipo %(type)s"
-
msgid "Origin header does not match this host."
msgstr "Cabecera de origen no coincide con este host."
@@ -2117,10 +1933,6 @@ msgid "PCI device request %(requests)s failed"
msgstr "La solicitud de dispositivo PCI %(requests)s ha fallado"
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "PIC %s no contiene una dirección IP"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "El tamaño de página %(pagesize)s no es permitido por '%(against)s'"
@@ -2239,10 +2051,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "Capacidad excedida, demasiados servidores en grupo"
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "Cuota excedida: código=%(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr "Cuota existente para el proyecto %(project_id)s, recurso %(resource)s"
@@ -2273,18 +2081,6 @@ msgstr ""
"Capacidad límite %(limit)s para %(resource)s debe ser menor o igual que "
"%(maximum)s."
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr "Se ha alcanzado el número máximo de reintentos de desconectar VBD %s "
-
-msgid ""
-"Realtime policy needs vCPU(s) mask configured with at least 1 RT vCPU and 1 "
-"ordinary vCPU. See hw:cpu_realtime_mask or hw_cpu_realtime_mask"
-msgstr ""
-"La política en tiempo real requiere una máscara de vCPU(s) con al menos 1 RT "
-"vCPU y una vCPU ordinaria. Consulte hw:cpu_realtime_mask o "
-"hw_cpu_realtime_mask"
-
msgid "Request body and URI mismatch"
msgstr "Discrepancia de URI y cuerpo de solicitud"
@@ -2458,10 +2254,6 @@ msgid "Set admin password is not supported"
msgstr "No se soporta el establecer de la constraseña del admin"
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "Una Tabla Shadow con nombre %(name)s ya existe."
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "Compartido %s no está soportado."
@@ -2469,13 +2261,6 @@ msgstr "Compartido %s no está soportado."
msgid "Share level '%s' cannot have share configured"
msgstr "Nivel compartido '%s' no puede tener configurado compartido"
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"La reducción del sistema de archivos con resize2fs ha fallado, por favor "
-"verifica si tienes espacio libre suficiente en tu disco."
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "No se ha podido encontrar la instantánea %(snapshot_id)s."
@@ -2502,12 +2287,6 @@ msgstr "La clave de clasificación proporcionada no es válida. "
msgid "Specified fixed address not assigned to instance"
msgstr "Dirección fija especificada no asignada a la instancia"
-msgid "Specify `table_name` or `table` param"
-msgstr "Especificar parámetro `table_name` o `table`"
-
-msgid "Specify only one param `table_name` `table`"
-msgstr "Especificar solamente un parámetro `table_name` `table`"
-
msgid "Started"
msgstr "Arrancado"
@@ -2582,9 +2361,6 @@ msgstr ""
msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
msgstr "El número de puertos definidos: %(ports)d es más del límite: %(quota)d"
-msgid "The only partition should be partition 1."
-msgstr "La unica partición debe ser la partición 1."
-
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr ""
@@ -2651,50 +2427,13 @@ msgstr ""
"No se puede asignar al volumen el mismo nombre de dispositivo del "
"dispositivo principal %s"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"Hay %(records)d registros en la '%(table_name)s' tabla donde el uuid o "
-"columna instance_uuid es NO VÃLIDA. Ejecute de nuevo este comando con la "
-"opción --eliminar después hacer una copia de seguridad de cualquier "
-"información necesaria."
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"Hay %(records)d registros en la '%(table_name)s' tabla donde el uuid o "
-"columna instance_uuid es NO VÃLIDA. Estos de deben limpiar manualmente antes "
-"de autorizar la migración. Considere ejecutar el comando 'nova-manage db "
-"null_instance_uuid_scan'."
-
msgid "There are not enough hosts available."
msgstr "No hay suficientes hosts disponibles."
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"Aún hay %(count)i registros de tipo sin migrar. La migración no puede "
-"continuar hasta que todos los registros de tipo de instancia hayan sido "
-"migradas a un nuevo formato. Por favor ejecute primero la base de datos nova-"
-"manage migrate_flavor_data'"
-
-#, python-format
msgid "There is no such action: %s"
msgstr "No existe esta acción: %s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr "No se encontraron registros donde instance_uuid era NO VÃLIDA."
-
#, python-format
msgid ""
"This compute node's hypervisor is older than the minimum supported version: "
@@ -2703,11 +2442,6 @@ msgstr ""
"El hipervisor de este nodo de cálculo es anterior a la versión mínima "
"soportada: %(version)s."
-msgid "This domU must be running on the host specified by connection_url"
-msgstr ""
-"Este domU debe estar en ejecución en el anfitrión especificado por "
-"connection_url"
-
msgid ""
"This method needs to be called with either networks=None and port_ids=None "
"or port_ids and networks as not none."
@@ -2727,10 +2461,6 @@ msgstr ""
"Este servicio es anterior (v%(thisver)i) a la versión mímima soportada (v"
"%(minver)i) del resto del despliegue. No se puede continuar."
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "Se ha excedido el tiempo esperando a que se creara el dispositivo %s"
-
msgid "Timeout waiting for response from cell"
msgstr "Se ha excedido el tiempo de espera de respuesta de la célula"
@@ -2776,12 +2506,6 @@ msgid "Unable to authenticate Ironic client."
msgstr "No se puede autenticar cliente Ironic."
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr ""
-"Unposible contactar al agente invitado. La siguiente llamada agotó su tiempo "
-"de espera: %(method)s"
-
-#, python-format
msgid "Unable to convert image to %(format)s: %(exp)s"
msgstr "No se puede convertir la imagen a %(format)s: %(exp)s"
@@ -2790,14 +2514,6 @@ msgid "Unable to convert image to raw: %(exp)s"
msgstr "No se puede convertir la imagen a sin formato: %(exp)s"
#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "Imposible destruir VBD %s"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "No se puede destruir VDI %s"
-
-#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "No se puede determinar el bus de disco para '%s'"
@@ -2806,22 +2522,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "No se puede determinar el prefijo de disco para %s "
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr "Incapaz de expulsar %s del conjunto: No se ha encontrado maestro"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "Incapaz de expulsar %s del conjunto; el conjunto no está vacío"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "Imposible encontrar SR en VBD %s"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "No ha sido posible encontrar SR desde VDI %s"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "No se puede encontrar ca_file: %s"
@@ -2840,9 +2540,6 @@ msgstr "No se puede encontrar el destino iSCSI "
msgid "Unable to find key_file : %s"
msgstr "No se puede encontrar key_file: %s"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "No se puede encontrar VBD/VDI de raíz para VM"
-
msgid "Unable to find volume"
msgstr "No se puede encontrar volumen"
@@ -2852,22 +2549,6 @@ msgstr "No se puede obtener el UUID de host: /etc/machine-id no existe"
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr "No se puede obtener el UUID de host: /etc/machine-id está vacío"
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "Imposible obtener copia del VDI %s en"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "Inposible insertar VDI para SR %s"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "Incapaz de insertar VDI en SR %s"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "Incapaz de incorporar %s al conjunto"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2899,10 +2580,6 @@ msgstr ""
"Incapaz de emigrar la instancia %(instance_id)s al actual anfitrion "
"(%(host)s)"
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "Incapaz de obtener información del destino %s"
-
msgid "Unable to resize disk down."
msgstr "Incapaz de reducir el tamaño del disco."
@@ -2912,13 +2589,6 @@ msgstr "No se puede establecer contraseña en la instancia"
msgid "Unable to shrink disk."
msgstr "No se puede empaquetar disco."
-msgid "Unable to terminate instance."
-msgstr "Incapaz de terminar instancia."
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "Imposible desconectar VBD %s"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "Información de CPU inválida: %(reason)s"
@@ -2939,16 +2609,6 @@ msgstr ""
"dispositivos de bloques de diversas instancias."
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"Error inesperado de API. Por favor reporta esto en http://bugs.launchpad.net/"
-"nova/ e incluye el registro de Nova API en lo posible.\n"
-"%s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "Acción de agregado inesperada %s"
@@ -3008,9 +2668,6 @@ msgstr "Se ha intentado la extracción pero la imagen %s no ha sido encontrada."
msgid "Unsupported Content-Type"
msgstr "Tipo de contenido no soportado"
-msgid "Upgrade DB using Essex release first."
-msgstr "Actualice la base de datos utilizando primero el release de Essex."
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr ""
@@ -3036,26 +2693,6 @@ msgstr ""
#, python-format
msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"El VDI %(vdi_ref)s es de %(virtual_size)d bytes lo que es mayor que el "
-"tamaño de ll tipo de %(new_disk_size)d bytes."
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"VDI no encontrado en SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun "
-"%(target_lun)s)"
-
-#, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr ""
-"Intentos de incorporación de VHD excedidos (%d), dejando de intentar..."
-
-#, python-format
-msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
msgstr ""
@@ -3127,13 +2764,6 @@ msgstr ""
"El volúmen establece el tamaño de bloque, pero el hipervisor libvirt actual "
"'%s' no soporta tamaño de bloque personalizado."
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr ""
-"No se soporta esquema '%s' bajo Python < 2.7.4, por favor utilice http o "
-"https"
-
msgid "When resizing, instances must change flavor!"
msgstr "Al redimensionarse, las instancias deben cambiar de tipo."
@@ -3148,11 +2778,6 @@ msgstr ""
msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr "Método de contingencia %(method)s usado en recurso %(res)s es erróneo"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr ""
-"Método de tipo de enlace incorrecto. Solo se permiten los tipos 'pre' y "
-"'post'"
-
msgid "X-Forwarded-For is missing from request."
msgstr "X-Forwarded-For no está presente en la petición."
@@ -3168,9 +2793,6 @@ msgstr "X-Metadata-Provider no está presente en la petición."
msgid "X-Tenant-ID header is missing from request."
msgstr "Falta cabecera X-Tenant-ID en la solicitud."
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "Se requiere una XAPI que soporte relax-xsm-sr-check=true"
-
msgid "You are not allowed to delete the image."
msgstr "No le está permitido suprimir la imagen."
@@ -3198,16 +2820,6 @@ msgid "admin password can't be changed on existing disk"
msgstr ""
"No se puede cambiar la contraseña de administrador en el disco existente"
-msgid "aggregate deleted"
-msgstr "agregado eliminado"
-
-msgid "aggregate in error"
-msgstr "error en agregado"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "assert_can_migrate ha fallado debido a: %s"
-
msgid "cannot understand JSON"
msgstr "no se puede entender JSON"
@@ -3271,9 +2883,6 @@ msgstr "imagen ya montada"
msgid "instance %s is not running"
msgstr "No se está ejecutando instancia %s"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr "la instancia tiene un kernel o un disco RAM, pero no ambos"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr "la instancia es un argumento necesario para utilizar @refresh_cache "
@@ -3328,9 +2937,6 @@ msgstr "el dispositivo nbd %s no se ha mostrado"
msgid "nbd unavailable: module not loaded"
msgstr "nbd no disponible: módulo no cargado"
-msgid "no hosts to remove"
-msgstr "no hay hosts que eliminar"
-
#, python-format
msgid "no match found for %s"
msgstr "No se ha encontrado coincidencia para %s"
@@ -3407,9 +3013,6 @@ msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
"esta instancia de invitado o controlador no implementa set_admin_password ."
-msgid "setup in progress"
-msgstr "Configuración en progreso"
-
#, python-format
msgid "snapshot for %s"
msgstr "instantánea para %s "
@@ -3426,9 +3029,6 @@ msgstr "demasiadas claves de cuerpo"
msgid "unpause not supported for vmwareapi"
msgstr "cancelación de pausa no soportada para vmwareapi"
-msgid "version should be an integer"
-msgstr "la versión debe ser un entero"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "El grupo de volúmenes %s debe ser el grupo de volúmenes LVM"
@@ -3457,15 +3057,3 @@ msgid ""
msgstr ""
"estado de volumen '%(vol)s' debe ser 'en-uso'. Actualmente en '%(status)s' "
"estado"
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake no tiene una implementación para %s"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr ""
-"xenapi.fake no tiene una implementación para %s o ha sido llamada con un "
-"número incorrecto de argumentos"
diff --git a/nova/locale/fr/LC_MESSAGES/nova.po b/nova/locale/fr/LC_MESSAGES/nova.po
index e18cdfbc94..07946f1ddc 100644
--- a/nova/locale/fr/LC_MESSAGES/nova.po
+++ b/nova/locale/fr/LC_MESSAGES/nova.po
@@ -28,7 +28,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -157,18 +157,6 @@ msgid "Affinity instance group policy was violated."
msgstr "La stratégie de groupe d'instances anti-affinité a été violée."
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "L'agent ne supporte l'appel : %(method)s"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"La génération d'agent avec l'hyperviseur %(hypervisor)s le système "
-"d'exploitation %(os)s et l'architecture %(architecture)s existe."
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "L'agrégat %(aggregate_id)s a déjà l'hôte %(host)s."
@@ -187,13 +175,6 @@ msgstr ""
"%(metadata_key)s."
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr ""
-"Agrégat %(aggregate_id)s : l'action '%(action)s' a généré une erreur : "
-"%(reason)s."
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "L'agrégat %(aggregate_name)s existe déjà."
@@ -203,10 +184,6 @@ msgstr ""
"L'agrégat de %s ne prend pas en charge la zone de disponibilité nommée vide"
#, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "Agrégat introuvable pour le nombre d'hôtes %(host)s"
-
-#, python-format
msgid "An invalid 'name' value was provided. The name must be: %(reason)s"
msgstr ""
"Une valeur 'name' non valide a été fournie. Le nom doit être : %(reason)s"
@@ -395,12 +372,6 @@ msgstr ""
"Impossible de traiter la demande d'authentification pour les données "
"d'identification %d"
-msgid "Can't resize a disk to 0 GB."
-msgstr "Impossible de redimensionner un disque à 0 Go."
-
-msgid "Can't resize down ephemeral disks."
-msgstr "Impossible de réduire la taille des disques éphémères."
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
"Impossible d'extraire le chemin d'unité racine de la configuration libvirt "
@@ -446,12 +417,6 @@ msgstr ""
"Impossible de déterminer le pool de stockage parent pour %s; impossible de "
"déterminer où stocker les images"
-msgid "Cannot find SR of content-type ISO"
-msgstr "Impossible de trouver le référentiel de stockage ISO content-type"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "Impossible de trouver le SR pour lire/écrire le VDI."
-
msgid "Cannot find image for rebuild"
msgstr "Image introuvable pour la régénération"
@@ -580,10 +545,6 @@ msgid "Connection to libvirt lost: %s"
msgstr "Perte de la connexion à libvirt : %s"
#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "La connexion à l'hyperviseur est cassée sur l'hôte : %(host)s"
-
-#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
@@ -660,20 +621,9 @@ msgid "Default PBM policy is required if PBM is enabled."
msgstr "La règle PBM par défaut est nécessaire si PBM est activé."
#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "%(records)d entrée supprimer de la table '%(table_name)s'."
-
-#, python-format
msgid "Device '%(device)s' not found."
msgstr "Device '%(device)s' introuvable."
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr ""
-"L'ID d'unité %(id)s indiquée n'est pas prise en charge par la version "
-"%(version)s"
-
msgid "Device name contains spaces."
msgstr "Le nom du périphérique contient des espaces."
@@ -685,20 +635,6 @@ msgid "Device type mismatch for alias '%s'"
msgstr "Type de périphérique non concordant pour l'alias '%s'"
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-" Types différents entre %(table)s.%(column)s et la table shadow: %(c_type)s "
-"%(shadow_c_type)s"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr ""
-"Le disque contient un système de fichiers qui ne peut pas être "
-"redimensionné : %s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "Le format de disque %(disk_format)s n'est pas acceptable"
@@ -706,13 +642,6 @@ msgstr "Le format de disque %(disk_format)s n'est pas acceptable"
msgid "Disk info file is invalid: %(reason)s"
msgstr "Le ficher d'information du disque est invalide : %(reason)s"
-msgid "Disk must have only one partition."
-msgstr "Le disque doit comporter une seule partition."
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr "Disque introuvable avec l'ID %s et connecté à l'instance."
-
#, python-format
msgid "Driver Error: %s"
msgstr "Erreur du pilote: %s"
@@ -730,10 +659,6 @@ msgstr ""
"la mise à disposition est encore '%(state)s'."
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "Erreur durant l'appel de l'agent: %(method)s"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr ""
"Erreur durant la dé-réservation de l'instance %(instance_id)s: %(reason)s"
@@ -786,9 +711,6 @@ msgstr "Erreur lors du montage de %(image)s avec libguestfs (%(e)s)"
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Erreur lors de la création du moniteur de ressource : %(monitor)s"
-msgid "Error: Agent is disabled"
-msgstr "Erreur : agent désactivé"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "Evénement %(event)s non trouvé pour l'ID action %(action_id)s"
@@ -820,10 +742,6 @@ msgstr "Nombre maximum d'essai dépassé. %(reason)s."
msgid "Expected a uuid but received %(uuid)s."
msgstr "UUID attendu mais %(uuid)s reçu."
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "Colonne supplémentaire %(table)s.%(column)s dans la table image"
-
msgid "Extracting vmdk from OVA failed."
msgstr "Echec de l'extraction de vmdk à partir d'OVA."
@@ -852,10 +770,6 @@ msgid "Failed to attach network adapter device to %(instance_uuid)s"
msgstr "Impossible de connecter la carte réseau avec %(instance_uuid)s"
#, python-format
-msgid "Failed to create vif %s"
-msgstr "Échec de création du vif %s"
-
-#, python-format
msgid "Failed to deploy instance: %(reason)s"
msgstr "Echec de déploiement de l'instance: %(reason)s"
@@ -883,11 +797,6 @@ msgstr "Echec de mappage des partitions : %s"
msgid "Failed to mount filesystem: %s"
msgstr "Impossible de monter le système de fichier : %s"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr ""
-"Echec de l'analyse des informations d'un périphérique pci pour le passe-"
-"système"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "Échec à éteindre l'instance : %(reason)s"
@@ -897,14 +806,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "Echec à faire fonctionner l'instance : %(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr ""
-"Échec de la préparation du périphérique PCI %(id)s pour l'instance "
-"%(instance_uuid)s : %(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "Echec de la mise à disposition de l'instance %(inst)s : %(reason)s"
@@ -939,9 +840,6 @@ msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
"Echec de définition du mot de passe d'admin sur %(instance)s car %(reason)s"
-msgid "Failed to spawn, rolling back"
-msgstr "Echec de la génération, annulation"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "Échec à suspendre l'instance : %(reason)s"
@@ -950,10 +848,6 @@ msgstr "Échec à suspendre l'instance : %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "Échec à terminer l'instance : %(reason)s"
-#, python-format
-msgid "Failed to unplug vif %s"
-msgstr "Impossible de déconnecter le vif %s"
-
msgid "Failure prepping block device."
msgstr "Echec de préparation de l'unité par bloc."
@@ -962,10 +856,6 @@ msgid "File %(file_path)s could not be found."
msgstr "Fichier %(file_path)s introuvable."
#, python-format
-msgid "File path %s not valid"
-msgstr "Chemin d'accès au fichier %s non valide"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr ""
"L'IP fixe %(ip)s n'est pas une adresse IP valide pour le réseau "
@@ -1097,18 +987,6 @@ msgstr ""
msgid "Found no disk to snapshot."
msgstr "Aucun disque trouvé pour l'instantané."
-#, python-format
-msgid "Found no network for bridge %s"
-msgstr "Aucun réseau trouvé pour le bridge %s"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "Réseau non unique trouvé pour le bridge %s"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "Réseau non unique trouvé pour name_label %s"
-
msgid "Guest does not have a console available."
msgstr "Aucune console n'est disponible pour l'invité."
@@ -1140,9 +1018,6 @@ msgstr ""
"L'hôte ne prend pas en charge les invités avec des tailles de pages de "
"mémoire personnalisées"
-msgid "Host startup on XenServer is not supported."
-msgstr "Le démarrage à chaud sur XenServer n'est pas pris en charge."
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
"Le pilote de l'hyperviseur ne prend pas en charge la méthode "
@@ -1373,10 +1248,6 @@ msgstr "L'instance n'a pas été redimensionnée."
msgid "Instance hostname %(hostname)s is not a valid DNS name"
msgstr "Le nom d'hôte de l'instance %(hostname)s n'est pas un nom DNS valide"
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "Instance déjà en mode secours : %s"
-
msgid "Instance is not a member of specified network"
msgstr "L'instance n'est pas un membre du réseau spécifié"
@@ -1397,12 +1268,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "Ressources de calcul insuffisante : %(reason)s."
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr ""
-"Mémoire libre insuffisante sur le noeud de calcul pour le démarrage de "
-"%(uuid)s."
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "L'interface %(interface)s non trouvée."
@@ -1596,13 +1461,6 @@ msgid ""
msgstr ""
"Il est interdit de créer une interface sur le réseau externe %(network_uuid)s"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr ""
-"L'image Kernel/Ramdisk est trop volumineuse : %(vdi_size)d octets, max "
-"%(max_size)d octets"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1628,14 +1486,6 @@ msgstr ""
msgid "Keypair data is invalid: %(reason)s"
msgstr "La donnée de paire de clés est invalide : %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Le nom de la paire de clés contient des caractères non sécurisés"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"La paire de clé doit être une chaîne et de longueur comprise entre 1 et 255 "
-"caractères"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Limites seulement supportées sur vCenter 6.0 et supérieur"
@@ -1669,9 +1519,6 @@ msgstr "Le marqueur %(marker)s est introuvable."
msgid "Maximum number of floating IPs exceeded"
msgstr "Nombre maximal d'adresses IP flottantes dépassé"
-msgid "Maximum number of key pairs exceeded"
-msgstr "Nombre maximal de paires de clés dépassé"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "Le nombre maximal d'éléments de métadonnées dépasse %(allowed)d"
@@ -1704,12 +1551,6 @@ msgstr ""
"La métrique %(name)s ne peut être trouvé sur le noeud de calcul de l'hôte "
"%(host)s.%(node)s."
-msgid "Migrate Receive failed"
-msgstr "Echec de réception de la migration"
-
-msgid "Migrate Send failed"
-msgstr "Echec d'envoi de la migration"
-
#, python-format
msgid "Migration %(id)s for server %(uuid)s is not live-migration."
msgstr ""
@@ -1759,10 +1600,6 @@ msgstr "Erreur de sélection de destinations de migration : %(reason)s"
msgid "Missing arguments: %s"
msgstr "Arguments manquants : %s"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "Colonne %(table)s.%(column)s manquante dans la table shadow"
-
msgid "Missing device UUID."
msgstr "Périphérique UUID manquant."
@@ -1847,13 +1684,6 @@ msgid "Must not input both network_id and port_id"
msgstr "Vous ne devez pas entrer à la fois network_id et port_id"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"Il faut indiquer connection_url, connection_username (facultatif) et "
-"connection_password pour utiliser compute_driver=xenapi.XenAPIDriver"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1908,10 +1738,6 @@ msgstr "Pas de mappage d'unité par bloc avec l'id %(id)s."
msgid "No Unique Match Found."
msgstr "Correspondance unique non trouvée."
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "Aucune génération d'agent associée à l'ID %(id)s."
-
msgid "No compute host specified"
msgstr "Aucun hôte de calcul spécifié"
@@ -1984,10 +1810,6 @@ msgstr "Aucun point de montage trouvé dans %(root)s de l'image %(image)s"
msgid "No operating system found in %s"
msgstr "Aucun système d'exploitation trouvé dans %s"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "Aucun VDI primaire trouvé pour %s"
-
msgid "No root disk defined."
msgstr "Aucun disque racine défini."
@@ -1999,9 +1821,6 @@ msgstr ""
"Aucun réseau spécifique n'a été demandé et il n'existe aucun réseau "
"disponible pour le projet '%(project_id)s'."
-msgid "No suitable network for migrate"
-msgstr "Aucun réseau adéquat pour migrer"
-
msgid "No valid host found for cold migrate"
msgstr "Aucun hôte valide n'a été trouvé pour la migration à froid"
@@ -2084,15 +1903,6 @@ msgstr ""
msgid "Only administrators may list deleted instances"
msgstr "Seul l'administrateur peut afficher la liste des instances supprimées"
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr ""
-"Seuls les demandes de service (ext/NFS) basées sur des fichiers sont pris en "
-"charge par cette fonctionnalité. La demande de service %(uuid)s est de type "
-"%(type)s"
-
msgid "Origin header does not match this host."
msgstr "L'en-tête d'origine ne correspond pas à cet hôte."
@@ -2135,10 +1945,6 @@ msgid "PCI device request %(requests)s failed"
msgstr "La requête %(requests)s au périphérique PCI a échoué."
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "INT. PHYS. %s ne contient pas l'adresse IP"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "Taille de page %(pagesize)s interdite sur '%(against)s'"
@@ -2257,10 +2063,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "Quota dépassé, trop de serveurs dans le groupe"
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "Quota dépassé: code=%(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr "Le quota existe pour le projet %(project_id)s, ressource %(resource)s"
@@ -2291,20 +2093,6 @@ msgstr ""
"Le quota limite %(limit)s pour %(resource)s doit être inferieur ou égal à "
"%(maximum)s."
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr ""
-"Nombre maximal de nouvelles tentatives atteint pour le débranchement de VBD "
-"%s"
-
-msgid ""
-"Realtime policy needs vCPU(s) mask configured with at least 1 RT vCPU and 1 "
-"ordinary vCPU. See hw:cpu_realtime_mask or hw_cpu_realtime_mask"
-msgstr ""
-"La stratégie en temps réel nécessite le masque vCPU(s) configuré avec au "
-"moins 1 RT vCPU et 1 vCPU ordinaire. Voir hw:cpu_realtime_mask ou "
-"hw_cpu_realtime_mask"
-
msgid "Request body and URI mismatch"
msgstr "Corps et URI de demande discordants"
@@ -2475,10 +2263,6 @@ msgid "Set admin password is not supported"
msgstr "La définition du mot de passe admin n'est pas supportée"
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "La table fantôme avec le nom %(name)s existe déjà."
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "Le partage '%s' n'est pas pris en charge"
@@ -2486,14 +2270,6 @@ msgstr "Le partage '%s' n'est pas pris en charge"
msgid "Share level '%s' cannot have share configured"
msgstr "Le niveau de partage '%s' n'a pas de partage configuré"
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"Echec de la réduction de la taille du système de fichiers avec resize2fs, "
-"veuillez vérifier si vous avez suffisamment d'espace disponible sur votre "
-"disque."
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "Le snapshot %(snapshot_id)s n'a pas été trouvé."
@@ -2519,12 +2295,6 @@ msgstr "La clé de tri fournie n'était pas valide."
msgid "Specified fixed address not assigned to instance"
msgstr "L'adresse fixe spécifiée n'est pas assignée à une instance"
-msgid "Specify `table_name` or `table` param"
-msgstr "Spécifiez un paramètre pour`table_name` ou `table`"
-
-msgid "Specify only one param `table_name` `table`"
-msgstr "Spécifiez seulement un paramètre pour `table_name` `table`"
-
msgid "Started"
msgstr "Démarré"
@@ -2596,9 +2366,6 @@ msgstr ""
msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
msgstr "Le nombre de ports définis (%(ports)d) dépasse la limite (%(quota)d)"
-msgid "The only partition should be partition 1."
-msgstr "La seule partition doit être la partition 1."
-
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr ""
@@ -2664,52 +2431,13 @@ msgid ""
msgstr ""
"Le volume ne peut pas recevoir le même nom d'unité que l'unité racine %s"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"%(records)d enregistrements sont présents dans la table '%(table_name)s' "
-"dans laquelle la colonne uuid ou instance_uuid a pour valeur NULL. "
-"Réexécutez cette commande avec l'option --delete après la sauvegarde des "
-"données nécessaires."
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"%(records)d enregistrements sont présents dans la table '%(table_name)s' "
-"dans laquelle la colonne uuid ou instance_uuid a pour valeur NULL. Ils "
-"doivent être manuellement nettoyés avant la migration. Prévoyez d'exécuter "
-"la commande 'nova-manage db null_instance_uuid_scan'."
-
msgid "There are not enough hosts available."
msgstr "Le nombre d'hôtes disponibles est insuffisant"
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"Il existe encore %(count)i enregistrements de version non migrés. La "
-"migration ne peut pas continuer tant que tous les enregistrements de version "
-"d'instance n'ont pas été migrés vers le nouveau format. Exécutez tout "
-"d'abord `nova-manage db migrate_flavor_data'."
-
-#, python-format
msgid "There is no such action: %s"
msgstr "Aucune action de ce type : %s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr ""
-"Aucun enregistrement n'a été trouvé lorsque instance_uuid avait pour valeur "
-"NULL."
-
#, python-format
msgid ""
"This compute node's hypervisor is older than the minimum supported version: "
@@ -2718,9 +2446,6 @@ msgstr ""
"L'hyperviseur de ce noeud de calcul est plus ancien que la version minimale "
"prise en charge : %(version)s."
-msgid "This domU must be running on the host specified by connection_url"
-msgstr "Ce domU doit s'exécuter sur l'hôte indiqué par connection_url"
-
msgid ""
"This method needs to be called with either networks=None and port_ids=None "
"or port_ids and networks as not none."
@@ -2740,10 +2465,6 @@ msgstr ""
"Ce service est plus ancien (v%(thisver)i) que la version minimale (v"
"%(minver)i) du reste du déploiement. Impossible de continuer."
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "Dépassement du délai d'attente pour l'unité %s à créer"
-
msgid "Timeout waiting for response from cell"
msgstr "Dépassement du délai d'attente pour la réponse de la cellule"
@@ -2789,12 +2510,6 @@ msgid "Unable to authenticate Ironic client."
msgstr "Impossible d'authentifier le client Ironic."
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr ""
-"Impossible d'appeler l'agent invité. L'appel suivant a mit trop de temps: "
-"%(method)s"
-
-#, python-format
msgid "Unable to convert image to %(format)s: %(exp)s"
msgstr "Impossible de convertir l'image en %(format)s : %(exp)s"
@@ -2803,14 +2518,6 @@ msgid "Unable to convert image to raw: %(exp)s"
msgstr "Impossible de convertir l'image en raw : %(exp)s"
#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "Impossible de supprimer le VBD %s"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "Impossible de détruire VDI %s"
-
-#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "Impossible de déterminer le bus de disque pour '%s'"
@@ -2819,22 +2526,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "Impossible de déterminer le préfixe du disque pour %s"
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr "Impossible d'éjecter %s du pool ; aucun maître trouvé"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "Impossible d'éjecter %s du pool ; pool non vide"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "Impossible de trouver SR du VDB %s"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "Impossible de trouver la demande de service depuis VDI %s"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "Impossible de trouver ca_file : %s"
@@ -2853,9 +2544,6 @@ msgstr "Cible iSCSI introuvable"
msgid "Unable to find key_file : %s"
msgstr "Impossible de trouver key_file : %s"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "Impossible de trouver le VBD/VDI racine pour la machine virtuelle"
-
msgid "Unable to find volume"
msgstr "Volume introuvable"
@@ -2865,22 +2553,6 @@ msgstr "Impossible d'obtenir l'UUID de l'hôte : /etc/machine-id n'existe pas"
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr "Impossible d'obtenir l'UUID de l'hôte : /etc/machine-id est vide"
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "Impossible de récuppérer l'enregistrement du VDI %s sur"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "Impossible d'introduire le VDI pour SR %s"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "Impossible d'introduire VDI sur SR %s"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "Impossible de joindre %s dans le pool"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2912,10 +2584,6 @@ msgstr ""
"Impossible de migrer l'instance (%(instance_id)s) vers l'hôte actuel "
"(%(host)s)."
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "Impossible d'obtenir les informations de la cible %s"
-
msgid "Unable to resize disk down."
msgstr "Impossible de redimensionner le disque à la baisse."
@@ -2925,13 +2593,6 @@ msgstr "Impossible de définir le mot de passe sur l'instance"
msgid "Unable to shrink disk."
msgstr "Impossible de redimensionner le disque."
-msgid "Unable to terminate instance."
-msgstr "Impossibilité de mettre fin à l'instance."
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "Impossible de deconnecter le VBD %s"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "Information CPU inacceptable : %(reason)s"
@@ -2951,16 +2612,6 @@ msgstr ""
"contient des mappages d'unité par bloc provenant de plusieurs instances."
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"Erreur de l'API inattendue. Merci de la reporter sur http://bugs.launchpad."
-"net/nova/ et d'y joindre le rapport de L'API Nova si possible.\n"
-"%s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "Action d'agrégat inattendue : %s"
@@ -3019,9 +2670,6 @@ msgstr "Extraction tentée mais l'image %s est introuvable."
msgid "Unsupported Content-Type"
msgstr "Type de contenu non pris en charge"
-msgid "Upgrade DB using Essex release first."
-msgstr "Mettez à jour la BD en utilisant la version Essex préalablement."
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr "Utilisateur %(username)s non trouvé dans le fichier de mot de passe."
@@ -3045,27 +2693,6 @@ msgstr ""
#, python-format
msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"VDI %(vdi_ref)s a pour taille %(virtual_size)d octets, qui est supérieure à "
-"la taille de version de %(new_disk_size)d octets."
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"VDI introuvable sur le référentiel de stockage %(sr)s (vdi_uuid "
-"%(vdi_uuid)s, target_lun %(target_lun)s)"
-
-#, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr ""
-"Nombre de tentatives de coalescence du disque VHD supérieur à (%d), "
-"abandon..."
-
-#, python-format
-msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
msgstr ""
@@ -3136,12 +2763,6 @@ msgstr ""
"Le volume définit la taille de bloc, mais l'hyperviseur libvirt en cours "
"'%s' ne prend pas en charge la taille de bloc personnalisée"
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr ""
-"Schéma '%s' non pris en charge sous Python < 2.7.4. Utilisez HTTP ou HTTPS"
-
msgid "When resizing, instances must change flavor!"
msgstr "Lors du redimensionnement, les instances doivent changer la version !"
@@ -3156,11 +2777,6 @@ msgstr ""
msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr "Mauvaise méthode de quota %(method)s utilisée sur la ressource %(res)s"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr ""
-"Type de point d'ancrage non valide. Seuls les types 'pre' et 'post' sont "
-"autorisés"
-
msgid "X-Forwarded-For is missing from request."
msgstr "X-Forwarded-For est manquant dans la requête"
@@ -3176,9 +2792,6 @@ msgstr "X-Metadata-Provider est manquant dans la requête"
msgid "X-Tenant-ID header is missing from request."
msgstr "L'entête X-Tenant-ID est manquante dans la requête."
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "XAPI prenant en charge relax-xsm-sr-check=true obligatoire"
-
msgid "You are not allowed to delete the image."
msgstr "Vous n'êtes pas autorisé à supprimer l'image."
@@ -3207,16 +2820,6 @@ msgstr "Aucune adresse IP flottante n'est disponible."
msgid "admin password can't be changed on existing disk"
msgstr "Impossible de modifier le mot de passe admin sur le disque existant"
-msgid "aggregate deleted"
-msgstr "agrégat supprimé"
-
-msgid "aggregate in error"
-msgstr "agrégat en erreur"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "assert_can_migrate a échoué à cause de : %s"
-
msgid "cannot understand JSON"
msgstr "impossible de comprendre JSON"
@@ -3280,9 +2883,6 @@ msgstr "image déjà montée"
msgid "instance %s is not running"
msgstr "instance %s n'est pas en cours d'exécution"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr "l'instance a un noyau ou un disque mais pas les deux"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr ""
"l'instance est un argument obligatoire pour l'utilisation de @refresh_cache"
@@ -3339,9 +2939,6 @@ msgstr "Device nbd %s n'est pas apparu"
msgid "nbd unavailable: module not loaded"
msgstr "nbd non disponible : module non chargé"
-msgid "no hosts to remove"
-msgstr "aucun hôte à retirer"
-
#, python-format
msgid "no match found for %s"
msgstr "aucune occurrence trouvée pour %s"
@@ -3419,9 +3016,6 @@ msgstr ""
"set_admin_password n'est pas implémenté par ce pilote ou par cette instance "
"invitée."
-msgid "setup in progress"
-msgstr "Configuration en cours"
-
#, python-format
msgid "snapshot for %s"
msgstr "instantané pour %s"
@@ -3438,9 +3032,6 @@ msgstr "trop de clés de corps"
msgid "unpause not supported for vmwareapi"
msgstr "annulation de la mise en pause non prise en charge pour vmwareapi"
-msgid "version should be an integer"
-msgstr "la version doit être un entier"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "vg %s doit être un groupe de volumes LVM"
@@ -3468,15 +3059,3 @@ msgid ""
msgstr ""
"Le statut du volume '%(vol)s' doit être 'in-use'. Statut actuel : "
"'%(status)s'"
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake n'a pas d'implémentation pour %s"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr ""
-"xenapi.fake n'a pas d'implementation pour %s ou il a été appelé avec le "
-"mauvais nombre d'arguments"
diff --git a/nova/locale/it/LC_MESSAGES/nova.po b/nova/locale/it/LC_MESSAGES/nova.po
index 1fd106d755..e1e7b777a7 100644
--- a/nova/locale/it/LC_MESSAGES/nova.po
+++ b/nova/locale/it/LC_MESSAGES/nova.po
@@ -13,7 +13,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -145,18 +145,6 @@ msgid "Affinity instance group policy was violated."
msgstr "La politica di affinità del gruppo di istanze è stata violata."
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "L'agent non supporta la chiamata: %(method)s"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"L'agent-build con architettura hypervisor %(hypervisor)s os %(os)s "
-"%(architecture)s esiste."
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "L'aggregato %(aggregate_id)s dispone già dell'host %(host)s."
@@ -175,13 +163,6 @@ msgstr ""
"%(metadata_key)s."
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr ""
-"Aggregato %(aggregate_id)s: azione '%(action)s' ha causato un errore: "
-"%(reason)s."
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "L'aggregato %(aggregate_name)s esiste già."
@@ -191,10 +172,6 @@ msgstr ""
"L'aggregazione %s non supporta la zona di disponibilità denominata vuota"
#, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "Aggregato per il conteggio host %(host)s non è stato trovato."
-
-#, python-format
msgid "An invalid 'name' value was provided. The name must be: %(reason)s"
msgstr ""
"È stato fornito un valore 'name' non valido. Il nome deve essere: %(reason)s"
@@ -382,12 +359,6 @@ msgid "Can not handle authentication request for %d credentials"
msgstr ""
"Impossibile gestire la richiesta di autenticazione per le credenziali %d"
-msgid "Can't resize a disk to 0 GB."
-msgstr "Impossibile ridimensionare un disco a 0 GB."
-
-msgid "Can't resize down ephemeral disks."
-msgstr "Impossibile ridimensionare verso il basso i dischi effimeri."
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
"Impossibile recuperare il percorso root dell'unità dalla configurazione "
@@ -433,12 +404,6 @@ msgstr ""
"Impossibile determinare il pool di archiviazione parent per %s; impossibile "
"determinare dove archiviare le immagini"
-msgid "Cannot find SR of content-type ISO"
-msgstr "Impossibile trovare SR del tipo di contenuto ISO"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "Impossibile trovare SR per la lettura/scrittura di VDI."
-
msgid "Cannot find image for rebuild"
msgstr "Impossibile trovare l'immagine per la nuova build"
@@ -567,10 +532,6 @@ msgid "Connection to libvirt lost: %s"
msgstr "Connessione a libvirt non riuscita: %s"
#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "La connessione all'hypervisor è interrotta sull'host: %(host)s"
-
-#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
@@ -643,20 +604,9 @@ msgid "Default PBM policy is required if PBM is enabled."
msgstr "La politica PBM predefinita è richiesta se PBM è abilitato."
#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "%(records)d record eliminati dalla tabella '%(table_name)s'."
-
-#, python-format
msgid "Device '%(device)s' not found."
msgstr "Dispositivo '%(device)s' non trovato."
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr ""
-"L'ID dispositivo %(id)s specificato non è supportato dalla versione "
-"hypervisor %(version)s"
-
msgid "Device name contains spaces."
msgstr "Il nome dispositivo contiene degli spazi."
@@ -668,20 +618,6 @@ msgid "Device type mismatch for alias '%s'"
msgstr "Mancata corrispondenza del tipo di dispositivo per l'alias '%s'"
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-"Tipi differenti in %(table)s.%(column)s e nella tabella cronologica: "
-"%(c_type)s %(shadow_c_type)s"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr ""
-"Il disco contiene un file system che non è in grado di eseguire il "
-"ridimensionamento: %s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "Il formato disco %(disk_format)s non è accettabile"
@@ -689,13 +625,6 @@ msgstr "Il formato disco %(disk_format)s non è accettabile"
msgid "Disk info file is invalid: %(reason)s"
msgstr "Il file di informazioni sul disco non è valido: %(reason)s"
-msgid "Disk must have only one partition."
-msgstr "Il disco deve avere solo una partizione."
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr "Il disco con ID: %s non è stato trovato collegato all'istanza."
-
#, python-format
msgid "Driver Error: %s"
msgstr "Errore del driver: %s"
@@ -713,10 +642,6 @@ msgstr ""
"ancora '%(state)s'."
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "Errore durante la seguente chiamata all'agent: %(method)s"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr "Errore durante l'istanza non rinviata %(instance_id)s: %(reason)s"
@@ -768,9 +693,6 @@ msgstr "Errore di montaggio %(image)s con libguestfs (%(e)s)"
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Errore durante la creazione del monitor di risorse: %(monitor)s"
-msgid "Error: Agent is disabled"
-msgstr "Errore: l'agent è disabilitato"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "L'evento %(event)s per l'id azione %(action_id)s non è stato trovato"
@@ -802,10 +724,6 @@ msgstr "Superato numero massimo di tentativi. %(reason)s"
msgid "Expected a uuid but received %(uuid)s."
msgstr "Era previsto un uuid ma è stato ricevuto %(uuid)s."
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "Colonna supplementare %(table)s.%(column)s nella tabella cronologica"
-
msgid "Extracting vmdk from OVA failed."
msgstr "Estrazione di vmdk da OVA non riuscita."
@@ -838,10 +756,6 @@ msgstr ""
"%(instance_uuid)s"
#, python-format
-msgid "Failed to create vif %s"
-msgstr "Impossibile creare vif %s"
-
-#, python-format
msgid "Failed to deploy instance: %(reason)s"
msgstr "Impossibile distribuire l'istanza: %(reason)s"
@@ -871,11 +785,6 @@ msgstr "Impossibile associare le partizioni: %s"
msgid "Failed to mount filesystem: %s"
msgstr "Impossibile montare il file system: %s"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr ""
-"Impossibile analizzare le informazioni relative ad una periferica PCI per il "
-"trasferimento"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "Impossibile disattivare l'istanza: %(reason)s"
@@ -885,14 +794,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "Impossibile alimentare l'istanza: %(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr ""
-"Impossibile preparare il dispositivo PCI %(id)s per l'istanza "
-"%(instance_uuid)s: %(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "Impossibile fornire l'istanza %(inst)s: %(reason)s"
@@ -927,9 +828,6 @@ msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
"Impossibile impostare la password admin in %(instance)s perché %(reason)s"
-msgid "Failed to spawn, rolling back"
-msgstr "Generazione non riuscita, ripristino in corso"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "Impossibile sospendere l'istanza: %(reason)s"
@@ -938,10 +836,6 @@ msgstr "Impossibile sospendere l'istanza: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "Impossibile terminare l'istanza: %(reason)s"
-#, python-format
-msgid "Failed to unplug vif %s"
-msgstr "Impossibile scollegare vif %s"
-
msgid "Failure prepping block device."
msgstr "Errore durante l'esecuzione preparatoria del dispositivo di blocco."
@@ -950,10 +844,6 @@ msgid "File %(file_path)s could not be found."
msgstr "Impossibile trovare il file %(file_path)s."
#, python-format
-msgid "File path %s not valid"
-msgstr "Percorso file %s non valido"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr ""
"L'IP fisso %(ip)s non è un indirizzo IP valido per la rete %(network_id)s."
@@ -1082,18 +972,6 @@ msgstr ""
msgid "Found no disk to snapshot."
msgstr "Non è stato trovato nessun disco per l'istantanea."
-#, python-format
-msgid "Found no network for bridge %s"
-msgstr "Non sono state trovate reti per il bridge %s"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "Sono state trovate reti non univoche per il bridge %s"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "Sono state trovate reti non univoche per name_label %s"
-
msgid "Guest does not have a console available."
msgstr "Guest non dispone di una console disponibile."
@@ -1122,9 +1000,6 @@ msgid "Host does not support guests with custom memory page sizes"
msgstr ""
"L'host non supporta guest con dimensioni pagina di memoria personalizzate"
-msgid "Host startup on XenServer is not supported."
-msgstr "L'avvio dell'host su XenServer non è supportato."
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
"Il driver hypervisor non supporta il metodo post_live_migration_at_source"
@@ -1356,10 +1231,6 @@ msgstr "L'istanza non è stata ridmensionata."
msgid "Instance hostname %(hostname)s is not a valid DNS name"
msgstr "Il nome host %(hostname)s dell'istanza non è un nome DNS valido"
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "L'istanza è già in modalità di ripristino: %s"
-
msgid "Instance is not a member of specified network"
msgstr "L'istanza non è un membro della rete specificata"
@@ -1380,11 +1251,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "Risorse di elaborazione insufficienti: %(reason)s."
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr ""
-"Memoria disponibile insufficiente sul nodo di calcolo per avviare %(uuid)s."
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "Impossibile trovare l'interfaccia %(interface)s."
@@ -1578,13 +1444,6 @@ msgid ""
msgstr ""
"Non è consentito creare un'interfaccia sulla rete esterna %(network_uuid)s"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr ""
-"L'immagine Kernel/Ramdisk è troppo grande: %(vdi_size)d bytes, massimo "
-"%(max_size)d byte"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1609,14 +1468,6 @@ msgstr ""
msgid "Keypair data is invalid: %(reason)s"
msgstr "I dati della coppia di chiavi non sono validi: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Il nome coppia di chiavi contiene caratteri non sicuri"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"Il nome coppia di chiavi deve essere una stringa compresa tra 1 e 255 "
-"caratteri"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Limiti supportati solo da vCenter 6.0 e successivi"
@@ -1649,9 +1500,6 @@ msgstr "Impossibile trovare l'indicatore %(marker)s."
msgid "Maximum number of floating IPs exceeded"
msgstr "Il numero massimo di IP mobili è stato superato"
-msgid "Maximum number of key pairs exceeded"
-msgstr "Il numero massimo di coppie di chiavi è stato superato"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "Il numero massimo di elementi metadati è stato superato %(allowed)d"
@@ -1682,12 +1530,6 @@ msgstr ""
"Impossibile trovare la metrica %(name)s sul nodo host compute %(host)s."
"%(node)s."
-msgid "Migrate Receive failed"
-msgstr "Migrazione di Receive non riuscita"
-
-msgid "Migrate Send failed"
-msgstr "Migrazione di Send non riuscita"
-
#, python-format
msgid "Migration %(id)s for server %(uuid)s is not live-migration."
msgstr ""
@@ -1736,10 +1578,6 @@ msgstr "Errore delle destinazioni di selezione della migrazione: %(reason)s"
msgid "Missing arguments: %s"
msgstr "Argomenti mancanti: %s"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "Manca la colonna %(table)s.%(column)s nella tabella cronologica"
-
msgid "Missing device UUID."
msgstr "Manca l'UUID del dispositivo."
@@ -1824,14 +1662,6 @@ msgid "Must not input both network_id and port_id"
msgstr "Non si deve immettere entrambi network_id e port_id"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"È necessario specificare connection_url, connection_username "
-"(facoltativamente) e connection_password per l'utilizzo di "
-"compute_driver=xenapi.XenAPIDriver"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1885,10 +1715,6 @@ msgstr "Nessuna associazione dispositivo di blocco con id %(id)s."
msgid "No Unique Match Found."
msgstr "Non è stata trovata nessuna corrispondenza univoca."
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "Nessuna agent-build associata all'id %(id)s."
-
msgid "No compute host specified"
msgstr "Nessun host di calcolo specificato"
@@ -1957,10 +1783,6 @@ msgstr "Nessun punto di montaggio trovato in %(root)s di %(image)s"
msgid "No operating system found in %s"
msgstr "Nessun sistema operativo rilevato in %s"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "Nessuna VDI principale trovata per %s"
-
msgid "No root disk defined."
msgstr "Nessun disco root definito"
@@ -1972,9 +1794,6 @@ msgstr ""
"Nessuna rete specifica era richiesta e nessuna è disponibile per il progetto "
"'%(project_id)s'."
-msgid "No suitable network for migrate"
-msgstr "Nessuna rete adatta per la migrazione"
-
msgid "No valid host found for cold migrate"
msgstr "Nessun host valido trovato per la migrazione a freddo"
@@ -2056,14 +1875,6 @@ msgstr "Uno o più host sono già nelle zone di disponibilità %s"
msgid "Only administrators may list deleted instances"
msgstr "Solo gli amministratori possono elencare le istanze eliminate"
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr ""
-"In questa funzione sono supportati solo gli SR basati su file (ext/NFS). SR "
-"%(uuid)s è di tipo %(type)s"
-
msgid "Origin header does not match this host."
msgstr "L'intestazione origine non corrisponde a questo host."
@@ -2106,10 +1917,6 @@ msgid "PCI device request %(requests)s failed"
msgstr "La richiesta del dispositivo PCI %(requests)s non è riuscita"
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "PIF %s non contiene l'indirizzo IP"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "Dimensione pagina %(pagesize)s non consentita su '%(against)s'"
@@ -2227,10 +2034,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "Quota superata, troppi server nel gruppo"
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "Quota superata: code=%(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr "la quota per il progetto %(project_id)s esiste, risorsa %(resource)s"
@@ -2261,18 +2064,6 @@ msgstr ""
"Il limite della quota %(limit)s per %(resource)s deve essere inferiore o "
"uguale a %(maximum)s."
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr "Raggiunto numero massimo di tentativi per scollegare VBD %s"
-
-msgid ""
-"Realtime policy needs vCPU(s) mask configured with at least 1 RT vCPU and 1 "
-"ordinary vCPU. See hw:cpu_realtime_mask or hw_cpu_realtime_mask"
-msgstr ""
-"La politica in tempo reale necessita della maschera vCPU(s) configurata con "
-"almeno 1 vCPU RT e 1 vCPU ordinaria. Vedere hw:cpu_realtime_mask o "
-"hw_cpu_realtime_mask"
-
msgid "Request body and URI mismatch"
msgstr "Il corpo della richiesta e l'URI non corrispondono"
@@ -2445,10 +2236,6 @@ msgid "Set admin password is not supported"
msgstr "L'impostazione della password admin non è supportata"
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "La tabella cronologia con il nome %(name)s esiste già."
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "La condivisione '%s' non è supportata"
@@ -2457,13 +2244,6 @@ msgid "Share level '%s' cannot have share configured"
msgstr ""
"Il livello di condivisione '%s' non può avere la condivisione configurata"
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"Riduzione del filesystem con resize2fs non riuscita, controllare se si "
-"dispone di spazio sufficiente sul proprio disco."
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "Impossibile trovare l'istantanea %(snapshot_id)s."
@@ -2491,12 +2271,6 @@ msgstr "La chiave di ordinamento fornita non è valida."
msgid "Specified fixed address not assigned to instance"
msgstr "L'indirizzo fisso specificato non è stato assegnato all'istanza"
-msgid "Specify `table_name` or `table` param"
-msgstr "Specificare il parametro `table_name` o `table`"
-
-msgid "Specify only one param `table_name` `table`"
-msgstr "Specificare solo un parametro `table_name` `table`"
-
msgid "Started"
msgstr "Avviato"
@@ -2567,9 +2341,6 @@ msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
msgstr ""
"Il numero di porte definite: %(ports)d è superiore al limite: %(quota)d"
-msgid "The only partition should be partition 1."
-msgstr "L'unica partizione dovrebbe essere la partizione 1."
-
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr ""
@@ -2635,49 +2406,13 @@ msgstr ""
"Non è possibile assegnare al volume lo stesso nome dispositivo assegnato al "
"dispositivo root %s"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"Sono presenti %(records)d record nella tabella '%(table_name)s' in cui la "
-"colonna uuid o instance_uuid è NULL. Eseguire nuovamente questo comando con "
-"l'opzione --delete dopo aver eseguito il backup dei dati necessari."
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"Sono presenti %(records)d record nella tabella '%(table_name)s' in cui la "
-"colonna uuid o instance_uuid è NULL. È necessario eliminarli manualmente "
-"prima della migrazione. Prendere in considerazione l'esecuzione del comando "
-"'nova-manage db null_instance_uuid_scan'."
-
msgid "There are not enough hosts available."
msgstr "Numero di host disponibili non sufficiente."
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"Ci sono ancora %(count)i record di versione non migrati. La migrazione non "
-"può continuare finché tutti i record di versione istanza non sono stati "
-"migrati al nuovo formato. Eseguire prima 'nova-manage db "
-"migrate_flavor_data'."
-
-#, python-format
msgid "There is no such action: %s"
msgstr "Non esiste alcuna azione simile: %s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr "Nessun record trovato in cui instance_uuid era NULL."
-
#, python-format
msgid ""
"This compute node's hypervisor is older than the minimum supported version: "
@@ -2686,10 +2421,6 @@ msgstr ""
"L' hypervisor del nodo di calcolo è più vecchio della versione minima "
"supportata: %(version)s."
-msgid "This domU must be running on the host specified by connection_url"
-msgstr ""
-"Questo domU deve essere in esecuzione sull'host specificato da connection_url"
-
msgid ""
"This method needs to be called with either networks=None and port_ids=None "
"or port_ids and networks as not none."
@@ -2709,10 +2440,6 @@ msgstr ""
"Questo servizio è più vecchio (v%(thisver)i) della versione minima (v"
"%(minver)i) del resto della distribuzione. Impossibile continuare."
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "Timeout in attesa che l'unità %s venga creata"
-
msgid "Timeout waiting for response from cell"
msgstr "Timeout in attesa di risposta dalla cella"
@@ -2757,12 +2484,6 @@ msgid "Unable to authenticate Ironic client."
msgstr "Impossibile autenticare il client Ironic."
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr ""
-"Impossibile contattare l'agent guest. La seguente chiamata è scaduta: "
-"%(method)s"
-
-#, python-format
msgid "Unable to convert image to %(format)s: %(exp)s"
msgstr "Impossibile convertire l'immagine in %(format)s: %(exp)s"
@@ -2771,14 +2492,6 @@ msgid "Unable to convert image to raw: %(exp)s"
msgstr "Impossibile convertire l'immagine in immagine non elaborata: %(exp)s"
#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "Impossibile distruggere VBD %s"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "Impossibile distruggere VDI %s"
-
-#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "Impossibile determinare il bus del disco per '%s'"
@@ -2787,22 +2500,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "Impossibile determinare il prefisso del disco per %s"
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr "Impossibile espellere %s dal pool; Non è stato trovato alcun master"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "Impossibile espellere %s dal pool; il pool non è vuoto"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "Impossibile trovare SR da VBD %s"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "Impossibile trovare SR da VDI %s"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "Impossibile trovare il file_ca: %s"
@@ -2821,9 +2518,6 @@ msgstr "Impossibile trovare la destinazione iSCSI"
msgid "Unable to find key_file : %s"
msgstr "Impossibile trovare il file_chiavi : %s"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "Impossibile trovare la root VBD/VDI per la VM"
-
msgid "Unable to find volume"
msgstr "Impossibile trovare il volume"
@@ -2833,22 +2527,6 @@ msgstr "Impossibile richiamare l'UUID host: /etc/machine-id non esiste"
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr "Impossibile richiamare l'UUID host: /etc/machine-id è vuoto"
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "Impossibile acquisire un record di VDI %s in"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "Impossibile introdurre VDI per SR %s"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "Impossibile introdurre VDI in SR %s"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "Impossibile unire %s nel pool"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2880,10 +2558,6 @@ msgstr ""
"Impossibile migrare l'istanza (%(instance_id)s) nell'host corrente "
"(%(host)s)."
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "Impossibile ottenere le informazioni sulla destinazione %s"
-
msgid "Unable to resize disk down."
msgstr "Impossibile ridurre il disco a dimensioni inferiori."
@@ -2893,13 +2567,6 @@ msgstr "Impossibile impostare la password sull'istanza"
msgid "Unable to shrink disk."
msgstr "Impossibile ridurre il disco."
-msgid "Unable to terminate instance."
-msgstr "Impossibile terminare l'istanza."
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "Impossibile scollegare VBD %s"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "Informazioni CPU non accettabili: %(reason)s"
@@ -2920,16 +2587,6 @@ msgstr ""
"più istanze."
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"Errore API non previsto. Segnalarlo a http://bugs.launchpad.net/nova/ e "
-"allegare il log Nova API, se possibile.\n"
-"%s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "Azione aggregato non prevista %s"
@@ -2989,9 +2646,6 @@ msgstr "Accantonamento tentato, ma l'immagine %s non è stata trovata"
msgid "Unsupported Content-Type"
msgstr "Tipo-contenuto non supportato"
-msgid "Upgrade DB using Essex release first."
-msgstr "Aggiorna il DB utilizzando prima la release Essex."
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr "Utente %(username)s non trovato nel file di password."
@@ -3015,26 +2669,6 @@ msgstr ""
#, python-format
msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"VDI %(vdi_ref)s è %(virtual_size)d byte che è maggiore della dimensione del "
-"flavor di %(new_disk_size)d byte."
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"VDI non trovato su SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun "
-"%(target_lun)s)"
-
-#, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr ""
-"I tentativi di unione VHD sono stati superati (%d), rinuncia in corso..."
-
-#, python-format
-msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
msgstr ""
@@ -3105,12 +2739,6 @@ msgstr ""
"Il volume imposta la dimensione del blocco ma l'hypervisor libvirt corrente "
"'%s' non supporta la dimensione del blocco personalizzata"
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr ""
-"Lo schema '%s' non è supportato in Python < 2.7.4, utilizzare http o https"
-
msgid "When resizing, instances must change flavor!"
msgstr "Durante il ridimensionamento, le istanze devono cambiare tipologia!"
@@ -3125,10 +2753,6 @@ msgstr ""
msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr "Metodo quota errato %(method)s utilizzato per la risorsa %(res)s"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr ""
-"Tipo di metodo hook non valido. Sono consentiti solo i tipi 'pre' e 'post'"
-
msgid "X-Forwarded-For is missing from request."
msgstr "X-Forwarded-For manca dalla richiesta."
@@ -3144,9 +2768,6 @@ msgstr "X-Metadata-Provider manca dalla richiesta."
msgid "X-Tenant-ID header is missing from request."
msgstr "L'intestazione X-Tenant-ID non è presente nella richiesta."
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "XAPI richiesto per il supporto di relax-xsm-sr-check=true"
-
msgid "You are not allowed to delete the image."
msgstr "Non è consentito eliminare l'immagine."
@@ -3175,16 +2796,6 @@ msgstr "Nessun IP mobile disponibile."
msgid "admin password can't be changed on existing disk"
msgstr "La password admin non può essere modificata sul disco esistente"
-msgid "aggregate deleted"
-msgstr "aggregato eliminato"
-
-msgid "aggregate in error"
-msgstr "aggregato in errore"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "assert_can_migrate non riuscito a causa di: %s"
-
msgid "cannot understand JSON"
msgstr "impossibile riconoscere JSON"
@@ -3248,9 +2859,6 @@ msgstr "immagine già montata"
msgid "instance %s is not running"
msgstr "l'istanza %s non è in esecuzione"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr "l'istanza ha un kernel o ramdisk ma non entrambi"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr "istanza è un argomento obbligatorio per utilizzare @refresh_cache"
@@ -3307,9 +2915,6 @@ msgstr "unità nbd %s non visualizzata"
msgid "nbd unavailable: module not loaded"
msgstr "nbd non disponibile: modulo non caricato"
-msgid "no hosts to remove"
-msgstr "nessun host da rimuovere"
-
#, python-format
msgid "no match found for %s"
msgstr "nessuna corrispondenza trovata per %s"
@@ -3381,9 +2986,6 @@ msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
"set_admin_password non è implementato da questo driver o istanza guest."
-msgid "setup in progress"
-msgstr "impostazione in corso"
-
#, python-format
msgid "snapshot for %s"
msgstr "istantanea per %s"
@@ -3400,9 +3002,6 @@ msgstr "troppe chiavi del corpo"
msgid "unpause not supported for vmwareapi"
msgstr "annullamento sospensione non supportato per vmwareapi"
-msgid "version should be an integer"
-msgstr "la versione deve essere un numero intero"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "vg %s deve essere il gruppo di volumi LVM"
@@ -3430,15 +3029,3 @@ msgid ""
msgstr ""
"Lo stato del volume '%(vol)s' deve essere 'in-use'. Attualmente lo stato è "
"'%(status)s'"
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake non dispone di un'implementazione per %s"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr ""
-"xenapi.fake non dispone di un'implementazione per %s o è stato chiamato con "
-"il numero errato di argomenti"
diff --git a/nova/locale/ja/LC_MESSAGES/nova.po b/nova/locale/ja/LC_MESSAGES/nova.po
index 6e2aace84d..1a3a0dfc82 100644
--- a/nova/locale/ja/LC_MESSAGES/nova.po
+++ b/nova/locale/ja/LC_MESSAGES/nova.po
@@ -13,7 +13,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -144,18 +144,6 @@ msgid "Affinity instance group policy was violated."
msgstr "Affinity インスタンスグループãƒãƒªã‚·ãƒ¼ã«é•åã—ã¾ã—ãŸ"
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "エージェントã¯ã€å‘¼ã³å‡ºã— %(method)s をサãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›ã‚“"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"ãƒã‚¤ãƒ‘ーãƒã‚¤ã‚¶ãƒ¼ %(hypervisor)s ã® OS %(os)s アーキテクãƒãƒ£ãƒ¼ "
-"%(architecture)s ã®ã‚¨ãƒ¼ã‚¸ã‚§ãƒ³ãƒˆãƒ“ルドãŒå­˜åœ¨ã—ã¾ã™ã€‚"
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "アグリゲート %(aggregate_id)s ã«ã¯æ—¢ã«ãƒ›ã‚¹ãƒˆ %(host)s ãŒã‚ã‚Šã¾ã™ã€‚"
@@ -174,13 +162,6 @@ msgstr ""
"ã‚Šã¾ã›ã‚“。"
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr ""
-"アグリゲート %(aggregate_id)s: アクション '%(action)s' ã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—"
-"ãŸ: %(reason)s。"
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "アグリゲート %(aggregate_name)s ã¯æ—¢ã«å­˜åœ¨ã—ã¾ã™ã€‚"
@@ -188,10 +169,6 @@ msgstr "アグリゲート %(aggregate_name)s ã¯æ—¢ã«å­˜åœ¨ã—ã¾ã™ã€‚"
msgid "Aggregate %s does not support empty named availability zone"
msgstr "アグリゲート %s ã¯ç©ºã®åå‰ã®ã‚¢ãƒ™ã‚¤ãƒ©ãƒ“リティーゾーンをサãƒãƒ¼ãƒˆã—ã¾ã›ã‚“"
-#, fuzzy, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "ホスト %(host)s カウントã®ç·è¨ˆãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
-
#, python-format
msgid "An invalid 'name' value was provided. The name must be: %(reason)s"
msgstr ""
@@ -377,12 +354,6 @@ msgstr "要求されたイメージが見つかりません"
msgid "Can not handle authentication request for %d credentials"
msgstr "%d èªè¨¼æƒ…å ±ã«é–¢ã™ã‚‹èªè¨¼è¦æ±‚を処ç†ã§ãã¾ã›ã‚“"
-msgid "Can't resize a disk to 0 GB."
-msgstr "ディスクã®ã‚µã‚¤ã‚ºã‚’ 0 GB ã«å¤‰æ›´ã™ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“。"
-
-msgid "Can't resize down ephemeral disks."
-msgstr "一時ディスクã®ã‚µã‚¤ã‚ºã‚’減らã™ã“ã¨ã¯ã§ãã¾ã›ã‚“。"
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr "インスタンス㮠libvirt 設定ã‹ã‚‰ãƒ«ãƒ¼ãƒˆãƒ‡ãƒã‚¤ã‚¹ã®ãƒ‘スをå–å¾—ã§ãã¾ã›ã‚“"
@@ -425,12 +396,6 @@ msgstr ""
"%s ã®è¦ªã®ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ãƒ—ールを検出ã§ãã¾ã›ã‚“。イメージをä¿å­˜ã™ã‚‹å ´æ‰€ã‚’決定ã§ãã¾"
"ã›ã‚“。"
-msgid "Cannot find SR of content-type ISO"
-msgstr "コンテンツタイプ㌠ISO ã® SR ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "VDI ã®èª­ã¿å–ã‚Š/書ãè¾¼ã¿ç”¨ã® SR ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
-
msgid "Cannot find image for rebuild"
msgstr "å†ä½œæˆç”¨ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
@@ -552,10 +517,6 @@ msgid "Connection to libvirt lost: %s"
msgstr "libvirt ã¨ã®æŽ¥ç¶šãŒå¤±ã‚ã‚Œã¾ã—ãŸ: %s"
#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "ホスト %(host)s ã§ãƒã‚¤ãƒ‘ーãƒã‚¤ã‚¶ãƒ¼ã¸ã®æŽ¥ç¶šãŒãŠã‹ã—ããªã£ã¦ã„ã¾ã™"
-
-#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
@@ -628,20 +589,9 @@ msgid "Default PBM policy is required if PBM is enabled."
msgstr "PBM ãŒæœ‰åŠ¹ã«ãªã£ã¦ã„ã‚‹å ´åˆã€ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆ PBM ãƒãƒªã‚·ãƒ¼ã¯å¿…é ˆã§ã™ã€‚"
#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "レコード %(records)d ãŒãƒ†ãƒ¼ãƒ–ル '%(table_name)s' ã‹ã‚‰å‰Šé™¤ã•ã‚Œã¾ã—ãŸã€‚"
-
-#, python-format
msgid "Device '%(device)s' not found."
msgstr "デãƒã‚¤ã‚¹ '%(device)s' ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr ""
-"指定ã•ã‚ŒãŸãƒ‡ãƒã‚¤ã‚¹ id %(id)s ã¯ãƒã‚¤ãƒ‘ーãƒã‚¤ã‚¶ãƒ¼ãƒãƒ¼ã‚¸ãƒ§ãƒ³ %(version)s ã§ã¯ã‚µ"
-"ãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã›ã‚“"
-
msgid "Device name contains spaces."
msgstr "デãƒã‚¤ã‚¹åã«ç©ºç™½ãŒå«ã¾ã‚Œã¦ã„ã¾ã™ã€‚"
@@ -653,18 +603,6 @@ msgid "Device type mismatch for alias '%s'"
msgstr "別å '%s' ã®ãƒ‡ãƒã‚¤ã‚¹ã‚¿ã‚¤ãƒ—ãŒä¸€è‡´ã—ã¾ã›ã‚“"
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-"%(table)s.%(column)s ã¨ã‚·ãƒ£ãƒ‰ãƒ¼ãƒ†ãƒ¼ãƒ–ル内ã®ã‚¿ã‚¤ãƒ—ãŒç•°ãªã‚Šã¾ã™: %(c_type)s "
-"%(shadow_c_type)s"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr "ディスクã«ã‚µã‚¤ã‚ºå¤‰æ›´ã§ããªã„ファイルシステムãŒå«ã¾ã‚Œã¦ã„ã¾ã™: %s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "ãƒ‡ã‚£ã‚¹ã‚¯å½¢å¼ %(disk_format)s ã¯å—ã‘付ã‘られã¾ã›ã‚“"
@@ -672,13 +610,6 @@ msgstr "ãƒ‡ã‚£ã‚¹ã‚¯å½¢å¼ %(disk_format)s ã¯å—ã‘付ã‘られã¾ã›ã‚“"
msgid "Disk info file is invalid: %(reason)s"
msgstr "ディスク情報ファイルãŒç„¡åŠ¹ã§ã™: %(reason)s"
-msgid "Disk must have only one partition."
-msgstr "ディスクã®ãƒ‘ーティション㯠1 ã¤ã®ã¿ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“。"
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr "ID ㌠%s ã®ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã«æŽ¥ç¶šã•ã‚ŒãŸãƒ‡ã‚£ã‚¹ã‚¯ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
-
#, python-format
msgid "Driver Error: %s"
msgstr "ドライãƒãƒ¼ã‚¨ãƒ©ãƒ¼: %s"
@@ -696,10 +627,6 @@ msgstr ""
"ビジョニング状態㯠'%(state)s' ã§ã™ã€‚"
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "エージェントã«å¯¾ã™ã‚‹ %(method)s ã®å‘¼ã³å‡ºã—中ã«ã€ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr "インスタンス %(instance_id)s ã®å¾©å…ƒä¸­ã®ã‚¨ãƒ©ãƒ¼: %(reason)s"
@@ -753,9 +680,6 @@ msgstr ""
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "リソースモニター %(monitor)s を作æˆã™ã‚‹ã¨ãã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ"
-msgid "Error: Agent is disabled"
-msgstr "エラー: エージェントã¯ç„¡åŠ¹ã«ãªã£ã¦ã„ã¾ã™"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr ""
@@ -788,10 +712,6 @@ msgstr "再試行の最大回数を超えました。%(reason)s"
msgid "Expected a uuid but received %(uuid)s."
msgstr "UUID ãŒå¿…è¦ã§ã™ãŒã€%(uuid)s ã‚’å—ã‘å–ã‚Šã¾ã—ãŸã€‚"
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "シャドーテーブルã«ä½™åˆ†ãªã‚«ãƒ©ãƒ  %(table)s.%(column)s ãŒã‚ã‚Šã¾ã™"
-
msgid "Extracting vmdk from OVA failed."
msgstr "OVA ã‹ã‚‰ã® vmdk ã®å–å¾—ã«å¤±æ•—ã—ã¾ã—ãŸã€‚"
@@ -824,10 +744,6 @@ msgstr ""
"ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ã‚¢ãƒ€ãƒ—ターデãƒã‚¤ã‚¹ã‚’ %(instance_uuid)s ã«æŽ¥ç¶šã§ãã¾ã›ã‚“ã§ã—ãŸ"
#, python-format
-msgid "Failed to create vif %s"
-msgstr "vif %s ã®ä½œæˆã«å¤±æ•—ã—ã¾ã—ãŸ"
-
-#, python-format
msgid "Failed to deploy instance: %(reason)s"
msgstr "インスタンスをデプロイã§ãã¾ã›ã‚“ã§ã—ãŸ: %(reason)s"
@@ -857,9 +773,6 @@ msgstr "パーティションã®ãƒžãƒƒãƒ”ングã«å¤±æ•—ã—ã¾ã—ãŸ: %s"
msgid "Failed to mount filesystem: %s"
msgstr "ファイルシステム %s ã®ãƒžã‚¦ãƒ³ãƒˆã«å¤±æ•—ã—ã¾ã—ãŸã€‚"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr "パススルー用㮠PCI デãƒã‚¤ã‚¹ã«é–¢ã™ã‚‹æƒ…å ±ã®è§£æžã«å¤±æ•—ã—ã¾ã—ãŸ"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "インスタンスã®é›»æºã‚ªãƒ•ã«å¤±æ•—ã—ã¾ã—ãŸ: %(reason)s"
@@ -869,14 +782,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "インスタンスã®é›»æºã‚ªãƒ³ã«å¤±æ•—ã—ã¾ã—ãŸ: %(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr ""
-"インスタンス %(instance_uuid)s 用㫠PCI デãƒã‚¤ã‚¹ %(id)s を準備ã§ãã¾ã›ã‚“ã§ã—"
-"ãŸ: %(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "インスタンス %(inst)s をプロビジョニングã§ãã¾ã›ã‚“ã§ã—ãŸ: %(reason)s"
@@ -909,9 +814,6 @@ msgstr "qemu-img info ã‚’ %(path)s ã«å¯¾ã—ã¦å®Ÿè¡Œã§ãã¾ã›ã‚“ã§ã—ãŸ: %
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr "%(instance)s ã§ç®¡ç†è€…パスワードã®è¨­å®šã«å¤±æ•—ã—ã¾ã—ãŸã€‚ç†ç”±: %(reason)s"
-msgid "Failed to spawn, rolling back"
-msgstr "èµ·å‹•ã«å¤±æ•—ã—ã¾ã—ãŸã€‚ロールãƒãƒƒã‚¯ã—ã¦ã„ã¾ã™"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "インスタンスを休止ã§ãã¾ã›ã‚“ã§ã—ãŸ: %(reason)s"
@@ -920,10 +822,6 @@ msgstr "インスタンスを休止ã§ãã¾ã›ã‚“ã§ã—ãŸ: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "インスタンスを削除ã§ãã¾ã›ã‚“ã§ã—ãŸ: %(reason)s"
-#, python-format
-msgid "Failed to unplug vif %s"
-msgstr "vif %s ã®å–り外ã—ã«å¤±æ•—ã—ã¾ã—ãŸ"
-
msgid "Failure prepping block device."
msgstr "ブロックデãƒã‚¤ã‚¹ã‚’準備ã§ãã¾ã›ã‚“ã§ã—ãŸ"
@@ -932,10 +830,6 @@ msgid "File %(file_path)s could not be found."
msgstr "ファイル %(file_path)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚"
#, python-format
-msgid "File path %s not valid"
-msgstr "ファイルパス %s ã¯ç„¡åŠ¹ã§ã™"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr ""
"Fixed IP %(ip)s ã¯ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ %(network_id)s ã®æœ‰åŠ¹ãª IP アドレスã§ã¯ã‚ã‚Šã¾"
@@ -1068,18 +962,6 @@ msgstr ""
msgid "Found no disk to snapshot."
msgstr "スナップショットã®ä½œæˆå¯¾è±¡ã®ãƒ‡ã‚£ã‚¹ã‚¯ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
-#, python-format
-msgid "Found no network for bridge %s"
-msgstr "ブリッジ %s ã«å¯¾ã™ã‚‹ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãŒå­˜åœ¨ã—ã¾ã›ã‚“。"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "ブリッジ %s ã«ã¤ã„ã¦ä¸€æ„ã§ãªã„ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸ"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "name_label %s ã«ã¤ã„ã¦ä¸€æ„ã§ãªã„ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸ"
-
msgid "Guest does not have a console available."
msgstr "ゲストã¯ã‚³ãƒ³ã‚½ãƒ¼ãƒ«ã‚’使用ã™ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“。"
@@ -1109,9 +991,6 @@ msgstr ""
"ホストãŒã‚«ã‚¹ã‚¿ãƒ ã®ãƒ¡ãƒ¢ãƒªãƒ¼ãƒšãƒ¼ã‚¸ã‚µã‚¤ã‚ºãŒæŒ‡å®šã•ã‚ŒãŸã‚²ã‚¹ãƒˆã‚’サãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›"
"ã‚“"
-msgid "Host startup on XenServer is not supported."
-msgstr "XenServer 上ã§ã®ãƒ›ã‚¹ãƒˆã®èµ·å‹•ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã›ã‚“。"
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
"ãƒã‚¤ãƒ‘ーãƒã‚¤ã‚¶ãƒ¼ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ãŒ post_live_migration_at_source メソッドをサãƒãƒ¼ãƒˆ"
@@ -1343,10 +1222,6 @@ msgstr "インスタンスã®ã‚µã‚¤ã‚ºå¤‰æ›´ãŒè¡Œã‚ã‚Œã¦ã„ã¾ã›ã‚“"
msgid "Instance hostname %(hostname)s is not a valid DNS name"
msgstr "インスタンスã®ãƒ›ã‚¹ãƒˆå %(hostname)s ã¯æœ‰åŠ¹ãª DNS åã§ã¯ã‚ã‚Šã¾ã›ã‚“。"
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "インスタンスã¯æ—¢ã«ãƒ¬ã‚¹ã‚­ãƒ¥ãƒ¼ãƒ¢ãƒ¼ãƒ‰ã§ã™: %s"
-
msgid "Instance is not a member of specified network"
msgstr "インスタンスã¯æŒ‡å®šã•ã‚ŒãŸãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ã®ãƒ¡ãƒ³ãƒãƒ¼ã§ã¯ã‚ã‚Šã¾ã›ã‚“"
@@ -1367,12 +1242,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "コンピュートリソースãŒä¸å分ã§ã™: %(reason)s。"
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr ""
-"コンピュートノードã«ã¯ %(uuid)s を開始ã™ã‚‹ãŸã‚ã®å分ãªç©ºãメモリーãŒã‚ã‚Šã¾ã›"
-"ん。"
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "インターフェース %(interface)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
@@ -1565,13 +1434,6 @@ msgstr ""
"外部ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ %(network_uuid)s ã§ã‚¤ãƒ³ã‚¿ãƒ¼ãƒ•ã‚§ãƒ¼ã‚¹ã‚’作æˆã™ã‚‹ã“ã¨ã¯è¨±å¯ã•ã‚Œ"
"ã¦ã„ã¾ã›ã‚“"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr ""
-"カーãƒãƒ«ã‚¤ãƒ¡ãƒ¼ã‚¸/RAM ディスクイメージãŒå¤§ãã™ãŽã¾ã™: %(vdi_size)d ãƒã‚¤ãƒˆã€æœ€"
-"大値㯠%(max_size)d ãƒã‚¤ãƒˆ"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1595,12 +1457,6 @@ msgstr "ユーザー %(user_id)s ã®ã‚­ãƒ¼ãƒšã‚¢ %(name)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã
msgid "Keypair data is invalid: %(reason)s"
msgstr "キーペアデータãŒç„¡åŠ¹ã§ã™: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "キーペアåã«å®‰å…¨ã§ã¯ãªã„文字ãŒå«ã¾ã‚Œã¦ã„ã¾ã™"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr "キーペアå㯠1 ã‹ã‚‰ 255 文字ã®é•·ã•ã®æ–‡å­—列ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "上é™ãŒé©ç”¨ã•ã‚Œã‚‹ã®ã¯ã€vCenter 6.0 以é™ã®å ´åˆã®ã¿ã§ã™ã€‚"
@@ -1633,9 +1489,6 @@ msgstr "マーカー %(marker)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚"
msgid "Maximum number of floating IPs exceeded"
msgstr "Floating IP ã®æœ€å¤§æ•°ã‚’超ãˆã¾ã—ãŸ"
-msgid "Maximum number of key pairs exceeded"
-msgstr "キーペアã®æœ€å¤§æ•°ã‚’超ãˆã¾ã—ãŸ"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "メタデータ項目ã®æœ€å¤§æ•°ãŒ %(allowed)d を超ãˆã¦ã„ã¾ã™"
@@ -1666,12 +1519,6 @@ msgstr ""
"コンピュートホストノード %(host)s.%(node)s ã§ã¯ã€ãƒ¡ãƒˆãƒªãƒƒã‚¯ %(name)s ã¯è¦‹ã¤ã‹"
"ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚"
-msgid "Migrate Receive failed"
-msgstr "マイグレーションã®å—ã‘å–ã‚ŠãŒå¤±æ•—ã—ã¾ã—ãŸ"
-
-msgid "Migrate Send failed"
-msgstr "マイグレーションã®é€ä¿¡ãŒå¤±æ•—ã—ã¾ã—ãŸ"
-
#, python-format
msgid "Migration %(id)s for server %(uuid)s is not live-migration."
msgstr ""
@@ -1725,10 +1572,6 @@ msgstr "マイグレーション先ã®é¸æŠžã‚¨ãƒ©ãƒ¼: %(reason)s"
msgid "Missing arguments: %s"
msgstr "引数 %s ãŒã‚ã‚Šã¾ã›ã‚“"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "シャドーテーブルã«ã‚«ãƒ©ãƒ  %(table)s.%(column)s ãŒã‚ã‚Šã¾ã›ã‚“"
-
msgid "Missing device UUID."
msgstr "デãƒã‚¤ã‚¹ UUID ãŒã‚ã‚Šã¾ã›ã‚“。"
@@ -1812,14 +1655,6 @@ msgid "Must not input both network_id and port_id"
msgstr "network_id 㨠port_id ã®ä¸¡æ–¹ã‚’入力ã—ãªã„ã§ãã ã•ã„"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"compute_driver=xenapi.XenAPIDriver を使用ã™ã‚‹ã«ã¯ã€connection_urlã€"
-"connection_username (オプション)ã€ãŠã‚ˆã³ connection_password を指定ã™ã‚‹å¿…è¦ãŒ"
-"ã‚ã‚Šã¾ã™"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1874,10 +1709,6 @@ msgstr "ID %(id)s ã‚’æŒã¤ãƒ–ロックデãƒã‚¤ã‚¹ãƒžãƒƒãƒ”ングãŒã‚ã‚Šã¾ã
msgid "No Unique Match Found."
msgstr "1 ã¤ã ã‘一致ã™ã‚‹ãƒ‡ãƒ¼ã‚¿ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "ID %(id)s ã«é–¢é€£ä»˜ã‘られãŸã‚¨ãƒ¼ã‚¸ã‚§ãƒ³ãƒˆãƒ“ルドã¯ã‚ã‚Šã¾ã›ã‚“。"
-
msgid "No compute host specified"
msgstr "コンピュートホストãŒæŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“"
@@ -1943,10 +1774,6 @@ msgstr "%(image)s ã® %(root)s ã«ãƒžã‚¦ãƒ³ãƒˆãƒã‚¤ãƒ³ãƒˆãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã
msgid "No operating system found in %s"
msgstr "%s 内ã«ã‚ªãƒšãƒ¬ãƒ¼ãƒ†ã‚£ãƒ³ã‚°ã‚·ã‚¹ãƒ†ãƒ ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "%s ã®ãƒ—ライマリー VDI ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
-
msgid "No root disk defined."
msgstr "ルートディスクãŒå®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
@@ -1958,9 +1785,6 @@ msgstr ""
"特定ã®ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãŒè¦æ±‚ã•ã‚Œãšã€ãƒ—ロジェクト '%(project_id)s' ã§åˆ©ç”¨å¯èƒ½ãª"
"ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãŒã‚ã‚Šã¾ã›ã‚“。"
-msgid "No suitable network for migrate"
-msgstr "マイグレーションã«é©åˆ‡ãªãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãŒã‚ã‚Šã¾ã›ã‚“"
-
msgid "No valid host found for cold migrate"
msgstr "コールドマイグレーションã«æœ‰åŠ¹ãªãƒ›ã‚¹ãƒˆãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
@@ -2042,14 +1866,6 @@ msgstr "アベイラビリティーゾーン %s ã« 1 ã¤ä»¥ä¸Šã®ãƒ›ã‚¹ãƒˆãŒæ—
msgid "Only administrators may list deleted instances"
msgstr "削除済ã¿ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã®ä¸€è¦§ã‚’å–å¾—ã§ãã‚‹ã®ã¯ç®¡ç†è€…ã®ã¿ã§ã™ã€‚"
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr ""
-"ã“ã®æ©Ÿèƒ½ã§ã‚µãƒãƒ¼ãƒˆã•ã‚Œã‚‹ã®ã¯ã€ãƒ•ã‚¡ã‚¤ãƒ«ãƒ™ãƒ¼ã‚¹ã® SR (ext/NFS) ã®ã¿ã§ã™ã€‚SR "
-"%(uuid)s ã®ã‚¿ã‚¤ãƒ—㯠%(type)s ã§ã™ã€‚"
-
msgid "Origin header does not match this host."
msgstr "オリジンヘッダーãŒã“ã®ãƒ›ã‚¹ãƒˆã«ä¸€è‡´ã—ã¾ã›ã‚“。"
@@ -2092,10 +1908,6 @@ msgid "PCI device request %(requests)s failed"
msgstr "PCI デãƒã‚¤ã‚¹è¦æ±‚ %(requests)s ãŒå¤±æ•—ã—ã¾ã—ãŸ"
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "PIF %s ã« IP アドレスãŒå«ã¾ã‚Œã¦ã„ã¾ã›ã‚“"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "ページサイズ %(pagesize)s 㯠\"%(against)s\" ã«å¯¾ã—ã¦ç¦æ­¢ã•ã‚Œã¦ã„ã¾ã™"
@@ -2214,10 +2026,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "クォータを超éŽã—ã¾ã—ãŸã€‚グループ内ã®ã‚µãƒ¼ãƒãƒ¼ãŒå¤šã™ãŽã¾ã™"
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "クォータを超éŽã—ã¾ã—ãŸ: code=%(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr ""
"プロジェクト %(project_id)sã€ãƒªã‚½ãƒ¼ã‚¹ %(resource)s ã®ã‚¯ã‚©ãƒ¼ã‚¿ãŒå­˜åœ¨ã—ã¾ã™"
@@ -2249,18 +2057,6 @@ msgstr ""
"%(resource)s ã®ã‚¯ã‚©ãƒ¼ã‚¿ä¸Šé™ %(limit)s ã¯ã€%(maximum)s 以下ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›"
"ん。"
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr "VBD %s ã®å–り外ã—ã®æœ€å¤§è©¦è¡Œå›žæ•°ã«é”ã—ã¾ã—ãŸ"
-
-msgid ""
-"Realtime policy needs vCPU(s) mask configured with at least 1 RT vCPU and 1 "
-"ordinary vCPU. See hw:cpu_realtime_mask or hw_cpu_realtime_mask"
-msgstr ""
-"リアルタイムãƒãƒªã‚·ãƒ¼ã§ã¯ã€1 ã¤ä»¥ä¸Šã® RT vCPU 㨠1 ã¤ã®é€šå¸¸ã® vCPU を使用ã—㦠"
-"vCPU ã®ãƒžã‚¹ã‚¯ã‚’設定ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚hw:cpu_realtime_mask ã¾ãŸã¯ "
-"hw_cpu_realtime_mask ã‚’å‚ç…§ã—ã¦ãã ã•ã„"
-
msgid "Request body and URI mismatch"
msgstr "リクエスト本文㨠URI ã®ä¸ä¸€è‡´"
@@ -2433,10 +2229,6 @@ msgid "Set admin password is not supported"
msgstr "設定ã•ã‚ŒãŸç®¡ç†è€…パスワードãŒã‚µãƒãƒ¼ãƒˆã•ã‚Œã¾ã›ã‚“"
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "åå‰ãŒ %(name)s ã®ã‚·ãƒ£ãƒ‰ãƒ¼ãƒ†ãƒ¼ãƒ–ルã¯æ—¢ã«å­˜åœ¨ã—ã¾ã™ã€‚"
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "シェア '%s' ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¾ã›ã‚“"
@@ -2444,13 +2236,6 @@ msgstr "シェア '%s' ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¾ã›ã‚“"
msgid "Share level '%s' cannot have share configured"
msgstr "シェアレベル '%s' ã«è¨­å®šã•ã‚ŒãŸã‚·ã‚§ã‚¢ãŒã‚ã‚Šã¾ã›ã‚“"
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"resize2fs ã§ãƒ•ã‚¡ã‚¤ãƒ«ã‚·ã‚¹ãƒ†ãƒ ã®ã‚µã‚¤ã‚ºã‚’縮å°ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚ディスク上ã«å分"
-"ãªç©ºã容é‡ãŒã‚ã‚‹ã‹ã©ã†ã‹ã‚’確èªã—ã¦ãã ã•ã„。"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "スナップショット %(snapshot_id)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚"
@@ -2476,14 +2261,6 @@ msgstr "指定ã•ã‚ŒãŸã‚½ãƒ¼ãƒˆã‚­ãƒ¼ãŒç„¡åŠ¹ã§ã—ãŸã€‚"
msgid "Specified fixed address not assigned to instance"
msgstr "指定ã•ã‚ŒãŸå›ºå®šã‚¢ãƒ‰ãƒ¬ã‚¹ã¯ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã«å‰²ã‚Šå½“ã¦ã‚‰ã‚Œã¦ã„ã¾ã›ã‚“"
-msgid "Specify `table_name` or `table` param"
-msgstr "'table_name' ã¾ãŸã¯ 'table' ã®ãƒ‘ラメーターを指定ã—ã¦ãã ã•ã„"
-
-msgid "Specify only one param `table_name` `table`"
-msgstr ""
-"'table_name' ã¾ãŸã¯ 'table' ã®ãƒ‘ラメーターã®ã„ãšã‚Œã‹ 1 ã¤ã®ã¿ã‚’指定ã—ã¦ãã ã•"
-"ã„"
-
msgid "Started"
msgstr "開始済ã¿"
@@ -2554,9 +2331,6 @@ msgstr ""
msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
msgstr "定義ã—ãŸãƒãƒ¼ãƒˆ %(ports)d ã®æ•°ãŒä¸Šé™ %(quota)d を超ãˆã¦ã„ã¾ã™"
-msgid "The only partition should be partition 1."
-msgstr "唯一ã®ãƒ‘ーティションã¯ãƒ‘ーティション 1 ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“。"
-
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr "指定ã•ã‚ŒãŸ RNG デãƒã‚¤ã‚¹ãƒ‘ス (%(path)s) ãŒãƒ›ã‚¹ãƒˆä¸Šã«ã‚ã‚Šã¾ã›ã‚“。"
@@ -2620,49 +2394,13 @@ msgid ""
msgstr ""
"ボリュームã«ãƒ«ãƒ¼ãƒˆãƒ‡ãƒã‚¤ã‚¹ %s ã¨åŒã˜ãƒ‡ãƒã‚¤ã‚¹åを割り当ã¦ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"uuid 列ã¾ãŸã¯ instance_uuid 列ãŒãƒŒãƒ«ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ %(records)d ãŒãƒ†ãƒ¼ãƒ–ル "
-"'%(table_name)s' ã«ã‚ã‚Šã¾ã™ã€‚å¿…è¦ãªãƒ‡ãƒ¼ã‚¿ã‚’ãƒãƒƒã‚¯ã‚¢ãƒƒãƒ—ã—ãŸå¾Œã§ã€--delete オ"
-"プションを指定ã—ã¦ã“ã®ã‚³ãƒžãƒ³ãƒ‰ã‚’å†åº¦å®Ÿè¡Œã—ã¦ãã ã•ã„。"
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"uuid 列ã¾ãŸã¯ instance_uuid 列ãŒãƒŒãƒ«ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ %(records)d ãŒãƒ†ãƒ¼ãƒ–ル "
-"'%(table_name)s' ã«ã‚ã‚Šã¾ã™ã€‚マイグレーションを行ã†å‰ã«ã€ã“れらを手動ã§ã‚¯ãƒªãƒ¼"
-"ンアップã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚ 'nova-manage db null_instance_uuid_scan' コマン"
-"ドã®å®Ÿè¡Œã‚’検討ã—ã¦ãã ã•ã„。"
-
msgid "There are not enough hosts available."
msgstr "使用å¯èƒ½ãªãƒ›ã‚¹ãƒˆãŒä¸è¶³ã—ã¦ã„ã¾ã™ã€‚"
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"ã¾ã ãƒžã‚¤ã‚°ãƒ¬ãƒ¼ã‚·ãƒ§ãƒ³ãŒè¡Œã‚ã‚Œã¦ã„ãªã„フレーãƒãƒ¼ãƒ¬ã‚³ãƒ¼ãƒ‰ã®ä»¶æ•°ãŒ %(count)i ã‚ã‚Š"
-"ã¾ã™ã€‚ã™ã¹ã¦ã®ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã®ãƒ•ãƒ¬ãƒ¼ãƒãƒ¼ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒæ–°å½¢å¼ã«ç§»è¡Œã™ã‚‹ã¾ã§ã€ãƒžã‚¤ã‚°"
-"レーションを継続ã™ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“。最åˆã« 'nova-manage db "
-"migrate_flavor_data' を実行ã—ã¦ãã ã•ã„。"
-
-#, python-format
msgid "There is no such action: %s"
msgstr "ã“ã®ã‚ˆã†ãªã‚¢ã‚¯ã‚·ãƒ§ãƒ³ã¯ã‚ã‚Šã¾ã›ã‚“: %s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr "instance_uuid ãŒãƒŒãƒ«ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ã¯ã‚ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚"
-
#, python-format
msgid ""
"This compute node's hypervisor is older than the minimum supported version: "
@@ -2671,11 +2409,6 @@ msgstr ""
"ã“ã®ã‚³ãƒ³ãƒ”ュートノードã®ãƒã‚¤ãƒ‘ーãƒã‚¤ã‚¶ãƒ¼ãŒã‚µãƒãƒ¼ãƒˆã•ã‚Œã‚‹æœ€å°ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚ˆã‚Šã‚‚"
"å¤ããªã£ã¦ã„ã¾ã™: %(version)s。"
-msgid "This domU must be running on the host specified by connection_url"
-msgstr ""
-"ã“ã® domU ã¯ã€connection_url ã§æŒ‡å®šã•ã‚ŒãŸãƒ›ã‚¹ãƒˆä¸Šã§å®Ÿè¡Œã•ã‚Œã¦ã„ã‚‹å¿…è¦ãŒã‚ã‚Šã¾"
-"ã™"
-
msgid ""
"This method needs to be called with either networks=None and port_ids=None "
"or port_ids and networks as not none."
@@ -2695,10 +2428,6 @@ msgstr ""
"ã“ã®ã‚µãƒ¼ãƒ“スãŒå®Ÿè£…環境ã®æ®‹ã‚Šã®éƒ¨åˆ†ã®æœ€å° (v%(minver)i) ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã‚ˆã‚Šã‚‚å¤ã "
"(v%(thisver)i) ãªã£ã¦ã„ã¾ã™ã€‚処ç†ã‚’継続ã§ãã¾ã›ã‚“。"
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "デãƒã‚¤ã‚¹ %s ãŒä½œæˆã•ã‚Œã‚‹ã®ã‚’å¾…ã£ã¦ã„ã‚‹éš›ã«ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã«ãªã‚Šã¾ã—ãŸ"
-
msgid "Timeout waiting for response from cell"
msgstr "セルã‹ã‚‰ã®å¿œç­”を待機中ã«ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã«ãªã‚Šã¾ã—ãŸ"
@@ -2742,12 +2471,6 @@ msgid "Unable to authenticate Ironic client."
msgstr "Ironic クライアントをèªè¨¼ã§ãã¾ã›ã‚“。"
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr ""
-"ゲストエージェントã«æŽ¥ç¶šã§ãã¾ã›ã‚“。次ã®å‘¼ã³å‡ºã—ãŒã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã«ãªã‚Šã¾ã—ãŸ: "
-"%(method)s"
-
-#, python-format
msgid "Unable to convert image to %(format)s: %(exp)s"
msgstr "イメージを %(format)s ã«å¤‰æ›ã§ãã¾ã›ã‚“: %(exp)s"
@@ -2756,14 +2479,6 @@ msgid "Unable to convert image to raw: %(exp)s"
msgstr "イメージを raw å½¢å¼ã«å¤‰æ›ã§ãã¾ã›ã‚“: %(exp)s"
#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "VBD %s を削除ã§ãã¾ã›ã‚“"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "VDI %s を破棄ã§ãã¾ã›ã‚“"
-
-#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "ディスク '%s' ã®ãƒã‚¹ã‚’判別ã§ãã¾ã›ã‚“"
@@ -2772,22 +2487,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "%s ã®ãƒ‡ã‚£ã‚¹ã‚¯ãƒ—レフィックスを判別ã§ãã¾ã›ã‚“"
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr "プールã‹ã‚‰ %s を削除ã§ãã¾ã›ã‚“。マスターãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "プールã‹ã‚‰ %s を削除ã§ãã¾ã›ã‚“。プールã¯ç©ºã§ã¯ã‚ã‚Šã¾ã›ã‚“"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "VBD %s ã‹ã‚‰ SR ã‚’å–å¾—ã§ãã¾ã›ã‚“。"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "VDI %s ã‹ã‚‰ SR ã‚’å–å¾—ã§ãã¾ã›ã‚“"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "ca_file ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“: %s"
@@ -2806,9 +2505,6 @@ msgstr "iSCSI ターゲットãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
msgid "Unable to find key_file : %s"
msgstr "key_file %s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "VM ã®ãƒ«ãƒ¼ãƒˆ VBD/VDI ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
-
msgid "Unable to find volume"
msgstr "ボリュームãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
@@ -2818,22 +2514,6 @@ msgstr " ホスト㮠UUID ã‚’å–å¾—ã§ãã¾ã›ã‚“: /etc/machine-id ãŒå­˜åœ¨ã
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr "ホスト㮠UUID ãŒå–å¾—ã§ãã¾ã›ã‚“: /etc/machine-id ãŒç©ºã§ã™"
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "VDI %s ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ã‚’å–å¾—ã§ãã¾ã›ã‚“。"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "SR %s 㧠VDI を実装ã§ãã¾ã›ã‚“。"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "SR %s 㧠VDI を実装ã§ãã¾ã›ã‚“。"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "プール内㮠%s を追加ã§ãã¾ã›ã‚“"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2864,10 +2544,6 @@ msgstr ""
"インスタンス (%(instance_id)s) ã‚’ç¾åœ¨ã¨åŒã˜ãƒ›ã‚¹ãƒˆ (%(host)s) ã«ãƒžã‚¤ã‚°ãƒ¬ãƒ¼ã‚·ãƒ§"
"ンã™ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“。"
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "ターゲットã®æƒ…å ± %s ã‚’å–å¾—ã§ãã¾ã›ã‚“"
-
msgid "Unable to resize disk down."
msgstr "ディスクã®ã‚µã‚¤ã‚ºã‚’縮å°ã™ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“。"
@@ -2877,14 +2553,6 @@ msgstr "インスタンスã«ãƒ‘スワードを設定ã§ãã¾ã›ã‚“"
msgid "Unable to shrink disk."
msgstr "ディスクを縮å°ã§ãã¾ã›ã‚“。"
-#, fuzzy
-msgid "Unable to terminate instance."
-msgstr "インスタンスを強制終了ã§ãã¾ã›ã‚“。"
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "VBD %s ã®å–り外ã—ã«å¤±æ•—ã—ã¾ã—ãŸã€‚"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "指定ã§ããªã„ CPU 情報: %(reason)s"
@@ -2905,16 +2573,6 @@ msgstr ""
"ã¾ã‚Œã¦ã„ã¾ã™ã€‚"
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"想定ã—ãªã„ API エラーãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚http://bugs.launchpad.net/nova/ ã§ã“れを"
-"報告ã—ã¦ã€å¯èƒ½ãªå ´åˆã¯ Nova API ログを添付ã—ã¦ãã ã•ã„。\n"
-"%s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "想定ã—ãªã„アグリゲートã®ã‚¢ã‚¯ã‚·ãƒ§ãƒ³ %s"
@@ -2974,9 +2632,6 @@ msgstr "復元ãŒè©¦è¡Œã•ã‚Œã¾ã—ãŸãŒã€ã‚¤ãƒ¡ãƒ¼ã‚¸ %s ãŒè¦‹ã¤ã‹ã‚Šã¾ã
msgid "Unsupported Content-Type"
msgstr "サãƒãƒ¼ãƒˆã•ã‚Œãªã„ Content-Type"
-msgid "Upgrade DB using Essex release first."
-msgstr "最åˆã« Essex リリースを使用ã—㦠DB をアップグレードã—ã¾ã™ã€‚"
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr "パスワードファイルã«ãƒ¦ãƒ¼ã‚¶ãƒ¼ %(username)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
@@ -2998,25 +2653,6 @@ msgstr "åŒã˜ãƒªã‚¯ã‚¨ã‚¹ãƒˆå†…ã§ç•°ãªã‚‹ block_device_mapping 指定ã¯ä½¿ç”
#, python-format
msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"VDI %(vdi_ref)s 㯠%(virtual_size)d ãƒã‚¤ãƒˆã§ã™ã€‚ã“ã‚Œã¯ã€ãƒ•ãƒ¬ãƒ¼ãƒãƒ¼ã®ã‚µã‚¤ã‚ºã§"
-"ã‚ã‚‹ %(new_disk_size)d ãƒã‚¤ãƒˆã‚’超ãˆã¦ã„ã¾ã™ã€‚"
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"SR %(sr)s 㧠VDI ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ (vdi_uuid %(vdi_uuid)sã€target_lun "
-"%(target_lun)s)"
-
-#, fuzzy, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr "VHD çµ±åˆã®è©¦è¡Œæ™‚ã« (%d) を超éŽã—ãŸãŸã‚ã€ä¸­æ­¢ã—ã¾ã™..."
-
-#, python-format
-msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
msgstr ""
@@ -3087,13 +2723,6 @@ msgstr ""
"ボリュームã«ã‚ˆã£ã¦ãƒ–ロックサイズãŒè¨­å®šã•ã‚Œã¾ã™ãŒã€ç¾åœ¨ã® libvirt ãƒã‚¤ãƒ‘ーãƒã‚¤"
"ザー '%s' ã¯ã‚«ã‚¹ã‚¿ãƒ ãƒ–ロックサイズをサãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›ã‚“"
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr ""
-"Python < 2.7.4 ã§ã¯ã‚¹ã‚­ãƒ¼ãƒ  '%s' ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¾ã›ã‚“。http ã¾ãŸã¯https を使"
-"用ã—ã¦ãã ã•ã„"
-
msgid "When resizing, instances must change flavor!"
msgstr "サイズ変更ã®éš›ã¯ã€ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã®ãƒ•ãƒ¬ãƒ¼ãƒãƒ¼ã‚’変更ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
@@ -3109,11 +2738,6 @@ msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr ""
"リソース %(res)s ã§ä½¿ç”¨ã•ã‚Œã‚‹ã‚¯ã‚©ãƒ¼ã‚¿ãƒ¡ã‚½ãƒƒãƒ‰ %(method)s ãŒæ­£ã—ãã‚ã‚Šã¾ã›ã‚“"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr ""
-"フックメソッドã®ã‚¿ã‚¤ãƒ—ãŒæ­£ã—ãã‚ã‚Šã¾ã›ã‚“。タイプ「preã€ãŠã‚ˆã³ã€Œpostã€ã®ã¿ãŒè¨±"
-"å¯ã•ã‚Œã¦ã„ã¾ã™"
-
msgid "X-Forwarded-For is missing from request."
msgstr "リクエスト㫠X-Forwarded-For ãŒã‚ã‚Šã¾ã›ã‚“。"
@@ -3129,9 +2753,6 @@ msgstr "リクエスト㫠X-Metadata-Provider ãŒã‚ã‚Šã¾ã›ã‚“。"
msgid "X-Tenant-ID header is missing from request."
msgstr "リクエスト㫠X-Tenant-ID ヘッダーãŒã‚ã‚Šã¾ã›ã‚“。"
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "relax-xsm-sr-check=true をサãƒãƒ¼ãƒˆã™ã‚‹ XAPI ãŒå¿…è¦ã§ã™"
-
msgid "You are not allowed to delete the image."
msgstr "ã“ã®ã‚¤ãƒ¡ãƒ¼ã‚¸ã®å‰Šé™¤ã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
@@ -3158,16 +2779,6 @@ msgstr "使用å¯èƒ½ãª Floating IP ã¯ã‚ã‚Šã¾ã›ã‚“。"
msgid "admin password can't be changed on existing disk"
msgstr "既存ã®ãƒ‡ã‚£ã‚¹ã‚¯ä¸Šã§ç®¡ç†è€…パスワードを変更ã™ã‚‹ã“ã¨ã¯ã§ãã¾ã›ã‚“"
-msgid "aggregate deleted"
-msgstr "アグリゲートãŒå‰Šé™¤ã•ã‚Œã¾ã—ãŸ"
-
-msgid "aggregate in error"
-msgstr "アグリゲートã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "assert_can_migrate ãŒå¤±æ•—ã—ã¾ã—ãŸ: ç†ç”± %s"
-
msgid "cannot understand JSON"
msgstr "JSON を解釈ã§ãã¾ã›ã‚“"
@@ -3231,11 +2842,6 @@ msgstr "イメージã¯æ—¢ã«ãƒžã‚¦ãƒ³ãƒˆã•ã‚Œã¦ã„ã¾ã™"
msgid "instance %s is not running"
msgstr "インスタンス %s ã¯å®Ÿè¡Œã•ã‚Œã¦ã„ã¾ã›ã‚“"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr ""
-"インスタンスã«ã¯ã‚«ãƒ¼ãƒãƒ«ãƒ‡ã‚£ã‚¹ã‚¯ã¨ RAM ディスクã®ä¸€æ–¹ã¯ã‚ã‚Šã¾ã™ãŒã€ä¸¡æ–¹ã¯ã‚ã‚Š"
-"ã¾ã›ã‚“"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr "@refresh_cache を使用ã™ã‚‹å ´åˆã€ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã¯å¿…é ˆã®å¼•æ•°ã§ã™"
@@ -3292,9 +2898,6 @@ msgstr "nbd デãƒã‚¤ã‚¹ %s ãŒå‡ºç¾ã—ã¾ã›ã‚“"
msgid "nbd unavailable: module not loaded"
msgstr "nbd ãŒä½¿ç”¨ä¸å¯ã§ã™: モジュールãŒãƒ­ãƒ¼ãƒ‰ã•ã‚Œã¦ã„ã¾ã›ã‚“"
-msgid "no hosts to remove"
-msgstr "削除ã™ã‚‹ãƒ›ã‚¹ãƒˆãŒã‚ã‚Šã¾ã›ã‚“"
-
#, python-format
msgid "no match found for %s"
msgstr "%s ã«åˆè‡´ã™ã‚‹ã‚‚ã®ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“"
@@ -3367,9 +2970,6 @@ msgstr ""
"set_admin_password ã¯ã€ã“ã®ãƒ‰ãƒ©ã‚¤ãƒãƒ¼ã¾ãŸã¯ã‚²ã‚¹ãƒˆã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ã§ã¯å®Ÿè£…ã•ã‚Œã¦ã„"
"ã¾ã›ã‚“。"
-msgid "setup in progress"
-msgstr "セットアップãŒé€²è¡Œä¸­ã§ã™"
-
#, python-format
msgid "snapshot for %s"
msgstr "%s ã®ã‚¹ãƒŠãƒƒãƒ—ショット"
@@ -3386,9 +2986,6 @@ msgstr "本文ã«ã‚­ãƒ¼ãŒå¤šã™ãŽã¾ã™"
msgid "unpause not supported for vmwareapi"
msgstr "vmwareapi ã§ã¯ä¸€æ™‚åœæ­¢è§£é™¤ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã›ã‚“"
-msgid "version should be an integer"
-msgstr "ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã¯æ•´æ•°ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "vg %s 㯠LVM ボリュームグループã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
@@ -3414,13 +3011,3 @@ msgid ""
msgstr ""
"ボリューム '%(vol)s' ã®çŠ¶æ³ã¯ã€Œä½¿ç”¨ä¸­ã€ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“ãŒã€ç¾åœ¨ã®çŠ¶æ³ã¯ "
"'%(status)s' ã§ã™"
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake ã«ã¯ %s ãŒå®Ÿè£…ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr "xenapi.fake ã« %s ã®å®Ÿè£…ãŒãªã„ã‹ã€å¼•æ•°ã®æ•°ãŒèª¤ã£ã¦ã„ã¾ã™ã€‚"
diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova.po b/nova/locale/ko_KR/LC_MESSAGES/nova.po
index 577d044740..11197b6aee 100644
--- a/nova/locale/ko_KR/LC_MESSAGES/nova.po
+++ b/nova/locale/ko_KR/LC_MESSAGES/nova.po
@@ -15,7 +15,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -146,18 +146,6 @@ msgid "Affinity instance group policy was violated."
msgstr "ì„ í˜¸ë„ ì¸ìŠ¤í„´ìŠ¤ 그룹 ì •ì±…ì„ ìœ„ë°˜í–ˆìŠµë‹ˆë‹¤. "
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "ì—ì´ì „트가 í˜¸ì¶œì„ ì§€ì›í•˜ì§€ ì•ŠìŒ: %(method)s"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"하ì´í¼ë°”ì´ì € %(hypervisor)s OS %(os)s 아키í…처%(architecture)sì´(ê°€) 있는 ì—"
-"ì´ì „트 빌드가 존재합니다."
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "%(aggregate_id)s ì§‘í•©ì— ì´ë¯¸ %(host)s 호스트가 있습니다. "
@@ -175,12 +163,6 @@ msgstr ""
"%(aggregate_id)s ì§‘í•©ì— %(metadata_key)s 키를 갖는 메타ë°ì´í„°ê°€ 없습니다. "
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr ""
-"%(aggregate_id)s 집합: '%(action)s' 조치로 ë‹¤ìŒ ì˜¤ë¥˜ê°€ ë°œìƒí•¨: %(reason)s."
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "%(aggregate_name)s ì§‘í•©ì´ ì´ë¯¸ 존재합니다. "
@@ -189,10 +171,6 @@ msgid "Aggregate %s does not support empty named availability zone"
msgstr "%s 집합ì—ì„œ ì´ë¦„ ì§€ì •ëœ ë¹„ì–´ 있는 가용 êµ¬ì—­ì„ ì§€ì›í•˜ì§€ ì•ŠìŒ"
#, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "%(host)s í˜¸ìŠ¤íŠ¸ì— ëŒ€í•œ ì§‘í•©ì„ ì°¾ì„ ìˆ˜ 없습니다. "
-
-#, python-format
msgid "An invalid 'name' value was provided. The name must be: %(reason)s"
msgstr ""
"올바르지 ì•Šì€ 'name' ê°’ì´ ì œê³µë˜ì—ˆìŠµë‹ˆë‹¤. ì´ë¦„ì€ %(reason)sì´ì–´ì•¼ 합니다."
@@ -369,12 +347,6 @@ msgstr "ìš”ì²­ëœ ì´ë¯¸ì§€ë¥¼ ì°¾ì„ ìˆ˜ ì—†ìŒ"
msgid "Can not handle authentication request for %d credentials"
msgstr "%d ì‹ ìž„ ì •ë³´ì— ëŒ€í•œ ì¸ì¦ 정보를 처리할 수 ì—†ìŒ"
-msgid "Can't resize a disk to 0 GB."
-msgstr "ë””ìŠ¤í¬ í¬ê¸°ë¥¼ 0GBë¡œ ì¡°ì •í•  수 없습니다."
-
-msgid "Can't resize down ephemeral disks."
-msgstr "ephemeral 디스í¬ì˜ í¬ê¸°ë¥¼ ì¤„ì¼ ìˆ˜ 없습니다."
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr "ì¸ìŠ¤í„´ìŠ¤ libvirt 구성ì—ì„œ 루트 디바ì´ìŠ¤ 경로를 검색할 수 ì—†ìŒ"
@@ -415,12 +387,6 @@ msgstr ""
"%s;ì˜ ìƒìœ„ 스토리지 í’€ì„ íŒë³„í•  수 없습니다. ì´ë¯¸ì§€ 저장 위치를 íŒë³„í•  수 ì—†"
"습니다."
-msgid "Cannot find SR of content-type ISO"
-msgstr "컨í…츠 유형 ISOì˜ SRì„ ì°¾ì„ ìˆ˜ ì—†ìŒ"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "VDI를 ì½ê¸°/쓰기할 SRì„ ì°¾ì„ ìˆ˜ 없습니다. "
-
msgid "Cannot find image for rebuild"
msgstr "다시 빌드할 ì´ë¯¸ì§€ë¥¼ ì°¾ì„ ìˆ˜ ì—†ìŒ"
@@ -543,10 +509,6 @@ msgid "Connection to libvirt lost: %s"
msgstr "libvirt 연결 유실: %s"
#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "하ì´í¼ë°”ì´ì € ì—°ê²°ì´ í˜¸ìŠ¤íŠ¸ì—ì„œ ëŠê²¼ìŠµë‹ˆë‹¤: %(host)s"
-
-#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
@@ -617,10 +579,6 @@ msgid "Default PBM policy is required if PBM is enabled."
msgstr "PBMì„ ì‚¬ìš©í•˜ëŠ” 경우 기본 PBM ì •ì±…ì´ í•„ìš”í•©ë‹ˆë‹¤."
#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "í…Œì´ë¸” '%(table_name)s'ì—ì„œ %(records)dê°œì˜ ë ˆì½”ë“œë¥¼ 삭제했습니다."
-
-#, python-format
msgid "Device '%(device)s' not found."
msgstr "'%(device)s' 디바ì´ìŠ¤ë¥¼ ì°¾ì„ ìˆ˜ 없습니다."
@@ -628,11 +586,6 @@ msgstr "'%(device)s' 디바ì´ìŠ¤ë¥¼ ì°¾ì„ ìˆ˜ 없습니다."
msgid "Device detach failed for %(device)s: %(reason)s"
msgstr "장치 해제가 %(device)s: %(reason)s ë•Œë¬¸ì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤."
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr "ì§€ì •ëœ ë””ë°”ì´ìŠ¤ ID %(id)sì€(는) 하ì´í¼ë°”ì´ì € 버전 %(version)sìž„"
-
msgid "Device name contains spaces."
msgstr "장치 ì´ë¦„ì— ê³µë°±ì´ ìžˆìŠµë‹ˆë‹¤."
@@ -644,18 +597,6 @@ msgid "Device type mismatch for alias '%s'"
msgstr "'%s' ë³„ëª…ì˜ ë””ë°”ì´ìŠ¤ ìœ í˜•ì´ ì¼ì¹˜í•˜ì§€ ì•ŠìŒ"
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-"%(table)s.%(column)s ë° ìƒˆë„ìš° í…Œì´ë¸”ì—ì„œ 서로 다른 유형: %(c_type)s "
-"%(shadow_c_type)s"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr "디스í¬ì— 사용ìžê°€ í¬ê¸°ë¥¼ ì¡°ì •í•  수 없는 íŒŒì¼ ì‹œìŠ¤í…œì´ í¬í•¨ë¨: %s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "Disk format %(disk_format)s를 알 수 없습니다."
@@ -663,13 +604,6 @@ msgstr "Disk format %(disk_format)s를 알 수 없습니다."
msgid "Disk info file is invalid: %(reason)s"
msgstr "ë””ìŠ¤í¬ ì •ë³´ 파ì¼ì´ 올바르지 ì•ŠìŒ: %(reason)s"
-msgid "Disk must have only one partition."
-msgstr "디스í¬ì—는 í•˜ë‚˜ì˜ íŒŒí‹°ì…˜ë§Œ 있어야 합니다. "
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr "ì¸ìŠ¤í„´ìŠ¤ì— ì ‘ì†ëœ IDê°€ %sì¸ ë””ìŠ¤í¬ë¥¼ ì°¾ì„ ìˆ˜ 없습니다."
-
#, python-format
msgid "Driver Error: %s"
msgstr "ë“œë¼ì´ë²„ 오류: %s"
@@ -696,10 +630,6 @@ msgstr ""
"ë‹ ìƒíƒœëŠ” ì•„ì§ '%(state)s'입니다."
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "ì—ì´ì „íŠ¸ì— ëŒ€í•œ ë‹¤ìŒ í˜¸ì¶œ 중 오류: %(method)s"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr "%(instance_id)s ì¸ìŠ¤í„´ìŠ¤ 언쉘브 중 오류 ë°œìƒ: %(reason)s"
@@ -751,9 +681,6 @@ msgstr "libguestfs(%(e)s)를 갖는 %(image)s 마운트 오류"
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "ìžì› 모니터 작성 ì¤‘ì— ì˜¤ë¥˜ ë°œìƒ: %(monitor)s"
-msgid "Error: Agent is disabled"
-msgstr "오류: ì—ì´ì „트가 사용 안ë¨"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "조치 ID %(action_id)sì— ëŒ€í•œ %(event)s ì´ë²¤íŠ¸ë¥¼ ì°¾ì„ ìˆ˜ ì—†ìŒ"
@@ -785,10 +712,6 @@ msgstr "최대 ìž¬ì‹œë„ íšŸìˆ˜ë¥¼ 초과했습니다. %(reason)s"
msgid "Expected a uuid but received %(uuid)s."
msgstr "uuid를 예ìƒí–ˆì§€ë§Œ %(uuid)sì„(를) 수신했습니다. "
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "새ë„ìš° í…Œì´ë¸”ì— %(table)s.%(column)s ì—´ì´ ì¶”ê°€ë¡œ 있ìŒ"
-
msgid "Extracting vmdk from OVA failed."
msgstr "OVAì—ì„œ vmdkì˜ ì••ì¶•ì„ í’€ì§€ 못했습니다."
@@ -815,16 +738,6 @@ msgstr "ë„¤íŠ¸ì›Œí¬ í• ë‹¹ 실패. 다시 스케줄하지 ì•ŠìŒ"
msgid "Failed to attach network adapter device to %(instance_uuid)s"
msgstr "ë„¤íŠ¸ì›Œí¬ ì–´ëŒ‘í„° 디바ì´ìŠ¤ë¥¼ %(instance_uuid)sì— ì ‘ì†í•˜ëŠ” ë° ì‹¤íŒ¨í•¨"
-msgid "Failed to create the interim network for vif"
-msgstr "vifì— ëŒ€í•œ ìž„ì‹œ ë„¤íŠ¸ì›Œí¬ êµ¬ì„±ì— ì‹¤íŒ¨í•¨"
-
-#, python-format
-msgid "Failed to create vif %s"
-msgstr "vif %sì„(를) ìƒì„±í•˜ëŠ” ë° ì‹¤íŒ¨"
-
-msgid "Failed to delete bridge"
-msgstr "브릿지 ì œê±°ì— ì‹¤íŒ¨í•¨"
-
#, python-format
msgid "Failed to deploy instance: %(reason)s"
msgstr "ì¸ìŠ¤í„´ìŠ¤ 배치 실패: %(reason)s"
@@ -841,9 +754,6 @@ msgstr "ë„¤íŠ¸ì›Œí¬ ì–´ëŒ‘í„° 디바ì´ìŠ¤ë¥¼ %(instance_uuid)sì—ì„œ 분리하
msgid "Failed to encrypt text: %(reason)s"
msgstr "í…스트를 암호화하지 못했습니다: %(reason)s"
-msgid "Failed to find bridge for vif"
-msgstr "vif 브릿지 ê²€ìƒ‰ì— ì‹¤íŒ¨í•¨"
-
#, python-format
msgid "Failed to get resource provider with UUID %(uuid)s"
msgstr "UUIDë¡œ 리소스 ê³µê¸‰ìž ê°€ì ¸ì˜¤ê¸° 실패: %(uuid)s"
@@ -860,9 +770,6 @@ msgstr "íŒŒí‹°ì…˜ì„ ë§µí•‘í•˜ì§€ 못했ìŒ: %s"
msgid "Failed to mount filesystem: %s"
msgstr "íŒŒì¼ ì‹œìŠ¤í…œ 마운트 실패: %s"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr "패스스루용 pci 디바ì´ìŠ¤ì— 대한 ì •ì±…ì„ êµ¬ë¬¸ 분ì„하지 못함"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "ì¸ìŠ¤í„´ìŠ¤ ì „ì› ë” ì‹¤íŒ¨: %(reason)s"
@@ -872,14 +779,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "ì¸ìŠ¤í„´ìŠ¤ ì „ì› ê¼„ 실패: %(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr ""
-"%(instance_uuid)s ì¸ìŠ¤í„´ìŠ¤ì— 대해 PCI 디바ì´ìŠ¤ %(id)sì„(를) 준비하지 못함: "
-"%(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "ì¸ìŠ¤í„´ìŠ¤ %(inst)s í”„ë¡œë¹„ì €ë‹ ì‹¤íŒ¨: %(reason)s"
@@ -911,9 +810,6 @@ msgstr "%(path)sì—ì„œ qemu-img ì •ë³´ 실행 실패: %(error)s"
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr "%(reason)s ë•Œë¬¸ì— %(instance)sì— ê´€ë¦¬ 비밀번호를 설정하지 못했ìŒ"
-msgid "Failed to spawn, rolling back"
-msgstr "íŒŒìƒ ì‹¤íŒ¨. 롤백 중"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "ì¸ìŠ¤í„´ìŠ¤ ì¼ì‹œì¤‘단 실패: %(reason)s"
@@ -923,10 +819,6 @@ msgid "Failed to terminate instance: %(reason)s"
msgstr "ì¸ìŠ¤í„´ìŠ¤ 종료 실패: %(reason)s"
#, python-format
-msgid "Failed to unplug vif %s"
-msgstr "vif %sì˜ í”ŒëŸ¬ê·¸ë¥¼ 해제하는 ë° ì‹¤íŒ¨"
-
-#, python-format
msgid "Failed to unplug virtual interface: %(reason)s"
msgstr "ê°€ìƒ ì¸í„°íŽ˜ì´ìŠ¤ í•´ì œ 실패: %(reason)s"
@@ -938,10 +830,6 @@ msgid "File %(file_path)s could not be found."
msgstr "%(file_path)s 파ì¼ì„ ì°¾ì„ ìˆ˜ 없습니다. "
#, python-format
-msgid "File path %s not valid"
-msgstr "íŒŒì¼ ê²½ë¡œ %sì´(ê°€) 올바르지 ì•ŠìŒ"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr ""
"ê³ ì • IP %(ip)sì´(ê°€) ë„¤íŠ¸ì›Œí¬ %(network_id)sì— ëŒ€í•´ 올바른 IP 주소가 아닙니"
@@ -1072,18 +960,6 @@ msgstr ""
msgid "Found no disk to snapshot."
msgstr "스냅샷할 디스í¬ë¥¼ 찾지 못함."
-#, python-format
-msgid "Found no network for bridge %s"
-msgstr "브릿지 %sì— ëŒ€í•œ 네트워í¬ë¥¼ 발견하지 못함"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "브릿지 %sì— ëŒ€í•œ 고유하지 ì•Šì€ ë„¤íŠ¸ì›Œí¬ ë°œê²¬"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "name_label %sì— ëŒ€í•œ 고유하지 ì•Šì€ ë„¤íŠ¸ì›Œí¬ ë°œê²¬"
-
msgid "Guest agent is not enabled for the instance"
msgstr "게스트 ì—ì´ì „트는 해당 ì¸ìŠ¤í„°ìŠ¤ì— 활성화 ë˜ì§€ 않습니다."
@@ -1115,9 +991,6 @@ msgid "Host does not support guests with custom memory page sizes"
msgstr ""
"호스트ì—ì„œ ì‚¬ìš©ìž ì •ì˜ ë©”ëª¨ë¦¬ 페ì´ì§€ í¬ê¸°ë¥¼ 사용하는 게스트를 지ì›í•˜ì§€ ì•ŠìŒ"
-msgid "Host startup on XenServer is not supported."
-msgstr "XenServerì—ì„œì˜ í˜¸ìŠ¤íŠ¸ ì‹œìž‘ì€ ì§€ì›ë˜ì§€ 않습니다. "
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
"하ì´í¼ë°”ì´ì € ë“œë¼ì´ë²„ê°€ post_live_migration_at_source 메소드를 지ì›í•˜ì§€ ì•ŠìŒ"
@@ -1338,10 +1211,6 @@ msgstr "ì¸ìŠ¤í„´ìŠ¤ í¬ê¸°ê°€ ì¡°ì •ë˜ì§€ 않았습니다. "
msgid "Instance hostname %(hostname)s is not a valid DNS name"
msgstr "ì¸ìŠ¤í„´ìŠ¤ 호스트 ì´ë¦„ %(hostname)sì´(ê°€) 올바른 DNS ì´ë¦„ì´ ì•„ë‹˜"
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "ì¸ìŠ¤í„´ìŠ¤ê°€ ì´ë¯¸ 복구 ëª¨ë“œì— ìžˆìŒ: %s"
-
msgid "Instance is not a member of specified network"
msgstr "ì¸ìŠ¤í„´ìŠ¤ê°€ ì§€ì •ëœ ë„¤íŠ¸ì›Œí¬ì˜ 멤버가 아님"
@@ -1365,11 +1234,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "Compute 리소스가 충분하지 않습니다: %(reason)s."
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr ""
-"%(uuid)sì„(를) 시작하기ì—는 계산 ë…¸ë“œì˜ ì‚¬ìš© 가능한 메모리가 부족합니다. "
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "%(interface)s ì¸í„°íŽ˜ì´ìŠ¤ë¥¼ ì°¾ì„ ìˆ˜ 없습니다. "
@@ -1561,12 +1425,6 @@ msgid ""
"It is not allowed to create an interface on external network %(network_uuid)s"
msgstr "외부 ë„¤íŠ¸ì›Œí¬ %(network_uuid)sì— ì¸í„°íŽ˜ì´ìŠ¤ë¥¼ 작성할 수 ì—†ìŒ"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr ""
-"커ë„/ëž¨ë””ìŠ¤í¬ ì´ë¯¸ì§€ê°€ 너무 í¼: %(vdi_size)d ë°”ì´íŠ¸, 최대 %(max_size)d ë°”ì´íŠ¸"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1589,12 +1447,6 @@ msgstr "%(user_id)s 사용ìžì— 대한 키 ìŒ %(name)sì„(를) ì°¾ì„ ìˆ˜ ì—†ì
msgid "Keypair data is invalid: %(reason)s"
msgstr "키 ìŒ ë°ì´í„°ê°€ 올바르지 않습니다: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "키 ìŒ ì´ë¦„ì— ì•ˆì „í•˜ì§€ ì•Šì€ ë¬¸ìžê°€ 들어있ìŒ"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr "키 ìŒ ì´ë¦„ì€ ë¬¸ìžì—´ì´ê³  길ì´ê°€ 1 - 255ìž ë²”ìœ„ì— ì†í•´ì•¼ 함"
-
msgid "Libguestfs does not have permission to read host kernel."
msgstr "Libguestfsì—게는 ì»¤ë„ í˜¸ìŠ¤íŠ¸ë¥¼ ì½ì–´ì˜¬ 수 있는 ê¶Œí•œì´ ì—†ìŠµë‹ˆë‹¤"
@@ -1630,9 +1482,6 @@ msgstr "%(marker)s 마커를 ì°¾ì„ ìˆ˜ 없습니다. "
msgid "Maximum number of floating IPs exceeded"
msgstr "Floating IPì˜ ìµœëŒ€ìˆ˜ 초과"
-msgid "Maximum number of key pairs exceeded"
-msgstr "키 ìŒì˜ 최대 수 초과"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "메타ë°ì´í„°ì˜ 최대 수가 %(allowed)dì„(를) 초과함"
@@ -1669,12 +1518,6 @@ msgstr ""
"메트릭 %(name)sì„(를) 계산 호스트 노드 %(host)s.%(node)s.ì—ì„œ ì°¾ì„ ìˆ˜ 없습니"
"다."
-msgid "Migrate Receive failed"
-msgstr "마ì´ê·¸ë ˆì´ì…˜ 수신 실패"
-
-msgid "Migrate Send failed"
-msgstr "마ì´ê·¸ë ˆì´ì…˜ 전송 실패"
-
#, python-format
msgid "Migration %(id)s for server %(uuid)s is not live-migration."
msgstr "%(uuid)s ì„œë²„ì˜ %(id)s 마ì´ê·¸ë ˆì´ì…˜ì´ ë¼ì´ë¸Œ 마ì´ê·¸ë ˆì´ì…˜ì´ 아닙니다."
@@ -1722,10 +1565,6 @@ msgstr "마ì´ê·¸ë ˆì´ì…˜ ì„ íƒ ëŒ€ìƒ ì˜¤ë¥˜: %(reason)s"
msgid "Missing arguments: %s"
msgstr "누ë½ëœ ì¸ìˆ˜: %s"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "새ë„ìš° í…Œì´ë¸”ì— %(table)s.%(column)s ì—´ì´ ëˆ„ë½ë¨"
-
msgid "Missing device UUID."
msgstr "장치 UUID가 비어 있습니다."
@@ -1809,13 +1648,6 @@ msgid "Must not input both network_id and port_id"
msgstr "network_id ë° port_id 둘 다 입력하지 않아야 함"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"connection_url, connection_username (optionally), connection_password를 지정"
-"해야 compute_driver=xenapi.XenAPIDriver를 사용할 수 있ìŒ"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1868,10 +1700,6 @@ msgstr "IDê°€ %(id)sì¸ ë¸”ë¡ ë””ë°”ì´ìŠ¤ ë§µí•‘ì´ ì—†ìŠµë‹ˆë‹¤. "
msgid "No Unique Match Found."
msgstr "고유한 ì¼ì¹˜ì ì„ 찾지 못했습니다."
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "ID %(id)sê³¼(와) ì—°ê´€ëœ ì—ì´ì „트 빌드가 없습니다. "
-
msgid "No compute host specified"
msgstr "ì§€ì •ëœ ê³„ì‚° 호스트가 ì—†ìŒ"
@@ -1937,10 +1765,6 @@ msgstr "%(image)sì˜ %(root)sì— ë§ˆìš´íŠ¸ 지ì ì´ ì—†ìŒ"
msgid "No operating system found in %s"
msgstr "%sì— ìš´ì˜ ì²´ì œê°€ ì—†ìŒ"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "%sì— ëŒ€í•œ 1ì°¨ VDI를 ì°¾ì„ ìˆ˜ ì—†ìŒ"
-
msgid "No root disk defined."
msgstr "루트 디스í¬ê°€ ì •ì˜ë˜ì§€ 않았습니다."
@@ -1952,9 +1776,6 @@ msgstr ""
"'%(project_id)s' 프로ì íŠ¸ì— 특정 네트워í¬ê°€ 요청ë˜ì—ˆì§€ë§Œ, 사용할 수 있는 네트"
"워í¬ê°€ 없습니다."
-msgid "No suitable network for migrate"
-msgstr "마ì´ê·¸ë ˆì´ì…˜ì„ 위한 ì§€ì† ê°€ëŠ¥í•œ ë„¤íŠ¸ì›Œí¬ ì—†ìŒ"
-
msgid "No valid host found for cold migrate"
msgstr "콜드 마ì´ê·¸ë ˆì´ì…˜ì— 대한 유효한 호스트를 ì°¾ì„ ìˆ˜ ì—†ìŒ"
@@ -2038,14 +1859,6 @@ msgstr "하나 ì´ìƒì˜ 호스트가 ì´ë¯¸ 가용성 구역 %sì— ìžˆìŒ"
msgid "Only administrators may list deleted instances"
msgstr "관리ìžë§Œ ì‚­ì œëœ ì¸ìŠ¤í„´ìŠ¤ë¥¼ 나열할 수 있ìŒ"
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr ""
-"ì´ ê¸°ëŠ¥ì—서는 íŒŒì¼ ê¸°ë°˜ SR(ext/NFS)만 지ì›í•©ë‹ˆë‹¤. SR %(uuid)sì´(ê°€) %(type)s "
-"유형입니다."
-
msgid "Origin header does not match this host."
msgstr "ì›ë³¸ í—¤ë”ê°€ ì´ í˜¸ìŠ¤íŠ¸ì™€ ì¼ì¹˜í•˜ì§€ 않습니다."
@@ -2088,10 +1901,6 @@ msgid "PCI device request %(requests)s failed"
msgstr "PCI 디바ì´ìŠ¤ 요청 %(requests)sì— ì‹¤íŒ¨í•¨"
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "PIF %sì— IP 주소가 ì—†ìŒ"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "페ì´ì§€ í¬ê¸° %(pagesize)sì´(ê°€) '%(against)s'ì— ëŒ€í•´ 금지ë¨"
@@ -2212,10 +2021,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "할당량 초과. ê·¸ë£¹ì— ì„œë²„ê°€ 너무 많습니다. "
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "할당량 초과: 코드=%(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr ""
"프로ì íŠ¸ %(project_id)s, ìžì› %(resource)sì— ëŒ€í•œ í• ë‹¹ëŸ‰ì´ ì¡´ìž¬í•©ë‹ˆë‹¤. "
@@ -2246,17 +2051,6 @@ msgid ""
msgstr ""
"%(resource)sì˜ í• ë‹¹ëŸ‰ 한계 %(limit)sì€(는) %(maximum)s ì´í•˜ì—¬ì•¼ 합니다."
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr "VBD %sì„(를) 언플러그하려는 최대 ìž¬ì‹œë„ íšŸìˆ˜ì— ë„달했ìŒ"
-
-msgid ""
-"Realtime policy needs vCPU(s) mask configured with at least 1 RT vCPU and 1 "
-"ordinary vCPU. See hw:cpu_realtime_mask or hw_cpu_realtime_mask"
-msgstr ""
-"실시간 ì •ì±…ì—는 하나 ì´ìƒì˜ 1 RT vCPU와 1 ì¼ë°˜ vCPUë¡œ êµ¬ì„±ëœ vCPU(s) 마스í¬"
-"ê°€ 필요합니다. hw:cpu_realtime_mask ë˜ëŠ” hw_cpu_realtime_mask를 참조하십시오."
-
msgid "Request body and URI mismatch"
msgstr "요청 본문 ë° URI 불ì¼ì¹˜"
@@ -2425,10 +2219,6 @@ msgid "Set admin password is not supported"
msgstr "ì„¤ì •ëœ ê´€ë¦¬ 비밀번호가 지ì›ë˜ì§€ ì•ŠìŒ"
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "ì´ë¦„ì´ %(name)sì¸ ìƒˆë„ìš° í…Œì´ë¸”ì´ ì´ë¯¸ 존재합니다. "
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "공유 '%s'ì€(는) 지ì›ë˜ì§€ ì•ŠìŒ"
@@ -2436,13 +2226,6 @@ msgstr "공유 '%s'ì€(는) 지ì›ë˜ì§€ ì•ŠìŒ"
msgid "Share level '%s' cannot have share configured"
msgstr "공유 레벨 '%s'ì—는 공유를 구성할 수 ì—†ìŒ"
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"resize2fs를 사용한 íŒŒì¼ ì‹œìŠ¤í…œ ì¶•ì†Œì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. 사용ìžì˜ 디스í¬ì—충분한 "
-"여유 ê³µê°„ì´ ìžˆëŠ”ì§€ 확ì¸í•˜ì‹­ì‹œì˜¤."
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "%(snapshot_id)s ìŠ¤ëƒ…ìƒ·ì„ ì°¾ì„ ìˆ˜ 없습니다. "
@@ -2467,12 +2250,6 @@ msgstr "제공ë˜ëŠ” ì •ë ¬ 키가 올바르지 않습니다. "
msgid "Specified fixed address not assigned to instance"
msgstr "ì§€ì •ëœ ê³ ì • 주소가 ì¸ìŠ¤í„´ìŠ¤ì— ì—°ê´€ë˜ì§€ ì•ŠìŒ"
-msgid "Specify `table_name` or `table` param"
-msgstr "`table_name` ë˜ëŠ” `table` 매개변수를 지정하십시오. "
-
-msgid "Specify only one param `table_name` `table`"
-msgstr "í•˜ë‚˜ì˜ ë§¤ê°œë³€ìˆ˜ `table_name` ë˜ëŠ” `table`만 지정하십시오."
-
msgid "Started"
msgstr "ìž‘ë™í•¨"
@@ -2545,9 +2322,6 @@ msgstr "ì¸ìŠ¤í„´ìŠ¤ëŠ” ì œê³µëœ ê²ƒë³´ë‹¤ 최신 하ì´í¼ë°”ì´ì € 버전ì´
msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
msgstr "ì •ì˜ëœ í¬íŠ¸ 수 %(ports)dì´(ê°€) 한계를 초과함: %(quota)d"
-msgid "The only partition should be partition 1."
-msgstr "유ì¼í•œ íŒŒí‹°ì…˜ì€ íŒŒí‹°ì…˜ 1ì´ì–´ì•¼ 합니다."
-
#, python-format
msgid ""
"The property 'numa_nodes' cannot be '%(nodes)s'. It must be a number greater "
@@ -2616,48 +2390,13 @@ msgid ""
"The volume cannot be assigned the same device name as the root device %s"
msgstr "ë³¼ë¥¨ì— ë£¨íŠ¸ 디바ì´ìŠ¤ %sê³¼(와) ê°™ì€ ë””ë°”ì´ìŠ¤ ì´ë¦„ì„ ì§€ì •í•  수 ì—†ìŒ"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"uuid ë˜ëŠ” instance_uuid ì—´ì´ ë„ì¸ '%(table_name)s' í…Œì´ë¸”ì— %(records)dê°œì˜ "
-"레코드가 있습니다. 필요한 ë°ì´í„°ë¥¼ 백업한 í›„ì— --delete ì˜µì…˜ì„ ì‚¬ìš©í•˜ì—¬ ì´ ëª…"
-"ë ¹ì„ ë‹¤ì‹œ 실행하십시오."
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"uuid ë˜ëŠ” instance_uuid ì—´ì´ ë„ì¸ '%(table_name)s' í…Œì´ë¸”ì— %(records)dê°œì˜ "
-"레코드가 있습니다. ì´ëŸ¬í•œ 레코드는 마ì´ê·¸ë ˆì´ì…˜ì´ 지나기 ì „ì— ìˆ˜ë™ìœ¼ë¡œ 정리해"
-"야 합니다. 'nova-manage db null_instance_uuid_scan' ëª…ë ¹ì„ ì‚¬ìš©í•´ 보십시오. "
-
msgid "There are not enough hosts available."
msgstr "사용 가능한 호스트가 부족합니다."
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"여전히 %(count)iê°œì˜ í”Œë ˆì´ë²„ 레코드가 마ì´ê·¸ë ˆì´ì…˜ë˜ì§€ 않았습니다. 모든 ì¸ìŠ¤"
-"턴스 플레ì´ë²„ 레코드가 새로운 형ì‹ìœ¼ë¡œ 마ì´ê·¸ë ˆì´ì…˜ë  때까지 마ì´ê·¸ë ˆì´ì…˜ì„ "
-"계ì†í•  수 없습니다. 먼저 `nova-manage db migrate_flavor_data'를 실행하십시"
-"오. "
-
-#, python-format
msgid "There is no such action: %s"
msgstr "해당 조치가 ì—†ìŒ: %s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr "instance_uuidê°€ ë„ì¸ ë ˆì½”ë“œê°€ 없습니다."
-
#, python-format
msgid ""
"This compute node's hypervisor is older than the minimum supported version: "
@@ -2666,9 +2405,6 @@ msgstr ""
"ì´ ì»´í“¨íŠ¸ ë…¸ë“œì˜ í•˜ì´í¼ë°”ì´ì €ê°€ 지ì›ë˜ëŠ” 최소 버전 %(version)s보다 ì´ì „입니"
"다."
-msgid "This domU must be running on the host specified by connection_url"
-msgstr "ì´ domUê°€ connection_urlë¡œ 지정ë˜ëŠ” 호스트ì—ì„œ 실행 중ì´ì–´ì•¼ 함"
-
msgid ""
"This method needs to be called with either networks=None and port_ids=None "
"or port_ids and networks as not none."
@@ -2688,10 +2424,6 @@ msgstr ""
"ì´ ì„œë¹„ìŠ¤ëŠ” 나머지 ë°°ì¹˜ì˜ ìµœì†Œ (v%(minver)i) 버전보다 ì´ì „(v%(thisver)i)입니"
"다."
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "%s 디바ì´ìŠ¤ê°€ 작성ë˜ê¸°ë¥¼ 기다리다가 제한시간 초과함"
-
msgid "Timeout waiting for response from cell"
msgstr "ì…€ì˜ ì‘ë‹µì„ ëŒ€ì‹œí•˜ëŠ” ì¤‘ì— ì œí•œì‹œê°„ 초과"
@@ -2739,11 +2471,6 @@ msgid "Unable to automatically allocate a network for project %(project_id)s"
msgstr "%(project_id)s ë•Œë¬¸ì— ìžë™ìœ¼ë¡œ 네트워í¬ë¥¼ 할당할 수 없습니다."
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr ""
-"게스트 ì—ì´ì „íŠ¸ì— ì ‘ì†í•  수 ì—†ìŒ. ë‹¤ìŒ í˜¸ì¶œì˜ ì œí•œì‹œê°„ì´ ì´ˆê³¼ë¨: %(method)s"
-
-#, python-format
msgid "Unable to convert image to %(format)s: %(exp)s"
msgstr "ì´ë¯¸ì§€ë¥¼ %(format)s(으)ë¡œ 변환할 수 ì—†ìŒ: %(exp)s"
@@ -2752,14 +2479,6 @@ msgid "Unable to convert image to raw: %(exp)s"
msgstr "ì´ë¯¸ì§€ë¥¼ ì›ì‹œë¡œ 변환할 수 ì—†ìŒ: %(exp)s"
#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "VBD %sì„(를) ì˜êµ¬ 삭제할 수 ì—†ìŒ"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "VDI %sì„(를) ì˜êµ¬ 삭제할 수 ì—†ìŒ"
-
-#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "'%s'ì˜ ë””ìŠ¤í¬ ë²„ìŠ¤ë¥¼ íŒë³„í•  수 ì—†ìŒ"
@@ -2768,22 +2487,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "%sì˜ ë””ìŠ¤í¬ ì ‘ë‘부를 íŒë³„í•  수 ì—†ìŒ"
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr "í’€ì—ì„œ %sì„(를) 방출할 수 ì—†ìŒ. 마스터를 ì°¾ì„ ìˆ˜ ì—†ìŒ"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "í’€ì—ì„œ %sì„(를) 방출할 수 ì—†ìŒ. í’€ì´ ë¹„ì–´ 있지 ì•ŠìŒ"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "VBD %sì—ì„œ SRì„ ì°¾ì„ ìˆ˜ ì—†ìŒ"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "VDI %sì—ì„œ SRì„ ì°¾ì„ ìˆ˜ ì—†ìŒ"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "ca_fileì„ ì°¾ì„ ìˆ˜ ì—†ìŒ: %s"
@@ -2802,9 +2505,6 @@ msgstr "iSCSI 대ìƒì„ ì°¾ì„ ìˆ˜ ì—†ìŒ"
msgid "Unable to find key_file : %s"
msgstr "key_fileì„ ì°¾ì„ ìˆ˜ ì—†ìŒ: %s"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "VMì— ëŒ€í•œ 루트 VBD/VDI를 ì°¾ì„ ìˆ˜ ì—†ìŒ"
-
msgid "Unable to find volume"
msgstr "ë³¼ë¥¨ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ"
@@ -2814,22 +2514,6 @@ msgstr "호스트 UUID를 가져올 수 ì—†ìŒ: /etc/machine-idê°€ 존재하지
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr "호스트 UUID를 가져올 수 ì—†ìŒ: /etc/machine-idê°€ 비어 있ìŒ"
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "VDI %sì˜ ë ˆì½”ë“œë¥¼ 가져올 수 ì—†ìŒ"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "SR %sì— ëŒ€í•œ VDI를 ë„ìž…í•  수 ì—†ìŒ"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "SR %sì—ì„œ VDI를 ë„ìž…í•  수 ì—†ìŒ"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "í’€ì— %sì„(를) ê²°í•©í•  수 ì—†ìŒ"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2860,10 +2544,6 @@ msgstr ""
"ì¸ìŠ¤í„´ìŠ¤(%(instance_id)s)를 현재 호스트(%(host)s)ë¡œ 마ì´ê·¸ë ˆì´ì…˜í•  수 없습니"
"다. "
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "ëŒ€ìƒ ì •ë³´ %sì„(를) ì–»ì„ ìˆ˜ ì—†ìŒ"
-
msgid "Unable to resize disk down."
msgstr "ë””ìŠ¤í¬ í¬ê¸°ë¥¼ ì¤„ì¼ ìˆ˜ 없습니다."
@@ -2873,13 +2553,6 @@ msgstr "ì¸ìŠ¤í„´ìŠ¤ì— 대한 비밀번호를 설정할 수 ì—†ìŒ"
msgid "Unable to shrink disk."
msgstr "디스í¬ë¥¼ ì¤„ì¼ ìˆ˜ 없습니다."
-msgid "Unable to terminate instance."
-msgstr "ì¸ìŠ¤í„´ìŠ¤ë¥¼ 종료할 수 ì—†ìŒ"
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "VBD %sì„(를) 언플러그할 수 ì—†ìŒ"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "CPU 정보를 확ì¸í•  수 없습니다: %(reason)s"
@@ -2899,15 +2572,6 @@ msgstr ""
"ìŠ¤ì˜ ë¸”ë¡ ë””ë°”ì´ìŠ¤ ë§µí•‘ì´ í¬í•¨ë˜ì–´ 있습니다."
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"API Errorê°€ ë°œìƒí–ˆìŠµë‹ˆë‹¤. http://bugs.launchpad.net/nova/ ì— ìƒì„¸ ë‚´ìš©ì„ ë³´ë‚´"
-"주십시오. 가능하면 Nova API 로그를 í¬í•¨í•˜ì—¬ 보내주십시오. %s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "예ìƒì¹˜ ì•Šì€ ì§‘í•© %s ìž‘ì—…"
@@ -2967,9 +2631,6 @@ msgstr "언쉘브를 ì‹œë„했으나 %s ì´ë¯¸ì§€ë¥¼ ì°¾ì„ ìˆ˜ 없습니다."
msgid "Unsupported Content-Type"
msgstr "지ì›ë˜ì§€ 않는 Content-Type"
-msgid "Upgrade DB using Essex release first."
-msgstr "먼저 Essex 릴리스를 사용하여 DB를 업그레ì´ë“œí•˜ì‹­ì‹œì˜¤. "
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr "%(username)s 사용ìžê°€ 비밀번호 파ì¼ì— 없습니다. "
@@ -2993,25 +2654,6 @@ msgstr ""
#, python-format
msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"VDI %(vdi_ref)sì€(는) %(virtual_size)dë°”ì´íŠ¸ì´ë©° ì´ëŠ” 플레ì´ë²„ í¬ê¸°ì¸ "
-"%(new_disk_size)dë°”ì´íŠ¸ë³´ë‹¤ í½ë‹ˆë‹¤."
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"SR %(sr)sì—ì„œ VDI를 찾지 못함(vdi_uuid %(vdi_uuid)s, target_lun "
-"%(target_lun)s)"
-
-#, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr "VHD 합병 ì‹œë„ê°€ (%d)ì„(를) 초과했ìŒ, í¬ê¸°í•˜ëŠ” 중..."
-
-#, python-format
-msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
msgstr ""
@@ -3089,13 +2731,6 @@ msgstr ""
msgid "Volume size extension is not supported by the hypervisor."
msgstr "하ì´í¼ë°”ì´ì €ì—ì„œ 볼륨í¬ê¸° í™•ìž¥ì„ ì§€ì›í•˜ì§€ 않습니다"
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr ""
-"2.7.4 ë¯¸ë§Œì˜ Pythonì—ì„œ 스키마 '%s'ì„(를) 지ì›í•˜ì§€ 않습니다. http ë˜ëŠ” https"
-"를 사용하십시오."
-
msgid "When resizing, instances must change flavor!"
msgstr "í¬ê¸°ë¥¼ ì¡°ì •í•  ë•Œ ì¸ìŠ¤í„´ìŠ¤ëŠ” 플레ì´ë²„를 변경해야 합니다!"
@@ -3110,9 +2745,6 @@ msgstr ""
msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr "%(res)s ìžì›ì—ì„œ 올바르지 ì•Šì€ í• ë‹¹ëŸ‰ 메소드 %(method)sì´(ê°€) 사용ë¨"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr "ìž˜ëª»ëœ ìœ í˜•ì˜ í›„í¬ ë©”ì†Œë“œìž„. 'pre' ë° 'post' 유형만 허용ë¨"
-
msgid "X-Forwarded-For is missing from request."
msgstr "X-Forwarded-Forê°€ 요청ì—ì„œ 누ë½ë˜ì—ˆìŠµë‹ˆë‹¤. "
@@ -3128,9 +2760,6 @@ msgstr "X-Metadata-Providerê°€ 요청ì—ì„œ 누ë½ë˜ì—ˆìŠµë‹ˆë‹¤. "
msgid "X-Tenant-ID header is missing from request."
msgstr "X-Tenant-ID í—¤ë”ê°€ 요청ì—ì„œ 누ë½ë˜ì—ˆìŠµë‹ˆë‹¤."
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "XAPI ì§€ì› relax-xsm-sr-check=trueê°€ 필요함"
-
msgid "You are not allowed to delete the image."
msgstr "ì´ë¯¸ì§€ë¥¼ 삭제할 수 없습니다."
@@ -3158,16 +2787,6 @@ msgstr "사용할 수 있는 Floating IP가 0개입니다."
msgid "admin password can't be changed on existing disk"
msgstr "관리 비밀번호는 기존 디스í¬ì—ì„œ ë³€ê²½ë  ìˆ˜ ì—†ìŒ"
-msgid "aggregate deleted"
-msgstr "ì§‘í•©ì´ ì‚­ì œë˜ì—ˆìŠµë‹ˆë‹¤"
-
-msgid "aggregate in error"
-msgstr "ì§‘í•©ì— ì˜¤ë¥˜ê°€ 있습니다"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "assert_can_migrate 실패. ì›ì¸: %s"
-
msgid "cannot understand JSON"
msgstr "JSONì„ ì´í•´í•  수 ì—†ìŒ"
@@ -3231,9 +2850,6 @@ msgstr "ì´ë¯¸ì§€ê°€ ì´ë¯¸ 마운트ë˜ì—ˆìŒ"
msgid "instance %s is not running"
msgstr "ì¸ìŠ¤í„´ìŠ¤ %sì´(ê°€) 실행 ì¤‘ì´ ì•„ë‹˜"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr "ì¸ìŠ¤í„´ìŠ¤ê°€ ì»¤ë„ ë˜ëŠ” 램디스í¬ë¥¼ 갖지만 둘 다 갖지는 ì•ŠìŒ"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr "ì¸ìŠ¤í„´ìŠ¤ëŠ” @refresh_cache를 사용하기 위한 필수 ì¸ìˆ˜ìž„"
@@ -3288,9 +2904,6 @@ msgstr "nbd 디바ì´ìŠ¤ %sì´(ê°€) 표시ë˜ì§€ ì•ŠìŒ"
msgid "nbd unavailable: module not loaded"
msgstr "nbd 사용 불가능: ëª¨ë“ˆì´ ë¡œë“œë˜ì§€ 않았ìŒ"
-msgid "no hosts to remove"
-msgstr "제거할 호스트가 ì—†ìŒ"
-
#, python-format
msgid "no match found for %s"
msgstr "%sì— ëŒ€í•œ ì¼ì¹˜ í•­ëª©ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ"
@@ -3361,9 +2974,6 @@ msgstr ""
"set_admin_passwordê°€ ì´ ë“œë¼ì´ë²„ ë˜ëŠ” 게스트 ì¸ìŠ¤í„´ìŠ¤ì— ì˜í•´ 구현ë˜ì§€ 않습니"
"다. "
-msgid "setup in progress"
-msgstr "설정 진행 중"
-
#, python-format
msgid "snapshot for %s"
msgstr "%s 스냅샷"
@@ -3380,9 +2990,6 @@ msgstr "본문 키가 너무 많ìŒ"
msgid "unpause not supported for vmwareapi"
msgstr "vmwareapiì— ëŒ€í•œ ì¼ì‹œì •ì§€ 해제는 지ì›ë˜ì§€ ì•ŠìŒ"
-msgid "version should be an integer"
-msgstr "ë²„ì „ì€ ì •ìˆ˜ì—¬ì•¼ 함"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "vg %sì€(는) LVM 볼륨 그룹ì´ì–´ì•¼ 함"
@@ -3406,14 +3013,3 @@ msgstr "볼륨 %sì´(ê°€) ì´ë¯¸ ì ‘ì†ë¨"
msgid ""
"volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status"
msgstr "볼륨 '%(vol)s' ìƒíƒœëŠ” '사용 중'ì´ì–´ì•¼ 합니다. 현재 ìƒíƒœ '%(status)s'"
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fakeê°€ %sì— ëŒ€í•œ êµ¬í˜„ì„ ê°–ì§€ ì•ŠìŒ"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr ""
-"xenapi.fakeì— %sì— ëŒ€í•œ êµ¬í˜„ì´ ì—†ê±°ë‚˜ ìž˜ëª»ëœ ìˆ˜ì˜ ì¸ìˆ˜ë¥¼ 사용하여 호출ë¨"
diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova.po b/nova/locale/pt_BR/LC_MESSAGES/nova.po
index dc0e2789a5..a760586ef9 100644
--- a/nova/locale/pt_BR/LC_MESSAGES/nova.po
+++ b/nova/locale/pt_BR/LC_MESSAGES/nova.po
@@ -19,7 +19,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -145,18 +145,6 @@ msgid "Affinity instance group policy was violated."
msgstr "A política de grupo da instância de afinidade foi violada."
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "O agente não suporta a chamada: %(method)s"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"A construção do agente com hypervisor %(hypervisor)s os %(os)s arquitetura "
-"%(architecture)s existe."
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "O agregado %(aggregate_id)s já possui o host %(host)s."
@@ -175,12 +163,6 @@ msgstr ""
"%(metadata_key)s."
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr ""
-"Agregado %(aggregate_id)s: ação '%(action)s' causou um erro: %(reason)s."
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "O agregado %(aggregate_name)s já existe."
@@ -189,10 +171,6 @@ msgid "Aggregate %s does not support empty named availability zone"
msgstr "O agregado %s não suporta zona de disponibilidade nomeada vazia"
#, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "Agregar para a contagem de host %(host)s não pode ser localizada."
-
-#, python-format
msgid "An invalid 'name' value was provided. The name must be: %(reason)s"
msgstr "Um valor 'name' inválido foi fornecido. O nome deve ser: %(reason)s"
@@ -376,12 +354,6 @@ msgid "Can not handle authentication request for %d credentials"
msgstr ""
"Não é possível manipular solicitação de autenticação para %d credenciais"
-msgid "Can't resize a disk to 0 GB."
-msgstr "Não é possível redimensionar um disco para 0 GB."
-
-msgid "Can't resize down ephemeral disks."
-msgstr "Não é possível redimensionar o disco temporário."
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
"Não é possível recuperar o caminho de dispositivo raiz da configuração de "
@@ -427,12 +399,6 @@ msgstr ""
"Não é possível determinar o conjunto de armazenamentos pai para %s; não é "
"possível determinar onde armazenar as imagens"
-msgid "Cannot find SR of content-type ISO"
-msgstr "Não é possível localizar SR do tipo de conteúdo ISO"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "Não é possível localizar SR para VDI de leitura/gravação."
-
msgid "Cannot find image for rebuild"
msgstr "Não foi possível localizar a imagem para reconstrução"
@@ -558,10 +524,6 @@ msgid "Connection to libvirt lost: %s"
msgstr "Conexão com libvirt perdida: %s"
#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "A conexão com o hypervisor for interrompida no host: %(host)s"
-
-#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
@@ -640,20 +602,9 @@ msgid "Default PBM policy is required if PBM is enabled."
msgstr "Política de PBM padrão será necessária se PBM for ativado."
#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "Registros %(records)d excluídos da tabela ‘%(table_name)s‘."
-
-#, python-format
msgid "Device '%(device)s' not found."
msgstr "Dispositivo '%(device)s' não localizado."
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr ""
-"id do dispositivo %(id)s especificado não é suportado pela versão do "
-"hypervisor %(version)s"
-
msgid "Device name contains spaces."
msgstr "Nome do dispositivo contém espaços."
@@ -665,19 +616,6 @@ msgid "Device type mismatch for alias '%s'"
msgstr "Tipo de dispositivo incompatível para o alias '%s'"
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-"Tipos diferentes em %(table)s.%(column)s e na tabela de sombra: %(c_type)s "
-"%(shadow_c_type)s"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr ""
-"O disco contém um sistema de arquivos que não é possível redimensionar: %s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "Formato do disco %(disk_format)s não é aceito"
@@ -685,13 +623,6 @@ msgstr "Formato do disco %(disk_format)s não é aceito"
msgid "Disk info file is invalid: %(reason)s"
msgstr "Arquivo de informações de disco é inválido: %(reason)s"
-msgid "Disk must have only one partition."
-msgstr "O disco deve ter apenas uma partição."
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr "Disco com o ID: %s não foi encontrado anexado à instância."
-
#, python-format
msgid "Driver Error: %s"
msgstr "Erro de driver: %s"
@@ -709,10 +640,6 @@ msgstr ""
"'%(state)s'."
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "Erro durante a seguinte chamada ao agente: %(method)s"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr "Erro durante a instância unshelve %(instance_id)s: %(reason)s"
@@ -764,9 +691,6 @@ msgstr "Erro ao montar %(image)s com libguestfs (%(e)s)"
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Erro ao criar monitor de recurso: %(monitor)s"
-msgid "Error: Agent is disabled"
-msgstr "Erro: O agente está desativado"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "Evento %(event)s não localizado para o ID da ação %(action_id)s"
@@ -798,10 +722,6 @@ msgstr "Foi excedido o número máximo de tentativas. %(reason)s"
msgid "Expected a uuid but received %(uuid)s."
msgstr "Esperado um uuid, mas recebido %(uuid)s."
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "Coluna adicional %(table)s.%(column)s na tabela de sombra"
-
msgid "Extracting vmdk from OVA failed."
msgstr "A extração de vmdk de OVA falhou."
@@ -830,10 +750,6 @@ msgstr ""
"Falha ao anexar o dispositivo de adaptador de rede para %(instance_uuid)s"
#, python-format
-msgid "Failed to create vif %s"
-msgstr "Falha ao criar o vif %s"
-
-#, python-format
msgid "Failed to deploy instance: %(reason)s"
msgstr "Falha ao provisionar a instância: %(reason)s"
@@ -862,9 +778,6 @@ msgstr "Falha ao mapear partições: %s"
msgid "Failed to mount filesystem: %s"
msgstr "Falhou em montar sistema de arquivo: %s"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr "Falha ao analisar informações sobre um dispositivo pci para passagem"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "Falha ao desativar a instância: %(reason)s"
@@ -874,14 +787,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "Falha ao ativar a instância: %(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr ""
-"Falha ao preparar o dispositivo PCI %(id)s para a instância "
-"%(instance_uuid)s: %(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "Falha ao fornecer instância %(inst)s: %(reason)s"
@@ -916,9 +821,6 @@ msgstr ""
"Falha ao configurar a senha de administrador em %(instance)s porque "
"%(reason)s"
-msgid "Failed to spawn, rolling back"
-msgstr "Falha ao fazer spawn; recuperando"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "Falha ao suspender a instância: %(reason)s"
@@ -927,10 +829,6 @@ msgstr "Falha ao suspender a instância: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "Falha ao finalizar instância: %(reason)s"
-#, python-format
-msgid "Failed to unplug vif %s"
-msgstr "Falha ao desconectar o vif %s"
-
msgid "Failure prepping block device."
msgstr "Falha na preparação do dispositivo de bloco."
@@ -939,10 +837,6 @@ msgid "File %(file_path)s could not be found."
msgstr "O arquivo %(file_path)s não pôde ser localizado."
#, python-format
-msgid "File path %s not valid"
-msgstr "Caminho de arquivo %s inválido"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr ""
"O IP fixo %(ip)s não é um endereço IP válido para a rede %(network_id)s."
@@ -1071,18 +965,6 @@ msgstr ""
msgid "Found no disk to snapshot."
msgstr "Não foi localizado nenhum disco para captura instantânea."
-#, python-format
-msgid "Found no network for bridge %s"
-msgstr "Não foi encontrada rede para bridge %s"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "Encontrado múltiplas redes para a bridge %s"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "Localizada a rede não exclusiva para name_label %s"
-
msgid "Guest does not have a console available."
msgstr "O convidado não possui um console disponível"
@@ -1111,9 +993,6 @@ msgid "Host does not support guests with custom memory page sizes"
msgstr ""
"O host não suporta convidados com tamanhos de página de memória customizados"
-msgid "Host startup on XenServer is not supported."
-msgstr "A inicialização do host em XenServer não é suportada."
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
"Driver do hypervisor não suporta o método post_live_migration_at_source"
@@ -1342,10 +1221,6 @@ msgstr "A instância não foi redimensionada."
msgid "Instance hostname %(hostname)s is not a valid DNS name"
msgstr "O nome do host da instância %(hostname)s não é um nome DNS válido"
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "A instância já está em Modo de Resgate: %s"
-
msgid "Instance is not a member of specified network"
msgstr "A instância não é um membro de rede especificado"
@@ -1366,11 +1241,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "Recursos de cálculo insuficientes: %(reason)s."
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr ""
-"Memória livre insuficiente no nodo de computação para iniciar %(uuid)s."
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "Interface %(interface)s não encontrada."
@@ -1564,13 +1434,6 @@ msgid ""
"It is not allowed to create an interface on external network %(network_uuid)s"
msgstr "Não é permitido criar uma interface na rede externa %(network_uuid)s"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr ""
-"A imagem de Kernel/Ramdisk é muito grande: %(vdi_size)d bytes, máx. "
-"%(max_size)d bytes"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1594,14 +1457,6 @@ msgstr "Par de chaves %(name)s não localizado para o usuário %(user_id)s"
msgid "Keypair data is invalid: %(reason)s"
msgstr "Dados do par de chaves é inválido: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "O nome do par de chaves contém caracteres não seguros"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"O nome do par de chaves deve ser uma sequência e entre 1 e 255 caracteres de "
-"comprimento"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "Limites suportados somente a partir do vCenter 6.0 e acima"
@@ -1636,9 +1491,6 @@ msgstr "O marcador %(marker)s não pôde ser localizado."
msgid "Maximum number of floating IPs exceeded"
msgstr "Número máximo de IPs flutuantes excedido"
-msgid "Maximum number of key pairs exceeded"
-msgstr "Número máximo de pares de chaves excedido"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "O número máximo de itens de metadados excede %(allowed)d"
@@ -1669,12 +1521,6 @@ msgstr ""
"Métrica %(name)s não pôde ser localizada no nó de host de cálculo %(host)s."
"%(node)s."
-msgid "Migrate Receive failed"
-msgstr "Falha em Migrar Recebimento"
-
-msgid "Migrate Send failed"
-msgstr "Falha em Migrar Envio"
-
#, python-format
msgid "Migration %(id)s for server %(uuid)s is not live-migration."
msgstr ""
@@ -1725,10 +1571,6 @@ msgstr "Erro de destinos de seleção de migração: %(reason)s"
msgid "Missing arguments: %s"
msgstr "Argumentos ausentes: %s"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "Coluna ausente %(table)s.%(column)s na tabela de sombra"
-
msgid "Missing device UUID."
msgstr "UUID de dispositivo faltando."
@@ -1814,13 +1656,6 @@ msgid "Must not input both network_id and port_id"
msgstr "Ambos network_id e port_id não devem ser inseridos"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"Deve especificar connection_url, connection_username (opcionalmente) e "
-"connection_password para usar compute_driver=xenapi.XenAPIDriver"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1874,10 +1709,6 @@ msgstr "Nenhum Mapeamento de Dispositivo de Bloco com id %(id)s."
msgid "No Unique Match Found."
msgstr "Nenhuma Correspondência Exclusiva Localizada."
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "Nenhuma criação de agente associada ao id %(id)s."
-
msgid "No compute host specified"
msgstr "Nenhum host de cálculo especificado"
@@ -1945,10 +1776,6 @@ msgstr "Nenhum ponto de montagem localizado em %(root)s de %(image)s"
msgid "No operating system found in %s"
msgstr "Nenhum sistema operacional localizado em %s"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "Nenhum VDI primário localizado para %s"
-
msgid "No root disk defined."
msgstr "Nenhum disco raiz definido."
@@ -1960,9 +1787,6 @@ msgstr ""
"Nenhuma rede específica foi solicitada e nenhuma está disponível para o "
"projeto '%(project_id)s'."
-msgid "No suitable network for migrate"
-msgstr "Nenhuma rede adequada para migração"
-
msgid "No valid host found for cold migrate"
msgstr "Nenhum host válido localizado para a migração a frio"
@@ -2045,14 +1869,6 @@ msgstr "Um ou mais hosts já na(s) zona(s) de disponibilidade %s"
msgid "Only administrators may list deleted instances"
msgstr "Apenas administradores podem listar instância excluídas"
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr ""
-"Apenas SRs bseados em arquivo (ext/NFS) são suportados por este recurso. SR "
-"%(uuid)s é do tipo %(type)s"
-
msgid "Origin header does not match this host."
msgstr "Cabeçalho de origem não corresponde a esse host."
@@ -2095,10 +1911,6 @@ msgid "PCI device request %(requests)s failed"
msgstr "A solicitação de dispositivo PCI %(requests)s falhou"
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "PIF %s não contém endereço IP"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "Tamanho da página %(pagesize)s proibido contra '%(against)s'"
@@ -2216,10 +2028,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "Cota excedida, servidores em excesso no grupo"
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "Quota excedida: codigo=%(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr "Existe cota para o projeto %(project_id)s, recurso %(resource)s"
@@ -2250,19 +2058,6 @@ msgstr ""
"O limite de cota %(limit)s para %(resource)s deve ser menor ou igual a "
"%(maximum)s."
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr ""
-"Número máximo de novas tentativas atingido ao tentar desconectar o VBD %s"
-
-msgid ""
-"Realtime policy needs vCPU(s) mask configured with at least 1 RT vCPU and 1 "
-"ordinary vCPU. See hw:cpu_realtime_mask or hw_cpu_realtime_mask"
-msgstr ""
-"A política em tempo real precisa da máscara de vCPU(s) configurada com pelo "
-"menos 1 vCPU RT e 1 vCPU ordinária. Consulte hw:cpu_realtime_mask ou "
-"hw_cpu_realtime_mask"
-
msgid "Request body and URI mismatch"
msgstr "Corpo do pedido e incompatibilidade URI"
@@ -2431,10 +2226,6 @@ msgid "Set admin password is not supported"
msgstr "Definir senha admin não é suportado"
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "A tabela de sombra com o nome %(name)s já existe."
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "O compartilhamento '%s' não é suportado"
@@ -2443,13 +2234,6 @@ msgid "Share level '%s' cannot have share configured"
msgstr ""
"O nível de compartilhamento '%s' não pode ter compartilhamento configurado"
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"Redução do sistema de arquivos com resize2fs falhou, verifique se você tem "
-"espaço livre suficiente em disco."
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "A captura instantânea %(snapshot_id)s não pôde ser localizada."
@@ -2477,12 +2261,6 @@ msgstr "A chave de classificação fornecida não era válida."
msgid "Specified fixed address not assigned to instance"
msgstr "Endereço fixo especificado não designado à instância"
-msgid "Specify `table_name` or `table` param"
-msgstr "Spe"
-
-msgid "Specify only one param `table_name` `table`"
-msgstr "Especifique apenas um parâmetro `table_name` `table`"
-
msgid "Started"
msgstr "Iniciado"
@@ -2552,9 +2330,6 @@ msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
msgstr ""
"O número de portas definidas: %(ports)d está acima do limite: %(quota)d"
-msgid "The only partition should be partition 1."
-msgstr "A única partição deve ser a partição 1."
-
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr ""
@@ -2619,49 +2394,13 @@ msgstr ""
"O volume não pode ser atribuído ao mesmo nome de dispositivo que o "
"dispositivo raiz %s"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"Existem registros %(records)d na tabela ‘%(table_name)s‘ em que a coluna "
-"uuid ou instance_uuid é NULA. Execute este comando novamente com a opção --"
-"delete depois de ter feito backup de todos os dados necessários."
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"Existem registros %(records)d na tabela ‘%(table_name)s‘ em que a coluna "
-"uuid ou instance_uuid é NULA. Esses devem ser limpos manualmente antes da "
-"migração ser aprovada. Considere executar o comando 'nova-manage db "
-"null_instance_uuid_scan'."
-
msgid "There are not enough hosts available."
msgstr "Não há hosts suficientes disponíveis."
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"Ainda há %(count)i registros de tipo não migrados. A migração não pode "
-"continuar até que todos os registros de tipo de instância tenham sido "
-"migrados para o novo formato. Execute `nova-manage db migrate_flavor_data' "
-"primeiro."
-
-#, python-format
msgid "There is no such action: %s"
msgstr "Essa ação não existe: %s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr "Não houve registros localizados em que instance_uuid era NULO."
-
#, python-format
msgid ""
"This compute node's hypervisor is older than the minimum supported version: "
@@ -2670,10 +2409,6 @@ msgstr ""
"Esse hypervisor de nó de cálculo é mais antigo que a versão mínima "
"suportada: %(version)s."
-msgid "This domU must be running on the host specified by connection_url"
-msgstr ""
-"Este domU deve estar em execução no host especificado pela connection_url"
-
msgid ""
"This method needs to be called with either networks=None and port_ids=None "
"or port_ids and networks as not none."
@@ -2693,10 +2428,6 @@ msgstr ""
"Esse serviço é mais antigo (v%(thisver)i) que a versão mínima (v%(minver)i) "
"do resto da implementação. Não é possível continuar."
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "Tempo limite de espera para que o dispositivo %s seja criado"
-
msgid "Timeout waiting for response from cell"
msgstr "Aguardando tempo limite para a resposta da célula"
@@ -2742,12 +2473,6 @@ msgid "Unable to authenticate Ironic client."
msgstr "Não é possível autenticar cliente Ironic."
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr ""
-"Não é possível entrar em contato com o agente convidado. A chamada a seguir "
-"atingiu o tempo limite: %(method)s"
-
-#, python-format
msgid "Unable to convert image to %(format)s: %(exp)s"
msgstr "Não é possível converter a imagem em %(format)s: %(exp)s"
@@ -2756,14 +2481,6 @@ msgid "Unable to convert image to raw: %(exp)s"
msgstr "Não é possível converter a imagem para bruto: %(exp)s"
#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "Não é possível destruir o VBD %s"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "Não foi possível destruir o VDI %s"
-
-#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "Não é possível determinar o barramento de disco para '%s'"
@@ -2772,22 +2489,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "Não é possível determinar o prefixo do disco para %s"
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr "Não foi possível ejetar %s do conjunto; Nenhum principal localizado"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "Não foi possível ejetar %s do conjunto; conjunto não vazio"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "Não foi possível localizar SR a partir de VBD %s"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "Não é possível localizar SR a partir de VDI %s"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "Não é possível localizar ca_file : %s"
@@ -2806,9 +2507,6 @@ msgstr "Não é possível localizar o Destino iSCSI"
msgid "Unable to find key_file : %s"
msgstr "Não é possível localizar key_file : %s"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "Não é possível localizar VBD/VDI raiz para VM"
-
msgid "Unable to find volume"
msgstr "Não é possível localizar o volume"
@@ -2818,22 +2516,6 @@ msgstr "Não é possível obter UUID do host: /etc/machine-id não existe"
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr "Não é possível obter UUID do host: /etc/machine-id está vazio"
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "Não foi possível obter registro de VDI %s em"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "Não foi possível introduzir VDI para SR %s"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "Não foi possível introduzir VDI em SR %s"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "Não é possível associar %s ao conjunto"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2864,10 +2546,6 @@ msgstr ""
"Não é possível migrar a instância (%(instance_id)s) para o host atual "
"(%(host)s)."
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "Não é possível obter informações de destino %s"
-
msgid "Unable to resize disk down."
msgstr "Não é possível redimensionar o disco para um tamanho menor."
@@ -2877,13 +2555,6 @@ msgstr "Não é possível configurar senha na instância"
msgid "Unable to shrink disk."
msgstr "Não é possível reduzir disco."
-msgid "Unable to terminate instance."
-msgstr "Não é possível finalizar a instância."
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "Não é possível desconectar o VBD %s"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "Informações de CPU inaceitáveis: %(reason)s"
@@ -2904,16 +2575,6 @@ msgstr ""
"partir de diversas instâncias."
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"Erro inesperado da API. Relate isso em http://bugs.launchpad.net/nova/ e "
-"anexe o log da API Nova se possível.\n"
-"%s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "Ação inesperada %s agregada"
@@ -2974,9 +2635,6 @@ msgstr ""
msgid "Unsupported Content-Type"
msgstr "Tipo de Conteúdo Não Suportado"
-msgid "Upgrade DB using Essex release first."
-msgstr "Faça o upgrade do BD usando a liberação de Essex primeiro."
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr "Usuário %(username)s não localizado no arquivo de senha."
@@ -3000,25 +2658,6 @@ msgstr ""
#, python-format
msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"VDI %(vdi_ref)s é %(virtual_size)d bytes que é maior do que o tamanho do "
-"tipo %(new_disk_size)d bytes."
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"VDI não foi localizado no SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun "
-"%(target_lun)s)"
-
-#, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr "Tentativas de união de VHD excedeu (%d), concedendo..."
-
-#, python-format
-msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
msgstr ""
@@ -3089,11 +2728,6 @@ msgstr ""
"O volume configura o tamanho de bloco, mas o hypervisor libvirt atual '%s' "
"não suporta o tamanho de bloco customizado"
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr "Não suportamos esquema ‘%s' sob Python < 2.7.4, use http ou https"
-
msgid "When resizing, instances must change flavor!"
msgstr "Ao redimensionar, as instâncias devem alterar o método!"
@@ -3108,10 +2742,6 @@ msgstr ""
msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr "Método de cota errado %(method)s usado no recurso %(res)s"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr ""
-"Tipo errado de método de gancho. Somente o tipo 'pré' e ‘pós' permitido"
-
msgid "X-Forwarded-For is missing from request."
msgstr "X-Forwarded-For está ausente da solicitação."
@@ -3127,9 +2757,6 @@ msgstr "X-Metadata-Provider está ausente da solicitação."
msgid "X-Tenant-ID header is missing from request."
msgstr "Cabeçalho X-Tenant-ID está ausente da solicitação."
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "XAPI que suporte relax-xsm-sr-check=true necessário"
-
msgid "You are not allowed to delete the image."
msgstr "Você não tem permissão para excluir a imagem."
@@ -3157,16 +2784,6 @@ msgstr "Nenhum IPs flutuantes disponíveis."
msgid "admin password can't be changed on existing disk"
msgstr "senha do administrador não pode ser alterada no disco existente"
-msgid "aggregate deleted"
-msgstr "agregação excluída"
-
-msgid "aggregate in error"
-msgstr "agregação em erro"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "assert_can_migrate falhou porque: %s"
-
msgid "cannot understand JSON"
msgstr "não é possível entender JSON"
@@ -3230,9 +2847,6 @@ msgstr "imagem já montada"
msgid "instance %s is not running"
msgstr "instância %s não está em execução"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr "a instância possui um kernel ou ramdisk, mas não ambos"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr "a instância é um argumento necessário para usar @refresh_cache"
@@ -3289,9 +2903,6 @@ msgstr "dispositivo nbd %s não mostrado"
msgid "nbd unavailable: module not loaded"
msgstr "nbd indisponível: módulo não carregado"
-msgid "no hosts to remove"
-msgstr "nenhum host para remover"
-
#, python-format
msgid "no match found for %s"
msgstr "nenhuma correspondência localizada para %s"
@@ -3368,9 +2979,6 @@ msgstr ""
"set_admin_password não está implementado por este driver ou esta instância "
"convidada."
-msgid "setup in progress"
-msgstr "configuração em andamento"
-
#, python-format
msgid "snapshot for %s"
msgstr "captura instantânea para %s"
@@ -3387,9 +2995,6 @@ msgstr "excesso de chaves de corpo"
msgid "unpause not supported for vmwareapi"
msgstr "cancelamento de pausa não suportado para vmwareapi"
-msgid "version should be an integer"
-msgstr "a versão deve ser um número inteiro"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "vg %s deve estar no grupo de volumes LVM"
@@ -3417,15 +3022,3 @@ msgid ""
msgstr ""
"o volume '%(vol)s' de status deve estar 'em uso'. Atualmente em '%(status)s' "
"de status"
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake não tem uma implementação para %s"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr ""
-"xenapi.fake não tem implementação para %s ou foi chamado com um número de "
-"argumentos inválido"
diff --git a/nova/locale/ru/LC_MESSAGES/nova.po b/nova/locale/ru/LC_MESSAGES/nova.po
index e49d74bc93..1ea59ab496 100644
--- a/nova/locale/ru/LC_MESSAGES/nova.po
+++ b/nova/locale/ru/LC_MESSAGES/nova.po
@@ -16,7 +16,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -148,18 +148,6 @@ msgid "Affinity instance group policy was violated."
msgstr "Ðарушена ÑÑ‚Ñ€Ð°Ñ‚ÐµÐ³Ð¸Ñ Ð³Ñ€ÑƒÐ¿Ð¿Ñ‹ ÑкземплÑров привÑзки."
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "Ðгент не поддерживает вызов: %(method)s"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"Agent-build Ñ Ð³Ð¸Ð¿ÐµÑ€Ð²Ð¸Ð·Ð¾Ñ€Ð¾Ð¼ %(hypervisor)s, Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ð¾Ð½Ð½Ð°Ñ ÑиÑтема %(os)s, "
-"архитектура %(architecture)s, ÑущеÑтвует."
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "МножеÑтво %(aggregate_id)s уже имеет хоÑÑ‚ %(host)s."
@@ -177,12 +165,6 @@ msgstr ""
"МножеÑтво %(aggregate_id)s не имеет метаданных Ñ ÐºÐ»ÑŽÑ‡Ð¾Ð¼ %(metadata_key)s."
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr ""
-"МножеÑтво %(aggregate_id)s: дейÑтвие '%(action)s' вызвало ошибку: %(reason)s."
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "МножеÑтво %(aggregate_name)s уже ÑущеÑтвует."
@@ -191,10 +173,6 @@ msgid "Aggregate %s does not support empty named availability zone"
msgstr "Совокупный реÑÑƒÑ€Ñ %s не поддерживает зону доÑтупноÑти Ñ Ð¿ÑƒÑтым именем"
#, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "Ðе найдено множеÑтво Ð´Ð»Ñ Ñ‡Ð¸Ñла хоÑтов %(host)s."
-
-#, python-format
msgid "An invalid 'name' value was provided. The name must be: %(reason)s"
msgstr "ÐедопуÑтимое значение 'name'. Должно быть указано: %(reason)s"
@@ -377,12 +355,6 @@ msgid "Can not handle authentication request for %d credentials"
msgstr ""
"Ðевозможно обработать Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ†Ð¸Ð¸ Ð´Ð»Ñ Ð¸Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ð¸ÐºÐ°Ñ†Ð¸Ð¾Ð½Ð½Ñ‹Ñ… данных %d"
-msgid "Can't resize a disk to 0 GB."
-msgstr "Ðе удаетÑÑ Ð¸Ð·Ð¼ÐµÐ½Ð¸Ñ‚ÑŒ размер диÑка на 0 ГБ."
-
-msgid "Can't resize down ephemeral disks."
-msgstr "Ðе удаетÑÑ Ð¸Ð·Ð¼ÐµÐ½Ð¸Ñ‚ÑŒ размер временных диÑков на меньший."
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
"Ðевозможно извлечь корневой путь к уÑтройÑтву из конфигурации libvirt "
@@ -428,12 +400,6 @@ msgstr ""
"ÐеизвеÑтен родительÑкий пул памÑти Ð´Ð»Ñ %s. Ðе удаетÑÑ Ð¾Ð¿Ñ€ÐµÐ´ÐµÐ»Ð¸Ñ‚ÑŒ "
"раÑположение Ð´Ð»Ñ ÑÐ¾Ñ…Ñ€Ð°Ð½ÐµÐ½Ð¸Ñ Ð¾Ð±Ñ€Ð°Ð·Ð¾Ð²"
-msgid "Cannot find SR of content-type ISO"
-msgstr "Ðевозможно найти SR типа Ñодержимого ISO"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "Ðевозможно найти SR Ð´Ð»Ñ Ñ‡Ñ‚ÐµÐ½Ð¸Ñ/запиÑи VDI."
-
msgid "Cannot find image for rebuild"
msgstr "Ðевозможно найти образ Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐºÐ¾Ð¼Ð¿Ð¾Ð½Ð¾Ð²ÐºÐ¸"
@@ -561,10 +527,6 @@ msgid "Connection to libvirt lost: %s"
msgstr "Соединение Ñ libvirt потерÑно: %s"
#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "Соединение Ñ Ð³Ð¸Ð¿ÐµÑ€Ð²Ð¸Ð·Ð¾Ñ€Ð¾Ð¼ разорвано на хоÑте: %(host)s"
-
-#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
@@ -638,20 +600,9 @@ msgid "Default PBM policy is required if PBM is enabled."
msgstr "Ð¡Ñ‚Ñ€Ð°Ñ‚ÐµÐ³Ð¸Ñ PBM по умолчанию ÑвлÑетÑÑ Ð¾Ð±Ñзательной, еÑли включен PBM."
#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "Из таблицы '%(table_name)s' удалены запиÑи (%(records)d)."
-
-#, python-format
msgid "Device '%(device)s' not found."
msgstr "УÑтройÑтво '%(device)s' не найдено."
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr ""
-"УÑтройÑтво Ñ ÑƒÐºÐ°Ð·Ð°Ð½Ð½Ñ‹Ð¼ ИД %(id)s не поддерживаетÑÑ Ð² гипервизоре верÑии "
-"%(version)s"
-
msgid "Device name contains spaces."
msgstr "Ð˜Ð¼Ñ ÑƒÑтройÑтва Ñодержит пробелы."
@@ -663,18 +614,6 @@ msgid "Device type mismatch for alias '%s'"
msgstr "ÐеÑоответÑтвие типа уÑтройÑтва Ð´Ð»Ñ Ð¿Ñевдонима '%s'"
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-"Различные типы в %(table)s.%(column)s и теневой таблице: %(c_type)s "
-"%(shadow_c_type)s"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr "ДиÑк Ñодержит файловую ÑиÑтему, размер которой невозможно изменить: %s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "Форматирование диÑка %(disk_format)s недопуÑтимо"
@@ -682,13 +621,6 @@ msgstr "Форматирование диÑка %(disk_format)s недопуÑÑ‚
msgid "Disk info file is invalid: %(reason)s"
msgstr "ÐедопуÑтимый файл информации о диÑке: %(reason)s"
-msgid "Disk must have only one partition."
-msgstr "ДиÑк должен иметь только один раздел."
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr "ДиÑк Ñ Ð˜Ð” %s не подключен к ÑкземплÑру."
-
#, python-format
msgid "Driver Error: %s"
msgstr "Ошибка драйвера: %s"
@@ -706,10 +638,6 @@ msgstr ""
"вÑе еще '%(state)s'."
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "Ошибка при вызове агента: %(method)s"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr ""
"Ошибка возврата из отложенного ÑоÑтоÑÐ½Ð¸Ñ ÑкземплÑра %(instance_id)s: "
@@ -763,9 +691,6 @@ msgstr "Ошибка при монтировании %(image)s Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Ошибка при Ñоздании монитора реÑурÑов: %(monitor)s"
-msgid "Error: Agent is disabled"
-msgstr "Ошибка: Ðгент выключен"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "Событие %(event)s не найдено Ð´Ð»Ñ Ð˜Ð” дейÑÑ‚Ð²Ð¸Ñ %(action_id)s"
@@ -799,10 +724,6 @@ msgstr "Превышено макÑимальное количеÑтво попÑ
msgid "Expected a uuid but received %(uuid)s."
msgstr "ОжидалÑÑ uuid, а получен %(uuid)s."
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "Дополнительный Ñтолбец %(table)s.%(column)s в теневой таблице"
-
msgid "Extracting vmdk from OVA failed."
msgstr "Извлечение vmdk из OVA не выполнено."
@@ -831,10 +752,6 @@ msgid "Failed to attach network adapter device to %(instance_uuid)s"
msgstr "Ðе удалоÑÑŒ подключить уÑтройÑтво Ñетевого адаптера к %(instance_uuid)s"
#, python-format
-msgid "Failed to create vif %s"
-msgstr "Ðе удалоÑÑŒ Ñоздать vif %s"
-
-#, python-format
msgid "Failed to deploy instance: %(reason)s"
msgstr "Ðе удалоÑÑŒ развернуть ÑкземплÑÑ€: %(reason)s"
@@ -862,11 +779,6 @@ msgstr "Ðе удалоÑÑŒ отобразить разделы: %s"
msgid "Failed to mount filesystem: %s"
msgstr "Ошибка Ð¼Ð¾Ð½Ñ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð¾Ð²Ð¾Ð¹ ÑиÑтемы: %s"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr ""
-"Ðе удалоÑÑŒ проанализировать информацию об уÑтройÑтве pci на предмет "
-"удаленного входа"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "Ðе удалоÑÑŒ выключить ÑкземплÑÑ€: %(reason)s"
@@ -876,14 +788,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "Ðе удалоÑÑŒ включить ÑкземплÑÑ€: %(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr ""
-"Ðе удалоÑÑŒ подготовить уÑтройÑтво PCI %(id)s Ð´Ð»Ñ ÑкземплÑра "
-"%(instance_uuid)s: %(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "Ðе удалоÑÑŒ выделить реÑурÑÑ‹ ÑкземплÑру %(inst)s: %(reason)s"
@@ -919,9 +823,6 @@ msgstr ""
"Ðе удалоÑÑŒ уÑтановить пароль админиÑтратора в %(instance)s по причине: "
"%(reason)s"
-msgid "Failed to spawn, rolling back"
-msgstr "Ðе удалоÑÑŒ выполнить порождение, выполнÑетÑÑ Ð¾Ñ‚ÐºÐ°Ñ‚"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "Ðе удалоÑÑŒ приоÑтановить ÑкземплÑÑ€: %(reason)s"
@@ -930,10 +831,6 @@ msgstr "Ðе удалоÑÑŒ приоÑтановить ÑкземплÑÑ€: %(rea
msgid "Failed to terminate instance: %(reason)s"
msgstr "Ðе удалоÑÑŒ завершить ÑкземплÑÑ€: %(reason)s"
-#, python-format
-msgid "Failed to unplug vif %s"
-msgstr "Ðе удалоÑÑŒ отÑоединить vif %s"
-
msgid "Failure prepping block device."
msgstr "Сбой при подготовке блочного уÑтройÑтва."
@@ -942,10 +839,6 @@ msgid "File %(file_path)s could not be found."
msgstr "Файл %(file_path)s не может быть найден."
#, python-format
-msgid "File path %s not valid"
-msgstr "Путь к файлу %s не верен"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr ""
"ФикÑированный IP %(ip)s не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым IP-адреÑом Ð´Ð»Ñ Ñети "
@@ -1077,18 +970,6 @@ msgstr ""
msgid "Found no disk to snapshot."
msgstr "Ðе найден диÑк Ð´Ð»Ñ ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¼Ð¾Ð¼ÐµÐ½Ñ‚Ð°Ð»ÑŒÐ½Ð¾Ð¹ копии."
-#, python-format
-msgid "Found no network for bridge %s"
-msgstr "Ðе найдена Ñеть Ð´Ð»Ñ Ð¼Ð¾Ñта %s"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "Ðайдена не ÑƒÐ½Ð¸ÐºÐ°Ð»ÑŒÐ½Ð°Ñ Ñеть Ð´Ð»Ñ Ð¼Ð¾Ñта %s"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "Ðайдена не ÑƒÐ½Ð¸ÐºÐ°Ð»ÑŒÐ½Ð°Ñ Ñеть Ð´Ð»Ñ name_label %s"
-
msgid "Guest does not have a console available."
msgstr "ГоÑÑ‚ÑŒ не имеет доÑтупной конÑоли."
@@ -1117,9 +998,6 @@ msgid "Host does not support guests with custom memory page sizes"
msgstr ""
"ХоÑÑ‚ не поддерживает гоÑтей Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»ÑŒÑкими размерами Ñтраниц памÑти"
-msgid "Host startup on XenServer is not supported."
-msgstr "ЗапуÑк узла на XenServer не поддерживаетÑÑ."
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
"Драйвер гипервизора не поддерживает метод post_live_migration_at_source"
@@ -1347,10 +1225,6 @@ msgstr "С копией не производилоÑÑŒ изменение раÐ
msgid "Instance hostname %(hostname)s is not a valid DNS name"
msgstr "ÐедопуÑтимое Ð¸Ð¼Ñ DNS ÑкземплÑра %(hostname)s"
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "ÐšÐ¾Ð¿Ð¸Ñ Ð² ÑоÑтоÑнии режима воÑÑтановлениÑ: %s"
-
msgid "Instance is not a member of specified network"
msgstr "ÐšÐ¾Ð¿Ð¸Ñ Ð½Ðµ ÑвлÑетÑÑ ÑƒÑ‡Ð°Ñтником заданной Ñети"
@@ -1371,10 +1245,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "ÐедоÑтаточно вычиÑлительных реÑурÑов: %(reason)s."
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr "ÐедоÑтаточно памÑти на узле Ñети compute Ð´Ð»Ñ Ð·Ð°Ð¿ÑƒÑка %(uuid)s."
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "Ð˜Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ %(interface)s не найден."
@@ -1567,13 +1437,6 @@ msgid ""
"It is not allowed to create an interface on external network %(network_uuid)s"
msgstr "Ðе разрешено Ñоздавать интерфейÑÑ‹ во внешней Ñети %(network_uuid)s"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr ""
-"Превышен размер Ñдра/Ramdisk образа: %(vdi_size)d байт, макÑ. %(max_size)d "
-"байт"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1598,13 +1461,6 @@ msgstr ""
msgid "Keypair data is invalid: %(reason)s"
msgstr "ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ Ð¿Ð°Ñ€Ð° ключей: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Ð˜Ð¼Ñ ÐºÑ€Ð¸Ð¿Ñ‚Ð¾Ð³Ñ€Ð°Ñ„Ð¸Ñ‡ÐµÑкой пары Ñодержит ненадежные Ñимволы"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"Ð˜Ð¼Ñ ÐºÑ€Ð¸Ð¿Ñ‚Ð¾Ð³Ñ€Ð°Ñ„Ð¸Ñ‡ÐµÑкой пары должно быть Ñтрокой длиной от 1 до 255 Ñимволов"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "ÐžÐ³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð¸Ñ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÑŽÑ‚ÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ в vCenter 6.0 и выше"
@@ -1637,9 +1493,6 @@ msgstr "Маркер %(marker)s не найден."
msgid "Maximum number of floating IPs exceeded"
msgstr "Превышено макÑимальное чиÑло нефикÑированных IP"
-msgid "Maximum number of key pairs exceeded"
-msgstr "МакÑимальное чиÑло пар ключей превышено"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "МакÑимальное чиÑло Ñлементов метаданных превышает %(allowed)d"
@@ -1670,12 +1523,6 @@ msgstr ""
"Ðе удалоÑÑŒ найти показатель %(name)s на вычиÑлительном узле хоÑта %(host)s."
"%(node)s."
-msgid "Migrate Receive failed"
-msgstr "Ðе удалоÑÑŒ выполнить получение переноÑа"
-
-msgid "Migrate Send failed"
-msgstr "Ðе удалоÑÑŒ выполнить отправку переноÑа"
-
#, python-format
msgid "Migration %(id)s for server %(uuid)s is not live-migration."
msgstr ""
@@ -1722,10 +1569,6 @@ msgstr "Ошибка выбора целевых объектов переноÑ
msgid "Missing arguments: %s"
msgstr "ОтÑутÑтвуют аргументы: %s"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "ОтÑутÑтвует Ñтолбец %(table)s.%(column)s в теневой таблице"
-
msgid "Missing device UUID."
msgstr "Ðе указан UUID уÑтройÑтва."
@@ -1808,13 +1651,6 @@ msgid "Must not input both network_id and port_id"
msgstr "ÐÐµÐ»ÑŒÐ·Ñ Ð²Ð²Ð¾Ð´Ð¸Ñ‚ÑŒ и network_id, и port_id"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"Ðеобходимо указать connection_url, connection_username (необÑзательно) и "
-"connection_password Ð´Ð»Ñ Ð¸ÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ compute_driver=xenapi.XenAPIDriver"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1867,10 +1703,6 @@ msgstr "ОтÑутÑтвует ÑвÑзь блочного уÑтройÑтва
msgid "No Unique Match Found."
msgstr "Уникальное ÑоответÑтвие не найдено."
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "С %(id)s не ÑвÑзан ни один agent-build."
-
msgid "No compute host specified"
msgstr "ХоÑÑ‚ вычиÑлений не указан"
@@ -1937,10 +1769,6 @@ msgstr "Точки Ð¼Ð¾Ð½Ñ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð½Ðµ найдены в %(root)s из
msgid "No operating system found in %s"
msgstr "ÐžÐ¿ÐµÑ€Ð°Ñ†Ð¸Ð¾Ð½Ð½Ð°Ñ ÑиÑтема не найдена в %s"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "Первичный VDI не найден Ð´Ð»Ñ %s"
-
msgid "No root disk defined."
msgstr "Ðе определен корневой диÑк."
@@ -1951,9 +1779,6 @@ msgid ""
msgstr ""
"Ð¢Ñ€ÐµÐ±ÑƒÐµÐ¼Ð°Ñ Ñеть не задана, и нет доÑтупных Ñетей Ð´Ð»Ñ Ð¿Ñ€Ð¾ÐµÐºÑ‚Ð° '%(project_id)s'."
-msgid "No suitable network for migrate"
-msgstr "Ðет подходÑщей Ñети Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ½Ð¾Ñа"
-
msgid "No valid host found for cold migrate"
msgstr "Ðе найден допуÑтимый хоÑÑ‚ Ð´Ð»Ñ Ñ…Ð¾Ð»Ð¾Ð´Ð½Ð¾Ð³Ð¾ переноÑа"
@@ -2033,14 +1858,6 @@ msgstr "Один или неÑколько хоÑтов уже находÑÑ‚ÑÑ
msgid "Only administrators may list deleted instances"
msgstr "Только админиÑтраторы могут выводить ÑпиÑок удаленных ÑкземплÑров"
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr ""
-"Ð’ Ñтой функции поддерживаютÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ SR (ext/NFS) на оÑнове файлов. SR "
-"%(uuid)s имеет тип %(type)s"
-
msgid "Origin header does not match this host."
msgstr "Заголовок Origin не ÑоответÑтвует данному хоÑту."
@@ -2083,10 +1900,6 @@ msgid "PCI device request %(requests)s failed"
msgstr "Ð—Ð°Ð¿Ñ€Ð¾Ñ ÑƒÑтройÑтва PCI %(requests)s не выполнен"
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "PIF %s не Ñодержит IP-адреÑ"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "Размер Ñтраницы %(pagesize)s запрещен Ð´Ð»Ñ '%(against)s'"
@@ -2204,10 +2017,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "Превышена квота, Ñлишком много Ñерверов в группе."
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "Квота превышена: код=%(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr "Квота ÑущеÑтвует Ð´Ð»Ñ Ð¿Ñ€Ð¾ÐµÐºÑ‚Ð° %(project_id)s, реÑÑƒÑ€Ñ %(resource)s"
@@ -2237,18 +2046,6 @@ msgstr ""
"Ограничение квоты %(limit)s Ð´Ð»Ñ %(resource)s должно быть не больше "
"%(maximum)s."
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr "ДоÑтигнуто макÑимальное чиÑло попыток отÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ VBD %s"
-
-msgid ""
-"Realtime policy needs vCPU(s) mask configured with at least 1 RT vCPU and 1 "
-"ordinary vCPU. See hw:cpu_realtime_mask or hw_cpu_realtime_mask"
-msgstr ""
-"Ð¡Ñ‚Ñ€Ð°Ñ‚ÐµÐ³Ð¸Ñ Ñ€ÐµÐ°Ð»ÑŒÐ½Ð¾Ð³Ð¾ времени требует, чтобы маÑка vCPU(s) была наÑтроена Ñ "
-"Ñ…Ð¾Ñ‚Ñ Ð±Ñ‹ одним 1 vCPU реального времени и 1 обычным vCPU. См. hw:"
-"cpu_realtime_mask или hw_cpu_realtime_mask"
-
msgid "Request body and URI mismatch"
msgstr "Тело запроÑа и URI не Ñовпадают"
@@ -2414,10 +2211,6 @@ msgid "Set admin password is not supported"
msgstr "Указание Ð¿Ð°Ñ€Ð¾Ð»Ñ Ð°Ð´Ð¼Ð¸Ð½Ð¸Ñтратора не поддерживаетÑÑ."
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "Ð¢ÐµÐ½ÐµÐ²Ð°Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ð° Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ %(name)s уже ÑущеÑтвует."
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "Общий реÑÑƒÑ€Ñ '%s' не поддерживаетÑÑ"
@@ -2425,13 +2218,6 @@ msgstr "Общий реÑÑƒÑ€Ñ '%s' не поддерживаетÑÑ"
msgid "Share level '%s' cannot have share configured"
msgstr "Ð”Ð»Ñ ÑƒÑ€Ð¾Ð²Ð½Ñ '%s' общего реÑурÑа Ð½ÐµÐ»ÑŒÐ·Ñ Ð½Ð°Ñтраивать общий реÑурÑ. "
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"Сокращение размера файловой ÑиÑтемы Ñ resize2fs не выполнено, проверьте, "
-"доÑтаточно ли Ñвободного меÑта на диÑке."
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "Снимок %(snapshot_id)s не может быть найден."
@@ -2456,12 +2242,6 @@ msgstr "Указанный ключ Ñортировки неверен."
msgid "Specified fixed address not assigned to instance"
msgstr "Указанный фикÑированный Ð°Ð´Ñ€ÐµÑ Ð½Ðµ назначен ÑкземплÑру"
-msgid "Specify `table_name` or `table` param"
-msgstr "Укажите параметр `table_name` или `table`"
-
-msgid "Specify only one param `table_name` `table`"
-msgstr "Укажите только один параметр `table_name` или `table`"
-
msgid "Started"
msgstr "Ðачато"
@@ -2531,9 +2311,6 @@ msgstr ""
"ЧиÑло определенных портов %(ports)dis превышает макÑимально разрешенное: "
"%(quota)d"
-msgid "The only partition should be partition 1."
-msgstr "ЕдинÑтвенный раздел должен быть разделом 1."
-
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr "Указанный путь к уÑтройÑтву RNG: (%(path)s) не ÑущеÑтвует на хоÑте."
@@ -2597,58 +2374,19 @@ msgid ""
msgstr ""
"Том Ð½ÐµÐ»ÑŒÐ·Ñ Ð½Ð°Ð·Ð½Ð°Ñ‡Ð¸Ñ‚ÑŒ имени уÑтройÑтва, Ñовпадающему Ñ ÐºÐ¾Ñ€Ð½ÐµÐ²Ñ‹Ð¼ уÑтройÑтвом %s"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"Ð’ таблице '%(table_name)s' ÑущеÑтвуют запиÑи (%(records)d), Ð´Ð»Ñ ÐºÐ¾Ñ‚Ð¾Ñ€Ñ‹Ñ… "
-"Ñтолбец uuid или instance_uuid равен NULL. ЗапуÑтите команду повторно Ñ "
-"опцией --delete поÑле ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ñ€ÐµÐ·ÐµÑ€Ð²Ð½Ð¾Ð¹ копии вÑей нужных данных."
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"Ð’ таблице '%(table_name)s' ÑущеÑтвуют запиÑи (%(records)d), Ð´Ð»Ñ ÐºÐ¾Ñ‚Ð¾Ñ€Ñ‹Ñ… "
-"Ñтолбец uuid или instance_uuid равен NULL. Они должны быть очищены вручную "
-"до Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð¿ÐµÑ€ÐµÐ½Ð¾Ñа. ЗапуÑтите команду 'nova-manage db "
-"null_instance_uuid_scan'."
-
msgid "There are not enough hosts available."
msgstr "Ðет доÑтаточного чиÑла доÑтупных хоÑтов."
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"ОÑталоÑÑŒ %(count)i запиÑей разновидноÑти, которые не были перенеÑены. "
-"Продолжение миграции невозможно, пока вÑе запиÑи разновидноÑти ÑкземплÑра не "
-"будут перенеÑены в новый формат. Вначале необходимо выполнить команду 'nova-"
-"manage db migrate_flavor_data'."
-
-#, python-format
msgid "There is no such action: %s"
msgstr "Ðе ÑущеÑтвует такого дейÑтвиÑ: %s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr "Ðе обнаружены запиÑи, Ð´Ð»Ñ ÐºÐ¾Ñ‚Ð¾Ñ€Ñ‹Ñ… instance_uuid равен NULL."
-
#, python-format
msgid ""
"This compute node's hypervisor is older than the minimum supported version: "
"%(version)s."
msgstr "ВерÑÐ¸Ñ Ð³Ð¸Ð¿ÐµÑ€Ð²Ð¸Ð·Ð¾Ñ€Ð° Ñтого узла ниже минимально допуÑтимой: %(version)s."
-msgid "This domU must be running on the host specified by connection_url"
-msgstr "Этот domU должен быть запущен на хоÑте, указанном в connection_url"
-
msgid ""
"This method needs to be called with either networks=None and port_ids=None "
"or port_ids and networks as not none."
@@ -2668,10 +2406,6 @@ msgstr ""
"ВерÑÐ¸Ñ Ñтой Ñлужбы (v%(thisver)i) меньше минимальной верÑии (v%(minver)i) "
"оÑтальных компонентов развертываниÑ. Продолжение работы невозможно."
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¿Ñ€Ð¸ Ñоздании уÑтройÑтва %s"
-
msgid "Timeout waiting for response from cell"
msgstr "Тайм-аут Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¾Ñ‚Ð²ÐµÑ‚Ð° от Ñчейки"
@@ -2716,12 +2450,6 @@ msgid "Unable to authenticate Ironic client."
msgstr "Ðе удалоÑÑŒ идентифицировать клиент Ironic."
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr ""
-"Ðевозможно ÑвÑзатьÑÑ Ñ Ð³Ð¾Ñтевым агентом. Возник тайм-аут Ñледующего вызова: "
-"%(method)s"
-
-#, python-format
msgid "Unable to convert image to %(format)s: %(exp)s"
msgstr "Ðе удаетÑÑ Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ñ‚ÑŒ образ в %(format)s: %(exp)s"
@@ -2730,14 +2458,6 @@ msgid "Unable to convert image to raw: %(exp)s"
msgstr "Ðе удаетÑÑ Ð¿Ñ€ÐµÐ¾Ð±Ñ€Ð°Ð·Ð¾Ð²Ð°Ñ‚ÑŒ образ в формат raw: %(exp)s"
#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "Ðевозможно ликвидировать VBD %s"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "Ðевозможно ликвидировать VDI %s"
-
-#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "Ðевозможно определить шину диÑка Ð´Ð»Ñ '%s'"
@@ -2746,22 +2466,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "Ðевозможно определить Ð¿Ñ€ÐµÑ„Ð¸ÐºÑ Ð´Ð¸Ñка Ð´Ð»Ñ %s"
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr "Ðевозможно удалить %s из пула; главный узел не найден"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "Ðевозможно удалить %s из пула; пул не пуÑÑ‚"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "Ðевозможно найти SR из VBD %s"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "Ðе найден SR из VDI %s"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "Ðе удалоÑÑŒ найти ca_file : %s"
@@ -2780,9 +2484,6 @@ msgstr "Ðевозможно найти назначение iSCSI"
msgid "Unable to find key_file : %s"
msgstr "Ðевозможно найти key_file: %s"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "Ðевозможно найти корневой VBD/VDI Ð´Ð»Ñ VM"
-
msgid "Unable to find volume"
msgstr "Ðевозможно найти том"
@@ -2792,22 +2493,6 @@ msgstr "Ðе удалоÑÑŒ получить UUID хоÑта: /etc/machine-id н
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr "Ðе удалоÑÑŒ получить UUID хоÑта: /etc/machine-id пуÑÑ‚"
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "Ðевозможно получить запиÑÑŒ VDI %s на"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "Ðевозможно внедрить VDI Ð´Ð»Ñ SR %s"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "Ðевозможно внедрить VDI на SR %s"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "Ðевозможно подключить %s в пул"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2837,10 +2522,6 @@ msgid ""
msgstr ""
"Ðевозможно перемеÑтить копию (%(instance_id)s) на текущий узел (%(host)s)."
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "Ðевозможно получить целевую информацию %s"
-
msgid "Unable to resize disk down."
msgstr "ÐÐµÐ»ÑŒÐ·Ñ ÑƒÐ¼ÐµÐ½ÑŒÑˆÐ¸Ñ‚ÑŒ размер диÑка."
@@ -2850,13 +2531,6 @@ msgstr "Ðевозможно уÑтановить пароль Ð´Ð»Ñ Ñкзем
msgid "Unable to shrink disk."
msgstr "Ðе удалоÑÑŒ уменьшить размер диÑка."
-msgid "Unable to terminate instance."
-msgstr "Ðевозможно завершить ÑкземплÑÑ€."
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "Ðевозможно отÑоединить VBD %s"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "ÐÐµÐ¿Ñ€Ð¸Ð¼ÐµÐ½Ð¸Ð¼Ð°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ CPU: %(reason)s"
@@ -2877,16 +2551,6 @@ msgstr ""
"ÑкземплÑров."
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"ÐÐµÐ¾Ð¶Ð¸Ð´Ð°Ð½Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° API. Сообщите об Ñтом в http://bugs.launchpad.net/nova/ и "
-"прикрепите протокол API Nova, еÑли возможно.\n"
-"%s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "Ðепредвиденное ÑоÑтавное дейÑтвие %s"
@@ -2946,9 +2610,6 @@ msgstr ""
msgid "Unsupported Content-Type"
msgstr "Ðе поддерживаемый тип Ñодержимого"
-msgid "Upgrade DB using Essex release first."
-msgstr "Обновите Ñначала базу данных Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ выпуÑка Essex."
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr "Пользователь %(username)s не найден в файле паролей."
@@ -2972,24 +2633,6 @@ msgstr ""
#, python-format
msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"VDI %(vdi_ref)s %(virtual_size)d байт, что больше размера разновидноÑти "
-"(%(new_disk_size)d байт)."
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"VDI не найден в SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-
-#, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr "ЧиÑло попыток Ð¾Ð±ÑŠÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ VHD превыÑило (%d), отказ..."
-
-#, python-format
-msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
msgstr ""
@@ -3057,12 +2700,6 @@ msgstr ""
"Том указывает размер блока, но текущий гипервизор libvirt '%s' не "
"поддерживает неÑтандартный размер блока"
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr ""
-"Схема '%s' не поддерживаетÑÑ Ð² Python < 2.7.4, иÑпользуйте http или https"
-
msgid "When resizing, instances must change flavor!"
msgstr "При изменении размера ÑкземплÑры должны изменить разновидноÑÑ‚ÑŒ!"
@@ -3078,9 +2715,6 @@ msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr ""
"ИÑпользуетÑÑ Ð½ÐµÐ²ÐµÑ€Ð½Ñ‹Ð¹ метод ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»Ñ ÐºÐ²Ð¾Ñ‚Ñ‹ %(method)s Ð´Ð»Ñ Ñ€ÐµÑурÑа %(res)s"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr "ÐедопуÑтимый тип метода перехватчика. ДопуÑтимые типы: 'pre' и 'post'"
-
msgid "X-Forwarded-For is missing from request."
msgstr "Ð’ запроÑе отÑутÑтвует X-Forwarded-For."
@@ -3096,9 +2730,6 @@ msgstr "Ð’ запроÑе отÑутÑтвует X-Metadata-Provider."
msgid "X-Tenant-ID header is missing from request."
msgstr "Заголовок X-Tenant-ID отÑутÑтвует в запроÑе."
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "ТребуетÑÑ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶ÐºÐ° XAPI relax-xsm-sr-check=true"
-
msgid "You are not allowed to delete the image."
msgstr "Вам не разрешено удалÑÑ‚ÑŒ образ."
@@ -3122,16 +2753,6 @@ msgstr "Ðет доÑтупных нефикÑированных IP."
msgid "admin password can't be changed on existing disk"
msgstr "пароль админиÑтратора не может быть изменен на ÑущеÑтвующем диÑке"
-msgid "aggregate deleted"
-msgstr "ÑоÑтавной объект удален"
-
-msgid "aggregate in error"
-msgstr "Ошибка в ÑоÑтавном объекте"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "Ðе удалоÑÑŒ выполнить assert_can_migrate по причине: %s"
-
msgid "cannot understand JSON"
msgstr "невозможно понÑÑ‚ÑŒ JSON"
@@ -3195,9 +2816,6 @@ msgstr "образ уже приÑоединён"
msgid "instance %s is not running"
msgstr "ЭкземплÑÑ€ %s не запущен"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr "ÐºÐ¾Ð¿Ð¸Ñ Ñодержит Ñдро или ramdisk, но не оба"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr ""
"ÑкземплÑÑ€ ÑвлÑетÑÑ Ñ‚Ñ€ÐµÐ±ÑƒÐµÐ¼Ñ‹Ð¼ аргументом Ð´Ð»Ñ Ð¸ÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ @refresh_cache"
@@ -3254,9 +2872,6 @@ msgstr "УÑтройÑтво nbd %s не показан"
msgid "nbd unavailable: module not loaded"
msgstr "nbd недоÑтупен: модуль не загружен"
-msgid "no hosts to remove"
-msgstr "Ðет хоÑтов Ð´Ð»Ñ ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ"
-
#, python-format
msgid "no match found for %s"
msgstr "не найдено ÑоответÑтвие Ð´Ð»Ñ %s"
@@ -3332,9 +2947,6 @@ msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
"set_admin_password не реализован Ñтим драйвером или гоÑтевым ÑкземплÑром."
-msgid "setup in progress"
-msgstr "ВыполнÑетÑÑ Ð½Ð°Ñтройка"
-
#, python-format
msgid "snapshot for %s"
msgstr "Ð¼Ð¾Ð¼ÐµÐ½Ñ‚Ð°Ð»ÑŒÐ½Ð°Ñ ÐºÐ¾Ð¿Ð¸Ñ Ð´Ð»Ñ %s"
@@ -3351,9 +2963,6 @@ msgstr "слишком много ключей тела"
msgid "unpause not supported for vmwareapi"
msgstr "отмена оÑтановки не поддерживаетÑÑ Ð´Ð»Ñ vmwareapi"
-msgid "version should be an integer"
-msgstr "верÑÐ¸Ñ Ð´Ð¾Ð»Ð¶Ð½Ð° быть целым чиÑлом"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "vg %s должен быть группой томов LVM"
@@ -3378,15 +2987,3 @@ msgid ""
"volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status"
msgstr ""
"Требуемое ÑоÑтоÑние '%(vol)s' тома: 'in-use'. Текущее ÑоÑтоÑние: '%(status)s'"
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake не имеет реализации Ð´Ð»Ñ %s"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr ""
-"xenapi.fake не имеет реализации Ð´Ð»Ñ %s или был вызван Ñ Ð¸Ñпользованием "
-"неправильным чиÑлом аргументов"
diff --git a/nova/locale/tr_TR/LC_MESSAGES/nova.po b/nova/locale/tr_TR/LC_MESSAGES/nova.po
index 060f1c28b3..2e4783233d 100644
--- a/nova/locale/tr_TR/LC_MESSAGES/nova.po
+++ b/nova/locale/tr_TR/LC_MESSAGES/nova.po
@@ -10,7 +10,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -120,18 +120,6 @@ msgid "Affinity instance group policy was violated."
msgstr "Ä°liÅŸki sunucu grubu ilkesi ihlal edildi."
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "Ajan çağrıyı desteklemiyor: %(method)s"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"%(hypervisor)s hipervizörüne %(os)s işletim sistemine %(architecture)s "
-"mimarisine sahip ajan-inşası mevcut."
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "%(aggregate_id)s kümesi zaten%(host)s sunucusuna sahip."
@@ -149,19 +137,9 @@ msgstr ""
"%(aggregate_id)s kümesi %(metadata_key)s. anahtarı ile hiç metadata'sı yok."
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr ""
-"Takım %(aggregate_id)s: eylem '%(action)s' hataya sebep oldu: %(reason)s."
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "%(aggregate_name)s kümesi zaten var."
-#, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "İstemci %(host)s sayısı için takım bulunamadı."
-
msgid "An unknown error has occurred. Please try your request again."
msgstr "Bilinmeyen bir hata oluştu. Lütfen tekrar deneyin."
@@ -306,12 +284,6 @@ msgstr "İstenilen imaj dosyası bulunamadı"
msgid "Can not handle authentication request for %d credentials"
msgstr "%d kimlik bilgileri için kimlik doğrulama isteği ele alınamadı"
-msgid "Can't resize a disk to 0 GB."
-msgstr "Bir disk 0 GB'ye boyutlandırılamaz."
-
-msgid "Can't resize down ephemeral disks."
-msgstr "Geçici disklerin boyutu küçültülemedi."
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr "Sunucu libvirt yapılandırmasından kök aygıt yolu alınamadı"
@@ -344,12 +316,6 @@ msgstr ""
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr "Artık %(objtype)s objesi üzerinde %(method)s çağrılamaz"
-msgid "Cannot find SR of content-type ISO"
-msgstr "ISO içerik türünün SR'si bulunamıyor"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "VDI'ya okuma/yazma yapılırken SR(Saklama deposu) bulunamadı."
-
msgid "Cannot find image for rebuild"
msgstr "Yeniden kurulum için imaj dosyası bulunamadı."
@@ -427,10 +393,6 @@ msgstr "Cinder istemcisine bağlantı başarısız: %(reason)s"
msgid "Connection to libvirt lost: %s"
msgstr "libvirt bağlantısı kayboldu: %s"
-#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "Hipervizör bağlantısı istemci üzerinde bozuk: %(host)s"
-
msgid "Constraint not met."
msgstr "Kısıtlama karşılanmadı."
@@ -493,17 +455,6 @@ msgstr "Datetime geçersiz biçimde"
msgid "Default PBM policy is required if PBM is enabled."
msgstr "PBM etkin ise varsayılan PBM ilkesi gerekir."
-#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "'%(table_name)s' tablosundan %(records)d kayıt silindi."
-
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr ""
-"Belirtilen aygıt kimliği %(id)s hipervizör sürüm %(version)s tarafından "
-"desteklenmiyor"
-
msgid "Device name contains spaces."
msgstr "Aygıt adı boşluk içeriyor."
@@ -511,18 +462,6 @@ msgid "Device name empty or too long."
msgstr "Aygıt adı boş veya çok uzun."
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-"%(table)s.%(column)s ve gölge tabloda değişik türler: %(c_type)s "
-"%(shadow_c_type)s"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr "Disk, yeniden boyutlandıramadığımız bir dosya sistemi içeriyor: %s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "%(disk_format)s disk formatı kabul edilemez."
@@ -530,13 +469,6 @@ msgstr "%(disk_format)s disk formatı kabul edilemez."
msgid "Disk info file is invalid: %(reason)s"
msgstr "Disk bilgi dosyası geçersiz: %(reason)s"
-msgid "Disk must have only one partition."
-msgstr "Diskin tek bir bölümü olmalı."
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr "id:%s diski sunucuya ekli şekilde bulunmadı."
-
#, python-format
msgid "Driver Error: %s"
msgstr "Sürücü Hatası: %s"
@@ -550,10 +482,6 @@ msgstr ""
"'%(state)s'."
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "Ajana yapılan şu çağrıda hata: %(method)s"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr "Askıdan almadan hata sunucu %(instance_id)s: %(reason)s"
@@ -595,9 +523,6 @@ msgstr "%(image)s'in libguestfs (%(e)s) ile bağlanmasında hata"
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Kaynak izleme oluÅŸturulurken hata: %(monitor)s"
-msgid "Error: Agent is disabled"
-msgstr "Hata: Ajan kapalı"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "Olay %(event)s eylem kimliği %(action_id)s için bulunamadı"
@@ -617,10 +542,6 @@ msgstr ""
msgid "Expected a uuid but received %(uuid)s."
msgstr "Bir uuid bekleniyordu ama %(uuid)s alındı."
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "Gölge tabloda ek sütun %(table)s.%(column)s"
-
msgid "Extracting vmdk from OVA failed."
msgstr "OVA'dan vmdk çıkarma başarısız."
@@ -675,10 +596,6 @@ msgstr "Bölümler eşlenemiyor: %s"
msgid "Failed to mount filesystem: %s"
msgstr "Dosya sistemi bağlanamadı: %s"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr ""
-"Düzgeçiş için bir pci aygıtıyla ilgili bilginin ayrıştırılması başarısız"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "Sunucu kapatılamadı: %(reason)s"
@@ -688,13 +605,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "Sunucu açılamadı: %(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr ""
-"%(id)s PCI aygıtı %(instance_uuid)s sunucusu için hazırlanamadı: %(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "%(inst)s sunucusu hazırlanamadı: %(reason)s"
@@ -727,9 +637,6 @@ msgstr "%(path)s üzerinde qemu-img info çalıştırılamadı: %(error)s"
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr "%(reason)s yüzünden %(instance)s üzerinde parola ayarlanamadı"
-msgid "Failed to spawn, rolling back"
-msgstr "Oluşturma başarısız, geri alınıyor"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "Sunucu askıya alınamadı: %(reason)s"
@@ -746,10 +653,6 @@ msgid "File %(file_path)s could not be found."
msgstr "%(file_path)s dosyası bulunamadı."
#, python-format
-msgid "File path %s not valid"
-msgstr "Dosya yolu %s geçerli değil"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr "Sabit IP %(ip)s %(network_id)s için gereçli bir ip adresi değil."
@@ -824,18 +727,6 @@ msgid "Found no disk to snapshot."
msgstr "Anlık görüntüsü alınacak disk bulunamadı."
#, python-format
-msgid "Found no network for bridge %s"
-msgstr "Köprü %s için ağ bulunamadı"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "Köprü %s için benzersiz olmayan ağ bulundu"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "name_label %s için benzersiz olmayan ağ bulundu"
-
-#, python-format
msgid "Host %(host)s could not be found."
msgstr "%(host)s sunucusu bulunamadı."
@@ -851,9 +742,6 @@ msgstr "İstemci NUMA toploji kümesine sahip konukları desteklemiyor"
msgid "Host does not support guests with custom memory page sizes"
msgstr "İstemci özel hafıza sayfa boyutlarına sahip konukları desteklemiyor"
-msgid "Host startup on XenServer is not supported."
-msgstr "XenSunucu üzerinde istemci başlatma desteklenmiyor."
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
"Hipervizör sürücüsü post_live_migration_at_source yöntemini desteklemiyor"
@@ -1052,10 +940,6 @@ msgstr "Sunucunun kaynak istemcisi yok"
msgid "Instance has not been resized."
msgstr "Örnek tekrar boyutlandırılacak şekilde ayarlanmadı."
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "Sunucu zaten Kurtarma Kipinde: %s"
-
msgid "Instance is not a member of specified network"
msgstr "Örnek belirlenmiş ağın bir üyesi değil"
@@ -1068,10 +952,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "Yetersiz hesaplama kaynağı: %(reason)s."
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr "%(uuid)s hesaplama düğümü başlatmada yetersiz boş hafıza."
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "%(interface)s arayüzü bulunamadı."
@@ -1263,12 +1143,6 @@ msgid ""
"It is not allowed to create an interface on external network %(network_uuid)s"
msgstr "Harici ağ %(network_uuid)s üzerinde arayüz oluşturmaya izin verilmiyor"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr ""
-"Çekirdek/Ramdisk imajı çok büyük: %(vdi_size)d bayt, azami %(max_size)d bayt"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1292,14 +1166,6 @@ msgstr "%(user_id)s kullanıcısı için %(name)s anahtar çifti bulunamadı"
msgid "Keypair data is invalid: %(reason)s"
msgstr "Anahtar çifti verisi geçersiz: %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "Anahtar çifti ismi güvensiz karakterler içeriyor"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr ""
-"Anahtar çifti adı karakter dizisi ve 1 ve 255 karakter uzunluğu arasında "
-"olmalıdır"
-
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr "Hatalı biçimlendirilmiş mesaj gövdesi: %(reason)s"
@@ -1322,9 +1188,6 @@ msgstr "İmajın yerele eşleştirilmesi desteklenmiyor."
msgid "Marker %(marker)s could not be found."
msgstr "İşaretçi %(marker)s bulunamadı."
-msgid "Maximum number of key pairs exceeded"
-msgstr "Azami anahtar çifti sayısı aşıldı"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "Azami metadata öğesi sayısı %(allowed)d sayısını aşıyor"
@@ -1354,12 +1217,6 @@ msgid ""
msgstr ""
"%(name)s ölçüsü %(host)s.%(node)s hesaplama istemci düğümünde bulunamadı."
-msgid "Migrate Receive failed"
-msgstr "Göç Alma başarısız"
-
-msgid "Migrate Send failed"
-msgstr "Göç Gönderme başarısız"
-
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr "%(migration_id)s göçü bulunamadı."
@@ -1384,10 +1241,6 @@ msgstr "Göç ön-kontrol hatası: %(reason)s"
msgid "Missing arguments: %s"
msgstr "Eksik bağımsız değişken: %s"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "Gölge tabloda eksik sütun %(table)s.%(column)s"
-
msgid "Missing device UUID."
msgstr "Eksik aygıt UUID."
@@ -1457,13 +1310,6 @@ msgid "Must not input both network_id and port_id"
msgstr "Hem network_id hem port_id verilmemelidir"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"compute_driver=xenapi.XenAPIDriver kullanmak için bağlantı_url'si, "
-"bağlantı_kullanıcıadı (isteğe bağlı), ve bağlantı parolası belirtilmeli"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1515,10 +1361,6 @@ msgstr "%(id)s kimliğine sahip bir Blok Aygıt Eşleştirmesi yok."
msgid "No Unique Match Found."
msgstr "Benzersiz Eşleşme Bulunamadı."
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "Hiçbir ajan-inşası id %(id)s ile ilişkilendirilmemiş."
-
msgid "No compute host specified"
msgstr "Hesap istemcisi belirtilmedi"
@@ -1559,16 +1401,9 @@ msgstr "%(image)s %(root)s içinde bağlantı noktası bulunmadı"
msgid "No operating system found in %s"
msgstr "%s içinde işletim sistemi bulunamadı"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "%s için birincil VDI bulunamadı"
-
msgid "No root disk defined."
msgstr "Kök disk tanımlanmamış."
-msgid "No suitable network for migrate"
-msgstr "Göç için uygun bir ağ yok"
-
msgid "No valid host found for cold migrate"
msgstr "Soğuk göç için geçerli bir istemci bulunamadı"
@@ -1637,14 +1472,6 @@ msgstr "Bir ya da daha fazla istemci zaten kullanılabilir bölge(ler)de %s"
msgid "Only administrators may list deleted instances"
msgstr "Yalnızca yöneticiler silinen sunucuları listeleyebilir"
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr ""
-"Bu özellik tarafından yalnızca dosya-tabanlı SR'ler (ext/NFS) desteklenir. "
-"SR %(uuid)s ise %(type)s türünde"
-
msgid "Origin header does not match this host."
msgstr "Kaynak başlık bu istemciyle eşleşmiyor."
@@ -1683,10 +1510,6 @@ msgid "PCI device %(id)s not found"
msgstr "PCI aygıtı %(id)s bulunamadı"
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "PIF %s IP adresi içermiyor"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "Sayfa boyutu %(pagesize)s '%(against)s' e karşı yasaklı"
@@ -1763,10 +1586,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "Kota aşıldı, grupta çok fazla sunucu var"
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "Kota aşıldı: kod=%(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr "Kota %(project_id)s projesi, %(resource)s kaynağı için mevcut"
@@ -1796,10 +1615,6 @@ msgstr ""
"%(resource)s için %(limit)s kota sınırı %(maximum)s den küçük ya da eşit "
"olmalı."
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr "VBD %s çıkarılmaya çalışılırken azami deneme sayısına ulaşıldı"
-
msgid "Request body and URI mismatch"
msgstr "URI ve gövde isteği uyumsuz"
@@ -1938,10 +1753,6 @@ msgid "Service with host %(host)s topic %(topic)s exists."
msgstr "İstemci %(host)s başlığına %(topic)s sahip servis mevcut."
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "%(name)s isminde bir gölge tablo zaten var."
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "Paylaşım '%s' desteklenmiyor"
@@ -1949,13 +1760,6 @@ msgstr "Paylaşım '%s' desteklenmiyor"
msgid "Share level '%s' cannot have share configured"
msgstr "Paylaşım seviyesi '%s' paylaşım yapılandırmasına sahip olamaz"
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"resize2fs ile dosya sisteminin küçültülmesi başarısız, lütfen diskinizde "
-"yeterli alan olduÄŸundan emin olun."
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "%(snapshot_id)s sistem anlık görüntüsü bulunamadı."
@@ -1972,12 +1776,6 @@ msgstr "Verilen sıralama anahtarı geçerli değil."
msgid "Specified fixed address not assigned to instance"
msgstr "Belirtilen sabit adres sunucuya atanmamış"
-msgid "Specify `table_name` or `table` param"
-msgstr "`tablo_ismi` veya `tablo` parametresi belirtin"
-
-msgid "Specify only one param `table_name` `table`"
-msgstr "Yalnızca bir parametre belirtin `table_name` `table`"
-
msgid "Started"
msgstr "Başlatıldı"
@@ -2027,9 +1825,6 @@ msgid ""
"The instance requires a newer hypervisor version than has been provided."
msgstr "Örnek şu ankinden daha yeni hypervisor versiyonu gerektirir."
-msgid "The only partition should be partition 1."
-msgstr "Tek bölüm bölüm 1 olmalı."
-
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr "Sağlanan RNG aygıt yolu: (%(path)s) istemci üzerinde mevcut değil."
@@ -2086,59 +1881,17 @@ msgid ""
"The volume cannot be assigned the same device name as the root device %s"
msgstr "Mantıksal sürücü kök aygıt %s ile aynı aygıt ismiyle atanamaz"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"'%(table_name)s' tablosunda uuid veya instance_uuid sütunu NULL olan "
-"%(records)d kayıt var. Tüm gerekli veriyi yedekledikten sonra bu komutu "
-"tekrar --delete seçeneğiyle çalıştırın."
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"'%(table_name)s' tablosunda %(records)d kaydın uuid veya instance_uuid "
-"sütunu NULL. Göç devam etmeden önce bunların elle temizlenmesi gerekiyor. "
-"'nova-manage db null_instance_uuid_scan' komutunu çalıştırmayı "
-"deneyebilirsiniz."
-
msgid "There are not enough hosts available."
msgstr "Yeterince kullanılabilir istemci yok."
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"Hala %(count)i göç etmemiş nitelik kaydı var. Göç tüm sunucu nitelik "
-"kayıtları yeni biçime göç ettirilmeden devam edemez. Lütfen önce `nova-"
-"manage db migrate_flavor_data' çalıştırın."
-
-#, python-format
msgid "There is no such action: %s"
msgstr "Böyle bir işlem yok: %s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr "instance_uuid'in NULL olduğu kayıt bulunamadı."
-
-msgid "This domU must be running on the host specified by connection_url"
-msgstr "Bu domU connection_url ile belirtilen istemcide çalışıyor olmalı"
-
#, python-format
msgid "This rule already exists in group %s"
msgstr "Bu kural zaten grupta var %s"
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "%s aygıtının oluşturulması beklenirken zaman aşımı"
-
msgid "Timeout waiting for response from cell"
msgstr "Hücreden cevap beklerken zaman aşımı"
@@ -2155,18 +1908,6 @@ msgid "Unable to authenticate Ironic client."
msgstr "Ironic istemcisi doğrulanamıyor."
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr "Konuk ajana bağlanılamıyor. Şu çağrı zaman aşımına uğradı: %(method)s"
-
-#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "VBD %s silinemedi"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "VDI %s silinemedi"
-
-#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "'%s' için disk veri yolu belirlenemiyor"
@@ -2175,22 +1916,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "%s için disk ön eki belirlenemiyor"
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr "%s havuzdan çıkarılamıyor; Ana bulunamadı"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "%s havuzdan boşaltılamadı; havuz boş değil"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "VBD %s'den SR bulunamadı"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "VDI %s'den SR bulunamadı"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "ca_file bulunamadı: %s"
@@ -2209,28 +1934,9 @@ msgstr "iSCSI Hedefi bulunamadı"
msgid "Unable to find key_file : %s"
msgstr "key_file bulunamadı: %s"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "VM için kök VBD/VDI bulunamadı"
-
msgid "Unable to find volume"
msgstr "Mantıksal sürücü bulunamadı"
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "VDI %s kaydı alınamadı"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "SR %s için VDI getirilemedi"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "SR %s üzerine VDI getirilemiyor"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "Havuzda %s'e katılınamadı"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2260,10 +1966,6 @@ msgid ""
"Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
msgstr "Mevcut (%(host)s) sunucusundan (%(instance_id)s) örneği geçirilemez."
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "Hedef bilgi alınamadı %s"
-
msgid "Unable to resize disk down."
msgstr "Disk boyutu küçültülemedi."
@@ -2273,13 +1975,6 @@ msgstr "Sunucuya parola ayarlanamadı"
msgid "Unable to shrink disk."
msgstr "Disk küçültülemiyor."
-msgid "Unable to terminate instance."
-msgstr "Sunucu sonlandırılamadı."
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "VBD %s çıkarılamıyor"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "Kabul edilemez CPU bilgisi: %(reason)s"
@@ -2292,16 +1987,6 @@ msgid "Unavailable console type %(console_type)s."
msgstr "Uygun olmayan konsol türü %(console_type)s."
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"Beklenmeyen API Hatası: Lütfen bunu http://bugs.launchpad.net/nova/ adresine "
-"raporlayın ve mümkünse Nova API kaydını da ekleyin.\n"
-"%s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "Beklenmedik takım eylemi %s"
@@ -2355,9 +2040,6 @@ msgstr "Askıdan almaya çalışıldı ama %s imajı bulunamadı."
msgid "Unsupported Content-Type"
msgstr "Desteklenmeyen içerik türü"
-msgid "Upgrade DB using Essex release first."
-msgstr "Önce Essex sürümünü kullanarak veritabanı yükseltimi yapın."
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr "Kullanıcı %(username)s parola dosyasında bulunamadı."
@@ -2381,25 +2063,6 @@ msgstr ""
#, python-format
msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"VDI %(vdi_ref)s %(virtual_size)d bayt ki bu nitelik boyutu olan "
-"%(new_disk_size)d bayttan fazla."
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"SR %(sr)s üzerinde VDI bulunamadı (vdi_uuid %(vdi_uuid)s, target_lun "
-"%(target_lun)s)"
-
-#, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr "VHD ergitme girişimi aşıldı (%d), vaz geçiliyor..."
-
-#, python-format
-msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
msgstr ""
@@ -2449,13 +2112,6 @@ msgstr ""
"Mantıksal sürücü blok boyutu ayarlıyor, ama mevcut libvirt hipervizörü '%s' "
"özel blok boyutunu desteklemiyor"
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr ""
-"Python < 2.7.4 altında '%s' şablonunu desteklemiyoruz, lütfen http ya da "
-"https kullanın"
-
msgid "When resizing, instances must change flavor!"
msgstr "Yeniden boyutlandırırken, sunucular nitelik değiştirmelidir!"
@@ -2470,10 +2126,6 @@ msgstr ""
msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr "Kaynak %(res)s üstünde yanlış kota metodu %(method)s kullanıldı"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr ""
-"Yanlış türde kanca metodu. Yalnızca 'pre' ve 'post' türlerine izin verilir"
-
msgid "X-Instance-ID header is missing from request."
msgstr "İstekte X-Instance-ID başlığı eksik."
@@ -2483,9 +2135,6 @@ msgstr "İstekte X-Instance-ID-Signature başlığı eksik."
msgid "X-Tenant-ID header is missing from request."
msgstr "İstekte X-Tenant-ID başlığı eksik."
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "relax-xsm-sr-check=true destekleyen XAPI gerekli"
-
msgid "You are not allowed to delete the image."
msgstr "İmajı silmeye yetkili değilsiniz."
@@ -2506,16 +2155,6 @@ msgstr "LVM imajları kullanmak için images_volume_group belirtmelisiniz."
msgid "admin password can't be changed on existing disk"
msgstr "yönetici parolası mevcut diskte değiştirilemez"
-msgid "aggregate deleted"
-msgstr "takım silindi"
-
-msgid "aggregate in error"
-msgstr "takım hata durumunda"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "assert_can_migrate başarısız çünkü: %s"
-
msgid "cannot understand JSON"
msgstr "JSON dosyası anlaşılamadı"
@@ -2570,9 +2209,6 @@ msgstr "imaj zaten bağlanmış"
msgid "instance %s is not running"
msgstr "sunucu %s çalışmıyor"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr "sunucu bir çekirdek veya ramdisk'e sahip ama her ikisine birden değil"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr "sunucu @refresh_cache kullanmak için gerekli bir bağımsız değişken"
@@ -2622,9 +2258,6 @@ msgstr "nbd aygıtı %s ortaya çıkmadı"
msgid "nbd unavailable: module not loaded"
msgstr "nbd kullanılabilir değil: modül yüklenmemiş"
-msgid "no hosts to remove"
-msgstr "silinecek istemci yok"
-
#, python-format
msgid "no match found for %s"
msgstr "%s için eşleşme bulunamadı"
@@ -2675,9 +2308,6 @@ msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
"set_admin_password bu sürücü ya da konuk sunucu tarafından uygulanmıyor."
-msgid "setup in progress"
-msgstr "kurulum sürüyor"
-
#, python-format
msgid "snapshot for %s"
msgstr "%s için anlık görüntü"
@@ -2694,9 +2324,6 @@ msgstr "Çok sayıda gövde anahtarları"
msgid "unpause not supported for vmwareapi"
msgstr "vmwareapi için sürdürme desteklenmiyor"
-msgid "version should be an integer"
-msgstr "Sürüm tam sayı olmak zorunda"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "vg %s LVM mantıksal sürücü grubu olmalı"
@@ -2719,15 +2346,3 @@ msgid ""
msgstr ""
"mantıksal sürücü '%(vol)s' durumu 'kullanımda' olmalı. Şu an '%(status)s' "
"durumunda"
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake'in %s için bir uygulaması yok"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr ""
-"xenapi.fake'in %s için bir uygulaması yok veya yanlış sayıda bağımsız "
-"değişken ile çağrılmış"
diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova.po b/nova/locale/zh_CN/LC_MESSAGES/nova.po
index a0fd31d543..f206bec321 100644
--- a/nova/locale/zh_CN/LC_MESSAGES/nova.po
+++ b/nova/locale/zh_CN/LC_MESSAGES/nova.po
@@ -32,25 +32,24 @@
# zzxwill <zzxwill@gmail.com>, 2016. #zanata
# blkart <blkart.org@gmail.com>, 2017. #zanata
# Yikun Jiang <yikunkero@gmail.com>, 2018. #zanata
+# Research and Development Center UnitedStack <dev@unitedstack.com>, 2022. #zanata
msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2018-06-25 11:53+0000\n"
-"Last-Translator: Yikun Jiang <yikunkero@gmail.com>\n"
+"PO-Revision-Date: 2022-07-26 02:32+0000\n"
+"Last-Translator: Research and Development Center UnitedStack "
+"<dev@unitedstack.com>\n"
"Language: zh_CN\n"
"Language-Team: Chinese (China)\n"
"Plural-Forms: nplurals=1; plural=0\n"
"Generated-By: Babel 2.2.0\n"
"X-Generator: Zanata 4.3.3\n"
-msgid "\"Look for the VDIs failed"
-msgstr "查找VDI失败"
-
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
msgstr "%(address)s ä¸æ˜¯æœ‰æ•ˆçš„IP v4/6地å€ã€‚"
@@ -70,7 +69,9 @@ msgstr "%(field)s应该是更新的部分。"
#, python-format
msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB"
-msgstr "å·²åˆ†é… %(memsize)d MB å†…å­˜ï¼Œä½†éœ€è¦ %(memtotal)d MB"
+msgstr ""
+"å·²åˆ†é… %(memsize)d MB 内存,但本次请求申请了 %(memtotal)d MB 内存,已超出总é…"
+"é¢é™åˆ¶"
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
@@ -157,18 +158,6 @@ msgid "Affinity instance group policy was violated."
msgstr "è¿å亲和力实例组策略。"
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "代ç†ä¸æ”¯æŒè°ƒç”¨ï¼š%(method)s"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"è™šæ‹Ÿæœºç›‘æŽ§ç¨‹åº %(hypervisor)s æ“作系统 %(os)s 体系结构 %(architecture)s 的代"
-"ç†æž„建已存在。"
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "èšåˆ %(aggregate_id)så·²ç»æœ‰ä¸»æœº %(host)s。"
@@ -185,21 +174,12 @@ msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr "èšåˆ %(aggregate_id)s 没有键为 %(metadata_key)s 的元数æ®ã€‚"
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr "èšé›† %(aggregate_id)s:æ“作“%(action)sâ€å¯¼è‡´äº†é”™è¯¯ï¼š%(reason)s。"
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "èšåˆ %(aggregate_name)s å·²ç»å­˜åœ¨ã€‚"
#, python-format
msgid "Aggregate %s does not support empty named availability zone"
-msgstr "èšé›† %s ä¸æ”¯æŒå称为空的å¯ç”¨åŒºåŸŸ"
-
-#, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "找ä¸åˆ°ä¸»æœº %(host)s 计数的汇总。"
+msgstr "èšåˆ %s ä¸æ”¯æŒå称为空的å¯ç”¨åŒºåŸŸ"
#, python-format
msgid "An invalid 'name' value was provided. The name must be: %(reason)s"
@@ -367,12 +347,6 @@ msgstr "无法找到请求的镜åƒ"
msgid "Can not handle authentication request for %d credentials"
msgstr "无法为 %d 凭è¯å¤„ç†è®¤è¯è¯·æ±‚"
-msgid "Can't resize a disk to 0 GB."
-msgstr "ä¸èƒ½è°ƒæ•´ç£ç›˜åˆ°0GB."
-
-msgid "Can't resize down ephemeral disks."
-msgstr "ä¸èƒ½å‘下调整瞬时ç£ç›˜ã€‚"
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr "无法从实例 libvirt é…置中检索到根设备路径"
@@ -410,12 +384,6 @@ msgid ""
"store images"
msgstr "无法确定 %s 的父存储池;无法确定镜åƒçš„存储ä½ç½®"
-msgid "Cannot find SR of content-type ISO"
-msgstr "无法找到content-type ISO的存储库"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "没有找到存储库æ¥è¯»å†™VDI。"
-
msgid "Cannot find image for rebuild"
msgstr "无法找到用æ¥é‡æ–°åˆ›å»ºçš„é•œåƒ"
@@ -527,10 +495,6 @@ msgstr "连接cinder主机失败: %(reason)s"
msgid "Connection to glance host %(server)s failed: %(reason)s"
msgstr "连接 Glance 主机 %(server)s 失败:%(reason)s"
-#, python-format
-msgid "Connection to libvirt failed: %s"
-msgstr "与 libvirt 的连接失败:%s"
-
msgid "Connection to libvirt lost"
msgstr "libvirt 连接丢失"
@@ -539,10 +503,6 @@ msgid "Connection to libvirt lost: %s"
msgstr "到libvirt的连接丢失:%s"
#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "在主机: %(host)s上é¢ï¼Œç›‘测器的连接断开。"
-
-#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
@@ -599,10 +559,6 @@ msgstr "未能安装 vfat é…置驱动器。%(operation)s 失败。错误:%(er
msgid "Could not upload image %(image_id)s"
msgstr "æœªèƒ½ä¸Šè½½æ˜ åƒ %(image_id)s"
-#, python-format
-msgid "Couldn't unmount the Quobyte Volume at %s"
-msgstr "ä¸èƒ½å¸è½½åœ¨%sçš„Quobyteå·"
-
msgid "Creation of virtual interface with unique mac address failed"
msgstr "为使用特殊MAC地å€çš„vm的创建失败"
@@ -620,10 +576,6 @@ msgid "Default PBM policy is required if PBM is enabled."
msgstr "如果PBMå¯ç”¨ï¼Œç¼ºçœçš„PBM策略是必须的。"
#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "从表格 '%(table_name)s'删除%(records)d记录。"
-
-#, python-format
msgid "Device '%(device)s' not found."
msgstr "找ä¸åˆ°è®¾å¤‡â€œ%(device)sâ€ã€‚"
@@ -631,11 +583,6 @@ msgstr "找ä¸åˆ°è®¾å¤‡â€œ%(device)sâ€ã€‚"
msgid "Device detach failed for %(device)s: %(reason)s"
msgstr "对 %(device)s 执行设备拆离失败:%(reason)s)"
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr "在监测器版本%(version)s,ä¸æ”¯æŒæŒ‡å®šçš„设备%(id)s"
-
msgid "Device name contains spaces."
msgstr "Deviceå称中包å«äº†ç©ºæ ¼"
@@ -647,17 +594,6 @@ msgid "Device type mismatch for alias '%s'"
msgstr "别å“%sâ€çš„设备类型ä¸åŒ¹é…"
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-"在%(table)s.%(column)s和影å­è¡¨ : %(c_type)s %(shadow_c_type)s有ä¸åŒçš„类型"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr "ç£ç›˜åŒ…å«ä¸€ä¸ªæˆ‘们无法调整的文件系统:%s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "ç£ç›˜æ ¼å¼ %(disk_format)s ä¸èƒ½æŽ¥å—"
@@ -665,13 +601,6 @@ msgstr "ç£ç›˜æ ¼å¼ %(disk_format)s ä¸èƒ½æŽ¥å—"
msgid "Disk info file is invalid: %(reason)s"
msgstr "ç£ç›˜ä¿¡æ¯æ–‡ä»¶æ— æ•ˆï¼š%(reason)s"
-msgid "Disk must have only one partition."
-msgstr "ç£ç›˜å¿…é¡»åªèƒ½æœ‰ä¸€ä¸ªåˆ†åŒºã€‚"
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr " id: %s ç£ç›˜æ²¡æœ‰ç»‘定到实例。"
-
#, python-format
msgid "Driver Error: %s"
msgstr "驱动错误:%s"
@@ -687,10 +616,6 @@ msgid ""
msgstr "在节点%(node)s销æ¯å®žä¾‹å‡ºé”™ã€‚准备状æ€ä»ç„¶æ˜¯'%(state)s'。"
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "调用代ç†çš„%(method)s 方法出错"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr "å–消æ置实例 %(instance_id)s 期间出错:%(reason)s"
@@ -743,9 +668,6 @@ msgstr ""
"错误:\n"
"%s"
-msgid "Error: Agent is disabled"
-msgstr "错误:代ç†å·²ç¦ç”¨"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "对于æ“作标识 %(action_id)s,找ä¸åˆ°äº‹ä»¶ %(event)s"
@@ -775,10 +697,6 @@ msgstr "超过最大å°è¯•æ¬¡æ•°ã€‚%(reason)s"
msgid "Expected a uuid but received %(uuid)s."
msgstr "期望 uuid,但是接收到 %(uuid)s。"
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "在影å­è¡¨ä¸­æœ‰é¢å¤–列%(table)s.%(column)s"
-
msgid "Extracting vmdk from OVA failed."
msgstr "从OVAæå‰vmdk失败。"
@@ -804,29 +722,14 @@ msgid "Failed to attach network adapter device to %(instance_uuid)s"
msgstr "连接网络适é…器设备到%(instance_uuid)s失败"
#, python-format
-msgid "Failed to attach volume at mountpoint: %s"
-msgstr "在挂载点%s绑定å·å¤±è´¥"
-
-#, python-format
msgid "Failed to connect to libvirt: %(msg)s"
msgstr "连接到 libvirt 失败: %(msg)s"
#, python-format
-msgid "Failed to create vif %s"
-msgstr "创建 vif %s 失败"
-
-msgid "Failed to delete bridge"
-msgstr "删除 bridge 失败"
-
-#, python-format
msgid "Failed to deploy instance: %(reason)s"
msgstr "未能部署实例: %(reason)s"
#, python-format
-msgid "Failed to destroy instance: %s"
-msgstr "未能销æ¯äº‘主机:%s"
-
-#, python-format
msgid "Failed to detach PCI device %(dev)s: %(reason)s"
msgstr "断开PCI设备%(dev)s失败:%(reason)s"
@@ -850,9 +753,6 @@ msgstr "映射分区失败:%s"
msgid "Failed to mount filesystem: %s"
msgstr "挂载文件系统失败:%s"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr "为了直通,解æžpci设备的信æ¯å¤±è´¥"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "云主机无法关机:%(reason)s"
@@ -862,12 +762,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "云主机无法开机:%(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr "为实例%(instance_uuid)s准备PCI设备%(id)s失败:%(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "准备实例%(inst)s失败:%(reason)s"
@@ -880,10 +774,6 @@ msgid "Failed to reboot instance: %(reason)s"
msgstr "云主机无法é‡å¯ï¼š%(reason)s"
#, python-format
-msgid "Failed to remove snapshot for VM %s"
-msgstr "移除 VM %s 快照失败"
-
-#, python-format
msgid "Failed to remove volume(s): (%(reason)s)"
msgstr "移除å·å¤±è´¥ï¼š(%(reason)s)"
@@ -903,33 +793,14 @@ msgstr "在%(path)sè¿è¡Œ qemu-img info 失败:%(error)s"
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr "未能对 %(instance)s 设置管ç†å‘˜å¯†ç ï¼ŒåŽŸå› å¦‚下:%(reason)s"
-msgid "Failed to spawn, rolling back"
-msgstr "未能è¡ç”Ÿï¼Œæ­£åœ¨å›žæ»š"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "无法挂起云主机:%(reason)s"
-msgid "Failed to teardown container filesystem"
-msgstr "未能å¸è½½å®¹å™¨æ–‡ä»¶ç³»ç»Ÿ"
-
#, python-format
msgid "Failed to terminate instance: %(reason)s"
msgstr "无法终止云主机:%(reason)s"
-msgid "Failed to umount container filesystem"
-msgstr "未能å–消挂载容器文件系统"
-
-#, python-format
-msgid "Failed to unplug vif %s"
-msgstr "拔除 vif %s 失败"
-
-msgid "Failed while plugging vif"
-msgstr "æ’å…¥vif时失败"
-
-msgid "Failed while unplugging vif"
-msgstr "拔出 vif 时失败"
-
msgid "Failure prepping block device."
msgstr "准备å—设备失败。"
@@ -938,10 +809,6 @@ msgid "File %(file_path)s could not be found."
msgstr "找ä¸åˆ°æ–‡ä»¶ %(file_path)s。"
#, python-format
-msgid "File path %s not valid"
-msgstr "文件路径 %s 无效"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr "对于网络%(network_id)s,固定IP %(ip)s ä¸æ˜¯ä¸€ä¸ªæœ‰æ•ˆçš„ip地å€ã€‚"
@@ -1059,24 +926,9 @@ msgstr "传入镜åƒå…ƒæ•°æ®çš„串å£æ•°ä¸èƒ½è¶…过云主机类型中的设定
msgid "Found no disk to snapshot."
msgstr "å‘现没有盘æ¥åšå¿«ç…§ã€‚"
-#, python-format
-msgid "Found no network for bridge %s"
-msgstr "å‘现网桥 %s 没有网络"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "å‘现桥 %s 的网络ä¸å”¯ä¸€"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "å‘现ä¸å”¯ä¸€çš„网络 name_label %s"
-
msgid "Guest agent is not enabled for the instance"
msgstr "该云主机未å¯ç”¨ Guest agent"
-msgid "Guest does not have a console available"
-msgstr "Guest 没有å¯ç”¨æŽ§åˆ¶å°"
-
msgid "Guest does not have a console available."
msgstr "访客没有å¯ç”¨æŽ§åˆ¶å°ã€‚"
@@ -1096,7 +948,7 @@ msgid "Host PowerOn is not supported by the Hyper-V driver"
msgstr "Hyper-V驱动ä¸æ”¯æŒä¸»æœºå¼€æœº"
msgid "Host aggregate is not empty"
-msgstr "主机èšåˆä¸èƒ½ä¸ºç©º"
+msgstr "主机èšåˆä¸ä¸ºç©º"
msgid "Host does not support guests with NUMA topology set"
msgstr "主机ä¸æ”¯æŒå…·æœ‰ NUMA 拓扑集的客户机"
@@ -1104,9 +956,6 @@ msgstr "主机ä¸æ”¯æŒå…·æœ‰ NUMA 拓扑集的客户机"
msgid "Host does not support guests with custom memory page sizes"
msgstr "主机ä¸æ”¯æŒå®šåˆ¶å†…存页大å°çš„客户机"
-msgid "Host startup on XenServer is not supported."
-msgstr "ä¸æ”¯æŒåœ¨XenServerå¯åŠ¨ä¸»æœº"
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr "监测器驱动ä¸æ”¯æŒpost_live_migration_at_source方法"
@@ -1318,10 +1167,6 @@ msgstr "实例还没有调整大å°ã€‚"
msgid "Instance hostname %(hostname)s is not a valid DNS name"
msgstr "实例主机å %(hostname)s 是无效 DNS å称"
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "实例已处于救æ´æ¨¡å¼ï¼š%s"
-
msgid "Instance is not a member of specified network"
msgstr "实例并ä¸æ˜¯æŒ‡å®šç½‘络的æˆå‘˜"
@@ -1345,10 +1190,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "计算资æºä¸è¶³ï¼š%(reason)s。"
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr "没有足够的å¯ç”¨å†…å­˜æ¥å¯åŠ¨è®¡ç®—节点 %(uuid)s。"
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "æŽ¥å£ %(interface)s没有找到。"
@@ -1543,11 +1384,6 @@ msgid ""
"It is not allowed to create an interface on external network %(network_uuid)s"
msgstr "在外部网络%(network_uuid)s创建一个接å£æ˜¯ä¸å…许的"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr "内核/内存盘镜åƒå¤ªå¤§ï¼š%(vdi_size)d 字节,最大 %(max_size)d 字节"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1569,12 +1405,6 @@ msgstr "密钥对 %(name)s 没有为用户 %(user_id)s 找到。"
msgid "Keypair data is invalid: %(reason)s"
msgstr "密钥对数æ®ä¸åˆæ³•ï¼š %(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "密钥对å称包å«ä¸å®‰å…¨çš„字符"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr "密钥对必须是字符串,并且长度在1到255个字符"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "ä»… vCenter 6.0 åŠä»¥ä¸Šç‰ˆæœ¬æ”¯æŒé™åˆ¶"
@@ -1607,9 +1437,6 @@ msgstr "找ä¸åˆ°æ ‡è®°ç¬¦ %(marker)s。"
msgid "Maximum number of floating IPs exceeded"
msgstr "已超过最大浮动 IP 数"
-msgid "Maximum number of key pairs exceeded"
-msgstr "已超过最大密钥对数"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "最大元数æ®é¡¹æ•°è¶…过 %(allowed)d"
@@ -1638,12 +1465,6 @@ msgid ""
"%(node)s."
msgstr "在计算主机节点上%(host)s.%(node)s,测é‡%(name)s没有找到。"
-msgid "Migrate Receive failed"
-msgstr "Migrate Receive 失败"
-
-msgid "Migrate Send failed"
-msgstr "Migrate Send 失败"
-
msgid "Migration"
msgstr "è¿ç§»"
@@ -1691,10 +1512,6 @@ msgstr "è¿ç§»é€‰æ‹©ç›®æ ‡é”™è¯¯ï¼š%(reason)s"
msgid "Missing arguments: %s"
msgstr "缺少å‚数: %s"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "在影å­è¡¨æ ¼ä¸­ç¼ºå°‘列%(table)s.%(column)s"
-
msgid "Missing device UUID."
msgstr "缺少设备UUID"
@@ -1775,13 +1592,6 @@ msgid "Must not input both network_id and port_id"
msgstr "network_idå’Œport_idå¿…é¡»åŒæ—¶è¾“å…¥"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"为了使用compute_driver=xenapi.XenAPIDriver,必须指定connection_url, "
-"connection_username (å¯é€‰), å’Œ connection_password"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1819,7 +1629,7 @@ msgstr "没有找到网络"
msgid ""
"Network requires port_security_enabled and subnet associated in order to "
"apply security groups."
-msgstr "网络需è¦å…³è”çš„ port_security_enabled å’Œå­ç½‘,以便应用安全组。"
+msgstr "网络必须å¯ç”¨ç«¯å£å®‰å…¨ç‰¹æ€§ï¼Œå¹¶ä¸”需è¦å…³è”å­ç½‘,以便应用安全组。"
msgid "New volume must be detached in order to swap."
msgstr "为了进行交æ¢ï¼Œæ–°å·å¿…须断开。"
@@ -1834,10 +1644,6 @@ msgstr "没有å—设备与ID %(id)s进行映射。"
msgid "No Unique Match Found."
msgstr "找ä¸åˆ°ä»»ä½•å”¯ä¸€åŒ¹é…项。"
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "ä¸å­˜åœ¨ä»»ä½•ä¸Žæ ‡è¯† %(id)s å…³è”的代ç†æž„建。"
-
msgid "No compute host specified"
msgstr "未指定计算宿主机"
@@ -1903,10 +1709,6 @@ msgstr "在%(image)s的%(root)s没有找到挂载点"
msgid "No operating system found in %s"
msgstr "在 %s 中找ä¸åˆ°ä»»ä½•æ“作系统"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "对于%s,没有找到主VDI"
-
msgid "No root disk defined."
msgstr "没有定义根ç£ç›˜ã€‚"
@@ -1916,9 +1718,6 @@ msgid ""
"'%(project_id)s'."
msgstr "未请求特定网络,项目“%(project_id)sâ€æ²¡æœ‰å¯ç”¨ç½‘络。"
-msgid "No suitable network for migrate"
-msgstr "对于è¿ç§»ï¼Œæ²¡æœ‰åˆé€‚的网络"
-
msgid "No valid host found for cold migrate"
msgstr "冷è¿ç§»è¿‡ç¨‹ä¸­å‘现无效主机"
@@ -2013,12 +1812,6 @@ msgstr "åªæœ‰ç®¡ç†å‘˜å¯ä»¥åŸºäºŽ %s 对æœåŠ¡å™¨è¿›è¡ŒæŽ’åº"
msgid "Only administrators may list deleted instances"
msgstr "仅管ç†å‘˜å¯åˆ—示已删除的实例"
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr "åªæœ‰åŸºäºŽæ–‡ä»¶çš„SRs(ext/NFS)支æŒè¿™ä¸ªç‰¹æ€§ã€‚SR %(uuid)s 是类型 %(type)s"
-
msgid "Origin header does not match this host."
msgstr "æºå¤´ä¸Žè¿™å°ä¸»æœºä¸åŒ¹é…。"
@@ -2059,10 +1852,6 @@ msgid "PCI device request %(requests)s failed"
msgstr "PCI 设备请求 %(requests)s 失败"
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "PIF %s 没有包å«IP地å€"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "已对照“%(against)sâ€ç¦ç”¨é¡µå¤§å° %(pagesize)s"
@@ -2182,10 +1971,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "已超过é…é¢ï¼Œç»„中太多的æœåŠ¡å™¨"
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "é…é¢ç”¨å°½ï¼šcode=%(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr "项目 %(project_id)s èµ„æº %(resource)s çš„é…é¢å­˜åœ¨ã€‚"
@@ -2211,17 +1996,6 @@ msgid ""
"%(maximum)s."
msgstr "资æº%(resource)sçš„é…é¢é™åˆ¶%(limit)s 必须少于或等于%(maximum)s。"
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr "已达到å°è¯•æ‹”出 VBD %s 的最大é‡è¯•æ¬¡æ•°"
-
-msgid ""
-"Realtime policy needs vCPU(s) mask configured with at least 1 RT vCPU and 1 "
-"ordinary vCPU. See hw:cpu_realtime_mask or hw_cpu_realtime_mask"
-msgstr ""
-"实时策略è¦æ±‚ vCPU 掩ç é…置有至少一个 1 个 RT vCPU å’Œ 1 个普通 vCPU。请å‚阅hw:"
-"cpu_realtime_mask 或 hw_cpu_realtime_mask"
-
msgid "Request body and URI mismatch"
msgstr "请求主体和URIä¸åŒ¹é…"
@@ -2381,10 +2155,6 @@ msgid "Set the cell name."
msgstr "设置 cell å称"
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "å½±å­æ•°æ®è¡¨ %(name)så·²ç»å­˜åœ¨"
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "ä¸æ”¯æŒå…±äº« '%s' "
@@ -2392,18 +2162,6 @@ msgstr "ä¸æ”¯æŒå…±äº« '%s' "
msgid "Share level '%s' cannot have share configured"
msgstr "共享级别 '%s' ä¸ç”¨å…±äº«é…ç½®"
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"使用resize2fså‘下压缩文件系统失败,请检查您的ç£ç›˜ä¸Šæ˜¯å¦æœ‰è¶³å¤Ÿçš„剩余空间。"
-
-msgid "Shutting down VM (cleanly) failed."
-msgstr "关闭虚拟机(软)失败。"
-
-msgid "Shutting down VM (hard) failed"
-msgstr "关闭虚拟机(硬)失败"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "快照 %(snapshot_id)s 没有找到。"
@@ -2428,12 +2186,6 @@ msgstr "æ供的排åºé”®æ— æ•ˆã€‚"
msgid "Specified fixed address not assigned to instance"
msgstr "指定的固定IP地å€æ²¡æœ‰åˆ†é…给实例"
-msgid "Specify `table_name` or `table` param"
-msgstr "指定`table_name` 或 `table`å‚æ•°"
-
-msgid "Specify only one param `table_name` `table`"
-msgstr "åªèƒ½æŒ‡å®šä¸€ä¸ªå‚æ•°`table_name` `table`"
-
msgid "Started"
msgstr "已开始"
@@ -2503,9 +2255,6 @@ msgstr "定义的端å£æ•°é‡ï¼š%(ports)d 超过é™åˆ¶ï¼š%(quota)d"
msgid "The number of tags exceeded the per-server limit %d"
msgstr "标签数é‡è¶…过了ä¿æŠ¤é™åˆ¶ï¼Œä¸Šé™æ•°é‡ä¸º%d"
-msgid "The only partition should be partition 1."
-msgstr "唯一的分区应该是分区1。"
-
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr "主机上ä¸å­˜åœ¨æ供的RNG设备路径:(%(path)s)。"
@@ -2567,54 +2316,19 @@ msgid ""
"The volume cannot be assigned the same device name as the root device %s"
msgstr "å·ä¸èƒ½åˆ†é…ä¸èƒ½ç”¨ä¸Žroot设备%s一样的设备å称"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"在表格 '%(table_name)s'中有%(records)d记录,uuid或者instance_uuid列值为 "
-"NULL。在您备份任何需è¦çš„æ•°æ®ä¹‹åŽï¼Œä½¿ç”¨å‚æ•° --deleteå†è¿è¡Œä¸€æ¬¡è¿™ä¸ªå‘½ä»¤ã€‚"
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"在表格 '%(table_name)s'中有%(records)d记录,uuid或者instance_uuid列值为 "
-"NULL。在è¿ç§»é€šè¿‡å‰ï¼Œå¿…须手动清除。考虑è¿è¡Œå‘½ä»¤'nova-manage db "
-"null_instance_uuid_scan'。"
-
msgid "There are not enough hosts available."
msgstr "没有足够的主机å¯ç”¨ã€‚"
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"ä»ç„¶æœ‰%(count)i æ¡æœªè¿ç§»çš„云主机类型。在所有实例类型记录被è¿ç§»åˆ°æ–°çš„æ ¼å¼ä¹‹"
-"å‰ï¼Œè¿ç§»ä¸èƒ½ç»§ç»­ã€‚请è¿è¡Œé¦–å…ˆ `nova-manage db migrate_flavor_data'。"
-
-#, python-format
msgid "There is no such action: %s"
msgstr "没有该动作:%s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr "没有找到instance_uuid为NULL的记录。"
-
#, python-format
msgid ""
"This compute node's hypervisor is older than the minimum supported version: "
"%(version)s."
msgstr "此计算节点的 hypervisor 的版本低于最低å—支æŒç‰ˆæœ¬ï¼š%(version)s。"
-msgid "This domU must be running on the host specified by connection_url"
-msgstr "这个domU å¿…é¡»è¿è¡Œåœ¨connection_url指定的主机上"
-
msgid ""
"This method needs to be called with either networks=None and port_ids=None "
"or port_ids and networks as not none."
@@ -2634,10 +2348,6 @@ msgstr ""
"æ­¤æœåŠ¡çš„版本 (v%(thisver)i) 低于部署的余下部分的最低版本 (v%(minver)i)。无法"
"继续。"
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "等待设备 %s 创建超时"
-
msgid "Timeout waiting for response from cell"
msgstr "等待æ¥è‡ªå•å…ƒçš„å“应时å‘生超时"
@@ -2687,10 +2397,6 @@ msgid "Unable to automatically allocate a network for project %(project_id)s"
msgstr "无法为项目 %(project_id)s 自动分é…网络"
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr "ä¸èƒ½è¿žæŽ¥å®¢æˆ·ç«¯ä»£ç†ã€‚以下调用超时:%(method)s"
-
-#, python-format
msgid "Unable to convert image to %(format)s: %(exp)s"
msgstr "无法将镜åƒè½¬æ¢ä¸º %(format)s:%(exp)s"
@@ -2698,17 +2404,6 @@ msgstr "无法将镜åƒè½¬æ¢ä¸º %(format)s:%(exp)s"
msgid "Unable to convert image to raw: %(exp)s"
msgstr "无法将镜åƒè½¬æ¢ä¸ºåŽŸå§‹æ ¼å¼ï¼š%(exp)s"
-msgid "Unable to destroy VBD"
-msgstr "ä¸èƒ½é”€æ¯VBD"
-
-#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "æ— æ³•é”€æ¯ VBD %s"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "æ— æ³•é”€æ¯ VDI %s"
-
#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "无法为“%sâ€ç¡®å®šç£ç›˜æ€»çº¿"
@@ -2718,28 +2413,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "无法为 %s 确定ç£ç›˜å‰ç¼€"
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr "ä¸èƒ½ä»Žæ± ä¸­å¼¹å‡º%s;没有找到主"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "ä¸èƒ½ä»Žæ± ä¸­å¼¹å‡º%s ;池ä¸ç©º"
-
-msgid "Unable to find SR from VBD"
-msgstr "ä¸èƒ½ä»ŽVBD找到SR"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "无法在VBD %s找到存储库"
-
-msgid "Unable to find SR from VDI"
-msgstr "ä¸èƒ½ä»ŽVDI找到SR"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "从VDI %s ä¸èƒ½æ‰¾åˆ°SR"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "找ä¸åˆ° ca_file:%s"
@@ -2758,9 +2431,6 @@ msgstr "找ä¸åˆ° iSCSI 目标"
msgid "Unable to find key_file : %s"
msgstr "找ä¸åˆ° key_file:%s"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "找ä¸åˆ° VM çš„æ ¹ VBD/VDI"
-
msgid "Unable to find volume"
msgstr "找ä¸åˆ°å·"
@@ -2770,25 +2440,6 @@ msgstr "ä¸èƒ½èŽ·å–主机UUID:/etc/machine-idä¸å­˜åœ¨"
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr "ä¸èƒ½èŽ·å–主机UUID:/etc/machine-id 为空"
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "无法使得VDI %s 的记录è¿è¡Œ"
-
-msgid "Unable to get updated status"
-msgstr "无法获å–已更新的状æ€"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "无法为存储库 %s 引入VDI"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "无法在存储库 %s 上引入VDI"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "在池中ä¸èƒ½åŠ å…¥%s"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2817,14 +2468,6 @@ msgid ""
"Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
msgstr "无法把实例 (%(instance_id)s) è¿ç§»åˆ°å½“å‰ä¸»æœº (%(host)s)。"
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "ä¸èƒ½èŽ·å–目标信æ¯%s"
-
-#, python-format
-msgid "Unable to parse rrd of %s"
-msgstr "ä¸èƒ½è§£æž %s çš„rrd"
-
msgid "Unable to resize disk down."
msgstr "ä¸èƒ½å‘下调整ç£ç›˜ã€‚"
@@ -2834,16 +2477,6 @@ msgstr "无法对实例设置密ç "
msgid "Unable to shrink disk."
msgstr "ä¸èƒ½åŽ‹ç¼©ç£ç›˜ã€‚"
-msgid "Unable to terminate instance."
-msgstr "ä¸èƒ½ç»ˆæ­¢å®žä¾‹ã€‚"
-
-msgid "Unable to unplug VBD"
-msgstr "ä¸èƒ½æ‹”出VBD"
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "无法移除 VBD %s"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "CPUä¿¡æ¯ä¸èƒ½è¢«æŽ¥å—:%(reason)s。"
@@ -2862,16 +2495,6 @@ msgstr ""
"未定义的å—设备映射根:BlockDeviceMappingList 包å«æ¥è‡ªå¤šä¸ªå®žä¾‹çš„å—设备映射。"
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"å‘生æ„外 API 错误。请在 http://bugs.launchpad.net/nova/ 处报告此错误,并且附"
-"上 Nova API 日志(如果å¯èƒ½ï¼‰ã€‚\n"
-"%s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "未预期的èšåˆåŠ¨ä½œ%s"
@@ -2933,9 +2556,6 @@ msgstr "试图å–消废弃但是镜åƒ%s没有找到。"
msgid "Unsupported Content-Type"
msgstr "ä¸æ”¯æŒçš„Content-Type"
-msgid "Upgrade DB using Essex release first."
-msgstr "请首先使用 Essex å‘行版æ¥å‡çº§æ•°æ®åº“。"
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr "在密ç æ–‡ä»¶ä¸­æ‰¾ä¸åˆ°ç”¨æˆ· %(username)s。"
@@ -2957,25 +2577,6 @@ msgstr "在åŒä¸€ä¸ªè¯·æ±‚中,ä¸å…许使用ä¸åŒçš„ block_device_mapping语
#, python-format
msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"VDI %(vdi_ref)s 的大å°ä¸º %(virtual_size)d 字节,比云主机类型定义的 "
-"%(new_disk_size)d 字节还大。"
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"SR %(sr)s (vdi_uuid %(vdi_uuid)s上没有找到VDI,目标lun target_lun "
-"%(target_lun)s)"
-
-#, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr "VHD coalesce å°è¯•è¶…过(%d),放弃。。。"
-
-#, python-format
-msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
msgstr ""
@@ -3042,11 +2643,6 @@ msgid ""
"support custom block size"
msgstr "å·è®¾ç½®å—大å°ï¼Œä½†æ˜¯å½“å‰libvirt监测器 '%s'ä¸æ”¯æŒå®šåˆ¶åŒ–å—大å°"
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr "Python<2.7.4 ä¸æ”¯æŒå®žä¾‹ '%s',请使用http或者https"
-
msgid "When resizing, instances must change flavor!"
msgstr "调整大å°æ—¶ï¼Œå®žä¾‹å¿…须更æ¢äº‘主机类型ï¼"
@@ -3061,9 +2657,6 @@ msgstr ""
msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr "错误的é…é¢æ–¹æ³•%(method)s用在资æº%(res)s"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr "é’©å­æ–¹æ³•çš„类型ä¸æ­£ç¡®ã€‚ä»…å…许“preâ€å’Œâ€œpostâ€ç±»åž‹"
-
msgid "X-Forwarded-For is missing from request."
msgstr "请求中缺少 X-Forwarded-For 。"
@@ -3079,9 +2672,6 @@ msgstr "请求中缺少X-Metadata-Provider。"
msgid "X-Tenant-ID header is missing from request."
msgstr "请求中缺少 X-Tenant-ID 。"
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "XAPI 支æŒå¿…须的 relax-xsm-sr-check=true"
-
msgid "You are not allowed to delete the image."
msgstr "ä¸å…许删除该映åƒã€‚"
@@ -3104,20 +2694,6 @@ msgstr "没有å¯ç”¨æµ®åŠ¨ IP。"
msgid "admin password can't be changed on existing disk"
msgstr "无法在现有ç£ç›˜ä¸Šæ›´æ”¹ç®¡ç†å‘˜å¯†ç "
-msgid "aggregate deleted"
-msgstr "删除的èšåˆ"
-
-msgid "aggregate in error"
-msgstr "èšåˆåœ¨é”™è¯¯çŠ¶æ€"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "assert_can_migrate失败,原因是:%s"
-
-#, python-format
-msgid "attach network interface %s failed."
-msgstr "ç»‘å®šç½‘ç»œæŽ¥å£ %s 失败."
-
msgid "cannot understand JSON"
msgstr "无法ç†è§£JSON"
@@ -3135,10 +2711,6 @@ msgstr "连接信æ¯ï¼š%s"
msgid "connecting to: %(host)s:%(port)s"
msgstr "连接到:%(host)s:%(port)s"
-#, python-format
-msgid "detach network interface %s failed."
-msgstr "è§£ç»‘ç½‘ç»œæŽ¥å£ %s 失败."
-
msgid "direct_snapshot() is not implemented"
msgstr "未实现 direct_snapshot()"
@@ -3150,10 +2722,6 @@ msgstr "ä¸æ”¯æŒç£ç›˜ç±»åž‹'%s' "
msgid "empty project id for instance %s"
msgstr "用于实例 %s 的项目标识为空"
-#, python-format
-msgid "error opening rbd image %s"
-msgstr "打开rbdé•œåƒ%s 出错"
-
msgid "error setting admin password"
msgstr "设置管ç†å‘˜å¯†ç æ—¶å‡ºé”™"
@@ -3192,9 +2760,6 @@ msgstr "é•œåƒå·²ç»æŒ‚è½½"
msgid "instance %s is not running"
msgstr "实例%s没有è¿è¡Œ"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr "实例拥有内核或者内存盘,但ä¸æ˜¯äºŒè€…å‡æœ‰"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr "使用 @refresh_cache 时,instance 是必需的自å˜é‡"
@@ -3249,9 +2814,6 @@ msgstr "nbd 设备 %s 没有出现"
msgid "nbd unavailable: module not loaded"
msgstr "NBDä¸å¯ç”¨ï¼šæ¨¡å—没有加载"
-msgid "no hosts to remove"
-msgstr "没有主è§å¯ç§»é™¤"
-
#, python-format
msgid "no match found for %s"
msgstr "对于%s没有找到匹é…çš„"
@@ -3318,9 +2880,6 @@ msgstr "service 对于基于 Memcached çš„ ServiceGroup 驱动程åºæ˜¯å¿…需的
msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr "此驱动程åºæˆ– guest 实例未实现 set_admin_password。"
-msgid "setup in progress"
-msgstr "建立处ç†ä¸­"
-
#, python-format
msgid "snapshot for %s"
msgstr "%s çš„å¿«ç…§"
@@ -3340,9 +2899,6 @@ msgstr "过多主体密钥"
msgid "unpause not supported for vmwareapi"
msgstr "vmwareapi ä¸æ”¯æŒå–消暂åœ"
-msgid "version should be an integer"
-msgstr "version应该是整数"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "vg %s 必须为 LVM å·ç»„"
@@ -3366,13 +2922,3 @@ msgstr "å·%så·²ç»ç»‘定"
msgid ""
"volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status"
msgstr "å· '%(vol)s' 状æ€å¿…须是‘使用中‘。当å‰å¤„于 '%(status)s' 状æ€"
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake 没有 %s 的实现"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr "xenapi.fake 没有 %s 的实现或者调用时用了错误数目的å‚æ•°"
diff --git a/nova/locale/zh_TW/LC_MESSAGES/nova.po b/nova/locale/zh_TW/LC_MESSAGES/nova.po
index 9533ff6dbe..83248a062e 100644
--- a/nova/locale/zh_TW/LC_MESSAGES/nova.po
+++ b/nova/locale/zh_TW/LC_MESSAGES/nova.po
@@ -12,7 +12,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
-"POT-Creation-Date: 2020-04-25 15:32+0000\n"
+"POT-Creation-Date: 2022-08-09 17:12+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -131,18 +131,6 @@ msgid "Affinity instance group policy was violated."
msgstr "é•å了親緣性實例群組原則。"
#, python-format
-msgid "Agent does not support the call: %(method)s"
-msgstr "代ç†ç¨‹å¼ä¸æ”¯æ´å‘¼å«ï¼š%(method)s"
-
-#, python-format
-msgid ""
-"Agent-build with hypervisor %(hypervisor)s os %(os)s architecture "
-"%(architecture)s exists."
-msgstr ""
-"Hypervisor 為 %(hypervisor)sã€OS 為 %(os)s 且架構為%(architecture)s çš„ agent-"
-"build 已存在。"
-
-#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr "èšé›† %(aggregate_id)s 已有主機 %(host)s。"
@@ -159,11 +147,6 @@ msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr "èšé›† %(aggregate_id)s 沒有索引éµç‚º %(metadata_key)s çš„ meta 資料。"
#, python-format
-msgid ""
-"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: %(reason)s."
-msgstr "èšé›† %(aggregate_id)s:動作 '%(action)s' 造æˆéŒ¯èª¤ï¼š%(reason)s。"
-
-#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr "èšé›† %(aggregate_name)s 已存在。"
@@ -172,10 +155,6 @@ msgid "Aggregate %s does not support empty named availability zone"
msgstr "èšé›† %s ä¸æ”¯æ´ç©ºç™½å‘½åçš„å¯ç”¨å€åŸŸ"
#, python-format
-msgid "Aggregate for host %(host)s count not be found."
-msgstr "找ä¸åˆ°ä¸»æ©Ÿ %(host)s 計數的èšé›†ã€‚"
-
-#, python-format
msgid "An invalid 'name' value was provided. The name must be: %(reason)s"
msgstr "所æ供的「å稱ã€å€¼ç„¡æ•ˆã€‚å稱必須是:%(reason)s"
@@ -335,12 +314,6 @@ msgstr "找ä¸åˆ°æ‰€è¦æ±‚的映åƒæª”"
msgid "Can not handle authentication request for %d credentials"
msgstr "無法處ç†å° %d èªè­‰çš„鑑別è¦æ±‚"
-msgid "Can't resize a disk to 0 GB."
-msgstr "無法將ç£ç¢Ÿçš„大å°èª¿æ•´ç‚º 0 GB。"
-
-msgid "Can't resize down ephemeral disks."
-msgstr "無法將暫時ç£ç¢Ÿèª¿å°ã€‚"
-
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr "無法從實例 libVirt é…置擷å–æ ¹è£ç½®è·¯å¾‘"
@@ -377,12 +350,6 @@ msgid ""
"store images"
msgstr "無法判定 %s çš„æ¯é …儲存å€ï¼›ç„¡æ³•åˆ¤å®šç”¨ä¾†å„²å­˜æ˜ åƒæª”çš„ä½ç½®"
-msgid "Cannot find SR of content-type ISO"
-msgstr "找ä¸åˆ°å…§å®¹é¡žåž‹ç‚º ISO 的「儲存體儲存庫 (SR)ã€"
-
-msgid "Cannot find SR to read/write VDI."
-msgstr "找ä¸åˆ°ã€Œå„²å­˜é«”儲存庫 (SR)ã€ä¾†è®€å¯« VDI。"
-
msgid "Cannot find image for rebuild"
msgstr "找ä¸åˆ°è¦é‡å»ºçš„映åƒæª”"
@@ -493,10 +460,6 @@ msgid "Connection to libvirt lost: %s"
msgstr "libVirt 連線已中斷:%s"
#, python-format
-msgid "Connection to the hypervisor is broken on host: %(host)s"
-msgstr "Hypervisor 連線在主機 %(host)s 上已中斷"
-
-#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
@@ -563,18 +526,9 @@ msgid "Default PBM policy is required if PBM is enabled."
msgstr "如果已啟用 PBM,則需è¦é è¨­ PBM 原則。"
#, python-format
-msgid "Deleted %(records)d records from table '%(table_name)s'."
-msgstr "已從表格 '%(table_name)s' 刪除 %(records)d 筆記錄。"
-
-#, python-format
msgid "Device '%(device)s' not found."
msgstr "找ä¸åˆ°è£ç½® '%(device)s'。"
-#, python-format
-msgid ""
-"Device id %(id)s specified is not supported by hypervisor version %(version)s"
-msgstr "Hypervisor 版本 %(version)s ä¸æ”¯æ´æ‰€æŒ‡å®šçš„è£ç½® ID %(id)s"
-
msgid "Device name contains spaces."
msgstr "è£ç½®å稱包å«ç©ºæ ¼ã€‚"
@@ -586,17 +540,6 @@ msgid "Device type mismatch for alias '%s'"
msgstr "別å '%s' çš„è£ç½®é¡žåž‹ä¸ç¬¦"
#, python-format
-msgid ""
-"Different types in %(table)s.%(column)s and shadow table: %(c_type)s "
-"%(shadow_c_type)s"
-msgstr ""
-"%(table)s.%(column)s 與備份副本表格中的類型ä¸åŒï¼š%(c_type)s %(shadow_c_type)s"
-
-#, python-format
-msgid "Disk contains a filesystem we are unable to resize: %s"
-msgstr "ç£ç¢ŸåŒ…å«ä¸€å€‹ç„¡æ³•èª¿æ•´å¤§å°çš„檔案系統:%s"
-
-#, python-format
msgid "Disk format %(disk_format)s is not acceptable"
msgstr "無法接å—ç£ç¢Ÿæ ¼å¼ %(disk_format)s"
@@ -604,13 +547,6 @@ msgstr "無法接å—ç£ç¢Ÿæ ¼å¼ %(disk_format)s"
msgid "Disk info file is invalid: %(reason)s"
msgstr "ç£ç¢Ÿè³‡è¨Šæª”無效:%(reason)s"
-msgid "Disk must have only one partition."
-msgstr "ç£ç¢Ÿåªèƒ½å…·æœ‰ä¸€å€‹åˆ†å‰²å€ã€‚"
-
-#, python-format
-msgid "Disk with id: %s not found attached to instance."
-msgstr "找ä¸åˆ°å·²é€£æŽ¥è‡³å¯¦ä¾‹ä¸” ID 為 %s çš„ç£ç¢Ÿã€‚"
-
#, python-format
msgid "Driver Error: %s"
msgstr "驅動程å¼éŒ¯èª¤ï¼š%s"
@@ -626,10 +562,6 @@ msgid ""
msgstr "毀æ節點 %(node)s 上的實例時發生錯誤。供應狀態ä»ç‚º'%(state)s'。"
#, python-format
-msgid "Error during following call to agent: %(method)s"
-msgstr "å°ä»£ç†ç¨‹å¼é€²è¡Œä¸‹åˆ—呼å«æœŸé–“發生錯誤:%(method)s"
-
-#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr "解除擱置實例 %(instance_id)s 期間發生錯誤:%(reason)s"
@@ -680,9 +612,6 @@ msgstr "è£è¼‰å…·æœ‰ libguestfs (%(e)s) çš„ %(image)s 時發生錯誤"
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "建立資æºç›£è¦–器 %(monitor)s 時發生錯誤"
-msgid "Error: Agent is disabled"
-msgstr "錯誤:代ç†ç¨‹å¼å·²åœç”¨"
-
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr "找ä¸åˆ°å‹•ä½œè­˜åˆ¥ç¢¼ %(action_id)s 的事件 %(event)s"
@@ -714,10 +643,6 @@ msgstr "已超出é‡è©¦æ¬¡æ•¸ä¸Šé™ã€‚%(reason)s"
msgid "Expected a uuid but received %(uuid)s."
msgstr "éœ€è¦ UUID,但收到 %(uuid)s。"
-#, python-format
-msgid "Extra column %(table)s.%(column)s in shadow table"
-msgstr "備份副本表格中存在é¡å¤–直欄 %(table)s.%(column)s"
-
msgid "Extracting vmdk from OVA failed."
msgstr "從 OVA æ“·å– VMDK 失敗。"
@@ -743,10 +668,6 @@ msgid "Failed to attach network adapter device to %(instance_uuid)s"
msgstr "無法將網路é…接å¡è£ç½®é€£æŽ¥è‡³ %(instance_uuid)s"
#, python-format
-msgid "Failed to create vif %s"
-msgstr "無法建立 VIF %s"
-
-#, python-format
msgid "Failed to deploy instance: %(reason)s"
msgstr "無法部署實例:%(reason)s"
@@ -774,9 +695,6 @@ msgstr "無法å°æ˜ åˆ†å‰²å€ï¼š%s"
msgid "Failed to mount filesystem: %s"
msgstr "無法è£è¼‰æª”案系統:%s"
-msgid "Failed to parse information about a pci device for passthrough"
-msgstr "ç„¡æ³•å‰–æž PCI passthrough è£ç½®çš„相關資訊"
-
#, python-format
msgid "Failed to power off instance: %(reason)s"
msgstr "無法關閉實例的電æºï¼š%(reason)s"
@@ -786,12 +704,6 @@ msgid "Failed to power on instance: %(reason)s"
msgstr "無法開啟實例的電æºï¼š%(reason)s"
#, python-format
-msgid ""
-"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
-"%(reason)s"
-msgstr "無法為實例 %(instance_uuid)s 準備 PCI è£ç½® %(id)s:%(reason)s"
-
-#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr "無法供應實例 %(inst)s:%(reason)s"
@@ -823,9 +735,6 @@ msgstr "無法在 %(path)s 上執行 qemu-img 資訊:%(error)s"
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr "無法在 %(instance)s 上設定管ç†è€…密碼,因為 %(reason)s"
-msgid "Failed to spawn, rolling back"
-msgstr "無法大é‡ç”¢ç”Ÿï¼Œæ­£åœ¨å›žå¾©"
-
#, python-format
msgid "Failed to suspend instance: %(reason)s"
msgstr "無法懸置實例:%(reason)s"
@@ -834,10 +743,6 @@ msgstr "無法懸置實例:%(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "無法終止實例:%(reason)s"
-#, python-format
-msgid "Failed to unplug vif %s"
-msgstr "無法拔除 VIF %s"
-
msgid "Failure prepping block device."
msgstr "準備å€å¡Šè£ç½®æ™‚失敗。"
@@ -846,10 +751,6 @@ msgid "File %(file_path)s could not be found."
msgstr "找ä¸åˆ°æª”案 %(file_path)s。"
#, python-format
-msgid "File path %s not valid"
-msgstr "檔案路徑 %s 無效"
-
-#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr "固定 IP %(ip)s ä¸æ˜¯ç¶²è·¯ %(network_id)s 的有效 IP ä½å€ã€‚"
@@ -964,18 +865,6 @@ msgstr "å·²ç¦æ­¢è¶…出映åƒæª” meta 中傳éžä¹‹åºåˆ—埠數目的特性值。
msgid "Found no disk to snapshot."
msgstr "找ä¸åˆ°ç£ç¢Ÿä¾†å–å¾— Snapshot。"
-#, python-format
-msgid "Found no network for bridge %s"
-msgstr "找ä¸åˆ°æ©‹æŽ¥å™¨ %s 的網路"
-
-#, python-format
-msgid "Found non-unique network for bridge %s"
-msgstr "發ç¾æ©‹æŽ¥å™¨ %s 的網路ä¸æ˜¯å”¯ä¸€çš„"
-
-#, python-format
-msgid "Found non-unique network for name_label %s"
-msgstr "ç™¼ç¾ name_label %s 的網路ä¸æ˜¯å”¯ä¸€çš„"
-
msgid "Guest does not have a console available."
msgstr "訪客沒有主控å°å¯ç”¨ã€‚"
@@ -1003,9 +892,6 @@ msgstr "主機ä¸æ”¯æ´å…·æœ‰ NUMA 拓蹼集的訪客"
msgid "Host does not support guests with custom memory page sizes"
msgstr "主機ä¸æ”¯æ´å…·æœ‰è‡ªè¨‚記憶體é é¢å¤§å°çš„訪客"
-msgid "Host startup on XenServer is not supported."
-msgstr "ä¸æ”¯æ´åœ¨ XenServer 上啟動主機。"
-
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr "Hypervisor 驅動程å¼ä¸æ”¯æ´ post_live_migration_at_source 方法"
@@ -1217,10 +1103,6 @@ msgstr "尚未調整實例大å°ã€‚"
msgid "Instance hostname %(hostname)s is not a valid DNS name"
msgstr "實例主機å %(hostname)s ä¸æ˜¯æœ‰æ•ˆçš„ DNS å稱"
-#, python-format
-msgid "Instance is already in Rescue Mode: %s"
-msgstr "實例已處於救æ´æ¨¡å¼ï¼š%s"
-
msgid "Instance is not a member of specified network"
msgstr "實例ä¸æ˜¯æ‰€æŒ‡å®šç¶²è·¯çš„æˆå“¡"
@@ -1241,10 +1123,6 @@ msgid "Insufficient compute resources: %(reason)s."
msgstr "計算資æºä¸è¶³ï¼š%(reason)s。"
#, python-format
-msgid "Insufficient free memory on compute node to start %(uuid)s."
-msgstr "計算節點上的å¯ç”¨è¨˜æ†¶é«”ä¸è¶³ä»¥å•Ÿå‹• %(uuid)s。"
-
-#, python-format
msgid "Interface %(interface)s not found."
msgstr "找ä¸åˆ°ä»‹é¢ %(interface)s。"
@@ -1432,12 +1310,6 @@ msgid ""
"It is not allowed to create an interface on external network %(network_uuid)s"
msgstr "ä¸å®¹è¨±åœ¨ä¸‹åˆ—外部網路上建立介é¢ï¼š%(network_uuid)s"
-#, python-format
-msgid ""
-"Kernel/Ramdisk image is too large: %(vdi_size)d bytes, max %(max_size)d bytes"
-msgstr ""
-"核心/Ramdisk 映åƒæª”太大:%(vdi_size)d 個ä½å…ƒçµ„,上é™ç‚º %(max_size)d 個ä½å…ƒçµ„"
-
msgid ""
"Key Names can only contain alphanumeric characters, periods, dashes, "
"underscores, colons and spaces."
@@ -1459,12 +1331,6 @@ msgstr "找ä¸åˆ°ä½¿ç”¨è€… %(user_id)s 的金鑰組 %(name)s"
msgid "Keypair data is invalid: %(reason)s"
msgstr "金鑰組資料無效:%(reason)s"
-msgid "Keypair name contains unsafe characters"
-msgstr "金鑰組å稱包å«ä¸å®‰å…¨çš„å­—å…ƒ"
-
-msgid "Keypair name must be string and between 1 and 255 characters long"
-msgstr "金鑰組å稱必須是字串,並且長度必須介於 1 å’Œ 255 個字元之間"
-
msgid "Limits only supported from vCenter 6.0 and above"
msgstr "åªæœ‰ vCenter 6.0 åŠæ›´é«˜ç‰ˆæœ¬ä¸­çš„é™åˆ¶æ‰å—支æ´"
@@ -1497,9 +1363,6 @@ msgstr "找ä¸åˆ°æ¨™è¨˜ %(marker)s。"
msgid "Maximum number of floating IPs exceeded"
msgstr "超éŽäº†æµ®å‹• IP 數目上é™"
-msgid "Maximum number of key pairs exceeded"
-msgstr "已超出金鑰組數目上é™"
-
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr "meta 資料項目數目上é™å·²è¶…出所å…許的 %(allowed)d"
@@ -1528,12 +1391,6 @@ msgid ""
"%(node)s."
msgstr "在計算主機節點 %(host)s.%(node)s 上找ä¸åˆ°åº¦é‡ %(name)s。"
-msgid "Migrate Receive failed"
-msgstr "移轉接收失敗"
-
-msgid "Migrate Send failed"
-msgstr "移轉傳é€å¤±æ•—"
-
#, python-format
msgid "Migration %(id)s for server %(uuid)s is not live-migration."
msgstr "伺æœå™¨ %(uuid)s 的移轉 %(id)s ä¸æ˜¯å³æ™‚移轉。"
@@ -1578,10 +1435,6 @@ msgstr "移轉é¸å–目的地錯誤:%(reason)s"
msgid "Missing arguments: %s"
msgstr "éºæ¼å¼•æ•¸ï¼š%s"
-#, python-format
-msgid "Missing column %(table)s.%(column)s in shadow table"
-msgstr "備份副本表格中éºæ¼äº†ç›´æ¬„ %(table)s.%(column)s"
-
msgid "Missing device UUID."
msgstr "éºæ¼è£ç½® UUID。"
@@ -1658,13 +1511,6 @@ msgid "Must not input both network_id and port_id"
msgstr "ä¸å¾—åŒæ™‚輸入 network_id å’Œ port_id"
msgid ""
-"Must specify connection_url, connection_username (optionally), and "
-"connection_password to use compute_driver=xenapi.XenAPIDriver"
-msgstr ""
-"必須指定 connection_urlã€connection_username(é¸ç”¨é …目)åŠ"
-"connection_password,æ‰èƒ½ä½¿ç”¨ compute_driver=xenapi.XenAPIDriver"
-
-msgid ""
"Must specify host_ip, host_username and host_password to use vmwareapi."
"VMwareVCDriver"
msgstr ""
@@ -1715,10 +1561,6 @@ msgstr "沒有 ID 為 %(id)s çš„å€å¡Šè£ç½®å°æ˜ ã€‚"
msgid "No Unique Match Found."
msgstr "找ä¸åˆ°å”¯ä¸€ç›¸ç¬¦é …。"
-#, python-format
-msgid "No agent-build associated with id %(id)s."
-msgstr "ID %(id)s 沒有相關è¯çš„ agent-build。"
-
msgid "No compute host specified"
msgstr "未指定計算主機"
@@ -1784,10 +1626,6 @@ msgstr "在 %(image)s çš„ %(root)s 中找ä¸åˆ°è£è¼‰é»ž"
msgid "No operating system found in %s"
msgstr "在 %s 中找ä¸åˆ°ä½œæ¥­ç³»çµ±"
-#, python-format
-msgid "No primary VDI found for %s"
-msgstr "找ä¸åˆ° %s çš„ä¸»è¦ VDI"
-
msgid "No root disk defined."
msgstr "未定義根ç£ç¢Ÿã€‚"
@@ -1797,9 +1635,6 @@ msgid ""
"'%(project_id)s'."
msgstr "未è¦æ±‚任何特定網路,且專案 '%(project_id)s' 無法使用任何網路。"
-msgid "No suitable network for migrate"
-msgstr "沒有é©åˆæ–¼ç§»è½‰çš„網路"
-
msgid "No valid host found for cold migrate"
msgstr "找ä¸åˆ°æœ‰æ•ˆçš„主機進行冷移轉"
@@ -1878,14 +1713,6 @@ msgstr "一個以上的主機已經ä½æ–¼å¯ç”¨æ€§å€åŸŸ %s 中"
msgid "Only administrators may list deleted instances"
msgstr "åªæœ‰ç®¡ç†è€…æ‰èƒ½åˆ—出已刪除的實例"
-#, python-format
-msgid ""
-"Only file-based SRs (ext/NFS) are supported by this feature. SR %(uuid)s is "
-"of type %(type)s"
-msgstr ""
-"此特性僅支æ´æª”案型「儲存體儲存庫 (SR)ã€(ext/NFS)。「儲存體儲存庫 "
-"(SR)ã€%(uuid)s 的類型是%(type)s"
-
msgid "Origin header does not match this host."
msgstr "原始標頭與此主機ä¸ç¬¦ã€‚"
@@ -1928,10 +1755,6 @@ msgid "PCI device request %(requests)s failed"
msgstr "PCI è£ç½®è¦æ±‚ %(requests)s 失敗"
#, python-format
-msgid "PIF %s does not contain IP address"
-msgstr "PIF %s ä¸åŒ…å« IP ä½å€"
-
-#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr "é‡å° '%(against)s',已ç¦æ­¢é é¢å¤§å° %(pagesize)s"
@@ -2047,10 +1870,6 @@ msgid "Quota exceeded, too many servers in group"
msgstr "已超出é…é¡ï¼Œç¾¤çµ„中的伺æœå™¨å¤ªå¤š"
#, python-format
-msgid "Quota exceeded: code=%(code)s"
-msgstr "已超出é…é¡ï¼šéŒ¯èª¤ç¢¼ = %(code)s"
-
-#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr "專案 %(project_id)s è³‡æº %(resource)s 已存在é…é¡"
@@ -2077,17 +1896,6 @@ msgid ""
"%(maximum)s."
msgstr "%(resource)s çš„é…é¡é™åˆ¶ %(limit)s å¿…é ˆå°æ–¼æˆ–等於%(maximum)s。"
-#, python-format
-msgid "Reached maximum number of retries trying to unplug VBD %s"
-msgstr "嘗試拔除 VBD %s 時é”到了é‡è©¦æ¬¡æ•¸ä¸Šé™"
-
-msgid ""
-"Realtime policy needs vCPU(s) mask configured with at least 1 RT vCPU and 1 "
-"ordinary vCPU. See hw:cpu_realtime_mask or hw_cpu_realtime_mask"
-msgstr ""
-"å³æ™‚原則需è¦ä½¿ç”¨è‡³å°‘ 1 個 RT vCPU å’Œ 1 個普通 vCPU 進行é…置的 vCPU é®ç½©ã€‚è«‹åƒ"
-"閱 hw:cpu_realtime_mask 或 hw_cpu_realtime_mask"
-
msgid "Request body and URI mismatch"
msgstr "è¦æ±‚內文與 URI ä¸ç¬¦"
@@ -2240,10 +2048,6 @@ msgid "Set admin password is not supported"
msgstr "ä¸æ”¯æ´è¨­å®šç®¡ç†å¯†ç¢¼"
#, python-format
-msgid "Shadow table with name %(name)s already exists."
-msgstr "å稱為 %(name)s 的備份副本表格已存在。"
-
-#, python-format
msgid "Share '%s' is not supported"
msgstr "ä¸æ”¯æ´å…±ç”¨ '%s'"
@@ -2251,12 +2055,6 @@ msgstr "ä¸æ”¯æ´å…±ç”¨ '%s'"
msgid "Share level '%s' cannot have share configured"
msgstr "共用層次 '%s' ä¸èƒ½é…置共用"
-msgid ""
-"Shrinking the filesystem down with resize2fs has failed, please check if you "
-"have enough free space on your disk."
-msgstr ""
-"使用 resize2fs 來縮å°æª”案系統時失敗,請檢查ç£ç¢Ÿä¸Šæ˜¯å¦å…·æœ‰è¶³å¤ çš„å¯ç”¨ç©ºé–“。"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "找ä¸åˆ° Snapshot %(snapshot_id)s。"
@@ -2281,12 +2079,6 @@ msgstr "æ供的排åºéµç„¡æ•ˆã€‚"
msgid "Specified fixed address not assigned to instance"
msgstr "沒有將所指定的固定ä½å€æŒ‡æ´¾çµ¦å¯¦ä¾‹"
-msgid "Specify `table_name` or `table` param"
-msgstr "請指定 `table_name` 或 `table` åƒæ•¸"
-
-msgid "Specify only one param `table_name` `table`"
-msgstr "請僅指定 `table_name` 或 `table` 中的一個åƒæ•¸"
-
msgid "Started"
msgstr "已開始"
@@ -2352,9 +2144,6 @@ msgstr "實例需è¦æ¯”所æ供版本還新的 Hypervisor 版本。"
msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
msgstr "所定義的埠數目 %(ports)d 超出é™åˆ¶ï¼š%(quota)d"
-msgid "The only partition should be partition 1."
-msgstr "唯一的分割å€æ‡‰è©²æ˜¯åˆ†å‰²å€ 1。"
-
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr "主機上ä¸å­˜åœ¨æ‰€æ供的 RNG è£ç½®è·¯å¾‘:(%(path)s)。"
@@ -2414,54 +2203,19 @@ msgid ""
"The volume cannot be assigned the same device name as the root device %s"
msgstr "無法å°ç£å€æŒ‡æ´¾èˆ‡æ ¹è£ç½® %s 相åŒçš„è£ç½®å稱"
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. Run this command again with the --delete "
-"option after you have backed up any necessary data."
-msgstr ""
-"'%(table_name)s' 表格中有 %(records)d 筆 UUID 或 instance_uuid 直欄為空值的記"
-"錄。在備份任何必è¦è³‡æ–™ä¹‹å¾Œï¼Œè«‹ä½¿ç”¨ --delete é¸é …å†æ¬¡åŸ·è¡Œæ­¤æŒ‡ä»¤ã€‚"
-
-#, python-format
-msgid ""
-"There are %(records)d records in the '%(table_name)s' table where the uuid "
-"or instance_uuid column is NULL. These must be manually cleaned up before "
-"the migration will pass. Consider running the 'nova-manage db "
-"null_instance_uuid_scan' command."
-msgstr ""
-"'%(table_name)s' 表格中有 %(records)d 筆 UUID 或 instance_uuid 直欄為空值的記"
-"錄。必須先手動清除這些記錄,移轉æ‰èƒ½é€šéŽã€‚請考é‡åŸ·è¡Œ 'nova-manage db "
-"null_instance_uuid_scan' 指令。"
-
msgid "There are not enough hosts available."
msgstr "沒有足夠的å¯ç”¨ä¸»æ©Ÿã€‚"
#, python-format
-msgid ""
-"There are still %(count)i unmigrated flavor records. Migration cannot "
-"continue until all instance flavor records have been migrated to the new "
-"format. Please run `nova-manage db migrate_flavor_data' first."
-msgstr ""
-"ä»æœ‰ %(count)i 個未移轉的特性記錄。移轉無法繼續,直到將所有實例特性記錄都移轉"
-"為新的格å¼ç‚ºæ­¢ã€‚請先執行 `nova-manage db migrate_flavor_data'。"
-
-#, python-format
msgid "There is no such action: %s"
msgstr "沒有這樣的動作:%s"
-msgid "There were no records found where instance_uuid was NULL."
-msgstr "找ä¸åˆ° instance_uuid 為空值的記錄。"
-
#, python-format
msgid ""
"This compute node's hypervisor is older than the minimum supported version: "
"%(version)s."
msgstr "這部電腦節點的 Hypervisor 版本低於所支æ´çš„版本下é™ï¼š%(version)s。"
-msgid "This domU must be running on the host specified by connection_url"
-msgstr "此 domU 必須正在 connection_url 所指定的主機上執行"
-
msgid ""
"This method needs to be called with either networks=None and port_ids=None "
"or port_ids and networks as not none."
@@ -2480,10 +2234,6 @@ msgid ""
msgstr ""
"æ­¤æœå‹™çš„版本 (v%(thisver)i) ä½Žæ–¼å…¶é¤˜éƒ¨ç½²çš„ç‰ˆæœ¬ä¸‹é™ (v%(minver)i)。無法繼續。"
-#, python-format
-msgid "Timeout waiting for device %s to be created"
-msgstr "等待建立è£ç½® %s 時發生逾時"
-
msgid "Timeout waiting for response from cell"
msgstr "等候 Cell 回應時發生逾時"
@@ -2526,10 +2276,6 @@ msgid "Unable to authenticate Ironic client."
msgstr "無法鑑別 Ironic 用戶端。"
#, python-format
-msgid "Unable to contact guest agent. The following call timed out: %(method)s"
-msgstr "無法è¯çµ¡ä¾†è³“代ç†ç¨‹å¼ã€‚下列呼å«å·²é€¾æ™‚:%(method)s"
-
-#, python-format
msgid "Unable to convert image to %(format)s: %(exp)s"
msgstr "無法將映åƒæª”轉æ›ç‚º %(format)s:%(exp)s"
@@ -2538,14 +2284,6 @@ msgid "Unable to convert image to raw: %(exp)s"
msgstr "無法將映åƒæª”轉æ›ç‚ºåŽŸå§‹æ ¼å¼ï¼š%(exp)s"
#, python-format
-msgid "Unable to destroy VBD %s"
-msgstr "無法毀æ VBD %s"
-
-#, python-format
-msgid "Unable to destroy VDI %s"
-msgstr "無法毀æ VDI %s"
-
-#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr "無法判定 '%s' çš„ç£ç¢ŸåŒ¯æµæŽ’"
@@ -2554,22 +2292,6 @@ msgid "Unable to determine disk prefix for %s"
msgstr "無法判定 %s çš„ç£ç¢Ÿå­—首"
#, python-format
-msgid "Unable to eject %s from the pool; No master found"
-msgstr "無法將 %s 從儲存å€ä¸­é€€å‡ºï¼›æ‰¾ä¸åˆ°ä¸»è¦ä¸»æ©Ÿ"
-
-#, python-format
-msgid "Unable to eject %s from the pool; pool not empty"
-msgstr "無法將 %s 從儲存å€ä¸­é€€å‡ºï¼›å„²å­˜å€ä¸æ˜¯ç©ºçš„"
-
-#, python-format
-msgid "Unable to find SR from VBD %s"
-msgstr "在 VBD %s 中找ä¸åˆ°ã€Œå„²å­˜é«”儲存庫 (SR)ã€"
-
-#, python-format
-msgid "Unable to find SR from VDI %s"
-msgstr "從 VDI %s 中找ä¸åˆ° SR"
-
-#, python-format
msgid "Unable to find ca_file : %s"
msgstr "找ä¸åˆ° ca_file:%s"
@@ -2588,9 +2310,6 @@ msgstr "找ä¸åˆ° iSCSI 目標"
msgid "Unable to find key_file : %s"
msgstr "找ä¸åˆ° key_file:%s"
-msgid "Unable to find root VBD/VDI for VM"
-msgstr "找ä¸åˆ° VM çš„æ ¹ VBD/VDI"
-
msgid "Unable to find volume"
msgstr "找ä¸åˆ°ç£å€"
@@ -2600,22 +2319,6 @@ msgstr "無法å–得主機 UUID:/etc/machine-id ä¸å­˜åœ¨"
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr "無法å–得主機 UUID:/etc/machine-id 是空的"
-#, python-format
-msgid "Unable to get record of VDI %s on"
-msgstr "無法å–得下列ä½ç½®ä¸Š VDI %s 的記錄:"
-
-#, python-format
-msgid "Unable to introduce VDI for SR %s"
-msgstr "無法給「儲存體儲存庫 (SR)ã€%s 建立 VDI"
-
-#, python-format
-msgid "Unable to introduce VDI on SR %s"
-msgstr "無法在「儲存體儲存庫 (SR)ã€%s 上建立 VDI"
-
-#, python-format
-msgid "Unable to join %s in the pool"
-msgstr "無法çµåˆå„²å­˜å€ä¸­çš„ %s"
-
msgid ""
"Unable to launch multiple instances with a single configured port ID. Please "
"launch your instance one by one with different ports."
@@ -2642,10 +2345,6 @@ msgid ""
"Unable to migrate instance (%(instance_id)s) to current host (%(host)s)."
msgstr "無法將實例 (%(instance_id)s) 移轉至ç¾è¡Œä¸»æ©Ÿ (%(host)s)。"
-#, python-format
-msgid "Unable to obtain target information %s"
-msgstr "無法å–得目標資訊 %s"
-
msgid "Unable to resize disk down."
msgstr "無法將ç£ç¢Ÿå¤§å°èª¿å°ã€‚"
@@ -2655,13 +2354,6 @@ msgstr "無法在實例上設定密碼"
msgid "Unable to shrink disk."
msgstr "無法收縮ç£ç¢Ÿã€‚"
-msgid "Unable to terminate instance."
-msgstr "無法終止實例。"
-
-#, python-format
-msgid "Unable to unplug VBD %s"
-msgstr "無法拔除 VBD %s"
-
#, python-format
msgid "Unacceptable CPU info: %(reason)s"
msgstr "無法接å—çš„ CPU 資訊:%(reason)s"
@@ -2681,16 +2373,6 @@ msgstr ""
"ç½®å°æ˜ ã€ã€‚"
#, python-format
-msgid ""
-"Unexpected API Error. Please report this at http://bugs.launchpad.net/nova/ "
-"and attach the Nova API log if possible.\n"
-"%s"
-msgstr ""
-"éžé æœŸçš„ API 錯誤。請在網站http://bugs.launchpad.net/nova/ 上報告此å•é¡Œï¼Œè€Œä¸”"
-"如有å¯èƒ½ï¼Œè«‹é™„加 Nova API 日誌。\n"
-"%s"
-
-#, python-format
msgid "Unexpected aggregate action %s"
msgstr "éžé æœŸçš„èšé›†å‹•ä½œ %s"
@@ -2748,9 +2430,6 @@ msgstr "已嘗試解除擱置,但å»æ‰¾ä¸åˆ°æ˜ åƒæª” %s。"
msgid "Unsupported Content-Type"
msgstr "ä¸æ”¯æ´çš„內容類型"
-msgid "Upgrade DB using Essex release first."
-msgstr "請先使用 Essex 版本來å‡ç´š DB。"
-
#, python-format
msgid "User %(username)s not found in password file."
msgstr "在密碼檔中找ä¸åˆ°ä½¿ç”¨è€… %(username)s。"
@@ -2772,25 +2451,6 @@ msgstr "åŒä¸€å€‹è¦æ±‚中ä¸å®¹è¨±ä½¿ç”¨å…¶ä»– block_device_mapping語法。"
#, python-format
msgid ""
-"VDI %(vdi_ref)s is %(virtual_size)d bytes which is larger than flavor size "
-"of %(new_disk_size)d bytes."
-msgstr ""
-"VDI %(vdi_ref)s 為 %(virtual_size)d ä½å…ƒçµ„,這大於特性大å°%(new_disk_size)d "
-"ä½å…ƒçµ„。"
-
-#, python-format
-msgid ""
-"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun %(target_lun)s)"
-msgstr ""
-"在「儲存體儲存庫 (SR)ã€%(sr)s 上找ä¸åˆ° VDI(vdi_uuid %(vdi_uuid)sã€"
-"target_lun %(target_lun)s)"
-
-#, python-format
-msgid "VHD coalesce attempts exceeded (%d), giving up..."
-msgstr "VHD è¯åˆå˜—試次數已超出 (%d) 次,正在放棄..."
-
-#, python-format
-msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
msgstr "API ä¸æ”¯æ´ %(req_ver)s 版。最低為 %(min_ver)s,最高為 %(max_ver)s。"
@@ -2852,11 +2512,6 @@ msgid ""
"support custom block size"
msgstr "ç”±ç£å€è¨­å®šå€å¡Šå¤§å°ï¼Œä½†ç¾è¡Œ libVirt Hypervisor '%s' ä¸æ”¯æ´è‡ªè¨‚å€å¡Šå¤§å°"
-#, python-format
-msgid ""
-"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
-msgstr "在低於 2.7.4 çš„ Python 下,ä¸æ”¯æ´æž¶æ§‹ '%s',請使用 HTTP 或HTTPS"
-
msgid "When resizing, instances must change flavor!"
msgstr "é‡æ–°èª¿æ•´å¤§å°æ™‚,實例必須變更特性ï¼"
@@ -2870,9 +2525,6 @@ msgstr ""
msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr "åœ¨è³‡æº %(res)s 上使用了錯誤的é…é¡æ–¹æ³• %(method)s"
-msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
-msgstr "連çµé‰¤æ–¹æ³•é¡žåž‹éŒ¯èª¤ã€‚僅容許 'pre' åŠ 'post' é¡žåž‹"
-
msgid "X-Forwarded-For is missing from request."
msgstr "è¦æ±‚éºæ¼äº† X-Forwarded-For。"
@@ -2888,9 +2540,6 @@ msgstr "è¦æ±‚éºæ¼äº† X-Metadata-Provider。"
msgid "X-Tenant-ID header is missing from request."
msgstr "è¦æ±‚éºæ¼äº† X-Tenant-ID 標頭。"
-msgid "XAPI supporting relax-xsm-sr-check=true required"
-msgstr "需è¦æ”¯æ´ relax-xsm-sr-check=true çš„ XAPI"
-
msgid "You are not allowed to delete the image."
msgstr "ä¸å®¹è¨±æ‚¨åˆªé™¤è©²æ˜ åƒæª”。"
@@ -2913,16 +2562,6 @@ msgstr "有 0 個浮動 IP å¯ä¾›ä½¿ç”¨ã€‚"
msgid "admin password can't be changed on existing disk"
msgstr "無法在ç¾æœ‰ç£ç¢Ÿä¸Šè®Šæ›´ç®¡ç†è€…密碼"
-msgid "aggregate deleted"
-msgstr "已刪除èšé›†"
-
-msgid "aggregate in error"
-msgstr "èšé›†ç™¼ç”ŸéŒ¯èª¤"
-
-#, python-format
-msgid "assert_can_migrate failed because: %s"
-msgstr "assert_can_migrate 失敗,原因:%s"
-
msgid "cannot understand JSON"
msgstr "無法ç†è§£ JSON"
@@ -2986,9 +2625,6 @@ msgstr "å·²è£è¼‰æ˜ åƒæª”"
msgid "instance %s is not running"
msgstr "實例 %s 未在執行中"
-msgid "instance has a kernel or ramdisk but not both"
-msgstr "實例具有核心或 Ramdisk,而ä¸æ˜¯å…©è€…兼有"
-
msgid "instance is a required argument to use @refresh_cache"
msgstr "實例是使用 @refresh_cache çš„å¿…è¦å¼•æ•¸"
@@ -3043,9 +2679,6 @@ msgstr "NBD è£ç½® %s 未顯示"
msgid "nbd unavailable: module not loaded"
msgstr "NBD 無法使用:未載入模組"
-msgid "no hosts to remove"
-msgstr "沒有è¦ç§»é™¤çš„主機"
-
#, python-format
msgid "no match found for %s"
msgstr "找ä¸åˆ° %s 的相符項"
@@ -3112,9 +2745,6 @@ msgstr "æœå‹™æ˜¯ Memcached åž‹ ServiceGroup 驅動程å¼çš„å¿…è¦å¼•æ•¸"
msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr "set_admin_password ä¸æ˜¯ç”±æ­¤é©…動程å¼æˆ–來賓實例實作。"
-msgid "setup in progress"
-msgstr "正在進行設定"
-
#, python-format
msgid "snapshot for %s"
msgstr "%s çš„ Snapshot"
@@ -3131,9 +2761,6 @@ msgstr "主體金鑰太多"
msgid "unpause not supported for vmwareapi"
msgstr "vmwareapi ä¸æ”¯æ´å–消暫åœ"
-msgid "version should be an integer"
-msgstr "版本應該是整數"
-
#, python-format
msgid "vg %s must be LVM volume group"
msgstr "ç£å€ç¾¤çµ„ %s 必須是 LVM ç£å€ç¾¤çµ„"
@@ -3157,13 +2784,3 @@ msgstr "已連接ç£å€ %s"
msgid ""
"volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status"
msgstr "ç£å€ '%(vol)s' 狀態必須為「使用中ã€ã€‚ç›®å‰è™•æ–¼ã€Œ%(status)sã€ç‹€æ…‹"
-
-#, python-format
-msgid "xenapi.fake does not have an implementation for %s"
-msgstr "xenapi.fake 沒有 %s 的實作"
-
-#, python-format
-msgid ""
-"xenapi.fake does not have an implementation for %s or it has been called "
-"with the wrong number of arguments"
-msgstr "xenapi.fake 沒有 %s 的實作,或者已使用錯誤的引數數目進行呼å«"
diff --git a/nova/manager.py b/nova/manager.py
index 9c00401b96..df03305367 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -103,12 +103,15 @@ class Manager(PeriodicTasks, metaclass=ManagerMeta):
"""Tasks to be run at a periodic interval."""
return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
- def init_host(self):
+ def init_host(self, service_ref):
"""Hook to do additional manager initialization when one requests
the service be started. This is called before any service record
- is created.
+ is created, but if one already exists for this service, it is
+ provided.
Child classes should override this method.
+
+ :param service_ref: An objects.Service if one exists, else None.
"""
pass
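
A minimal, hypothetical sketch of how a child manager is now expected to consume the new argument (ExampleManager and the attribute it sets are illustrative only, not part of this change):

    from nova import manager

    class ExampleManager(manager.Manager):
        def init_host(self, service_ref):
            # service_ref is an objects.Service when a record already
            # exists for this service, else None on the very first start.
            if service_ref is not None:
                # Illustrative only: remember the last recorded version.
                self.previous_service_version = service_ref.version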
diff --git a/nova/monkey_patch.py b/nova/monkey_patch.py
index 3c96a433d5..6bcd9017a9 100644
--- a/nova/monkey_patch.py
+++ b/nova/monkey_patch.py
@@ -22,22 +22,11 @@ import os
def _monkey_patch():
- # See https://bugs.launchpad.net/nova/+bug/1164822
- # TODO(mdbooth): This feature was deprecated and removed in eventlet at
- # some point but brought back in version 0.21.0, presumably because some
- # users still required it to work round issues. However, there have been a
- # number of greendns fixes in eventlet since then. Specifically, it looks
- # as though the originally reported IPv6 issue may have been fixed in
- # version 0.24.0. We should remove this when we can confirm that the
- # original issue is fixed.
- # NOTE(artom) eventlet processes environment variables at import-time. We
- # therefore set this here, before importing eventlet, in order to correctly
- # disable greendns.
- os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
-
# NOTE(mdbooth): Anything imported here will not be monkey patched. It is
# important to take care not to import anything here which requires monkey
# patching.
+ # NOTE(artom) eventlet processes environment variables at import-time.
+ # As such, any eventlet configuration should happen here if needed.
import eventlet
import sys
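
The removed block and the remaining note hinge on the same constraint: eventlet reads its environment switches at import time, so any such override has to be exported before the first eventlet import. A hypothetical sketch (EVENTLET_NO_GREENDNS is reused only as an example of an import-time knob; this change removes the unconditional setting of it):

    import os

    # Must happen before eventlet is imported anywhere in the process.
    os.environ.setdefault('EVENTLET_NO_GREENDNS', 'yes')

    import eventlet  # noqa: E402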
diff --git a/nova/network/constants.py b/nova/network/constants.py
index 4a08c870f6..d98e217f95 100644
--- a/nova/network/constants.py
+++ b/nova/network/constants.py
@@ -13,23 +13,39 @@
# License for the specific language governing permissions and limitations
# under the License.
-QOS_QUEUE = 'QoS Queue'
-NET_EXTERNAL = 'router:external'
-VNIC_INDEX_EXT = 'VNIC Index'
-DNS_INTEGRATION = 'DNS Integration'
-MULTI_NET_EXT = 'Multi Provider Network'
-FIP_PORT_DETAILS = 'Floating IP Port Details Extension'
-SUBSTR_PORT_FILTERING = 'IP address substring filtering'
-PORT_BINDING = 'Port Binding'
-PORT_BINDING_EXTENDED = 'Port Bindings Extended'
-DEFAULT_SECGROUP = 'default'
+# Port fields
+
BINDING_PROFILE = 'binding:profile'
BINDING_HOST_ID = 'binding:host_id'
-MIGRATING_ATTR = 'migrating_to'
-L3_NETWORK_TYPES = ['vxlan', 'gre', 'geneve']
-ALLOCATION = 'allocation'
RESOURCE_REQUEST = 'resource_request'
REQUEST_GROUPS = 'request_groups'
-SEGMENT = 'Segment'
NUMA_POLICY = 'numa_affinity_policy'
-RESOURCE_REQUEST_GROUPS_EXTENSION = "Port Resource Request Groups"
+
+# Binding profile fields
+
+MIGRATING_ATTR = 'migrating_to'
+ALLOCATION = 'allocation'
+
+# Core extensions
+
+DNS_INTEGRATION = 'dns-integration'
+MULTI_PROVIDER = 'multi-provider'
+FIP_PORT_DETAILS = 'fip-port-details'
+PORT_BINDING = 'binding'
+PORT_BINDING_EXTENDED = 'binding-extended'
+SUBSTR_PORT_FILTERING = 'ip-substring-filtering'
+SEGMENT = 'segment'
+RESOURCE_REQUEST_GROUPS = 'port-resource-request-groups'
+
+# Third-party extensions
+
+VNIC_INDEX = 'vnic-index' # this is provided by the vmware_nsx project
+
+# Search fields
+
+NET_EXTERNAL = 'router:external'
+
+# Misc
+
+DEFAULT_SECGROUP = 'default'
+L3_NETWORK_TYPES = ['vxlan', 'gre', 'geneve']
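
The extension constants now hold neutron extension aliases rather than human-readable names, so they can be compared directly against what list_extensions() returns. A hedged sketch, assuming a configured nova environment with a reachable neutron:

    from nova import context as nova_context
    from nova.network import constants
    from nova.network import neutron as neutron_api

    client = neutron_api.get_client(nova_context.get_admin_context())
    enabled = {ext['alias'] for ext in client.list_extensions()['extensions']}

    supports_extended_binding = constants.PORT_BINDING_EXTENDED in enabled
    supports_segments = constants.SEGMENT in enabled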
diff --git a/nova/network/model.py b/nova/network/model.py
index 64995c9527..1260349bcd 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -106,6 +106,7 @@ VNIC_TYPE_VIRTIO_FORWARDER = 'virtio-forwarder'
VNIC_TYPE_VDPA = 'vdpa'
VNIC_TYPE_ACCELERATOR_DIRECT = 'accelerator-direct'
VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL = 'accelerator-direct-physical'
+VNIC_TYPE_REMOTE_MANAGED = "remote-managed"
# Define list of ports which needs pci request.
# Note: The macvtap port needs a PCI request as it is a tap interface
@@ -121,19 +122,20 @@ VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL = 'accelerator-direct-physical'
# selected compute node.
VNIC_TYPES_SRIOV = (
VNIC_TYPE_DIRECT, VNIC_TYPE_MACVTAP, VNIC_TYPE_DIRECT_PHYSICAL,
- VNIC_TYPE_VIRTIO_FORWARDER, VNIC_TYPE_VDPA)
+ VNIC_TYPE_VIRTIO_FORWARDER, VNIC_TYPE_VDPA, VNIC_TYPE_REMOTE_MANAGED
+)
# Define list of ports which are passthrough to the guest
# and need a special treatment on snapshot and suspend/resume
-VNIC_TYPES_DIRECT_PASSTHROUGH = (VNIC_TYPE_DIRECT,
- VNIC_TYPE_DIRECT_PHYSICAL,
- VNIC_TYPE_ACCELERATOR_DIRECT,
- VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL)
+VNIC_TYPES_DIRECT_PASSTHROUGH = (
+ VNIC_TYPE_DIRECT, VNIC_TYPE_DIRECT_PHYSICAL,
+ VNIC_TYPE_ACCELERATOR_DIRECT, VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL,
+ VNIC_TYPE_REMOTE_MANAGED, VNIC_TYPE_VDPA
+)
# Define list of ports which contains devices managed by cyborg.
VNIC_TYPES_ACCELERATOR = (
- VNIC_TYPE_ACCELERATOR_DIRECT,
- VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL
+ VNIC_TYPE_ACCELERATOR_DIRECT, VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL
)
# Constants for the 'vif_model' values
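
With the new constant, a remote-managed port is treated both as SR-IOV (it needs a PCI request) and as direct passthrough (it needs special handling around snapshot and suspend/resume), but not as a cyborg-managed accelerator. A quick check using only the tuples defined above:

    from nova.network import model as network_model

    vnic_type = network_model.VNIC_TYPE_REMOTE_MANAGED
    assert vnic_type in network_model.VNIC_TYPES_SRIOV
    assert vnic_type in network_model.VNIC_TYPES_DIRECT_PASSTHROUGH
    assert vnic_type not in network_model.VNIC_TYPES_ACCELERATOR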
diff --git a/nova/network/neutron.py b/nova/network/neutron.py
index e02da93b57..0ae0f4099f 100644
--- a/nova/network/neutron.py
+++ b/nova/network/neutron.py
@@ -43,7 +43,6 @@ from nova.network import constants
from nova.network import model as network_model
from nova import objects
from nova.objects import fields as obj_fields
-from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
@@ -124,6 +123,19 @@ def update_instance_cache_with_nw_info(impl, context, instance, nw_info=None):
ic.network_info = nw_info
ic.save()
instance.info_cache = ic
+ except exception.InstanceNotFound as e:
+ # The instance could have moved during a cross-cell migration when we
+ # receive an external event from neutron. Avoid logging a traceback
+ # when it happens.
+ msg = str(e)
+ if e.__class__.__name__.endswith('_Remote'):
+ # If this exception was raised remotely over RPC, the traceback(s)
+ # will be appended to the message. Truncate it in that case.
+ msg = utils.safe_truncate(msg.split('\n', 1)[0], 255)
+ LOG.info('Failed storing info cache due to: %s. '
+ 'The instance may have moved to another cell during a '
+ 'cross-cell migration', msg, instance=instance)
+ raise exception.InstanceNotFound(message=msg)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception('Failed storing info cache', instance=instance)
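
The truncation matters when the exception was raised remotely over RPC: oslo.messaging appends the traceback to the message and renames the class with a '_Remote' suffix, so only the first line is kept and capped at 255 characters. Roughly what the handler computes, with a made-up message:

    from nova import utils

    raw = ('Instance 00000000-0000-0000-0000-000000000000 could not be found.\n'
           'Traceback (most recent call last): ...')
    short_msg = utils.safe_truncate(raw.split('\n', 1)[0], 255)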
@@ -287,7 +299,7 @@ class API:
self.last_neutron_extension_sync = None
self.extensions = {}
self.pci_whitelist = pci_whitelist.Whitelist(
- CONF.pci.passthrough_whitelist)
+ CONF.pci.device_spec)
def _update_port_with_migration_profile(
self, instance, port_id, port_profile, admin_client):
@@ -382,7 +394,8 @@ class API:
# If a host was provided, delete any bindings between that
# host and the ports as long as the host isn't the same as
# the current instance.host.
- has_binding_ext = self.supports_port_binding_extension(context)
+ has_binding_ext = self.has_port_binding_extension(
+ client=admin_client)
if port_migrating and has_binding_ext:
self._delete_port_bindings(context, ports, host)
elif port_migrating:
@@ -599,10 +612,22 @@ class API:
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['id'])
+ def unbind_ports(self, context, ports, detach=True):
+ """Unbind and detach the given ports by clearing their
+ device_owner and dns_name.
+ The device_id will also be cleaned if detach=True.
+
+ :param context: The request context.
+ :param ports: list of port IDs.
+ """
+ neutron = get_client(context)
+ self._unbind_ports(context, ports, neutron, detach=detach)
+
def _unbind_ports(self, context, ports,
- neutron, port_client=None):
- """Unbind the given ports by clearing their device_id,
+ neutron, port_client=None, detach=True):
+ """Unbind and detach the given ports by clearing their
device_owner and dns_name.
+ The device_id will also be cleaned if detach=True.
:param context: The request context.
:param ports: list of port IDs.
@@ -622,13 +647,15 @@ class API:
# in case the caller forgot to filter the list.
if port_id is None:
continue
+
port_req_body: ty.Dict[str, ty.Any] = {
'port': {
- 'device_id': '',
- 'device_owner': '',
constants.BINDING_HOST_ID: None,
}
}
+ if detach:
+ port_req_body['port']['device_id'] = ''
+ port_req_body['port']['device_owner'] = ''
try:
port = self._show_port(
context, port_id, neutron_client=neutron,
@@ -636,7 +663,7 @@ class API:
except exception.PortNotFound:
LOG.debug('Unable to show port %s as it no longer '
'exists.', port_id)
- return
+ continue
except Exception:
# NOTE: In case we can't retrieve the binding:profile or
# network info assume that they are empty
@@ -667,7 +694,10 @@ class API:
# for the physical device but don't want to overwrite the other
# information in the binding profile.
for profile_key in ('pci_vendor_info', 'pci_slot',
- constants.ALLOCATION, 'arq_uuid'):
+ constants.ALLOCATION, 'arq_uuid',
+ 'physical_network', 'card_serial_number',
+ 'vf_num', 'pf_mac_address',
+ 'device_mac_address'):
if profile_key in port_profile:
del port_profile[profile_key]
port_req_body['port'][constants.BINDING_PROFILE] = port_profile
@@ -675,8 +705,12 @@ class API:
# NOTE: For internal DNS integration (network does not have a
# dns_domain), or if we cannot retrieve network info, we use the
# admin client to reset dns_name.
- if self._has_dns_extension() and not network.get('dns_domain'):
+ if (
+ self.has_dns_extension(client=port_client) and
+ not network.get('dns_domain')
+ ):
port_req_body['port']['dns_name'] = ''
+
try:
port_client.update_port(port_id, port_req_body)
except neutron_client_exc.PortNotFoundClient:
@@ -1286,6 +1320,10 @@ class API:
network=network, neutron=neutron,
bind_host_id=bind_host_id,
port_arq=port_arq)
+ # NOTE(gibi): Remove this once we are sure that the fix for
+ # bug 1942329 is always present in the deployed neutron. The
+ # _populate_neutron_extension_values() call above already
+ # populated this MAC into the binding profile instead.
self._populate_pci_mac_address(instance,
request.pci_request_id, port_req_body)
@@ -1333,62 +1371,102 @@ class API:
return (nets_in_requested_order, ports_in_requested_order,
preexisting_port_ids, created_port_ids)
- def _refresh_neutron_extensions_cache(self, context, neutron=None):
+ def _refresh_neutron_extensions_cache(self, client):
"""Refresh the neutron extensions cache when necessary."""
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync) >=
CONF.neutron.extension_sync_interval)):
- if neutron is None:
- neutron = get_client(context)
- extensions_list = neutron.list_extensions()['extensions']
+ extensions_list = client.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
- self.extensions = {ext['name']: ext for ext in extensions_list}
+ self.extensions = {ext['alias']: ext for ext in extensions_list}
- def _has_multi_provider_extension(self, context, neutron=None):
- self._refresh_neutron_extensions_cache(context, neutron=neutron)
- return constants.MULTI_NET_EXT in self.extensions
+ def _has_extension(self, extension, context=None, client=None):
+ """Check if the provided neutron extension is enabled.
- def _has_dns_extension(self):
- return constants.DNS_INTEGRATION in self.extensions
+ :param extension: The alias of the extension to check
+ :param client: keystoneauth1.adapter.Adapter
+ :param context: nova.context.RequestContext
+ :returns: True if the neutron extension is available, else False
+ """
+ if client is None:
+ client = get_client(context)
- def _has_qos_queue_extension(self, context, neutron=None):
- self._refresh_neutron_extensions_cache(context, neutron=neutron)
- return constants.QOS_QUEUE in self.extensions
+ self._refresh_neutron_extensions_cache(client)
+ return extension in self.extensions
- def _has_fip_port_details_extension(self, context, neutron=None):
- self._refresh_neutron_extensions_cache(context, neutron=neutron)
- return constants.FIP_PORT_DETAILS in self.extensions
+ def has_multi_provider_extension(self, context=None, client=None):
+ """Check if the 'multi-provider' extension is enabled.
- def has_substr_port_filtering_extension(self, context):
- self._refresh_neutron_extensions_cache(context)
- return constants.SUBSTR_PORT_FILTERING in self.extensions
+ This extension allows administrative users to define multiple physical
+ bindings for a logical network.
+ """
+ return self._has_extension(constants.MULTI_PROVIDER, context, client)
+
+ def has_dns_extension(self, context=None, client=None):
+ """Check if the 'dns-integration' extension is enabled.
- def _has_segment_extension(self, context, neutron=None):
- self._refresh_neutron_extensions_cache(context, neutron=neutron)
- return constants.SEGMENT in self.extensions
+ This extension adds the 'dns_name' and 'dns_assignment' attributes to
+ port resources.
+ """
+ return self._has_extension(constants.DNS_INTEGRATION, context, client)
# TODO(gibi): Remove all branches where this is False after Neutron made
# this extension mandatory. In Xena this extension will be optional to
# support the scenario where Neutron upgraded first. So Neutron can mark
# this mandatory earliest in Yoga.
- def has_extended_resource_request_extension(self, context, neutron=None):
- self._refresh_neutron_extensions_cache(context, neutron=neutron)
- return constants.RESOURCE_REQUEST_GROUPS_EXTENSION in self.extensions
+ def has_extended_resource_request_extension(
+ self, context=None, client=None,
+ ):
+ return self._has_extension(
+ constants.RESOURCE_REQUEST_GROUPS, context, client,
+ )
- def supports_port_binding_extension(self, context):
- """This is a simple check to see if the neutron "binding-extended"
- extension exists and is enabled.
+ def has_vnic_index_extension(self, context=None, client=None):
+ """Check if the 'vnic-index' extension is enabled.
- The "binding-extended" extension allows nova to bind a port to multiple
- hosts at the same time, like during live migration.
+ This extension is provided by the VMWare NSX neutron plugin.
+ """
+ return self._has_extension(constants.VNIC_INDEX, context, client)
- :param context: the user request context
- :returns: True if the binding-extended API extension is available,
- False otherwise
+ def has_fip_port_details_extension(self, context=None, client=None):
+ """Check if the 'fip-port-details' extension is enabled.
+
+ This extension adds the 'port_details' attribute to floating IPs.
"""
- self._refresh_neutron_extensions_cache(context)
- return constants.PORT_BINDING_EXTENDED in self.extensions
+ return self._has_extension(constants.FIP_PORT_DETAILS, context, client)
+
+ def has_substr_port_filtering_extension(self, context=None, client=None):
+ """Check if the 'ip-substring-filtering' extension is enabled.
+
+ This extension adds support for filtering ports by using part of an IP
+ address.
+ """
+ return self._has_extension(
+ constants.SUBSTR_PORT_FILTERING, context, client
+ )
+
+ def has_segment_extension(self, context=None, client=None):
+ """Check if the neutron 'segment' extension is enabled.
+
+ This extension exposes information about L2 segments of a network.
+ """
+ return self._has_extension(
+ constants.SEGMENT, context, client,
+ )
+
+ def has_port_binding_extension(self, context=None, client=None):
+ """Check if the neutron 'binding-extended' extension is enabled.
+
+ This extension exposes the port bindings of a virtual port to external
+ applications.
+
+ This extension allows nova to bind a port to multiple hosts at the same
+ time, like during live migration.
+ """
+ return self._has_extension(
+ constants.PORT_BINDING_EXTENDED, context, client
+ )
def bind_ports_to_host(self, context, instance, host,
vnic_types=None, port_profiles=None):
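
All of these helpers funnel into _has_extension() and accept either a request context or an already-built client, so callers can reuse one client across several checks. A hedged usage sketch, assuming a configured nova environment:

    from nova import context as nova_context
    from nova.network import neutron as neutron_api

    ctxt = nova_context.get_admin_context()
    api = neutron_api.API()
    client = neutron_api.get_client(ctxt, admin=True)

    supports_extended_binding = api.has_port_binding_extension(client=client)
    # When True, helpers such as bind_ports_to_host() and delete_port_binding()
    # can manage multiple host bindings, e.g. during live migration.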
@@ -1402,7 +1480,7 @@ class API:
In the event of an error, any ports which were successfully bound to
the host should have those host bindings removed from the ports.
- This method should not be used if "supports_port_binding_extension"
+ This method should not be used if "has_port_binding_extension"
returns False.
:param context: the user request context
@@ -1481,7 +1559,7 @@ class API:
def delete_port_binding(self, context, port_id, host):
"""Delete the port binding for the given port ID and host
- This method should not be used if "supports_port_binding_extension"
+ This method should not be used if "has_port_binding_extension"
returns False.
:param context: The request context for the operation.
@@ -1503,14 +1581,54 @@ class API:
raise exception.PortBindingDeletionFailed(
port_id=port_id, host=host)
+ def _get_vf_pci_device_profile(self, pci_dev):
+ """Get VF-specific fields to add to the PCI device profile.
+
+ This data can be useful, e.g. for off-path networking backends that
+ need to do the necessary plumbing in order to set a VF up for packet
+ forwarding.
+ """
+ vf_profile: ty.Dict[str, ty.Union[str, int]] = {}
+
+ pf_mac = pci_dev.sriov_cap.get('pf_mac_address')
+ vf_num = pci_dev.sriov_cap.get('vf_num')
+ card_serial_number = pci_dev.card_serial_number
+ if all((pf_mac, vf_num, card_serial_number)):
+ vf_profile.update({
+ 'card_serial_number': card_serial_number,
+ 'pf_mac_address': pf_mac,
+ 'vf_num': vf_num,
+ })
+ return vf_profile
+
def _get_pci_device_profile(self, pci_dev):
dev_spec = self.pci_whitelist.get_devspec(pci_dev)
if dev_spec:
- return {'pci_vendor_info': "%s:%s" %
- (pci_dev.vendor_id, pci_dev.product_id),
- 'pci_slot': pci_dev.address,
- 'physical_network':
- dev_spec.get_tags().get('physical_network')}
+ dev_profile = {
+ 'pci_vendor_info': "%s:%s"
+ % (pci_dev.vendor_id, pci_dev.product_id),
+ 'pci_slot': pci_dev.address,
+ 'physical_network': dev_spec.get_tags().get(
+ 'physical_network'
+ ),
+ }
+ if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_VF:
+ dev_profile.update(
+ self._get_vf_pci_device_profile(pci_dev))
+
+ if pci_dev.dev_type == obj_fields.PciDeviceType.SRIOV_PF:
+ # In general the MAC address information flows from the neutron
+ # port to the device in the backend. Except for direct-physical
+ # ports. In that case the MAC address flows from the physical
+ # device, the PF, to the neutron port. So when such a port is
+ # being bound to a host the port's MAC address needs to be
+ # updated. Nova needs to put the new MAC into the binding
+ # profile.
+ if pci_dev.mac_address:
+ dev_profile['device_mac_address'] = pci_dev.mac_address
+
+ return dev_profile
+
raise exception.PciDeviceNotFound(node_id=pci_dev.compute_node_id,
address=pci_dev.address)
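
A hedged illustration of the kind of binding profile this can now produce for a VF backed by a SmartNIC/DPU; every value below is invented for the example:

    example_vf_profile = {
        'pci_vendor_info': '15b3:101e',
        'pci_slot': '0000:3b:00.2',
        'physical_network': None,
        # VF-specific fields added by _get_vf_pci_device_profile():
        'card_serial_number': 'MT2113X00000',
        'pf_mac_address': '98:03:9b:00:00:01',
        'vf_num': 2,
    }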
@@ -1525,14 +1643,13 @@ class API:
pci_request_id cannot be found on the instance.
"""
if pci_request_id:
- pci_devices = pci_manager.get_instance_pci_devs(
- instance, pci_request_id)
+ pci_devices = instance.get_pci_devices(request_id=pci_request_id)
if not pci_devices:
# The pci_request_id likely won't mean much except for tracing
# through the logs since it is generated per request.
LOG.error('Unable to find PCI device using PCI request ID in '
'list of claimed instance PCI devices: %s. Is the '
- '[pci]/passthrough_whitelist configuration correct?',
+ '[pci]device_spec configuration correct?',
# Convert to a primitive list to stringify it.
list(instance.pci_devices), instance=instance)
raise exception.PciDeviceNotFound(
@@ -1556,8 +1673,7 @@ class API:
Currently this is done only for PF passthrough.
"""
if pci_request_id is not None:
- pci_devs = pci_manager.get_instance_pci_devs(
- instance, pci_request_id)
+ pci_devs = instance.get_pci_devices(request_id=pci_request_id)
if len(pci_devs) != 1:
# NOTE(ndipanov): We shouldn't ever get here since
# InstancePCIRequest instances built from network requests
@@ -1590,17 +1706,16 @@ class API:
If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor.
"""
- if self._has_qos_queue_extension(context, neutron=neutron):
- flavor = instance.get_flavor()
- rxtx_factor = flavor.get('rxtx_factor')
- port_req_body['port']['rxtx_factor'] = rxtx_factor
+ if neutron is None:
+ neutron = get_client(context)
+
port_req_body['port'][constants.BINDING_HOST_ID] = bind_host_id
self._populate_neutron_binding_profile(instance,
pci_request_id,
port_req_body,
port_arq)
- if self._has_dns_extension():
+ if self.has_dns_extension(client=neutron):
# If the DNS integration extension is enabled in Neutron, most
# ports will get their dns_name attribute set in the port create or
# update requests in allocate_for_instance. So we just add the
@@ -1626,7 +1741,8 @@ class API:
an additional update request. Only a very small fraction of ports will
require this additional update request.
"""
- if self._has_dns_extension() and network.get('dns_domain'):
+ if self.has_dns_extension(client=neutron) and network.get(
+ 'dns_domain'):
try:
port_req_body = {'port': {'dns_name': instance.hostname}}
neutron.update_port(port_id, port_req_body)
@@ -1638,7 +1754,7 @@ class API:
'name') % {'hostname': instance.hostname})
raise exception.InvalidInput(reason=msg)
- def _reset_port_dns_name(self, network, port_id, neutron_client):
+ def _reset_port_dns_name(self, network, port_id, client):
"""Reset an instance port dns_name attribute to empty when using
external DNS service.
@@ -1648,10 +1764,11 @@ class API:
request with a Neutron client using user's context, so that the DNS
record can be found under user's zone and domain.
"""
- if self._has_dns_extension() and network.get('dns_domain'):
+ if self.has_dns_extension(client=client) and network.get(
+ 'dns_domain'):
try:
port_req_body = {'port': {'dns_name': ''}}
- neutron_client.update_port(port_id, port_req_body)
+ client.update_port(port_id, port_req_body)
except neutron_client_exc.NeutronClientException:
LOG.exception("Failed to reset dns_name for port %s", port_id)
@@ -2025,7 +2142,7 @@ class API:
segments, the first segment that defines a physnet value will be
used for the physnet name.
"""
- if self._has_multi_provider_extension(context, neutron=neutron):
+ if self.has_multi_provider_extension(client=neutron):
network = neutron.show_network(net_id,
fields='segments').get('network')
segments = network.get('segments', {})
@@ -2069,6 +2186,27 @@ class API:
# the port binding profile and we can handle it as a boolean.
return strutils.bool_from_string(value)
+ @staticmethod
+ def _is_remote_managed(vnic_type):
+ """Determine if the port is remote_managed or not by VNIC type.
+
+ :param str vnic_type: The VNIC type to assess.
+ :return: A boolean indicating whether the NIC is remote-managed or not.
+ :rtype: bool
+ """
+ return vnic_type == network_model.VNIC_TYPE_REMOTE_MANAGED
+
+ def is_remote_managed_port(self, context, port_id):
+ """Determine if a port has a REMOTE_MANAGED VNIC type.
+
+ :param context: The request context
+ :param port_id: The id of the Neutron port
+ """
+ port = self.show_port(context, port_id)['port']
+ return self._is_remote_managed(
+ port.get('binding:vnic_type', network_model.VNIC_TYPE_NORMAL)
+ )
+
# NOTE(sean-k-mooney): we might want to have this return a
# nova.network.model.VIF object instead in the future.
def _get_port_vnic_info(self, context, neutron, port_id):
@@ -2240,7 +2378,13 @@ class API:
# libvirt to expose the nic feature. At the moment
# there is a limitation that deployers cannot use both
# SR-IOV modes (legacy and ovs) in the same deployment.
- spec = {pci_request.PCI_NET_TAG: physnet}
+ spec = {
+ pci_request.PCI_NET_TAG: physnet,
+ # Convert the value to string since tags are compared as
+ # string values case-insensitively.
+ pci_request.PCI_REMOTE_MANAGED_TAG:
+ str(self._is_remote_managed(vnic_type)),
+ }
dev_type = pci_request.DEVICE_TYPE_FOR_VNIC_TYPE.get(vnic_type)
if dev_type:
spec[pci_request.PCI_DEVICE_TYPE_TAG] = dev_type
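
Because PCI spec tags are compared as case-insensitive strings, the boolean is stringified before it lands in the request spec. An illustrative result for a remote-managed port on physnet1; the literal tag keys are assumptions, the code above only uses the symbolic names:

    example_spec = {
        'physical_network': 'physnet1',  # pci_request.PCI_NET_TAG
        'remote_managed': 'True',        # pci_request.PCI_REMOTE_MANAGED_TAG
    }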
@@ -2364,16 +2508,26 @@ class API:
if request.port_id:
port = self._show_port(context, request.port_id,
neutron_client=neutron)
- if port.get('device_id', None):
+ if port.get('device_id'):
raise exception.PortInUse(port_id=request.port_id)
+
deferred_ip = port.get('ip_allocation') == 'deferred'
+ ipless_port = port.get('ip_allocation') == 'none'
# NOTE(carl_baldwin) A deferred IP port doesn't have an
# address here. If it fails to get one later when nova
# updates it with host info, Neutron will error which
# raises an exception.
- if not deferred_ip and not port.get('fixed_ips'):
+ # NOTE(sbauza): We don't need to validate the
+ # 'connectivity' attribute of the port's
+ # 'binding:vif_details' to ensure it's 'l2', as Neutron
+ # already verifies it.
+ if (
+ not (deferred_ip or ipless_port) and
+ not port.get('fixed_ips')
+ ):
raise exception.PortRequiresFixedIP(
port_id=request.port_id)
+
request.network_id = port['network_id']
else:
ports_needed_per_instance += 1
@@ -2700,7 +2854,7 @@ class API:
# ...and retrieve the port details for the same reason, but only if
# they're not already there because the fip-port-details extension is
# present
- if not self._has_fip_port_details_extension(context, client):
+ if not self.has_fip_port_details_extension(client=client):
port_id = fip['port_id']
try:
fip['port_details'] = client.show_port(
@@ -2728,7 +2882,7 @@ class API:
# ...and retrieve the port details for the same reason, but only if
# they're not already there because the fip-port-details extension is
# present
- if not self._has_fip_port_details_extension(context, client):
+ if not self.has_fip_port_details_extension(client=client):
port_id = fip['port_id']
try:
fip['port_details'] = client.show_port(
@@ -2767,7 +2921,7 @@ class API:
# ...and retrieve the port details for the same reason, but only if
# they're not already there because the fip-port-details extension is
# present
- if not self._has_fip_port_details_extension(context, client):
+ if not self.has_fip_port_details_extension(client=client):
ports = {port['id']: port for port in client.list_ports(
**{'tenant_id': project_id})['ports']}
for fip in fips:
@@ -2963,7 +3117,7 @@ class API:
:raises: nova.exception.PortBindingActivationFailed if any port binding
activation fails
"""
- if not self.supports_port_binding_extension(context):
+ if not self.has_port_binding_extension(context):
# If neutron isn't new enough yet for the port "binding-extended"
# API extension, we just no-op. The port binding host will be
# be updated in migrate_instance_finish, which is functionally OK,
@@ -2974,7 +3128,7 @@ class API:
return
client = get_client(context, admin=True)
- dest_host = migration['dest_compute']
+ dest_host = migration.dest_compute
for vif in instance.get_network_info():
# Not all compute migration flows use the port binding-extended
# API yet, so first check to see if there is a binding for the
@@ -3215,6 +3369,25 @@ class API:
delegate_create=True,
)
+ def _log_error_if_vnic_type_changed(
+ self, port_id, old_vnic_type, new_vnic_type, instance
+ ):
+ if old_vnic_type and old_vnic_type != new_vnic_type:
+ LOG.error(
+ 'The vnic_type of the bound port %s has '
+ 'been changed in neutron from "%s" to '
+ '"%s". Changing vnic_type of a bound port '
+ 'is not supported by Nova. To avoid '
+ 'breaking the connectivity of the instance '
+ 'please change the port vnic_type back to '
+ '"%s".',
+ port_id,
+ old_vnic_type,
+ new_vnic_type,
+ old_vnic_type,
+ instance=instance
+ )
+
def _build_network_info_model(self, context, instance, networks=None,
port_ids=None, admin_client=None,
preexisting_port_ids=None,
@@ -3288,6 +3461,12 @@ class API:
preexisting_port_ids)
for index, vif in enumerate(nw_info):
if vif['id'] == refresh_vif_id:
+ self._log_error_if_vnic_type_changed(
+ vif['id'],
+ vif['vnic_type'],
+ refreshed_vif['vnic_type'],
+ instance,
+ )
# Update the existing entry.
nw_info[index] = refreshed_vif
LOG.debug('Updated VIF entry in instance network '
@@ -3337,6 +3516,7 @@ class API:
networks, port_ids = self._gather_port_ids_and_networks(
context, instance, networks, port_ids, client)
+ old_nw_info = instance.get_network_info()
nw_info = network_model.NetworkInfo()
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
@@ -3344,6 +3524,14 @@ class API:
vif = self._build_vif_model(
context, client, current_neutron_port, networks,
preexisting_port_ids)
+ for old_vif in old_nw_info:
+ if old_vif['id'] == port_id:
+ self._log_error_if_vnic_type_changed(
+ port_id,
+ old_vif['vnic_type'],
+ vif['vnic_type'],
+ instance,
+ )
nw_info.append(vif)
elif nw_info_refresh:
LOG.info('Port %s from network info_cache is no '
@@ -3489,7 +3677,8 @@ class API:
:raises: PortBindingDeletionFailed if port binding deletion fails.
"""
# First check to see if the port binding extension is supported.
- if not self.supports_port_binding_extension(context):
+ client = get_client(context)
+ if not self.has_port_binding_extension(client=client):
LOG.info("Neutron extension '%s' is not supported; not cleaning "
"up port bindings for host %s.",
constants.PORT_BINDING_EXTENDED, host, instance=instance)
@@ -3508,19 +3697,17 @@ class API:
return {}
# In case of revert, swap old and new devices to
# update the ports back to the original devices.
- revert = (migration and
- migration.get('status') == 'reverted')
+ revert = migration and migration.status == 'reverted'
return instance.migration_context.get_pci_mapping_for_migration(revert)
- def _get_port_pci_slot(self, context, instance, port):
- """Find the PCI address of the device corresponding to the port.
+ def _get_port_pci_dev(self, instance, port):
+ """Find the PCI device corresponding to the port.
Assumes the port is an SRIOV one.
- :param context: The request context.
:param instance: The instance to which the port is attached.
:param port: The Neutron port, as obtained from the Neutron API
JSON form.
- :return: The PCI address as a string, or None if unable to find.
+ :return: The PciDevice object, or None if unable to find.
"""
# Find the port's PCIRequest, or return None
for r in instance.pci_requests.requests:
@@ -3540,8 +3727,7 @@ class API:
LOG.debug('No PCI device found for request %s',
request.request_id, instance=instance)
return None
- # Return the device's PCI address
- return device.address
+ return device
def _update_port_binding_for_instance(
self, context, instance, host, migration=None,
@@ -3605,13 +3791,14 @@ class API:
raise exception.PortUpdateFailed(port_id=p['id'],
reason=_("Unable to correlate PCI slot %s") %
pci_slot)
- # NOTE(artom) If migration is None, this is an unshevle, and we
- # need to figure out the pci_slot from the InstancePCIRequest
- # and PciDevice objects.
+ # NOTE(artom) If migration is None, this is an unshelve, and we
+ # need to figure out the pci related binding information from
+ # the InstancePCIRequest and PciDevice objects.
else:
- pci_slot = self._get_port_pci_slot(context, instance, p)
- if pci_slot:
- binding_profile.update({'pci_slot': pci_slot})
+ pci_dev = self._get_port_pci_dev(instance, p)
+ if pci_dev:
+ binding_profile.update(
+ self._get_pci_device_profile(pci_dev))
updates[constants.BINDING_PROFILE] = binding_profile
# NOTE(gibi): during live migration the conductor already sets the
@@ -3680,9 +3867,8 @@ class API:
:param vif: The VIF in question.
:param index: The index on the instance for the VIF.
"""
- self._refresh_neutron_extensions_cache(context)
- if constants.VNIC_INDEX_EXT in self.extensions:
- neutron = get_client(context)
+ neutron = get_client(context)
+ if self.has_vnic_index_extension(client=neutron):
port_req_body = {'port': {'vnic_index': index}}
try:
neutron.update_port(vif['id'], port_req_body)
@@ -3705,10 +3891,11 @@ class API:
either Segment extension isn't enabled in Neutron or if the network
isn't configured for routing.
"""
- if not self._has_segment_extension(context):
+ client = get_client(context, admin=True)
+
+ if not self.has_segment_extension(client=client):
return []
- client = get_client(context)
try:
# NOTE(sbauza): We can't use list_segments() directly because the
# API is borked and returns both segments but also segmentation IDs
@@ -3720,7 +3907,7 @@ class API:
'Failed to get segment IDs for network %s' % network_id) from e
# The segment field of an unconfigured subnet could be None
return [subnet['segment_id'] for subnet in subnets
- if subnet['segment_id'] is not None]
+ if subnet.get('segment_id') is not None]
def get_segment_id_for_subnet(
self,
@@ -3735,10 +3922,11 @@ class API:
extension isn't enabled in Neutron or the provided subnet doesn't
have segments (if the related network isn't configured for routing)
"""
- if not self._has_segment_extension(context):
+ client = get_client(context, admin=True)
+
+ if not self.has_segment_extension(client=client):
return None
- client = get_client(context)
try:
subnet = client.show_subnet(subnet_id)['subnet']
except neutron_client_exc.NeutronClientException as e:
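The hunks above switch the extension checks from context-based helpers to helpers that take an already-built client (has_segment_extension(client=client), has_port_binding_extension, and so on). A minimal sketch of that kind of check against a python-neutronclient-style client; the helper name is illustrative, not the one Nova uses:

    def _has_extension(client, alias):
        # list_extensions() returns {'extensions': [{'alias': ..., ...}, ...]},
        # so the check is a membership test over the alias values.
        extensions = client.list_extensions().get('extensions', [])
        return any(ext.get('alias') == alias for ext in extensions)

    # e.g. skip segment-aware handling when the extension is absent:
    # if not _has_extension(client, 'segment'):
    #     return []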
diff --git a/nova/network/os_vif_util.py b/nova/network/os_vif_util.py
index bf643ff105..21d6f66b79 100644
--- a/nova/network/os_vif_util.py
+++ b/nova/network/os_vif_util.py
@@ -338,6 +338,15 @@ def _nova_to_osvif_vif_ovs(vif):
port_profile=_get_ovs_representor_port_profile(vif),
plugin="ovs")
_set_representor_datapath_offload_settings(vif, obj)
+ elif vnic_type == model.VNIC_TYPE_REMOTE_MANAGED:
+ # A networking backend is responsible for setting up a
+ # representor in this case so the driver is noop.
+ obj = _get_vif_instance(
+ vif, objects.vif.VIFHostDevice,
+ plugin="noop",
+ vif_name=vif_name,
+ dev_address=vif["profile"]["pci_slot"],
+ dev_type=objects.fields.VIFHostDeviceDevType.ETHERNET)
elif vif.is_hybrid_plug_enabled():
obj = _get_vif_instance(
vif,
diff --git a/nova/notifications/objects/image.py b/nova/notifications/objects/image.py
index 0240d00105..01c86d1cb0 100644
--- a/nova/notifications/objects/image.py
+++ b/nova/notifications/objects/image.py
@@ -125,7 +125,12 @@ class ImageMetaPropsPayload(base.NotificationPayloadBase):
# Version 1.6: Added 'socket' to hw_pci_numa_affinity_policy
# Version 1.7: Added 'hw_input_bus' field
# Version 1.8: Added 'bochs' as an option to 'hw_video_model'
- VERSION = '1.8'
+ # Version 1.9: Added 'hw_emulation_architecture' field
+ # Version 1.10: Added 'hw_ephemeral_encryption' and
+ # 'hw_ephemeral_encryption_format' fields
+ # Version 1.11: Added 'hw_locked_memory' field
+ # Version 1.12: Added 'hw_viommu_model' field
+ VERSION = '1.12'
SCHEMA = {
k: ('image_meta_props', k) for k in image_meta.ImageMetaProps.fields}
diff --git a/nova/objects/aggregate.py b/nova/objects/aggregate.py
index 2aa802cf9b..1d6597965c 100644
--- a/nova/objects/aggregate.py
+++ b/nova/objects/aggregate.py
@@ -35,8 +35,8 @@ DEPRECATED_FIELDS = ['deleted', 'deleted_at']
@api_db_api.context_manager.reader
def _aggregate_get_from_db(context, aggregate_id):
query = context.session.query(api_models.Aggregate).\
- options(orm.joinedload('_hosts')).\
- options(orm.joinedload('_metadata'))
+ options(orm.joinedload(api_models.Aggregate._hosts)).\
+ options(orm.joinedload(api_models.Aggregate._metadata))
query = query.filter(api_models.Aggregate.id == aggregate_id)
aggregate = query.first()
@@ -50,8 +50,8 @@ def _aggregate_get_from_db(context, aggregate_id):
@api_db_api.context_manager.reader
def _aggregate_get_from_db_by_uuid(context, aggregate_uuid):
query = context.session.query(api_models.Aggregate).\
- options(orm.joinedload('_hosts')).\
- options(orm.joinedload('_metadata'))
+ options(orm.joinedload(api_models.Aggregate._hosts)).\
+ options(orm.joinedload(api_models.Aggregate._metadata))
query = query.filter(api_models.Aggregate.uuid == aggregate_uuid)
aggregate = query.first()
@@ -414,8 +414,8 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
@api_db_api.context_manager.reader
def _get_all_from_db(context):
query = context.session.query(api_models.Aggregate).\
- options(orm.joinedload('_hosts')).\
- options(orm.joinedload('_metadata'))
+ options(orm.joinedload(api_models.Aggregate._hosts)).\
+ options(orm.joinedload(api_models.Aggregate._metadata))
return query.all()
@@ -423,13 +423,13 @@ def _get_all_from_db(context):
@api_db_api.context_manager.reader
def _get_by_host_from_db(context, host, key=None):
query = context.session.query(api_models.Aggregate).\
- options(orm.joinedload('_hosts')).\
- options(orm.joinedload('_metadata'))
- query = query.join('_hosts')
+ options(orm.joinedload(api_models.Aggregate._hosts)).\
+ options(orm.joinedload(api_models.Aggregate._metadata))
+ query = query.join(api_models.Aggregate._hosts)
query = query.filter(api_models.AggregateHost.host == host)
if key:
- query = query.join("_metadata").filter(
+ query = query.join(api_models.Aggregate._metadata).filter(
api_models.AggregateMetadata.key == key)
return query.all()
@@ -439,13 +439,15 @@ def _get_by_host_from_db(context, host, key=None):
def _get_by_metadata_from_db(context, key=None, value=None):
assert(key is not None or value is not None)
query = context.session.query(api_models.Aggregate)
- query = query.join("_metadata")
+ query = query.join(api_models.Aggregate._metadata)
if key is not None:
query = query.filter(api_models.AggregateMetadata.key == key)
if value is not None:
query = query.filter(api_models.AggregateMetadata.value == value)
- query = query.options(orm.contains_eager("_metadata"))
- query = query.options(orm.joinedload("_hosts"))
+ query = query.options(
+ orm.contains_eager(api_models.Aggregate._metadata)
+ )
+ query = query.options(orm.joinedload(api_models.Aggregate._hosts))
return query.all()
@@ -468,16 +470,19 @@ def _get_non_matching_by_metadata_keys_from_db(context, ignored_keys,
raise ValueError(_('key_prefix mandatory field.'))
query = context.session.query(api_models.Aggregate)
- query = query.join("_metadata")
+ query = query.join(api_models.Aggregate._metadata)
query = query.filter(api_models.AggregateMetadata.value == value)
query = query.filter(api_models.AggregateMetadata.key.like(
key_prefix + '%'))
if len(ignored_keys) > 0:
- query = query.filter(~api_models.AggregateMetadata.key.in_(
- ignored_keys))
-
- query = query.options(orm.contains_eager("_metadata"))
- query = query.options(orm.joinedload("_hosts"))
+ query = query.filter(
+ ~api_models.AggregateMetadata.key.in_(ignored_keys)
+ )
+
+ query = query.options(
+ orm.contains_eager(api_models.Aggregate._metadata)
+ )
+ query = query.options(orm.joinedload(api_models.Aggregate._hosts))
return query.all()
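These aggregate query changes replace string arguments to joinedload()/contains_eager()/join() with mapped class attributes, which is the form SQLAlchemy 2.0 requires. A small self-contained illustration of the same switch with a made-up Parent/Child mapping:

    import sqlalchemy as sa
    from sqlalchemy import orm

    Base = orm.declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = sa.Column(sa.Integer, primary_key=True)
        children = orm.relationship('Child')

    class Child(Base):
        __tablename__ = 'child'
        id = sa.Column(sa.Integer, primary_key=True)
        parent_id = sa.Column(sa.Integer, sa.ForeignKey('parent.id'))

    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)

    with orm.Session(engine) as session:
        # Old (removed in SQLAlchemy 2.0): orm.joinedload('children')
        # New: pass the mapped attribute itself.
        session.query(Parent).options(orm.joinedload(Parent.children)).all()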
diff --git a/nova/objects/block_device.py b/nova/objects/block_device.py
index 97199cf17a..82ce1c6806 100644
--- a/nova/objects/block_device.py
+++ b/nova/objects/block_device.py
@@ -67,7 +67,9 @@ class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
# Version 1.18: Added attachment_id
# Version 1.19: Added uuid
# Version 1.20: Added volume_type
- VERSION = '1.20'
+ # Version 1.21: Added encrypted, encryption_secret_uuid, encryption_format
+ # and encryption_options
+ VERSION = '1.21'
fields = {
'id': fields.IntegerField(),
@@ -93,10 +95,20 @@ class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
'attachment_id': fields.UUIDField(nullable=True),
# volume_type field can be a volume type name or ID(UUID).
'volume_type': fields.StringField(nullable=True),
+ 'encrypted': fields.BooleanField(default=False),
+ 'encryption_secret_uuid': fields.UUIDField(nullable=True),
+ 'encryption_format': fields.BlockDeviceEncryptionFormatTypeField(
+ nullable=True),
+ 'encryption_options': fields.StringField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
target_version = versionutils.convert_version_to_tuple(target_version)
+ if target_version < (1, 21):
+ primitive.pop('encrypted', None)
+ primitive.pop('encryption_secret_uuid', None)
+ primitive.pop('encryption_format', None)
+ primitive.pop('encryption_options', None)
if target_version < (1, 20) and 'volume_type' in primitive:
del primitive['volume_type']
if target_version < (1, 19) and 'uuid' in primitive:
@@ -308,26 +320,38 @@ class BlockDeviceMapping(base.NovaPersistentObject, base.NovaObject,
def is_image(self):
return self.source_type == fields.BlockDeviceSourceType.IMAGE
+ @property
+ def is_local(self):
+ return (self.destination_type ==
+ fields.BlockDeviceDestinationType.LOCAL)
+
def get_image_mapping(self):
return block_device.BlockDeviceDict(self).get_image_mapping()
def obj_load_attr(self, attrname):
- if attrname not in BLOCK_DEVICE_OPTIONAL_ATTRS:
- raise exception.ObjectActionError(
- action='obj_load_attr',
- reason='attribute %s not lazy-loadable' % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
-
- LOG.debug("Lazy-loading '%(attr)s' on %(name)s using uuid %(uuid)s",
- {'attr': attrname,
- 'name': self.obj_name(),
- 'uuid': self.instance_uuid,
- })
- self.instance = objects.Instance.get_by_uuid(self._context,
- self.instance_uuid)
- self.obj_reset_changes(fields=['instance'])
+ if attrname == 'encrypted':
+ # We attempt to load this if we're creating a BDM object during an
+ # attach volume request, for example. Use the default in that case.
+ self.obj_set_defaults(attrname)
+ elif attrname not in BLOCK_DEVICE_OPTIONAL_ATTRS:
+ raise exception.ObjectActionError(
+ action='obj_load_attr',
+ reason='attribute %s not lazy-loadable' % attrname)
+ else:
+ LOG.debug(
+ "Lazy-loading '%(attr)s' on %(name)s using uuid %(uuid)s",
+ {
+ 'attr': attrname,
+ 'name': self.obj_name(),
+ 'uuid': self.instance_uuid,
+ }
+ )
+ self.instance = objects.Instance.get_by_uuid(self._context,
+ self.instance_uuid)
+ self.obj_reset_changes(fields=['instance'])
@base.NovaObjectRegistry.register
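The BlockDeviceMapping bump to 1.21 relies on obj_make_compatible() stripping the new encryption fields when serializing for an older consumer. A compact standalone sketch of that back-levelling step (a plain helper, not the actual object method):

    from oslo_utils import versionutils

    NEW_FIELDS = ('encrypted', 'encryption_secret_uuid',
                  'encryption_format', 'encryption_options')

    def downgrade_primitive(primitive, target_version):
        # Drop the fields a pre-1.21 consumer does not know about.
        if versionutils.convert_version_to_tuple(target_version) < (1, 21):
            for field in NEW_FIELDS:
                primitive.pop(field, None)
        return primitive

    print(downgrade_primitive({'encrypted': True, 'volume_type': None}, '1.20'))
    # {'volume_type': None}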
diff --git a/nova/objects/cell_mapping.py b/nova/objects/cell_mapping.py
index 595ec43e48..1355182420 100644
--- a/nova/objects/cell_mapping.py
+++ b/nova/objects/cell_mapping.py
@@ -279,11 +279,15 @@ class CellMappingList(base.ObjectListBase, base.NovaObject):
# SELECT DISTINCT cell_id FROM instance_mappings \
# WHERE project_id = $project_id;
cell_ids = context.session.query(
- api_db_models.InstanceMapping.cell_id).filter_by(
- project_id=project_id).distinct().subquery()
+ api_db_models.InstanceMapping.cell_id
+ ).filter_by(
+ project_id=project_id
+ ).distinct()
# SELECT cell_mappings WHERE cell_id IN ($cell_ids);
- return context.session.query(api_db_models.CellMapping).filter(
- api_db_models.CellMapping.id.in_(cell_ids)).all()
+ return context.session.query(
+ api_db_models.CellMapping).filter(
+ api_db_models.CellMapping.id.in_(cell_ids)
+ ).all()
@classmethod
def get_by_project_id(cls, context, project_id):
diff --git a/nova/objects/compute_node.py b/nova/objects/compute_node.py
index 60c2be71cd..dfc1b2ae28 100644
--- a/nova/objects/compute_node.py
+++ b/nova/objects/compute_node.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslo_utils import versionutils
@@ -339,7 +340,12 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
self._convert_supported_instances_to_db_format(updates)
self._convert_pci_stats_to_db_format(updates)
- db_compute = db.compute_node_create(self._context, updates)
+ try:
+ db_compute = db.compute_node_create(self._context, updates)
+ except db_exc.DBDuplicateEntry:
+ target = 'compute node %s:%s' % (updates['hypervisor_hostname'],
+ updates['uuid'])
+ raise exception.DuplicateRecord(target=target)
self._from_db_object(self._context, self, db_compute)
@base.remotable
@@ -388,8 +394,11 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
# The uuid field is read-only so it should only be set when
# creating the compute node record for the first time. Ignore
# it otherwise.
- if key == 'uuid' and 'uuid' in self:
- continue
+ if (key == 'uuid' and 'uuid' in self and
+ resources[key] != self.uuid):
+ raise exception.InvalidNodeConfiguration(
+ reason='Attempt to overwrite node %s with %s!' % (
+ self.uuid, resources[key]))
setattr(self, key, resources[key])
# supported_instances has a different name in compute_node
diff --git a/nova/objects/external_event.py b/nova/objects/external_event.py
index b1acfc4aa0..e17008dade 100644
--- a/nova/objects/external_event.py
+++ b/nova/objects/external_event.py
@@ -33,6 +33,9 @@ EVENT_NAMES = [
# Accelerator Request got bound, tag is ARQ uuid.
# Sent when an ARQ for an instance has been bound or failed to bind.
'accelerator-request-bound',
+
+ # The re-image operation has completed on the Cinder side
+ 'volume-reimaged',
]
EVENT_STATUSES = ['failed', 'completed', 'in-progress']
@@ -50,7 +53,8 @@ class InstanceExternalEvent(obj_base.NovaObject):
# Version 1.2: adds volume-extended event
# Version 1.3: adds power-update event
# Version 1.4: adds accelerator-request-bound event
- VERSION = '1.4'
+ # Version 1.5: adds volume-reimaged event
+ VERSION = '1.5'
fields = {
'instance_uuid': fields.UUIDField(),
diff --git a/nova/objects/fields.py b/nova/objects/fields.py
index d8cb10f700..cae1ea4a4d 100644
--- a/nova/objects/fields.py
+++ b/nova/objects/fields.py
@@ -260,6 +260,14 @@ class BlockDeviceType(BaseNovaEnum):
ALL = (CDROM, DISK, FLOPPY, FS, LUN)
+class BlockDeviceEncryptionFormatType(BaseNovaEnum):
+ PLAIN = 'plain'
+ LUKS = 'luks'
+ LUKSv2 = 'luksv2'
+
+ ALL = (PLAIN, LUKS, LUKSv2)
+
+
class ConfigDrivePolicy(BaseNovaEnum):
OPTIONAL = "optional"
MANDATORY = "mandatory"
@@ -608,6 +616,16 @@ class VIFModel(BaseNovaEnum):
return super(VIFModel, self).coerce(obj, attr, value)
+class VIOMMUModel(BaseNovaEnum):
+
+ INTEL = 'intel'
+ SMMUV3 = 'smmuv3'
+ VIRTIO = 'virtio'
+ AUTO = 'auto'
+
+ ALL = (INTEL, SMMUV3, VIRTIO, AUTO)
+
+
class VMMode(BaseNovaEnum):
"""Represents possible vm modes for instances.
@@ -1197,6 +1215,10 @@ class BlockDeviceTypeField(BaseEnumField):
AUTO_TYPE = BlockDeviceType()
+class BlockDeviceEncryptionFormatTypeField(BaseEnumField):
+ AUTO_TYPE = BlockDeviceEncryptionFormatType()
+
+
class ConfigDrivePolicyField(BaseEnumField):
AUTO_TYPE = ConfigDrivePolicy()
@@ -1289,6 +1311,10 @@ class VIFModelField(BaseEnumField):
AUTO_TYPE = VIFModel()
+class VIOMMUModelField(BaseEnumField):
+ AUTO_TYPE = VIOMMUModel()
+
+
class VMModeField(BaseEnumField):
AUTO_TYPE = VMMode()
@@ -1353,6 +1379,14 @@ class InstancePowerStateField(BaseEnumField):
AUTO_TYPE = InstancePowerState()
+class NetworkModelField(AutoTypedField):
+ AUTO_TYPE = NetworkModel()
+
+
+class NetworkVIFModelField(AutoTypedField):
+ AUTO_TYPE = NetworkVIFModel()
+
+
class ListOfListsOfStringsField(AutoTypedField):
AUTO_TYPE = List(List(fields.String()))
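The new enum fields (BlockDeviceEncryptionFormatType, VIOMMUModel) constrain values to their ALL tuples at coercion time. A rough illustration using oslo.versionedobjects' generic Enum field type directly, with the value set copied from the VIOMMUModel definition above:

    from oslo_versionedobjects import fields as ovo_fields

    viommu = ovo_fields.Enum(valid_values=('intel', 'smmuv3', 'virtio', 'auto'))

    print(viommu.coerce(None, 'hw_viommu_model', 'virtio'))  # 'virtio'

    try:
        viommu.coerce(None, 'hw_viommu_model', 'bogus')
    except ValueError as exc:
        print('rejected: %s' % exc)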
diff --git a/nova/objects/flavor.py b/nova/objects/flavor.py
index 20378b0dae..01eeb62331 100644
--- a/nova/objects/flavor.py
+++ b/nova/objects/flavor.py
@@ -54,10 +54,11 @@ def _dict_with_extra_specs(flavor_model):
# issues are resolved.
@api_db_api.context_manager.reader
def _get_projects_from_db(context, flavorid):
- db_flavor = context.session.query(api_models.Flavors).\
- filter_by(flavorid=flavorid).\
- options(orm.joinedload('projects')).\
- first()
+ db_flavor = context.session.query(api_models.Flavors).filter_by(
+ flavorid=flavorid
+ ).options(
+ orm.joinedload(api_models.Flavors.projects)
+ ).first()
if not db_flavor:
raise exception.FlavorNotFound(flavor_id=flavorid)
return [x['project_id'] for x in db_flavor['projects']]
@@ -271,8 +272,9 @@ class Flavor(base.NovaPersistentObject, base.NovaObject,
@staticmethod
@api_db_api.context_manager.reader
def _flavor_get_query_from_db(context):
- query = context.session.query(api_models.Flavors).\
- options(orm.joinedload('extra_specs'))
+ query = context.session.query(api_models.Flavors).options(
+ orm.joinedload(api_models.Flavors.extra_specs)
+ )
if not context.is_admin:
the_filter = [api_models.Flavors.is_public == sql.true()]
the_filter.extend([
diff --git a/nova/objects/host_mapping.py b/nova/objects/host_mapping.py
index 09dfe81354..e203f77f3a 100644
--- a/nova/objects/host_mapping.py
+++ b/nova/objects/host_mapping.py
@@ -89,9 +89,13 @@ class HostMapping(base.NovaTimestampObject, base.NovaObject):
@staticmethod
@api_db_api.context_manager.reader
def _get_by_host_from_db(context, host):
- db_mapping = context.session.query(api_models.HostMapping)\
- .options(orm.joinedload('cell_mapping'))\
- .filter(api_models.HostMapping.host == host).first()
+ db_mapping = context.session.query(
+ api_models.HostMapping
+ ).options(
+ orm.joinedload(api_models.HostMapping.cell_mapping)
+ ).filter(
+ api_models.HostMapping.host == host
+ ).first()
if not db_mapping:
raise exception.HostMappingNotFound(name=host)
return db_mapping
@@ -159,18 +163,19 @@ class HostMappingList(base.ObjectListBase, base.NovaObject):
@staticmethod
@api_db_api.context_manager.reader
def _get_from_db(context, cell_id=None):
- query = (context.session.query(api_models.HostMapping)
- .options(orm.joinedload('cell_mapping')))
+ query = context.session.query(api_models.HostMapping).options(
+ orm.joinedload(api_models.HostMapping.cell_mapping)
+ )
if cell_id:
query = query.filter(api_models.HostMapping.cell_id == cell_id)
return query.all()
- @base.remotable_classmethod
+ @base.remotable_classmethod
def get_by_cell_id(cls, context, cell_id):
db_mappings = cls._get_from_db(context, cell_id)
return base.obj_make_list(context, cls(), HostMapping, db_mappings)
- @base.remotable_classmethod
+ @base.remotable_classmethod
def get_all(cls, context):
db_mappings = cls._get_from_db(context)
return base.obj_make_list(context, cls(), HostMapping, db_mappings)
diff --git a/nova/objects/image_meta.py b/nova/objects/image_meta.py
index 853e1715e7..7927ad2575 100644
--- a/nova/objects/image_meta.py
+++ b/nova/objects/image_meta.py
@@ -187,14 +187,28 @@ class ImageMetaProps(base.NovaObject):
# Version 1.28: Added 'socket' to 'hw_pci_numa_affinity_policy'
# Version 1.29: Added 'hw_input_bus' field
# Version 1.30: Added 'bochs' as an option to 'hw_video_model'
+ # Version 1.31: Added 'hw_emulation_architecture' field
+ # Version 1.32: Added 'hw_ephemeral_encryption' and
+ # 'hw_ephemeral_encryption_format' fields
+ # Version 1.33: Added 'hw_locked_memory' field
+ # Version 1.34: Added 'hw_viommu_model' field
# NOTE(efried): When bumping this version, the version of
# ImageMetaPropsPayload must also be bumped. See its docstring for details.
- VERSION = '1.30'
+ VERSION = '1.34'
def obj_make_compatible(self, primitive, target_version):
super(ImageMetaProps, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
+ if target_version < (1, 34):
+ primitive.pop('hw_viommu_model', None)
+ if target_version < (1, 33):
+ primitive.pop('hw_locked_memory', None)
+ if target_version < (1, 32):
+ primitive.pop('hw_ephemeral_encryption', None)
+ primitive.pop('hw_ephemeral_encryption_format', None)
+ if target_version < (1, 31):
+ primitive.pop('hw_emulation_architecture', None)
if target_version < (1, 30):
video = primitive.get('hw_video_model', None)
if video == fields.VideoModel.BOCHS:
@@ -294,6 +308,10 @@ class ImageMetaProps(base.NovaObject):
# name of guest hardware architecture eg i686, x86_64, ppc64
'hw_architecture': fields.ArchitectureField(),
+ # hw_architecture field is leveraged for checks against physical nodes
+ # name of desired emulation architecture eg i686, x86_64, ppc64
+ 'hw_emulation_architecture': fields.ArchitectureField(),
+
# used to decide to expand root disk partition and fs to full size of
# root disk
'hw_auto_disk_config': fields.StringField(),
@@ -356,6 +374,10 @@ class ImageMetaProps(base.NovaObject):
# image with a network boot image
'hw_ipxe_boot': fields.FlexibleBooleanField(),
+ # boolean - make sure ``locked`` element is present in the
+ # ``memoryBacking``.
+ 'hw_locked_memory': fields.FlexibleBooleanField(),
+
# There are sooooooooooo many possible machine types in
# QEMU - several new ones with each new release - that it
# is not practical to enumerate them all. So we use a free
@@ -427,6 +449,9 @@ class ImageMetaProps(base.NovaObject):
# name of a NIC device model eg virtio, e1000, rtl8139
'hw_vif_model': fields.VIFModelField(),
+ # name of IOMMU device model eg virtio, intel, smmuv3, or auto
+ 'hw_viommu_model': fields.VIOMMUModelField(),
+
# "xen" vs "hvm"
'hw_vm_mode': fields.VMModeField(),
@@ -442,6 +467,12 @@ class ImageMetaProps(base.NovaObject):
# version of emulated TPM to use.
'hw_tpm_version': fields.TPMVersionField(),
+ # boolean - if true will enable ephemeral encryption for instance
+ 'hw_ephemeral_encryption': fields.FlexibleBooleanField(),
+ # encryption format to be used when ephemeral encryption is enabled
+ 'hw_ephemeral_encryption_format':
+ fields.BlockDeviceEncryptionFormatTypeField(),
+
# if true download using bittorrent
'img_bittorrent': fields.FlexibleBooleanField(),
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index e99762d277..fed1a7c58b 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -13,6 +13,7 @@
# under the License.
import contextlib
+import typing as ty
from oslo_config import cfg
from oslo_db import exception as db_exc
@@ -1226,6 +1227,46 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
pci_req for pci_req in self.pci_requests.requests
if pci_req.request_id != pci_device.request_id]
+ def get_pci_devices(
+ self,
+ source: ty.Optional[int] = None,
+ request_id: ty.Optional[str] = None,
+ ) -> ty.List["objects.PciDevice"]:
+ """Return the PCI devices allocated to the instance
+
+ :param source: Filter by source. It can be
+ InstancePCIRequest.FLAVOR_ALIAS or InstancePCIRequest.NEUTRON_PORT
+ or None. None means devices from both types of requests are returned.
+ :param request_id: Filter by PciDevice.request_id. None means do not
+ filter by request_id.
+ :return: a list of matching PciDevice objects
+ """
+ if not self.pci_devices:
+ # return early to avoid an extra lazy load on self.pci_requests
+ # if there are no devices allocated to be filtered
+ return []
+
+ devs = self.pci_devices.objects
+
+ if request_id is not None:
+ devs = [dev for dev in devs if dev.request_id == request_id]
+
+ if source is not None:
+ # NOTE(gibi): this happens to work for the old requests when the
+ # request has request_id None and therefore the device allocated
+ # due to that request has request_id None too, so they will be
+ # mapped via the None key.
+ req_id_to_req = {
+ req.request_id: req for req in self.pci_requests.requests
+ }
+ devs = [
+ dev
+ for dev in devs
+ if (req_id_to_req[dev.request_id].source == source)
+ ]
+
+ return devs
+
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
get_fault = expected_attrs and 'fault' in expected_attrs
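The new Instance.get_pci_devices() helper filters the instance's allocated devices by the originating request type and, optionally, by a specific request_id. A short usage sketch; the instance variable and the request UUID are placeholders:

    from nova import objects

    # Devices allocated for flavor (alias) based PCI requests only:
    flavor_devs = instance.get_pci_devices(
        source=objects.InstancePCIRequest.FLAVOR_ALIAS)

    # Devices allocated for one particular Neutron port request:
    port_devs = instance.get_pci_devices(
        source=objects.InstancePCIRequest.NEUTRON_PORT,
        request_id='11111111-2222-3333-4444-555555555555')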
diff --git a/nova/objects/instance_group.py b/nova/objects/instance_group.py
index 34cf40b7fe..f9fc7c7d45 100644
--- a/nova/objects/instance_group.py
+++ b/nova/objects/instance_group.py
@@ -36,8 +36,8 @@ LOG = logging.getLogger(__name__)
def _instance_group_get_query(context, id_field=None, id=None):
query = context.session.query(api_models.InstanceGroup).\
- options(orm.joinedload('_policies')).\
- options(orm.joinedload('_members'))
+ options(orm.joinedload(api_models.InstanceGroup._policies)).\
+ options(orm.joinedload(api_models.InstanceGroup._members))
if not context.is_admin:
query = query.filter_by(project_id=context.project_id)
if id and id_field:
@@ -84,16 +84,22 @@ def _instance_group_members_add(context, group, members):
def _instance_group_members_add_by_uuid(context, group_uuid, members):
# NOTE(melwitt): The condition on the join limits the number of members
# returned to only those we wish to check as already existing.
- group = context.session.query(api_models.InstanceGroup).\
- outerjoin(api_models.InstanceGroupMember,
- api_models.InstanceGroupMember.instance_uuid.in_(set(members))).\
- filter(api_models.InstanceGroup.uuid == group_uuid).\
- options(orm.contains_eager('_members')).first()
+ group = context.session.query(api_models.InstanceGroup).outerjoin(
+ api_models.InstanceGroupMember,
+ api_models.InstanceGroupMember.instance_uuid.in_(set(members))
+ ).filter(
+ api_models.InstanceGroup.uuid == group_uuid
+ ).options(orm.contains_eager(api_models.InstanceGroup._members)).first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
- return _instance_group_model_add(context, api_models.InstanceGroupMember,
- members, group._members, 'instance_uuid',
- group.id)
+ return _instance_group_model_add(
+ context,
+ api_models.InstanceGroupMember,
+ members,
+ group._members,
+ 'instance_uuid',
+ group.id,
+ )
# TODO(berrange): Remove NovaObjectDictCompat
diff --git a/nova/objects/instance_info_cache.py b/nova/objects/instance_info_cache.py
index 06e0af3f3b..506eb897c1 100644
--- a/nova/objects/instance_info_cache.py
+++ b/nova/objects/instance_info_cache.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_db import exception as db_exc
from oslo_log import log as logging
from nova.db.main import api as db
@@ -35,8 +36,8 @@ class InstanceInfoCache(base.NovaPersistentObject, base.NovaObject):
fields = {
'instance_uuid': fields.UUIDField(),
- 'network_info': fields.Field(fields.NetworkModel(), nullable=True),
- }
+ 'network_info': fields.NetworkModelField(nullable=True),
+ }
@staticmethod
def _from_db_object(context, info_cache, db_obj):
@@ -75,9 +76,25 @@ class InstanceInfoCache(base.NovaPersistentObject, base.NovaObject):
if 'network_info' in self.obj_what_changed():
nw_info_json = self.fields['network_info'].to_primitive(
self, 'network_info', self.network_info)
- rv = db.instance_info_cache_update(self._context,
- self.instance_uuid,
- {'network_info': nw_info_json})
+
+ inst_uuid = self.instance_uuid
+
+ try:
+ rv = db.instance_info_cache_update(
+ self._context, inst_uuid, {'network_info': nw_info_json})
+ except db_exc.DBReferenceError as exp:
+ if exp.key != 'instance_uuid':
+ raise
+ # NOTE(melwitt): It is possible for us to fail here with a
+ # foreign key constraint violation on instance_uuid when we
+ # attempt to save the instance network info cache after
+ # receiving a network-changed external event from neutron
+ # during a cross-cell migration. This means the instance record
+ # is not found in this cell database and we can raise
+ # InstanceNotFound to signal that in a way that callers know
+ # how to handle.
+ raise exception.InstanceNotFound(instance_id=inst_uuid)
+
self._from_db_object(self._context, self, rv)
self.obj_reset_changes()
diff --git a/nova/objects/instance_mapping.py b/nova/objects/instance_mapping.py
index 68f45cd8cc..ed75a958f2 100644
--- a/nova/objects/instance_mapping.py
+++ b/nova/objects/instance_mapping.py
@@ -99,7 +99,7 @@ class InstanceMapping(base.NovaTimestampObject, base.NovaObject):
@api_db_api.context_manager.reader
def _get_by_instance_uuid_from_db(context, instance_uuid):
db_mapping = context.session.query(api_models.InstanceMapping)\
- .options(orm.joinedload('cell_mapping'))\
+ .options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\
.filter(api_models.InstanceMapping.instance_uuid == instance_uuid)\
.first()
if not db_mapping:
@@ -312,7 +312,7 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
@api_db_api.context_manager.reader
def _get_by_project_id_from_db(context, project_id):
return context.session.query(api_models.InstanceMapping)\
- .options(orm.joinedload('cell_mapping'))\
+ .options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\
.filter(api_models.InstanceMapping.project_id == project_id).all()
@base.remotable_classmethod
@@ -326,7 +326,7 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
@api_db_api.context_manager.reader
def _get_by_cell_id_from_db(context, cell_id):
return context.session.query(api_models.InstanceMapping)\
- .options(orm.joinedload('cell_mapping'))\
+ .options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\
.filter(api_models.InstanceMapping.cell_id == cell_id).all()
@base.remotable_classmethod
@@ -339,7 +339,7 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
@api_db_api.context_manager.reader
def _get_by_instance_uuids_from_db(context, uuids):
return context.session.query(api_models.InstanceMapping)\
- .options(orm.joinedload('cell_mapping'))\
+ .options(orm.joinedload(api_models.InstanceMapping.cell_mapping))\
.filter(api_models.InstanceMapping.instance_uuid.in_(uuids))\
.all()
@@ -373,12 +373,16 @@ class InstanceMappingList(base.ObjectListBase, base.NovaObject):
# queued_for_delete was not run) and False (cases when the online
# data migration for queued_for_delete was run) are assumed to mean
# that the instance is not queued for deletion.
- query = (query.filter(sql.or_(
- api_models.InstanceMapping.queued_for_delete == sql.false(),
- api_models.InstanceMapping.queued_for_delete.is_(None)))
- .join('cell_mapping')
- .options(orm.joinedload('cell_mapping'))
- .filter(api_models.CellMapping.uuid == cell_uuid))
+ query = query.filter(
+ sql.or_(
+ api_models.InstanceMapping.queued_for_delete == sql.false(),
+ api_models.InstanceMapping.queued_for_delete.is_(None)
+ )
+ ).join(
+ api_models.InstanceMapping.cell_mapping
+ ).options(
+ orm.joinedload(api_models.InstanceMapping.cell_mapping)
+ ).filter(api_models.CellMapping.uuid == cell_uuid)
if limit is not None:
query = query.limit(limit)
return query.all()
diff --git a/nova/objects/instance_pci_requests.py b/nova/objects/instance_pci_requests.py
index ee94db87dd..3500c9a68d 100644
--- a/nova/objects/instance_pci_requests.py
+++ b/nova/objects/instance_pci_requests.py
@@ -18,10 +18,8 @@ from nova.objects import base
from nova.objects import fields
-# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
-class InstancePCIRequest(base.NovaObject,
- base.NovaObjectDictCompat):
+class InstancePCIRequest(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added request_id field
# Version 1.2: Added numa_policy field
@@ -70,10 +68,8 @@ class InstancePCIRequest(base.NovaObject,
del primitive['request_id']
-# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
-class InstancePCIRequests(base.NovaObject,
- base.NovaObjectDictCompat):
+class InstancePCIRequests(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: InstancePCIRequest 1.1
VERSION = '1.1'
@@ -155,9 +151,3 @@ class InstancePCIRequests(base.NovaObject,
'request_id': x.request_id,
'requester_id': x.requester_id} for x in self.requests]
return jsonutils.dumps(blob)
-
- @classmethod
- def from_request_spec_instance_props(cls, pci_requests):
- objs = [InstancePCIRequest(**request)
- for request in pci_requests['requests']]
- return cls(requests=objs, instance_uuid=pci_requests['instance_uuid'])
diff --git a/nova/objects/migrate_data.py b/nova/objects/migrate_data.py
index 06f30342e5..299f46d03b 100644
--- a/nova/objects/migrate_data.py
+++ b/nova/objects/migrate_data.py
@@ -55,7 +55,7 @@ class VIFMigrateData(obj_base.NovaObject):
# destination host is configured for all vif types. See the note in
# the libvirt driver here: https://review.opendev.org/#/c/551370/
# 29/nova/virt/libvirt/driver.py@7036
- 'source_vif': fields.Field(fields.NetworkVIFModel()),
+ 'source_vif': fields.NetworkVIFModelField(),
}
@property
@@ -279,6 +279,9 @@ class LibvirtLiveMigrateData(LiveMigrateData):
if (target_version < (1, 10) and
'src_supports_numa_live_migration' in primitive):
del primitive['src_supports_numa_live_migration']
+ if (target_version < (1, 10) and
+ 'dst_supports_numa_live_migration' in primitive):
+ del primitive['dst_supports_numa_live_migration']
if target_version < (1, 10) and 'dst_numa_info' in primitive:
del primitive['dst_numa_info']
if target_version < (1, 9) and 'vifs' in primitive:
@@ -338,42 +341,6 @@ class HyperVLiveMigrateData(LiveMigrateData):
@obj_base.NovaObjectRegistry.register
-class PowerVMLiveMigrateData(LiveMigrateData):
- # Version 1.0: Initial version
- # Version 1.1: Added the Virtual Ethernet Adapter VLAN mappings.
- # Version 1.2: Added old_vol_attachment_ids
- # Version 1.3: Added wait_for_vif_plugged
- # Version 1.4: Inherited vifs from LiveMigrateData
- VERSION = '1.4'
-
- fields = {
- 'host_mig_data': fields.DictOfNullableStringsField(),
- 'dest_ip': fields.StringField(),
- 'dest_user_id': fields.StringField(),
- 'dest_sys_name': fields.StringField(),
- 'public_key': fields.StringField(),
- 'dest_proc_compat': fields.StringField(),
- 'vol_data': fields.DictOfNullableStringsField(),
- 'vea_vlan_mappings': fields.DictOfNullableStringsField(),
- }
-
- def obj_make_compatible(self, primitive, target_version):
- super(PowerVMLiveMigrateData, self).obj_make_compatible(
- primitive, target_version)
- target_version = versionutils.convert_version_to_tuple(target_version)
- if target_version < (1, 4) and 'vifs' in primitive:
- del primitive['vifs']
- if target_version < (1, 3) and 'wait_for_vif_plugged' in primitive:
- del primitive['wait_for_vif_plugged']
- if target_version < (1, 2):
- if 'old_vol_attachment_ids' in primitive:
- del primitive['old_vol_attachment_ids']
- if target_version < (1, 1):
- if 'vea_vlan_mappings' in primitive:
- del primitive['vea_vlan_mappings']
-
-
-@obj_base.NovaObjectRegistry.register
class VMwareLiveMigrateData(LiveMigrateData):
VERSION = '1.0'
diff --git a/nova/objects/migration.py b/nova/objects/migration.py
index cacb636ccc..6f5f217b80 100644
--- a/nova/objects/migration.py
+++ b/nova/objects/migration.py
@@ -25,16 +25,21 @@ from nova.objects import fields
def determine_migration_type(migration):
- if migration['old_instance_type_id'] != migration['new_instance_type_id']:
- return 'resize'
+ if isinstance(migration, dict):
+ old_instance_type_id = migration['old_instance_type_id']
+ new_instance_type_id = migration['new_instance_type_id']
else:
- return 'migration'
+ old_instance_type_id = migration.old_instance_type_id
+ new_instance_type_id = migration.new_instance_type_id
+
+ if old_instance_type_id != new_instance_type_id:
+ return 'resize'
+
+ return 'migration'
-# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
-class Migration(base.NovaPersistentObject, base.NovaObject,
- base.NovaObjectDictCompat):
+class Migration(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: Added migration_type and hidden
@@ -82,7 +87,7 @@ class Migration(base.NovaPersistentObject, base.NovaObject,
value = determine_migration_type(db_migration)
elif key == 'uuid' and value is None:
continue
- migration[key] = value
+ setattr(migration, key, value)
migration._context = context
migration.obj_reset_changes()
@@ -210,6 +215,10 @@ class Migration(base.NovaPersistentObject, base.NovaObject,
def is_resize(self):
return self.migration_type == fields.MigrationType.RESIZE
+ @property
+ def is_same_host_resize(self):
+ return self.is_resize and self.source_node == self.dest_node
+
@base.NovaObjectRegistry.register
class MigrationList(base.ObjectListBase, base.NovaObject):
diff --git a/nova/objects/pci_device.py b/nova/objects/pci_device.py
index 0be94897e3..554d68feca 100644
--- a/nova/objects/pci_device.py
+++ b/nova/objects/pci_device.py
@@ -148,6 +148,12 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
reason='dev_type=%s not supported in version %s' % (
dev_type, target_version))
+ def __repr__(self):
+ return (
+ f'PciDevice(address={self.address}, '
+ f'compute_node_id={self.compute_node_id})'
+ )
+
def update_device(self, dev_dict):
"""Sync the content from device dictionary to device object.
@@ -175,6 +181,9 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
# NOTE(ralonsoh): list of parameters currently added to
# "extra_info" dict:
# - "capabilities": dict of (strings/list of strings)
+ # - "parent_ifname": the netdev name of the parent (PF)
+ # device of a VF
+ # - "mac_address": the MAC address of the PF
extra_info = self.extra_info
data = v if isinstance(v, str) else jsonutils.dumps(v)
extra_info.update({k: data})
@@ -346,10 +355,40 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
# Update PF status to CLAIMED if all of its dependants are free
# and set their status to UNCLAIMABLE
vfs_list = self.child_devices
- if not all([vf.is_available() for vf in vfs_list]):
- raise exception.PciDeviceVFInvalidStatus(
- compute_node_id=self.compute_node_id,
- address=self.address)
+ non_free_dependants = [
+ vf for vf in vfs_list if not vf.is_available()]
+ if non_free_dependants:
+ # NOTE(gibi): There should not be any dependent devices that
+ # are UNCLAIMABLE or UNAVAILABLE as the parent is AVAILABLE,
+ # but we got reports in bug 1969496 that this inconsistency
+ # can happen. So if the only non-free devices are in state
+ # UNCLAIMABLE or UNAVAILABLE then we log a warning but allow
+ # the parent to be claimed.
+ actual_statuses = {
+ child.status for child in non_free_dependants}
+ allowed_non_free_statuses = {
+ fields.PciDeviceStatus.UNCLAIMABLE,
+ fields.PciDeviceStatus.UNAVAILABLE,
+ }
+ if actual_statuses - allowed_non_free_statuses == set():
+ LOG.warning(
+ "Some child device of parent %s is in an inconsistent "
+ "state. If you can reproduce this warning then please "
+ "report a bug at "
+ "https://bugs.launchpad.net/nova/+filebug with "
+ "reproduction steps. Inconsistent children with "
+ "state: %s",
+ self.address,
+ ",".join(
+ "%s - %s" % (child.address, child.status)
+ for child in non_free_dependants
+ ),
+ )
+
+ else:
+ raise exception.PciDeviceVFInvalidStatus(
+ compute_node_id=self.compute_node_id,
+ address=self.address)
self._bulk_update_status(vfs_list,
fields.PciDeviceStatus.UNCLAIMABLE)
@@ -447,11 +486,30 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
instance.pci_devices.objects.append(copy.copy(self))
def remove(self):
- if self.status != fields.PciDeviceStatus.AVAILABLE:
+ # We allow removal of a device if it is unused. It can be unused
+ # either by being in available state or being in a state that shows
+ # that the parent or child device blocks the consumption of this device
+ expected_states = [
+ fields.PciDeviceStatus.AVAILABLE,
+ fields.PciDeviceStatus.UNAVAILABLE,
+ fields.PciDeviceStatus.UNCLAIMABLE,
+ ]
+ if self.status not in expected_states:
raise exception.PciDeviceInvalidStatus(
compute_node_id=self.compute_node_id,
address=self.address, status=self.status,
- hopestatus=[fields.PciDeviceStatus.AVAILABLE])
+ hopestatus=expected_states)
+ # Just to be on the safe side, do not allow removal of a device that has
+ # an owner even if the state of the device suggests that it is not
+ # owned.
+ if 'instance_uuid' in self and self.instance_uuid is not None:
+ raise exception.PciDeviceInvalidOwner(
+ compute_node_id=self.compute_node_id,
+ address=self.address,
+ owner=self.instance_uuid,
+ hopeowner=None,
+ )
+
self.status = fields.PciDeviceStatus.REMOVED
self.instance_uuid = None
self.request_id = None
@@ -511,6 +569,25 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
def is_available(self):
return self.status == fields.PciDeviceStatus.AVAILABLE
+ @property
+ def card_serial_number(self):
+ caps_json = self.extra_info.get('capabilities', "{}")
+ caps = jsonutils.loads(caps_json)
+ return caps.get('vpd', {}).get('card_serial_number')
+
+ @property
+ def sriov_cap(self):
+ caps_json = self.extra_info.get('capabilities', '{}')
+ caps = jsonutils.loads(caps_json)
+ return caps.get('sriov', {})
+
+ @property
+ def mac_address(self):
+ """The MAC address of the PF physical device or None if the device is
+ not a PF or if the MAC is not available.
+ """
+ return self.extra_info.get('mac_address')
+
@base.NovaObjectRegistry.register
class PciDeviceList(base.ObjectListBase, base.NovaObject):
@@ -550,3 +627,6 @@ class PciDeviceList(base.ObjectListBase, base.NovaObject):
parent_addr)
return base.obj_make_list(context, cls(context), objects.PciDevice,
db_dev_list)
+
+ def __repr__(self):
+ return f"PciDeviceList(objects={[repr(obj) for obj in self.objects]})"
diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py
index 9ce77a4043..a4ca77edf6 100644
--- a/nova/objects/request_spec.py
+++ b/nova/objects/request_spec.py
@@ -14,12 +14,15 @@
import copy
import itertools
+import typing as ty
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import versionutils
+from nova.compute import pci_placement_translator
+import nova.conf
from nova.db.api import api as api_db_api
from nova.db.api import models as api_models
from nova import exception
@@ -28,6 +31,7 @@ from nova.objects import base
from nova.objects import fields
from nova.objects import instance as obj_instance
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
REQUEST_SPEC_OPTIONAL_ATTRS = ['requested_destination',
@@ -248,9 +252,9 @@ class RequestSpec(base.NovaObject):
def _from_instance_pci_requests(self, pci_requests):
if isinstance(pci_requests, dict):
- pci_req_cls = objects.InstancePCIRequests
- self.pci_requests = pci_req_cls.from_request_spec_instance_props(
- pci_requests)
+ self.pci_requests = objects.InstancePCIRequests.obj_from_primitive(
+ pci_requests,
+ )
else:
self.pci_requests = pci_requests
@@ -473,6 +477,113 @@ class RequestSpec(base.NovaObject):
filt_props['requested_destination'] = self.requested_destination
return filt_props
+ @staticmethod
+ def _rc_from_request(spec: ty.Dict[str, ty.Any]) -> str:
+ return pci_placement_translator.get_resource_class(
+ spec.get("resource_class"),
+ spec.get("vendor_id"),
+ spec.get("product_id"),
+ )
+
+ @staticmethod
+ def _traits_from_request(spec: ty.Dict[str, ty.Any]) -> ty.Set[str]:
+ return pci_placement_translator.get_traits(spec.get("traits", ""))
+
+ def generate_request_groups_from_pci_requests(self):
+ if not CONF.filter_scheduler.pci_in_placement:
+ return False
+
+ for pci_request in self.pci_requests.requests:
+ if pci_request.source == objects.InstancePCIRequest.NEUTRON_PORT:
+ # TODO(gibi): Handle neutron based PCI requests here in a later
+ # cycle.
+ continue
+
+ if len(pci_request.spec) != 1:
+ # We are instantiating InstancePCIRequest objects with spec in
+ # two cases:
+ # 1) when a neutron port is translated to InstancePCIRequest
+ # object in
+ # nova.network.neutron.API.create_resource_requests
+ # 2) when the pci_passthrough:alias flavor extra_spec is
+ # translated to InstancePCIRequest objects in
+ # nova.pci.request._get_alias_from_config which enforces the
+ # json schema defined in nova.pci.request.
+ #
+ # In both cases only a single dict is added to the spec list.
+ # If we ever want to add support for multiple specs per request
+ # then we have to solve the issue that each spec can request a
+ # different resource class from placement. The only place in
+ # nova that currently handles multiple specs per request is
+ # nova.pci.utils.pci_device_prop_match() and it considers them
+ # as alternatives. So specs with different resource classes
+ # would mean alternative resource_class requests. This cannot
+ # be expressed today in the allocation_candidate query towards
+ # placement.
+ raise ValueError(
+ "PCI tracking in placement does not support multiple "
+ "specs per PCI request"
+ )
+
+ spec = pci_request.spec[0]
+
+ # The goal is to translate InstancePCIRequest to RequestGroup. Each
+ # InstancePCIRequest can be fulfilled from the whole RP tree. And
+ # a flavor based InstancePCIRequest might request more than one
+ # device (if count > 1) and those devices still need to be placed
+ # independently to RPs. So we could have two options to translate
+ # an InstancePCIRequest object to RequestGroup objects:
+ # 1) put all the requested resources from every
+ # InstancePCIRequest to the unsuffixed RequestGroup.
+ # 2) generate a separate RequestGroup for each individual device
+ # request
+ #
+ # While #1) feels simpler it has a big downside. The unsuffixed
+ # group will have a bulk request group resource provider mapping
+ # returned from placement. So there would be no easy way to later
+ # untangle which InstancePCIRequest is fulfilled by which RP, and
+ # therefore which PCI device should be used to allocate a specific
+ # device on the hypervisor during the PCI claim. Note that there
+ # could be multiple PF RPs providing the same type of resources but
+ # still we need to make sure that if a resource is allocated in
+ # placement from a specific RP (representing a physical device)
+ # then the PCI claim should consume resources from the same
+ # physical device.
+ #
+ # So we need at least a separate RequestGroup per
+ # InstancePCIRequest. However, for a InstancePCIRequest(count=2)
+ # that would mean a RequestGroup(RC:2) which would mean both
+ # resource should come from the same RP in placement. This is
+ # impossible for PF or PCI type requests and over restrictive for
+ # VF type requests. Therefore we need to generate one RequestGroup
+ # per requested device. So for InstancePCIRequest(count=2) we need
+ # to generate two separate RequestGroup(RC:1) objects.
+
+ # NOTE(gibi): If we have count=2 requests then the multiple
+ # RequestGroup split below only works if group_policy is set to
+ # none as group_policy=isolate would prevent allocating two VFs
+ # from the same PF. Fortunately
+ # nova.scheduler.utils.resources_from_request_spec() already
+ # defaults group_policy to none if it is not specified in the
+ # flavor and there are multiple RequestGroups in the RequestSpec.
+
+ for i in range(pci_request.count):
+ rg = objects.RequestGroup(
+ use_same_provider=True,
+ # we need to generate a unique ID for each group, so we use
+ # a counter
+ requester_id=f"{pci_request.request_id}-{i}",
+ # as we split count >= 2 requests to independent groups
+ # each group will have a resource request of one
+ resources={
+ self._rc_from_request(spec): 1
+ },
+ required_traits=self._traits_from_request(spec),
+ # TODO(gibi): later we can add support for complex trait
+ # queries here including forbidden_traits.
+ )
+ self.requested_resources.append(rg)
+
@classmethod
def from_components(
cls, context, instance_uuid, image, flavor,
@@ -539,6 +650,8 @@ class RequestSpec(base.NovaObject):
if port_resource_requests:
spec_obj.requested_resources.extend(port_resource_requests)
+ spec_obj.generate_request_groups_from_pci_requests()
+
# NOTE(gibi): later the scheduler adds more request level params but
# never overrides existing ones so we can initialize them here.
if request_level_params is None:
@@ -645,6 +758,7 @@ class RequestSpec(base.NovaObject):
except exception.InstanceGroupNotFound:
# NOTE(danms): Instance group may have been deleted
spec.instance_group = None
+ spec.scheduler_hints.pop('group', None)
if data_migrated:
spec.save()
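The note in generate_request_groups_from_pci_requests() argues for one single-unit RequestGroup per requested device, so a count=2 alias request becomes two groups rather than one group asking for two units. A toy illustration of the split, using plain dicts in place of RequestGroup, a made-up request, and a resource class name that is only illustrative of the vendor/product derived custom class:

    pci_request = {
        'request_id': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',  # placeholder
        'count': 2,
        'spec': [{'vendor_id': '15b3', 'product_id': '101e'}],
    }

    groups = []
    for i in range(pci_request['count']):
        groups.append({
            # Unique per-device requester id, so the placement provider
            # mapping can be tied back to an individual device request.
            'requester_id': '%s-%s' % (pci_request['request_id'], i),
            # Each group asks for exactly one unit of the resource class.
            'resources': {'CUSTOM_PCI_15B3_101E': 1},
        })

    assert len(groups) == 2 and all(
        sum(g['resources'].values()) == 1 for g in groups)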
diff --git a/nova/objects/service.py b/nova/objects/service.py
index c027412d9d..1a4629cc84 100644
--- a/nova/objects/service.py
+++ b/nova/objects/service.py
@@ -31,12 +31,12 @@ LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 60
+SERVICE_VERSION = 66
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
# time we bump the version, we will put an entry here to record the change,
-# along with any pertinent data. For things that we can programatically
+# along with any pertinent data. For things that we can programmatically
# detect that need a bump, we put something in _collect_things() below to
# assemble a dict of things we can check. For example, we pretty much always
# want to consider the compute RPC API version a thing that requires a service
@@ -213,16 +213,54 @@ SERVICE_VERSION_HISTORY = (
# Add support for interface attach operation with neutron extended resource
# request
{'compute_rpc': '6.0'},
+ # Version 61: Compute RPC v6.0:
+ # Add support for remotely-managed ports (vnic-type 'remote-managed')
+ {'compute_rpc': '6.0'},
+ # Version 62: Compute RPC v6.0:
+ # Add support for VDPA port attach/detach
+ {'compute_rpc': '6.0'},
+ # Version 63: Compute RPC v6.0:
+ # Add support for VDPA hotplug live migration and suspend/resume
+ {'compute_rpc': '6.0'},
+ # Version 64: Compute RPC v6.1:
+ # Add reimage_boot_volume parameter to rebuild_instance()
+ {'compute_rpc': '6.1'},
+ # Version 65: Compute RPC v6.1:
+ # Added stable local node identity
+ {'compute_rpc': '6.1'},
+ # Version 66: Compute RPC v6.2:
+ # Add target_state parameter to rebuild_instance()
+ {'compute_rpc': '6.2'},
)
-# This is used to raise an error at service startup if older than N-1 computes
-# are detected. Update this at the beginning of every release cycle to point to
-# the smallest service version that was added in N-1.
-OLDEST_SUPPORTED_SERVICE_VERSION = 'Xena'
+# This is the version after which we can rely on having a persistent
+# local node identity for single-node systems.
+NODE_IDENTITY_VERSION = 65
+
+# This is used to raise an error at service startup if older than supported
+# computes are detected.
+ # NOTE(sbauza): Please modify it this way:
+# * At the beginning of a non-SLURP release (eg. 2023.2 Bobcat) (or just after
+# the previous SLURP release RC1, like 2023.1 Antelope), please bump
+# OLDEST_SUPPORTED_SERVICE_VERSION to the previous SLURP release (in that
+# example, Antelope)
+# * At the beginning of a SLURP release (eg. 2024.1 C) (or just after the
+# previous non-SLURP release RC1, like 2023.2 Bobcat), please keep the
+# OLDEST_SUPPORTED_SERVICE_VERSION value using the previous SLURP release
+# (in that example, Antelope)
+# * At the end of any release (SLURP or non-SLURP), please modify
+# SERVICE_VERSION_ALIASES to add a key/value with key being the release name
+ # and the value being the latest service version that the release supports
+ # (for example, before Bobcat RC1, please add 'Bobcat': XX where XX is the
+ # latest service version that was added)
+OLDEST_SUPPORTED_SERVICE_VERSION = 'Antelope'
SERVICE_VERSION_ALIASES = {
'Victoria': 52,
'Wallaby': 54,
'Xena': 57,
+ 'Yoga': 61,
+ 'Zed': 64,
+ 'Antelope': 66,
}
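The alias table above is what turns OLDEST_SUPPORTED_SERVICE_VERSION into a concrete numeric floor for the startup check. A tiny sketch of that resolution using the values from this hunk (the check function itself is illustrative):

    SERVICE_VERSION_ALIASES = {
        'Victoria': 52, 'Wallaby': 54, 'Xena': 57,
        'Yoga': 61, 'Zed': 64, 'Antelope': 66,
    }
    OLDEST_SUPPORTED_SERVICE_VERSION = 'Antelope'

    def too_old(found_service_version):
        # Anything below the aliased floor should abort service startup.
        floor = SERVICE_VERSION_ALIASES[OLDEST_SUPPORTED_SERVICE_VERSION]
        return found_service_version < floor

    print(too_old(64))  # True  (Zed-level compute, older than Antelope)
    print(too_old(66))  # False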
diff --git a/nova/pci/devspec.py b/nova/pci/devspec.py
index 4a663c81ac..386005c8eb 100644
--- a/nova/pci/devspec.py
+++ b/nova/pci/devspec.py
@@ -12,6 +12,7 @@
# under the License.
import abc
+import copy
import re
import string
import typing as ty
@@ -19,7 +20,10 @@ import typing as ty
from nova import exception
from nova.i18n import _
from nova import objects
+from nova.pci.request import PCI_REMOTE_MANAGED_TAG
from nova.pci import utils
+from oslo_log import log as logging
+from oslo_utils import strutils
MAX_VENDOR_ID = 0xFFFF
MAX_PRODUCT_ID = 0xFFFF
@@ -30,6 +34,7 @@ MAX_SLOT = 0x1F
ANY = '*'
REGEX_ANY = '.*'
+LOG = logging.getLogger(__name__)
PCISpecAddressType = ty.Union[ty.Dict[str, str], str]
@@ -37,7 +42,7 @@ PCISpecAddressType = ty.Union[ty.Dict[str, str], str]
class PciAddressSpec(metaclass=abc.ABCMeta):
"""Abstract class for all PCI address spec styles
- This class checks the address fields of the pci.passthrough_whitelist
+ This class checks the address fields of the pci.device_spec
"""
def __init__(self, pci_addr: str) -> None:
@@ -66,11 +71,11 @@ class PciAddressSpec(metaclass=abc.ABCMeta):
try:
v = int(a, 16)
except ValueError:
- raise exception.PciConfigInvalidWhitelist(
+ raise exception.PciConfigInvalidSpec(
reason=_("property %(property)s ('%(attr)s') does not parse "
"as a hex number.") % {'property': prop, 'attr': a})
if v > maxval:
- raise exception.PciConfigInvalidWhitelist(
+ raise exception.PciConfigInvalidSpec(
reason=_("property %(property)s (%(attr)s) is greater than "
"the maximum allowable value (%(max)X).") %
{'property': prop, 'attr': a, 'max': maxval})
@@ -111,6 +116,9 @@ class PhysicalPciAddress(PciAddressSpec):
]
return all(conditions)
+ def __str__(self):
+ return f'{self.domain}:{self.bus}:{self.slot}.{self.func}'
+
class PciAddressGlobSpec(PciAddressSpec):
"""Manages the address fields with glob style.
@@ -188,19 +196,19 @@ class PciAddressRegexSpec(PciAddressSpec):
class WhitelistPciAddress(object):
"""Manages the address fields of the whitelist.
- This class checks the address fields of the pci.passthrough_whitelist
+ This class checks the address fields of the pci.device_spec
configuration option, validating the address fields.
Example configs:
| [pci]
- | passthrough_whitelist = {"address":"*:0a:00.*",
- | "physical_network":"physnet1"}
- | passthrough_whitelist = {"address": {"domain": ".*",
- "bus": "02",
- "slot": "01",
- "function": "[0-2]"},
- "physical_network":"net1"}
- | passthrough_whitelist = {"vendor_id":"1137","product_id":"0071"}
+ | device_spec = {"address":"*:0a:00.*",
+ | "physical_network":"physnet1"}
+ | device_spec = {"address": {"domain": ".*",
+ "bus": "02",
+ "slot": "01",
+ "function": "[0-2]"},
+ "physical_network":"net1"}
+ | device_spec = {"vendor_id":"1137","product_id":"0071"}
"""
@@ -247,7 +255,7 @@ class WhitelistPciAddress(object):
# Try to match on the parent PCI address if the PciDeviceSpec is a
# PF (sriov is available) and the device to match is a VF. This
# makes it possible to specify the PCI address of a PF in the
- # pci.passthrough_whitelist to match any of its VFs' PCI addresses.
+ # pci.device_spec to match any of its VFs' PCI addresses.
if self.is_physical_function and pci_phys_addr:
pci_phys_addr_obj = PhysicalPciAddress(pci_phys_addr)
if self.pci_address_spec.match(pci_phys_addr_obj):
@@ -260,9 +268,27 @@ class WhitelistPciAddress(object):
class PciDeviceSpec(PciAddressSpec):
def __init__(self, dev_spec: ty.Dict[str, str]) -> None:
+ # stored for better error reporting
+ self.dev_spec_conf = copy.deepcopy(dev_spec)
+ # the non tag fields (i.e. address, devname) will be removed by
+ # _init_dev_details
self.tags = dev_spec
self._init_dev_details()
+ def _address_obj(self) -> ty.Optional[WhitelistPciAddress]:
+ address_obj = None
+ if self.dev_name:
+ address_str, pf = utils.get_function_by_ifname(self.dev_name)
+ if not address_str:
+ return None
+            # Note(moshele): In this case we are always passing a string
+ # of the PF pci address
+ address_obj = WhitelistPciAddress(address_str, pf)
+ else: # use self.address
+ address_obj = self.address
+
+ return address_obj
+
def _init_dev_details(self) -> None:
self.vendor_id = self.tags.pop("vendor_id", ANY)
self.product_id = self.tags.pop("product_id", ANY)
@@ -283,19 +309,72 @@ class PciDeviceSpec(PciAddressSpec):
if not self.dev_name:
self.address = WhitelistPciAddress(address or '*:*:*.*', False)
- def match(self, dev_dict: ty.Dict[str, str]) -> bool:
- address_obj: ty.Optional[WhitelistPciAddress]
-
- if self.dev_name:
- address_str, pf = utils.get_function_by_ifname(self.dev_name)
- if not address_str:
- return False
- # Note(moshele): In this case we always passing a string
- # of the PF pci address
- address_obj = WhitelistPciAddress(address_str, pf)
- else: # use self.address
- address_obj = self.address
-
+ # PFs with remote_managed tags are explicitly not supported. If they
+        # are tagged as such by mistake in the whitelist, Nova will
+ # raise an exception. The reason for excluding PFs is the lack of a way
+ # for an instance to access the control plane at the remote side (e.g.
+ # on a DPU) for managing the PF representor corresponding to the PF.
+ address_obj = self._address_obj()
+ self._remote_managed = strutils.bool_from_string(
+ self.tags.get(PCI_REMOTE_MANAGED_TAG))
+ if self._remote_managed:
+ if address_obj is None:
+ # Note that this will happen if a netdev was specified in the
+ # whitelist but it is not actually present on a system - in
+ # this case Nova is not able to look up an address by
+ # a netdev name.
+ raise exception.PciDeviceRemoteManagedNotPresent()
+ elif address_obj.is_physical_function:
+ pf_addr = str(address_obj.pci_address_spec)
+ vf_product_id = utils.get_vf_product_id_by_pf_addr(pf_addr)
+ # VF vendor IDs have to match the corresponding PF vendor IDs
+ # per the SR-IOV spec so we use it for matching here.
+ pf_vendor_id, pf_product_id = utils.get_pci_ids_by_pci_addr(
+ pf_addr)
+ # Check the actual vendor ID and VF product ID of an assumed
+ # VF (based on the actual PF). The VF product ID must match
+ # the actual one if this is a VF device spec.
+ if (self.product_id == vf_product_id and
+ self.vendor_id in (pf_vendor_id, ANY)):
+ pass
+ elif (self.product_id in (pf_product_id, ANY) and
+ self.vendor_id in (pf_vendor_id, ANY)):
+ raise exception.PciDeviceInvalidPFRemoteManaged(
+ address_obj.pci_address_spec)
+ else:
+ # The specified product and vendor IDs of what is supposed
+ # to be a VF corresponding to the PF PCI address do not
+ # match the actual ones for this PF. This means that the
+ # whitelist is invalid.
+ raise exception.PciConfigInvalidSpec(
+ reason=_('the specified VF vendor ID %(vendor_id)s and'
+ ' product ID %(product_id)s do not match the'
+ ' expected VF IDs based on the corresponding'
+ ' PF identified by PCI address %(pf_addr)s') %
+ {'vendor_id': self.vendor_id,
+ 'product_id': self.product_id,
+ 'pf_addr': pf_addr})
+
+ def _ensure_remote_managed_dev_vpd_serial(
+ self, dev_dict: ty.Dict[str, ty.Any]) -> bool:
+ """Ensure the presence of a serial number field in PCI VPD.
+
+ A card serial number extracted from PCI VPD is required to allow a
+ networking backend to identify which remote host needs to program a
+ given device. So if a device is tagged as remote_managed, it must
+ have the card serial number or be filtered out.
+ """
+ if not self._remote_managed:
+ return True
+ card_sn = dev_dict.get('capabilities', {}).get(
+ 'vpd', {}).get('card_serial_number')
+ # None or empty card_serial_number should be filtered out. That would
+ # mean either no serial number in the VPD (if present at all) or SN is
+ # an empty string which is not useful for device identification.
+ return bool(card_sn)
+
+ def match(self, dev_dict: ty.Dict[str, ty.Any]) -> bool:
+ address_obj: ty.Optional[WhitelistPciAddress] = self._address_obj()
if not address_obj:
return False
@@ -303,13 +382,20 @@ class PciDeviceSpec(PciAddressSpec):
self.vendor_id in (ANY, dev_dict['vendor_id']),
self.product_id in (ANY, dev_dict['product_id']),
address_obj.match(dev_dict['address'],
- dev_dict.get('parent_addr'))])
+ dev_dict.get('parent_addr')),
+ self._ensure_remote_managed_dev_vpd_serial(dev_dict),
+ ])
def match_pci_obj(self, pci_obj: 'objects.PciDevice') -> bool:
- return self.match({'vendor_id': pci_obj.vendor_id,
- 'product_id': pci_obj.product_id,
- 'address': pci_obj.address,
- 'parent_addr': pci_obj.parent_addr})
+ dev_dict = {
+ 'vendor_id': pci_obj.vendor_id,
+ 'product_id': pci_obj.product_id,
+ 'address': pci_obj.address,
+ 'parent_addr': pci_obj.parent_addr,
+ 'capabilities': {
+ 'vpd': {'card_serial_number': pci_obj.card_serial_number}}
+ }
+ return self.match(dev_dict)
def get_tags(self) -> ty.Dict[str, str]:
return self.tags
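To illustrate the VPD-based filtering introduced above, here is a self-contained sketch; the helper name and the device dicts are made up for illustration (nova does this inside PciDeviceSpec._ensure_remote_managed_dev_vpd_serial), showing which candidate devices a remote_managed spec would keep:

import typing as ty


def has_usable_vpd_serial(dev_dict: ty.Dict[str, ty.Any]) -> bool:
    # An absent or empty card_serial_number makes the device unusable
    # for remote-managed (e.g. DPU-backed) assignment.
    card_sn = dev_dict.get('capabilities', {}).get(
        'vpd', {}).get('card_serial_number')
    return bool(card_sn)


devs = [
    {'address': '0000:81:00.2',
     'capabilities': {'vpd': {'card_serial_number': 'MT2113X00000'}}},
    {'address': '0000:81:00.3', 'capabilities': {}},         # no VPD at all
    {'address': '0000:81:00.4',
     'capabilities': {'vpd': {'card_serial_number': ''}}},   # empty serial
]

# Only the first device stays matchable by a remote_managed device spec.
assert [d['address'] for d in devs if has_usable_vpd_serial(d)] == \
    ['0000:81:00.2']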
diff --git a/nova/pci/manager.py b/nova/pci/manager.py
index fc6a841724..af6d72521b 100644
--- a/nova/pci/manager.py
+++ b/nova/pci/manager.py
@@ -69,7 +69,7 @@ class PciDevTracker(object):
"""
self.stale: ty.Dict[str, objects.PciDevice] = {}
self.node_id: str = compute_node.id
- self.dev_filter = whitelist.Whitelist(CONF.pci.passthrough_whitelist)
+ self.dev_filter = whitelist.Whitelist(CONF.pci.device_spec)
numa_topology = compute_node.numa_topology
if numa_topology:
# For legacy reasons, the NUMATopology is stored as a JSON blob.
@@ -133,7 +133,7 @@ class PciDevTracker(object):
try:
if self.dev_filter.device_assignable(dev):
devices.append(dev)
- except exception.PciConfigInvalidWhitelist as e:
+ except exception.PciConfigInvalidSpec as e:
# The raised exception is misleading as the problem is not with
# the whitelist config but with the host PCI device reported by
# libvirt. The code that matches the host PCI device to the
@@ -164,7 +164,7 @@ class PciDevTracker(object):
# parse whitelist config with
# devspec.PciAddressSpec._set_pci_dev_info()
str(e).replace(
- 'Invalid PCI devices Whitelist config:', 'The'))
+ 'Invalid [pci]device_spec config:', 'The'))
self._set_hvdevs(devices)
@@ -217,11 +217,14 @@ class PciDevTracker(object):
# from the pci whitelist.
try:
existed.remove()
- except exception.PciDeviceInvalidStatus as e:
- LOG.warning("Unable to remove device with %(status)s "
- "ownership %(instance_uuid)s because of "
- "%(pci_exception)s. "
- "Check your [pci]passthrough_whitelist "
+ except (
+ exception.PciDeviceInvalidStatus,
+ exception.PciDeviceInvalidOwner,
+ ) as e:
+ LOG.warning("Unable to remove device with status "
+ "'%(status)s' and ownership %(instance_uuid)s "
+ "because of %(pci_exception)s. "
+ "Check your [pci]device_spec "
"configuration to make sure this allocated "
"device is whitelisted. If you have removed "
"the device from the whitelist intentionally "
@@ -250,7 +253,10 @@ class PciDevTracker(object):
else:
# Note(yjiang5): no need to update stats if an assigned
# device is hot removed.
- self.stats.remove_device(existed)
+ # NOTE(gibi): only remove the device from the pools if it
+ # is not already removed
+ if existed in self.stats.get_free_devs():
+ self.stats.remove_device(existed)
else:
# Update tracked devices.
new_value: ty.Dict[str, ty.Any]
@@ -459,8 +465,8 @@ class PciDevTracker(object):
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
"""
- existed = set(inst['uuid'] for inst in instances)
- existed |= set(mig['instance_uuid'] for mig in migrations)
+ existed = set(inst.uuid for inst in instances)
+ existed |= set(mig.instance_uuid for mig in migrations)
# need to copy keys, because the dict is modified in the loop body
for uuid in list(self.claims):
@@ -474,24 +480,3 @@ class PciDevTracker(object):
devs = self.allocations.pop(uuid, [])
for dev in devs:
self._free_device(dev)
-
-
-def get_instance_pci_devs(
- inst: 'objects.Instance', request_id: str = None,
-) -> ty.List['objects.PciDevice']:
- """Get the devices allocated to one or all requests for an instance.
-
- - For generic PCI request, the request id is None.
- - For sr-iov networking, the request id is a valid uuid
- - There are a couple of cases where all the PCI devices allocated to an
- instance need to be returned. Refer to libvirt driver that handles
- soft_reboot and hard_boot of 'xen' instances.
- """
- pci_devices = inst.pci_devices
- if pci_devices is None:
- return []
-
- return [
- device for device in pci_devices if
- device.request_id == request_id or request_id == 'all'
- ]
diff --git a/nova/pci/request.py b/nova/pci/request.py
index 01ea1ae112..8ae2385549 100644
--- a/nova/pci/request.py
+++ b/nova/pci/request.py
@@ -43,6 +43,7 @@ import typing as ty
import jsonschema
from oslo_log import log as logging
from oslo_serialization import jsonutils
+from oslo_utils import uuidutils
import nova.conf
from nova import context as ctx
@@ -58,6 +59,7 @@ Alias = ty.Dict[str, ty.Tuple[str, ty.List[ty.Dict[str, str]]]]
PCI_NET_TAG = 'physical_network'
PCI_TRUSTED_TAG = 'trusted'
PCI_DEVICE_TYPE_TAG = 'dev_type'
+PCI_REMOTE_MANAGED_TAG = 'remote_managed'
DEVICE_TYPE_FOR_VNIC_TYPE = {
network_model.VNIC_TYPE_DIRECT_PHYSICAL: obj_fields.PciDeviceType.SRIOV_PF,
@@ -104,6 +106,12 @@ _ALIAS_SCHEMA = {
"type": "string",
"enum": list(obj_fields.PCINUMAAffinityPolicy.ALL),
},
+ "resource_class": {
+ "type": "string",
+ },
+ "traits": {
+ "type": "string",
+ },
},
"required": ["name"],
}
@@ -112,7 +120,7 @@ _ALIAS_SCHEMA = {
def _get_alias_from_config() -> Alias:
"""Parse and validate PCI aliases from the nova config.
- :returns: A dictionary where the keys are device names and the values are
+ :returns: A dictionary where the keys are alias names and the values are
tuples of form ``(numa_policy, specs)``. ``numa_policy`` describes the
required NUMA affinity of the device(s), while ``specs`` is a list of
PCI device specs.
@@ -160,7 +168,7 @@ def _get_alias_from_config() -> Alias:
def _translate_alias_to_requests(
- alias_spec: str, affinity_policy: str = None,
+ alias_spec: str, affinity_policy: ty.Optional[str] = None,
) -> ty.List['objects.InstancePCIRequest']:
"""Generate complete pci requests from pci aliases in extra_spec."""
pci_aliases = _get_alias_from_config()
@@ -182,7 +190,9 @@ def _translate_alias_to_requests(
count=int(count),
spec=spec,
alias_name=name,
- numa_policy=policy))
+ numa_policy=policy,
+ request_id=uuidutils.generate_uuid(),
+ ))
return pci_requests
@@ -245,7 +255,7 @@ def get_instance_pci_request_from_vif(
def get_pci_requests_from_flavor(
- flavor: 'objects.Flavor', affinity_policy: str = None,
+ flavor: 'objects.Flavor', affinity_policy: ty.Optional[str] = None,
) -> 'objects.InstancePCIRequests':
"""Validate and return PCI requests.
diff --git a/nova/pci/stats.py b/nova/pci/stats.py
index e8e810fa4f..c6e4844b34 100644
--- a/nova/pci/stats.py
+++ b/nova/pci/stats.py
@@ -13,17 +13,19 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-
+import collections
import copy
import typing as ty
from oslo_config import cfg
from oslo_log import log as logging
+from oslo_utils import strutils
from nova import exception
from nova import objects
from nova.objects import fields
from nova.objects import pci_device_pool
+from nova.pci.request import PCI_REMOTE_MANAGED_TAG
from nova.pci import utils
from nova.pci import whitelist
@@ -62,12 +64,25 @@ class PciDeviceStats(object):
"""
pool_keys = ['product_id', 'vendor_id', 'numa_node', 'dev_type']
+ # these can be specified in the [pci]device_spec and can be requested via
+ # the PCI alias, but they are matched by the placement
+ # allocation_candidates query, so we can ignore them during pool creation
+ # and during filtering here
+ ignored_spec_tags = ignored_pool_tags = ['resource_class', 'traits']
+ # this is a metadata key in the spec that is matched
+ # specially in _filter_pools_based_on_placement_allocation. So we can
+    # ignore it in the general matching logic.
+ ignored_spec_tags += ['rp_uuids']
+ # this is a metadata key in the pool that is matched
+ # specially in _filter_pools_based_on_placement_allocation. So we can
+    # ignore it in the general matching logic.
+ ignored_pool_tags += ['rp_uuid']
def __init__(
self,
numa_topology: 'objects.NUMATopology',
stats: 'objects.PCIDevicePoolList' = None,
- dev_filter: whitelist.Whitelist = None,
+ dev_filter: ty.Optional[whitelist.Whitelist] = None,
) -> None:
self.numa_topology = numa_topology
self.pools = (
@@ -75,7 +90,7 @@ class PciDeviceStats(object):
)
self.pools.sort(key=lambda item: len(item))
self.dev_filter = dev_filter or whitelist.Whitelist(
- CONF.pci.passthrough_whitelist)
+ CONF.pci.device_spec)
def _equal_properties(
self, dev: Pool, entry: Pool, matching_keys: ty.List[str],
@@ -95,6 +110,28 @@ class PciDeviceStats(object):
return None
+ @staticmethod
+ def _ensure_remote_managed_tag(
+ dev: 'objects.PciDevice', pool: Pool):
+ """Add a remote_managed tag depending on a device type if needed.
+
+ Network devices may be managed remotely, e.g. by a SmartNIC DPU. If
+ a tag has not been explicitly provided, populate it by assuming that
+ a device is not remote managed by default.
+ """
+ if dev.dev_type not in (fields.PciDeviceType.SRIOV_VF,
+ fields.PciDeviceType.SRIOV_PF,
+ fields.PciDeviceType.VDPA):
+ return
+
+ # A tag is added here rather than at the client side to avoid an
+ # issue with having objects without this tag specified during an
+ # upgrade to the first version that supports handling this tag.
+ if pool.get(PCI_REMOTE_MANAGED_TAG) is None:
+ # NOTE: tags are compared as strings case-insensitively, see
+ # pci_device_prop_match in nova/pci/utils.py.
+ pool[PCI_REMOTE_MANAGED_TAG] = 'false'
+
def _create_pool_keys_from_dev(
self, dev: 'objects.PciDevice',
) -> ty.Optional[Pool]:
@@ -110,8 +147,22 @@ class PciDeviceStats(object):
return None
tags = devspec.get_tags()
pool = {k: getattr(dev, k) for k in self.pool_keys}
+
if tags:
- pool.update(tags)
+ pool.update(
+ {
+ k: v
+ for k, v in tags.items()
+ if k not in self.ignored_pool_tags
+ }
+ )
+ # NOTE(gibi): since PCI in placement maps a PCI dev or a PF to a
+ # single RP and the scheduler allocates from a specific RP we need
+ # to split the pools by PCI or PF address. We can still keep
+ # the VFs from the same parent PF in a single pool though as they
+ # are equivalent from placement perspective.
+ pool['address'] = dev.parent_addr or dev.address
+
# NOTE(gibi): parent_ifname acts like a tag during pci claim but
# not provided as part of the whitelist spec as it is auto detected
# by the virt driver.
@@ -120,6 +171,9 @@ class PciDeviceStats(object):
# already in placement.
if dev.extra_info.get('parent_ifname'):
pool['parent_ifname'] = dev.extra_info['parent_ifname']
+
+ self._ensure_remote_managed_tag(dev, pool)
+
return pool
def _get_pool_with_device_type_mismatch(
@@ -197,6 +251,17 @@ class PciDeviceStats(object):
free_devs.extend(pool['devices'])
return free_devs
+ def _allocate_devs(
+ self, pool: Pool, num: int, request_id: str
+ ) -> ty.List["objects.PciDevice"]:
+ alloc_devices = []
+ for _ in range(num):
+ pci_dev = pool['devices'].pop()
+ self._handle_device_dependents(pci_dev)
+ pci_dev.request_id = request_id
+ alloc_devices.append(pci_dev)
+ return alloc_devices
+
def consume_requests(
self,
pci_requests: 'objects.InstancePCIRequests',
@@ -208,7 +273,10 @@ class PciDeviceStats(object):
for request in pci_requests:
count = request.count
- pools = self._filter_pools(self.pools, request, numa_cells)
+ rp_uuids = self._get_rp_uuids_for_request(
+ request=request, provider_mapping=None)
+ pools = self._filter_pools(
+ self.pools, request, numa_cells, rp_uuids=rp_uuids)
# Failed to allocate the required number of devices. Return the
# devices already allocated during previous iterations back to
@@ -222,22 +290,31 @@ class PciDeviceStats(object):
"on the compute node semaphore.")
for d in range(len(alloc_devices)):
self.add_device(alloc_devices.pop())
- return None
-
- for pool in pools:
- if pool['count'] >= count:
- num_alloc = count
- else:
- num_alloc = pool['count']
- count -= num_alloc
- pool['count'] -= num_alloc
- for d in range(num_alloc):
- pci_dev = pool['devices'].pop()
- self._handle_device_dependents(pci_dev)
- pci_dev.request_id = request.request_id
- alloc_devices.append(pci_dev)
- if count == 0:
- break
+ raise exception.PciDeviceRequestFailed(requests=pci_requests)
+
+ if not rp_uuids:
+ # if there is no placement allocation then we are free to
+ # consume from the pools in any order:
+ for pool in pools:
+ if pool['count'] >= count:
+ num_alloc = count
+ else:
+ num_alloc = pool['count']
+ count -= num_alloc
+ pool['count'] -= num_alloc
+ alloc_devices += self._allocate_devs(
+ pool, num_alloc, request.request_id)
+ if count == 0:
+ break
+ else:
+ # but if there is placement allocation then we have to follow
+ # it
+ requested_devs_per_pool_rp = collections.Counter(rp_uuids)
+ for pool in pools:
+ count = requested_devs_per_pool_rp[pool['rp_uuid']]
+ pool['count'] -= count
+ alloc_devices += self._allocate_devs(
+ pool, count, request.request_id)
return alloc_devices
@@ -252,8 +329,12 @@ class PciDeviceStats(object):
if pci_dev.dev_type == fields.PciDeviceType.SRIOV_PF:
vfs_list = pci_dev.child_devices
if vfs_list:
+ free_devs = self.get_free_devs()
for vf in vfs_list:
- self.remove_device(vf)
+                    # NOTE(gibi): do not try to remove a device that is
+                    # already removed
+ if vf in free_devs:
+ self.remove_device(vf)
elif pci_dev.dev_type in (
fields.PciDeviceType.SRIOV_VF,
fields.PciDeviceType.VDPA,
@@ -282,7 +363,15 @@ class PciDeviceStats(object):
:returns: A list of pools that can be used to support the request if
this is possible.
"""
- request_specs = request.spec
+
+ def ignore_keys(spec):
+ return {
+ k: v
+ for k, v in spec.items()
+ if k not in self.ignored_spec_tags
+ }
+
+ request_specs = [ignore_keys(spec) for spec in request.spec]
return [
pool for pool in pools
if utils.pci_device_prop_match(pool, request_specs)
@@ -458,11 +547,73 @@ class PciDeviceStats(object):
]
return pools
+ def _filter_pools_for_unrequested_remote_managed_devices(
+ self, pools: ty.List[Pool], request: 'objects.InstancePCIRequest',
+ ) -> ty.List[Pool]:
+ """Filter out pools with remote_managed devices, unless requested.
+
+ Remote-managed devices are not usable for legacy SR-IOV or hardware
+ offload scenarios and must be excluded from allocation.
+
+ :param pools: A list of PCI device pool dicts
+ :param request: An InstancePCIRequest object describing the type,
+ quantity and required NUMA affinity of device(s) we want.
+ :returns: A list of pools that can be used to support the request if
+ this is possible.
+ """
+ if all(not strutils.bool_from_string(spec.get(PCI_REMOTE_MANAGED_TAG))
+ for spec in request.spec):
+ pools = [pool for pool in pools
+ if not strutils.bool_from_string(
+ pool.get(PCI_REMOTE_MANAGED_TAG))]
+ return pools
+
+ def _filter_pools_based_on_placement_allocation(
+ self,
+ pools: ty.List[Pool],
+ request: 'objects.InstancePCIRequest',
+ rp_uuids: ty.List[str],
+ ) -> ty.List[Pool]:
+ if not rp_uuids:
+ # If there is no placement allocation then we don't need to filter
+ # by it. This could happen if the instance only has neutron port
+ # based InstancePCIRequest as that is currently not having
+ # placement allocation (except for QoS ports, but that handled in a
+ # separate codepath) or if the [filter_scheduler]pci_in_placement
+ # configuration option is not enabled in the scheduler.
+ return pools
+
+ requested_dev_count_per_rp = collections.Counter(rp_uuids)
+ matching_pools = []
+ for pool in pools:
+ rp_uuid = pool.get('rp_uuid')
+ if rp_uuid is None:
+ # NOTE(gibi): As rp_uuids is not empty the scheduler allocated
+ # PCI resources on this host, so we know that
+ # [pci]report_in_placement is enabled on this host. But this
+ # pool has no RP mapping which can only happen if the pool
+ # contains PCI devices with physical_network tag, as those
+                # devices are not yet reported in placement. But if they are
+                # not reported then we can ignore them here too.
+ continue
+
+ if (
+ # the placement allocation contains this pool
+ rp_uuid in requested_dev_count_per_rp and
+ # the amount of dev allocated in placement can be consumed
+ # from the pool
+ pool["count"] >= requested_dev_count_per_rp[rp_uuid]
+ ):
+ matching_pools.append(pool)
+
+ return matching_pools
+
def _filter_pools(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']],
+ rp_uuids: ty.List[str],
) -> ty.Optional[ty.List[Pool]]:
"""Determine if an individual PCI request can be met.
@@ -477,6 +628,9 @@ class PciDeviceStats(object):
quantity and required NUMA affinity of device(s) we want.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
+        :param rp_uuids: A list of RP uuids this request is fulfilled from in
+            placement, so here we only have to consider the pools matching
+            these RP uuids
:returns: A list of pools that can be used to support the request if
this is possible, else None.
"""
@@ -547,6 +701,33 @@ class PciDeviceStats(object):
before_count - after_count
)
+ # If we're not requesting remote_managed devices then we should not
+ # use these either. Exclude them.
+ before_count = after_count
+ pools = self._filter_pools_for_unrequested_remote_managed_devices(
+ pools, request)
+ after_count = sum([pool['count'] for pool in pools])
+
+ if after_count < before_count:
+ LOG.debug(
+                'Dropped %d device(s) as they are remote-managed devices '
+                'which we have not requested',
+ before_count - after_count
+ )
+
+ # if there is placement allocation for the request then we have to
+ # remove the pools that are not in the placement allocation
+ before_count = after_count
+ pools = self._filter_pools_based_on_placement_allocation(
+ pools, request, rp_uuids)
+ after_count = sum([pool['count'] for pool in pools])
+ if after_count < before_count:
+ LOG.debug(
+ 'Dropped %d device(s) that are not part of the placement '
+ 'allocation',
+ before_count - after_count
+ )
+
if after_count < request.count:
LOG.debug('Not enough PCI devices left to satisfy request')
return None
@@ -556,6 +737,7 @@ class PciDeviceStats(object):
def support_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Determine if the PCI requests can be met.
@@ -569,20 +751,38 @@ class PciDeviceStats(object):
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
+ :param provider_mapping: A dict keyed by RequestGroup requester_id,
+ to a list of resource provider UUIDs which provide resource
+ for that RequestGroup. If it is None then it signals that the
+            InstancePCIRequest objects already store a mapping per request.
+ I.e.: we are called _after_ the scheduler made allocations for this
+ request in placement.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:returns: Whether this compute node can satisfy the given request.
"""
- # NOTE(yjiang5): this function has high possibility to fail,
- # so no exception should be triggered for performance reason.
- return all(
- self._filter_pools(self.pools, r, numa_cells) for r in requests
- )
+
+        # Try to apply the requests on a copy of the stats; if they apply
+        # cleanly then we know that the requests are supported. We call apply
+        # only on a copy as we don't want to actually consume resources from
+        # the pool, as at this point this is just a test during host
+        # filtering. Later the scheduler will call apply_requests to consume
+        # on the selected host. The compute will call consume_requests during
+        # the PCI claim to consume not just from the pools but also to
+        # consume the PciDevice objects.
+ stats = copy.deepcopy(self)
+ try:
+ stats.apply_requests(requests, provider_mapping, numa_cells)
+ except exception.PciDeviceRequestFailed:
+ return False
+
+ return True
def _apply_request(
self,
pools: ty.List[Pool],
request: 'objects.InstancePCIRequest',
+ rp_uuids: ty.List[str],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> bool:
"""Apply an individual PCI request.
@@ -596,6 +796,8 @@ class PciDeviceStats(object):
:param pools: A list of PCI device pool dicts
:param request: An InstancePCIRequest object describing the type,
quantity and required NUMA affinity of device(s) we want.
+        :param rp_uuids: A list of RP uuids this request is fulfilled from in
+            placement
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACell objects.
:returns: True if the request was applied against the provided pools
@@ -605,22 +807,77 @@ class PciDeviceStats(object):
# Two concurrent requests may succeed when called support_requests
# because this method does not remove related devices from the pools
- filtered_pools = self._filter_pools(pools, request, numa_cells)
+ filtered_pools = self._filter_pools(
+ pools, request, numa_cells, rp_uuids)
if not filtered_pools:
return False
- count = request.count
- for pool in filtered_pools:
- count = self._decrease_pool_count(pools, pool, count)
- if not count:
- break
+ if not rp_uuids:
+ # If there is no placement allocation for this request then we are
+ # free to consume from the filtered pools in any order
+ count = request.count
+ for pool in filtered_pools:
+ count = self._decrease_pool_count(pools, pool, count)
+ if not count:
+ break
+ else:
+ # but if there is placement allocation then we have to follow that
+ requested_devs_per_pool_rp = collections.Counter(rp_uuids)
+ for pool in filtered_pools:
+ count = requested_devs_per_pool_rp[pool['rp_uuid']]
+ pool['count'] -= count
+ if pool['count'] == 0:
+ pools.remove(pool)
return True
+ def _get_rp_uuids_for_request(
+ self,
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
+ request: 'objects.InstancePCIRequest'
+ ) -> ty.List[str]:
+ """Return the list of RP uuids that are fulfilling the request.
+
+        An RP will appear in the list as many times as the number of devices
+        that need to be allocated from that RP.
+ """
+
+ if request.source == objects.InstancePCIRequest.NEUTRON_PORT:
+ # TODO(gibi): support neutron based requests in a later cycle
+ # an empty list will signal that any PCI pool can be used for this
+ # request
+ return []
+
+ if not provider_mapping:
+ # NOTE(gibi): AFAIK specs is always a list of a single dict
+ # but the object is hard to change retroactively
+ rp_uuids = request.spec[0].get('rp_uuids')
+ if not rp_uuids:
+ # This can happen if [filter_scheduler]pci_in_placement is not
+ # enabled yet
+ # An empty list will signal that any PCI pool can be used for
+ # this request
+ return []
+
+ # TODO(gibi): this is baaad but spec is a dict of string so
+ # the list is serialized
+ return rp_uuids.split(',')
+
+ # NOTE(gibi): the PCI prefilter generates RequestGroup suffixes from
+ # InstancePCIRequests in the form of {request_id}-{count_index}
+        # NOTE(gibi): a suffixed request group is always fulfilled from a
+        # single RP
+ return [
+ rp_uuids[0]
+ for group_id, rp_uuids in provider_mapping.items()
+ if group_id.startswith(request.request_id)
+ ]
+
def apply_requests(
self,
requests: ty.List['objects.InstancePCIRequest'],
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
numa_cells: ty.Optional[ty.List['objects.InstanceNUMACell']] = None,
) -> None:
"""Apply PCI requests to the PCI stats.
@@ -634,15 +891,23 @@ class PciDeviceStats(object):
:param requests: A list of InstancePCIRequest object describing the
types, quantities and required NUMA affinities of devices we want.
:type requests: nova.objects.InstancePCIRequests
+ :param provider_mapping: A dict keyed by RequestGroup requester_id,
+ to a list of resource provider UUIDs which provide resource
+ for that RequestGroup. If it is None then it signals that the
+            InstancePCIRequest objects already store a mapping per request.
+ I.e.: we are called _after_ the scheduler made allocations for this
+ request in placement.
:param numa_cells: A list of InstanceNUMACell objects whose ``id``
corresponds to the ``id`` of host NUMACells, or None.
:raises: exception.PciDeviceRequestFailed if this compute node cannot
satisfy the given request.
"""
- if not all(
- self._apply_request(self.pools, r, numa_cells) for r in requests
- ):
- raise exception.PciDeviceRequestFailed(requests=requests)
+
+ for r in requests:
+ rp_uuids = self._get_rp_uuids_for_request(provider_mapping, r)
+
+ if not self._apply_request(self.pools, r, rp_uuids, numa_cells):
+ raise exception.PciDeviceRequestFailed(requests=requests)
def __iter__(self) -> ty.Iterator[Pool]:
pools: ty.List[Pool] = []
@@ -667,3 +932,53 @@ class PciDeviceStats(object):
"""Return the contents of the pools as a PciDevicePoolList object."""
stats = [x for x in self]
return pci_device_pool.from_pci_stats(stats)
+
+ def has_remote_managed_device_pools(self) -> bool:
+ """Determine whether remote managed device pools are present on a host.
+
+ The check is pool-based, not free device-based and is NUMA cell
+ agnostic.
+ """
+ dummy_req = objects.InstancePCIRequest(
+ count=0,
+ spec=[{'remote_managed': True}]
+ )
+ pools = self._filter_pools_for_spec(self.pools, dummy_req)
+ return bool(pools)
+
+ def populate_pools_metadata_from_assigned_devices(self):
+ """Populate the rp_uuid of each pool based on the rp_uuid of the
+ devices assigned to the pool. This can only be called from the compute
+        where devices are assigned to each pool. This should not be called
+        from the scheduler as the device - pool assignment is not known there.
+ """
+ # PciDevices are tracked in placement and flavor based PCI requests
+ # are scheduled and allocated in placement. To be able to correlate
+ # what is allocated in placement and what is consumed in nova we
+ # need to map device pools to RPs. We can do that as the PciDevice
+ # contains the RP UUID that represents it in placement.
+ # NOTE(gibi): We cannot do this when the device is originally added to
+ # the pool as the device -> placement translation, that creates the
+        # RPs, runs after all the devices are created and assigned to pools.
+ for pool in self.pools:
+ pool_rps = {
+ dev.extra_info.get("rp_uuid")
+ for dev in pool["devices"]
+ if "rp_uuid" in dev.extra_info
+ }
+ if len(pool_rps) >= 2:
+ # FIXME(gibi): Do we have a 1:1 pool - RP mapping even
+                # if two PFs provide very similar VFs?
+ raise ValueError(
+ "We have a pool %s connected to more than one RPs %s in "
+ "placement via devs %s" % (pool, pool_rps, pool["devices"])
+ )
+
+ if not pool_rps:
+ # this can happen if the nova-compute is upgraded to have the
+ # PCI in placement inventory handling code but
+ # [pci]report_in_placement is not turned on yet.
+ continue
+
+ if pool_rps: # now we know that it is a single RP
+ pool['rp_uuid'] = next(iter(pool_rps))
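A reduced sketch of how the rp_uuids handling above plays out; the pool dicts are trimmed to the keys that matter and the RP names are placeholders. The placement allocation arrives as a comma separated string in the request spec, is expanded into a per-RP counter, and each pool is drained by exactly the amount placement allocated from its RP:

import collections

# 'rp_uuids' is serialized into the request spec as a comma separated
# string; an RP is repeated once per device allocated from it.
rp_uuids = 'rp-a,rp-a,rp-b'.split(',')
requested_per_rp = collections.Counter(rp_uuids)  # {'rp-a': 2, 'rp-b': 1}

pools = [
    {'rp_uuid': 'rp-a', 'count': 4},
    {'rp_uuid': 'rp-b', 'count': 1},
]

for pool in pools:
    # consume exactly what placement allocated from each pool
    pool['count'] -= requested_per_rp[pool['rp_uuid']]

assert [p['count'] for p in pools] == [2, 0]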
diff --git a/nova/pci/utils.py b/nova/pci/utils.py
index 778cdb227c..51716c9d98 100644
--- a/nova/pci/utils.py
+++ b/nova/pci/utils.py
@@ -191,7 +191,7 @@ def get_mac_by_pci_address(pci_addr: str, pf_interface: bool = False) -> str:
raise exception.PciDeviceNotFoundById(id=pci_addr)
-def get_vf_num_by_pci_address(pci_addr: str) -> str:
+def get_vf_num_by_pci_address(pci_addr: str) -> int:
"""Get the VF number based on a VF's pci address
A VF is associated with an VF number, which ip link command uses to
@@ -210,4 +210,64 @@ def get_vf_num_by_pci_address(pci_addr: str) -> str:
else:
raise exception.PciDeviceNotFoundById(id=pci_addr)
- return vf_num
+ return int(vf_num)
+
+
+def get_vf_product_id_by_pf_addr(pci_addr: str) -> str:
+ """Get the VF product ID for a given PF.
+
+ "Product ID" or Device ID in the PCIe spec terms for a PF is
+ possible to retrieve via the VF Device ID field present as of
+ SR-IOV 1.0 in the "3.3.11. VF Device ID (1Ah)" section. It is
+ described as a field that "contains the Device ID that should
+ be presented for every VF to the SI".
+
+ It is available as of Linux kernel 4.15, commit
+ 7dfca15276fc3f18411a2b2182704fa1222bcb60
+
+ :param pci_addr: A string of the form "<domain>:<bus>:<slot>.<function>".
+ :return: A string containing a product ID of a VF corresponding to the PF.
+ """
+ sriov_vf_device_path = f"/sys/bus/pci/devices/{pci_addr}/sriov_vf_device"
+ try:
+ with open(sriov_vf_device_path) as f:
+ vf_product_id = f.readline().strip()
+ except IOError as e:
+ LOG.warning(
+ "Could not find the expected sysfs file for "
+ "determining the VF product ID of a PCI VF by PF"
+ "with addr %(addr)s. May not be a PF. Error: %(e)s",
+ {"addr": pci_addr, "e": e},
+ )
+ raise exception.PciDeviceNotFoundById(id=pci_addr)
+ if not vf_product_id:
+ raise ValueError("sriov_vf_device file does not contain"
+ " a VF product ID")
+ return vf_product_id
+
+
+def get_pci_ids_by_pci_addr(pci_addr: str) -> ty.Tuple[str, ...]:
+ """Get the product ID and vendor ID for a given PCI device.
+
+ :param pci_addr: A string of the form "<domain>:<bus>:<slot>.<function>".
+    :return: A tuple containing the vendor and product IDs.
+ """
+ id_prefix = f"/sys/bus/pci/devices/{pci_addr}"
+ ids: ty.List[str] = []
+ for id_name in ("vendor", "product"):
+ try:
+ with open(os.path.join(id_prefix, id_name)) as f:
+ id_value = f.readline()
+ if not id_value:
+ raise ValueError(f"{id_name} file does not contain"
+ " a valid value")
+ ids.append(id_value.strip().replace("0x", ""))
+ except IOError as e:
+ LOG.warning(
+ "Could not find the expected sysfs file for "
+ f"determining the {id_name} ID of a PCI device "
+ "with addr %(addr)s. Error: %(e)s",
+ {"addr": pci_addr, "e": e},
+ )
+ raise exception.PciDeviceNotFoundById(id=pci_addr)
+ return tuple(ids)
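A usage sketch for the two new sysfs helpers; the PF address below is made up, and on a host without such an SR-IOV PF both calls raise PciDeviceNotFoundById:

from nova.pci import utils

pf_addr = '0000:81:00.0'  # hypothetical PF PCI address

vf_product_id = utils.get_vf_product_id_by_pf_addr(pf_addr)
vendor_id, product_id = utils.get_pci_ids_by_pci_addr(pf_addr)
print(vendor_id, product_id, vf_product_id)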
diff --git a/nova/pci/whitelist.py b/nova/pci/whitelist.py
index 7623f4903e..152cc29ca6 100644
--- a/nova/pci/whitelist.py
+++ b/nova/pci/whitelist.py
@@ -33,7 +33,7 @@ class Whitelist(object):
assignable.
"""
- def __init__(self, whitelist_spec: str = None) -> None:
+ def __init__(self, whitelist_spec: ty.Optional[str] = None) -> None:
"""White list constructor
For example, the following json string specifies that devices whose
@@ -44,7 +44,7 @@ class Whitelist(object):
:param whitelist_spec: A JSON string for a dictionary or list thereof.
Each dictionary specifies the pci device properties requirement.
- See the definition of ``passthrough_whitelist`` in
+ See the definition of ``device_spec`` in
``nova.conf.pci`` for details and examples.
"""
if whitelist_spec:
@@ -62,18 +62,18 @@ class Whitelist(object):
try:
dev_spec = jsonutils.loads(jsonspec)
except ValueError:
- raise exception.PciConfigInvalidWhitelist(
+ raise exception.PciConfigInvalidSpec(
reason=_("Invalid entry: '%s'") % jsonspec)
if isinstance(dev_spec, dict):
dev_spec = [dev_spec]
elif not isinstance(dev_spec, list):
- raise exception.PciConfigInvalidWhitelist(
+ raise exception.PciConfigInvalidSpec(
reason=_("Invalid entry: '%s'; "
"Expecting list or dict") % jsonspec)
for ds in dev_spec:
if not isinstance(ds, dict):
- raise exception.PciConfigInvalidWhitelist(
+ raise exception.PciConfigInvalidSpec(
reason=_("Invalid entry: '%s'; "
"Expecting dict") % ds)
@@ -82,7 +82,7 @@ class Whitelist(object):
return specs
- def device_assignable(self, dev: ty.Dict[str, str]) -> bool:
+ def device_assignable(self, dev: ty.Dict[str, ty.Any]) -> bool:
"""Check if a device can be assigned to a guest.
:param dev: A dictionary describing the device properties
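A hedged sketch of feeding a single [pci]device_spec entry through the parser above and matching a candidate device dict against it; the vendor/product IDs, PCI addresses and serial number are illustrative only:

from nova.pci import whitelist

device_spec = [
    '{"vendor_id": "15b3", "product_id": "101e", "remote_managed": "true"}',
]
wl = whitelist.Whitelist(device_spec)

dev = {
    'vendor_id': '15b3',
    'product_id': '101e',
    'address': '0000:81:00.2',
    'parent_addr': '0000:81:00.0',
    'capabilities': {'vpd': {'card_serial_number': 'MT2113X00000'}},
}
# True only because the device exposes a VPD card serial number; without
# it the remote_managed spec would filter the device out.
print(wl.device_assignable(dev))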
diff --git a/nova/policies/admin_actions.py b/nova/policies/admin_actions.py
index b0e6b40c21..e07d66ee36 100644
--- a/nova/policies/admin_actions.py
+++ b/nova/policies/admin_actions.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-admin-actions:%s'
admin_actions_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'reset_state',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Reset the state of a given server",
operations=[
{
@@ -32,10 +32,10 @@ admin_actions_policies = [
'path': '/servers/{server_id}/action (os-resetState)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'inject_network_info',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Inject network information into the server",
operations=[
{
@@ -43,7 +43,7 @@ admin_actions_policies = [
'path': '/servers/{server_id}/action (injectNetworkInfo)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/admin_password.py b/nova/policies/admin_password.py
index 455771037c..ad87aa7c96 100644
--- a/nova/policies/admin_password.py
+++ b/nova/policies/admin_password.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-admin-password'
admin_password_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Change the administrative password for a server",
operations=[
{
@@ -32,7 +32,7 @@ admin_password_policies = [
'method': 'POST'
}
],
- scope_types=['system', 'project'])
+ scope_types=['project'])
]
diff --git a/nova/policies/aggregates.py b/nova/policies/aggregates.py
index ea629a5db1..2775721699 100644
--- a/nova/policies/aggregates.py
+++ b/nova/policies/aggregates.py
@@ -25,7 +25,7 @@ NEW_POLICY_ROOT = 'compute:aggregates:%s'
aggregates_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'set_metadata',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Create or replace metadata for an aggregate",
operations=[
{
@@ -33,10 +33,10 @@ aggregates_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'add_host',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Add a host to an aggregate",
operations=[
{
@@ -44,10 +44,10 @@ aggregates_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Create an aggregate",
operations=[
{
@@ -55,10 +55,10 @@ aggregates_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'remove_host',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Remove a host from an aggregate",
operations=[
{
@@ -66,10 +66,10 @@ aggregates_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Update name and/or availability zone for an aggregate",
operations=[
{
@@ -77,10 +77,10 @@ aggregates_policies = [
'method': 'PUT'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List all aggregates",
operations=[
{
@@ -88,10 +88,10 @@ aggregates_policies = [
'method': 'GET'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Delete an aggregate",
operations=[
{
@@ -99,10 +99,10 @@ aggregates_policies = [
'method': 'DELETE'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="Show details for an aggregate",
operations=[
{
@@ -110,10 +110,10 @@ aggregates_policies = [
'method': 'GET'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=NEW_POLICY_ROOT % 'images',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Request image caching for an aggregate",
operations=[
{
@@ -121,7 +121,7 @@ aggregates_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['project']),
]
diff --git a/nova/policies/assisted_volume_snapshots.py b/nova/policies/assisted_volume_snapshots.py
index ed919076b4..98a67a8e37 100644
--- a/nova/policies/assisted_volume_snapshots.py
+++ b/nova/policies/assisted_volume_snapshots.py
@@ -24,7 +24,14 @@ POLICY_ROOT = 'os_compute_api:os-assisted-volume-snapshots:%s'
assisted_volume_snapshots_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.SYSTEM_ADMIN,
+        # TODO(gmann): This is an internal API policy and is called by
+        # cinder. Add the 'service' role in this policy so that cinder
+        # can call it with a user having the 'service' role (not having
+        # the correct project_id). That is for phase-2 of the RBAC goal and
+        # until then, we keep it open for all admins in any project. We
+        # cannot default it to ADMIN which has the project_id in
+        # check_str and will fail if cinder calls it with another project_id.
+ check_str=base.ADMIN,
description="Create an assisted volume snapshot",
operations=[
{
@@ -32,10 +39,17 @@ assisted_volume_snapshots_policies = [
'method': 'POST'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.SYSTEM_ADMIN,
+        # TODO(gmann): This is an internal API policy and is called by
+        # cinder. Add the 'service' role in this policy so that cinder
+        # can call it with a user having the 'service' role (not having
+        # the correct project_id). That is for phase-2 of the RBAC goal and
+        # until then, we keep it open for all admins in any project. We
+        # cannot default it to ADMIN which has the project_id in
+        # check_str and will fail if cinder calls it with another project_id.
+ check_str=base.ADMIN,
description="Delete an assisted volume snapshot",
operations=[
{
@@ -43,7 +57,7 @@ assisted_volume_snapshots_policies = [
'method': 'DELETE'
}
],
- scope_types=['system']),
+ scope_types=['project']),
]
diff --git a/nova/policies/attach_interfaces.py b/nova/policies/attach_interfaces.py
index cabe525674..b996e8ae59 100644
--- a/nova/policies/attach_interfaces.py
+++ b/nova/policies/attach_interfaces.py
@@ -37,7 +37,7 @@ DEPRECATED_INTERFACES_POLICY = policy.DeprecatedRule(
attach_interfaces_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List port interfaces attached to a server",
operations=[
{
@@ -45,11 +45,11 @@ attach_interfaces_policies = [
'path': '/servers/{server_id}/os-interface'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_INTERFACES_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show details of a port interface attached to a server",
operations=[
{
@@ -57,11 +57,11 @@ attach_interfaces_policies = [
'path': '/servers/{server_id}/os-interface/{port_id}'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_INTERFACES_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Attach an interface to a server",
operations=[
{
@@ -69,11 +69,11 @@ attach_interfaces_policies = [
'path': '/servers/{server_id}/os-interface'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_INTERFACES_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Detach an interface from a server",
operations=[
{
@@ -81,7 +81,7 @@ attach_interfaces_policies = [
'path': '/servers/{server_id}/os-interface/{port_id}'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_INTERFACES_POLICY)
]
diff --git a/nova/policies/availability_zone.py b/nova/policies/availability_zone.py
index 3b592674ac..9a32c095b2 100644
--- a/nova/policies/availability_zone.py
+++ b/nova/policies/availability_zone.py
@@ -33,10 +33,10 @@ availability_zone_policies = [
'path': '/os-availability-zone'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'detail',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List detailed availability zone information with host "
"information",
operations=[
@@ -45,7 +45,7 @@ availability_zone_policies = [
'path': '/os-availability-zone/detail'
}
],
- scope_types=['system'])
+ scope_types=['project'])
]
diff --git a/nova/policies/baremetal_nodes.py b/nova/policies/baremetal_nodes.py
index 191d9db650..8fd66d57ba 100644
--- a/nova/policies/baremetal_nodes.py
+++ b/nova/policies/baremetal_nodes.py
@@ -38,7 +38,7 @@ DEPRECATED_BAREMETAL_POLICY = policy.DeprecatedRule(
baremetal_nodes_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="""List and show details of bare metal nodes.
These APIs are proxy calls to the Ironic service and are deprecated.
@@ -49,11 +49,11 @@ These APIs are proxy calls to the Ironic service and are deprecated.
'path': '/os-baremetal-nodes'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_BAREMETAL_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="""Show action details for a server.""",
operations=[
{
@@ -61,7 +61,7 @@ These APIs are proxy calls to the Ironic service and are deprecated.
'path': '/os-baremetal-nodes/{node_id}'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_BAREMETAL_POLICY)
]
diff --git a/nova/policies/base.py b/nova/policies/base.py
index b04410425d..ab0c319cdf 100644
--- a/nova/policies/base.py
+++ b/nova/policies/base.py
@@ -37,40 +37,25 @@ DEPRECATED_ADMIN_OR_OWNER_POLICY = policy.DeprecatedRule(
deprecated_since='21.0.0'
)
-# TODO(gmann): # Special string ``system_scope:all`` is added for system
-# scoped policies for backwards compatibility where ``nova.conf [oslo_policy]
-# enforce_scope = False``.
-# Otherwise, this might open up APIs to be more permissive unintentionally if a
-# deployment isn't enforcing scope. For example, the 'list all servers'
-# policy will be System Scoped Reader with ``role:reader`` and
-# scope_type=['system'] Until enforce_scope=True by default, it would
-# be possible for users with the ``reader`` role on a project to access the
-# 'list all servers' API. Once nova defaults ``nova.conf [oslo_policy]
-# enforce_scope=True``, the ``system_scope:all`` bits of these check strings
-# can be removed since that will be handled automatically by scope_types in
-# oslo.policy's RuleDefault objects.
-SYSTEM_ADMIN = 'rule:system_admin_api'
-SYSTEM_READER = 'rule:system_reader_api'
-PROJECT_ADMIN = 'rule:project_admin_api'
+ADMIN = 'rule:context_is_admin'
PROJECT_MEMBER = 'rule:project_member_api'
PROJECT_READER = 'rule:project_reader_api'
-PROJECT_MEMBER_OR_SYSTEM_ADMIN = 'rule:system_admin_or_owner'
-PROJECT_READER_OR_SYSTEM_READER = 'rule:system_or_project_reader'
+PROJECT_MEMBER_OR_ADMIN = 'rule:project_member_or_admin'
+PROJECT_READER_OR_ADMIN = 'rule:project_reader_or_admin'
-# NOTE(gmann): Below is the mapping of new roles and scope_types
-# with legacy roles::
+# NOTE(gmann): Below is the mapping of new roles with legacy roles::
-# Legacy Rule | New Rules |Operation |scope_type|
-# -------------------+----------------------------------+----------+-----------
-# |-> SYSTEM_ADMIN |Global | [system]
-# RULE_ADMIN_API | Write
-# |-> SYSTEM_READER |Global | [system]
-# | |Read |
-#
-# |-> PROJECT_MEMBER_OR_SYSTEM_ADMIN |Project | [system,
-# RULE_ADMIN_OR_OWNER| |Write | project]
-# |-> PROJECT_READER_OR_SYSTEM_READER|Project | [system,
-# |Read | project]
+# Legacy Rule | New Rules |Operation |scope_type|
+# -------------------+---------------------------+----------------+-----------
+# RULE_ADMIN_API |-> ADMIN |Global resource | [project]
+# | |Write & Read |
+# -------------------+---------------------------+----------------+-----------
+# |-> ADMIN |Project admin | [project]
+# | |level operation |
+# RULE_ADMIN_OR_OWNER|-> PROJECT_MEMBER_OR_ADMIN |Project resource| [project]
+# | |Write |
+# |-> PROJECT_READER_OR_ADMIN |Project resource| [project]
+# | |Read |
# NOTE(johngarbutt) The base rules here affect so many APIs the list
# of related API operations has not been populated. It would be
@@ -88,7 +73,8 @@ rules = [
policy.RuleDefault(
"context_is_admin",
"role:admin",
- "Decides what is required for the 'is_admin:True' check to succeed."),
+ "Decides what is required for the 'is_admin:True' check to succeed.",
+ deprecated_rule=DEPRECATED_ADMIN_POLICY),
policy.RuleDefault(
"admin_or_owner",
"is_admin:True or project_id:%(project_id)s",
@@ -104,21 +90,6 @@ rules = [
deprecated_reason=DEPRECATED_REASON,
deprecated_since='21.0.0'),
policy.RuleDefault(
- name="system_admin_api",
- check_str='role:admin and system_scope:all',
- description="Default rule for System Admin APIs.",
- deprecated_rule=DEPRECATED_ADMIN_POLICY),
- policy.RuleDefault(
- name="system_reader_api",
- check_str="role:reader and system_scope:all",
- description="Default rule for System level read only APIs.",
- deprecated_rule=DEPRECATED_ADMIN_POLICY),
- policy.RuleDefault(
- "project_admin_api",
- "role:admin and project_id:%(project_id)s",
- "Default rule for Project level admin APIs.",
- deprecated_rule=DEPRECATED_ADMIN_POLICY),
- policy.RuleDefault(
"project_member_api",
"role:member and project_id:%(project_id)s",
"Default rule for Project level non admin APIs.",
@@ -126,16 +97,17 @@ rules = [
policy.RuleDefault(
"project_reader_api",
"role:reader and project_id:%(project_id)s",
- "Default rule for Project level read only APIs."),
+ "Default rule for Project level read only APIs.",
+ deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY),
policy.RuleDefault(
- name="system_admin_or_owner",
- check_str="rule:system_admin_api or rule:project_member_api",
- description="Default rule for System admin+owner APIs.",
+ "project_member_or_admin",
+ "rule:project_member_api or rule:context_is_admin",
+ "Default rule for Project Member or admin APIs.",
deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY),
policy.RuleDefault(
- "system_or_project_reader",
- "rule:system_reader_api or rule:project_reader_api",
- "Default rule for System+Project read only APIs.",
+ "project_reader_or_admin",
+ "rule:project_reader_api or rule:context_is_admin",
+ "Default rule for Project reader or admin APIs.",
deprecated_rule=DEPRECATED_ADMIN_OR_OWNER_POLICY)
]
diff --git a/nova/policies/console_auth_tokens.py b/nova/policies/console_auth_tokens.py
index d6a00b735c..5f784965cf 100644
--- a/nova/policies/console_auth_tokens.py
+++ b/nova/policies/console_auth_tokens.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-console-auth-tokens'
console_auth_tokens_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="Show console connection information for a given console "
"authentication token",
operations=[
@@ -33,7 +33,7 @@ console_auth_tokens_policies = [
'path': '/os-console-auth-tokens/{console_token}'
}
],
- scope_types=['system'])
+ scope_types=['project'])
]
diff --git a/nova/policies/console_output.py b/nova/policies/console_output.py
index 461ef83a54..625971b5d7 100644
--- a/nova/policies/console_output.py
+++ b/nova/policies/console_output.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-console-output'
console_output_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description='Show console output for a server',
operations=[
{
@@ -32,7 +32,7 @@ console_output_policies = [
'path': '/servers/{server_id}/action (os-getConsoleOutput)'
}
],
- scope_types=['system', 'project'])
+ scope_types=['project'])
]
diff --git a/nova/policies/create_backup.py b/nova/policies/create_backup.py
index b7acc36bd5..c18fa11e84 100644
--- a/nova/policies/create_backup.py
+++ b/nova/policies/create_backup.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-create-backup'
create_backup_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description='Create a back up of a server',
operations=[
{
@@ -32,7 +32,7 @@ create_backup_policies = [
'path': '/servers/{server_id}/action (createBackup)'
}
],
- scope_types=['system', 'project'])
+ scope_types=['project'])
]
diff --git a/nova/policies/deferred_delete.py b/nova/policies/deferred_delete.py
index 32069790ed..9c18aa02de 100644
--- a/nova/policies/deferred_delete.py
+++ b/nova/policies/deferred_delete.py
@@ -36,7 +36,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
deferred_delete_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'restore',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Restore a soft deleted server",
operations=[
{
@@ -44,11 +44,11 @@ deferred_delete_policies = [
'path': '/servers/{server_id}/action (restore)'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'force',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Force delete a server before deferred cleanup",
operations=[
{
@@ -56,7 +56,7 @@ deferred_delete_policies = [
'path': '/servers/{server_id}/action (forceDelete)'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY)
]
diff --git a/nova/policies/evacuate.py b/nova/policies/evacuate.py
index 33b86f7a26..3a0fd502fd 100644
--- a/nova/policies/evacuate.py
+++ b/nova/policies/evacuate.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-evacuate'
evacuate_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Evacuate a server from a failed host to a new host",
operations=[
{
@@ -32,7 +32,7 @@ evacuate_policies = [
'method': 'POST'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/extended_server_attributes.py b/nova/policies/extended_server_attributes.py
index 93444219ff..ba151a36cc 100644
--- a/nova/policies/extended_server_attributes.py
+++ b/nova/policies/extended_server_attributes.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-extended-server-attributes'
extended_server_attributes_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="""Return extended attributes for server.
This rule will control the visibility for a set of servers attributes:
@@ -66,7 +66,7 @@ is therefore deprecated and will be removed in a future release.
'path': '/servers/{server_id}/action (rebuild)'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
]
diff --git a/nova/policies/extensions.py b/nova/policies/extensions.py
index b049db7a7d..36c3fa0a05 100644
--- a/nova/policies/extensions.py
+++ b/nova/policies/extensions.py
@@ -37,7 +37,7 @@ extensions_policies = [
'path': '/extensions/{alias}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/flavor_access.py b/nova/policies/flavor_access.py
index b70ae71811..e7044d0cec 100644
--- a/nova/policies/flavor_access.py
+++ b/nova/policies/flavor_access.py
@@ -25,8 +25,8 @@ POLICY_ROOT = 'os_compute_api:os-flavor-access:%s'
# NOTE(gmann): Deprecating this policy explicitly as old defaults
# admin or owner is not suitable for that which should be admin (Bug#1867840)
# but changing that will break old deployment so let's keep supporting
-# the old default also and new default can be SYSTEM_READER
-# SYSTEM_READER rule in base class is defined with the deprecated rule of admin
+# the old default also, and the new default can be System Admin.
+# The System Admin rule in the base class is defined with the deprecated rule of admin,
# not admin or owner which is the main reason that we need to explicitly
# deprecate this policy here.
DEPRECATED_REASON = """
@@ -45,7 +45,7 @@ DEPRECATED_FLAVOR_ACCESS_POLICY = policy.DeprecatedRule(
flavor_access_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'add_tenant_access',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Add flavor access to a tenant",
operations=[
{
@@ -53,10 +53,10 @@ flavor_access_policies = [
'path': '/flavors/{flavor_id}/action (addTenantAccess)'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'remove_tenant_access',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Remove flavor access from a tenant",
operations=[
{
@@ -64,10 +64,10 @@ flavor_access_policies = [
'path': '/flavors/{flavor_id}/action (removeTenantAccess)'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="""List flavor access information
Allows access to the full list of tenants that have access
@@ -79,7 +79,7 @@ to a flavor via an os-flavor-access API.
'path': '/flavors/{flavor_id}/os-flavor-access'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_FLAVOR_ACCESS_POLICY),
]
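
A minimal sketch of what the new flavor-access default means in practice, checked with oslo.policy directly. The expansion of base.ADMIN to 'rule:context_is_admin' is an assumption here, and a real deployment would use nova's own enforcer and generated defaults rather than registering rules by hand:

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_defaults([
        policy.RuleDefault('context_is_admin', 'role:admin'),
        # Assumed equivalent of check_str=base.ADMIN after this change.
        policy.RuleDefault(
            'os_compute_api:os-flavor-access:add_tenant_access',
            'rule:context_is_admin'),
    ])

    # A project-scoped admin token passes; a plain project member does not.
    admin_creds = {'roles': ['admin'], 'project_id': 'p1'}
    member_creds = {'roles': ['member'], 'project_id': 'p1'}
    assert enforcer.enforce(
        'os_compute_api:os-flavor-access:add_tenant_access', {}, admin_creds)
    assert not enforcer.enforce(
        'os_compute_api:os-flavor-access:add_tenant_access', {}, member_creds)
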
diff --git a/nova/policies/flavor_extra_specs.py b/nova/policies/flavor_extra_specs.py
index 9355a9719d..eaa7dd52cb 100644
--- a/nova/policies/flavor_extra_specs.py
+++ b/nova/policies/flavor_extra_specs.py
@@ -17,14 +17,12 @@ from oslo_policy import policy
from nova.policies import base
-
POLICY_ROOT = 'os_compute_api:os-flavor-extra-specs:%s'
-
flavor_extra_specs_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show an extra spec for a flavor",
operations=[
{
@@ -33,11 +31,11 @@ flavor_extra_specs_policies = [
'method': 'GET'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Create extra specs for a flavor",
operations=[
{
@@ -45,11 +43,11 @@ flavor_extra_specs_policies = [
'method': 'POST'
}
],
- scope_types=['system']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Update an extra spec for a flavor",
operations=[
{
@@ -58,11 +56,11 @@ flavor_extra_specs_policies = [
'method': 'PUT'
}
],
- scope_types=['system']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Delete an extra spec for a flavor",
operations=[
{
@@ -71,38 +69,19 @@ flavor_extra_specs_policies = [
'method': 'DELETE'
}
],
- scope_types=['system']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List extra specs for a flavor. Starting with "
- "microversion 2.47, the flavor used for a server is also returned "
- "in the response when showing server details, updating a server or "
- "rebuilding a server. Starting with microversion 2.61, extra specs "
- "may be returned in responses for the flavor resource.",
+ "microversion 2.61, extra specs may be returned in responses "
+ "for the flavor resource.",
operations=[
{
'path': '/flavors/{flavor_id}/os-extra_specs/',
'method': 'GET'
},
- # Microversion 2.47 operations for servers:
- {
- 'path': '/servers/detail',
- 'method': 'GET'
- },
- {
- 'path': '/servers/{server_id}',
- 'method': 'GET'
- },
- {
- 'path': '/servers/{server_id}',
- 'method': 'PUT'
- },
- {
- 'path': '/servers/{server_id}/action (rebuild)',
- 'method': 'POST'
- },
# Microversion 2.61 operations for flavors:
{
'path': '/flavors',
@@ -121,7 +100,7 @@ flavor_extra_specs_policies = [
'method': 'PUT'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
]
diff --git a/nova/policies/flavor_manage.py b/nova/policies/flavor_manage.py
index 07f33975cc..a2ac6d8b21 100644
--- a/nova/policies/flavor_manage.py
+++ b/nova/policies/flavor_manage.py
@@ -25,7 +25,7 @@ POLICY_ROOT = 'os_compute_api:os-flavor-manage:%s'
flavor_manage_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Create a flavor",
operations=[
{
@@ -33,10 +33,10 @@ flavor_manage_policies = [
'path': '/flavors'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Update a flavor",
operations=[
{
@@ -44,10 +44,10 @@ flavor_manage_policies = [
'path': '/flavors/{flavor_id}'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Delete a flavor",
operations=[
{
@@ -55,7 +55,7 @@ flavor_manage_policies = [
'path': '/flavors/{flavor_id}'
}
],
- scope_types=['system']),
+ scope_types=['project']),
]
diff --git a/nova/policies/floating_ip_pools.py b/nova/policies/floating_ip_pools.py
index 61105efcb7..dd1d8f6851 100644
--- a/nova/policies/floating_ip_pools.py
+++ b/nova/policies/floating_ip_pools.py
@@ -32,7 +32,7 @@ floating_ip_pools_policies = [
'path': '/os-floating-ip-pools'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/floating_ips.py b/nova/policies/floating_ips.py
index 0b8d8c53f6..48d60d7b89 100644
--- a/nova/policies/floating_ips.py
+++ b/nova/policies/floating_ips.py
@@ -38,7 +38,7 @@ DEPRECATED_FIP_POLICY = policy.DeprecatedRule(
floating_ips_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'add',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Associate floating IPs to server. "
" This API is deprecated.",
operations=[
@@ -47,11 +47,11 @@ floating_ips_policies = [
'path': '/servers/{server_id}/action (addFloatingIp)'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'remove',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Disassociate floating IPs to server. "
" This API is deprecated.",
operations=[
@@ -60,11 +60,11 @@ floating_ips_policies = [
'path': '/servers/{server_id}/action (removeFloatingIp)'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List floating IPs. This API is deprecated.",
operations=[
{
@@ -72,11 +72,11 @@ floating_ips_policies = [
'path': '/os-floating-ips'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'create',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create floating IPs. This API is deprecated.",
operations=[
{
@@ -84,11 +84,11 @@ floating_ips_policies = [
'path': '/os-floating-ips'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show floating IPs. This API is deprecated.",
operations=[
{
@@ -96,11 +96,11 @@ floating_ips_policies = [
'path': '/os-floating-ips/{floating_ip_id}'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_FIP_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'delete',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete floating IPs. This API is deprecated.",
operations=[
{
@@ -108,7 +108,7 @@ floating_ips_policies = [
'path': '/os-floating-ips/{floating_ip_id}'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_FIP_POLICY),
]
diff --git a/nova/policies/hosts.py b/nova/policies/hosts.py
index 64c36845bf..04b91a8641 100644
--- a/nova/policies/hosts.py
+++ b/nova/policies/hosts.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
hosts_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="""List physical hosts.
This API is deprecated in favor of os-hypervisors and os-services.""",
@@ -48,11 +48,11 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="""Show physical host.
This API is deprecated in favor of os-hypervisors and os-services.""",
@@ -62,11 +62,11 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts/{host_name}'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'update',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="""Update physical host.
This API is deprecated in favor of os-hypervisors and os-services.""",
@@ -76,11 +76,11 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts/{host_name}'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'reboot',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="""Reboot physical host.
This API is deprecated in favor of os-hypervisors and os-services.""",
@@ -90,11 +90,11 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts/{host_name}/reboot'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'shutdown',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="""Shutdown physical host.
This API is deprecated in favor of os-hypervisors and os-services.""",
@@ -104,11 +104,11 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts/{host_name}/shutdown'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'start',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="""Start physical host.
This API is deprecated in favor of os-hypervisors and os-services.""",
@@ -118,7 +118,7 @@ This API is deprecated in favor of os-hypervisors and os-services.""",
'path': '/os-hosts/{host_name}/startup'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
]
diff --git a/nova/policies/hypervisors.py b/nova/policies/hypervisors.py
index 02a179cb34..f4f29d1e1b 100644
--- a/nova/policies/hypervisors.py
+++ b/nova/policies/hypervisors.py
@@ -37,7 +37,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
hypervisors_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List all hypervisors.",
operations=[
{
@@ -45,11 +45,11 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list-detail',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List all hypervisors with details",
operations=[
{
@@ -57,11 +57,11 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'statistics',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="Show summary statistics for all hypervisors "
"over all compute nodes.",
operations=[
@@ -70,11 +70,11 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="Show details for a hypervisor.",
operations=[
{
@@ -82,11 +82,11 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'uptime',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="Show the uptime of a hypervisor.",
operations=[
{
@@ -94,11 +94,11 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'search',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="Search hypervisor by hypervisor_hostname pattern.",
operations=[
{
@@ -106,11 +106,11 @@ hypervisors_policies = [
'method': 'GET'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'servers',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List all servers on hypervisors that can match "
"the provided hypervisor_hostname pattern.",
operations=[
@@ -120,7 +120,7 @@ hypervisors_policies = [
'method': 'GET'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY
),
]
diff --git a/nova/policies/instance_actions.py b/nova/policies/instance_actions.py
index 0447005b1d..e3e16a58f0 100644
--- a/nova/policies/instance_actions.py
+++ b/nova/policies/instance_actions.py
@@ -38,7 +38,7 @@ DEPRECATED_INSTANCE_ACTION_POLICY = policy.DeprecatedRule(
instance_actions_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'events:details',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="""Add "details" key in action events for a server.
This check is performed only after the check
@@ -56,10 +56,10 @@ but in the other hand it might leak information about the deployment
'path': '/servers/{server_id}/os-instance-actions/{request_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'events',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="""Add events details in action details for a server.
This check is performed only after the check
os_compute_api:os-instance-actions:show passes. Beginning with Microversion
@@ -73,10 +73,10 @@ passes, the name of the host.""",
'path': '/servers/{server_id}/os-instance-actions/{request_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List actions for a server.""",
operations=[
{
@@ -84,11 +84,11 @@ passes, the name of the host.""",
'path': '/servers/{server_id}/os-instance-actions'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_INSTANCE_ACTION_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show action details for a server.""",
operations=[
{
@@ -96,7 +96,7 @@ passes, the name of the host.""",
'path': '/servers/{server_id}/os-instance-actions/{request_id}'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_INSTANCE_ACTION_POLICY),
]
diff --git a/nova/policies/instance_usage_audit_log.py b/nova/policies/instance_usage_audit_log.py
index 98ab12c52e..7884134e4a 100644
--- a/nova/policies/instance_usage_audit_log.py
+++ b/nova/policies/instance_usage_audit_log.py
@@ -36,7 +36,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
instance_usage_audit_log_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List all usage audits.",
operations=[
{
@@ -44,11 +44,11 @@ instance_usage_audit_log_policies = [
'path': '/os-instance_usage_audit_log'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List all usage audits occurred before "
"a specified time for all servers on all compute hosts where "
"usage auditing is configured",
@@ -59,7 +59,7 @@ instance_usage_audit_log_policies = [
'path': '/os-instance_usage_audit_log/{before_timestamp}'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
]
diff --git a/nova/policies/ips.py b/nova/policies/ips.py
index aeee77ceaf..20cad2522a 100644
--- a/nova/policies/ips.py
+++ b/nova/policies/ips.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:ips:%s'
ips_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show IP addresses details for a network label of a "
" server",
operations=[
@@ -33,10 +33,10 @@ ips_policies = [
'path': '/servers/{server_id}/ips/{network_label}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List IP addresses that are assigned to a server",
operations=[
{
@@ -44,7 +44,7 @@ ips_policies = [
'path': '/servers/{server_id}/ips'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/keypairs.py b/nova/policies/keypairs.py
index 3feaa524bd..a42ee6302b 100644
--- a/nova/policies/keypairs.py
+++ b/nova/policies/keypairs.py
@@ -23,7 +23,7 @@ POLICY_ROOT = 'os_compute_api:os-keypairs:%s'
keypairs_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str='(' + base.SYSTEM_READER + ') or user_id:%(user_id)s',
+ check_str='(' + base.ADMIN + ') or user_id:%(user_id)s',
description="List all keypairs",
operations=[
{
@@ -31,10 +31,10 @@ keypairs_policies = [
'method': 'GET'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str='(' + base.SYSTEM_ADMIN + ') or user_id:%(user_id)s',
+ check_str='(' + base.ADMIN + ') or user_id:%(user_id)s',
description="Create a keypair",
operations=[
{
@@ -42,10 +42,10 @@ keypairs_policies = [
'method': 'POST'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str='(' + base.SYSTEM_ADMIN + ') or user_id:%(user_id)s',
+ check_str='(' + base.ADMIN + ') or user_id:%(user_id)s',
description="Delete a keypair",
operations=[
{
@@ -53,10 +53,10 @@ keypairs_policies = [
'method': 'DELETE'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str='(' + base.SYSTEM_READER + ') or user_id:%(user_id)s',
+ check_str='(' + base.ADMIN + ') or user_id:%(user_id)s',
description="Show details of a keypair",
operations=[
{
@@ -64,7 +64,7 @@ keypairs_policies = [
'method': 'GET'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
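
The keypair rules above compose base.ADMIN with an owner check on user_id. A hedged sketch of how the user_id substitution behaves, again with oslo.policy and an assumed expansion of base.ADMIN:

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_defaults([
        policy.RuleDefault('context_is_admin', 'role:admin'),
        # Assumed expansion of "'(' + base.ADMIN + ') or user_id:%(user_id)s'".
        policy.RuleDefault('os_compute_api:os-keypairs:delete',
                           '(rule:context_is_admin) or user_id:%(user_id)s'),
    ])

    target = {'user_id': 'alice'}   # owner of the keypair being deleted
    owner = {'roles': ['member'], 'user_id': 'alice', 'project_id': 'p1'}
    other = {'roles': ['member'], 'user_id': 'bob', 'project_id': 'p1'}

    assert enforcer.enforce('os_compute_api:os-keypairs:delete', target, owner)
    assert not enforcer.enforce('os_compute_api:os-keypairs:delete', target, other)
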
diff --git a/nova/policies/limits.py b/nova/policies/limits.py
index efe430c237..1216dd1995 100644
--- a/nova/policies/limits.py
+++ b/nova/policies/limits.py
@@ -46,10 +46,10 @@ limits_policies = [
'path': '/limits'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=OTHER_PROJECT_LIMIT_POLICY_NAME,
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="""Show rate and absolute limits of other project.
This policy only checks if the user has access to the requested
@@ -61,7 +61,7 @@ os_compute_api:limits passes""",
'path': '/limits'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
]
diff --git a/nova/policies/lock_server.py b/nova/policies/lock_server.py
index 1b30e4b0a2..f7a018803c 100644
--- a/nova/policies/lock_server.py
+++ b/nova/policies/lock_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-lock-server:%s'
lock_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'lock',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Lock a server",
operations=[
{
@@ -32,11 +32,11 @@ lock_server_policies = [
'method': 'POST'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unlock',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unlock a server",
operations=[
{
@@ -44,11 +44,11 @@ lock_server_policies = [
'method': 'POST'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unlock:unlock_override',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="""Unlock a server, regardless who locked the server.
This check is performed only after the check
@@ -59,7 +59,7 @@ os_compute_api:os-lock-server:unlock passes""",
'method': 'POST'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
]
diff --git a/nova/policies/migrate_server.py b/nova/policies/migrate_server.py
index fe1c5b55e0..0b3d7c8bd1 100644
--- a/nova/policies/migrate_server.py
+++ b/nova/policies/migrate_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-migrate-server:%s'
migrate_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'migrate',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Cold migrate a server to a host",
operations=[
{
@@ -32,10 +32,10 @@ migrate_server_policies = [
'path': '/servers/{server_id}/action (migrate)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'migrate_live',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Live migrate a server to a new host without a reboot",
operations=[
{
@@ -43,7 +43,7 @@ migrate_server_policies = [
'path': '/servers/{server_id}/action (os-migrateLive)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/migrations.py b/nova/policies/migrations.py
index bb77d1f0bb..ce2aeaa564 100644
--- a/nova/policies/migrations.py
+++ b/nova/policies/migrations.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-migrations:%s'
migrations_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List migrations",
operations=[
{
@@ -32,7 +32,7 @@ migrations_policies = [
'path': '/os-migrations'
}
],
- scope_types=['system']),
+ scope_types=['project']),
]
diff --git a/nova/policies/multinic.py b/nova/policies/multinic.py
index bd1f04c84e..7119ec25b4 100644
--- a/nova/policies/multinic.py
+++ b/nova/policies/multinic.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
multinic_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'add',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Add a fixed IP address to a server.
This API is proxy calls to the Network service. This is
@@ -49,11 +49,11 @@ deprecated.""",
'path': '/servers/{server_id}/action (addFixedIp)'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'remove',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Remove a fixed IP address from a server.
This API is proxy calls to the Network service. This is
@@ -64,7 +64,7 @@ deprecated.""",
'path': '/servers/{server_id}/action (removeFixedIp)'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
]
diff --git a/nova/policies/networks.py b/nova/policies/networks.py
index 59fb166708..928705d8be 100644
--- a/nova/policies/networks.py
+++ b/nova/policies/networks.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
networks_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List networks for the project.
This API is proxy calls to the Network service. This is deprecated.""",
@@ -48,11 +48,11 @@ This API is proxy calls to the Network service. This is deprecated.""",
'path': '/os-networks'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show network details.
This API is proxy calls to the Network service. This is deprecated.""",
@@ -62,7 +62,7 @@ This API is proxy calls to the Network service. This is deprecated.""",
'path': '/os-networks/{network_id}'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
]
diff --git a/nova/policies/pause_server.py b/nova/policies/pause_server.py
index 2eea903535..96a1ff4c0d 100644
--- a/nova/policies/pause_server.py
+++ b/nova/policies/pause_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-pause-server:%s'
pause_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'pause',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Pause a server",
operations=[
{
@@ -32,11 +32,11 @@ pause_server_policies = [
'method': 'POST'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unpause',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unpause a paused server",
operations=[
{
@@ -44,7 +44,7 @@ pause_server_policies = [
'method': 'POST'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
]
diff --git a/nova/policies/quota_class_sets.py b/nova/policies/quota_class_sets.py
index 5a41a79bec..b01102b44e 100644
--- a/nova/policies/quota_class_sets.py
+++ b/nova/policies/quota_class_sets.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-quota-class-sets:%s'
quota_class_sets_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List quotas for specific quota classs",
operations=[
{
@@ -32,10 +32,10 @@ quota_class_sets_policies = [
'path': '/os-quota-class-sets/{quota_class}'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description='Update quotas for specific quota class',
operations=[
{
@@ -43,7 +43,7 @@ quota_class_sets_policies = [
'path': '/os-quota-class-sets/{quota_class}'
}
],
- scope_types=['system']),
+ scope_types=['project']),
]
diff --git a/nova/policies/quota_sets.py b/nova/policies/quota_sets.py
index ac141a2c24..ae8c471f56 100644
--- a/nova/policies/quota_sets.py
+++ b/nova/policies/quota_sets.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-quota-sets:%s'
quota_sets_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Update the quotas",
operations=[
{
@@ -32,7 +32,7 @@ quota_sets_policies = [
'path': '/os-quota-sets/{tenant_id}'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'defaults',
check_str=base.RULE_ANY,
@@ -43,10 +43,10 @@ quota_sets_policies = [
'path': '/os-quota-sets/{tenant_id}/defaults'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show a quota",
operations=[
{
@@ -54,10 +54,10 @@ quota_sets_policies = [
'path': '/os-quota-sets/{tenant_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Revert quotas to defaults",
operations=[
{
@@ -65,10 +65,13 @@ quota_sets_policies = [
'path': '/os-quota-sets/{tenant_id}'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'detail',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ # TODO(gmann): Until we have a domain admin or similar to get another
+ # project's data, allow the admin role (with scope check it will be
+ # the project admin) to get another project's quota.
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show the detail of quota",
operations=[
{
@@ -76,7 +79,7 @@ quota_sets_policies = [
'path': '/os-quota-sets/{tenant_id}/detail'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/remote_consoles.py b/nova/policies/remote_consoles.py
index 68858e8b93..e32dd33d4c 100644
--- a/nova/policies/remote_consoles.py
+++ b/nova/policies/remote_consoles.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-remote-consoles'
remote_consoles_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Generate a URL to access remove server console.
This policy is for ``POST /remote-consoles`` API and below Server actions APIs
@@ -56,7 +56,7 @@ are deprecated:
'path': '/servers/{server_id}/remote-consoles'
},
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/rescue.py b/nova/policies/rescue.py
index 53612a711a..f9f72e92ef 100644
--- a/nova/policies/rescue.py
+++ b/nova/policies/rescue.py
@@ -37,7 +37,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
rescue_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Rescue a server",
operations=[
{
@@ -45,10 +45,10 @@ rescue_policies = [
'method': 'POST'
},
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=UNRESCUE_POLICY_NAME,
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unrescue a server",
operations=[
{
@@ -56,7 +56,7 @@ rescue_policies = [
'method': 'POST'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY
),
]
diff --git a/nova/policies/security_groups.py b/nova/policies/security_groups.py
index b09a6632c3..d6318bc724 100644
--- a/nova/policies/security_groups.py
+++ b/nova/policies/security_groups.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
security_groups_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'get',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List security groups. This API is deprecated.",
operations=[
{
@@ -46,11 +46,11 @@ security_groups_policies = [
'path': '/os-security-groups'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show security group. This API is deprecated.",
operations=[
{
@@ -58,11 +58,11 @@ security_groups_policies = [
'path': '/os-security-groups/{security_group_id}'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'create',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create security group. This API is deprecated.",
operations=[
{
@@ -70,11 +70,11 @@ security_groups_policies = [
'path': '/os-security-groups'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'update',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Update security group. This API is deprecated.",
operations=[
{
@@ -82,11 +82,11 @@ security_groups_policies = [
'path': '/os-security-groups/{security_group_id}'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'delete',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete security group. This API is deprecated.",
operations=[
{
@@ -94,11 +94,11 @@ security_groups_policies = [
'path': '/os-security-groups/{security_group_id}'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'rule:create',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create security group Rule. This API is deprecated.",
operations=[
{
@@ -106,11 +106,11 @@ security_groups_policies = [
'path': '/os-security-group-rules'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'rule:delete',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete security group Rule. This API is deprecated.",
operations=[
{
@@ -118,11 +118,11 @@ security_groups_policies = [
'path': '/os-security-group-rules/{security_group_id}'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List security groups of server.",
operations=[
{
@@ -130,11 +130,11 @@ security_groups_policies = [
'path': '/servers/{server_id}/os-security-groups'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'add',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Add security groups to server.",
operations=[
{
@@ -142,11 +142,11 @@ security_groups_policies = [
'path': '/servers/{server_id}/action (addSecurityGroup)'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'remove',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Remove security groups from server.",
operations=[
{
@@ -154,7 +154,7 @@ security_groups_policies = [
'path': '/servers/{server_id}/action (removeSecurityGroup)'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
]
diff --git a/nova/policies/server_diagnostics.py b/nova/policies/server_diagnostics.py
index c788b3eb42..6774b7e862 100644
--- a/nova/policies/server_diagnostics.py
+++ b/nova/policies/server_diagnostics.py
@@ -24,7 +24,7 @@ BASE_POLICY_NAME = 'os_compute_api:os-server-diagnostics'
server_diagnostics_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Show the usage data for a server",
operations=[
{
@@ -32,7 +32,7 @@ server_diagnostics_policies = [
'path': '/servers/{server_id}/diagnostics'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/server_external_events.py b/nova/policies/server_external_events.py
index bd9a21aea0..56034d0186 100644
--- a/nova/policies/server_external_events.py
+++ b/nova/policies/server_external_events.py
@@ -24,7 +24,15 @@ POLICY_ROOT = 'os_compute_api:os-server-external-events:%s'
server_external_events_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.SYSTEM_ADMIN,
+ # TODO(gmann): This is an internal API policy and is supposed to be
+ # called by neutron, cinder, ironic, and cyborg (and possibly other
+ # OpenStack services in the future). Add the 'service' role in this
+ # policy so that neutron can call it with a user having the 'service'
+ # role (not having the server's project_id). That is for phase-2 of
+ # the RBAC goal and until then, we keep it open for all admins in any
+ # project. We cannot default it to PROJECT_ADMIN, which has the
+ # project_id in check_str and would fail if neutron calls it with
+ # another project_id.
+ check_str=base.ADMIN,
description="Create one or more external events",
operations=[
{
@@ -32,7 +40,7 @@ server_external_events_policies = [
'path': '/os-server-external-events'
}
],
- scope_types=['system']),
+ scope_types=['project']),
]
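
The TODO above concerns callers such as Neutron that hold the admin role in their own service project but not the project_id of the server. A small illustrative sketch of the difference; both rule names and check strings below are hypothetical, not taken from nova:

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_defaults([
        # Admin in any project (what base.ADMIN is assumed to behave like).
        policy.RuleDefault('events_admin_any_project', 'role:admin'),
        # Project-bound admin (what a PROJECT_ADMIN style rule would add).
        policy.RuleDefault('events_project_admin',
                           'role:admin and project_id:%(project_id)s'),
    ])

    caller = {'roles': ['admin'], 'project_id': 'service'}
    target = {'project_id': 'tenant-owning-the-server'}

    assert enforcer.enforce('events_admin_any_project', target, caller)
    assert not enforcer.enforce('events_project_admin', target, caller)
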
diff --git a/nova/policies/server_groups.py b/nova/policies/server_groups.py
index 55176b8a6a..8dfbe7c202 100644
--- a/nova/policies/server_groups.py
+++ b/nova/policies/server_groups.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-server-groups:%s'
server_groups_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a new server group",
operations=[
{
@@ -32,20 +32,11 @@ server_groups_policies = [
'method': 'POST'
}
],
- # (NOTE)gmann: Reason for 'project' only scope:
- # POST SG need project_id to create the serve groups
- # system scope members do not have project id for which
- # SG needs to be created.
- # If we allow system scope role also then created SG will have
- # project_id of system role, not the one he/she wants to create the SG
- # for (nobody can create the SG for other projects because API does
- # not take project id in request ). So keeping this scoped to project
- # only as these roles are the only ones who will be creating SG.
scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete a server group",
operations=[
{
@@ -53,11 +44,11 @@ server_groups_policies = [
'method': 'DELETE'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all server groups",
operations=[
{
@@ -65,11 +56,11 @@ server_groups_policies = [
'method': 'GET'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index:all_projects',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List all server groups for all projects",
operations=[
{
@@ -77,11 +68,11 @@ server_groups_policies = [
'method': 'GET'
}
],
- scope_types=['system']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show details of a server group",
operations=[
{
@@ -89,7 +80,7 @@ server_groups_policies = [
'method': 'GET'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
]
diff --git a/nova/policies/server_metadata.py b/nova/policies/server_metadata.py
index 198e6e4643..f136df8439 100644
--- a/nova/policies/server_metadata.py
+++ b/nova/policies/server_metadata.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:server-metadata:%s'
server_metadata_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all metadata of a server",
operations=[
{
@@ -32,11 +32,11 @@ server_metadata_policies = [
'method': 'GET'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show metadata for a server",
operations=[
{
@@ -44,11 +44,11 @@ server_metadata_policies = [
'method': 'GET'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create metadata for a server",
operations=[
{
@@ -56,11 +56,11 @@ server_metadata_policies = [
'method': 'POST'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update_all',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Replace metadata for a server",
operations=[
{
@@ -68,11 +68,11 @@ server_metadata_policies = [
'method': 'PUT'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Update metadata from a server",
operations=[
{
@@ -80,11 +80,11 @@ server_metadata_policies = [
'method': 'PUT'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete metadata from a server",
operations=[
{
@@ -92,7 +92,7 @@ server_metadata_policies = [
'method': 'DELETE'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
]
diff --git a/nova/policies/server_password.py b/nova/policies/server_password.py
index a861d3086c..1f9ddafd3c 100644
--- a/nova/policies/server_password.py
+++ b/nova/policies/server_password.py
@@ -37,7 +37,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
server_password_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show the encrypted administrative "
"password of a server",
operations=[
@@ -46,11 +46,11 @@ server_password_policies = [
'path': '/servers/{server_id}/os-server-password'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'clear',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Clear the encrypted administrative "
"password of a server",
operations=[
@@ -59,7 +59,7 @@ server_password_policies = [
'path': '/servers/{server_id}/os-server-password'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
]
diff --git a/nova/policies/server_tags.py b/nova/policies/server_tags.py
index 619941f759..baa1123987 100644
--- a/nova/policies/server_tags.py
+++ b/nova/policies/server_tags.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-server-tags:%s'
server_tags_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete_all',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete all the server tags",
operations=[
{
@@ -32,10 +32,10 @@ server_tags_policies = [
'path': '/servers/{server_id}/tags'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all tags for given server",
operations=[
{
@@ -43,10 +43,10 @@ server_tags_policies = [
'path': '/servers/{server_id}/tags'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update_all',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Replace all tags on specified server with the new set "
"of tags.",
operations=[
@@ -56,10 +56,10 @@ server_tags_policies = [
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete a single tag from the specified server",
operations=[
{
@@ -67,11 +67,11 @@ server_tags_policies = [
'path': '/servers/{server_id}/tags/{tag}'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Add a single tag to the server if server has no "
"specified tag",
operations=[
@@ -80,11 +80,11 @@ server_tags_policies = [
'path': '/servers/{server_id}/tags/{tag}'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Check tag existence on the server.",
operations=[
{
@@ -92,7 +92,7 @@ server_tags_policies = [
'path': '/servers/{server_id}/tags/{tag}'
}
],
- scope_types=['system', 'project']
+ scope_types=['project']
),
]
diff --git a/nova/policies/server_topology.py b/nova/policies/server_topology.py
index 4ebbc43888..0e6c203e4f 100644
--- a/nova/policies/server_topology.py
+++ b/nova/policies/server_topology.py
@@ -21,7 +21,7 @@ BASE_POLICY_NAME = 'compute:server:topology:%s'
server_topology_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'index',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show the NUMA topology data for a server",
operations=[
{
@@ -29,11 +29,11 @@ server_topology_policies = [
'path': '/servers/{server_id}/topology'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
# Control host NUMA node and cpu pinning information
name=BASE_POLICY_NAME % 'host:index',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="Show the NUMA topology data for a server with host "
"NUMA ID and CPU pinning information",
operations=[
@@ -42,7 +42,7 @@ server_topology_policies = [
'path': '/servers/{server_id}/topology'
}
],
- scope_types=['system']),
+ scope_types=['project']),
]
diff --git a/nova/policies/servers.py b/nova/policies/servers.py
index 54e5301fb9..1e41baa203 100644
--- a/nova/policies/servers.py
+++ b/nova/policies/servers.py
@@ -22,10 +22,21 @@ ZERO_DISK_FLAVOR = SERVERS % 'create:zero_disk_flavor'
REQUESTED_DESTINATION = 'compute:servers:create:requested_destination'
CROSS_CELL_RESIZE = 'compute:servers:resize:cross_cell'
+DEPRECATED_POLICY = policy.DeprecatedRule(
+ 'os_compute_api:os-flavor-extra-specs:index',
+ base.RULE_ADMIN_OR_OWNER,
+)
+
+DEPRECATED_REASON = """
+Policies for showing flavor extra specs in server API responses are
+separated into a new policy. This policy is deprecated only for that,
+not for listing extra specs or showing them in the flavor API response.
+"""
+
rules = [
policy.DocumentedRuleDefault(
name=SERVERS % 'index',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all servers",
operations=[
{
@@ -33,10 +44,10 @@ rules = [
'path': '/servers'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'detail',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List all servers with detailed information",
operations=[
{
@@ -44,10 +55,10 @@ rules = [
'path': '/servers/detail'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'index:get_all_tenants',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List all servers for all projects",
operations=[
{
@@ -55,10 +66,11 @@ rules = [
'path': '/servers'
}
],
- scope_types=['system']),
+ scope_types=['project']),
+
policy.DocumentedRuleDefault(
name=SERVERS % 'detail:get_all_tenants',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List all servers with detailed information for "
" all projects",
operations=[
@@ -67,10 +79,10 @@ rules = [
'path': '/servers/detail'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'allow_all_filters',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="Allow all filters when listing servers",
operations=[
{
@@ -82,10 +94,10 @@ rules = [
'path': '/servers/detail'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show a server",
operations=[
{
@@ -93,12 +105,42 @@ rules = [
'path': '/servers/{server_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
+ policy.DocumentedRuleDefault(
+ name=SERVERS % 'show:flavor-extra-specs',
+ check_str=base.PROJECT_READER_OR_ADMIN,
+ description="Starting with microversion 2.47, the flavor and its "
+ "extra specs used for a server is also returned in the response "
+ "when showing server details, updating a server or rebuilding a "
+ "server.",
+ operations=[
+ # Microversion 2.47 operations for servers:
+ {
+ 'path': '/servers/detail',
+ 'method': 'GET'
+ },
+ {
+ 'path': '/servers/{server_id}',
+ 'method': 'GET'
+ },
+ {
+ 'path': '/servers/{server_id}',
+ 'method': 'PUT'
+ },
+ {
+ 'path': '/servers/{server_id}/action (rebuild)',
+ 'method': 'POST'
+ },
+ ],
+ scope_types=['project'],
+ deprecated_rule=DEPRECATED_POLICY,
+ deprecated_reason=DEPRECATED_REASON,
+ deprecated_since='25.0.0'),
# the details in host_status are pretty sensitive, only admins
# should do that by default.
policy.DocumentedRuleDefault(
name=SERVERS % 'show:host_status',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="""
Show a server with additional host status information.
@@ -129,10 +171,10 @@ API responses which are also controlled by this policy rule, like the
'path': '/servers/{server_id}/action (rebuild)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'show:host_status:unknown-only',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="""
Show a server with additional host status information, only if host status is
UNKNOWN.
@@ -162,10 +204,10 @@ allow everyone.
'path': '/servers/{server_id}/action (rebuild)'
}
],
- scope_types=['system', 'project'],),
+ scope_types=['project'],),
policy.DocumentedRuleDefault(
name=SERVERS % 'create',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server",
operations=[
{
@@ -176,17 +218,7 @@ allow everyone.
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:forced_host',
- # TODO(gmann): We need to make it SYSTEM_ADMIN.
- # PROJECT_ADMIN is added for now because create server
- # policy is project scoped and there is no way to
- # pass the project_id in request body for system scoped
- # roles so that create server for other project with force host.
- # To achieve that, we need to update the create server API to
- # accept the project_id for whom the server needs to be created
- # and then change the scope of this policy to system-only
- # Because that is API change it needs to be done with new
- # microversion.
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
Create a server on the specified host and/or node.
@@ -200,21 +232,10 @@ host and/or node by bypassing the scheduler filters unlike the
'path': '/servers'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=REQUESTED_DESTINATION,
- # TODO(gmann): We need to make it SYSTEM_ADMIN.
- # PROJECT_ADMIN is added for now because create server
- # policy is project scoped and there is no way to
- # pass the project_id in request body for system scoped
- # roles so that create server for other project with requested
- # destination.
- # To achieve that, we need to update the create server API to
- # accept the project_id for whom the server needs to be created
- # and then change the scope of this policy to system-only
- # Because that is API change it needs to be done with new
- # microversion.
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
Create a server on the requested compute service host and/or
hypervisor_hostname.
@@ -229,10 +250,10 @@ validated by the scheduler filters unlike the
'path': '/servers'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:attach_volume',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server with the requested volume attached to it",
operations=[
{
@@ -243,7 +264,7 @@ validated by the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:attach_network',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server with the requested network attached "
" to it",
operations=[
@@ -255,7 +276,7 @@ validated by the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create:trusted_certs',
- check_str=base.PROJECT_MEMBER,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create a server with trusted image certificate IDs",
operations=[
{
@@ -266,7 +287,7 @@ validated by the scheduler filters unlike the
scope_types=['project']),
policy.DocumentedRuleDefault(
name=ZERO_DISK_FLAVOR,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="""
This rule controls the compute API validation behavior of creating a server
with a flavor that has 0 disk, indicating the server should be volume-backed.
@@ -288,10 +309,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=NETWORK_ATTACH_EXTERNAL,
- check_str=base.PROJECT_ADMIN,
+ check_str=base.ADMIN,
description="Attach an unshared external network to a server",
operations=[
# Create a server with a requested network or port.
@@ -305,10 +326,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/os-interface'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'delete',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Delete a server",
operations=[
{
@@ -316,10 +337,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'update',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Update a server",
operations=[
{
@@ -327,10 +348,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'confirm_resize',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Confirm a server resize",
operations=[
{
@@ -338,10 +359,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (confirmResize)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'revert_resize',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Revert a server resize",
operations=[
{
@@ -349,10 +370,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (revertResize)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'reboot',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Reboot a server",
operations=[
{
@@ -360,10 +381,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (reboot)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'resize',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Resize a server",
operations=[
{
@@ -371,7 +392,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (resize)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=CROSS_CELL_RESIZE,
check_str=base.RULE_NOBODY,
@@ -386,10 +407,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (resize)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'rebuild',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Rebuild a server",
operations=[
{
@@ -397,10 +418,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (rebuild)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'rebuild:trusted_certs',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Rebuild a server with trusted image certificate IDs",
operations=[
{
@@ -408,10 +429,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (rebuild)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create_image',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create an image from a server",
operations=[
{
@@ -419,10 +440,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (createImage)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'create_image:allow_volume_backed',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Create an image from a volume backed server",
operations=[
{
@@ -430,10 +451,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (createImage)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'start',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Start a server",
operations=[
{
@@ -441,10 +462,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (os-start)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'stop',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Stop a server",
operations=[
{
@@ -452,10 +473,10 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (os-stop)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=SERVERS % 'trigger_crash_dump',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Trigger crash dump in a server",
operations=[
{
@@ -463,7 +484,7 @@ https://bugs.launchpad.net/nova/+bug/1739646 for details.
'path': '/servers/{server_id}/action (trigger_crash_dump)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
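These server policies now default to the admin role or project membership with project-only scope, but deployments can still override any individual rule in their policy file. A minimal sketch of such an override, using standard oslo.policy syntax (the rule names and check strings below are illustrative, not recommendations):

    # policy.yaml -- illustrative operator override only
    "os_compute_api:servers:stop": "role:member and project_id:%(project_id)s"
    "os_compute_api:servers:create:forced_host": "role:admin"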
diff --git a/nova/policies/servers_migrations.py b/nova/policies/servers_migrations.py
index a323ce5660..21762fc575 100644
--- a/nova/policies/servers_migrations.py
+++ b/nova/policies/servers_migrations.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:servers:migrations:%s'
servers_migrations_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="Show details for an in-progress live migration for a "
"given server",
operations=[
@@ -33,10 +33,10 @@ servers_migrations_policies = [
'path': '/servers/{server_id}/migrations/{migration_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'force_complete',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Force an in-progress live migration for a given server "
"to complete",
operations=[
@@ -46,10 +46,10 @@ servers_migrations_policies = [
'/action (force_complete)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Delete(Abort) an in-progress live migration",
operations=[
{
@@ -57,10 +57,10 @@ servers_migrations_policies = [
'path': '/servers/{server_id}/migrations/{migration_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="Lists in-progress live migrations for a given server",
operations=[
{
@@ -68,7 +68,7 @@ servers_migrations_policies = [
'path': '/servers/{server_id}/migrations'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/services.py b/nova/policies/services.py
index bd4e455a95..7300d3bdb3 100644
--- a/nova/policies/services.py
+++ b/nova/policies/services.py
@@ -37,7 +37,7 @@ DEPRECATED_SERVICE_POLICY = policy.DeprecatedRule(
services_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'list',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List all running Compute services in a region.",
operations=[
{
@@ -45,11 +45,11 @@ services_policies = [
'path': '/os-services'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_SERVICE_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'update',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Update a Compute service.",
operations=[
{
@@ -58,11 +58,11 @@ services_policies = [
'path': '/os-services/{service_id}'
},
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_SERVICE_POLICY),
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME % 'delete',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Delete a Compute service.",
operations=[
{
@@ -70,7 +70,7 @@ services_policies = [
'path': '/os-services/{service_id}'
}
],
- scope_types=['system'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_SERVICE_POLICY),
]
diff --git a/nova/policies/shelve.py b/nova/policies/shelve.py
index da4a3354ce..476d212b04 100644
--- a/nova/policies/shelve.py
+++ b/nova/policies/shelve.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-shelve:%s'
shelve_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'shelve',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Shelve server",
operations=[
{
@@ -32,10 +32,10 @@ shelve_policies = [
'path': '/servers/{server_id}/action (shelve)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'unshelve',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Unshelve (restore) shelved server",
operations=[
{
@@ -43,10 +43,22 @@ shelve_policies = [
'path': '/servers/{server_id}/action (unshelve)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
+ policy.DocumentedRuleDefault(
+ name=POLICY_ROOT % 'unshelve_to_host',
+ check_str=base.ADMIN,
+ description="Unshelve (restore) a shelve-offloaded server to a "
+ "specific host",
+ operations=[
+ {
+ 'method': 'POST',
+ 'path': '/servers/{server_id}/action (unshelve)'
+ }
+ ],
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'shelve_offload',
- check_str=base.SYSTEM_ADMIN,
+ check_str=base.ADMIN,
description="Shelf-offload (remove) server",
operations=[
{
@@ -54,7 +66,7 @@ shelve_policies = [
'path': '/servers/{server_id}/action (shelveOffload)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
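The new 'unshelve_to_host' rule only applies when the unshelve action names a target host, which is possible starting with API microversion 2.91; the plain unshelve action continues to be governed by the 'unshelve' rule above. A sketch of such a request, with a hypothetical host name:

    POST /servers/{server_id}/action
    {
        "unshelve": {
            "host": "compute-01"
        }
    }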
diff --git a/nova/policies/simple_tenant_usage.py b/nova/policies/simple_tenant_usage.py
index 85ebffbb30..41d87d1426 100644
--- a/nova/policies/simple_tenant_usage.py
+++ b/nova/policies/simple_tenant_usage.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-simple-tenant-usage:%s'
simple_tenant_usage_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show usage statistics for a specific tenant",
operations=[
{
@@ -32,10 +32,10 @@ simple_tenant_usage_policies = [
'path': '/os-simple-tenant-usage/{tenant_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'list',
- check_str=base.SYSTEM_READER,
+ check_str=base.ADMIN,
description="List per tenant usage statistics for all tenants",
operations=[
{
@@ -43,7 +43,7 @@ simple_tenant_usage_policies = [
'path': '/os-simple-tenant-usage'
}
],
- scope_types=['system']),
+ scope_types=['project']),
]
diff --git a/nova/policies/suspend_server.py b/nova/policies/suspend_server.py
index 58a7d878be..5e889808fd 100644
--- a/nova/policies/suspend_server.py
+++ b/nova/policies/suspend_server.py
@@ -24,7 +24,7 @@ POLICY_ROOT = 'os_compute_api:os-suspend-server:%s'
suspend_server_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'resume',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Resume suspended server",
operations=[
{
@@ -32,10 +32,10 @@ suspend_server_policies = [
'path': '/servers/{server_id}/action (resume)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'suspend',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Suspend server",
operations=[
{
@@ -43,7 +43,7 @@ suspend_server_policies = [
'path': '/servers/{server_id}/action (suspend)'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policies/tenant_networks.py b/nova/policies/tenant_networks.py
index a3eace29b4..79f8d21eaa 100644
--- a/nova/policies/tenant_networks.py
+++ b/nova/policies/tenant_networks.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
tenant_networks_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List project networks.
This API is proxy calls to the Network service. This is deprecated.""",
@@ -48,11 +48,11 @@ This API is proxy calls to the Network service. This is deprecated.""",
'path': '/os-tenant-networks'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show project network details.
This API is proxy calls to the Network service. This is deprecated.""",
@@ -62,7 +62,7 @@ This API is proxy calls to the Network service. This is deprecated.""",
'path': '/os-tenant-networks/{network_id}'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
]
diff --git a/nova/policies/volumes.py b/nova/policies/volumes.py
index a4237a14e6..129ced82c1 100644
--- a/nova/policies/volumes.py
+++ b/nova/policies/volumes.py
@@ -38,7 +38,7 @@ DEPRECATED_POLICY = policy.DeprecatedRule(
volumes_policies = [
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'list',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List volumes.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -48,11 +48,11 @@ This API is a proxy call to the Volume service. It is deprecated.""",
'path': '/os-volumes'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'create',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Create volume.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -62,11 +62,11 @@ This API is a proxy call to the Volume service. It is deprecated.""",
'path': '/os-volumes'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'detail',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List volumes detail.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -76,11 +76,11 @@ This API is a proxy call to the Volume service. It is deprecated.""",
'path': '/os-volumes/detail'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show volume.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -90,11 +90,11 @@ This API is a proxy call to the Volume service. It is deprecated.""",
'path': '/os-volumes/{volume_id}'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'delete',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Delete volume.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -104,11 +104,11 @@ This API is a proxy call to the Volume service. It is deprecated.""",
'path': '/os-volumes/{volume_id}'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:list',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List snapshots.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -118,11 +118,11 @@ This API is a proxy call to the Volume service. It is deprecated.""",
'path': '/os-snapshots'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:create',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Create snapshots.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -132,11 +132,11 @@ This API is a proxy call to the Volume service. It is deprecated.""",
'path': '/os-snapshots'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:detail',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""List snapshots details.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -146,11 +146,11 @@ This API is a proxy call to the Volume service. It is deprecated.""",
'path': '/os-snapshots/detail'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="""Show snapshot.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -160,11 +160,11 @@ This API is a proxy call to the Volume service. It is deprecated.""",
'path': '/os-snapshots/{snapshot_id}'
},
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
policy.DocumentedRuleDefault(
name=POLICY_NAME % 'snapshots:delete',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Delete snapshot.
This API is a proxy call to the Volume service. It is deprecated.""",
@@ -174,7 +174,7 @@ This API is a proxy call to the Volume service. It is deprecated.""",
'path': '/os-snapshots/{snapshot_id}'
}
],
- scope_types=['system', 'project'],
+ scope_types=['project'],
deprecated_rule=DEPRECATED_POLICY),
]
diff --git a/nova/policies/volumes_attachments.py b/nova/policies/volumes_attachments.py
index 7b229f598f..68a1694c59 100644
--- a/nova/policies/volumes_attachments.py
+++ b/nova/policies/volumes_attachments.py
@@ -24,17 +24,17 @@ POLICY_ROOT = 'os_compute_api:os-volumes-attachments:%s'
volumes_attachments_policies = [
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'index',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="List volume attachments for an instance",
operations=[
{'method': 'GET',
'path': '/servers/{server_id}/os-volume_attachments'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'create',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Attach a volume to an instance",
operations=[
{
@@ -42,10 +42,10 @@ volumes_attachments_policies = [
'path': '/servers/{server_id}/os-volume_attachments'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'show',
- check_str=base.PROJECT_READER_OR_SYSTEM_READER,
+ check_str=base.PROJECT_READER_OR_ADMIN,
description="Show details of a volume attachment",
operations=[
{
@@ -54,10 +54,10 @@ volumes_attachments_policies = [
'/servers/{server_id}/os-volume_attachments/{volume_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'update',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="""Update a volume attachment.
New 'update' policy about 'swap + update' request (which is possible
only >2.85) only <swap policy> is checked. We expect <swap policy> to be
@@ -70,10 +70,17 @@ always superset of this policy permission.
'/servers/{server_id}/os-volume_attachments/{volume_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'swap',
- check_str=base.SYSTEM_ADMIN,
+ # TODO(gmann): This is an internal API policy and is supposed to be
+ # called only by cinder. Add the 'service' role to this policy so that
+ # cinder can call it with a user having the 'service' role (not having
+ # the server's project_id). That is for phase-2 of the RBAC goal and
+ # until then we keep it open to admins in any project. We cannot default
+ # it to PROJECT_ADMIN, which has the project_id in its check_str and
+ # would fail if cinder calls it with another project_id.
+ check_str=base.ADMIN,
description="Update a volume attachment with a different volumeId",
operations=[
{
@@ -82,10 +89,10 @@ always superset of this policy permission.
'/servers/{server_id}/os-volume_attachments/{volume_id}'
}
],
- scope_types=['system']),
+ scope_types=['project']),
policy.DocumentedRuleDefault(
name=POLICY_ROOT % 'delete',
- check_str=base.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ check_str=base.PROJECT_MEMBER_OR_ADMIN,
description="Detach a volume from an instance",
operations=[
{
@@ -94,7 +101,7 @@ always superset of this policy permission.
'/servers/{server_id}/os-volume_attachments/{volume_id}'
}
],
- scope_types=['system', 'project']),
+ scope_types=['project']),
]
diff --git a/nova/policy.py b/nova/policy.py
index 55455a9271..c66489cc8d 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -41,11 +41,15 @@ USER_BASED_RESOURCES = ['os-keypairs']
saved_file_rules = []
KEY_EXPR = re.compile(r'%\((\w+)\)s')
-# TODO(gmann): Remove setting the default value of config policy_file
-# once oslo_policy change the default value to 'policy.yaml'.
-# https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49
+# TODO(gmann): Remove overriding the default value of config options
+# 'policy_file', 'enforce_scope', and 'enforce_new_defaults' once
+# oslo_policy change their default value to what is overridden here.
DEFAULT_POLICY_FILE = 'policy.yaml'
-opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE)
+opts.set_defaults(
+ cfg.CONF,
+ DEFAULT_POLICY_FILE,
+ enforce_scope=True,
+ enforce_new_defaults=True)
def reset():
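With scope and new-defaults enforcement now switched on by Nova itself, deployments that still rely on the legacy defaults can flip these options back in nova.conf until their policies are migrated. An illustrative [oslo_policy] snippet (option names are the standard oslo.policy ones):

    [oslo_policy]
    # temporarily restore the pre-change behaviour (illustrative)
    enforce_new_defaults = False
    enforce_scope = False
    policy_file = policy.yaml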
diff --git a/nova/quota.py b/nova/quota.py
index a311ecc87f..eafad4cd23 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -28,6 +28,8 @@ from nova.db.api import api as api_db_api
from nova.db.api import models as api_models
from nova.db.main import api as main_db_api
from nova import exception
+from nova.limit import local as local_limit
+from nova.limit import placement as placement_limit
from nova import objects
from nova.scheduler.client import report
from nova import utils
@@ -53,6 +55,10 @@ class DbQuotaDriver(object):
"""
UNLIMITED_VALUE = -1
+ def get_reserved(self):
+ # Since we stopped reserving in the DB, we just return 0
+ return 0
+
def get_defaults(self, context, resources):
"""Given a list of resources, retrieve the default quotas.
Use the class quotas named `_DEFAULT_QUOTA_NAME` as default quotas,
@@ -615,6 +621,10 @@ class NoopQuotaDriver(object):
wish to have any quota checking.
"""
+ def get_reserved(self):
+ # Noop has always returned -1 for reserved
+ return -1
+
def get_defaults(self, context, resources):
"""Given a list of resources, retrieve the default quotas.
@@ -768,6 +778,98 @@ class NoopQuotaDriver(object):
pass
+class UnifiedLimitsDriver(NoopQuotaDriver):
+ """Ease migration to new unified limits code.
+
+ Help ease migration to unified limits by ensuring the old code
+ paths still work with unified limits. Eventually the expectation is
+ that all of this legacy quota code will go away, leaving the new,
+ simpler code.
+ """
+
+ def __init__(self):
+ LOG.warning("The Unified Limits Quota Driver is experimental and "
+ "is under active development. Do not use this driver.")
+
+ def get_reserved(self):
+ # To make unified limits APIs the same as the DB driver, return 0
+ return 0
+
+ def get_class_quotas(self, context, resources, quota_class):
+ """Given a list of resources, retrieve the quotas for the given
+ quota class.
+
+ :param context: The request context, for access checks.
+ :param resources: A dictionary of the registered resources.
+ :param quota_class: Placeholder, we always assume default quota class.
+ """
+ # NOTE(johngarbutt): ignoring quota_class, as ignored in noop driver
+ return self.get_defaults(context, resources)
+
+ def get_defaults(self, context, resources):
+ local_limits = local_limit.get_legacy_default_limits()
+ # Note we get 0 if there is no registered limit, mirroring
+ # oslo_limit behaviour for unregistered limits
+ placement_limits = placement_limit.get_legacy_default_limits()
+ quotas = {}
+ for resource in resources.values():
+ if resource.name in placement_limits:
+ quotas[resource.name] = placement_limits[resource.name]
+ else:
+ # return -1 for things like security_group_rules
+ # that are neither a keystone limit nor a local limit
+ quotas[resource.name] = local_limits.get(resource.name, -1)
+
+ return quotas
+
+ def get_project_quotas(self, context, resources, project_id,
+ quota_class=None,
+ usages=True, remains=False):
+ if quota_class is not None:
+ raise NotImplementedError("quota_class")
+
+ if remains:
+ raise NotImplementedError("remains")
+
+ local_limits = local_limit.get_legacy_default_limits()
+ # keystone limits always return cores, ram and instances;
+ # if nothing is set in keystone we get back 0, i.e. don't allow
+ placement_limits = placement_limit.get_legacy_project_limits(
+ project_id)
+
+ project_quotas = {}
+ for resource in resources.values():
+ if resource.name in placement_limits:
+ limit = placement_limits[resource.name]
+ else:
+ # return -1 for things like security_group_rules
+ # that are neither a keystone limit nor a local limit
+ limit = local_limits.get(resource.name, -1)
+ project_quotas[resource.name] = {"limit": limit}
+
+ if usages:
+ local_in_use = local_limit.get_in_use(context, project_id)
+ p_in_use = placement_limit.get_legacy_counts(context, project_id)
+
+ for resource in resources.values():
+ # default to 0 for resources that are deprecated,
+ # i.e. not in keystone or local limits, such that we
+ # are API compatible with what was returned with
+ # the db driver, even though the noop driver returned -1
+ usage_count = 0
+ if resource.name in local_in_use:
+ usage_count = local_in_use[resource.name]
+ if resource.name in p_in_use:
+ usage_count = p_in_use[resource.name]
+ project_quotas[resource.name]["in_use"] = usage_count
+
+ return project_quotas
+
+ def get_user_quotas(self, context, resources, project_id, user_id,
+ quota_class=None, usages=True):
+ return self.get_project_quotas(context, resources, project_id,
+ quota_class, usages)
+
+
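The driver in use is chosen through the existing [quota]/driver option, so opting in to the experimental unified limits path is a configuration change only. An illustrative nova.conf snippet:

    [quota]
    # experimental; see the warning logged by UnifiedLimitsDriver.__init__
    driver = nova.quota.UnifiedLimitsDriver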
class BaseResource(object):
"""Describe a single resource for quota checking."""
@@ -869,13 +971,23 @@ class QuotaEngine(object):
}
# NOTE(mriedem): quota_driver is ever only supplied in tests with a
# fake driver.
- self.__driver = quota_driver
+ self.__driver_override = quota_driver
+ self.__driver = None
+ self.__driver_name = None
@property
def _driver(self):
- if self.__driver:
- return self.__driver
- self.__driver = importutils.import_object(CONF.quota.driver)
+ if self.__driver_override:
+ return self.__driver_override
+
+ # NOTE(johngarbutt) to allow unit tests to change the driver by
+ # simply overriding config, double check if we have the correct
+ # driver cached before we return the currently cached driver
+ driver_name_in_config = CONF.quota.driver
+ if self.__driver_name != driver_name_in_config:
+ self.__driver = importutils.import_object(driver_name_in_config)
+ self.__driver_name = driver_name_in_config
+
return self.__driver
def get_defaults(self, context):
@@ -1044,9 +1156,7 @@ class QuotaEngine(object):
return sorted(self._resources.keys())
def get_reserved(self):
- if isinstance(self._driver, NoopQuotaDriver):
- return -1
- return 0
+ return self._driver.get_reserved()
@api_db_api.context_manager.reader
@@ -1132,6 +1242,37 @@ def _server_group_count_members_by_user_legacy(context, group, user_id):
return {'user': {'server_group_members': count}}
+def is_qfd_populated(context):
+ """Check if user_id and queued_for_delete fields are populated.
+
+ This method is related to counting quota usage from placement. It is not
+ yet possible to count instances from placement, so in the meantime we can
+ use instance mappings for counting. This method is used to determine
+ whether the user_id and queued_for_delete columns are populated in the API
+ database's instance_mappings table. Instance mapping records are not
+ deleted from the database until the database is archived, so
+ queued_for_delete tells us whether or not we should count them for instance
+ quota usage. The user_id field enables us to scope instance quota usage to
+ a user (legacy quota).
+
+ Scoping instance quota to a user is only possible
+ when counting quota usage from placement is configured and unified limits
+ is not configured. When unified limits is configured, quotas are scoped
+ only to projects.
+
+ In the future when it is possible to count instance usage from placement,
+ this method will no longer be needed.
+ """
+ global UID_QFD_POPULATED_CACHE_ALL
+ if not UID_QFD_POPULATED_CACHE_ALL:
+ LOG.debug('Checking whether user_id and queued_for_delete are '
+ 'populated for all projects')
+ UID_QFD_POPULATED_CACHE_ALL = _user_id_queued_for_delete_populated(
+ context)
+
+ return UID_QFD_POPULATED_CACHE_ALL
+
+
def _server_group_count_members_by_user(context, group, user_id):
"""Get the count of server group members for a group by user.
@@ -1149,14 +1290,7 @@ def _server_group_count_members_by_user(context, group, user_id):
# So, we check whether user_id/queued_for_delete is populated for all
# records and cache the result to prevent unnecessary checking once the
# data migration has been completed.
- global UID_QFD_POPULATED_CACHE_ALL
- if not UID_QFD_POPULATED_CACHE_ALL:
- LOG.debug('Checking whether user_id and queued_for_delete are '
- 'populated for all projects')
- UID_QFD_POPULATED_CACHE_ALL = _user_id_queued_for_delete_populated(
- context)
-
- if UID_QFD_POPULATED_CACHE_ALL:
+ if is_qfd_populated(context):
count = objects.InstanceMappingList.get_count_by_uuids_and_user(
context, group.members, user_id)
return {'user': {'server_group_members': count}}
@@ -1214,11 +1348,8 @@ def _instances_cores_ram_count_legacy(context, project_id, user_id=None):
def _cores_ram_count_placement(context, project_id, user_id=None):
- global PLACEMENT_CLIENT
- if not PLACEMENT_CLIENT:
- PLACEMENT_CLIENT = report.SchedulerReportClient()
- return PLACEMENT_CLIENT.get_usages_counts_for_quota(context, project_id,
- user_id=user_id)
+ return report.report_client_singleton().get_usages_counts_for_quota(
+ context, project_id, user_id=user_id)
def _instances_cores_ram_count_api_db_placement(context, project_id,
diff --git a/nova/rpc.py b/nova/rpc.py
index a32b920e06..7a92650414 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -204,11 +204,9 @@ def get_client(target, version_cap=None, serializer=None,
else:
serializer = RequestContextSerializer(serializer)
- return messaging.RPCClient(TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer,
- call_monitor_timeout=call_monitor_timeout)
+ return messaging.get_rpc_client(TRANSPORT, target,
+ version_cap=version_cap, serializer=serializer,
+ call_monitor_timeout=call_monitor_timeout)
def get_server(target, endpoints, serializer=None):
@@ -436,9 +434,9 @@ class ClientRouter(periodic_task.PeriodicTasks):
transport = context.mq_connection
if transport:
cmt = self.default_client.call_monitor_timeout
- return messaging.RPCClient(transport, self.target,
- version_cap=self.version_cap,
- serializer=self.serializer,
- call_monitor_timeout=cmt)
+ return messaging.get_rpc_client(transport, self.target,
+ version_cap=self.version_cap,
+ serializer=self.serializer,
+ call_monitor_timeout=cmt)
else:
return self.default_client
diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index 4b83a57883..7c14f3d7ef 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -52,6 +52,7 @@ AGGREGATE_GENERATION_VERSION = '1.19'
NESTED_PROVIDER_API_VERSION = '1.14'
POST_ALLOCATIONS_API_VERSION = '1.13'
GET_USAGES_VERSION = '1.9'
+PLACEMENTCLIENT = None
AggInfo = collections.namedtuple('AggInfo', ['aggregates', 'generation'])
TraitInfo = collections.namedtuple('TraitInfo', ['traits', 'generation'])
@@ -67,6 +68,51 @@ def warn_limit(self, msg):
LOG.warning(msg)
+def report_client_singleton():
+ """Return a reference to the global placement client singleton.
+
+ This initializes the placement client once and returns a reference
+ to that singleton on subsequent calls. Errors are raised
+ (particularly ks_exc.*) but context-specific error messages are
+ logged for consistency.
+ """
+ # NOTE(danms): The report client maintains internal state in the
+ # form of the provider tree, which will be shared across all users
+ # of this global client. That is not a problem now, but in the
+ # future it may be beneficial to fix that. One idea would be to
+ # change the behavior of the client such that the static-config
+ # pieces of the actual keystone client are separate from the
+ # internal state, so that we can return a new object here with a
+ # context-specific local state object, but with the client bits
+ # shared.
+ global PLACEMENTCLIENT
+ if PLACEMENTCLIENT is None:
+ try:
+ PLACEMENTCLIENT = SchedulerReportClient()
+ except ks_exc.EndpointNotFound:
+ LOG.error('The placement API endpoint was not found.')
+ raise
+ except ks_exc.MissingAuthPlugin:
+ LOG.error('No authentication information found for placement API.')
+ raise
+ except ks_exc.Unauthorized:
+ LOG.error('Placement service credentials do not work.')
+ raise
+ except ks_exc.DiscoveryFailure:
+ LOG.error('Discovering suitable URL for placement API failed.')
+ raise
+ except (ks_exc.ConnectFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout):
+ LOG.error('Placement API service is not responding.')
+ raise
+ except Exception:
+ LOG.error('Failed to initialize placement client '
+ '(is keystone available?)')
+ raise
+ return PLACEMENTCLIENT
+
+
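Callers now share a single SchedulerReportClient instance instead of constructing their own. A minimal usage sketch, assuming `ctxt` is a RequestContext and `project_id` identifies the project being counted:

    from nova.scheduler.client import report

    client = report.report_client_singleton()
    usages = client.get_usages_counts_for_quota(ctxt, project_id)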
def safe_connect(f):
@functools.wraps(f)
def wrapper(self, *a, **k):
@@ -1001,7 +1047,7 @@ class SchedulerReportClient(object):
context: nova_context.RequestContext,
rp_uuid: str,
traits: ty.Iterable[str],
- generation: int = None
+ generation: ty.Optional[int] = None
):
"""Replace a provider's traits with those specified.
@@ -1231,6 +1277,11 @@ class SchedulerReportClient(object):
resp = self.post('/reshaper', payload, version=RESHAPER_VERSION,
global_request_id=context.global_id)
if not resp:
+ if resp.status_code == 409:
+ err = resp.json()['errors'][0]
+ if err['code'] == 'placement.concurrent_update':
+ raise exception.PlacementReshapeConflict(error=resp.text)
+
raise exception.ReshapeFailed(error=resp.text)
return resp
@@ -1264,7 +1315,7 @@ class SchedulerReportClient(object):
# failure here to be fatal to the caller.
try:
self._reshape(context, inventories, allocations)
- except exception.ReshapeFailed:
+ except (exception.ReshapeFailed, exception.PlacementReshapeConflict):
raise
except Exception as e:
# Make sure the original stack trace gets logged.
@@ -1322,7 +1373,6 @@ class SchedulerReportClient(object):
# can inherit.
helper_exceptions = (
exception.InvalidResourceClass,
- exception.InventoryInUse,
exception.ResourceProviderAggregateRetrievalFailed,
exception.ResourceProviderDeletionFailed,
exception.ResourceProviderInUse,
@@ -1341,8 +1391,8 @@ class SchedulerReportClient(object):
# the conflict exception. This signals the resource tracker to
# redrive the update right away rather than waiting until the
# next periodic.
- with excutils.save_and_reraise_exception():
- self._clear_provider_cache_for_tree(rp_uuid)
+ self._clear_provider_cache_for_tree(rp_uuid)
+ raise
except helper_exceptions:
# Invalidate the relevant part of the cache. It gets rebuilt on
# the next pass.
@@ -1383,8 +1433,16 @@ class SchedulerReportClient(object):
if allocations is not None:
# NOTE(efried): We do not catch_all here, because ReshapeFailed
# needs to bubble up right away and be handled specially.
- self._set_up_and_do_reshape(context, old_tree, new_tree,
- allocations)
+ try:
+ self._set_up_and_do_reshape(
+ context, old_tree, new_tree, allocations)
+ except exception.PlacementReshapeConflict:
+ # The conflict means we need to invalidate the local caches and
+ # let the retry mechanism in _update_to_placement re-drive
+ # the reshape on top of the fresh data
+ with excutils.save_and_reraise_exception():
+ self.clear_provider_cache()
+
# The reshape updated provider generations, so the ones we have in
# the cache are now stale. The inventory update below will short
# out, but we would still bounce with a provider generation
@@ -2486,6 +2544,30 @@ class SchedulerReportClient(object):
return self.get(url, version=GET_USAGES_VERSION,
global_request_id=context.global_id)
+ def get_usages_counts_for_limits(self, context, project_id):
+ """Get the usages counts for the purpose of enforcing unified limits
+
+ The response from placement will not contain a resource class if
+ there is no usage. i.e. if there is no usage, you get an empty dict.
+
+ Note that resources are counted as placement sees them; in
+ particular, VCPU and PCPU are counted independently.
+
+ :param context: The request context
+ :param project_id: The project_id to count across
+ :return: A dict containing the project-scoped counts, for example:
+ {'VCPU': 2, 'MEMORY_MB': 1024}
+ :raises: `exception.UsagesRetrievalFailed` if a placement API call
+ fails
+ """
+ LOG.debug('Getting usages for project_id %s from placement',
+ project_id)
+ resp = self._get_usages(context, project_id)
+ if resp:
+ data = resp.json()
+ return data['usages']
+ self._handle_usages_error_from_placement(resp, project_id)
+
def get_usages_counts_for_quota(self, context, project_id, user_id=None):
"""Get the usages counts for the purpose of counting quota usage.
diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py
index 44f283f7ac..785a13279e 100644
--- a/nova/scheduler/filters/__init__.py
+++ b/nova/scheduler/filters/__init__.py
@@ -16,8 +16,12 @@
"""
Scheduler host filters
"""
+from oslo_log import log as logging
+
from nova import filters
+LOG = logging.getLogger(__name__)
+
class BaseHostFilter(filters.BaseFilter):
"""Base class for host filters."""
@@ -28,6 +32,9 @@ class BaseHostFilter(filters.BaseFilter):
# other parameters. We care about running policy filters (i.e.
# ImagePropertiesFilter) but not things that check usage on the
# existing compute node, etc.
+ # This also means that filters marked with RUN_ON_REBUILD = True cannot
+ # rely on allocation candidates, or they need to handle the rebuild case
+ # specially.
RUN_ON_REBUILD = False
def _filter_one(self, obj, spec):
@@ -50,6 +57,43 @@ class BaseHostFilter(filters.BaseFilter):
raise NotImplementedError()
+class CandidateFilterMixin:
+ """Mixing that helps to implement a Filter that needs to filter host by
+ Placement allocation candidates.
+ """
+
+ def filter_candidates(self, host_state, filter_func):
+ """Checks still viable allocation candidates by the filter_func and
+ keep only those that are passing it.
+
+ :param host_state: HostState object holding the list of still viable
+ allocation candidates
+ :param filter_func: A callable that takes an allocation candidate and
+ returns a truthy value if the candidate passes the filter or a
+ falsy value if it does not.
+ """
+ good_candidates = []
+ for candidate in host_state.allocation_candidates:
+ LOG.debug(
+ f'{self.__class__.__name__} tries allocation candidate: '
+ f'{candidate}',
+ )
+ if filter_func(candidate):
+ LOG.debug(
+ f'{self.__class__.__name__} accepted allocation '
+ f'candidate: {candidate}',
+ )
+ good_candidates.append(candidate)
+ else:
+ LOG.debug(
+ f'{self.__class__.__name__} rejected allocation '
+ f'candidate: {candidate}',
+ )
+
+ host_state.allocation_candidates = good_candidates
+ return good_candidates
+
+
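A filter opts in by inheriting both BaseHostFilter and CandidateFilterMixin and calling filter_candidates() from its host_passes() method. A minimal sketch with a hypothetical filter (the real users, the NUMA and PCI passthrough filters below, plug their own predicate into filter_func):

    class ExampleCandidateFilter(BaseHostFilter, CandidateFilterMixin):
        """Hypothetical filter showing the intended mixin usage."""

        RUN_ON_REBUILD = False

        def host_passes(self, host_state, spec_obj):
            # keep only candidates that carry request group mappings;
            # a real filter would evaluate the candidate against spec_obj
            good = self.filter_candidates(
                host_state,
                lambda candidate: candidate.get("mappings"))
            return bool(good)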
class HostFilterHandler(filters.BaseFilterHandler):
def __init__(self):
super(HostFilterHandler, self).__init__(BaseHostFilter)
diff --git a/nova/scheduler/filters/numa_topology_filter.py b/nova/scheduler/filters/numa_topology_filter.py
index 74d6012f82..ae50db90e5 100644
--- a/nova/scheduler/filters/numa_topology_filter.py
+++ b/nova/scheduler/filters/numa_topology_filter.py
@@ -20,7 +20,10 @@ from nova.virt import hardware
LOG = logging.getLogger(__name__)
-class NUMATopologyFilter(filters.BaseHostFilter):
+class NUMATopologyFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Filter on requested NUMA topology."""
# NOTE(sean-k-mooney): In change I0322d872bdff68936033a6f5a54e8296a6fb343
@@ -97,12 +100,19 @@ class NUMATopologyFilter(filters.BaseHostFilter):
if network_metadata:
limits.network_metadata = network_metadata
- instance_topology = (hardware.numa_fit_instance_to_host(
- host_topology, requested_topology,
- limits=limits,
- pci_requests=pci_requests,
- pci_stats=host_state.pci_stats))
- if not instance_topology:
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: hardware.numa_fit_instance_to_host(
+ host_topology,
+ requested_topology,
+ limits=limits,
+ pci_requests=pci_requests,
+ pci_stats=host_state.pci_stats,
+ provider_mapping=candidate["mappings"],
+ ),
+ )
+
+ if not good_candidates:
LOG.debug("%(host)s, %(node)s fails NUMA topology "
"requirements. The instance does not fit on this "
"host.", {'host': host_state.host,
diff --git a/nova/scheduler/filters/pci_passthrough_filter.py b/nova/scheduler/filters/pci_passthrough_filter.py
index f08899586a..992879072a 100644
--- a/nova/scheduler/filters/pci_passthrough_filter.py
+++ b/nova/scheduler/filters/pci_passthrough_filter.py
@@ -20,7 +20,10 @@ from nova.scheduler import filters
LOG = logging.getLogger(__name__)
-class PciPassthroughFilter(filters.BaseHostFilter):
+class PciPassthroughFilter(
+ filters.BaseHostFilter,
+ filters.CandidateFilterMixin,
+):
"""Pci Passthrough Filter based on PCI request
Filter that schedules instances on a host if the host has devices
@@ -47,10 +50,24 @@ class PciPassthroughFilter(filters.BaseHostFilter):
pci_requests = spec_obj.pci_requests
if not pci_requests or not pci_requests.requests:
return True
- if (not host_state.pci_stats or
- not host_state.pci_stats.support_requests(pci_requests.requests)):
+
+ if not host_state.pci_stats:
+ LOG.debug("%(host_state)s doesn't have the required PCI devices"
+ " (%(requests)s)",
+ {'host_state': host_state, 'requests': pci_requests})
+ return False
+
+ good_candidates = self.filter_candidates(
+ host_state,
+ lambda candidate: host_state.pci_stats.support_requests(
+ pci_requests.requests, provider_mapping=candidate["mappings"]
+ ),
+ )
+
+ if not good_candidates:
LOG.debug("%(host_state)s doesn't have the required PCI devices"
" (%(requests)s)",
{'host_state': host_state, 'requests': pci_requests})
return False
+
return True
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 80511ffad6..8cb775a923 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -153,6 +153,8 @@ class HostState(object):
self.updated = None
+ self.allocation_candidates = []
+
def update(self, compute=None, service=None, aggregates=None,
inst_dict=None):
"""Update all information about a host."""
@@ -296,7 +298,9 @@ class HostState(object):
spec_obj.numa_topology = hardware.numa_fit_instance_to_host(
self.numa_topology, spec_obj.numa_topology,
limits=self.limits.get('numa_topology'),
- pci_requests=pci_requests, pci_stats=self.pci_stats)
+ pci_requests=pci_requests,
+ pci_stats=self.pci_stats,
+ provider_mapping=spec_obj.get_request_group_mapping())
self.numa_topology = hardware.numa_usage_from_instance_numa(
self.numa_topology, spec_obj.numa_topology)
@@ -306,7 +310,11 @@ class HostState(object):
instance_cells = None
if spec_obj.numa_topology:
instance_cells = spec_obj.numa_topology.cells
- self.pci_stats.apply_requests(pci_requests, instance_cells)
+ self.pci_stats.apply_requests(
+ pci_requests,
+ spec_obj.get_request_group_mapping(),
+ instance_cells
+ )
# NOTE(sbauza): By considering all cases when the scheduler is called
# and when consume_from_request() is run, we can safely say that there
@@ -314,13 +322,21 @@ class HostState(object):
self.num_io_ops += 1
def __repr__(self):
- return ("(%(host)s, %(node)s) ram: %(free_ram)sMB "
- "disk: %(free_disk)sMB io_ops: %(num_io_ops)s "
- "instances: %(num_instances)s" %
- {'host': self.host, 'node': self.nodename,
- 'free_ram': self.free_ram_mb, 'free_disk': self.free_disk_mb,
- 'num_io_ops': self.num_io_ops,
- 'num_instances': self.num_instances})
+ return (
+ "(%(host)s, %(node)s) ram: %(free_ram)sMB "
+ "disk: %(free_disk)sMB io_ops: %(num_io_ops)s "
+ "instances: %(num_instances)s, "
+ "allocation_candidates: %(num_a_c)s"
+ % {
+ "host": self.host,
+ "node": self.nodename,
+ "free_ram": self.free_ram_mb,
+ "free_disk": self.free_disk_mb,
+ "num_io_ops": self.num_io_ops,
+ "num_instances": self.num_instances,
+ "num_a_c": len(self.allocation_candidates),
+ }
+ )
class HostManager(object):
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 03df615f6a..620519d403 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -20,8 +20,10 @@ Scheduler Service
"""
import collections
+import copy
import random
+from keystoneauth1 import exceptions as ks_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
@@ -66,10 +68,42 @@ class SchedulerManager(manager.Manager):
self.host_manager = host_manager.HostManager()
self.servicegroup_api = servicegroup.API()
self.notifier = rpc.get_notifier('scheduler')
- self.placement_client = report.SchedulerReportClient()
+ self._placement_client = None
+
+ try:
+ # Test our placement client during initialization
+ self.placement_client
+ except (ks_exc.EndpointNotFound,
+ ks_exc.DiscoveryFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ ks_exc.ConnectFailure) as e:
+ # Non-fatal, likely transient (although not definitely);
+ # continue startup but log the warning so that when things
+ # fail later, it will be clear why we cannot do certain
+ # things.
+ LOG.warning('Unable to initialize placement client (%s); '
+ 'Continuing with startup, but scheduling '
+ 'will not be possible.', e)
+ except (ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized) as e:
+ # This is almost certainly a fatal misconfiguration. The
+ # Unauthorized error might be transient, but it is
+ # probably reasonable to consider it fatal.
+ LOG.error('Fatal error initializing placement client; '
+ 'config is incorrect or incomplete: %s', e)
+ raise
+ except Exception as e:
+ # Unknown/unexpected errors here are fatal
+ LOG.error('Fatal error initializing placement client: %s', e)
+ raise
super().__init__(service_name='scheduler', *args, **kwargs)
+ @property
+ def placement_client(self):
+ return report.report_client_singleton()
+
@periodic_task.periodic_task(
spacing=CONF.scheduler.discover_hosts_in_cells_interval,
run_immediately=True)
@@ -299,12 +333,29 @@ class SchedulerManager(manager.Manager):
# host, we virtually consume resources on it so subsequent
# selections can adjust accordingly.
+ def hosts_with_alloc_reqs(hosts_gen):
+ """Extend the HostState objects returned by the generator with
+ the allocation requests of that host
+ """
+ for host in hosts_gen:
+ host.allocation_candidates = copy.deepcopy(
+ alloc_reqs_by_rp_uuid[host.uuid])
+ yield host
+
# Note: remember, we are using a generator-iterator here. So only
# traverse this list once. This can bite you if the hosts
# are being scanned in a filter or weighing function.
hosts = self._get_all_host_states(
elevated, spec_obj, provider_summaries)
+ # alloc_reqs_by_rp_uuid is None during rebuild, so this means we cannot
+ # run filters that use allocation candidates during rebuild
+ if alloc_reqs_by_rp_uuid is not None:
+ # wrap the generator to extend the HostState objects with the
+ # allocation requests for that given host. This is needed to
+ # support scheduler filters filtering on allocation candidates.
+ hosts = hosts_with_alloc_reqs(hosts)
+
# NOTE(sbauza): The RequestSpec.num_instances field contains the number
# of instances created when the RequestSpec was used to first boot some
# instances. This is incorrect when doing a move or resize operation,
@@ -332,6 +383,13 @@ class SchedulerManager(manager.Manager):
# the older dict format representing HostState objects.
# TODO(stephenfin): Remove this when we bump scheduler the RPC API
# version to 5.0
+ # NOTE(gibi): We cannot remove this branch as it is actively used
+ # when nova calls the scheduler during rebuild (not evacuate) to
+ # check if the current host is still good for the new image used
+ # for the rebuild. In this case placement cannot be used to
+ # generate candidates as that would require space on the current
+ # compute for double allocation. So no allocation candidates for
+ # rebuild and therefore alloc_reqs_by_rp_uuid is None
return self._legacy_find_hosts(
context, num_instances, spec_obj, hosts, num_alts,
instance_uuids=instance_uuids)
@@ -345,6 +403,9 @@ class SchedulerManager(manager.Manager):
# The list of hosts that have been selected (and claimed).
claimed_hosts = []
+ # The allocation request allocated on the given claimed host
+ claimed_alloc_reqs = []
+
for num, instance_uuid in enumerate(instance_uuids):
# In a multi-create request, the first request spec from the list
# is passed to the scheduler and that request spec's instance_uuid
@@ -371,21 +432,20 @@ class SchedulerManager(manager.Manager):
# resource provider UUID
claimed_host = None
for host in hosts:
- cn_uuid = host.uuid
- if cn_uuid not in alloc_reqs_by_rp_uuid:
- msg = ("A host state with uuid = '%s' that did not have a "
- "matching allocation_request was encountered while "
- "scheduling. This host was skipped.")
- LOG.debug(msg, cn_uuid)
+ if not host.allocation_candidates:
+ LOG.debug(
+ "The nova scheduler removed every allocation candidate"
+ "for host %s so this host was skipped.",
+ host
+ )
continue
- alloc_reqs = alloc_reqs_by_rp_uuid[cn_uuid]
# TODO(jaypipes): Loop through all allocation_requests instead
# of just trying the first one. For now, since we'll likely
# want to order the allocation_requests in the future based on
# information in the provider summaries, we'll just try to
# claim resources using the first allocation_request
- alloc_req = alloc_reqs[0]
+ alloc_req = host.allocation_candidates[0]
if utils.claim_resources(
elevated, self.placement_client, spec_obj, instance_uuid,
alloc_req,
@@ -405,6 +465,15 @@ class SchedulerManager(manager.Manager):
claimed_instance_uuids.append(instance_uuid)
claimed_hosts.append(claimed_host)
+ claimed_alloc_reqs.append(alloc_req)
+
+ # update the provider mapping in the request spec based
+ # on the allocated candidate as the _consume_selected_host depends
+ # on this information to temporarily consume PCI devices tracked in
+ # placement
+ for request_group in spec_obj.requested_resources:
+ request_group.provider_uuids = alloc_req[
+ 'mappings'][request_group.requester_id]
# Now consume the resources so the filter/weights will change for
# the next instance.
@@ -416,11 +485,19 @@ class SchedulerManager(manager.Manager):
self._ensure_sufficient_hosts(
context, claimed_hosts, num_instances, claimed_instance_uuids)
- # We have selected and claimed hosts for each instance. Now we need to
- # find alternates for each host.
+ # We have selected and claimed hosts for each instance along with a
+ # claimed allocation request. Now we need to find alternates for each
+ # host.
return self._get_alternate_hosts(
- claimed_hosts, spec_obj, hosts, num, num_alts,
- alloc_reqs_by_rp_uuid, allocation_request_version)
+ claimed_hosts,
+ spec_obj,
+ hosts,
+ num,
+ num_alts,
+ alloc_reqs_by_rp_uuid,
+ allocation_request_version,
+ claimed_alloc_reqs,
+ )
def _ensure_sufficient_hosts(
self, context, hosts, required_count, claimed_uuids=None,
@@ -532,7 +609,21 @@ class SchedulerManager(manager.Manager):
def _get_alternate_hosts(
self, selected_hosts, spec_obj, hosts, index, num_alts,
alloc_reqs_by_rp_uuid=None, allocation_request_version=None,
+ selected_alloc_reqs=None,
):
+ """Generate the main Selection and possible alternate Selection
+ objects for each "instance".
+
+ :param selected_hosts: This is a list of HostState objects. Each
+ HostState represents the main selection for a given instance being
+ scheduled (we can have multiple instances during multi create).
+ :param selected_alloc_reqs: This is a list of allocation requests that
+ are already allocated in placement for the main Selection for each
+ instance. This list matches selected_hosts by index. So
+ for the first instance the selected host is selected_hosts[0] and
+ the already allocated placement candidate is
+ selected_alloc_reqs[0].
+ """
# We only need to filter/weigh the hosts again if we're dealing with
# more than one instance and are going to be picking alternates.
if index > 0 and num_alts > 0:
@@ -546,11 +637,10 @@ class SchedulerManager(manager.Manager):
# representing the selected host along with alternates from the same
# cell.
selections_to_return = []
- for selected_host in selected_hosts:
+ for i, selected_host in enumerate(selected_hosts):
# This is the list of hosts for one particular instance.
if alloc_reqs_by_rp_uuid:
- selected_alloc_req = alloc_reqs_by_rp_uuid.get(
- selected_host.uuid)[0]
+ selected_alloc_req = selected_alloc_reqs[i]
else:
selected_alloc_req = None
@@ -571,15 +661,17 @@ class SchedulerManager(manager.Manager):
if len(selected_plus_alts) >= num_alts + 1:
break
+ # TODO(gibi): In theory we could generate alternatives on the
+ # same host if that host has different possible allocation
+ # candidates for the request. But we don't do that today.
if host.cell_uuid == cell_uuid and host not in selected_hosts:
if alloc_reqs_by_rp_uuid is not None:
- alt_uuid = host.uuid
- if alt_uuid not in alloc_reqs_by_rp_uuid:
+ if not host.allocation_candidates:
msg = ("A host state with uuid = '%s' that did "
- "not have a matching allocation_request "
+ "not have any remaining allocation_request "
"was encountered while scheduling. This "
"host was skipped.")
- LOG.debug(msg, alt_uuid)
+ LOG.debug(msg, host.uuid)
continue
# TODO(jaypipes): Loop through all allocation_requests
@@ -588,7 +680,13 @@ class SchedulerManager(manager.Manager):
# the future based on information in the provider
# summaries, we'll just try to claim resources using
# the first allocation_request
- alloc_req = alloc_reqs_by_rp_uuid[alt_uuid][0]
+ # NOTE(gibi): we are using, and re-using, allocation
+ # candidates for alternatives here. This is OK as
+ # these candidates are not yet allocated in placement
+ # and we don't know if an alternate will ever be used.
+ # To increase our chances of success we could try to use a
+ # different candidate for each alternative, though.
+ alloc_req = host.allocation_candidates[0]
alt_selection = objects.Selection.from_host_state(
host, alloc_req, allocation_request_version)
else:
diff --git a/nova/scheduler/request_filter.py b/nova/scheduler/request_filter.py
index f837efe653..bf5c32f372 100644
--- a/nova/scheduler/request_filter.py
+++ b/nova/scheduler/request_filter.py
@@ -24,7 +24,7 @@ from nova.network import neutron
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils
-
+from nova.virt import hardware
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -212,6 +212,9 @@ def transform_image_metadata(ctxt, request_spec):
'hw_disk_bus': 'COMPUTE_STORAGE_BUS',
'hw_video_model': 'COMPUTE_GRAPHICS_MODEL',
'hw_vif_model': 'COMPUTE_NET_VIF_MODEL',
+ 'hw_architecture': 'HW_ARCH',
+ 'hw_emulation_architecture': 'COMPUTE_ARCH',
+ 'hw_viommu_model': 'COMPUTE_VIOMMU',
}
trait_names = []
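Each of these keys is converted into a required trait by appending the upper-cased property value to the listed prefix, assuming the resulting name is defined in os-traits. For example (illustrative values):

    # image/flavor property                 -> required placement trait
    hw_architecture = 'x86_64'              # HW_ARCH_X86_64
    hw_emulation_architecture = 'aarch64'   # COMPUTE_ARCH_AARCH64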
@@ -309,7 +312,7 @@ def routed_networks_filter(
# Get the clients we need
network_api = neutron.API()
- report_api = report.SchedulerReportClient()
+ report_api = report.report_client_singleton()
for requested_network in requested_networks:
network_id = None
@@ -365,6 +368,68 @@ def routed_networks_filter(
return True
+@trace_request_filter
+def remote_managed_ports_filter(
+ context: nova_context.RequestContext,
+ request_spec: 'objects.RequestSpec',
+) -> bool:
+ """Filter out hosts without remote managed port support (driver or hw).
+
+ If a request spec contains VNIC_TYPE_REMOTE_MANAGED ports, the
+ remote-managed port trait (COMPUTE_REMOTE_MANAGED_PORTS) is added to
+ the request in order to pre-filter out hosts whose compute drivers do
+ not support remote-managed ports, as well as hosts that do not have
+ device pools providing remote-managed ports (actual device
+ availability, beyond the pool presence check, is verified at
+ PciPassthroughFilter execution time).
+ """
+ if request_spec.requested_networks:
+ network_api = neutron.API()
+ for request_net in request_spec.requested_networks:
+ if request_net.port_id and network_api.is_remote_managed_port(
+ context, request_net.port_id):
+ request_spec.root_required.add(
+ os_traits.COMPUTE_REMOTE_MANAGED_PORTS)
+ LOG.debug('remote_managed_ports_filter request filter added '
+ f'trait {os_traits.COMPUTE_REMOTE_MANAGED_PORTS}')
+ return True
+
+
+@trace_request_filter
+def ephemeral_encryption_filter(
+ ctxt: nova_context.RequestContext,
+ request_spec: 'objects.RequestSpec'
+) -> bool:
+ """Pre-filter resource provides by ephemeral encryption support
+
+ This filter will only retain compute node resource providers that support
+ ephemeral storage encryption when the associated image properties or flavor
+ extra specs are present within the request spec.
+ """
+ # Skip if ephemeral encryption isn't requested in the flavor or image
+ if not hardware.get_ephemeral_encryption_constraint(
+ request_spec.flavor, request_spec.image):
+ LOG.debug("ephemeral_encryption_filter skipped")
+ return False
+
+ # Always add the feature trait regardless of the format being provided
+ request_spec.root_required.add(os_traits.COMPUTE_EPHEMERAL_ENCRYPTION)
+ LOG.debug("ephemeral_encryption_filter added trait "
+ "COMPUTE_EPHEMERAL_ENCRYPTION")
+
+ # Try to find the format in the flavor or image and add as a trait
+ eph_format = hardware.get_ephemeral_encryption_format(
+ request_spec.flavor, request_spec.image)
+ if eph_format:
+ # We don't need to validate the trait here because the earlier call to
+ # get_ephemeral_encryption_format will raise if it is not valid
+ trait_name = f"COMPUTE_EPHEMERAL_ENCRYPTION_{eph_format.upper()}"
+ request_spec.root_required.add(trait_name)
+ LOG.debug(f"ephemeral_encryption_filter added trait {trait_name}")
+
+ return True
+
+
ALL_REQUEST_FILTERS = [
require_tenant_aggregate,
map_az_to_placement_aggregate,
@@ -374,6 +439,8 @@ ALL_REQUEST_FILTERS = [
transform_image_metadata,
accelerators_filter,
routed_networks_filter,
+ remote_managed_ports_filter,
+ ephemeral_encryption_filter,
]
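
Both request filters added above follow the same pattern: inspect the
RequestSpec, add a required trait to request_spec.root_required so placement
only returns matching resource providers, and return True or False depending
on whether the filter applied. A minimal sketch of that pattern, assuming a
hypothetical 'hw:example_feature' extra spec and COMPUTE_EXAMPLE_FEATURE
trait (neither is a real Nova name):

    @trace_request_filter
    def example_feature_filter(ctxt, request_spec):
        # Hypothetical stand-in for a real flavor/image constraint check such
        # as hardware.get_ephemeral_encryption_constraint().
        if request_spec.flavor.extra_specs.get(
                'hw:example_feature') != 'required':
            return False
        # Only hosts whose root resource provider exposes the trait survive.
        request_spec.root_required.add('COMPUTE_EXAMPLE_FEATURE')
        return True

A new filter only runs once it is appended to ALL_REQUEST_FILTERS, as done
above for remote_managed_ports_filter and ephemeral_encryption_filter.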
diff --git a/nova/scheduler/rpcapi.py b/nova/scheduler/rpcapi.py
index a3d0109bdc..08b768eb2b 100644
--- a/nova/scheduler/rpcapi.py
+++ b/nova/scheduler/rpcapi.py
@@ -171,15 +171,15 @@ class SchedulerAPI(object):
def update_instance_info(self, ctxt, host_name, instance_info):
cctxt = self.client.prepare(version='4.2', fanout=True)
- return cctxt.cast(ctxt, 'update_instance_info', host_name=host_name,
- instance_info=instance_info)
+ cctxt.cast(ctxt, 'update_instance_info', host_name=host_name,
+ instance_info=instance_info)
def delete_instance_info(self, ctxt, host_name, instance_uuid):
cctxt = self.client.prepare(version='4.2', fanout=True)
- return cctxt.cast(ctxt, 'delete_instance_info', host_name=host_name,
- instance_uuid=instance_uuid)
+ cctxt.cast(ctxt, 'delete_instance_info', host_name=host_name,
+ instance_uuid=instance_uuid)
def sync_instance_info(self, ctxt, host_name, instance_uuids):
cctxt = self.client.prepare(version='4.2', fanout=True)
- return cctxt.cast(ctxt, 'sync_instance_info', host_name=host_name,
- instance_uuids=instance_uuids)
+ cctxt.cast(ctxt, 'sync_instance_info', host_name=host_name,
+ instance_uuids=instance_uuids)
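
The dropped return statements are harmless: oslo.messaging cast() is
fire-and-forget and always returns None, so nothing useful was ever returned
to callers here. Only call() blocks for and returns a reply, roughly as below
(the 'get_something' method name is illustrative only):

    cctxt.cast(ctxt, 'sync_instance_info', host_name=host_name,
               instance_uuids=instance_uuids)    # asynchronous, returns None
    result = cctxt.call(ctxt, 'get_something')   # synchronous, returns a reply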
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index a98aabe51b..02c44093bd 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -615,6 +615,10 @@ def resources_from_flavor(instance, flavor):
"""
is_bfv = compute_utils.is_volume_backed_instance(instance._context,
instance)
+ return _get_resources(flavor, is_bfv)
+
+
+def _get_resources(flavor, is_bfv):
# create a fake RequestSpec as a wrapper to the caller
req_spec = objects.RequestSpec(flavor=flavor, is_bfv=is_bfv)
@@ -628,6 +632,11 @@ def resources_from_flavor(instance, flavor):
return res_req.merged_resources()
+def resources_for_limits(flavor, is_bfv):
+ """Work out what unified limits may be exceeded."""
+ return _get_resources(flavor, is_bfv)
+
+
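
A sketch of how resources_for_limits might be consumed by unified limits
quota code (the variable names here are assumptions, not taken from this
change):

    # Maps resource classes (e.g. 'VCPU', 'MEMORY_MB', 'DISK_GB') to the
    # amounts the flavor would consume; is_bfv controls whether the root
    # disk is counted.
    requested = resources_for_limits(flavor, is_bfv=False)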
def resources_from_request_spec(ctxt, spec_obj, host_manager,
enable_pinning_translate=True):
"""Given a RequestSpec object, returns a ResourceRequest of the resources,
@@ -1071,6 +1080,17 @@ _SUPPORTS_SOFT_AFFINITY = None
_SUPPORTS_SOFT_ANTI_AFFINITY = None
+def reset_globals():
+ global _SUPPORTS_AFFINITY
+ _SUPPORTS_AFFINITY = None
+ global _SUPPORTS_ANTI_AFFINITY
+ _SUPPORTS_ANTI_AFFINITY = None
+ global _SUPPORTS_SOFT_AFFINITY
+ _SUPPORTS_SOFT_AFFINITY = None
+ global _SUPPORTS_SOFT_ANTI_AFFINITY
+ _SUPPORTS_SOFT_ANTI_AFFINITY = None
+
+
def _get_group_details(context, instance_uuid, user_group_hosts=None):
"""Provide group_hosts and group_policies sets related to instances if
those instances are belonging to a group and if corresponding filters are
diff --git a/nova/scheduler/weights/hypervisor_version.py b/nova/scheduler/weights/hypervisor_version.py
new file mode 100644
index 0000000000..0cd7b0a824
--- /dev/null
+++ b/nova/scheduler/weights/hypervisor_version.py
@@ -0,0 +1,39 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Hypervisor Version Weigher. Weigh hosts by their relative hypervisor version.
+
+The default behavior is to prefer newer hosts. To invert that preference, set
+the 'hypervisor_version_weight_multiplier' option to a negative number; the
+weighing then has the opposite effect of the default.
+"""
+
+import nova.conf
+from nova.scheduler import utils
+from nova.scheduler import weights
+
+CONF = nova.conf.CONF
+
+
+class HypervisorVersionWeigher(weights.BaseHostWeigher):
+
+ def weight_multiplier(self, host_state):
+ """Override the weight multiplier."""
+ return utils.get_weight_multiplier(
+ host_state, 'hypervisor_version_weight_multiplier',
+ CONF.filter_scheduler.hypervisor_version_weight_multiplier)
+
+ def _weigh_object(self, host_state, weight_properties):
+ """Higher weights win. We want newer hosts by default."""
+ # convert None to 0
+ return host_state.hypervisor_version or 0
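
The weigher reads its multiplier from configuration, and get_weight_multiplier
also lets a host aggregate override it via metadata of the same key. A sketch
of a nova.conf snippet that inverts the default preference for newer
hypervisors (the option is read from the [filter_scheduler] group, per
CONF.filter_scheduler above):

    [filter_scheduler]
    hypervisor_version_weight_multiplier = -1.0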
diff --git a/nova/service.py b/nova/service.py
index 2c10224926..bd3b49ae66 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -156,11 +156,11 @@ class Service(service.Service):
LOG.info('Starting %(topic)s node (version %(version)s)',
{'topic': self.topic, 'version': verstr})
self.basic_config_check()
- self.manager.init_host()
- self.model_disconnected = False
ctxt = context.get_admin_context()
self.service_ref = objects.Service.get_by_host_and_binary(
ctxt, self.host, self.binary)
+ self.manager.init_host(self.service_ref)
+ self.model_disconnected = False
if self.service_ref:
_update_service_ref(self.service_ref)
diff --git a/nova/test.py b/nova/test.py
index a6449c01f0..e37967b06d 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -35,9 +35,9 @@ import os
import os.path
import pprint
import sys
+from unittest import mock
import fixtures
-import mock
from oslo_cache import core as cache
from oslo_concurrency import lockutils
from oslo_config import cfg
@@ -61,6 +61,8 @@ from nova import exception
from nova import objects
from nova.objects import base as objects_base
from nova import quota
+from nova.scheduler.client import report
+from nova.scheduler import utils as scheduler_utils
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import matchers
from nova import utils
@@ -170,6 +172,12 @@ class TestCase(base.BaseTestCase):
# base class when USES_DB is True.
NUMBER_OF_CELLS = 1
+ # The stable compute id stuff is intentionally singleton-ish, which makes
+ # it a nightmare for testing multiple host/node combinations in tests like
+ # we do. So, mock it out by default, unless the test is specifically
+ # designed to handle it.
+ STUB_COMPUTE_ID = True
+
def setUp(self):
"""Run before each test method to initialize test environment."""
# Ensure BaseTestCase's ConfigureLogging fixture is disabled since
@@ -285,11 +293,30 @@ class TestCase(base.BaseTestCase):
quota.UID_QFD_POPULATED_CACHE_ALL = False
self.useFixture(nova_fixtures.GenericPoisonFixture())
+ self.useFixture(nova_fixtures.SysFsPoisonFixture())
+
+ # Additional module names can be added to this set if needed
+ self.useFixture(nova_fixtures.ImportModulePoisonFixture(
+ set(['guestfs', 'libvirt'])))
# make sure that the wsgi app is fully initialized for all testcase
# instead of only once initialized for test worker
wsgi_app.init_global_data.reset()
+ # Reset the placement client singleton
+ report.PLACEMENTCLIENT = None
+
+ # Reset our local node uuid cache (and avoid writing to the
+ # local filesystem when we generate a new one).
+ if self.STUB_COMPUTE_ID:
+ self.useFixture(nova_fixtures.ComputeNodeIdFixture())
+
+ # Reset globals indicating affinity filter support. Some tests may set
+ # self.flags(enabled_filters=...) which could make the affinity filter
+ # support globals get set to a non-default configuration which affects
+ # all other tests.
+ scheduler_utils.reset_globals()
+
def _setup_cells(self):
"""Setup a normal cellsv2 environment.
@@ -355,7 +382,7 @@ class TestCase(base.BaseTestCase):
self.useFixture(fixtures.MonkeyPatch(old, new))
@staticmethod
- def patch_exists(patched_path, result):
+ def patch_exists(patched_path, result, other=None):
"""Provide a static method version of patch_exists(), which if you
haven't already imported nova.test can be slightly easier to
use as a context manager within a test method via:
@@ -364,7 +391,7 @@ class TestCase(base.BaseTestCase):
with self.patch_exists(path, True):
...
"""
- return patch_exists(patched_path, result)
+ return patch_exists(patched_path, result, other)
@staticmethod
def patch_open(patched_path, read_data):
@@ -678,6 +705,7 @@ class SubclassSignatureTestCase(testtools.TestCase, metaclass=abc.ABCMeta):
raise NotImplementedError()
def setUp(self):
+ self.useFixture(nova_fixtures.ConfFixture(CONF))
self.base = self._get_base_class()
super(SubclassSignatureTestCase, self).setUp()
@@ -848,10 +876,12 @@ class ContainKeyValue(object):
@contextlib.contextmanager
-def patch_exists(patched_path, result):
+def patch_exists(patched_path, result, other=None):
"""Selectively patch os.path.exists() so that if it's called with
patched_path, return result. Calls with any other path are passed
- through to the real os.path.exists() function.
+ through to the real os.path.exists() function if other is not provided.
+ If other is provided then that will be the result of the call on paths
+ other than patched_path.
Either import and use as a decorator / context manager, or use the
nova.TestCase.patch_exists() static method as a context manager.
@@ -885,7 +915,10 @@ def patch_exists(patched_path, result):
def fake_exists(path):
if path == patched_path:
return result
- return real_exists(path)
+ elif other is not None:
+ return other
+ else:
+ return real_exists(path)
with mock.patch.object(os.path, "exists") as mock_exists:
mock_exists.side_effect = fake_exists
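
With the new optional 'other' argument, a test can force a fixed answer for
every path other than the patched one instead of falling through to the real
os.path.exists(). A minimal usage sketch (the path is illustrative):

    with self.patch_exists('/fake/kernel/param', True, other=False):
        # '/fake/kernel/param' reports as existing; all other paths report
        # as missing rather than being checked on the real filesystem.
        ...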
diff --git a/nova/tests/fixtures/__init__.py b/nova/tests/fixtures/__init__.py
index df254608fd..9ff4a2a601 100644
--- a/nova/tests/fixtures/__init__.py
+++ b/nova/tests/fixtures/__init__.py
@@ -16,6 +16,8 @@ from .cast_as_call import CastAsCallFixture # noqa: F401
from .cinder import CinderFixture # noqa: F401
from .conf import ConfFixture # noqa: F401, F403
from .cyborg import CyborgFixture # noqa: F401
+from .filesystem import SysFileSystemFixture # noqa: F401
+from .filesystem import TempFileSystemFixture # noqa: F401
from .glance import GlanceFixture # noqa: F401
from .libvirt import LibvirtFixture # noqa: F401
from .libvirt_imagebackend import LibvirtImageBackendFixture # noqa: F401
diff --git a/nova/tests/fixtures/cinder.py b/nova/tests/fixtures/cinder.py
index 97b32d9b84..025a3d8b81 100644
--- a/nova/tests/fixtures/cinder.py
+++ b/nova/tests/fixtures/cinder.py
@@ -47,6 +47,13 @@ class CinderFixture(fixtures.Fixture):
# This represents a bootable image-backed volume to test
# boot-from-volume scenarios.
IMAGE_BACKED_VOL = '6ca404f3-d844-4169-bb96-bc792f37de98'
+
+ # This represents a bootable image-backed volume to test
+ # boot-from-volume scenarios with
+ # os_require_quiesce
+ # hw_qemu_guest_agent
+ IMAGE_BACKED_VOL_QUIESCE = '6ca404f3-d844-4169-bb96-bc792f37de26'
+
# This represents a bootable image-backed volume with required traits
# as part of volume image metadata
IMAGE_WITH_TRAITS_BACKED_VOL = '6194fc02-c60e-4a01-a8e5-600798208b5f'
@@ -157,6 +164,13 @@ class CinderFixture(fixtures.Fixture):
'image_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6'
}
+ if volume_id == self.IMAGE_BACKED_VOL_QUIESCE:
+ volume['bootable'] = True
+ volume['volume_image_metadata'] = {
+ "os_require_quiesce": "True",
+ "hw_qemu_guest_agent": "True"
+ }
+
if volume_id == self.IMAGE_WITH_TRAITS_BACKED_VOL:
volume['bootable'] = True
volume['volume_image_metadata'] = {
@@ -327,6 +341,16 @@ class CinderFixture(fixtures.Fixture):
_find_attachment(attachment_id)
LOG.info('Completing volume attachment: %s', attachment_id)
+ def fake_reimage_volume(*args, **kwargs):
+ if self.IMAGE_BACKED_VOL not in args:
+ raise exception.VolumeNotFound()
+ if 'reimage_reserved' not in kwargs:
+ raise exception.InvalidInput('reimage_reserved not specified')
+
+ def fake_get_absolute_limits(_self, context):
+ limits = {'totalSnapshotsUsed': 0, 'maxTotalSnapshots': -1}
+ return limits
+
self.test.stub_out(
'nova.volume.cinder.API.attachment_create', fake_attachment_create)
self.test.stub_out(
@@ -366,6 +390,12 @@ class CinderFixture(fixtures.Fixture):
self.test.stub_out(
'nova.volume.cinder.API.terminate_connection',
lambda *args, **kwargs: None)
+ self.test.stub_out(
+ 'nova.volume.cinder.API.reimage_volume',
+ fake_reimage_volume)
+ self.test.stub_out(
+ 'nova.volume.cinder.API.get_absolute_limits',
+ fake_get_absolute_limits)
def volume_ids_for_instance(self, instance_uuid):
for volume_id, attachments in self.volume_to_attachment.items():
diff --git a/nova/tests/fixtures/filesystem.py b/nova/tests/fixtures/filesystem.py
new file mode 100644
index 0000000000..932d42fe27
--- /dev/null
+++ b/nova/tests/fixtures/filesystem.py
@@ -0,0 +1,81 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import shutil
+import tempfile
+from unittest import mock
+
+import fixtures
+
+from nova import filesystem
+from nova.virt.libvirt.cpu import core
+
+
+SYS = 'sys'
+
+
+class TempFileSystemFixture(fixtures.Fixture):
+ """Creates a fake / filesystem"""
+
+ def _setUp(self):
+ self.temp_dir = tempfile.TemporaryDirectory(prefix='fake_fs')
+ # NOTE(sbauza): I/O disk errors may raise an exception here, as we
+ # don't ignore them. If that's causing a problem in our CI jobs, the
+ # recommended solution is to use shutil.rmtree instead of cleanup()
+ # with ignore_errors parameter set to True (or wait for the minimum
+ # python version to be 3.10 as TemporaryDirectory will provide
+ # ignore_cleanup_errors parameter)
+ self.addCleanup(self.temp_dir.cleanup)
+
+
+class SysFileSystemFixture(TempFileSystemFixture):
+ """Creates a fake /sys filesystem"""
+
+ def __init__(self, cpus_supported=None):
+ self.cpus_supported = cpus_supported or 10
+
+ def _setUp(self):
+ super()._setUp()
+ self.sys_path = os.path.join(self.temp_dir.name, SYS)
+ self.addCleanup(shutil.rmtree, self.sys_path, ignore_errors=True)
+
+ sys_patcher = mock.patch(
+ 'nova.filesystem.SYS',
+ new_callable=mock.PropertyMock(return_value=self.sys_path))
+ self.sys_mock = sys_patcher.start()
+ self.addCleanup(sys_patcher.stop)
+
+ avail_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.AVAILABLE_PATH',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/present')))
+ self.avail_path_mock = avail_path_patcher.start()
+ self.addCleanup(avail_path_patcher.stop)
+
+ cpu_path_patcher = mock.patch(
+ 'nova.virt.libvirt.cpu.core.CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(
+ return_value=os.path.join(self.sys_path,
+ 'devices/system/cpu/cpu%(core)s')))
+ self.cpu_path_mock = cpu_path_patcher.start()
+ self.addCleanup(cpu_path_patcher.stop)
+
+ for cpu_nr in range(self.cpus_supported):
+ cpu_dir = os.path.join(self.cpu_path_mock % {'core': cpu_nr})
+ os.makedirs(os.path.join(cpu_dir, 'cpufreq'))
+ filesystem.write_sys(
+ os.path.join(cpu_dir, 'cpufreq/scaling_governor'),
+ data='powersave')
+ filesystem.write_sys(core.AVAILABLE_PATH,
+ f'0-{self.cpus_supported - 1}')
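
A usage sketch for the new fixture (the test class below is hypothetical): it
redirects nova.filesystem.SYS and the libvirt cpu core paths into a temporary
directory pre-populated with the requested number of fake CPUs:

    class FakeSysCpuTest(test.NoDBTestCase):
        def test_scaling_governor(self):
            self.useFixture(
                nova_fixtures.SysFileSystemFixture(cpus_supported=4))
            # nova.virt.libvirt.cpu.core now reads and writes the fake /sys
            # tree instead of the real one.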
diff --git a/nova/tests/fixtures/glance.py b/nova/tests/fixtures/glance.py
index cf68f490b4..b718f28c2a 100644
--- a/nova/tests/fixtures/glance.py
+++ b/nova/tests/fixtures/glance.py
@@ -15,6 +15,7 @@ import datetime
import fixtures
from oslo_log import log as logging
+from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
from nova import exception
@@ -198,6 +199,32 @@ class GlanceFixture(fixtures.Fixture):
},
}
+ eph_encryption = copy.deepcopy(image1)
+ eph_encryption['id'] = uuidsentinel.eph_encryption
+ eph_encryption['properties'] = {
+ 'hw_ephemeral_encryption': 'True'
+ }
+
+ eph_encryption_disabled = copy.deepcopy(image1)
+ eph_encryption_disabled['id'] = uuidsentinel.eph_encryption_disabled
+ eph_encryption_disabled['properties'] = {
+ 'hw_ephemeral_encryption': 'False'
+ }
+
+ eph_encryption_luks = copy.deepcopy(image1)
+ eph_encryption_luks['id'] = uuidsentinel.eph_encryption_luks
+ eph_encryption_luks['properties'] = {
+ 'hw_ephemeral_encryption': 'True',
+ 'hw_ephemeral_encryption_format': 'luks'
+ }
+
+ eph_encryption_plain = copy.deepcopy(image1)
+ eph_encryption_plain['id'] = uuidsentinel.eph_encryption_plain
+ eph_encryption_plain['properties'] = {
+ 'hw_ephemeral_encryption': 'True',
+ 'hw_ephemeral_encryption_format': 'plain'
+ }
+
def __init__(self, test):
super().__init__()
self.test = test
@@ -222,6 +249,10 @@ class GlanceFixture(fixtures.Fixture):
self.create(None, self.image5)
self.create(None, self.auto_disk_config_disabled_image)
self.create(None, self.auto_disk_config_enabled_image)
+ self.create(None, self.eph_encryption)
+ self.create(None, self.eph_encryption_disabled)
+ self.create(None, self.eph_encryption_luks)
+ self.create(None, self.eph_encryption_plain)
self._imagedata = {}
diff --git a/nova/tests/fixtures/libvirt.py b/nova/tests/fixtures/libvirt.py
index 0faf9eb4c5..4f48463118 100644
--- a/nova/tests/fixtures/libvirt.py
+++ b/nova/tests/fixtures/libvirt.py
@@ -18,10 +18,10 @@ import sys
import textwrap
import time
import typing as ty
+from unittest import mock
import fixtures
from lxml import etree
-import mock
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
@@ -31,6 +31,7 @@ from nova.objects import fields as obj_fields
from nova.tests.fixtures import libvirt_data as fake_libvirt_data
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
+from nova.virt.libvirt import host
# Allow passing None to the various connect methods
@@ -275,6 +276,7 @@ class FakePCIDevice(object):
<product id='0x%(prod_id)s'>%(prod_name)s</product>
<vendor id='0x%(vend_id)s'>%(vend_name)s</vendor>
%(capability)s
+ %(vpd_capability)s
<iommuGroup number='%(iommu_group)d'>
<address domain='0x0000' bus='%(bus)#02x' slot='%(slot)#02x' function='0x%(function)d'/>
</iommuGroup>
@@ -293,13 +295,22 @@ class FakePCIDevice(object):
<availableInstances>%(instances)s</availableInstances>
</type>""".strip()) # noqa
+ vpd_cap_templ = textwrap.dedent("""
+ <capability type='vpd'>
+ <name>%(name)s</name>
+ %(fields)s
+ </capability>""".strip())
+ vpd_fields_templ = textwrap.dedent("""
+ <fields access='%(access)s'>%(section_fields)s</fields>""".strip())
+ vpd_field_templ = """<%(field_name)s>%(field_value)s</%(field_name)s>"""
+
is_capable_of_mdevs = False
def __init__(
self, dev_type, bus, slot, function, iommu_group, numa_node, *,
vf_ratio=None, multiple_gpu_types=False, generic_types=False,
parent=None, vend_id=None, vend_name=None, prod_id=None,
- prod_name=None, driver_name=None,
+ prod_name=None, driver_name=None, vpd_fields=None, mac_address=None,
):
"""Populate pci devices
@@ -321,6 +332,8 @@ class FakePCIDevice(object):
:param prod_id: (str) The product ID.
:param prod_name: (str) The product name.
:param driver_name: (str) The driver name.
+ :param mac_address: (str) The MAC of the device.
+ Used in case of SRIOV PFs
"""
self.dev_type = dev_type
@@ -339,6 +352,9 @@ class FakePCIDevice(object):
self.prod_id = prod_id
self.prod_name = prod_name
self.driver_name = driver_name
+ self.mac_address = mac_address
+
+ self.vpd_fields = vpd_fields
self.generate_xml()
@@ -352,7 +368,9 @@ class FakePCIDevice(object):
assert not self.vf_ratio, 'vf_ratio does not apply for PCI devices'
if self.dev_type in ('PF', 'VF'):
- assert self.vf_ratio, 'require vf_ratio for PFs and VFs'
+ assert (
+ self.vf_ratio is not None
+ ), 'require vf_ratio for PFs and VFs'
if self.dev_type == 'VF':
assert self.parent, 'require parent for VFs'
@@ -447,6 +465,7 @@ class FakePCIDevice(object):
'prod_name': prod_name,
'driver': driver,
'capability': capability,
+ 'vpd_capability': self.format_vpd_cap(),
'iommu_group': self.iommu_group,
'numa_node': self.numa_node,
'parent': parent,
@@ -457,9 +476,37 @@ class FakePCIDevice(object):
if self.numa_node == -1:
self.pci_device = self.pci_device.replace("<numa node='-1'/>", "")
+ def format_vpd_cap(self):
+ if not self.vpd_fields:
+ return ''
+ fields = []
+ for access_type in ('readonly', 'readwrite'):
+ section_fields = []
+ for field_name, field_value in self.vpd_fields.get(
+ access_type, {}).items():
+ section_fields.append(self.vpd_field_templ % {
+ 'field_name': field_name,
+ 'field_value': field_value,
+ })
+ if section_fields:
+ fields.append(
+ self.vpd_fields_templ % {
+ 'access': access_type,
+ 'section_fields': '\n'.join(section_fields),
+ }
+ )
+ return self.vpd_cap_templ % {
+ 'name': self.vpd_fields.get('name', ''),
+ 'fields': '\n'.join(fields)
+ }
+
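
For reference, a sketch of the dictionary shape format_vpd_cap() expects for
vpd_fields (values are illustrative, loosely modeled on the BlueField-2 test
data added to libvirt_data.py later in this change):

    vpd_fields = {
        'name': 'BlueField-2 DPU 25GbE Dual-Port SFP56',
        'readonly': {'serial_number': 'MT2113X00000',
                     'part_number': 'MBF2H332A-AEEOT'},
        'readwrite': {'asset_tag': 'fooasset'},
    }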
def XMLDesc(self, flags):
return self.pci_device
+ @property
+ def address(self):
+ return "0000:%02x:%02x.%1x" % (self.bus, self.slot, self.function)
+
# TODO(stephenfin): Remove all of these HostFooDevicesInfo objects in favour of
# a unified devices object
@@ -487,7 +534,7 @@ class HostPCIDevicesInfo(object):
"""
self.devices = {}
- if not (num_vfs or num_pfs) and not num_mdevcap:
+ if not (num_vfs or num_pfs or num_pci) and not num_mdevcap:
return
if num_vfs and not num_pfs:
@@ -572,7 +619,7 @@ class HostPCIDevicesInfo(object):
self, dev_type, bus, slot, function, iommu_group, numa_node,
vf_ratio=None, multiple_gpu_types=False, generic_types=False,
parent=None, vend_id=None, vend_name=None, prod_id=None,
- prod_name=None, driver_name=None,
+ prod_name=None, driver_name=None, vpd_fields=None, mac_address=None,
):
pci_dev_name = _get_libvirt_nodedev_name(bus, slot, function)
@@ -593,7 +640,10 @@ class HostPCIDevicesInfo(object):
vend_name=vend_name,
prod_id=prod_id,
prod_name=prod_name,
- driver_name=driver_name)
+ driver_name=driver_name,
+ vpd_fields=vpd_fields,
+ mac_address=mac_address,
+ )
self.devices[pci_dev_name] = dev
return dev
@@ -612,6 +662,13 @@ class HostPCIDevicesInfo(object):
return [dev for dev in self.devices
if self.devices[dev].is_capable_of_mdevs]
+ def get_pci_address_mac_mapping(self):
+ return {
+ device.address: device.mac_address
+ for dev_addr, device in self.devices.items()
+ if device.mac_address
+ }
+
class FakeMdevDevice(object):
template = """
@@ -1377,21 +1434,31 @@ class Domain(object):
'Test attempts to add more than 8 PCI devices. This is '
'not supported by the fake libvirt implementation.')
nic['func'] = func
- # this branch covers most interface types with a source
- # such as linux bridge interfaces.
- if 'source' in nic:
+ if nic['type'] in ('ethernet',):
+ # this branch covers kernel ovs interfaces
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
- <source %(type)s='%(source)s'/>
<target dev='tap274487d1-6%(func)s'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03'
function='0x%(func)s'/>
</interface>''' % nic
- elif nic['type'] in ('ethernet',):
- # this branch covers kernel ovs interfaces
+ elif nic['type'] in ('vdpa',):
+ # this branch covers hardware offloaded ovs with vdpa
nics += '''<interface type='%(type)s'>
<mac address='%(mac)s'/>
+ <source dev='%(source)s'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
+ function='0x%(func)s'/>
+ </interface>''' % nic
+ # this branch covers most interface types with a source
+ # such as linux bridge interfaces.
+ elif 'source' in nic:
+ nics += '''<interface type='%(type)s'>
+ <mac address='%(mac)s'/>
+ <source %(type)s='%(source)s'/>
<target dev='tap274487d1-6%(func)s'/>
+ <address type='pci' domain='0x0000' bus='0x00' slot='0x03'
+ function='0x%(func)s'/>
</interface>''' % nic
else:
# This branch covers the macvtap vnic-type.
@@ -1977,6 +2044,12 @@ class Connection(object):
return VIR_CPU_COMPARE_IDENTICAL
+ def compareHypervisorCPU(
+ self, emulator, arch, machine, virttype,
+ xml, flags
+ ):
+ return self.compareCPU(xml, flags)
+
def getCPUStats(self, cpuNum, flag):
if cpuNum < 2:
return {'kernel': 5664160000000,
@@ -2141,6 +2214,15 @@ class LibvirtFixture(fixtures.Fixture):
def __init__(self, stub_os_vif=True):
self.stub_os_vif = stub_os_vif
+ self.pci_address_to_mac_map = collections.defaultdict(
+ lambda: '52:54:00:1e:59:c6')
+
+ def update_sriov_mac_address_mapping(self, pci_address_to_mac_map):
+ self.pci_address_to_mac_map.update(pci_address_to_mac_map)
+
+ def fake_get_mac_by_pci_address(self, pci_addr, pf_interface=False):
+ res = self.pci_address_to_mac_map[pci_addr]
+ return res
def setUp(self):
super().setUp()
@@ -2153,27 +2235,39 @@ class LibvirtFixture(fixtures.Fixture):
self.useFixture(
fixtures.MockPatch('nova.virt.libvirt.utils.get_fs_info'))
- self.useFixture(
- fixtures.MockPatch('nova.compute.utils.get_machine_ips'))
+ self.mock_get_machine_ips = self.useFixture(
+ fixtures.MockPatch('nova.compute.utils.get_machine_ips')).mock
# libvirt driver needs to call out to the filesystem to get the
# parent_ifname for the SRIOV VFs.
+ self.mock_get_ifname_by_pci_address = self.useFixture(
+ fixtures.MockPatch(
+ "nova.pci.utils.get_ifname_by_pci_address",
+ return_value="fake_pf_interface_name",
+ )
+ ).mock
+
self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_ifname_by_pci_address',
- return_value='fake_pf_interface_name'))
+ 'nova.pci.utils.get_mac_by_pci_address',
+ side_effect=self.fake_get_mac_by_pci_address))
# libvirt calls out to sysfs to get the vfs ID during macvtap plug
- self.useFixture(fixtures.MockPatch(
- 'nova.pci.utils.get_vf_num_by_pci_address', return_value=1))
+ self.mock_get_vf_num_by_pci_address = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.get_vf_num_by_pci_address', return_value=1
+ )
+ ).mock
# libvirt calls out to privsep to set the mac and vlan of a macvtap
- self.useFixture(fixtures.MockPatch(
- 'nova.privsep.linux_net.set_device_macaddr_and_vlan'))
+ self.mock_set_device_macaddr_and_vlan = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.privsep.linux_net.set_device_macaddr_and_vlan')).mock
# libvirt calls out to privsep to set the port state during macvtap
# plug
- self.useFixture(fixtures.MockPatch(
- 'nova.privsep.linux_net.set_device_macaddr'))
+ self.mock_set_device_macaddr = self.useFixture(
+ fixtures.MockPatch(
+ 'nova.privsep.linux_net.set_device_macaddr')).mock
# Don't assume that the system running tests has a valid machine-id
self.useFixture(fixtures.MockPatch(
@@ -2188,8 +2282,17 @@ class LibvirtFixture(fixtures.Fixture):
# Ensure tests perform the same on all host architectures
fake_uname = os_uname(
'Linux', '', '5.4.0-0-generic', '', obj_fields.Architecture.X86_64)
- self.useFixture(
- fixtures.MockPatch('os.uname', return_value=fake_uname))
+ self.mock_uname = self.useFixture(
+ fixtures.MockPatch('os.uname', return_value=fake_uname)).mock
+
+ real_exists = os.path.exists
+
+ def fake_exists(path):
+ if path == host.SEV_KERNEL_PARAM_FILE:
+ return False
+ return real_exists(path)
+
+ self.useFixture(fixtures.MonkeyPatch('os.path.exists', fake_exists))
# ...and on all machine types
fake_loaders = [
diff --git a/nova/tests/fixtures/libvirt_data.py b/nova/tests/fixtures/libvirt_data.py
index 463cb0ae3f..f022860f61 100644
--- a/nova/tests/fixtures/libvirt_data.py
+++ b/nova/tests/fixtures/libvirt_data.py
@@ -2002,6 +2002,210 @@ _fake_NodeDevXml = {
</capability>
</device>
""",
+ # A PF with the VPD capability.
+ "pci_0000_82_00_0": """
+ <device>
+ <name>pci_0000_82_00_0</name>
+ <path>/sys/devices/pci0000:80/0000:80:03.0/0000:82:00.0</path>
+ <parent>pci_0000_80_03_0</parent>
+ <driver>
+ <name>mlx5_core</name>
+ </driver>
+ <capability type='pci'>
+ <class>0x020000</class>
+ <domain>0</domain>
+ <bus>130</bus>
+ <slot>0</slot>
+ <function>0</function>
+ <product id='0xa2d6'>MT42822 BlueField-2 integrated ConnectX-6 Dx network controller</product>
+ <vendor id='0x15b3'>Mellanox Technologies</vendor>
+ <capability type='virt_functions' maxCount='8'>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x3'/>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x4'/>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x5'/>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x6'/>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x7'/>
+ <address domain='0x0000' bus='0x82' slot='0x01' function='0x0'/>
+ <address domain='0x0000' bus='0x82' slot='0x01' function='0x1'/>
+ <address domain='0x0000' bus='0x82' slot='0x01' function='0x2'/>
+ </capability>
+ <capability type='vpd'>
+ <name>BlueField-2 DPU 25GbE Dual-Port SFP56, Crypto Enabled, 16GB on-board DDR, 1GbE OOB management, Tall Bracket</name>
+ <fields access='readonly'>
+ <change_level>B1</change_level>
+ <manufacture_id>foobar</manufacture_id>
+ <part_number>MBF2H332A-AEEOT</part_number>
+ <serial_number>MT2113X00000</serial_number>
+ <vendor_field index='0'>PCIeGen4 x8</vendor_field>
+ <vendor_field index='2'>MBF2H332A-AEEOT</vendor_field>
+ <vendor_field index='3'>3c53d07eec484d8aab34dabd24fe575aa</vendor_field>
+ <vendor_field index='A'>MLX:MN=MLNX:CSKU=V2:UUID=V3:PCI=V0:MODL=BF2H332A</vendor_field>
+ </fields>
+ <fields access='readwrite'>
+ <asset_tag>fooasset</asset_tag>
+ <vendor_field index='0'>vendorfield0</vendor_field>
+ <vendor_field index='2'>vendorfield2</vendor_field>
+ <vendor_field index='A'>vendorfieldA</vendor_field>
+ <system_field index='B'>systemfieldB</system_field>
+ <system_field index='0'>systemfield0</system_field>
+ </fields>
+ </capability>
+ <iommuGroup number='65'>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x0'/>
+ </iommuGroup>
+ <numa node='1'/>
+ <pci-express>
+ <link validity='cap' port='0' speed='16' width='8'/>
+ <link validity='sta' speed='8' width='8'/>
+ </pci-express>
+ </capability>
+ </device>""", # noqa:E501
+ # A VF without the VPD capability with a PF that has a VPD capability.
+ "pci_0000_82_00_3": """
+ <device>
+ <name>pci_0000_82_00_3</name>
+ <path>/sys/devices/pci0000:80/0000:80:03.0/0000:82:00.3</path>
+ <parent>pci_0000_80_03_0</parent>
+ <driver>
+ <name>mlx5_core</name>
+ </driver>
+ <capability type='pci'>
+ <class>0x020000</class>
+ <domain>0</domain>
+ <bus>130</bus>
+ <slot>0</slot>
+ <function>3</function>
+ <product id='0x101e'>ConnectX Family mlx5Gen Virtual Function</product>
+ <vendor id='0x15b3'>Mellanox Technologies</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x0'/>
+ </capability>
+ <iommuGroup number='99'>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x3'/>
+ </iommuGroup>
+ <numa node='1'/>
+ <pci-express>
+ <link validity='cap' port='0' speed='16' width='8'/>
+ <link validity='sta' width='0'/>
+ </pci-express>
+ </capability>
+ </device>""",
+ # A VF with the VPD capability but without a parent defined in test data
+ # so that the VPD cap is extracted from the VF directly.
+ "pci_0001_82_00_3": """
+ <device>
+ <name>pci_0001_82_00_3</name>
+ <path>/sys/devices/pci0001:80/0001:80:03.0/0001:82:00.3</path>
+ <parent>pci_0001_80_03_0</parent>
+ <driver>
+ <name>mlx5_core</name>
+ </driver>
+ <capability type='pci'>
+ <class>0x020000</class>
+ <domain>1</domain>
+ <bus>130</bus>
+ <slot>0</slot>
+ <function>3</function>
+ <product id='0x101e'>ConnectX Family mlx5Gen Virtual Function</product>
+ <vendor id='0x15b3'>Mellanox Technologies</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0001' bus='0x82' slot='0x00' function='0x0'/>
+ </capability>
+ <capability type='vpd'>
+ <name>BlueField-2 DPU 25GbE Dual-Port SFP56, Crypto Enabled, 16GB on-board DDR, 1GbE OOB management, Tall Bracket</name>
+ <fields access='readonly'>
+ <change_level>B1</change_level>
+ <part_number>MBF2H332A-AEEOT</part_number>
+ <serial_number>MT2113XBEEF0</serial_number>
+ <vendor_field index='2'>MBF2H332A-AEEOT</vendor_field>
+ <vendor_field index='3'>9644e3586190eb118000b8cef671bf3e</vendor_field>
+ <vendor_field index='A'>MLX:MN=MLNX:CSKU=V2:UUID=V3:PCI=V0:MODL=BF2H332A</vendor_field>
+ <vendor_field index='0'>PCIeGen4 x8</vendor_field>
+ </fields>
+ </capability>
+ <iommuGroup number='99'>
+ <address domain='0x0001' bus='0x82' slot='0x00' function='0x3'/>
+ </iommuGroup>
+ <numa node='1'/>
+ <pci-express>
+ <link validity='cap' port='0' speed='16' width='8'/>
+ <link validity='sta' width='0'/>
+ </pci-express>
+ </capability>
+ </device>""", # noqa:E501
+ # A VF without the VPD capability and without a parent PF defined
+ # in the test data.
+ "pci_0002_82_00_3": """
+ <device>
+ <name>pci_0002_82_00_3</name>
+ <path>/sys/devices/pci0002:80/0002:80:03.0/0002:82:00.3</path>
+ <parent>pci_0002_80_03_0</parent>
+ <driver>
+ <name>mlx5_core</name>
+ </driver>
+ <capability type='pci'>
+ <class>0x020000</class>
+ <domain>2</domain>
+ <bus>130</bus>
+ <slot>0</slot>
+ <function>3</function>
+ <product id='0x101e'>ConnectX Family mlx5Gen Virtual Function</product>
+ <vendor id='0x15b3'>Mellanox Technologies</vendor>
+ <capability type='phys_function'>
+ <address domain='0x0002' bus='0x82' slot='0x00' function='0x0'/>
+ </capability>
+ <iommuGroup number='99'>
+ <address domain='0x0002' bus='0x82' slot='0x00' function='0x3'/>
+ </iommuGroup>
+ <numa node='1'/>
+ <pci-express>
+ <link validity='cap' port='0' speed='16' width='8'/>
+ <link validity='sta' width='0'/>
+ </pci-express>
+ </capability>
+ </device>""",
+ "net_enp130s0f0v0_36_33_10_a3_94_64": """
+ <device>
+ <name>net_enp130s0f0v0_36_33_10_a3_94_64</name>
+ <path>/sys/devices/pci0000:80/0000:80:03.0/0000:82:00.3/net/enp130s0f0v0</path>
+ <parent>pci_0000_82_00_3</parent>
+ <capability type='net'>
+ <interface>enp130s0f0v0</interface>
+ <address>36:33:10:a3:94:64</address>
+ <link state='down'/>
+ <feature name='rx'/>
+ <feature name='tx'/>
+ <feature name='sg'/>
+ <feature name='tso'/>
+ <feature name='gso'/>
+ <feature name='gro'/>
+ <feature name='rxvlan'/>
+ <feature name='txvlan'/>
+ <feature name='rxhash'/>
+ <capability type='80203'/>
+ </capability>
+ </device>""", # noqa:E501
+ "net_enp130s0f0v0_36_33_10_a3_94_65": """
+ <device>
+ <name>net_enp130s0f0v0_36_33_10_a3_94_65</name>
+ <path>/sys/devices/pci0002:80/0002:80:03.0/0002:82:00.3/net/enp130s0f0v0</path>
+ <parent>pci_0002_82_00_3</parent>
+ <capability type='net'>
+ <interface>enp130s0f0v0</interface>
+ <address>36:33:10:a3:94:65</address>
+ <link state='down'/>
+ <feature name='rx'/>
+ <feature name='tx'/>
+ <feature name='sg'/>
+ <feature name='tso'/>
+ <feature name='gso'/>
+ <feature name='gro'/>
+ <feature name='rxvlan'/>
+ <feature name='txvlan'/>
+ <feature name='rxhash'/>
+ <capability type='80203'/>
+ </capability>
+ </device>""", # noqa:E501
}
_fake_NodeDevXml_parents = {
diff --git a/nova/tests/fixtures/libvirt_imagebackend.py b/nova/tests/fixtures/libvirt_imagebackend.py
index c3b6f7898e..4ce3f03710 100644
--- a/nova/tests/fixtures/libvirt_imagebackend.py
+++ b/nova/tests/fixtures/libvirt_imagebackend.py
@@ -16,9 +16,9 @@
import collections
import functools
import os
+from unittest import mock
import fixtures
-import mock
from nova.virt.libvirt import config
from nova.virt.libvirt import driver
@@ -154,7 +154,9 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# their construction. Tests can use this to assert that disks were
# created of the expected type.
- def image_init(instance=None, disk_name=None, path=None):
+ def image_init(
+ instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
# There's nothing special about this path except that it's
# predictable and unique for (instance, disk).
if path is None:
@@ -169,6 +171,7 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# the real constructor.
setattr(disk, 'path', path)
setattr(disk, 'is_block_dev', mock.sentinel.is_block_dev)
+ setattr(disk, 'disk_info_mapping', disk_info_mapping)
# Used by tests. Note that image_init is a closure over image_type.
setattr(disk, 'image_type', image_type)
@@ -187,6 +190,9 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
# Set the SUPPORTS_CLONE member variable to mimic the Image base
# class.
image_init.SUPPORTS_CLONE = False
+ # Set the SUPPORTS_LUKS member variable to mimic the Image base
+ # class.
+ image_init.SUPPORTS_LUKS = False
# Ditto for the 'is_shared_block_storage' and
# 'is_file_in_instance_path' functions
@@ -217,16 +223,16 @@ class LibvirtImageBackendFixture(fixtures.Fixture):
self.imported_files.append((local_filename, remote_filename))
def _fake_libvirt_info(
- self, mock_disk, disk_info, cache_mode, extra_specs, disk_unit=None,
+ self, mock_disk, cache_mode, extra_specs, disk_unit=None,
boot_order=None,
):
# For tests in test_virt_drivers which expect libvirt_info to be
# functional
info = config.LibvirtConfigGuestDisk()
info.source_type = 'file'
- info.source_device = disk_info['type']
- info.target_bus = disk_info['bus']
- info.target_dev = disk_info['dev']
+ info.source_device = mock_disk.disk_info_mapping['type']
+ info.target_bus = mock_disk.disk_info_mapping['bus']
+ info.target_dev = mock_disk.disk_info_mapping['dev']
info.driver_cache = cache_mode
info.driver_format = 'raw'
info.source_path = mock_disk.path
diff --git a/nova/tests/fixtures/neutron.py b/nova/tests/fixtures/neutron.py
index 681d52601d..a41007b83c 100644
--- a/nova/tests/fixtures/neutron.py
+++ b/nova/tests/fixtures/neutron.py
@@ -730,19 +730,22 @@ class NeutronFixture(fixtures.Fixture):
self._validate_port_binding(port_id, host_id)
del self._port_bindings[port_id][host_id]
- def _activate_port_binding(self, port_id, host_id):
+ def _activate_port_binding(self, port_id, host_id, modify_port=False):
# It makes sure that only one binding is active for a port
for host, binding in self._port_bindings[port_id].items():
if host == host_id:
# NOTE(gibi): neutron returns 409 if this binding is already
# active but nova does not depend on this behaviour yet.
binding['status'] = 'ACTIVE'
+ if modify_port:
+ # We need to ensure that port's binding:host_id is valid
+ self._merge_in_active_binding(self._ports[port_id])
else:
binding['status'] = 'INACTIVE'
def activate_port_binding(self, port_id, host_id):
self._validate_port_binding(port_id, host_id)
- self._activate_port_binding(port_id, host_id)
+ self._activate_port_binding(port_id, host_id, modify_port=True)
def show_port_binding(self, port_id, host_id):
self._validate_port_binding(port_id, host_id)
diff --git a/nova/tests/fixtures/notifications.py b/nova/tests/fixtures/notifications.py
index c46b3a919d..817982d4ff 100644
--- a/nova/tests/fixtures/notifications.py
+++ b/nova/tests/fixtures/notifications.py
@@ -39,7 +39,7 @@ class _Sub(object):
def received(self, notification):
with self._cond:
self._notifications.append(notification)
- self._cond.notifyAll()
+ self._cond.notify_all()
def wait_n(self, n, event, timeout):
"""Wait until at least n notifications have been received, and return
@@ -170,8 +170,8 @@ class FakeVersionedNotifier(FakeNotifier):
'test case which is different from the currently running test '
'case %s. This notification is ignored. The sender test case '
'probably leaked a running eventlet that emitted '
- 'notifications after the test case finished. Now this eventlet'
- 'is terminated by raising this exception.' %
+ 'notifications after the test case finished. Now this '
+ 'eventlet is terminated by raising this exception.' %
(event_type, sender_test_case_id, self.test_case_id))
payload = self._serializer.serialize_entity(ctxt, payload)
diff --git a/nova/tests/fixtures/nova.py b/nova/tests/fixtures/nova.py
index ef873f6654..abfc3ecc6c 100644
--- a/nova/tests/fixtures/nova.py
+++ b/nova/tests/fixtures/nova.py
@@ -20,14 +20,17 @@ import collections
import contextlib
from contextlib import contextmanager
import functools
+from importlib.abc import MetaPathFinder
import logging as std_logging
import os
+import sys
+import time
+from unittest import mock
import warnings
import eventlet
import fixtures
import futurist
-import mock
from openstack import service_description
from oslo_concurrency import lockutils
from oslo_config import cfg
@@ -62,6 +65,7 @@ from nova.scheduler import weights
from nova import service
from nova.tests.functional.api import client
from nova import utils
+from nova.virt import node
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -451,6 +455,13 @@ class CellDatabases(fixtures.Fixture):
# yield to do the actual work. We can do schedulable things
# here and not exclude other threads from making progress.
# If an exception is raised, we capture that and save it.
+ # Note that it is possible that another thread has changed the
+ # global state (step #2) after we released the writer lock but
+ # before we acquired the reader lock. If this happens, we will
+ # detect the global state change and retry step #2 a limited number
+ # of times. If we happen to race repeatedly with another thread and
+ # exceed our retry limit, we will give up and raise a RuntimeError,
+ # which will fail the test.
# 4. If we changed state in #2, we need to change it back. So we grab
# a writer lock again and do that.
# 5. Finally, if an exception was raised in #3 while state was
@@ -469,29 +480,47 @@ class CellDatabases(fixtures.Fixture):
raised_exc = None
- with self._cell_lock.write_lock():
- if cell_mapping is not None:
- # This assumes the next local DB access is the same cell that
- # was targeted last time.
- self._last_ctxt_mgr = desired
+ def set_last_ctxt_mgr():
+ with self._cell_lock.write_lock():
+ if cell_mapping is not None:
+ # This assumes the next local DB access is the same cell
+ # that was targeted last time.
+ self._last_ctxt_mgr = desired
- with self._cell_lock.read_lock():
- if self._last_ctxt_mgr != desired:
- # NOTE(danms): This is unlikely to happen, but it's possible
- # another waiting writer changed the state between us letting
- # it go and re-acquiring as a reader. If lockutils supported
- # upgrading and downgrading locks, this wouldn't be a problem.
- # Regardless, assert that it is still as we left it here
- # so we don't hit the wrong cell. If this becomes a problem,
- # we just need to retry the write section above until we land
- # here with the cell we want.
- raise RuntimeError('Global DB state changed underneath us')
+ # Set last context manager to the desired cell's context manager.
+ set_last_ctxt_mgr()
+ # Retry setting the last context manager if we detect that a writer
+ # changed global DB state before we take the read lock.
+ for retry_time in range(0, 3):
try:
- with self._real_target_cell(context, cell_mapping) as ccontext:
- yield ccontext
- except Exception as exc:
- raised_exc = exc
+ with self._cell_lock.read_lock():
+ if self._last_ctxt_mgr != desired:
+ # NOTE(danms): This is unlikely to happen, but it's
+ # possible another waiting writer changed the state
+ # between us letting it go and re-acquiring as a
+ # reader. If lockutils supported upgrading and
+ # downgrading locks, this wouldn't be a problem.
+ # Regardless, assert that it is still as we left it
+ # here so we don't hit the wrong cell. If this becomes
+ # a problem, we just need to retry the write section
+ # above until we land here with the cell we want.
+ raise RuntimeError(
+ 'Global DB state changed underneath us')
+ try:
+ with self._real_target_cell(
+ context, cell_mapping
+ ) as ccontext:
+ yield ccontext
+ except Exception as exc:
+ raised_exc = exc
+ # Leave the retry loop after calling target_cell
+ break
+ except RuntimeError:
+ # Give other threads a chance to make progress, increasing the
+ # wait time between attempts.
+ time.sleep(retry_time)
+ set_last_ctxt_mgr()
with self._cell_lock.write_lock():
# Once we have returned from the context, we need
@@ -537,11 +566,10 @@ class CellDatabases(fixtures.Fixture):
call_monitor_timeout=None):
"""Mirror rpc.get_client() but with our special sauce."""
serializer = CheatingSerializer(serializer)
- return messaging.RPCClient(rpc.TRANSPORT,
- target,
- version_cap=version_cap,
- serializer=serializer,
- call_monitor_timeout=call_monitor_timeout)
+ return messaging.get_rpc_client(rpc.TRANSPORT, target,
+ version_cap=version_cap,
+ serializer=serializer,
+ call_monitor_timeout=call_monitor_timeout)
def add_cell_database(self, connection_str, default=False):
"""Add a cell database to the fixture.
@@ -780,7 +808,7 @@ class WarningsFixture(fixtures.Fixture):
"""Filters out warnings during test runs."""
def setUp(self):
- super(WarningsFixture, self).setUp()
+ super().setUp()
self._original_warning_filters = warnings.filters[:]
@@ -793,15 +821,19 @@ class WarningsFixture(fixtures.Fixture):
# forward on is_admin, the deprecation is definitely really premature.
warnings.filterwarnings(
'ignore',
- message='Policy enforcement is depending on the value of is_admin.'
- ' This key is deprecated. Please update your policy '
- 'file to use the standard policy values.')
+ message=(
+ 'Policy enforcement is depending on the value of is_admin. '
+ 'This key is deprecated. Please update your policy '
+ 'file to use the standard policy values.'
+ ),
+ )
# NOTE(mriedem): Ignore scope check UserWarnings from oslo.policy.
warnings.filterwarnings(
'ignore',
message="Policy .* failed scope check",
- category=UserWarning)
+ category=UserWarning,
+ )
# NOTE(gibi): The UUIDFields emits a warning if the value is not a
# valid UUID. Let's escalate that to an exception in the test to
@@ -813,70 +845,36 @@ class WarningsFixture(fixtures.Fixture):
# how to handle (or isn't given a fallback callback).
warnings.filterwarnings(
'error',
- message="Cannot convert <oslo_db.sqlalchemy.enginefacade"
- "._Default object at ",
- category=UserWarning)
-
- warnings.filterwarnings(
- 'error', message='Evaluating non-mapped column expression',
- category=sqla_exc.SAWarning)
+ message=(
+ 'Cannot convert <oslo_db.sqlalchemy.enginefacade._Default '
+ 'object at '
+ ),
+ category=UserWarning,
+ )
# Enable deprecation warnings for nova itself to capture upcoming
# SQLAlchemy changes
warnings.filterwarnings(
'ignore',
- category=sqla_exc.SADeprecationWarning)
+ category=sqla_exc.SADeprecationWarning,
+ )
warnings.filterwarnings(
'error',
module='nova',
- category=sqla_exc.SADeprecationWarning)
-
- # ...but filter everything out until we get around to fixing them
- # TODO(stephenfin): Fix all of these
-
- warnings.filterwarnings(
- 'ignore',
- module='nova',
- message=r'The current statement is being autocommitted .*',
- category=sqla_exc.SADeprecationWarning)
-
- warnings.filterwarnings(
- 'ignore',
- module='nova',
- message=r'The Column.copy\(\) method is deprecated .*',
- category=sqla_exc.SADeprecationWarning)
-
- warnings.filterwarnings(
- 'ignore',
- module='nova',
- message=r'The Connection.connect\(\) method is considered .*',
- category=sqla_exc.SADeprecationWarning)
-
- warnings.filterwarnings(
- 'ignore',
- module='nova',
- message=r'Using strings to indicate column or relationship .*',
- category=sqla_exc.SADeprecationWarning)
+ category=sqla_exc.SADeprecationWarning,
+ )
- warnings.filterwarnings(
- 'ignore',
- module='nova',
- message=r'Using strings to indicate relationship names .*',
- category=sqla_exc.SADeprecationWarning)
+ # Enable general SQLAlchemy warnings also to ensure we're not doing
+ # silly stuff. It's possible that we'll need to filter things out here
+ # with future SQLAlchemy versions, but that's a good thing
warnings.filterwarnings(
- 'ignore',
+ 'error',
module='nova',
- message=r'Invoking and_\(\) without arguments is deprecated, .*',
- category=sqla_exc.SADeprecationWarning)
-
- # TODO(stephenfin): Remove once we fix this in placement 5.0.2 or 6.0.0
- warnings.filterwarnings(
- 'ignore',
- message='Implicit coercion of SELECT and textual SELECT .*',
- category=sqla_exc.SADeprecationWarning)
+ category=sqla_exc.SAWarning,
+ )
self.addCleanup(self._reset_warning_filters)
@@ -1006,9 +1004,15 @@ class OSAPIFixture(fixtures.Fixture):
self.api = client.TestOpenStackClient(
'fake', base_url, project_id=self.project_id,
roles=['reader', 'member'])
+ self.alternative_api = client.TestOpenStackClient(
+ 'fake', base_url, project_id=self.project_id,
+ roles=['reader', 'member'])
self.admin_api = client.TestOpenStackClient(
'admin', base_url, project_id=self.project_id,
roles=['reader', 'member', 'admin'])
+ self.alternative_admin_api = client.TestOpenStackClient(
+ 'admin', base_url, project_id=self.project_id,
+ roles=['reader', 'member', 'admin'])
self.reader_api = client.TestOpenStackClient(
'reader', base_url, project_id=self.project_id,
roles=['reader'])
@@ -1104,9 +1108,9 @@ class PoisonFunctions(fixtures.Fixture):
# Don't poison the function if it's already mocked
import nova.virt.libvirt.host
if not isinstance(nova.virt.libvirt.host.Host._init_events, mock.Mock):
- self.useFixture(fixtures.MockPatch(
+ self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.host.Host._init_events',
- side_effect=evloop))
+ evloop))
class IndirectionAPIFixture(fixtures.Fixture):
@@ -1314,6 +1318,77 @@ class PrivsepFixture(fixtures.Fixture):
nova.privsep.sys_admin_pctxt, 'client_mode', False))
+class CGroupsFixture(fixtures.Fixture):
+ """Mocks checks made for available subsystems on the host's control group.
+
+ The fixture mocks all calls made on the host to verify the capabilities
+ provided by its kernel. Through this, one can simulate the underlying
+ system that hosts run on top of and have tests react to the expected
+ outcomes of such a system.
+
+ Use sample:
+ >>> cgroups = self.useFixture(CGroupsFixture())
+ >>> cgroups = self.useFixture(CGroupsFixture(version=2))
+ >>> cgroups = self.useFixture(CGroupsFixture())
+ ... cgroups.version = 2
+
+ :attr version: Arranges mocks to simulate how the host interacts with nova
+ following the given version of cgroups.
+ Available values are:
+ - 0: All checks related to cgroups will return False.
+ - 1: Checks related to cgroups v1 will return True.
+ - 2: Checks related to cgroups v2 will return True.
+ Defaults to 1.
+ """
+
+ def __init__(self, version=1):
+ self._cpuv1 = None
+ self._cpuv2 = None
+
+ self._version = version
+
+ @property
+ def version(self):
+ return self._version
+
+ @version.setter
+ def version(self, value):
+ self._version = value
+ self._update_mocks()
+
+ def setUp(self):
+ super().setUp()
+ self._cpuv1 = self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.host.Host._has_cgroupsv1_cpu_controller')).mock
+ self._cpuv2 = self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.host.Host._has_cgroupsv2_cpu_controller')).mock
+ self._update_mocks()
+
+ def _update_mocks(self):
+ if not self._cpuv1:
+ return
+
+ if not self._cpuv2:
+ return
+
+ if self.version == 0:
+ self._cpuv1.return_value = False
+ self._cpuv2.return_value = False
+ return
+
+ if self.version == 1:
+ self._cpuv1.return_value = True
+ self._cpuv2.return_value = False
+ return
+
+ if self.version == 2:
+ self._cpuv1.return_value = False
+ self._cpuv2.return_value = True
+ return
+
+ raise ValueError(f"Unknown cgroups version: '{self.version}'.")
+
+
class NoopQuotaDriverFixture(fixtures.Fixture):
"""A fixture to run tests using the NoopQuotaDriver.
@@ -1459,7 +1534,7 @@ class AvailabilityZoneFixture(fixtures.Fixture):
``get_availability_zones``.
``get_instance_availability_zone`` will return the availability_zone
- requested when creating a server otherwise the instance.availabilty_zone
+ requested when creating a server otherwise the instance.availability_zone
or default_availability_zone is returned.
"""
@@ -1611,7 +1686,11 @@ class GenericPoisonFixture(fixtures.Fixture):
current = __import__(components[0], {}, {})
for component in components[1:]:
current = getattr(current, component)
- if not isinstance(getattr(current, attribute), mock.Mock):
+
+ # NOTE(stephenfin): There are a couple of mock libraries in use
+ # (including mocked versions of mock from oslotest) so we can't
+ # use isinstance checks here
+ if 'mock' not in str(type(getattr(current, attribute))):
self.useFixture(fixtures.MonkeyPatch(
meth, poison_configure(meth, why)))
except ImportError:
@@ -1733,3 +1812,129 @@ class ReaderWriterLock(lockutils.ReaderWriterLock):
'threading.current_thread', eventlet.getcurrent)
with mpatch if eventlet_patched else contextlib.ExitStack():
super().__init__(*a, **kw)
+
+
+class SysFsPoisonFixture(fixtures.Fixture):
+
+ def inject_poison(self, module_name, function_name):
+ import importlib
+ mod = importlib.import_module(module_name)
+ orig_f = getattr(mod, function_name)
+ if (
+ isinstance(orig_f, mock.Mock) or
+ # FIXME(gibi): Is this a bug in unittest.mock? If I remove this
+ # then LibvirtReportSevTraitsTests fails as builtins.open is mocked
+ # there at import time via @test.patch_open. That injects a
+ # MagicMock instance to builtins.open which we check here against
+ # Mock (or even MagicMock) via isinstance and that check says it is
+ # not a mock. More interestingly I cannot reproduce the same
+ # issue with @test.patch_open and isinstance in a simple python
+ # interpreter. So to make progress I'm checking the class name
+ # here instead as that works.
+ orig_f.__class__.__name__ == "MagicMock"
+ ):
+ # the target is already mocked, probably via a decorator run at
+ # import time, so we don't need to inject our poison
+ return
+
+ full_name = module_name + "." + function_name
+
+ def toxic_wrapper(*args, **kwargs):
+ path = args[0]
+ if isinstance(path, bytes):
+ pattern = b'/sys'
+ elif isinstance(path, str):
+ pattern = '/sys'
+ else:
+ # we ignore the rest of the potential pathlike types for now
+ pattern = None
+
+ if pattern and path.startswith(pattern):
+ raise Exception(
+ 'This test invokes %s on %s. It is bad, you '
+ 'should mock it.'
+ % (full_name, path)
+ )
+ else:
+ return orig_f(*args, **kwargs)
+
+ self.useFixture(fixtures.MonkeyPatch(full_name, toxic_wrapper))
+
+ def setUp(self):
+ super().setUp()
+ self.inject_poison("os.path", "isdir")
+ self.inject_poison("builtins", "open")
+ self.inject_poison("glob", "iglob")
+ self.inject_poison("os", "listdir")
+ self.inject_poison("glob", "glob")
+ # TODO(gibi): Would be good to poison these too but that makes
+ # a bunch of tests fail
+ # self.inject_poison("os.path", "exists")
+ # self.inject_poison("os", "stat")
+
+
+class ImportModulePoisonFixture(fixtures.Fixture):
+ """Poison imports of modules unsuitable for the test environment.
+
+ Examples are guestfs and libvirt. Ordinarily, these would not be installed
+ in the test environment but if they _are_ present, it can result in
+ actual calls to libvirt, for example, which could cause tests to fail.
+
+ This fixture will inspect module imports and if they are in the disallowed
+ list, it will fail the test with a helpful message about mocking needed in
+ the test.
+ """
+
+ class ForbiddenModules(MetaPathFinder):
+ def __init__(self, test, modules):
+ super().__init__()
+ self.test = test
+ self.modules = modules
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.modules:
+ self.test.fail_message = (
+ f"This test imports the '{fullname}' module, which it "
+ f'should not in the test environment. Please add '
+ f'appropriate mocking to this test.'
+ )
+ raise ImportError(fullname)
+
+ def __init__(self, module_names):
+ self.module_names = module_names
+ self.fail_message = ''
+ if isinstance(module_names, str):
+ self.module_names = {module_names}
+ self.meta_path_finder = self.ForbiddenModules(self, self.module_names)
+
+ def setUp(self):
+ super().setUp()
+ self.addCleanup(self.cleanup)
+ sys.meta_path.insert(0, self.meta_path_finder)
+
+ def cleanup(self):
+ sys.meta_path.remove(self.meta_path_finder)
+ # We use a flag and check it during the cleanup phase to fail the test
+        # if needed. This is done because some module imports occur inside a
+        # try-except block that ignores all exceptions, so raising an exception
+        # there (which is also what self.assert* and self.fail() do underneath)
+        # will not cause the test to fail.
+ if self.fail_message:
+ raise ImportError(self.fail_message)
+
+
+class ComputeNodeIdFixture(fixtures.Fixture):
+ def setUp(self):
+ super().setUp()
+
+ node.LOCAL_NODE_UUID = None
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ lambda: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ lambda uuid: None))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.compute.manager.ComputeManager.'
+ '_ensure_existing_node_identity',
+ mock.DEFAULT))
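
As a minimal standalone sketch of the MetaPathFinder technique that the new
ImportModulePoisonFixture builds on (illustrative only, not part of the change
above): a finder placed at the front of sys.meta_path is consulted for every
import and can veto disallowed modules by raising ImportError from find_spec.
The ForbiddenModuleFinder name and the module set below are made up for the
example.

import sys
from importlib.abc import MetaPathFinder


class ForbiddenModuleFinder(MetaPathFinder):
    """Reject imports of the named modules by raising ImportError."""

    def __init__(self, forbidden):
        self.forbidden = set(forbidden)

    def find_spec(self, fullname, path, target=None):
        if fullname in self.forbidden:
            raise ImportError(
                f"import of '{fullname}' is blocked in this test environment")
        return None  # defer to the remaining finders on sys.meta_path


finder = ForbiddenModuleFinder({'libvirt', 'guestfs'})
sys.meta_path.insert(0, finder)
try:
    import libvirt  # noqa: F401 -- expected to raise ImportError here
except ImportError as exc:
    print(exc)
finally:
    sys.meta_path.remove(finder)

The fixture itself goes one step further: because some production imports
happen inside broad try/except blocks, it records a failure message and raises
it from the fixture's cleanup rather than relying on the ImportError alone.
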
diff --git a/nova/tests/fixtures/os_brick.py b/nova/tests/fixtures/os_brick.py
index e636e8b8f5..2062e8ed14 100644
--- a/nova/tests/fixtures/os_brick.py
+++ b/nova/tests/fixtures/os_brick.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from os_brick.initiator import connector as brick_connector
diff --git a/nova/tests/fixtures/policy.py b/nova/tests/fixtures/policy.py
index daecc2868b..b0b844fe37 100644
--- a/nova/tests/fixtures/policy.py
+++ b/nova/tests/fixtures/policy.py
@@ -65,7 +65,7 @@ class RealPolicyFixture(fixtures.Fixture):
def add_missing_default_rules(self, rules):
"""Adds default rules and their values to the given rules dict.
- The given rulen dict may have an incomplete set of policy rules.
+ The given rules dict may have an incomplete set of policy rules.
This method will add the default policy rules and their values to
the dict. It will not override the existing rules.
"""
@@ -141,7 +141,7 @@ class OverridePolicyFixture(RealPolicyFixture):
not used. One example is when policy rules are deprecated. In that case
tests can use this fixture and verify if deprecated rules are overridden
then does nova code enforce the overridden rules not only defaults.
- As per oslo.policy deprecattion feature, if deprecated rule is overridden
+ As per oslo.policy deprecation feature, if deprecated rule is overridden
in policy file then, overridden check is used to verify the policy.
Example of usage:
diff --git a/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl
index 6b56f72139..d35850baed 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/images/images-details-get-resp.json.tpl
@@ -217,6 +217,124 @@
"progress": 100,
"status": "ACTIVE",
"updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_disabled_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_disabled_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "False"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_luks_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_luks_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True",
+ "hw_ephemeral_encryption_format": "luks"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
+ },
+ {
+ "created": "2011-01-01T01:02:03Z",
+ "id": "%(eph_encryption_plain_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_plain_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "metadata": {
+ "hw_ephemeral_encryption": "True",
+ "hw_ephemeral_encryption_format": "plain"
+ },
+ "minDisk": 0,
+ "minRam": 0,
+ "name": "fakeimage123456",
+ "OS-EXT-IMG-SIZE:size": %(int)s,
+ "progress": 100,
+ "status": "ACTIVE",
+ "updated": "2011-01-01T01:02:03Z"
}
]
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl
index 035cc83695..dc08ba7053 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/images/images-list-get-resp.json.tpl
@@ -132,6 +132,82 @@
}
],
"name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_disabled_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_disabled_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_disabled_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_luks_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_luks_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_luks_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
+ },
+ {
+ "id": "%(eph_encryption_plain_id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/images/%(eph_encryption_plain_id)s",
+ "rel": "bookmark"
+ },
+ {
+ "href": "http://glance.openstack.example.com/images/%(eph_encryption_plain_id)s",
+ "rel": "alternate",
+ "type": "application/vnd.openstack.image"
+ }
+ ],
+ "name": "fakeimage123456"
}
]
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-find-host-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
new file mode 100644
index 0000000000..8abf0b4e18
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-evacuate/v2.95/server-evacuate-req.json.tpl
@@ -0,0 +1,5 @@
+{
+ "evacuate": {
+ "adminPass": "%(adminPass)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json.tpl
new file mode 100644
index 0000000000..03e60c0133
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "keypair": {
+ "name": "%(keypair_name)s",
+ "type": "%(keypair_type)s",
+ "public_key": "%(public_key)s",
+ "user_id": "%(user_id)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json.tpl
new file mode 100644
index 0000000000..30d3fa969d
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-import-post-resp.json.tpl
@@ -0,0 +1,9 @@
+{
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "type": "%(keypair_type)s",
+ "public_key": "%(public_key)s",
+ "user_id": "%(user_id)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-post-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-post-req.json.tpl
new file mode 100644
index 0000000000..f6a6d47b56
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-keypairs/v2.92/keypairs-post-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "keypair": {
+ "name": "%(keypair_name)s",
+ "type": "%(keypair_type)s",
+ "user_id": "%(user_id)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-az.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-az.json.tpl
new file mode 100644
index 0000000000..9bcd25139a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-az.json.tpl
@@ -0,0 +1,5 @@
+{
+ "%(action)s": {
+ "availability_zone": "%(availability_zone)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve.json.tpl
index 9bcd25139a..d78efa84e1 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve.json.tpl
@@ -1,5 +1,3 @@
{
- "%(action)s": {
- "availability_zone": "%(availability_zone)s"
- }
+ "unshelve": null
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-null.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-shelve.json.tpl
index 5a19f85cff..5a19f85cff 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.77/os-unshelve-null.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-shelve.json.tpl
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az-host.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az-host.json.tpl
new file mode 100644
index 0000000000..eecc4271cb
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az-host.json.tpl
@@ -0,0 +1,6 @@
+{
+ "%(action)s": {
+ "availability_zone": "%(availability_zone)s",
+ "host": "%(host)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az.json.tpl
new file mode 100644
index 0000000000..9bcd25139a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-az.json.tpl
@@ -0,0 +1,5 @@
+{
+ "%(action)s": {
+ "availability_zone": "%(availability_zone)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json.tpl
new file mode 100644
index 0000000000..f9d2a2b17a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host-and-unpin-az.json.tpl
@@ -0,0 +1,6 @@
+{
+ "%(action)s": {
+ "availability_zone": null,
+ "host": "%(host)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host.json.tpl
new file mode 100644
index 0000000000..3363b524ee
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-host.json.tpl
@@ -0,0 +1,5 @@
+{
+ "%(action)s": {
+ "host": "%(host)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json.tpl
new file mode 100644
index 0000000000..3815586c5c
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve-unpin-az.json.tpl
@@ -0,0 +1,5 @@
+{
+ "%(action)s": {
+ "availability_zone": null
+ }
+}
diff --git a/doc/api_samples/os-shelve/v2.77/os-unshelve-null.json b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve.json.tpl
index fd05c2a2fe..d78efa84e1 100644
--- a/doc/api_samples/os-shelve/v2.77/os-unshelve-null.json
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-shelve/v2.91/os-unshelve.json.tpl
@@ -1,3 +1,3 @@
{
"unshelve": null
-} \ No newline at end of file
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..486433733d
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,80 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.1.30",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "config_drive": "",
+ "created": "%(isotime)s",
+ "description": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "locked_reason": null,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "os-extended-volumes:volumes_attached": [],
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..3becc83fba
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-action-rebuild.json.tpl
@@ -0,0 +1,15 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "%(access_ip_v4)s",
+ "accessIPv6" : "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "imageRef" : "%(uuid)s",
+ "name" : "%(name)s",
+ "adminPass" : "%(pass)s",
+ "hostname": "%(hostname)s",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ },
+ "user_data": "ZWNobyAiaGVsbG8gd29ybGQi"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
new file mode 100644
index 0000000000..f83c78fdc9
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-req.json.tpl
@@ -0,0 +1,21 @@
+{
+ "server" : {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "name" : "new-server-test",
+ "imageRef" : "%(image_id)s",
+ "flavorRef" : "1",
+ "OS-DCF:diskConfig": "AUTO",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ },
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "user_data" : "%(user_data)s",
+ "networks": "auto",
+ "hostname": "custom-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
new file mode 100644
index 0000000000..4b30e0cfbd
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-create-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
new file mode 100644
index 0000000000..ae2088619a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-get-resp.json.tpl
@@ -0,0 +1,81 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "%(hostname)s",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
new file mode 100644
index 0000000000..bc4be64a8e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "hostname": "new-server-hostname.example.com"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
new file mode 100644
index 0000000000..2adc16df5e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/server-update-resp.json.tpl
@@ -0,0 +1,78 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": null,
+ "locked": false,
+ "locked_reason": null,
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "us-west",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "server_groups": [],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..f49d21e7a2
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-details-resp.json.tpl
@@ -0,0 +1,88 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "00:0c:29:0d:11:74",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "description": "",
+ "flavor": {
+ "disk": 1,
+ "ephemeral": 0,
+ "extra_specs": {},
+ "original_name": "m1.tiny",
+ "ram": 512,
+ "swap": 0,
+ "vcpus": 1
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "config_drive": "%(cdrive)s",
+ "locked": false,
+ "locked_reason": "",
+ "OS-DCF:diskConfig": "AUTO",
+ "OS-EXT-AZ:availability_zone": "nova",
+ "OS-EXT-SRV-ATTR:hostname": "custom-hostname.example.com",
+ "OS-EXT-STS:power_state": 1,
+ "OS-EXT-STS:task_state": null,
+ "OS-EXT-STS:vm_state": "active",
+ "os-extended-volumes:volumes_attached": [
+ {"id": "volume_id1", "delete_on_termination": false},
+ {"id": "volume_id2", "delete_on_termination": false}
+ ],
+ "OS-SRV-USG:launched_at": "%(strtime)s",
+ "OS-SRV-USG:terminated_at": null,
+ "progress": 0,
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ],
+ "status": "ACTIVE",
+ "tags": [],
+ "tenant_id": "6f70656e737461636b20342065766572",
+ "trusted_image_certificates": null,
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/detail?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..9cdb3aa644
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.94/servers-list-resp.json.tpl
@@ -0,0 +1,24 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ],
+ "servers_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers?limit=1&marker=%(id)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/test_baremetal_nodes.py b/nova/tests/functional/api_sample_tests/test_baremetal_nodes.py
index 59ef2496b5..569df728e3 100644
--- a/nova/tests/functional/api_sample_tests/test_baremetal_nodes.py
+++ b/nova/tests/functional/api_sample_tests/test_baremetal_nodes.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.functional.api_sample_tests import api_sample_base
diff --git a/nova/tests/functional/api_sample_tests/test_compare_result.py b/nova/tests/functional/api_sample_tests/test_compare_result.py
index 652b9c9035..1b68439184 100644
--- a/nova/tests/functional/api_sample_tests/test_compare_result.py
+++ b/nova/tests/functional/api_sample_tests/test_compare_result.py
@@ -13,8 +13,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
import testtools
from nova import test
diff --git a/nova/tests/functional/api_sample_tests/test_create_backup.py b/nova/tests/functional/api_sample_tests/test_create_backup.py
index 2e5758c36e..cf454e948b 100644
--- a/nova/tests/functional/api_sample_tests/test_create_backup.py
+++ b/nova/tests/functional/api_sample_tests/test_create_backup.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests import fixtures
from nova.tests.functional.api_sample_tests import test_servers
diff --git a/nova/tests/functional/api_sample_tests/test_evacuate.py b/nova/tests/functional/api_sample_tests/test_evacuate.py
index 1b12e2caf4..15efb39d44 100644
--- a/nova/tests/functional/api_sample_tests/test_evacuate.py
+++ b/nova/tests/functional/api_sample_tests/test_evacuate.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.tests.functional.api_sample_tests import test_servers
@@ -79,7 +79,8 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -95,7 +96,8 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV214(EvacuateJsonTest):
@@ -116,7 +118,8 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -131,7 +134,8 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV229(EvacuateJsonTestV214):
@@ -158,7 +162,8 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
@mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
@@ -178,7 +183,8 @@ class EvacuateJsonTestV229(EvacuateJsonTestV214):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host='testHost', request_spec=mock.ANY)
+ host='testHost', request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
class EvacuateJsonTestV268(EvacuateJsonTestV229):
@@ -204,8 +210,47 @@ class EvacuateJsonTestV268(EvacuateJsonTestV229):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None, request_spec=mock.ANY)
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state=None)
def test_server_evacuate_with_force(self):
# doesn't apply to v2.68+, which removed the ability to force migrate
pass
+
+
+class EvacuateJsonTestV295(EvacuateJsonTestV268):
+ microversion = '2.95'
+ scenarios = [('v2_95', {'api_major_version': 'v2.1'})]
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate(self, rebuild_mock):
+ req_subs = {
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
+
+ @mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
+ def test_server_evacuate_find_host(self, rebuild_mock):
+ req_subs = {
+ 'host': 'testHost',
+ "adminPass": "MySecretPass",
+ }
+ self._test_evacuate(req_subs, 'server-evacuate-find-host-req',
+ server_resp=None, expected_resp_code=200)
+
+ rebuild_mock.assert_called_once_with(mock.ANY, instance=mock.ANY,
+ orig_image_ref=mock.ANY, image_ref=mock.ANY,
+ injected_files=mock.ANY, new_pass="MySecretPass",
+ orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
+ on_shared_storage=None, preserve_ephemeral=mock.ANY,
+ host=None, request_spec=mock.ANY,
+ reimage_boot_volume=False, target_state="stopped")
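
These assertions lean on unittest.mock's mock.ANY sentinel, which compares
equal to anything, so only the arguments the test actually cares about (here
reimage_boot_volume and target_state) are pinned to exact values. A minimal
sketch of that pattern, using only the standard library and made-up argument
values:

from unittest import mock

# A mock standing in for a conductor method such as rebuild_instance.
rebuild_mock = mock.Mock()

# The code under test would call it with many arguments; simulate that call.
rebuild_mock('ctx', instance='inst', host=None, target_state='stopped')

# mock.ANY matches any value, so only the interesting arguments need to be
# spelled out exactly in the assertion.
rebuild_mock.assert_called_once_with(
    mock.ANY, instance=mock.ANY, host=None, target_state='stopped')
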
diff --git a/nova/tests/functional/api_sample_tests/test_hypervisors.py b/nova/tests/functional/api_sample_tests/test_hypervisors.py
index f402f9ebde..f5832ab4ac 100644
--- a/nova/tests/functional/api_sample_tests/test_hypervisors.py
+++ b/nova/tests/functional/api_sample_tests/test_hypervisors.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.functional.api_sample_tests import api_sample_base
diff --git a/nova/tests/functional/api_sample_tests/test_images.py b/nova/tests/functional/api_sample_tests/test_images.py
index 924bc7768f..c84e566409 100644
--- a/nova/tests/functional/api_sample_tests/test_images.py
+++ b/nova/tests/functional/api_sample_tests/test_images.py
@@ -19,10 +19,29 @@ from nova.tests.functional.api_sample_tests import api_sample_base
class ImagesSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
sample_dir = 'images'
+ def generalize_subs(self, subs, vanilla_regexes):
+ """Give the test a chance to modify subs after the server response
+ was verified, and before the on-disk doc/api_samples file is checked.
+ """
+ # When comparing the template to the sample we just care that the image
+ # IDs are UUIDs.
+ subs['eph_encryption_id'] = vanilla_regexes['uuid']
+ subs['eph_encryption_disabled_id'] = vanilla_regexes['uuid']
+ subs['eph_encryption_luks_id'] = vanilla_regexes['uuid']
+ subs['eph_encryption_plain_id'] = vanilla_regexes['uuid']
+ return subs
+
def test_images_list(self):
# Get api sample of images get list request.
response = self._do_get('images')
- self._verify_response('images-list-get-resp', {}, response, 200)
+ subs = {
+ 'eph_encryption_id': self.glance.eph_encryption['id'],
+ 'eph_encryption_disabled_id':
+ self.glance.eph_encryption_disabled['id'],
+ 'eph_encryption_luks_id': self.glance.eph_encryption_luks['id'],
+ 'eph_encryption_plain_id': self.glance.eph_encryption_plain['id'],
+ }
+ self._verify_response('images-list-get-resp', subs, response, 200)
def test_image_get(self):
# Get api sample of one single image details request.
@@ -34,7 +53,14 @@ class ImagesSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
def test_images_details(self):
# Get api sample of all images details request.
response = self._do_get('images/detail')
- self._verify_response('images-details-get-resp', {}, response, 200)
+ subs = {
+ 'eph_encryption_id': self.glance.eph_encryption['id'],
+ 'eph_encryption_disabled_id':
+ self.glance.eph_encryption_disabled['id'],
+ 'eph_encryption_luks_id': self.glance.eph_encryption_luks['id'],
+ 'eph_encryption_plain_id': self.glance.eph_encryption_plain['id'],
+ }
+ self._verify_response('images-details-get-resp', subs, response, 200)
def test_image_metadata_get(self):
# Get api sample of an image metadata request.
diff --git a/nova/tests/functional/api_sample_tests/test_keypairs.py b/nova/tests/functional/api_sample_tests/test_keypairs.py
index eab88f61e1..a121b98449 100644
--- a/nova/tests/functional/api_sample_tests/test_keypairs.py
+++ b/nova/tests/functional/api_sample_tests/test_keypairs.py
@@ -319,3 +319,66 @@ class KeyPairsV235SampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
% keypairs_user2[1])
subs = {'keypair_name': keypairs_user2[2]}
self._verify_response('keypairs-list-user2-resp', subs, response, 200)
+
+
+class KeyPairsV292SampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
+ ADMIN_API = True
+ sample_dir = 'os-keypairs'
+ microversion = '2.92'
+ expected_post_status_code = 201
+ scenarios = [('v2_92', {'api_major_version': 'v2.1'})]
+
+ def setUp(self):
+ super(KeyPairsV292SampleJsonTest, self).setUp()
+ self.api.microversion = self.microversion
+
+ # NOTE(sbauza): This method is stupidly needed for _verify_response().
+ # See the TODO(sdague) above.
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['keypair_name'] = '[0-9a-zA-Z-_.@ ]+'
+ return subs
+
+ def test_keypairs_post_no_longer_supported(self):
+ subs = {
+ 'keypair_name': 'foo',
+ 'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
+ 'user_id': 'fake'
+ }
+ response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
+ self.assertEqual(400, response.status_code)
+
+ def test_keypairs_import_key_invalid_name(self):
+ public_key = fake_crypto.get_ssh_public_key()
+ subs = {
+ 'keypair_name': '!nvalid=name|',
+ 'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
+ 'user_id': 'fake',
+ 'public_key': public_key,
+ }
+ response = self._do_post('os-keypairs', 'keypairs-import-post-req',
+ subs)
+ self.assertEqual(400, response.status_code)
+
+ def _test_keypairs_import_key_post(self, name=None):
+ if not name:
+ name = 'keypair-' + uuids.fake
+ public_key = fake_crypto.get_ssh_public_key()
+ params = {
+ 'keypair_name': name,
+ 'keypair_type': keypair_obj.KEYPAIR_TYPE_SSH,
+ 'user_id': 'fake',
+ 'public_key': public_key,
+ }
+ response = self._do_post('os-keypairs', 'keypairs-import-post-req',
+ params)
+ # NOTE(sbauza): We do some crazy regexp change in _verify_response() so
+ # we only need to pass the keypair name.
+ subs = {'keypair_name': name}
+ self._verify_response('keypairs-import-post-resp', subs, response,
+ self.expected_post_status_code)
+
+ def test_keypairs_import_key_post(self):
+ self._test_keypairs_import_key_post()
+
+ def test_keypairs_import_key_special_characters(self):
+ self._test_keypairs_import_key_post(name='my-key@ my.host')
diff --git a/nova/tests/functional/api_sample_tests/test_migrate_server.py b/nova/tests/functional/api_sample_tests/test_migrate_server.py
index 59321c845a..5fe7070410 100644
--- a/nova/tests/functional/api_sample_tests/test_migrate_server.py
+++ b/nova/tests/functional/api_sample_tests/test_migrate_server.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import versionutils
from nova import exception
diff --git a/nova/tests/functional/api_sample_tests/test_networks.py b/nova/tests/functional/api_sample_tests/test_networks.py
index 0a75d156cb..dd5d945e2a 100644
--- a/nova/tests/functional/api_sample_tests/test_networks.py
+++ b/nova/tests/functional/api_sample_tests/test_networks.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import exception
from nova.tests import fixtures as nova_fixtures
diff --git a/nova/tests/functional/api_sample_tests/test_remote_consoles.py b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
index 986826bfee..e304402ee9 100644
--- a/nova/tests/functional/api_sample_tests/test_remote_consoles.py
+++ b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
@@ -13,6 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova.compute import api as compute
+from nova import exception
from nova.tests.functional.api_sample_tests import test_servers
HTTP_RE = r'(https?://)([\w\d:#@%/;$()~_?\+-=\\.&](#!)?)*'
@@ -38,6 +42,22 @@ class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
self._verify_response('get-vnc-console-post-resp', {'url': HTTP_RE},
response, 200)
+ @mock.patch.object(compute.API, 'get_vnc_console')
+ def test_get_vnc_console_instance_invalid_state(self,
+ mock_get_vnc_console):
+ uuid = self._post_server()
+
+ def fake_get_vnc_console(*args, **kwargs):
+ raise exception.InstanceInvalidState(
+ attr='fake_attr', state='fake_state', method='fake_method',
+ instance_uuid=uuid)
+
+ mock_get_vnc_console.side_effect = fake_get_vnc_console
+ response = self._do_post('servers/%s/action' % uuid,
+ 'get-vnc-console-post-req',
+ {'action': 'os-getVNCConsole'})
+ self.assertEqual(409, response.status_code)
+
def test_get_spice_console(self):
uuid = self._post_server()
response = self._do_post('servers/%s/action' % uuid,
diff --git a/nova/tests/functional/api_sample_tests/test_server_migrations.py b/nova/tests/functional/api_sample_tests/test_server_migrations.py
index 15fb72945c..8ee3b6a36a 100644
--- a/nova/tests/functional/api_sample_tests/test_server_migrations.py
+++ b/nova/tests/functional/api_sample_tests/test_server_migrations.py
@@ -14,9 +14,9 @@
# under the License.
import datetime
+from unittest import mock
import futurist
-import mock
from nova.conductor import manager as conductor_manager
from nova import context
diff --git a/nova/tests/functional/api_sample_tests/test_server_password.py b/nova/tests/functional/api_sample_tests/test_server_password.py
index 11921291f1..8c4800103b 100644
--- a/nova/tests/functional/api_sample_tests/test_server_password.py
+++ b/nova/tests/functional/api_sample_tests/test_server_password.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.functional.api_sample_tests import test_servers
diff --git a/nova/tests/functional/api_sample_tests/test_servers.py b/nova/tests/functional/api_sample_tests/test_servers.py
index aa07b88247..7679c9b734 100644
--- a/nova/tests/functional/api_sample_tests/test_servers.py
+++ b/nova/tests/functional/api_sample_tests/test_servers.py
@@ -618,6 +618,13 @@ class ServersSampleJson290Test(ServersSampleJsonTest):
ADMIN_API = False
+class ServersSampleJson294Test(ServersSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ use_common_server_post = False
+ ADMIN_API = False
+
+
class ServersUpdateSampleJsonTest(ServersSampleBase):
# Many of the 'os_compute_api:servers:*' policies are admin-only, and we
@@ -702,6 +709,44 @@ class ServersUpdateSampleJson290Test(ServersUpdateSampleJsonTest):
self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+class ServersUpdateSampleJson294Test(ServersUpdateSampleJsonTest):
+ microversion = '2.94'
+ scenarios = [('v2_94', {'api_major_version': 'v2.1'})]
+ ADMIN_API = False
+
+ def test_update_server(self):
+ uuid = self._post_server()
+ subs = {}
+ subs['hostid'] = '[a-f0-9]+'
+ subs['access_ip_v4'] = '1.2.3.4'
+ subs['access_ip_v6'] = '80fe::'
+ subs['hostname'] = 'updated-hostname.example.com'
+ response = self._do_put('servers/%s' % uuid,
+ 'server-update-req', subs)
+ self._verify_response('server-update-resp', subs, response, 200)
+
+ def test_server_rebuild(self):
+ uuid = self._post_server()
+ params = {
+ 'uuid': self.glance.auto_disk_config_enabled_image['id'],
+ 'name': 'foobar',
+ 'pass': 'seekr3t',
+ 'hostid': '[a-f0-9]+',
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': '80fe::',
+ 'hostname': 'updated-hostname.example.com',
+ }
+
+ resp = self._do_post(
+ 'servers/%s/action' % uuid,
+ 'server-action-rebuild',
+ params,
+ )
+ subs = params.copy()
+ del subs['uuid']
+ self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+
+
class ServerSortKeysJsonTests(ServersSampleBase):
sample_dir = 'servers-sort'
diff --git a/nova/tests/functional/api_sample_tests/test_shelve.py b/nova/tests/functional/api_sample_tests/test_shelve.py
index 37d24b6cea..0dfef71055 100644
--- a/nova/tests/functional/api_sample_tests/test_shelve.py
+++ b/nova/tests/functional/api_sample_tests/test_shelve.py
@@ -15,10 +15,25 @@
import nova.conf
+from nova import objects
from nova.tests.functional.api_sample_tests import test_servers
+from oslo_utils.fixture import uuidsentinel
+from unittest import mock
CONF = nova.conf.CONF
+fake_aggregate = {
+ 'deleted': 0,
+ 'deleted_at': None,
+ 'created_at': None,
+ 'updated_at': None,
+ 'id': 123,
+ 'uuid': uuidsentinel.fake_aggregate,
+ 'name': 'us-west',
+ 'hosts': ['host01'],
+ 'metadetails': {'availability_zone': 'us-west'},
+}
+
class ShelveJsonTest(test_servers.ServersSampleBase):
# The 'os_compute_api:os-shelve:shelve_offload' policy is admin-only
@@ -30,9 +45,11 @@ class ShelveJsonTest(test_servers.ServersSampleBase):
# Don't offload instance, so we can test the offload call.
CONF.set_override('shelved_offload_time', -1)
- def _test_server_action(self, uuid, template, action):
+ def _test_server_action(self, uuid, template, action, subs=None):
+ subs = subs or {}
+ subs.update({'action': action})
response = self._do_post('servers/%s/action' % uuid,
- template, {'action': action})
+ template, subs)
self.assertEqual(202, response.status_code)
self.assertEqual("", response.text)
@@ -51,26 +68,288 @@ class ShelveJsonTest(test_servers.ServersSampleBase):
self._test_server_action(uuid, 'os-unshelve', 'unshelve')
-class UnshelveJson277Test(test_servers.ServersSampleBase):
+class UnshelveJson277Test(ShelveJsonTest):
+ ADMIN_API = False
sample_dir = "os-shelve"
microversion = '2.77'
scenarios = [('v2_77', {'api_major_version': 'v2.1'})]
+ def setUp(self):
+ super(UnshelveJson277Test, self).setUp()
+        # Almost all of the following tests require the instance to be
+        # shelve offloaded, so shelve offload the instance and skip the
+        # shelve_offload_test below.
+ CONF.set_override('shelved_offload_time', 0)
+
+ def test_shelve_offload(self):
+ # Skip this test as the instance is already shelve offloaded.
+ pass
+
+ def test_unshelve_with_az(self):
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action(
+ uuid,
+ 'os-unshelve-az',
+ 'unshelve',
+ subs={"availability_zone": "us-west"}
+ )
+
+
+class UnshelveJson291Test(UnshelveJson277Test):
+ ADMIN_API = True
+ sample_dir = "os-shelve"
+ microversion = '2.91'
+ scenarios = [('v2_91', {'api_major_version': 'v2.1'})]
+
+ def _test_server_action_invalid(
+ self, uuid, template, action, subs=None, msg=None):
+ subs = subs or {}
+ subs.update({'action': action})
+ response = self._do_post('servers/%s/action' % uuid,
+ template, subs)
+ self.assertEqual(400, response.status_code)
+ self.assertIn(msg, response.text)
+
+ def test_unshelve_with_non_valid_host(self):
+ """Ensure an exception rise if host is invalid and
+ a http 400 error
+ """
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action_invalid(
+ uuid, 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01'},
+ msg='Compute host host01 could not be found.')
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_valid_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ """Ensure we can unshelve to a host
+ """
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
+
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action(
+ uuid,
+ 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01'}
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_az_and_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ """Ensure we can unshelve to a host and az
+ """
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
+
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action(
+ uuid,
+ 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01', 'availability_zone': 'us-west'},
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_unpin_az_and_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ """Ensure we can unshelve to a host and az
+ """
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
+
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action(
+ uuid,
+ 'os-unshelve-host-and-unpin-az',
+ 'unshelve',
+ subs={'host': 'host01'},
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_unpin_az(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ """Ensure we can unpin an az
+ """
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
+
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action(
+ uuid,
+ 'os-unshelve-unpin-az',
+ 'unshelve',
+ subs={'host': 'host01'},
+ )
+
+
+class UnshelveJson291NonAdminTest(UnshelveJson291Test):
+ # Use non admin api credentials.
+ ADMIN_API = False
+ sample_dir = "os-shelve"
+ microversion = '2.91'
+ scenarios = [('v2_91', {'api_major_version': 'v2.1'})]
+
+ def _test_server_action_invalid(self, uuid, template, action, subs=None):
+ subs = subs or {}
+ subs.update({'action': action})
+ response = self._do_post('servers/%s/action' % uuid,
+ template, subs)
+ self.assertEqual(403, response.status_code)
+ self.assertIn(
+ "Policy doesn\'t allow os_compute_api:os-shelve:unshelve_to_host" +
+ " to be performed.", response.text)
+
def _test_server_action(self, uuid, template, action, subs=None):
subs = subs or {}
subs.update({'action': action})
response = self._do_post('servers/%s/action' % uuid,
template, subs)
self.assertEqual(202, response.status_code)
- self.assertEqual("", response.text)
+ self.assertEqual('', response.text)
+
+ def test_unshelve_with_non_valid_host(self):
+ """Ensure an exception rise if user is not admin.
+ a http 403 error
+ """
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ self._test_server_action_invalid(
+ uuid,
+ 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01'}
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_unpin_az_and_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
- def test_unshelve_with_az(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
- self._test_server_action(uuid, 'os-unshelve', 'unshelve',
- subs={"availability_zone": "us-west"})
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action_invalid(
+ uuid,
+ 'os-unshelve-host-and-unpin-az',
+ 'unshelve',
+ subs={'host': 'host01'},
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_valid_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
+
+ uuid = self._post_server()
+ self._test_server_action(uuid, 'os-shelve', 'shelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action_invalid(
+ uuid,
+ 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01'}
+ )
+
+ @mock.patch('nova.objects.aggregate._get_by_host_from_db')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_unshelve_with_az_and_host(
+ self, compute_node_get_all_by_host, mock_api_get_by_host):
+ """Ensure we can unshelve to a host and az
+ """
+ # Put compute in the correct az
+ mock_api_get_by_host.return_value = [fake_aggregate]
- def test_unshelve_no_az(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
- self._test_server_action(uuid, 'os-unshelve-null', 'unshelve')
+ fake_computes = objects.ComputeNodeList(
+ objects=[
+ objects.ComputeNode(
+ host='host01',
+ uuid=uuidsentinel.host1,
+ hypervisor_hostname='host01')
+ ]
+ )
+ compute_node_get_all_by_host.return_value = fake_computes
+
+ self._test_server_action_invalid(
+ uuid,
+ 'os-unshelve-host',
+ 'unshelve',
+ subs={'host': 'host01', 'availability_zone': 'us-west'},
+ )
diff --git a/nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py b/nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py
index 5ee3ba7163..36b224510d 100644
--- a/nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py
+++ b/nova/tests/functional/api_sample_tests/test_simple_tenant_usage.py
@@ -13,9 +13,9 @@
# under the License.
import datetime
+from unittest import mock
from urllib import parse
-import mock
from oslo_utils import timeutils
from nova.tests.functional.api_sample_tests import test_servers
diff --git a/nova/tests/functional/compute/test_init_host.py b/nova/tests/functional/compute/test_init_host.py
index f506f6ed59..f5c821e116 100644
--- a/nova/tests/functional/compute/test_init_host.py
+++ b/nova/tests/functional/compute/test_init_host.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
import time
+from unittest import mock
from nova import context as nova_context
from nova import objects
diff --git a/nova/tests/functional/compute/test_live_migration.py b/nova/tests/functional/compute/test_live_migration.py
index b4d68cd1d5..fb7315a23c 100644
--- a/nova/tests/functional/compute/test_live_migration.py
+++ b/nova/tests/functional/compute/test_live_migration.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/functional/compute/test_migration_list.py b/nova/tests/functional/compute/test_migration_list.py
index 49ea236bc4..bfcb018320 100644
--- a/nova/tests/functional/compute/test_migration_list.py
+++ b/nova/tests/functional/compute/test_migration_list.py
@@ -64,7 +64,7 @@ class TestMigrationListObjects(test.TestCase):
self.context, filters, limit, marker,
sort_keys, sort_dirs)
found_uuids = [x.uuid for x in migs]
- had_uuids = sorted([x['uuid'] for x in self.migrations])
+ had_uuids = sorted([x.uuid for x in self.migrations])
self.assertEqual(had_uuids, found_uuids)
def test_get_instance_objects_sorted_paged(self):
@@ -80,7 +80,7 @@ class TestMigrationListObjects(test.TestCase):
['created_at'], ['asc'])
self.assertEqual(len(self.migrations), len(migp1))
migp2 = migration_list.get_migration_objects_sorted(
- self.context, {}, None, migp1[-1]['uuid'],
+ self.context, {}, None, migp1[-1].uuid,
['created_at'], ['asc'])
self.assertEqual(0, len(migp2))
@@ -93,7 +93,7 @@ class TestMigrationListObjects(test.TestCase):
def test_get_sorted_with_limit(self):
migs = migration_list.get_migration_objects_sorted(
self.context, {}, 2, None, ['uuid'], ['asc'])
- uuids = [mig['uuid'] for mig in migs]
+ uuids = [mig.uuid for mig in migs]
had_uuids = [mig.uuid for mig in self.migrations]
self.assertEqual(sorted(had_uuids)[:2], uuids)
self.assertEqual(2, len(uuids))
diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py
index 81b7dfb68c..139fb5e6ac 100644
--- a/nova/tests/functional/compute/test_resource_tracker.py
+++ b/nova/tests/functional/compute/test_resource_tracker.py
@@ -12,9 +12,9 @@
import copy
import os
+from unittest import mock
import fixtures
-import mock
import os_resource_classes as orc
import os_traits
from oslo_utils.fixture import uuidsentinel as uuids
@@ -29,7 +29,6 @@ from nova import conf
from nova import context
from nova import objects
from nova import test
-from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
from nova.virt import driver as virt_driver
@@ -249,6 +248,7 @@ class IronicResourceTrackerTest(test.TestCase):
'numa_topology': None,
'resource_class': None, # Act like admin hasn't set yet...
'stats': stats,
+ 'uuid': str(getattr(uuids, nodename)),
}
self.rt.update_available_resource(self.ctx, nodename)
@@ -694,15 +694,6 @@ class TestProviderConfig(integrated_helpers.ProviderUsageBaseTestCase):
feature a vm cannot be spawning using a custom trait and then start a
compute service that provides that trait.
"""
-
- self.useFixture(nova_fixtures.NeutronFixture(self))
- self.useFixture(nova_fixtures.GlanceFixture(self))
-
- # Start nova services.
- self.api = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1')).admin_api
- self.api.microversion = 'latest'
- self.start_service('conductor')
# start nova-compute that will not have the additional trait.
self._start_compute("fake-host-1")
diff --git a/nova/tests/functional/db/test_aggregate.py b/nova/tests/functional/db/test_aggregate.py
index 35d9024576..be3cd67e38 100644
--- a/nova/tests/functional/db/test_aggregate.py
+++ b/nova/tests/functional/db/test_aggregate.py
@@ -11,8 +11,8 @@
# under the License.
from copy import deepcopy
+from unittest import mock
-import mock
from oslo_db import exception as db_exc
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
diff --git a/nova/tests/functional/db/test_compute_api.py b/nova/tests/functional/db/test_compute_api.py
index 49fa10281a..0cf3e4f5e9 100644
--- a/nova/tests/functional/db/test_compute_api.py
+++ b/nova/tests/functional/db/test_compute_api.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import api as compute_api
diff --git a/nova/tests/functional/db/test_compute_node.py b/nova/tests/functional/db/test_compute_node.py
index 0c605121e4..1bca4eecf6 100644
--- a/nova/tests/functional/db/test_compute_node.py
+++ b/nova/tests/functional/db/test_compute_node.py
@@ -267,7 +267,7 @@ class ComputeNodeTestCase(test.TestCase):
self.assertEqual(res, (1, 1))
# the ratio is refreshed to CONF.initial_xxx_allocation_ratio
- # beacause CONF.xxx_allocation_ratio is None
+ # because CONF.xxx_allocation_ratio is None
cns = db.compute_node_get_all(self.context)
# the ratio is refreshed to CONF.xxx_allocation_ratio
for cn in cns:
diff --git a/nova/tests/functional/db/test_host_mapping.py b/nova/tests/functional/db/test_host_mapping.py
index e4b5a365a7..3d0c5575ca 100644
--- a/nova/tests/functional/db/test_host_mapping.py
+++ b/nova/tests/functional/db/test_host_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
diff --git a/nova/tests/functional/db/test_instance_group.py b/nova/tests/functional/db/test_instance_group.py
index 50314f17ac..6a801f2a55 100644
--- a/nova/tests/functional/db/test_instance_group.py
+++ b/nova/tests/functional/db/test_instance_group.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import fixture as ovo_fixture
diff --git a/nova/tests/functional/db/test_instance_mapping.py b/nova/tests/functional/db/test_instance_mapping.py
index 1b740df629..ef78e7910a 100644
--- a/nova/tests/functional/db/test_instance_mapping.py
+++ b/nova/tests/functional/db/test_instance_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
diff --git a/nova/tests/functional/db/test_quota.py b/nova/tests/functional/db/test_quota.py
index 8c2b637269..cdadebd408 100644
--- a/nova/tests/functional/db/test_quota.py
+++ b/nova/tests/functional/db/test_quota.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils import uuidutils
from nova import context
diff --git a/nova/tests/functional/db/test_virtual_interface.py b/nova/tests/functional/db/test_virtual_interface.py
index 0d64f99cc8..2accb80c01 100644
--- a/nova/tests/functional/db/test_virtual_interface.py
+++ b/nova/tests/functional/db/test_virtual_interface.py
@@ -11,7 +11,8 @@
# under the License.
import datetime
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_utils import timeutils
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index 70918bc5f5..cdf71da0d4 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -247,6 +247,27 @@ class InstanceHelperMixin:
self.assertIn(error_in_tb, event['traceback'])
return event
+ def _assert_build_request_success(self, server_request):
+ server = self.api.post_server({'server': server_request})
+ self._wait_for_state_change(server, 'ACTIVE')
+ return server['id']
+
+ def _assert_build_request_schedule_failure(self, server_request):
+ server = self.api.post_server({'server': server_request})
+ self._wait_for_state_change(server, 'ERROR')
+
+ def _assert_bad_build_request_error(self, server_request):
+ ex = self.assertRaises(
+ api_client.OpenStackApiException, self.api.post_server,
+ {'server': server_request})
+ self.assertEqual(400, ex.response.status_code)
+
+ def _assert_build_request_error(self, server_request):
+ ex = self.assertRaises(
+ api_client.OpenStackApiException, self.api.post_server,
+ {'server': server_request})
+ self.assertEqual(500, ex.response.status_code)
+
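These helpers wrap posting a server-create request and asserting the outcome
(ACTIVE, scheduler ERROR, HTTP 400 or HTTP 500). A minimal usage sketch, with
a hypothetical malformed request:

    # Hypothetical usage; the bogus flavorRef is illustrative only.
    server_req = self._build_server(networks='none')
    server_req['flavorRef'] = 'no-such-flavor'
    self._assert_bad_build_request_error(server_req)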
def _wait_for_migration_status(self, server, expected_statuses):
"""Waits for a migration record with the given statuses to be found
for the given server, else the test fails. The migration record, if
@@ -540,8 +561,8 @@ class InstanceHelperMixin:
self.api.post_server_action(
server['id'],
{'os-migrateLive': {'host': None, 'block_migration': 'auto'}})
- self._wait_for_state_change(server, server_expected_state)
self._wait_for_migration_status(server, [migration_expected_state])
+ return self._wait_for_state_change(server, server_expected_state)
_live_migrate_server = _live_migrate
@@ -577,7 +598,7 @@ class InstanceHelperMixin:
def _evacuate_server(
self, server, extra_post_args=None, expected_host=None,
- expected_state='ACTIVE', expected_task_state=NOT_SPECIFIED,
+ expected_state='SHUTOFF', expected_task_state=NOT_SPECIFIED,
expected_migration_status='done'):
"""Evacuate a server."""
api = getattr(self, 'admin_api', self.api)
@@ -606,9 +627,18 @@ class InstanceHelperMixin:
self.api.post_server_action(server['id'], {'os-start': None})
return self._wait_for_state_change(server, 'ACTIVE')
- def _stop_server(self, server):
+ def _stop_server(self, server, wait_for_stop=True):
self.api.post_server_action(server['id'], {'os-stop': None})
- return self._wait_for_state_change(server, 'SHUTOFF')
+ if wait_for_stop:
+ return self._wait_for_state_change(server, 'SHUTOFF')
+ return server
+
+ def _snapshot_server(self, server, snapshot_name):
+ """Create server snapshot."""
+ self.api.post_server_action(
+ server['id'],
+ {'createImage': {'name': snapshot_name}}
+ )
class PlacementHelperMixin:
@@ -629,12 +659,16 @@ class PlacementHelperMixin:
'/resource_providers', version='1.14'
).body['resource_providers']
- def _get_all_rp_uuids_in_a_tree(self, in_tree_rp_uuid):
+ def _get_all_rps_in_a_tree(self, in_tree_rp_uuid):
rps = self.placement.get(
'/resource_providers?in_tree=%s' % in_tree_rp_uuid,
version='1.20',
).body['resource_providers']
- return [rp['uuid'] for rp in rps]
+ return rps
+
+ def _get_all_rp_uuids_in_a_tree(self, in_tree_rp_uuid):
+ return [
+ rp['uuid'] for rp in self._get_all_rps_in_a_tree(in_tree_rp_uuid)]
def _post_resource_provider(self, rp_name):
return self.placement.post(
@@ -842,6 +876,20 @@ class PlacementHelperMixin:
'Test expected a single migration but found %i' % len(migrations))
return migrations[0].uuid
+ def _reserve_placement_resource(self, rp_name, rc_name, reserved):
+ rp_uuid = self._get_provider_uuid_by_name(rp_name)
+ inv = self.placement.get(
+ '/resource_providers/%s/inventories/%s' % (rp_uuid, rc_name),
+ version='1.26'
+ ).body
+ inv["reserved"] = reserved
+ result = self.placement.put(
+ '/resource_providers/%s/inventories/%s' % (rp_uuid, rc_name),
+ version='1.26', body=inv
+ ).body
+ self.assertEqual(reserved, result["reserved"])
+ return result
+
class PlacementInstanceHelperMixin(InstanceHelperMixin, PlacementHelperMixin):
"""A placement-aware variant of InstanceHelperMixin."""
diff --git a/nova/tests/functional/libvirt/base.py b/nova/tests/functional/libvirt/base.py
index f53baa1e24..1ee46a3217 100644
--- a/nova/tests/functional/libvirt/base.py
+++ b/nova/tests/functional/libvirt/base.py
@@ -15,9 +15,10 @@
import copy
import io
+from unittest import mock
import fixtures
-import mock
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import conf
from nova.tests import fixtures as nova_fixtures
@@ -42,7 +43,8 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
super(ServersTestBase, self).setUp()
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
- self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
+ self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
self.useFixture(nova_fixtures.OSBrickFixture())
self.useFixture(fixtures.MockPatch(
@@ -51,12 +53,12 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
return_value={'total': 128, 'used': 44, 'free': 84}))
- self.useFixture(fixtures.MockPatch(
+ self.mock_is_valid_hostname = self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.libvirt_utils.is_valid_hostname',
- return_value=True))
- self.useFixture(fixtures.MockPatch(
+ return_value=True)).mock
+ self.mock_file_open = self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.libvirt_utils.file_open',
- side_effect=lambda *a, **k: io.BytesIO(b'')))
+ side_effect=lambda *a, **k: io.BytesIO(b''))).mock
self.useFixture(fixtures.MockPatch(
'nova.privsep.utils.supports_direct_io',
return_value=True))
@@ -114,7 +116,7 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
def start_compute(
self, hostname='compute1', host_info=None, pci_info=None,
mdev_info=None, vdpa_info=None, libvirt_version=None,
- qemu_version=None,
+ qemu_version=None, cell_name=None, connection=None
):
"""Start a compute service.
@@ -124,34 +126,65 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
:param host_info: A fakelibvirt.HostInfo object for the host. Defaults
to a HostInfo with 2 NUMA nodes, 2 cores per node, 2 threads per
core, and 16GB of RAM.
+        :param connection: A fake libvirt connection. You should not provide
+            it directly. However, it is used by restart_compute_service to
+            implement a restart without losing the hypervisor state.
:returns: The hostname of the created service, which can be used to
            lookup the created service and UUID of the associated resource
provider.
"""
+ if connection and (
+ host_info or
+ pci_info or
+ mdev_info or
+ vdpa_info or
+ libvirt_version or
+ qemu_version
+ ):
+ raise ValueError(
+                    "Either an existing connection instance or a list of "
+                    "parameters for a new connection can be provided, not both"
+ )
def _start_compute(hostname, host_info):
- fake_connection = self._get_connection(
- host_info, pci_info, mdev_info, vdpa_info, libvirt_version,
- qemu_version, hostname,
- )
+ if connection:
+ fake_connection = connection
+ else:
+ fake_connection = self._get_connection(
+ host_info, pci_info, mdev_info, vdpa_info, libvirt_version,
+ qemu_version, hostname,
+ )
+
+ # If the compute is configured with PCI devices then we need to
+ # make sure that the stubs around sysfs has the MAC address
+ # information for the PCI PF devices
+            # If the compute is configured with PCI devices then we need to
+            # make sure that the stubs around sysfs have the MAC address
+            # information for the PCI PF devices
# This is fun. Firstly we need to do a global'ish mock so we can
# actually start the service.
- with mock.patch('nova.virt.libvirt.host.Host.get_connection',
- return_value=fake_connection):
- compute = self.start_service('compute', host=hostname)
- # Once that's done, we need to tweak the compute "service" to
- # make sure it returns unique objects. We do this inside the
- # mock context to avoid a small window between the end of the
- # context and the tweaking where get_connection would revert to
- # being an autospec mock.
- compute.driver._host.get_connection = lambda: fake_connection
+ orig_con = self.mock_conn.return_value
+ self.mock_conn.return_value = fake_connection
+ compute = self.start_service(
+ 'compute', host=hostname, cell_name=cell_name)
+ # Once that's done, we need to tweak the compute "service" to
+ # make sure it returns unique objects.
+ compute.driver._host.get_connection = lambda: fake_connection
+ # Then we revert the local mock tweaking so the next compute can
+ # get its own
+ self.mock_conn.return_value = orig_con
return compute
# ensure we haven't already registered services with these hostnames
self.assertNotIn(hostname, self.computes)
self.assertNotIn(hostname, self.compute_rp_uuids)
- self.computes[hostname] = _start_compute(hostname, host_info)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ self.computes[hostname] = _start_compute(hostname, host_info)
+ # We need to trigger libvirt.Host() to capture the node-local
+ # uuid while we have it mocked out.
+ self.computes[hostname].driver._host.get_node_uuid()
self.compute_rp_uuids[hostname] = self.placement.get(
'/resource_providers?name=%s' % hostname).body[
@@ -159,6 +192,74 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
return hostname
+ def restart_compute_service(
+ self,
+ hostname,
+ host_info=None,
+ pci_info=None,
+ mdev_info=None,
+ vdpa_info=None,
+ libvirt_version=None,
+ qemu_version=None,
+ keep_hypervisor_state=True,
+ ):
+        """Stops the service and starts a new one to have a realistic restart
+
+ :param hostname: the hostname of the nova-compute service to be
+ restarted
+ :param keep_hypervisor_state: If True then we reuse the fake connection
+ from the existing driver. If False a new connection will be created
+ based on the other parameters provided
+ """
+        # We are intentionally not calling super() here. Nova's base test class
+        # defines starting and restarting the compute service with very
+        # different signatures, and those calls cannot be made aware of
+        # the intricacies of the libvirt fixture. So we simply hide that
+        # implementation.
+
+ if keep_hypervisor_state and (
+ host_info or
+ pci_info or
+ mdev_info or
+ vdpa_info or
+ libvirt_version or
+ qemu_version
+ ):
+ raise ValueError(
+ "Either keep_hypervisor_state=True or a list of libvirt "
+ "parameters can be provided but not both"
+ )
+
+ compute = self.computes.pop(hostname)
+ self.compute_rp_uuids.pop(hostname)
+
+        # NOTE(gibi): The service interface cannot be used to simulate a real
+        # service restart as the manager object will not be recreated after a
+        # service.stop() and service.start(), so the manager state survives.
+        # For example the resource tracker will not be recreated after a
+        # stop/start. The service.kill() call cannot help either as it deletes
+        # the service from the DB, which is unrealistic and causes some
+        # operations that refer to the killed host (e.g. evacuate) to fail.
+        # So this helper method stops the original service and then starts
+        # a brand new compute service for the same host and node. This way
+        # a new ComputeManager instance will be created and initialized during
+        # the service startup.
+ compute.stop()
+
+ # this service was running previously, so we have to make sure that
+ # we restart it in the same cell
+ cell_name = self.host_mappings[compute.host].cell_mapping.name
+
+ old_connection = compute.manager.driver._get_connection()
+
+ self.start_compute(
+ hostname, host_info, pci_info, mdev_info, vdpa_info,
+ libvirt_version, qemu_version, cell_name,
+ old_connection if keep_hypervisor_state else None
+ )
+
+ return self.computes[hostname]
+
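restart_compute_service either reuses the old fake connection
(keep_hypervisor_state=True, the default) or builds a new one from the libvirt
parameters passed in. A sketch of both modes, with hypothetical values:

    # Pick up a changed config option while keeping the emulated hypervisor:
    self.flags(cpu_dedicated_set='0-3', group='compute')
    self.restart_compute_service('compute1')

    # Or drop the old hypervisor state and describe new hardware instead:
    new_host_info = fakelibvirt.HostInfo(
        cpu_nodes=2, cpu_sockets=1, cpu_cores=4, cpu_threads=1)
    self.restart_compute_service(
        'compute1', host_info=new_host_info, keep_hypervisor_state=False)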
class LibvirtMigrationMixin(object):
"""A simple mixin to facilliate successful libvirt live migrations
@@ -316,6 +417,21 @@ class LibvirtNeutronFixture(nova_fixtures.NeutronFixture):
'binding:vif_type': 'ovs',
'binding:vnic_type': 'normal',
}
+ network_3_port_2 = {
+ 'id': '132c3875-b175-4b20-8a57-7a76219a13ae',
+ 'network_id': network_3['id'],
+ 'status': 'ACTIVE',
+ 'mac_address': 'd2:0b:fd:99:89:8b',
+ 'fixed_ips': [
+ {
+ 'ip_address': '192.168.2.7',
+ 'subnet_id': subnet_3['id']
+ }
+ ],
+ 'binding:vif_details': {},
+ 'binding:vif_type': 'ovs',
+ 'binding:vnic_type': 'remote-managed',
+ }
network_4_port_1 = {
'id': 'b4cd0b93-2ac8-40a7-9fa4-2cd680ccdf3e',
'network_id': network_4['id'],
@@ -361,6 +477,37 @@ class LibvirtNeutronFixture(nova_fixtures.NeutronFixture):
'binding:vif_type': 'hw_veb',
'binding:vnic_type': 'direct',
}
+ network_4_port_4 = {
+ 'id': 'a31e381d-41ec-41e4-b5a5-ec4ef705fafa',
+ 'network_id': network_1['id'],
+ 'status': 'ACTIVE',
+ 'mac_address': '71:ce:c7:2b:cd:dd',
+ 'fixed_ips': [
+ {
+ 'ip_address': '192.168.4.9',
+ 'subnet_id': subnet_4['id']
+ }
+ ],
+ 'binding:vif_details': {},
+ 'binding:vif_type': 'ovs',
+ 'binding:vnic_type': 'remote-managed',
+ }
+
+ network_4_port_pf = {
+ 'id': 'c6f51315-9202-416f-9e2f-eb78b3ac36d9',
+ 'network_id': network_4['id'],
+ 'status': 'ACTIVE',
+ 'mac_address': 'b5:bc:2e:e7:51:01',
+ 'fixed_ips': [
+ {
+ 'ip_address': '192.168.4.8',
+ 'subnet_id': subnet_4['id']
+ }
+ ],
+ 'binding:vif_details': {'vlan': 42},
+ 'binding:vif_type': 'hostdev_physical',
+ 'binding:vnic_type': 'direct-physical',
+ }
def __init__(self, test):
super(LibvirtNeutronFixture, self).__init__(test)
diff --git a/nova/tests/functional/libvirt/test_device_bus_migration.py b/nova/tests/functional/libvirt/test_device_bus_migration.py
new file mode 100644
index 0000000000..3852e31c68
--- /dev/null
+++ b/nova/tests/functional/libvirt/test_device_bus_migration.py
@@ -0,0 +1,407 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+from unittest import mock
+
+import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.cmd import manage
+from nova import context as nova_context
+from nova import objects
+from nova import test
+from nova.tests.functional.libvirt import base
+from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt import driver as libvirt_driver
+
+
+class LibvirtDeviceBusMigration(base.ServersTestBase):
+
+ microversion = 'latest'
+ # needed for move operations
+ ADMIN_API = True
+
+ def setUp(self):
+ super().setUp()
+ self.context = nova_context.get_admin_context()
+ self.compute_hostname = self.start_compute()
+ self.compute = self.computes[self.compute_hostname]
+ self.commands = manage.ImagePropertyCommands()
+
+ def _unset_stashed_image_properties(self, server_id, properties):
+ instance = objects.Instance.get_by_uuid(self.context, server_id)
+ for p in properties:
+ instance.system_metadata.pop(f'image_{p}')
+ instance.save()
+
+ def _assert_stashed_image_properties(self, server_id, properties):
+ instance = objects.Instance.get_by_uuid(self.context, server_id)
+ for p, value in properties.items():
+ self.assertEqual(instance.system_metadata.get(f'image_{p}'), value)
+
+ def _assert_stashed_image_properties_persist(self, server, properties):
+ # Assert the stashed properties persist across a host reboot
+ self.restart_compute_service(self.compute_hostname)
+ self._assert_stashed_image_properties(server['id'], properties)
+
+ # Assert the stashed properties persist across a guest reboot
+ self._reboot_server(server, hard=True)
+ self._assert_stashed_image_properties(server['id'], properties)
+
+ # Assert the stashed properties persist across a migration
+ if 'other_compute' not in self.computes:
+ self.start_compute('other_compute')
+ # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
+ # probably be less...dumb
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ self._migrate_server(server)
+ self._confirm_resize(server)
+ self._assert_stashed_image_properties(server['id'], properties)
+
+ def test_default_image_property_registration(self):
+ """Assert that the defaults for various hw image properties don't
+ change over the lifecycle of an instance.
+ """
+ default_image_properties = {
+ 'hw_machine_type': 'pc',
+ 'hw_cdrom_bus': 'ide',
+ 'hw_disk_bus': 'virtio',
+ 'hw_input_bus': 'usb',
+ 'hw_pointer_model': 'usbtablet',
+ 'hw_video_model': 'virtio',
+ 'hw_vif_model': 'virtio',
+ }
+
+ server = self._create_server(networks='none')
+ self._assert_stashed_image_properties(
+ server['id'], default_image_properties)
+
+ # Unset the defaults here to ensure that init_host resets them
+ # when the compute restarts the libvirt driver
+ self._unset_stashed_image_properties(
+ server['id'], libvirt_driver.REGISTER_IMAGE_PROPERTY_DEFAULTS)
+
+ # Assert the defaults persist across a host reboot, guest reboot, and
+ # guest migration
+ self._assert_stashed_image_properties_persist(
+ server, default_image_properties)
+
+ def test_non_default_image_property_registration(self):
+ """Assert that non-default values for various hw image properties
+ don't change over the lifecycle of an instance.
+ """
+ non_default_image_properties = {
+ 'hw_machine_type': 'q35',
+ 'hw_cdrom_bus': 'sata',
+ 'hw_disk_bus': 'sata',
+ 'hw_input_bus': 'virtio',
+ 'hw_video_model': 'qxl',
+ 'hw_vif_model': 'e1000',
+ }
+ self.glance.create(
+ None,
+ {
+ 'id': uuids.hw_bus_model_image_uuid,
+ 'name': 'hw_bus_model_image',
+ 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'bare',
+ 'disk_format': 'qcow2',
+ 'size': '74185822',
+ 'min_ram': 0,
+ 'min_disk': 0,
+ 'protected': False,
+ 'visibility': 'public',
+ 'tags': [],
+ 'properties': non_default_image_properties,
+ }
+ )
+ server = self._create_server(
+ networks='none', image_uuid=uuids.hw_bus_model_image_uuid)
+ self._assert_stashed_image_properties(
+ server['id'], non_default_image_properties)
+
+ # Assert the non defaults persist across a host reboot, guest reboot,
+ # and guest migration
+ self._assert_stashed_image_properties_persist(
+ server, non_default_image_properties)
+
+ def test_default_image_property_persists_across_osinfo_changes(self):
+ # Create a server with default image properties
+ default_image_properties = {
+ 'hw_vif_model': 'virtio',
+ 'hw_disk_bus': 'virtio',
+ }
+ server = self._create_server(networks='none')
+ self._assert_stashed_image_properties(
+ server['id'], default_image_properties)
+
+ with test.nested(
+ mock.patch('nova.virt.osinfo.HardwareProperties.network_model',
+ new=mock.PropertyMock()),
+ mock.patch('nova.virt.osinfo.HardwareProperties.disk_model',
+ new=mock.PropertyMock())
+ ) as (mock_nw_model, mock_disk_model):
+ # osinfo returning new things
+ mock_nw_model.return_value = 'e1000'
+ mock_disk_model.return_value = 'sata'
+
+ # Assert the defaults persist across a host reboot, guest reboot,
+ # and guest migration
+ self._assert_stashed_image_properties_persist(
+ server, default_image_properties)
+
+ def test_default_image_property_persists_across_host_flag_changes(self):
+ # Set the default to ps2 via host flag
+ self.flags(pointer_model='ps2mouse')
+ # Restart compute to pick up ps2 setting, which means the guest will
+ # not get a prescribed pointer device
+ self.restart_compute_service(self.compute_hostname)
+
+ # Create a server with default image properties
+ default_image_properties1 = {
+ 'hw_pointer_model': None,
+ 'hw_input_bus': None,
+ }
+ server1 = self._create_server(networks='none')
+ self._assert_stashed_image_properties(
+ server1['id'], default_image_properties1)
+
+ # Assert the defaults persist across a host flag change
+ self.flags(pointer_model='usbtablet')
+ # Restart compute to pick up usb setting
+ self.restart_compute_service(self.compute_hostname)
+ self._assert_stashed_image_properties(
+ server1['id'], default_image_properties1)
+
+ # Assert the defaults persist across a host reboot, guest reboot, and
+ # guest migration
+ self._assert_stashed_image_properties_persist(
+ server1, default_image_properties1)
+
+ # Create a server with new default image properties since the host flag
+ # change
+ default_image_properties2 = {
+ 'hw_pointer_model': 'usbtablet',
+ 'hw_input_bus': 'usb',
+ }
+ server2 = self._create_server(networks='none')
+ self._assert_stashed_image_properties(
+ server2['id'], default_image_properties2)
+
+ # Assert the defaults persist across a host reboot, guest reboot, and
+ # guest migration
+ self._assert_stashed_image_properties_persist(
+ server2, default_image_properties2)
+
+ # Finally, try changing the host flag again to None. Note that it is
+ # not possible for a user to specify None for this option:
+ # https://bugs.launchpad.net/nova/+bug/1866106
+ self.flags(pointer_model=None)
+ # Restart compute to pick up None setting
+ self.restart_compute_service(self.compute_hostname)
+ self._assert_stashed_image_properties(
+ server1['id'], default_image_properties1)
+ self._assert_stashed_image_properties(
+ server2['id'], default_image_properties2)
+
+ # Create a server since the host flag change to None. The defaults
+ # should be the same as for ps2mouse
+ server3 = self._create_server(networks='none')
+ self._assert_stashed_image_properties(
+ server3['id'], default_image_properties1)
+
+ # Assert the defaults persist across a host reboot, guest reboot, and
+ # guest migration for server1, server2, and server3
+ self._assert_stashed_image_properties_persist(
+ server1, default_image_properties1)
+ self._assert_stashed_image_properties_persist(
+ server2, default_image_properties2)
+ self._assert_stashed_image_properties_persist(
+ server3, default_image_properties1)
+
+ def _assert_guest_config(self, config, image_properties):
+ verified_properties = set()
+
+ # Verify the machine type matches the image property
+ value = image_properties.get('hw_machine_type')
+ if value:
+ self.assertEqual(value, config.os_mach_type)
+ verified_properties.add('hw_machine_type')
+
+ # Look at all the devices and verify that their bus and model values
+ # match the desired image properties
+ for device in config.devices:
+ if isinstance(device, vconfig.LibvirtConfigGuestDisk):
+ if device.source_device == 'cdrom':
+ value = image_properties.get('hw_cdrom_bus')
+ if value:
+ self.assertEqual(value, device.target_bus)
+ verified_properties.add('hw_cdrom_bus')
+
+ if device.source_device == 'disk':
+ value = image_properties.get('hw_disk_bus')
+ if value:
+ self.assertEqual(value, device.target_bus)
+ verified_properties.add('hw_disk_bus')
+
+ if isinstance(device, vconfig.LibvirtConfigGuestInput):
+ value = image_properties.get('hw_input_bus')
+ if value:
+ self.assertEqual(value, device.bus)
+ verified_properties.add('hw_input_bus')
+
+ if device.type == 'tablet':
+ value = image_properties.get('hw_pointer_model')
+ if value:
+ self.assertEqual('usbtablet', value)
+ verified_properties.add('hw_pointer_model')
+
+ if isinstance(device, vconfig.LibvirtConfigGuestVideo):
+ value = image_properties.get('hw_video_model')
+ if value:
+ self.assertEqual(value, device.type)
+ verified_properties.add('hw_video_model')
+
+ if isinstance(device, vconfig.LibvirtConfigGuestInterface):
+ value = image_properties.get('hw_vif_model')
+ if value:
+ self.assertEqual(value, device.model)
+ verified_properties.add('hw_vif_model')
+
+ # If hw_pointer_model or hw_input_bus are in the image properties but
+ # we did not encounter devices for them, they should be None
+ for p in ['hw_pointer_model', 'hw_input_bus']:
+ if p in image_properties and p not in verified_properties:
+ self.assertIsNone(image_properties[p])
+ verified_properties.add(p)
+
+ # Assert that we verified all of the image properties
+ self.assertEqual(
+ len(image_properties), len(verified_properties),
+ f'image_properties: {image_properties}, '
+ f'verified_properties: {verified_properties}'
+ )
+
+ def test_machine_type_and_bus_and_model_migration(self):
+ """Assert the behaviour of the nova-manage image_property set command
+ when used to migrate between machine types and associated device buses.
+ """
+ # Create a pass-through mock around _get_guest_config to capture the
+ # config of an instance so we can assert things about it later.
+ # TODO(lyarwood): This seems like a useful thing to do in the libvirt
+ # func tests for all computes we start?
+ self.guest_configs = {}
+ orig_get_config = self.compute.driver._get_guest_config
+
+ def _get_guest_config(_self, *args, **kwargs):
+ guest_config = orig_get_config(*args, **kwargs)
+ instance = args[0]
+ self.guest_configs[instance.uuid] = guest_config
+ return self.guest_configs[instance.uuid]
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.libvirt.LibvirtDriver._get_guest_config',
+ _get_guest_config))
+
+ pc_image_properties = {
+ 'hw_machine_type': 'pc',
+ 'hw_cdrom_bus': 'ide',
+ 'hw_disk_bus': 'sata',
+ 'hw_input_bus': 'usb',
+ 'hw_pointer_model': 'usbtablet',
+ 'hw_video_model': 'cirrus',
+ 'hw_vif_model': 'e1000',
+ }
+ self.glance.create(
+ None,
+ {
+ 'id': uuids.pc_image_uuid,
+ 'name': 'pc_image',
+ 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'bare',
+ 'disk_format': 'qcow2',
+ 'size': '74185822',
+ 'min_ram': 0,
+ 'min_disk': 0,
+ 'protected': False,
+ 'visibility': 'public',
+ 'tags': [],
+ 'properties': pc_image_properties,
+ }
+ )
+
+ body = self._build_server(
+ image_uuid=uuids.pc_image_uuid, networks='auto')
+
+ # Add a cdrom to be able to verify hw_cdrom_bus
+ body['block_device_mapping_v2'] = [{
+ 'source_type': 'blank',
+ 'destination_type': 'local',
+ 'disk_bus': 'ide',
+ 'device_type': 'cdrom',
+ 'boot_index': 0,
+ }]
+
+ # Create the server and verify stashed image properties
+ server = self.api.post_server({'server': body})
+ self._wait_for_state_change(server, 'ACTIVE')
+ self._assert_stashed_image_properties(
+ server['id'], pc_image_properties)
+
+ # Verify the guest config matches the image properties
+ guest_config = self.guest_configs[server['id']]
+ self._assert_guest_config(guest_config, pc_image_properties)
+
+ # Set the image properties with nova-manage
+ self._stop_server(server)
+
+ q35_image_properties = {
+ 'hw_machine_type': 'q35',
+ 'hw_cdrom_bus': 'sata',
+ 'hw_disk_bus': 'virtio',
+ 'hw_input_bus': 'virtio',
+ 'hw_pointer_model': 'usbtablet',
+ 'hw_video_model': 'qxl',
+ 'hw_vif_model': 'virtio',
+ }
+ property_list = [
+ f'{p}={value}' for p, value in q35_image_properties.items()
+ ]
+
+ self.commands.set(
+ instance_uuid=server['id'], image_properties=property_list)
+
+ # Verify the updated stashed image properties
+ self._start_server(server)
+ self._assert_stashed_image_properties(
+ server['id'], q35_image_properties)
+
+ # The guest config should reflect the new values except for the cdrom
+ # block device bus which is taken from the block_device_mapping record,
+ # not system_metadata, so it cannot be changed
+ q35_image_properties['hw_cdrom_bus'] = 'ide'
+ guest_config = self.guest_configs[server['id']]
+ self._assert_guest_config(guest_config, q35_image_properties)
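The migration step above drives manage.ImagePropertyCommands().set() directly
with 'name=value' strings while the server is stopped. A condensed sketch of
that call, with a hypothetical instance UUID:

    from nova.cmd import manage

    # The instance must be stopped first; properties use the name=value form.
    commands = manage.ImagePropertyCommands()
    commands.set(
        instance_uuid='11111111-2222-3333-4444-555555555555',  # hypothetical
        image_properties=['hw_machine_type=q35', 'hw_disk_bus=virtio'],
    )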
diff --git a/nova/tests/functional/libvirt/test_evacuate.py b/nova/tests/functional/libvirt/test_evacuate.py
index 531cefc63c..0e89a3cdb6 100644
--- a/nova/tests/functional/libvirt/test_evacuate.py
+++ b/nova/tests/functional/libvirt/test_evacuate.py
@@ -13,10 +13,10 @@
# under the License.
import collections
-import fixtures
-import mock
import os.path
+from unittest import mock
+import fixtures
from oslo_utils import fileutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
@@ -415,7 +415,9 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
with mock.patch.object(fakelibvirt.Connection, 'getHostname',
return_value=name):
- compute = self.start_service('compute', host=name)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % name))
+ compute = self.start_service('compute', host=name)
compute.driver._host.get_connection().getHostname = lambda: name
return compute
@@ -427,6 +429,7 @@ class _LibvirtEvacuateTest(integrated_helpers.InstanceHelperMixin):
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.GlanceFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
fake_network.set_stub_network_methods(self)
api_fixture = self.useFixture(
diff --git a/nova/tests/functional/libvirt/test_live_migration.py b/nova/tests/functional/libvirt/test_live_migration.py
index f714a5f043..31ff9dfca0 100644
--- a/nova/tests/functional/libvirt/test_live_migration.py
+++ b/nova/tests/functional/libvirt/test_live_migration.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import threading
from lxml import etree
@@ -19,15 +20,18 @@ from nova.tests.functional import integrated_helpers
from nova.tests.functional.libvirt import base as libvirt_base
-class LiveMigrationQueuedAbortTest(
+class LiveMigrationWithLockBase(
libvirt_base.LibvirtMigrationMixin,
libvirt_base.ServersTestBase,
integrated_helpers.InstanceHelperMixin
):
- """Functional test for bug 1949808.
+    """Base for live migration tests which require live migration to be
+    locked for a certain period of time and then unlocked afterwards.
- This test is used to confirm that VM's state is reverted properly
- when queued Live migration is aborted.
+    A separate base class is needed because the locking mechanism could work
+    in an unpredictable way if two tests of the same class tried to use it
+    simultaneously. Every test using this mechanism should use a separate
+    class instance.
"""
api_major_version = 'v2.1'
@@ -69,7 +73,15 @@ class LiveMigrationQueuedAbortTest(
dom = conn.lookupByUUIDString(server)
dom.complete_job()
- def test_queued_live_migration_abort(self):
+
+class LiveMigrationQueuedAbortTestVmStatus(LiveMigrationWithLockBase):
+ """Functional test for bug #1949808.
+
+ This test is used to confirm that VM's state is reverted properly
+ when queued Live migration is aborted.
+ """
+
+ def test_queued_live_migration_abort_vm_status(self):
# Lock live migrations
self.lock_live_migration.acquire()
@@ -105,13 +117,96 @@ class LiveMigrationQueuedAbortTest(
'/servers/%s/migrations/%s' % (self.server_b['id'],
serverb_migration['id']))
self._wait_for_migration_status(self.server_b, ['cancelled'])
- # Unlock live migrations and confirm that server_a becomes
- # active again after successful live migration
+ # Unlock live migrations and confirm that both servers become
+ # active again after successful (server_a) and aborted
+ # (server_b) live migrations
self.lock_live_migration.release()
self._wait_for_state_change(self.server_a, 'ACTIVE')
+ self._wait_for_state_change(self.server_b, 'ACTIVE')
+
+
+class LiveMigrationQueuedAbortTestLeftoversRemoved(LiveMigrationWithLockBase):
+ """Functional test for bug #1960412.
+
+    Placement allocations for the live migration, and the inactive Neutron
+    port bindings created on the destination host by the Nova control plane
+    when live migration is initiated, should be removed when a queued live
+    migration is aborted using the Nova API.
+ """
+
+ def test_queued_live_migration_abort_leftovers_removed(self):
+ # Lock live migrations
+ self.lock_live_migration.acquire()
+
+        # Start instances: the first one is used to occupy the executor's
+        # live migration queue, the second one to confirm that queued live
+        # migrations are aborted properly.
+ # port_1 is created automatically when neutron fixture is
+ # initialized, port_2 is created manually
+ self.server_a = self._create_server(
+ host=self.src_hostname,
+ networks=[{'port': self.neutron.port_1['id']}])
+ self.neutron.create_port({'port': self.neutron.port_2})
+ self.server_b = self._create_server(
+ host=self.src_hostname,
+ networks=[{'port': self.neutron.port_2['id']}])
+        # Issue live migration requests for both servers. We expect
+        # server_a's live migration to be running but locked by
+        # self.lock_live_migration, and server_b's live migration to be
+        # queued.
+ self._live_migrate(
+ self.server_a,
+ migration_expected_state='running',
+ server_expected_state='MIGRATING'
+ )
+ self._live_migrate(
+ self.server_b,
+ migration_expected_state='queued',
+ server_expected_state='MIGRATING'
+ )
- # FIXME(artom) Assert the server_b never comes out of 'MIGRATING'
- self.assertRaises(
- AssertionError,
- self._wait_for_state_change, self.server_b, 'ACTIVE')
- self._wait_for_state_change(self.server_b, 'MIGRATING')
+ # Abort live migration for server_b
+ migration_server_a = self.api.api_get(
+ '/os-migrations?instance_uuid=%s' % self.server_a['id']
+ ).body['migrations'].pop()
+ migration_server_b = self.api.api_get(
+ '/os-migrations?instance_uuid=%s' % self.server_b['id']
+ ).body['migrations'].pop()
+
+ self.api.api_delete(
+ '/servers/%s/migrations/%s' % (self.server_b['id'],
+ migration_server_b['id']))
+ self._wait_for_migration_status(self.server_b, ['cancelled'])
+ # Unlock live migrations and confirm that both servers become
+ # active again after successful (server_a) and aborted
+ # (server_b) live migrations
+ self.lock_live_migration.release()
+ self._wait_for_state_change(self.server_a, 'ACTIVE')
+ self._wait_for_migration_status(self.server_a, ['completed'])
+ self._wait_for_state_change(self.server_b, 'ACTIVE')
+
+ # Allocations for both successful (server_a) and aborted queued live
+ # migration (server_b) should be removed.
+ allocations_server_a_migration = self.placement.get(
+ '/allocations/%s' % migration_server_a['uuid']
+ ).body['allocations']
+ self.assertEqual({}, allocations_server_a_migration)
+ allocations_server_b_migration = self.placement.get(
+ '/allocations/%s' % migration_server_b['uuid']
+ ).body['allocations']
+ self.assertEqual({}, allocations_server_b_migration)
+
+ # INACTIVE port binding on destination host should be removed when
+ # queued live migration is aborted, so only 1 port binding would
+ # exist for ports attached to both servers.
+ port_binding_server_a = copy.deepcopy(
+ self.neutron._port_bindings[self.neutron.port_1['id']]
+ )
+ self.assertEqual(1, len(port_binding_server_a))
+ self.assertNotIn('src', port_binding_server_a)
+ port_binding_server_b = copy.deepcopy(
+ self.neutron._port_bindings[self.neutron.port_2['id']]
+ )
+ self.assertEqual(1, len(port_binding_server_b))
+ self.assertNotIn('dest', port_binding_server_b)
diff --git a/nova/tests/functional/libvirt/test_machine_type.py b/nova/tests/functional/libvirt/test_machine_type.py
index 3b496189d0..04c38b7338 100644
--- a/nova/tests/functional/libvirt/test_machine_type.py
+++ b/nova/tests/functional/libvirt/test_machine_type.py
@@ -103,7 +103,7 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
self.computes['compute1'].stop()
self._unset_machine_type(server_without['id'])
- self.flags(hw_machine_type='x86_64=pc-q35-1.2.3', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-q35-2.4', group='libvirt')
# Restart the compute
self.computes['compute1'].start()
@@ -115,9 +115,9 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
# is able to pass. This just keeps the tests clean.
self._reboot_server(server_without, hard=True)
- # Assert server_without now has a machine type of pc-q35-1.2.3 picked
+ # Assert server_without now has a machine type of pc-q35-2.4 picked
# up from [libvirt]hw_machine_type during init_host
- self._assert_machine_type(server_without['id'], 'pc-q35-1.2.3')
+ self._assert_machine_type(server_without['id'], 'pc-q35-2.4')
def test_machine_type_after_config_change(self):
"""Assert new instances pick up a new default machine type after the
@@ -129,11 +129,11 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
self._assert_machine_type(server_with['id'], 'q35')
self._assert_machine_type(server_without['id'], 'pc')
- self.flags(hw_machine_type='x86_64=pc-q35-1.2.3', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-q35-2.4', group='libvirt')
server_with_new, server_without_new = self._create_servers()
self._assert_machine_type(server_with_new['id'], 'q35')
- self._assert_machine_type(server_without_new['id'], 'pc-q35-1.2.3')
+ self._assert_machine_type(server_without_new['id'], 'pc-q35-2.4')
def test_machine_type_after_server_rebuild(self):
"""Assert that the machine type of an instance changes with a full
@@ -202,26 +202,26 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
)
def test_machine_type_update_stopped(self):
- self.flags(hw_machine_type='x86_64=pc-1.2.3', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-1.2', group='libvirt')
server = self._create_server(networks='none')
- self._assert_machine_type(server['id'], 'pc-1.2.3')
+ self._assert_machine_type(server['id'], 'pc-1.2')
self._stop_server(server)
machine_type_utils.update_machine_type(
self.context,
server['id'],
- 'pc-1.2.4'
+ 'pc-1.2'
)
self._start_server(server)
- self._assert_machine_type(server['id'], 'pc-1.2.4')
+ self._assert_machine_type(server['id'], 'pc-1.2')
def test_machine_type_update_blocked_active(self):
- self.flags(hw_machine_type='x86_64=pc-1.2.3', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-1.2', group='libvirt')
server = self._create_server(networks='none')
- self._assert_machine_type(server['id'], 'pc-1.2.3')
+ self._assert_machine_type(server['id'], 'pc-1.2')
self.assertRaises(
exception.InstanceInvalidState,
@@ -247,10 +247,10 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
)
def test_machine_type_update_blocked_between_versioned_and_alias(self):
- self.flags(hw_machine_type='x86_64=pc-1.2.3', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-1.2', group='libvirt')
server = self._create_server(networks='none')
- self._assert_machine_type(server['id'], 'pc-1.2.3')
+ self._assert_machine_type(server['id'], 'pc-1.2')
self._stop_server(server)
self.assertRaises(
@@ -372,7 +372,7 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
)
# Change the actual config on the compute
- self.flags(hw_machine_type='x86_64=pc-q35-1.2', group='libvirt')
+ self.flags(hw_machine_type='x86_64=pc-q35-2.4', group='libvirt')
# Assert the existing instances remain the same after being rebooted or
# unshelved, rebuilding their domain configs
@@ -389,4 +389,4 @@ class LibvirtMachineTypeTest(base.ServersTestBase):
# Assert that new instances are spawned with the expected machine types
server_with_new, server_without_new = self._create_servers()
self._assert_machine_type(server_with_new['id'], 'q35')
- self._assert_machine_type(server_without_new['id'], 'pc-q35-1.2')
+ self._assert_machine_type(server_without_new['id'], 'pc-q35-2.4')
diff --git a/nova/tests/functional/libvirt/test_numa_live_migration.py b/nova/tests/functional/libvirt/test_numa_live_migration.py
index 2f3897d6b2..0e504d2df2 100644
--- a/nova/tests/functional/libvirt/test_numa_live_migration.py
+++ b/nova/tests/functional/libvirt/test_numa_live_migration.py
@@ -206,10 +206,8 @@ class NUMALiveMigrationPositiveTests(NUMALiveMigrationPositiveBase):
# Increase cpu_dedicated_set to 0-3, expecting the live migrated server
# to end up on 2,3.
self.flags(cpu_dedicated_set='0-3', group='compute')
- self.computes['host_a'] = self.restart_compute_service(
- self.computes['host_a'])
- self.computes['host_b'] = self.restart_compute_service(
- self.computes['host_b'])
+ self.restart_compute_service('host_a')
+ self.restart_compute_service('host_b')
# Live migrate, RPC-pinning the destination host if asked
if pin_dest:
@@ -333,10 +331,8 @@ class NUMALiveMigrationRollbackTests(NUMALiveMigrationPositiveBase):
# Increase cpu_dedicated_set to 0-3, expecting the live migrated server
# to end up on 2,3.
self.flags(cpu_dedicated_set='0-3', group='compute')
- self.computes['host_a'] = self.restart_compute_service(
- self.computes['host_a'])
- self.computes['host_b'] = self.restart_compute_service(
- self.computes['host_b'])
+ self.restart_compute_service('host_a')
+ self.restart_compute_service('host_b')
# Live migrate, RPC-pinning the destination host if asked. This is a
# rollback test, so server_a is expected to remain on host_a.
diff --git a/nova/tests/functional/libvirt/test_numa_servers.py b/nova/tests/functional/libvirt/test_numa_servers.py
index fd09a11e20..5b73e1b965 100644
--- a/nova/tests/functional/libvirt/test_numa_servers.py
+++ b/nova/tests/functional/libvirt/test_numa_servers.py
@@ -13,11 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
-import testtools
+from unittest import mock
from oslo_config import cfg
from oslo_log import log as logging
+import testtools
import nova
from nova.compute import manager
@@ -346,6 +346,76 @@ class NUMAServersTest(NUMAServersTestBase):
# There shouldn't be any hosts available to satisfy this request
self._run_build_test(flavor_id, end_status='ERROR')
+ def test_create_server_with_mixed_policy_asymmetric_multi_numa(self):
+ """Boot an instance stretched to two NUMA nodes requesting only
+ shared CPUs in one NUMA and only dedicated in the other NUMA node.
+ """
+ # shared dedicated
+ # NUMA0 pCPU | 0 | 2 3
+ # NUMA1 pCPU | | 6 7
+ self.flags(
+ cpu_shared_set='0',
+ cpu_dedicated_set='2,3,6,7',
+ group='compute',
+ )
+ self.flags(vcpu_pin_set=None)
+
+ host_info = fakelibvirt.HostInfo(
+ cpu_nodes=2, cpu_sockets=1, cpu_cores=4, cpu_threads=1)
+ self.start_compute(host_info=host_info, hostname='compute1')
+
+ # sanity check the created host topology object; this is really just a
+ # test of the fakelibvirt module
+ host_numa = objects.NUMATopology.obj_from_db_obj(
+ objects.ComputeNode.get_by_nodename(
+ self.ctxt, 'compute1',
+ ).numa_topology
+ )
+ self.assertEqual(2, len(host_numa.cells))
+ self.assertEqual({0}, host_numa.cells[0].cpuset)
+ self.assertEqual({2, 3}, host_numa.cells[0].pcpuset)
+
+ self.assertEqual(set(), host_numa.cells[1].cpuset)
+ self.assertEqual({6, 7}, host_numa.cells[1].pcpuset)
+
+ # create a flavor with 1 shared and 2 dedicated CPUs stretched to
+ # different NUMA nodes
+ extra_spec = {
+ 'hw:cpu_policy': 'mixed',
+ 'hw:cpu_dedicated_mask': '^0',
+ 'hw:numa_nodes': '2',
+ 'hw:numa_cpus.0': '0',
+ 'hw:numa_cpus.1': '1,2',
+ 'hw:numa_mem.0': '256',
+ 'hw:numa_mem.1': '768',
+ }
+ flavor_id = self._create_flavor(
+ vcpu=3, memory_mb=1024, extra_spec=extra_spec)
+ expected_usage = {
+ 'DISK_GB': 20, 'MEMORY_MB': 1024, 'PCPU': 2, 'VCPU': 1,
+ }
+ # The only possible solution (ignoring the order of vCPU1,2):
+ # vCPU 0 => pCPU 0, NUMA0, shared
+ # vCPU 1 => pCPU 6, NUMA1, dedicated
+ # vCPU 2 => pCPU 7, NUMA1, dedicated
+ server = self._run_build_test(
+ flavor_id, expected_usage=expected_usage)
+
+ # sanity check the instance topology
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ self.assertEqual(2, len(inst.numa_topology.cells))
+
+ self.assertEqual({0}, inst.numa_topology.cells[0].cpuset)
+ self.assertEqual(set(), inst.numa_topology.cells[0].pcpuset)
+ self.assertIsNone(inst.numa_topology.cells[0].cpu_pinning)
+
+ self.assertEqual(set(), inst.numa_topology.cells[1].cpuset)
+ self.assertEqual({1, 2}, inst.numa_topology.cells[1].pcpuset)
+ self.assertEqual(
+ {6, 7},
+ set(inst.numa_topology.cells[1].cpu_pinning.values())
+ )
+
def test_create_server_with_dedicated_policy_old_configuration(self):
"""Create a server using the legacy extra spec and configuration.
@@ -731,7 +801,7 @@ class NUMAServersTest(NUMAServersTestBase):
for host, compute_rp_uuid in self.compute_rp_uuids.items():
if host == original_host:
# the host that had the instance should no longer have
- # alocations since the resize has been confirmed
+ # allocations since the resize has been confirmed
expected_usage = {'VCPU': 0, 'PCPU': 0, 'DISK_GB': 0,
'MEMORY_MB': 0}
else:
@@ -1187,10 +1257,8 @@ class ReshapeForPCPUsTest(NUMAServersTestBase):
self.flags(cpu_dedicated_set='0-7', group='compute')
self.flags(vcpu_pin_set=None)
- computes = {}
- for host, compute in self.computes.items():
- computes[host] = self.restart_compute_service(compute)
- self.computes = computes
+ for host in list(self.computes.keys()):
+ self.restart_compute_service(host)
# verify that the inventory, usages and allocation are correct after
# the reshape
diff --git a/nova/tests/functional/libvirt/test_pci_in_placement.py b/nova/tests/functional/libvirt/test_pci_in_placement.py
new file mode 100644
index 0000000000..41d6c8e008
--- /dev/null
+++ b/nova/tests/functional/libvirt/test_pci_in_placement.py
@@ -0,0 +1,1997 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from unittest import mock
+
+import ddt
+import fixtures
+import os_resource_classes
+import os_traits
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+
+from nova import exception
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import test_pci_sriov_servers
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+
+class PlacementPCIReportingTests(test_pci_sriov_servers._PCIServersTestBase):
+ PCI_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PCI_PROD_ID}"
+ PF_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PF_PROD_ID}"
+ VF_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.VF_PROD_ID}"
+
+ # Just placeholders to satisfy the base class. The real value will be
+ # redefined by the tests
+ PCI_DEVICE_SPEC = []
+ PCI_ALIAS = [
+ jsonutils.dumps(x)
+ for x in (
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "name": "a-pci-dev",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "device_type": "type-PF",
+ "name": "a-pf",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "device_type": "type-VF",
+ "name": "a-vf",
+ },
+ )
+ ]
+
+ def setUp(self):
+ super().setUp()
+ self.flags(group="pci", report_in_placement=True)
+
+ # These tests should not depend on the host's sysfs
+ self.useFixture(
+ fixtures.MockPatch('nova.pci.utils.is_physical_function'))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.get_function_by_ifname',
+ return_value=(None, False)
+ )
+ )
+
+
+class PlacementPCIInventoryReportingTests(PlacementPCIReportingTests):
+
+ def test_new_compute_init_with_pci_devs(self):
+ """A brand new compute is started with multiple pci devices configured
+ for nova.
+ """
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI devs (slot 0 and 1)
+ # * two type-PFs (slot 2 and 3) with two type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=2, num_vfs=4)
+
+ # the emulated devices will then be filtered by the device_spec:
+ device_spec = self._to_list_of_json_str(
+ [
+ # PCI_PROD_ID will match two type-PCI devs (slot 0, 1)
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "traits": ",".join(
+ [os_traits.HW_GPU_API_VULKAN, "CUSTOM_GPU", "purple"]
+ )
+ },
+                # PF_PROD_ID + slot 2 will match one PF but not its child
+                # VFs
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:02.0",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV, "CUSTOM_PF", "pf-white"]
+ ),
+ },
+ # VF_PROD_ID + slot 3 will match two VFs but not their parent
+ # PF
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:03.*",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV_TRUSTED, "CUSTOM_VF", "vf-red"]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ # Finally we assert that only the filtered devices are reported to
+ # placement.
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ # Note that the VF inventory is reported on the parent PF
+ "0000:81:03.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_GPU_API_VULKAN",
+ "CUSTOM_GPU",
+ "CUSTOM_PURPLE",
+ ],
+ "0000:81:01.0": [
+ "HW_GPU_API_VULKAN",
+ "CUSTOM_GPU",
+ "CUSTOM_PURPLE",
+ ],
+ "0000:81:02.0": [
+ "HW_NIC_SRIOV",
+ "CUSTOM_PF",
+ "CUSTOM_PF_WHITE",
+ ],
+ "0000:81:03.0": [
+ "HW_NIC_SRIOV_TRUSTED",
+ "CUSTOM_VF",
+ "CUSTOM_VF_RED",
+ ],
+ },
+ )
+
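The assertions above show how device_spec 'traits' surface in placement:
standard trait names pass through, everything else is upper-cased, hyphens
become underscores, and a CUSTOM_ prefix is added. A rough illustration of
that mapping (not nova's actual helper), assuming os_traits.get_traits()
returns the standard trait names:

    import os_traits

    def normalized_trait(name):
        # 'purple' -> 'CUSTOM_PURPLE', 'pf-white' -> 'CUSTOM_PF_WHITE',
        # while an existing trait such as HW_NIC_SRIOV passes through.
        candidate = name.upper().replace('-', '_')
        if candidate in os_traits.get_traits():
            return candidate
        return 'CUSTOM_' + candidate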
+ def test_new_compute_init_with_pci_dev_custom_rc(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PCI dev in slot 0
+ # * one type-PF dev in slot 1 with a single type-VF under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=1, num_vfs=1)
+
+ device_spec = self._to_list_of_json_str(
+ [
+ # PCI_PROD_ID will match the type-PCI in slot 0
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "resource_class": os_resource_classes.PGPU,
+ "traits": os_traits.HW_GPU_API_VULKAN,
+ },
+ # slot 1 func 0 is the type-PF dev. The child VF is ignored
+ {
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:01.0",
+ "resource_class": "crypto",
+ "traits": "to-the-moon,hodl"
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {os_resource_classes.PGPU: 1},
+ "0000:81:01.0": {"CUSTOM_CRYPTO": 1},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_GPU_API_VULKAN",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_TO_THE_MOON",
+ "CUSTOM_HODL",
+ ],
+ },
+ )
+
+ def test_dependent_device_config_is_rejected(self):
+ """Configuring both the PF and its children VFs is not supported.
+ Only either of them can be given to nova.
+ """
+ # The fake libvirt will emulate on the host:
+ # * one type-PF dev in slot 0 with a single type-VF under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+        # both devices will be matched by our config
+ device_spec = self._to_list_of_json_str(
+ [
+ # PF
+ {
+ "address": "0000:81:00.0"
+ },
+ # Its child VF
+ {
+ "address": "0000:81:00.1"
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.start_compute,
+ hostname="compute1",
+ pci_info=pci_info
+ )
+ self.assertIn(
+ "Configuring both 0000:81:00.1 and 0000:81:00.0 in "
+ "[pci]device_spec is not supported",
+ str(ex)
+ )
+
+ def test_sibling_vfs_with_contradicting_resource_classes_rejected(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF dev in slot 0 with two type-VFs under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # the config matches the two VFs separately and tries to configure
+        # them with different resource classes
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.1",
+ "resource_class": "vf1"
+ },
+ {
+ "address": "0000:81:00.2",
+ "resource_class": "vf2"
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciMixedResourceClassException,
+ self.start_compute,
+ hostname="compute1",
+ pci_info=pci_info
+ )
+ self.assertIn(
+ "VFs from the same PF cannot be configured with different "
+ "'resource_class' values in [pci]device_spec. We got "
+ "CUSTOM_VF2 for 0000:81:00.2 and CUSTOM_VF1 for 0000:81:00.1.",
+ str(ex)
+ )
+
+ def test_sibling_vfs_with_contradicting_traits_rejected(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF dev in slot 0 with two type-VFs under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # the config matches the two VFs separately and tries to configure
+        # them with different trait lists
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.1",
+ "traits": "foo",
+ },
+ {
+ "address": "0000:81:00.2",
+ "traits": "bar",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciMixedTraitsException,
+ self.start_compute,
+ hostname="compute1",
+ pci_info=pci_info
+ )
+ self.assertIn(
+ "VFs from the same PF cannot be configured with different set of "
+ "'traits' in [pci]device_spec. We got "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR for 0000:81:00.2 and "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_FOO for 0000:81:00.1.",
+ str(ex)
+ )
+
+ def test_neutron_sriov_devs_ignored(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF dev in slot 0 with one type-VF under it
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # then the config assigns physnet to the dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "physical_network": "physnet0",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+        # As every matching dev has a physnet configured, they are ignored
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
+ def test_devname_based_dev_spec_rejected(self):
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "devname": "eth0",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.start_compute,
+ hostname="compute1",
+ )
+ self.assertIn(
+ " Invalid [pci]device_spec configuration. PCI Placement reporting "
+ "does not support 'devname' based device specification but we got "
+ "{'devname': 'eth0'}. Please use PCI address in the configuration "
+ "instead.",
+ str(ex)
+ )
+
+ def test_remove_pci(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches that PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # now un-configure the PCI device and restart the compute
+ self.flags(group='pci', device_spec=self._to_list_of_json_str([]))
+ self.restart_compute_service(hostname="compute1")
+
+ # the RP had no allocation so nova could remove it
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
+ def test_remove_one_vf(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+        # then the config matches the VFs
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # remove one of the VFs from the hypervisor and then restart the
+ # compute
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # total value is expected to decrease to 1
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ def test_remove_all_vfs(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+        # then the config matches the VFs
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # remove both VFs from the hypervisor and restart the compute
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=0)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # we expect that the RP is deleted
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
+ def test_remove_all_vfs_add_pf(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # then the config matches both VFs
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # change the config to match the PF but do not match the VFs and
+ # restart the compute
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # we expect that VF inventory is removed and the PF inventory is added
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PF_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ def test_remove_pf_add_vfs(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF in slot 0 with two type-VFs 00.1, 00.2
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # then the config only matches the PF
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PF_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # remove the PF from the config and add the VFs instead then restart
+ # the compute
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ # we expect that PF inventory is removed and the VF inventory is added
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ def test_device_reconfiguration(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with two type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # from slot 0 we match the PF only and ignore the VFs
+ # from slot 1 we match the VFs but ignore the parent PF
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:00.0",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV, "CUSTOM_PF", "pf-white"]
+ ),
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:01.*",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV_TRUSTED, "CUSTOM_VF", "vf-red"]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PF_RC: 1},
+ "0000:81:01.0": {self.VF_RC: 2},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_NIC_SRIOV",
+ "CUSTOM_PF",
+ "CUSTOM_PF_WHITE",
+ ],
+ "0000:81:01.0": [
+ "HW_NIC_SRIOV_TRUSTED",
+ "CUSTOM_VF",
+ "CUSTOM_VF_RED",
+ ],
+ },
+ )
+
+ # change the resource class and traits configuration and restart the
+ # compute
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "resource_class": "CUSTOM_PF",
+ "address": "0000:81:00.0",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV, "pf-black"]
+ ),
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "CUSTOM_VF",
+ "address": "0000:81:01.*",
+ "traits": ",".join(
+ [os_traits.HW_NIC_SRIOV_TRUSTED, "vf-blue", "foobar"]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {"CUSTOM_PF": 1},
+ "0000:81:01.0": {"CUSTOM_VF": 2},
+ },
+ traits={
+ "0000:81:00.0": [
+ "HW_NIC_SRIOV",
+ "CUSTOM_PF_BLACK",
+ ],
+ "0000:81:01.0": [
+ "HW_NIC_SRIOV_TRUSTED",
+ "CUSTOM_VF_BLUE",
+ "CUSTOM_FOOBAR",
+ ],
+ },
+ )
+
+ def _create_one_compute_with_a_pf_consumed_by_an_instance(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF in slot 0 with one type-VF
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # we match the PF only and ignore the VF
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:00.0",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Create an instance consuming the PF
+ extra_spec = {"pci_passthrough:alias": "a-pf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {self.PF_RC: 1},
+ }
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.PF_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ return server, compute1_expected_placement_view
+
+ def test_device_reconfiguration_with_allocations_config_change_warn(self):
+ server, compute1_expected_placement_view = (
+ self._create_one_compute_with_a_pf_consumed_by_an_instance())
+
+ # remove 0000:81:00.0 from the device spec and restart the compute
+ device_spec = self._to_list_of_json_str([])
+ self.flags(group='pci', device_spec=device_spec)
+        # The PF is used but removed from the config. The PciTracker warns
+        # but keeps the device, so the placement logic mimics this and also
+        # only warns but keeps the RP and the allocation in placement intact.
+ self.restart_compute_service(hostname="compute1")
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ # the warning from the PciTracker
+ self.assertIn(
+ "WARNING [nova.pci.manager] Unable to remove device with status "
+ "'allocated' and ownership %s because of PCI device "
+ "1:0000:81:00.0 is allocated instead of ['available', "
+ "'unavailable', 'unclaimable']. Check your [pci]device_spec "
+ "configuration to make sure this allocated device is whitelisted. "
+ "If you have removed the device from the whitelist intentionally "
+ "or the device is no longer available on the host you will need "
+ "to delete the server or migrate it to another host to silence "
+ "this warning."
+ % server['id'],
+ self.stdlog.logger.output,
+ )
+ # the warning from the placement PCI tracking logic
+ self.assertIn(
+ "WARNING [nova.compute.pci_placement_translator] Device spec is "
+ "not found for device 0000:81:00.0 in [pci]device_spec. We are "
+ "skipping this devices during Placement update. The device is "
+ "allocated by %s. You should not remove an allocated device from "
+ "the configuration. Please restore the configuration or cold "
+ "migrate the instance to resolve the inconsistency."
+ % server['id'],
+ self.stdlog.logger.output,
+ )
+
+ def test_device_reconfiguration_with_allocations_config_change_stop(self):
+ self._create_one_compute_with_a_pf_consumed_by_an_instance()
+
+ # switch 0000:81:00.0 PF to 0000:81:00.1 VF
+ # in the config, then restart the compute service
+
+ # only match the VF now
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.1",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+        # The compute fails to start as the new config would mean that the PF
+        # inventory is removed from the 0000:81:00.0 RP and the VF inventory
+        # is added there instead, but the PF inventory has allocations.
+        # Keeping the old inventory as in
+        # test_device_reconfiguration_with_allocations_config_change_warn is
+        # not an option as it would result in two resource classes on the
+        # same RP, one for the PF and one for the VF. That would allow
+        # consuming the same physical device twice. Such dependent device
+        # configuration is intentionally not supported so we are stopping
+        # the compute service.
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.restart_compute_service,
+ hostname="compute1"
+ )
+ self.assertRegex(
+ str(ex),
+ "Failed to gather or report PCI resources to Placement: There was "
+ "a conflict when trying to complete your request.\n\n "
+ "update conflict: Inventory for 'CUSTOM_PCI_8086_1528' on "
+ "resource provider '.*' in use.",
+ )
+
+ def test_device_reconfiguration_with_allocations_hyp_change(self):
+ server, compute1_expected_placement_view = (
+ self._create_one_compute_with_a_pf_consumed_by_an_instance())
+
+ # restart the compute but simulate that the device 0000:81:00.0 is
+        # removed from the hypervisor while the device spec config is left
+        # intact. The PciTracker will notice this and log a warning. The
+        # placement tracking logic simply keeps the allocation intact in
+        # placement as both the PciDevice and the DeviceSpec are available.
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=0, num_vfs=0)
+ self.restart_compute_service(
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False
+ )
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ # the warning from the PciTracker
+ self.assertIn(
+ "WARNING [nova.pci.manager] Unable to remove device with status "
+ "'allocated' and ownership %s because of PCI device "
+ "1:0000:81:00.0 is allocated instead of ['available', "
+ "'unavailable', 'unclaimable']. Check your [pci]device_spec "
+ "configuration to make sure this allocated device is whitelisted. "
+ "If you have removed the device from the whitelist intentionally "
+ "or the device is no longer available on the host you will need "
+ "to delete the server or migrate it to another host to silence "
+ "this warning."
+ % server['id'],
+ self.stdlog.logger.output,
+ )
+
+ def test_reporting_disabled_nothing_is_reported(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches the PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ # Disable placement reporting so even if there are PCI devices on the
+ # hypervisor matching the [pci]device_spec config they are not reported
+ # to Placement
+ self.flags(group="pci", report_in_placement=False)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={},
+ traits={},
+ )
+
+    def test_reporting_cannot_be_disabled_once_it_is_enabled(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches the PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ traits={
+ "0000:81:00.0": [],
+ },
+ )
+
+ # Try to disable placement reporting. The compute will refuse to start
+ # as there are already PCI device RPs in placement.
+ self.flags(group="pci", report_in_placement=False)
+ ex = self.assertRaises(
+ exception.PlacementPciException,
+ self.restart_compute_service,
+ hostname="compute1",
+ pci_info=pci_info,
+ keep_hypervisor_state=False,
+ )
+ self.assertIn(
+ "The [pci]report_in_placement is False but it was enabled before "
+ "on this compute. Nova does not support disabling it after it is "
+ "enabled.",
+ str(ex)
+ )
+
+
+class PlacementPCIAllocationHealingTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ # Make migration succeed
+ self.useFixture(
+ fixtures.MockPatch(
+ "nova.virt.libvirt.driver.LibvirtDriver."
+ "migrate_disk_and_power_off",
+ new=mock.Mock(return_value='{}'),
+ )
+ )
+
+ def test_heal_single_pci_allocation(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ # the config matches the PCI dev
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ # Start a compute *without* PCI tracking in placement
+ self.flags(group="pci", report_in_placement=False)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+
+        # Create an instance that consumes our PCI dev
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+
+ # Restart the compute but now with PCI tracking enabled
+ self.flags(group="pci", report_in_placement=True)
+ self.restart_compute_service("compute1")
+ # Assert that the PCI allocation is healed in placement
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 1}
+ },
+ "allocations": {
+ server['id']: {
+ "0000:81:00.0": {self.PCI_RC: 1}
+ }
+ }
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # run an update_available_resources periodic and assert that the usage
+ # and allocation stays
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ def test_heal_multiple_allocations(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI devs (slot 0 and 1)
+ # * two type-PFs (slot 2 and 3) with 4 type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=2, num_vfs=8)
+ # the config matches:
+ device_spec = self._to_list_of_json_str(
+ [
+ # both type-PCI
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ # the PF in slot 2
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:02.0",
+ },
+ # the VFs in slot 3
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:03.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ # Start a compute *without* PCI tracking in placement
+ self.flags(group="pci", report_in_placement=False)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+ # 2 PCI + 1 PF + 4 VFs
+ self.assertPCIDeviceCounts("compute1", total=7, free=7)
+
+ # Create three instances consuming devices:
+ # * server_2pci: two type-PCI
+ # * server_pf_vf: one PF and one VF
+ # * server_2vf: two VFs
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2pci = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=5)
+
+ extra_spec = {"pci_passthrough:alias": "a-pf:1,a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_pf_vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=3)
+
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=1)
+
+ # Restart the compute but now with PCI tracking enabled
+ self.flags(group="pci", report_in_placement=True)
+ self.restart_compute_service("compute1")
+ # Assert that the PCI allocation is healed in placement
+ self.assertPCIDeviceCounts("compute1", total=7, free=1)
+ expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 4},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ "0000:81:03.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 3},
+ },
+ "allocations": {
+ server_2pci['id']: {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ server_pf_vf['id']: {
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 1},
+ },
+ server_2vf['id']: {
+ "0000:81:03.0": {self.VF_RC: 2}
+ },
+ },
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # run an update_available_resources periodic and assert that the usage
+ # and allocation stays
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ def test_heal_partial_allocations(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI devs (slot 0 and 1)
+ # * two type-PFs (slot 2 and 3) with 4 type-VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=2, num_vfs=8)
+ # the config matches:
+ device_spec = self._to_list_of_json_str(
+ [
+ # both type-PCI
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ # the PF in slot 2
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:02.0",
+ },
+ # the VFs in slot 3
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:03.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+ # 2 PCI + 1 PF + 4 VFs
+ self.assertPCIDeviceCounts("compute1", total=7, free=7)
+ expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ "0000:81:03.0": {self.VF_RC: 4},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ "0000:81:03.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PF_RC: 0},
+ "0000:81:03.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # Create an instance consuming a VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=6)
+        # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ expected_placement_view["usages"]["0000:81:03.0"][self.VF_RC] = 1
+ expected_placement_view["allocations"][server_vf["id"]] = {
+ "0000:81:03.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ # Create another instance consuming two VFs
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2vf = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=7, free=4)
+        # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ expected_placement_view["usages"]["0000:81:03.0"][self.VF_RC] = 3
+ expected_placement_view["allocations"][server_2vf["id"]] = {
+ "0000:81:03.0": {self.VF_RC: 2}
+ }
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view("compute1", **expected_placement_view)
+
+ def test_heal_partial_allocations_during_resize_downsize(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF (slot 0) with 2 type-VFs
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=2)
+ # the config matches just the VFs
+ compute1_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute1_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 2},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Create an instance consuming two VFs
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=2, free=0)
+        # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 2
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 2}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Resize server to use only one VF
+
+ # Start a new compute with only one VF available
+ # The fake libvirt will emulate on the host:
+        # * one type-PF (slot 0) with 1 type-VF
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # the config matches just the VFs
+ compute2_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute2_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts("compute2", total=1, free=1)
+ compute2_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._resize_server(server, flavor_id)
+
+ self.assertPCIDeviceCounts("compute2", total=1, free=0)
+        # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler on the
+ # destination. BUT the resource tracker in the compute will heal the
+ # missing PCI allocation
+ compute2_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute2_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ # the resize is not confirmed, so we expect that the source host
+ # still has PCI allocation in placement, but it is held by the
+ # migration UUID now.
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"], server['id'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # revert the resize
+ server = self._revert_resize(server)
+ # the dest host should be freed up
+ compute2_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 0
+ del compute2_expected_placement_view["allocations"][server["id"]]
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ # on the source host the allocation should be moved back from the
+ # migration UUID to the instance UUID
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"],
+ server['id'],
+ revert=True
+ )
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # resize again and this time confirm the resize
+ server = self._resize_server(server, flavor_id)
+ server = self._confirm_resize(server)
+ # the dest should have the allocation for the server
+ compute2_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute2_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ # the source host should be freed
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 0
+ del compute1_expected_placement_view["allocations"][server["id"]]
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ def test_heal_partial_allocations_during_resize_change_dev_type(self):
+ # The fake libvirt will emulate on the host:
+        # * one type-PF (slot 0) with 1 type-VF
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=1)
+ # the config matches just the VFs
+ compute1_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute1_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+        # Create an instance consuming one VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+        # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # Resize the instance to consume a PF and two PCI devs instead
+
+ # start a compute with enough devices for the resize
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI (slot 0, 1)
+        # * one type-PF (slot 2) with 1 type-VF
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=2, num_pfs=1, num_vfs=1)
+        # the config matches the PCI devs and the PF but not the VF
+ compute2_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:*",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "address": "0000:81:*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute2_device_spec)
+
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts("compute2", total=3, free=3)
+ compute2_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+
+ # resize the server to consume a PF and two PCI devs instead
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:2,a-pf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._resize_server(server, flavor_id)
+ server = self._confirm_resize(server)
+
+ # on the dest we have the new PCI allocations
+ self.assertPCIDeviceCounts("compute2", total=3, free=0)
+ compute2_expected_placement_view["usages"] = (
+ {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ }
+ )
+ compute2_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PF_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute2", **compute2_expected_placement_view)
+
+ # on the source the allocation is freed up
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 0
+ del compute1_expected_placement_view["allocations"][server["id"]]
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ def test_heal_allocation_during_same_host_resize(self):
+ self.flags(allow_resize_to_same_host=True)
+ # The fake libvirt will emulate on the host:
+ # * one type-PFs (slot 0) with 3 type-VFs
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=1, num_vfs=3)
+ # the config matches just the VFs
+ compute1_device_spec = self._to_list_of_json_str(
+ [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "address": "0000:81:00.*",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=compute1_device_spec)
+ # Start a compute with PCI tracking in placement
+ self.flags(group="pci", report_in_placement=True)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=3, free=3)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {self.VF_RC: 3},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.VF_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+        # Create an instance consuming one VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=3, free=2)
+        # As scheduling does not support PCI in placement yet, no allocation
+ # is created for the PCI consumption by the scheduler. BUT the resource
+ # tracker in the compute will heal the missing PCI allocation
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # resize the server to consume 2 VFs on the same host
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._resize_server(server, flavor_id)
+        # during resize both the source and the dest allocations are kept
+        # and in a same host resize that means both are consumed from the
+        # same host
+ self.assertPCIDeviceCounts("compute1", total=3, free=0)
+        # the source side of the allocation is now held by the migration
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"], server['id'])
+ # NOTE(gibi): we intentionally don't heal allocation for the instance
+ # while it is being resized. See the comment in the
+ # pci_placement_translator about the reasoning.
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # revert the resize
+ self._revert_resize(server)
+ self.assertPCIDeviceCounts("compute1", total=3, free=2)
+ # the original allocations are restored
+ self._move_server_allocation(
+ compute1_expected_placement_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 1
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+ # now resize and then confirm it
+ self._resize_server(server, flavor_id)
+ self._confirm_resize(server)
+
+ # we expect that the consumption is according to the new flavor
+ self.assertPCIDeviceCounts("compute1", total=3, free=1)
+ compute1_expected_placement_view[
+ "usages"]["0000:81:00.0"][self.VF_RC] = 2
+ compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {self.VF_RC: 2}
+ }
+        # NOTE(gibi): This is unfortunate but during a same host resize
+        # confirm, when the PCI scheduling is not enabled, the healing logic
+        # cannot heal the dest host allocation during the claim. It will only
+        # heal it in the next run of ResourceTracker._update(). This is due
+        # to the fact that ResourceTracker.drop_move_claim runs both for
+        # revert (on the dest) and confirm (on the source) and in a same host
+        # resize this means that it runs on both the source and the dest as
+        # they are the same.
+        # Anyhow the healing will happen just a bit later. And the end goal
+        # is to enable the scheduler support by default and delete the whole
+        # healing logic. So I think this is acceptable.
+ self._run_periodics()
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+
+@ddt.ddt
+class SimpleRCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
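+        # enable PCI scheduling in Placement so the scheduler allocates the
+        # PCI devices from the device RPs during scheduling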
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=0, num_vfs=0)
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.0",
+ "resource_class": "gpu",
+ "traits": ",".join(
+ [
+ os_traits.HW_GPU_API_VULKAN,
+ "purple",
+ "round",
+ ]
+ ),
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_GPU": 1},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "HW_GPU_API_VULKAN",
+ "CUSTOM_PURPLE",
+ "CUSTOM_ROUND",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_GPU": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view)
+
+ @ddt.data(
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "name": "a-gpu-wrong-rc",
+ },
+ {
+ "resource_class": os_resource_classes.PGPU,
+ "name": "a-gpu-wrong-rc-2",
+ },
+ {
+ "resource_class": "GPU",
+ # NOTE(gibi): "big" is missing from device spec
+ "traits": "purple,big",
+ "name": "a-gpu-missing-trait",
+ },
+ )
+ def test_boot_with_custom_rc_and_traits_no_matching_device(
+ self, pci_alias
+ ):
+ self.flags(group="pci", alias=self._to_list_of_json_str([pci_alias]))
+ extra_spec = {"pci_passthrough:alias": f"{pci_alias['name']}:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks=[], expected_state="ERROR"
+ )
+ self.assertIn("fault", server)
+ self.assertIn("No valid host", server["fault"]["message"])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view
+ )
+
+ def test_boot_with_custom_rc_and_traits_succeeds(self):
+ pci_alias_gpu = {
+ "resource_class": "GPU",
+ "traits": "HW_GPU_API_VULKAN,PURPLE",
+ "name": "a-gpu",
+ }
+ self.flags(
+ group="pci", alias=self._to_list_of_json_str([pci_alias_gpu])
+ )
+
+ extra_spec = {"pci_passthrough:alias": "a-gpu:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+ self.compute1_expected_placement_view["usages"]["0000:81:00.0"][
+ "CUSTOM_GPU"
+ ] = 1
+ self.compute1_expected_placement_view["allocations"][server["id"]] = {
+ "0000:81:00.0": {"CUSTOM_GPU": 1}
+ }
+ self.assert_placement_pci_view(
+ "compute1", **self.compute1_expected_placement_view
+ )
+ self.assert_no_pci_healing("compute1")
+
+
+class RCAndTraitBasedPCIAliasTests(PlacementPCIReportingTests):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ def test_device_claim_consistent_with_placement_allocation(self):
+ """As soon as [filter_scheduler]pci_in_placement is enabled the
+ nova-scheduler will allocate PCI devices in placement. Then on the
+ nova-compute side the PCI claim will also allocate PCI devices in the
+ nova DB. This test will create a situation where the two allocation
+ could contradict and observes that in a contradicting situation the PCI
+ claim will fail instead of allocating a device that is not allocated in
+ placement.
+
+ For the contradiction to happen we need two PCI devices that looks
+ different from placement perspective than from the nova DB perspective.
+
+ We can do that by assigning different traits from in placement and
+ having different product_id in the Nova DB. Then we will create a
+ request that would match from placement perspective to one of the
+ device only and would match to the other device from nova DB
+ perspective. Then we will expect that the boot request fails with no
+ valid host.
+ """
+ # The fake libvirt will emulate on the host:
+ # * one type-PCI in slot 0
+ # * one type-PF in slot 1
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=1, num_pfs=1, num_vfs=0)
+        # we allow both devices to be consumed, but we assign different traits
+ # so we can selectively schedule to one of the devices in placement
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "address": "0000:81:00.0",
+ "resource_class": "MY_DEV",
+ "traits": "A_PCI",
+ },
+ {
+ "address": "0000:81:01.0",
+ "resource_class": "MY_DEV",
+ "traits": "A_PF",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_DEV": 1},
+ "0000:81:01.0": {"CUSTOM_MY_DEV": 1},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_A_PCI",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_A_PF",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_DEV": 0},
+ "0000:81:01.0": {"CUSTOM_MY_DEV": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+
+        # now we create a PCI alias that cannot be fulfilled from both the
+        # nova and the placement perspective at the same time, but can be
+        # fulfilled from each perspective individually
+ pci_alias_no_match = {
+ "resource_class": "MY_DEV",
+ # by product_id this matches 81.00 only
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ # by trait this matches 81.01 only
+ "traits": "A_PF",
+ "name": "a-pci",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_no_match]),
+ )
+
+ # then try to boot with the alias and expect no valid host error
+ extra_spec = {"pci_passthrough:alias": "a-pci:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks=[], expected_state='ERROR')
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ def test_vf_with_split_allocation(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with 2 VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # make all 4 VFs available
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_BLUE",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_BLUE",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 0},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assertPCIDeviceCounts('compute1', total=4, free=4)
+
+ pci_alias_vf = {
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ "name": "a-vf",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_vf]),
+ )
+
+ # reserve VFs from 81.01 in placement to drive the first instance to
+ # 81.00
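+        # (the PCI device RPs are named <hostname>_<PCI address>)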
+ self._reserve_placement_resource(
+ "compute1_0000:81:01.0", "CUSTOM_MY_VF", 2)
+ # boot an instance with a single VF
+        # we expect that it is allocated from 81.00 as both VFs on 81.01 are
+        # reserved
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_1vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=3)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1}
+ }
+ compute1_expected_placement_view["allocations"][server_1vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ # Boot a second instance requesting two VFs and ensure that the only
+ # way that placement allows this is to split the two VFs between PFs.
+ # Let's remove the reservation of one resource from 81.01 so the only
+ # viable placement candidate is: one VF from 81.00 and one VF from
+ # 81.01
+ self._reserve_placement_resource(
+ "compute1_0000:81:01.0", "CUSTOM_MY_VF", 1)
+
+ extra_spec = {"pci_passthrough:alias": "a-vf:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server_2vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=1)
+ compute1_expected_placement_view["usages"] = {
+            # both VMs use one VF
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 1},
+ }
+ compute1_expected_placement_view["allocations"][server_2vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 1},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
+
+ def test_3vfs_asymmetric_split_between_pfs(self):
+ # The fake libvirt will emulate on the host:
+ # * two type-PFs in slot 0, 1 with 2 VFs each
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=2, num_vfs=4)
+ # make all 4 VFs available
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ self.start_compute(hostname="compute1", pci_info=pci_info)
+
+ compute1_expected_placement_view = {
+ "inventories": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 2},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ },
+ "traits": {
+ "0000:81:00.0": [
+ "CUSTOM_BLUE",
+ ],
+ "0000:81:01.0": [
+ "CUSTOM_BLUE",
+ ],
+ },
+ "usages": {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 0},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assertPCIDeviceCounts('compute1', total=4, free=4)
+
+ pci_alias_vf = {
+ "resource_class": "MY_VF",
+ "traits": "blue",
+ "name": "a-vf",
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias_vf]),
+ )
+
+ # Boot an instance requesting three VFs. The 3 VFs can be split between
+ # the two PFs two ways: 2 from 81.00 and 1 from 81.01, or 1 from 81.00
+ # and 2 from 81.01.
+ # Let's block the first way in placement by reserving 1 device from
+ # 81.00
+ self._reserve_placement_resource(
+ "compute1_0000:81:00.0", "CUSTOM_MY_VF", 1)
+ extra_spec = {"pci_passthrough:alias": "a-vf:3"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # We expect this to fit.
+ server_3vf = self._create_server(flavor_id=flavor_id, networks=[])
+
+ self.assertPCIDeviceCounts('compute1', total=4, free=1)
+ compute1_expected_placement_view["usages"] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
+ compute1_expected_placement_view["allocations"][server_3vf["id"]] = {
+ "0000:81:00.0": {"CUSTOM_MY_VF": 1},
+ "0000:81:01.0": {"CUSTOM_MY_VF": 2},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_expected_placement_view)
+ self.assert_no_pci_healing("compute1")
diff --git a/nova/tests/functional/libvirt/test_pci_sriov_servers.py b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
index 2aa95a3016..098a0e857b 100644
--- a/nova/tests/functional/libvirt/test_pci_sriov_servers.py
+++ b/nova/tests/functional/libvirt/test_pci_sriov_servers.py
@@ -14,12 +14,14 @@
# under the License.
import copy
+import pprint
+import typing as ty
+from unittest import mock
from urllib import parse as urlparse
import ddt
import fixtures
from lxml import etree
-import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
@@ -27,10 +29,13 @@ from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
import nova
+from nova.compute import pci_placement_translator
from nova import context
+from nova import exception
from nova.network import constants
from nova import objects
from nova.objects import fields
+from nova.pci.utils import parse_address
from nova.tests import fixtures as nova_fixtures
from nova.tests.fixtures import libvirt as fakelibvirt
from nova.tests.functional.api import client
@@ -40,15 +45,65 @@ CONF = cfg.CONF
LOG = logging.getLogger(__name__)
+class PciPlacementHealingFixture(fixtures.Fixture):
+    """Allow asserting whether the pci_placement_translator module needed to
+    heal PCI allocations. Such healing is only normal during upgrade. After
+    every compute is upgraded and the scheduling support of PCI tracking in
+    placement is enabled, there should be no need to heal PCI allocations in
+ the resource tracker. We assert this as we eventually want to remove the
+ automatic healing logic from the resource tracker.
+ """
+
+ def __init__(self):
+ super().__init__()
+ # a list of (nodename, result, allocation_before, allocation_after)
+        # tuples recording the result of the calls to
+ # update_provider_tree_for_pci
+ self.calls = []
+
+ def setUp(self):
+ super().setUp()
+
+ orig = pci_placement_translator.update_provider_tree_for_pci
+
+ def wrapped_update(
+ provider_tree, nodename, pci_tracker, allocations, same_host
+ ):
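+            # record the allocations both before and after the call so that
+            # a failing assert_no_pci_healing() can show exactly what the
+            # healing changed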
+ alloc_before = copy.deepcopy(allocations)
+ updated = orig(
+ provider_tree, nodename, pci_tracker, allocations, same_host)
+ alloc_after = copy.deepcopy(allocations)
+ self.calls.append((nodename, updated, alloc_before, alloc_after))
+ return updated
+
+ self.useFixture(
+ fixtures.MonkeyPatch(
+ "nova.compute.pci_placement_translator."
+ "update_provider_tree_for_pci",
+ wrapped_update,
+ )
+ )
+
+ def last_healing(self, hostname: str) -> ty.Optional[ty.Tuple[dict, dict]]:
+ for h, updated, before, after in self.calls:
+ if h == hostname and updated:
+ return before, after
+ return None
+
+
class _PCIServersTestBase(base.ServersTestBase):
ADDITIONAL_FILTERS = ['NUMATopologyFilter', 'PciPassthroughFilter']
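+    # the custom resource class nova reports in placement for the PCI
+    # devices whitelisted by these tests, derived from the fakelibvirt
+    # vendor and product IDs (CUSTOM_PCI_<vendor_id>_<product_id>)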
+ PCI_RC = f"CUSTOM_PCI_{fakelibvirt.PCI_VEND_ID}_{fakelibvirt.PCI_PROD_ID}"
+
def setUp(self):
self.ctxt = context.get_admin_context()
- self.flags(passthrough_whitelist=self.PCI_PASSTHROUGH_WHITELIST,
- alias=self.PCI_ALIAS,
- group='pci')
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=self.PCI_ALIAS,
+ group='pci'
+ )
super(_PCIServersTestBase, self).setUp()
@@ -62,6 +117,9 @@ class _PCIServersTestBase(base.ServersTestBase):
'.PciPassthroughFilter.host_passes',
side_effect=host_pass_mock)).mock
+ self.pci_healing_fixture = self.useFixture(
+ PciPlacementHealingFixture())
+
def assertPCIDeviceCounts(self, hostname, total, free):
"""Ensure $hostname has $total devices, $free of which are free."""
devices = objects.PciDeviceList.get_by_compute_node(
@@ -71,8 +129,218 @@ class _PCIServersTestBase(base.ServersTestBase):
self.assertEqual(total, len(devices))
self.assertEqual(free, len([d for d in devices if d.is_available()]))
+ def assert_no_pci_healing(self, hostname):
+ last_healing = self.pci_healing_fixture.last_healing(hostname)
+ before = last_healing[0] if last_healing else None
+ after = last_healing[1] if last_healing else None
+ self.assertIsNone(
+ last_healing,
+ "The resource tracker needed to heal PCI allocation in placement "
+ "on host %s. This should not happen in normal operation as the "
+ "scheduler should create the proper allocation instead.\n"
+ "Allocations before healing:\n %s\n"
+ "Allocations after healing:\n %s\n"
+ % (
+ hostname,
+ pprint.pformat(before),
+ pprint.pformat(after),
+ ),
+ )
+
+ def _get_rp_by_name(self, name, rps):
+ for rp in rps:
+ if rp["name"] == name:
+ return rp
+ self.fail(f'RP {name} is not found in Placement {rps}')
+
+ def assert_placement_pci_inventory(self, hostname, inventories, traits):
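+        # inventories and traits are keyed by the PCI address part of the RP
+        # name, e.g. {"0000:81:00.0": {"CUSTOM_MY_VF": 2}}; the RP name in
+        # placement is expected to be "<hostname>_<pci address>"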
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
+ # rps also contains the root provider so we subtract 1
+ self.assertEqual(
+ len(inventories),
+ len(rps) - 1,
+ f"Number of RPs on {hostname} doesn't match. "
+ f"Expected {list(inventories)} actual {[rp['name'] for rp in rps]}"
+ )
+
+ for rp_name, inv in inventories.items():
+ real_rp_name = f'{hostname}_{rp_name}'
+ rp = self._get_rp_by_name(real_rp_name, rps)
+ rp_inv = self._get_provider_inventory(rp['uuid'])
+
+ self.assertEqual(
+ len(inv),
+ len(rp_inv),
+                f"Number of inventories on {real_rp_name} is not as "
+ f"expected. Expected {inv}, actual {rp_inv}"
+ )
+ for rc, total in inv.items():
+ self.assertEqual(
+ total,
+ rp_inv[rc]["total"])
+ self.assertEqual(
+ total,
+ rp_inv[rc]["max_unit"])
+
+ rp_traits = self._get_provider_traits(rp['uuid'])
+ self.assertEqual(
+ # COMPUTE_MANAGED_PCI_DEVICE is automatically reported on
+ # PCI device RPs by nova
+ set(traits[rp_name]) | {"COMPUTE_MANAGED_PCI_DEVICE"},
+ set(rp_traits),
+                f"Traits on RP {real_rp_name} do not match the expectation"
+ )
-class SRIOVServersTest(_PCIServersTestBase):
+ def assert_placement_pci_usages(self, hostname, usages):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
+ for rp_name, usage in usages.items():
+ real_rp_name = f'{hostname}_{rp_name}'
+ rp = self._get_rp_by_name(real_rp_name, rps)
+ rp_usage = self._get_provider_usages(rp['uuid'])
+ self.assertEqual(
+ usage,
+ rp_usage,
+                f"Usage on RP {real_rp_name} does not match the expectation"
+ )
+
+ def assert_placement_pci_allocations(self, allocations):
+ for consumer, expected_allocations in allocations.items():
+ actual_allocations = self._get_allocations_by_server_uuid(consumer)
+ self.assertEqual(
+ len(expected_allocations),
+ len(actual_allocations),
+                f"The consumer {consumer} allocates from a different number of "
+ f"RPs than expected. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ for rp_name, expected_rp_allocs in expected_allocations.items():
+ rp_uuid = self._get_provider_uuid_by_name(rp_name)
+ self.assertIn(
+ rp_uuid,
+ actual_allocations,
+                    f"The consumer {consumer} is expected to allocate from "
+ f"{rp_name}. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ actual_rp_allocs = actual_allocations[rp_uuid]['resources']
+ self.assertEqual(
+ expected_rp_allocs,
+ actual_rp_allocs,
+                    f"The consumer {consumer} is expected to have allocation "
+ f"{expected_rp_allocs} on {rp_name} but it has "
+ f"{actual_rp_allocs} instead."
+ )
+
+ def assert_placement_pci_allocations_on_host(self, hostname, allocations):
+ compute_rp_uuid = self.compute_rp_uuids[hostname]
+ rps = self._get_all_rps_in_a_tree(compute_rp_uuid)
+
+ for consumer, expected_allocations in allocations.items():
+ actual_allocations = self._get_allocations_by_server_uuid(consumer)
+ self.assertEqual(
+ len(expected_allocations),
+ # actual_allocations also contains allocations against the
+ # root provider for VCPU, MEMORY_MB, and DISK_GB so subtract
+ # one
+ len(actual_allocations) - 1,
+                f"The consumer {consumer} allocates from a different number of "
+ f"RPs than expected. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ for rp_name, expected_rp_allocs in expected_allocations.items():
+ real_rp_name = f'{hostname}_{rp_name}'
+ rp = self._get_rp_by_name(real_rp_name, rps)
+ self.assertIn(
+ rp['uuid'],
+ actual_allocations,
+                    f"The consumer {consumer} is expected to allocate from "
+ f"{rp['uuid']}. Expected: {expected_allocations}, "
+ f"Actual: {actual_allocations}"
+ )
+ actual_rp_allocs = actual_allocations[rp['uuid']]['resources']
+ self.assertEqual(
+ expected_rp_allocs,
+ actual_rp_allocs,
+                    f"The consumer {consumer} is expected to have allocation "
+ f"{expected_rp_allocs} on {rp_name} but it has "
+ f"{actual_rp_allocs} instead."
+ )
+
+ def assert_placement_pci_view(
+ self, hostname, inventories, traits, usages=None, allocations=None
+ ):
+ if not usages:
+ usages = {}
+
+ if not allocations:
+ allocations = {}
+
+ self.assert_placement_pci_inventory(hostname, inventories, traits)
+ self.assert_placement_pci_usages(hostname, usages)
+ self.assert_placement_pci_allocations_on_host(hostname, allocations)
+
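+    # helper to build values for config options such as [pci]device_spec and
+    # [pci]alias, which expect a list of JSON encoded strings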
+ @staticmethod
+    def _to_list_of_json_str(items):
+        return [jsonutils.dumps(x) for x in items]
+
+ @staticmethod
+ def _move_allocation(allocations, from_uuid, to_uuid):
+ allocations[to_uuid] = allocations[from_uuid]
+ del allocations[from_uuid]
+
+ def _move_server_allocation(self, allocations, server_uuid, revert=False):
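+        # during a cold migration or resize the source host allocation is
+        # held by the migration record, so tests mirror that by moving the
+        # consumer key between the server UUID and the migration UUID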
+ migration_uuid = self.get_migration_uuid_for_instance(server_uuid)
+ if revert:
+ self._move_allocation(allocations, migration_uuid, server_uuid)
+ else:
+ self._move_allocation(allocations, server_uuid, migration_uuid)
+
+
+class _PCIServersWithMigrationTestBase(_PCIServersTestBase):
+
+ def setUp(self):
+ super().setUp()
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.tests.fixtures.libvirt.Domain.migrateToURI3',
+ self._migrate_stub))
+
+ def _migrate_stub(self, domain, destination, params, flags):
+ """Stub out migrateToURI3."""
+
+ src_hostname = domain._connection.hostname
+ dst_hostname = urlparse.urlparse(destination).netloc
+
+ # In a real live migration, libvirt and QEMU on the source and
+ # destination talk it out, resulting in the instance starting to exist
+ # on the destination. Fakelibvirt cannot do that, so we have to
+ # manually create the "incoming" instance on the destination
+ # fakelibvirt.
+ dst = self.computes[dst_hostname]
+ dst.driver._host.get_connection().createXML(
+ params['destination_xml'],
+ 'fake-createXML-doesnt-care-about-flags')
+
+ src = self.computes[src_hostname]
+ conn = src.driver._host.get_connection()
+
+ # because migrateToURI3 is spawned in a background thread, this method
+ # does not block the upper nova layers. Because we don't want nova to
+ # think the live migration has finished until this method is done, the
+ # last thing we do is make fakelibvirt's Domain.jobStats() return
+ # VIR_DOMAIN_JOB_COMPLETED.
+ server = etree.fromstring(
+ params['destination_xml']
+ ).find('./uuid').text
+ dom = conn.lookupByUUIDString(server)
+ dom.complete_job()
+
+
+class SRIOVServersTest(_PCIServersWithMigrationTestBase):
# TODO(stephenfin): We're using this because we want to be able to force
# the host during scheduling. We should instead look at overriding policy
@@ -82,7 +350,7 @@ class SRIOVServersTest(_PCIServersTestBase):
VFS_ALIAS_NAME = 'vfs'
PFS_ALIAS_NAME = 'pfs'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PF_PROD_ID,
@@ -120,40 +388,6 @@ class SRIOVServersTest(_PCIServersTestBase):
# fixture already stubbed.
self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))
- self.useFixture(fixtures.MonkeyPatch(
- 'nova.tests.fixtures.libvirt.Domain.migrateToURI3',
- self._migrate_stub))
-
- def _migrate_stub(self, domain, destination, params, flags):
- """Stub out migrateToURI3."""
-
- src_hostname = domain._connection.hostname
- dst_hostname = urlparse.urlparse(destination).netloc
-
- # In a real live migration, libvirt and QEMU on the source and
- # destination talk it out, resulting in the instance starting to exist
- # on the destination. Fakelibvirt cannot do that, so we have to
- # manually create the "incoming" instance on the destination
- # fakelibvirt.
- dst = self.computes[dst_hostname]
- dst.driver._host.get_connection().createXML(
- params['destination_xml'],
- 'fake-createXML-doesnt-care-about-flags')
-
- src = self.computes[src_hostname]
- conn = src.driver._host.get_connection()
-
- # because migrateToURI3 is spawned in a background thread, this method
- # does not block the upper nova layers. Because we don't want nova to
- # think the live migration has finished until this method is done, the
- # last thing we do is make fakelibvirt's Domain.jobStats() return
- # VIR_DOMAIN_JOB_COMPLETED.
- server = etree.fromstring(
- params['destination_xml']
- ).find('./uuid').text
- dom = conn.lookupByUUIDString(server)
- dom.complete_job()
-
def _disable_sriov_in_pf(self, pci_info):
# Check for PF and change the capability from virt_functions
# Delete all the VFs
@@ -357,31 +591,66 @@ class SRIOVServersTest(_PCIServersTestBase):
expect_fail=False):
# The purpose here is to force an observable PCI slot update when
# moving from source to dest. This is accomplished by having a single
- # PCI device on the source, 2 PCI devices on the test, and relying on
- # the fact that our fake HostPCIDevicesInfo creates predictable PCI
- # addresses. The PCI device on source and the first PCI device on dest
- # will have identical PCI addresses. By sticking a "placeholder"
- # instance on that first PCI device on the dest, the incoming instance
- # from source will be forced to consume the second dest PCI device,
- # with a different PCI address.
+ # PCI VF device on the source, 2 PCI VF devices on the dest, and
+ # relying on the fact that our fake HostPCIDevicesInfo creates
+ # predictable PCI addresses. The PCI VF device on source and the first
+ # PCI VF device on dest will have identical PCI addresses. By sticking
+ # a "placeholder" instance on that first PCI VF device on the dest, the
+ # incoming instance from source will be forced to consume the second
+ # dest PCI VF device, with a different PCI address.
+        # We want to test server operations with SRIOV VFs and SRIOV PFs, so
+        # the config of each compute host also has one extra PCI PF device
+        # without any VF children. The two computes have different PCI PF
+        # addresses and MACs so that the test can observe the slot update as
+        # well as the MAC update during migration and after revert.
+ source_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
+ # add an extra PF without VF to be used by direct-physical ports
+ source_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x0,
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:aa',
+ )
self.start_compute(
hostname='source',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=1, num_vfs=1))
+ pci_info=source_pci_info)
+
+ dest_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)
+ # add an extra PF without VF to be used by direct-physical ports
+ dest_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x6, # make it different from the source host
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:bb',
+ )
self.start_compute(
hostname='dest',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=1, num_vfs=2))
+ pci_info=dest_pci_info)
source_port = self.neutron.create_port(
{'port': self.neutron.network_4_port_1})
+ source_pf_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_pf})
dest_port1 = self.neutron.create_port(
{'port': self.neutron.network_4_port_2})
dest_port2 = self.neutron.create_port(
{'port': self.neutron.network_4_port_3})
source_server = self._create_server(
- networks=[{'port': source_port['port']['id']}], host='source')
+ networks=[
+ {'port': source_port['port']['id']},
+ {'port': source_pf_port['port']['id']}
+ ],
+ host='source',
+ )
dest_server1 = self._create_server(
networks=[{'port': dest_port1['port']['id']}], host='dest')
dest_server2 = self._create_server(
@@ -389,6 +658,7 @@ class SRIOVServersTest(_PCIServersTestBase):
# Refresh the ports.
source_port = self.neutron.show_port(source_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
dest_port1 = self.neutron.show_port(dest_port1['port']['id'])
dest_port2 = self.neutron.show_port(dest_port2['port']['id'])
@@ -404,11 +674,24 @@ class SRIOVServersTest(_PCIServersTestBase):
same_slot_port = dest_port2
self._delete_server(dest_server1)
- # Before moving, explictly assert that the servers on source and dest
+ # Before moving, explicitly assert that the servers on source and dest
# have the same pci_slot in their port's binding profile
self.assertEqual(source_port['port']['binding:profile']['pci_slot'],
same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the source host PF PCI device.
+ self.assertEqual(
+ '0000:82:00.0', # which is in sync with the source host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the source host
+ self.assertEqual(
+ 'b4:96:91:34:f4:aa',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+
# Before moving, assert that the servers on source and dest have the
# same PCI source address in their XML for their SRIOV nic.
source_conn = self.computes['source'].driver._host.get_connection()
@@ -425,14 +708,28 @@ class SRIOVServersTest(_PCIServersTestBase):
move_operation(source_server)
# Refresh the ports again, keeping in mind the source_port is now bound
- # on the dest after unshelving.
+ # on the dest after the move.
source_port = self.neutron.show_port(source_port['port']['id'])
same_slot_port = self.neutron.show_port(same_slot_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
self.assertNotEqual(
source_port['port']['binding:profile']['pci_slot'],
same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the dest host PF PCI device.
+ self.assertEqual(
+ '0000:82:06.0', # which is in sync with the dest host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the dest host
+ self.assertEqual(
+ 'b4:96:91:34:f4:bb',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+
conn = self.computes['dest'].driver._host.get_connection()
vms = [vm._def for vm in conn._vms.values()]
self.assertEqual(2, len(vms))
@@ -460,6 +757,169 @@ class SRIOVServersTest(_PCIServersTestBase):
self._confirm_resize(source_server)
self._test_move_operation_with_neutron(move_operation)
+    def test_cold_migrate_and_revert_server_with_neutron(self):
+ # The purpose here is to force an observable PCI slot update when
+        # moving from source to dest and then from dest back to source after
+        # the revert. This is accomplished by having a single
+ # PCI VF device on the source, 2 PCI VF devices on the dest, and
+ # relying on the fact that our fake HostPCIDevicesInfo creates
+ # predictable PCI addresses. The PCI VF device on source and the first
+ # PCI VF device on dest will have identical PCI addresses. By sticking
+ # a "placeholder" instance on that first PCI VF device on the dest, the
+ # incoming instance from source will be forced to consume the second
+ # dest PCI VF device, with a different PCI address.
+        # We want to test server operations with SRIOV VFs and SRIOV PFs, so
+        # the config of each compute host also has one extra PCI PF device
+        # without any VF children. The two computes have different PCI PF
+        # addresses and MACs so that the test can observe the slot update as
+        # well as the MAC update during migration and after revert.
+ source_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
+ # add an extra PF without VF to be used by direct-physical ports
+ source_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x0,
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:aa',
+ )
+ self.start_compute(
+ hostname='source',
+ pci_info=source_pci_info)
+ dest_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)
+ # add an extra PF without VF to be used by direct-physical ports
+ dest_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x6, # make it different from the source host
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:bb',
+ )
+ self.start_compute(
+ hostname='dest',
+ pci_info=dest_pci_info)
+ source_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_1})
+ source_pf_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_pf})
+ dest_port1 = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_2})
+ dest_port2 = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_3})
+ source_server = self._create_server(
+ networks=[
+ {'port': source_port['port']['id']},
+ {'port': source_pf_port['port']['id']}
+ ],
+ host='source',
+ )
+ dest_server1 = self._create_server(
+ networks=[{'port': dest_port1['port']['id']}], host='dest')
+ dest_server2 = self._create_server(
+ networks=[{'port': dest_port2['port']['id']}], host='dest')
+ # Refresh the ports.
+ source_port = self.neutron.show_port(source_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
+ dest_port1 = self.neutron.show_port(dest_port1['port']['id'])
+ dest_port2 = self.neutron.show_port(dest_port2['port']['id'])
+ # Find the server on the dest compute that's using the same pci_slot as
+ # the server on the source compute, and delete the other one to make
+ # room for the incoming server from the source.
+ source_pci_slot = source_port['port']['binding:profile']['pci_slot']
+ dest_pci_slot1 = dest_port1['port']['binding:profile']['pci_slot']
+ if dest_pci_slot1 == source_pci_slot:
+ same_slot_port = dest_port1
+ self._delete_server(dest_server2)
+ else:
+ same_slot_port = dest_port2
+ self._delete_server(dest_server1)
+ # Before moving, explicitly assert that the servers on source and dest
+ # have the same pci_slot in their port's binding profile
+ self.assertEqual(source_port['port']['binding:profile']['pci_slot'],
+ same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the source host PF PCI device.
+ self.assertEqual(
+ '0000:82:00.0', # which is in sync with the source host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the source host
+ self.assertEqual(
+ 'b4:96:91:34:f4:aa',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+ # Before moving, assert that the servers on source and dest have the
+ # same PCI source address in their XML for their SRIOV nic.
+ source_conn = self.computes['source'].driver._host.get_connection()
+        dest_conn = self.computes['dest'].driver._host.get_connection()
+ source_vms = [vm._def for vm in source_conn._vms.values()]
+ dest_vms = [vm._def for vm in dest_conn._vms.values()]
+ self.assertEqual(1, len(source_vms))
+ self.assertEqual(1, len(dest_vms))
+ self.assertEqual(1, len(source_vms[0]['devices']['nics']))
+ self.assertEqual(1, len(dest_vms[0]['devices']['nics']))
+ self.assertEqual(source_vms[0]['devices']['nics'][0]['source'],
+ dest_vms[0]['devices']['nics'][0]['source'])
+
+ # TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
+ # probably be less...dumb
+ with mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}'):
+ self._migrate_server(source_server)
+
+ # Refresh the ports again, keeping in mind the ports are now bound
+ # on the dest after migrating.
+ source_port = self.neutron.show_port(source_port['port']['id'])
+ same_slot_port = self.neutron.show_port(same_slot_port['port']['id'])
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
+ self.assertNotEqual(
+ source_port['port']['binding:profile']['pci_slot'],
+ same_slot_port['port']['binding:profile']['pci_slot'])
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the dest host PF PCI device.
+ self.assertEqual(
+ '0000:82:06.0', # which is in sync with the dest host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the dest host
+ self.assertEqual(
+ 'b4:96:91:34:f4:bb',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+ conn = self.computes['dest'].driver._host.get_connection()
+ vms = [vm._def for vm in conn._vms.values()]
+ self.assertEqual(2, len(vms))
+ for vm in vms:
+ self.assertEqual(1, len(vm['devices']['nics']))
+ self.assertNotEqual(vms[0]['devices']['nics'][0]['source'],
+ vms[1]['devices']['nics'][0]['source'])
+
+ self._revert_resize(source_server)
+
+ # Refresh the ports again, keeping in mind the ports are now bound
+ # on the source as the migration is reverted
+ source_pf_port = self.neutron.show_port(source_pf_port['port']['id'])
+
+ # Assert that the direct-physical port got the pci_slot information
+ # according to the source host PF PCI device.
+ self.assertEqual(
+ '0000:82:00.0', # which is in sync with the source host pci_info
+ source_pf_port['port']['binding:profile']['pci_slot']
+ )
+ # Assert that the direct-physical port is updated with the MAC address
+ # of the PF device from the source host
+ self.assertEqual(
+ 'b4:96:91:34:f4:aa',
+ source_pf_port['port']['binding:profile']['device_mac_address']
+ )
+
def test_evacuate_server_with_neutron(self):
def move_operation(source_server):
# Down the source compute to enable the evacuation
@@ -477,17 +937,44 @@ class SRIOVServersTest(_PCIServersTestBase):
"""
# start two compute services with differing PCI device inventory
- self.start_compute(
- hostname='test_compute0',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=2, num_vfs=8, numa_node=0))
- self.start_compute(
- hostname='test_compute1',
- pci_info=fakelibvirt.HostPCIDevicesInfo(
- num_pfs=1, num_vfs=2, numa_node=1))
+ source_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=1, num_vfs=4, numa_node=0)
+ # add an extra PF without VF to be used by direct-physical ports
+ source_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x0,
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:aa',
+ )
+ self.start_compute(hostname='test_compute0', pci_info=source_pci_info)
- # create the port
- self.neutron.create_port({'port': self.neutron.network_4_port_1})
+ dest_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=1, num_vfs=2, numa_node=1)
+ # add an extra PF without VF to be used by direct-physical ports
+ dest_pci_info.add_device(
+ dev_type='PF',
+            bus=0x82,  # HostPCIDevicesInfo uses bus 0x81 by default
+ slot=0x6, # make it different from the source host
+ function=0,
+ iommu_group=42,
+ # numa node needs to be aligned with the other pci devices in this
+ # host as the instance needs to fit into a single host numa node
+ numa_node=1,
+ vf_ratio=0,
+ mac_address='b4:96:91:34:f4:bb',
+ )
+
+ self.start_compute(hostname='test_compute1', pci_info=dest_pci_info)
+
+ # create the ports
+ port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_1})['port']
+ pf_port = self.neutron.create_port(
+ {'port': self.neutron.network_4_port_pf})['port']
# create a server using the VF via neutron
extra_spec = {'hw:cpu_policy': 'dedicated'}
@@ -495,7 +982,8 @@ class SRIOVServersTest(_PCIServersTestBase):
server = self._create_server(
flavor_id=flavor_id,
networks=[
- {'port': base.LibvirtNeutronFixture.network_4_port_1['id']},
+ {'port': port['id']},
+ {'port': pf_port['id']},
],
host='test_compute0',
)
@@ -503,8 +991,8 @@ class SRIOVServersTest(_PCIServersTestBase):
# our source host should have marked two PCI devices as used, the VF
# and the parent PF, while the future destination is currently unused
self.assertEqual('test_compute0', server['OS-EXT-SRV-ATTR:host'])
- self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
- self.assertPCIDeviceCounts('test_compute1', total=3, free=3)
+ self.assertPCIDeviceCounts('test_compute0', total=6, free=3)
+ self.assertPCIDeviceCounts('test_compute1', total=4, free=4)
# the instance should be on host NUMA node 0, since that's where our
# PCI devices are
@@ -527,19 +1015,32 @@ class SRIOVServersTest(_PCIServersTestBase):
# TODO(stephenfin): Stop relying on a side-effect of how nova
# chooses from multiple PCI devices (apparently the last
# matching one)
- 'pci_slot': '0000:81:01.4',
+ 'pci_slot': '0000:81:00.4',
'physical_network': 'physnet4',
},
port['binding:profile'],
)
+ # ensure the binding details sent to "neutron" are correct
+ pf_port = self.neutron.show_port(pf_port['id'],)['port']
+ self.assertIn('binding:profile', pf_port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '8086:1528',
+ 'pci_slot': '0000:82:00.0',
+ 'physical_network': 'physnet4',
+ 'device_mac_address': 'b4:96:91:34:f4:aa',
+ },
+ pf_port['binding:profile'],
+ )
+
# now live migrate that server
self._live_migrate(server, 'completed')
# we should now have transitioned our usage to the destination, freeing
# up the source in the process
- self.assertPCIDeviceCounts('test_compute0', total=10, free=10)
- self.assertPCIDeviceCounts('test_compute1', total=3, free=1)
+ self.assertPCIDeviceCounts('test_compute0', total=6, free=6)
+ self.assertPCIDeviceCounts('test_compute1', total=4, free=1)
# the instance should now be on host NUMA node 1, since that's where
# our PCI devices are for this second host
@@ -564,6 +1065,18 @@ class SRIOVServersTest(_PCIServersTestBase):
},
port['binding:profile'],
)
+ # ensure the binding details sent to "neutron" are correct
+ pf_port = self.neutron.show_port(pf_port['id'],)['port']
+ self.assertIn('binding:profile', pf_port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '8086:1528',
+ 'pci_slot': '0000:82:06.0',
+ 'physical_network': 'physnet4',
+ 'device_mac_address': 'b4:96:91:34:f4:bb',
+ },
+ pf_port['binding:profile'],
+ )
def test_get_server_diagnostics_server_with_VF(self):
"""Ensure server disagnostics include info on VF-type PCI devices."""
@@ -622,11 +1135,8 @@ class SRIOVServersTest(_PCIServersTestBase):
# Disable SRIOV capabilties in PF and delete the VFs
self._disable_sriov_in_pf(pci_info_no_sriov)
- fake_connection = self._get_connection(pci_info=pci_info_no_sriov,
- hostname='test_compute0')
- self.mock_conn.return_value = fake_connection
-
- self.compute = self.start_service('compute', host='test_compute0')
+ self.start_compute('test_compute0', pci_info=pci_info_no_sriov)
+ self.compute = self.computes['test_compute0']
ctxt = context.get_admin_context()
pci_devices = objects.PciDeviceList.get_by_compute_node(
@@ -638,13 +1148,9 @@ class SRIOVServersTest(_PCIServersTestBase):
self.assertEqual(1, len(pci_devices))
self.assertEqual('type-PCI', pci_devices[0].dev_type)
- # Update connection with original pci info with sriov PFs
- fake_connection = self._get_connection(pci_info=pci_info,
- hostname='test_compute0')
- self.mock_conn.return_value = fake_connection
-
- # Restart the compute service
- self.restart_compute_service(self.compute)
+ # Restart the compute service with sriov PFs
+ self.restart_compute_service(
+ self.compute.host, pci_info=pci_info, keep_hypervisor_state=False)
# Verify if PCI devices are of type type-PF or type-VF
pci_devices = objects.PciDeviceList.get_by_compute_node(
@@ -666,12 +1172,92 @@ class SRIOVServersTest(_PCIServersTestBase):
],
)
+ def test_change_bound_port_vnic_type_kills_compute_at_restart(self):
+ """Create a server with a direct port and change the vnic_type of the
+ bound port to macvtap. Then restart the compute service.
+
+ As the vnic_type is changed on the port but the vif_type is hwveb
+        As the vnic_type is changed on the port but the vif_type is hw_veb
+        instead of macvtap, the vif plug logic will try to look up the netdev
+        of the parent VF. However, that VF is consumed by the instance so the
+        netdev does not exist. This causes the compute service to fail with
+        an exception during startup.
+        """
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=2)
+ self.start_compute(pci_info=pci_info)
+
+ # create a direct port
+ port = self.neutron.network_4_port_1
+ self.neutron.create_port({'port': port})
+
+ # create a server using the VF via neutron
+ server = self._create_server(networks=[{'port': port['id']}])
+
+ # update the vnic_type of the port in neutron
+ port = copy.deepcopy(port)
+ port['binding:vnic_type'] = 'macvtap'
+ self.neutron.update_port(port['id'], {"port": port})
+
+ compute = self.computes['compute1']
+
+ # Force an update on the instance info cache to ensure nova gets the
+ # information about the updated port
+ with context.target_cell(
+ context.get_admin_context(),
+ self.host_mappings['compute1'].cell_mapping
+ ) as cctxt:
+ compute.manager._heal_instance_info_cache(cctxt)
+ self.assertIn(
+ 'The vnic_type of the bound port %s has been changed in '
+ 'neutron from "direct" to "macvtap". Changing vnic_type of a '
+ 'bound port is not supported by Nova. To avoid breaking the '
+ 'connectivity of the instance please change the port '
+ 'vnic_type back to "direct".' % port['id'],
+ self.stdlog.logger.output,
+ )
+
+ def fake_get_ifname_by_pci_address(pci_addr: str, pf_interface=False):
+ # we want to fail the netdev lookup only if the pci_address is
+            # already consumed by our instance. So we look into the instance
+            # definition to see if the device is attached to it as a VF
+ conn = compute.manager.driver._host.get_connection()
+ dom = conn.lookupByUUIDString(server['id'])
+ dev = dom._def['devices']['nics'][0]
+ lookup_addr = pci_addr.replace(':', '_').replace('.', '_')
+ if (
+ dev['type'] == 'hostdev' and
+ dev['source'] == 'pci_' + lookup_addr
+ ):
+ # nova tried to look up the netdev of an already consumed VF.
+ # So we have to fail
+ raise exception.PciDeviceNotFoundById(id=pci_addr)
+
+ # We need to simulate the actual failure manually as in our functional
+ # environment all the PCI lookup is mocked. In reality nova tries to
+ # look up the netdev of the pci device on the host used by the port as
+ # the parent of the macvtap. However, as the originally direct port is
+ # bound to the instance, the VF pci device is already consumed by the
+ # instance and therefore there is no netdev for the VF.
+ self.libvirt.mock_get_ifname_by_pci_address.side_effect = (
+ fake_get_ifname_by_pci_address
+ )
+ # Nova cannot prevent the vnic_type change on a bound port. Neutron
+ # should prevent that instead. But the nova-compute should still
+ # be able to start up and only log an ERROR for this instance in
+        # an inconsistent state.
+ self.restart_compute_service('compute1')
+ self.assertIn(
+ 'Virtual interface plugging failed for instance. Probably the '
+ 'vnic_type of the bound port has been changed. Nova does not '
+ 'support such change.',
+ self.stdlog.logger.output,
+ )
+
class SRIOVAttachDetachTest(_PCIServersTestBase):
# no need for aliases as these test will request SRIOV via neutron
PCI_ALIAS = []
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PF_PROD_ID,
@@ -729,10 +1315,9 @@ class SRIOVAttachDetachTest(_PCIServersTestBase):
host_info = fakelibvirt.HostInfo(cpu_nodes=2, cpu_sockets=1,
cpu_cores=2, cpu_threads=2)
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pfs=1, num_vfs=1)
- fake_connection = self._get_connection(host_info, pci_info)
- self.mock_conn.return_value = fake_connection
-
- self.compute = self.start_service('compute', host='test_compute0')
+ self.start_compute(
+ 'test_compute0', host_info=host_info, pci_info=pci_info)
+ self.compute = self.computes['test_compute0']
# Create server with a port
server = self._create_server(networks=[{'port': first_port_id}])
@@ -782,7 +1367,7 @@ class SRIOVAttachDetachTest(_PCIServersTestBase):
self.neutron.sriov_pf_port2['id'])
-class VDPAServersTest(_PCIServersTestBase):
+class VDPAServersTest(_PCIServersWithMigrationTestBase):
# this is needed for os_compute_api:os-migrate-server:migrate policy
ADMIN_API = True
@@ -791,7 +1376,7 @@ class VDPAServersTest(_PCIServersTestBase):
# Whitelist both the PF and VF; in reality, you probably wouldn't do this
# but we want to make sure that the PF is correctly taken off the table
# once any VF is used
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': '15b3',
'product_id': '101d',
@@ -814,14 +1399,13 @@ class VDPAServersTest(_PCIServersTestBase):
def setUp(self):
super().setUp()
-
# The ultimate base class _IntegratedTestBase uses NeutronFixture but
# we need a bit more intelligent neutron for these tests. Applying the
# new fixture here means that we re-stub what the previous neutron
# fixture already stubbed.
self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))
- def start_compute(self):
+ def start_vdpa_compute(self, hostname='compute-0'):
vf_ratio = self.NUM_VFS // self.NUM_PFS
pci_info = fakelibvirt.HostPCIDevicesInfo(
@@ -859,7 +1443,7 @@ class VDPAServersTest(_PCIServersTestBase):
driver_name='mlx5_core')
vdpa_info.add_device(f'vdpa_vdpa{idx}', idx, vf)
- return super().start_compute(
+ return super().start_compute(hostname=hostname,
pci_info=pci_info, vdpa_info=vdpa_info,
libvirt_version=self.FAKE_LIBVIRT_VERSION,
qemu_version=self.FAKE_QEMU_VERSION)
@@ -900,7 +1484,6 @@ class VDPAServersTest(_PCIServersTestBase):
expected = """
<interface type="vdpa">
<mac address="b5:bc:2e:e7:51:ee"/>
- <model type="virtio"/>
<source dev="/dev/vhost-vdpa-3"/>
</interface>"""
actual = etree.tostring(elem, encoding='unicode')
@@ -914,7 +1497,7 @@ class VDPAServersTest(_PCIServersTestBase):
fake_create,
)
- hostname = self.start_compute()
+ hostname = self.start_vdpa_compute()
num_pci = self.NUM_PFS + self.NUM_VFS
# both the PF and VF with vDPA capabilities (dev_type=vdpa) should have
@@ -947,12 +1530,16 @@ class VDPAServersTest(_PCIServersTestBase):
port['binding:profile'],
)
- def _test_common(self, op, *args, **kwargs):
- self.start_compute()
-
+ def _create_port_and_server(self):
# create the port and a server, with the port attached to the server
vdpa_port = self.create_vdpa_port()
server = self._create_server(networks=[{'port': vdpa_port['id']}])
+ return vdpa_port, server
+
+ def _test_common(self, op, *args, **kwargs):
+ self.start_vdpa_compute()
+
+ vdpa_port, server = self._create_port_and_server()
# attempt the unsupported action and ensure it fails
ex = self.assertRaises(
@@ -962,40 +1549,393 @@ class VDPAServersTest(_PCIServersTestBase):
'not supported for instance with vDPA ports',
ex.response.text)
- def test_attach_interface(self):
- self.start_compute()
+    # NOTE(sbauza): Now that we are past the Antelope release we no longer
+    # need this test
+ def test_attach_interface_service_version_61(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=61
+ ):
+ self._test_common(self._attach_interface, uuids.vdpa_port)
+ def test_attach_interface(self):
+ hostname = self.start_vdpa_compute()
# create the port and a server, but don't attach the port to the server
# yet
- vdpa_port = self.create_vdpa_port()
server = self._create_server(networks='none')
-
+ vdpa_port = self.create_vdpa_port()
# attempt to attach the port to the server
- ex = self.assertRaises(
- client.OpenStackApiException,
- self._attach_interface, server, vdpa_port['id'])
- self.assertIn(
- 'not supported for instance with vDPA ports',
- ex.response.text)
+ self._attach_interface(server, vdpa_port['id'])
+ # ensure the binding details sent to "neutron" were correct
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:06:00.4',
+ 'physical_network': 'physnet4',
+ },
+ port['binding:profile'],
+ )
+ self.assertEqual(hostname, port['binding:host_id'])
+ self.assertEqual(server['id'], port['device_id'])
+
+    # NOTE(sbauza): Now that we are past the Antelope release we no longer
+    # need this test
+ def test_detach_interface_service_version_61(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=61
+ ):
+ self._test_common(self._detach_interface, uuids.vdpa_port)
def test_detach_interface(self):
- self._test_common(self._detach_interface, uuids.vdpa_port)
+ self.start_vdpa_compute()
+ vdpa_port, server = self._create_port_and_server()
+ # ensure the binding details sent to "neutron" were correct
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(server['id'], port['device_id'])
+ self._detach_interface(server, vdpa_port['id'])
+ # ensure the port is no longer owned by the vm
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual('', port['device_id'])
+ self.assertEqual({}, port['binding:profile'])
- def test_shelve(self):
- self._test_common(self._shelve_server)
+ def test_shelve_offload(self):
+ hostname = self.start_vdpa_compute()
+ vdpa_port, server = self._create_port_and_server()
+ # assert the port is bound to the vm and the compute host
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(server['id'], port['device_id'])
+ self.assertEqual(hostname, port['binding:host_id'])
+ num_pci = self.NUM_PFS + self.NUM_VFS
+        # -2: claiming the vDPA device also makes its parent PF unavailable
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ server = self._shelve_server(server)
+        # now that the vm is shelve offloaded the port should not be bound
+        # to any host but should still be owned by the vm
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(server['id'], port['device_id'])
+ self.assertIsNone(port['binding:host_id'])
+ self.assertIn('binding:profile', port)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:host'])
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
- def test_suspend(self):
- self._test_common(self._suspend_server)
+ def test_unshelve_to_same_host(self):
+ hostname = self.start_vdpa_compute()
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
+
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ hostname, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(hostname, port['binding:host_id'])
+
+ server = self._shelve_server(server)
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertIsNone(port['binding:host_id'])
+
+ server = self._unshelve_server(server)
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ hostname, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(hostname, port['binding:host_id'])
+
+ def test_unshelve_to_different_host(self):
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(source, port['binding:host_id'])
+
+ server = self._shelve_server(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertIsNone(server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertIsNone(port['binding:host_id'])
+
+ # force the unshelve to the other host
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ server = self._unshelve_server(server)
+ # the dest devices should be claimed
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ # and the source host devices should still be free
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(dest, port['binding:host_id'])
def test_evacute(self):
- self._test_common(self._evacuate_server)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
- def test_resize(self):
- flavor_id = self._create_flavor()
- self._test_common(self._resize_server, flavor_id)
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(source, port['binding:host_id'])
+
+ # stop the source compute and enable the dest
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ self.computes['source'].stop()
+ # Down the source compute to enable the evacuation
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'forced_down': True})
+
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ server = self._evacuate_server(server)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ port = self.neutron.show_port(vdpa_port['id'])['port']
+ self.assertEqual(dest, port['binding:host_id'])
+
+ # as the source compute is offline the pci claims will not be cleaned
+ # up on the source compute.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ # but if you fix/restart the source node the allocations for evacuated
+ # instances should be released.
+ self.restart_compute_service(source)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+
+ def test_resize_same_host(self):
+ self.flags(allow_resize_to_same_host=True)
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ source = self.start_vdpa_compute()
+ vdpa_port, server = self._create_port_and_server()
+        # before the resize the vm uses 1 VF, but that also marks the parent
+        # PF as unavailable, so we assert that 2 devices are in use.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+        # in the resize verify state the VF claims are doubled even for a
+        # same host resize, so assert that 3 devices are in use:
+        # 1 PF and 2 VFs.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 3)
+ server = self._confirm_resize(server)
+        # but once we confirm it, usage should drop back to 1 PF and 1 VF
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+        # assert that the hostname has not changed as part
+        # of the resize.
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ def test_resize_different_host(self):
+ self.flags(allow_resize_to_same_host=False)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
+ # disable the source compute and enable the dest
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._confirm_resize(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ def test_resize_revert(self):
+ self.flags(allow_resize_to_same_host=False)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ flavor_id = self._create_flavor(name='new-flavor')
+ self.assertNotEqual(server['flavor']['original_name'], 'new-flavor')
+ # disable the source compute and enable the dest
+ self.api.put_service(
+ self.computes['source'].service_ref.uuid, {'status': 'disabled'})
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._resize_server(server, flavor_id)
+ self.assertEqual(
+ server['flavor']['original_name'], 'new-flavor')
+        # in the resize verify state both the dest and source pci claims
+        # should be present.
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._revert_resize(server)
+ # but once we revert the dest claims should be freed.
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
def test_cold_migrate(self):
- self._test_common(self._migrate_server)
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+        # enable the dest; we do not need to disable the source since a cold
+        # migration won't happen to the same host in the libvirt driver
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}',
+ ):
+ server = self._migrate_server(server)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ server = self._confirm_resize(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+    # NOTE(sbauza): Now that we are past the Antelope release we no longer
+    # need this test
+ def test_suspend_and_resume_service_version_62(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=62
+ ):
+ self._test_common(self._suspend_server)
+
+ def test_suspend_and_resume(self):
+ source = self.start_vdpa_compute(hostname='source')
+ vdpa_port, server = self._create_port_and_server()
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ server = self._suspend_server(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual('SUSPENDED', server['status'])
+ server = self._resume_server(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+ self.assertEqual('ACTIVE', server['status'])
+
+    # NOTE(sbauza): Now that we are past the Antelope release we no longer
+    # need this test
+ def test_live_migrate_service_version_62(self):
+ self.flags(disable_compute_service_check_for_ffu=True,
+ group='workarounds')
+ with mock.patch(
+ "nova.objects.service.get_minimum_version_all_cells",
+ return_value=62
+ ):
+ self._test_common(self._live_migrate)
+
+ def test_live_migrate(self):
+ source = self.start_vdpa_compute(hostname='source')
+ dest = self.start_vdpa_compute(hostname='dest')
+
+ num_pci = self.NUM_PFS + self.NUM_VFS
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci)
+
+ # ensure we boot the vm on the "source" compute
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'disabled'})
+ vdpa_port, server = self._create_port_and_server()
+ self.assertEqual(
+ source, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
+
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci - 2)
+        # enable the dest; we do not need to disable the source since a live
+        # migration won't happen to the same host in the libvirt driver
+ self.api.put_service(
+ self.computes['dest'].service_ref.uuid, {'status': 'enabled'})
+
+ with mock.patch(
+ 'nova.virt.libvirt.LibvirtDriver.'
+ '_detach_direct_passthrough_vifs'
+ ):
+ server = self._live_migrate(server)
+ self.assertPCIDeviceCounts(source, total=num_pci, free=num_pci)
+ self.assertPCIDeviceCounts(dest, total=num_pci, free=num_pci - 2)
+ self.assertEqual(
+ dest, server['OS-EXT-SRV-ATTR:hypervisor_hostname'])
class PCIServersTest(_PCIServersTestBase):
@@ -1004,7 +1944,7 @@ class PCIServersTest(_PCIServersTestBase):
microversion = 'latest'
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PCI_PROD_ID,
@@ -1018,9 +1958,15 @@ class PCIServersTest(_PCIServersTestBase):
}
)]
+ def setUp(self):
+ super().setUp()
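+        # enable PCI tracking in placement: the compute reports PCI device
+        # inventories to placement and the scheduler allocates them there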
+ self.flags(group="pci", report_in_placement=True)
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
def test_create_server_with_pci_dev_and_numa(self):
"""Verifies that an instance can be booted with cpu pinning and with an
- assigned pci device.
+ assigned pci device, using the legacy NUMA affinity policy, where
+ the pci device reports numa info.
"""
self.flags(cpu_dedicated_set='0-7', group='compute')
@@ -1028,6 +1974,13 @@ class PCIServersTest(_PCIServersTestBase):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
self.start_compute(pci_info=pci_info)
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={"0000:81:00.0": {self.PCI_RC: 1}},
+ traits={"0000:81:00.0": []},
+ usages={"0000:81:00.0": {self.PCI_RC: 0}},
+ )
+
# create a flavor
extra_spec = {
'hw:cpu_policy': 'dedicated',
@@ -1035,18 +1988,35 @@ class PCIServersTest(_PCIServersTestBase):
}
flavor_id = self._create_flavor(extra_spec=extra_spec)
- self._create_server(flavor_id=flavor_id, networks='none')
+ server = self._create_server(flavor_id=flavor_id, networks='none')
+
+ self.assert_placement_pci_view(
+ "compute1",
+ inventories={"0000:81:00.0": {self.PCI_RC: 1}},
+ traits={"0000:81:00.0": []},
+ usages={"0000:81:00.0": {self.PCI_RC: 1}},
+ allocations={server['id']: {"0000:81:00.0": {self.PCI_RC: 1}}},
+ )
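+ # No healing should have been needed: the allocations created during
+ # the boot must already match what the PCI tracker reports to placement.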
+ self.assert_no_pci_healing("compute1")
def test_create_server_with_pci_dev_and_numa_fails(self):
"""This test ensures that it is not possible to allocated CPU and
- memory resources from one NUMA node and a PCI device from another.
+ memory resources from one NUMA node and a PCI device from another
+ if we use the legacy policy and the pci device reports numa info.
"""
-
self.flags(cpu_dedicated_set='0-7', group='compute')
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
# boot one instance with no PCI device to "fill up" NUMA node 0
extra_spec = {'hw:cpu_policy': 'dedicated'}
flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
@@ -1058,6 +2028,10 @@ class PCIServersTest(_PCIServersTestBase):
self._create_server(
flavor_id=flavor_id, networks='none', expected_state='ERROR')
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
def test_live_migrate_server_with_pci(self):
"""Live migrate an instance with a PCI passthrough device.
@@ -1069,14 +2043,42 @@ class PCIServersTest(_PCIServersTestBase):
self.start_compute(
hostname='test_compute0',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.start_compute(
hostname='test_compute1',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
# create a server
extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
flavor_id = self._create_flavor(extra_spec=extra_spec)
- server = self._create_server(flavor_id=flavor_id, networks='none')
+ server = self._create_server(
+ flavor_id=flavor_id, networks='none', host="test_compute0")
+
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# now live migrate that server
ex = self.assertRaises(
@@ -1088,28 +2090,400 @@ class PCIServersTest(_PCIServersTestBase):
# this will bubble to the API
self.assertEqual(500, ex.response.status_code)
self.assertIn('NoValidHost', str(ex))
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
def test_resize_pci_to_vanilla(self):
# Start two computes, one with PCI and one without.
self.start_compute(
hostname='test_compute0',
pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.start_compute(hostname='test_compute1')
+ test_compute1_placement_pci_view = {
+ "inventories": {},
+ "traits": {},
+ "usages": {},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# Boot a server with a single PCI device.
extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
server = self._create_server(flavor_id=pci_flavor_id, networks='none')
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
# Resize it to a flavor without PCI devices. We expect this to work, as
# test_compute1 is available.
- # FIXME(artom) This is bug 1941005.
flavor_id = self._create_flavor()
- ex = self.assertRaises(client.OpenStackApiException,
- self._resize_server, server, flavor_id)
- self.assertEqual(500, ex.response.status_code)
- self.assertIn('NoValidHost', str(ex))
- # self._confirm_resize(server)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 0
+ del test_compute0_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_resize_vanilla_to_pci(self):
+ """Resize an instance from a non PCI flavor to a PCI flavor"""
+ # Start two computes, one with PCI and one without.
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(hostname='test_compute1')
+ test_compute1_placement_pci_view = {
+ "inventories": {},
+ "traits": {},
+ "usages": {},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # Boot a server without a PCI device and make sure it lands on the
+ # compute that has no device, so we can later resize it to the other
+ # host that has a PCI device.
+ extra_spec = {}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, networks='none', host="test_compute1")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # Resize it to a flavor with a PCI device. We expect this to work, as
+ # test_compute0 is available and has a free PCI device.
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ self.assertPCIDeviceCounts('test_compute1', total=0, free=0)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_resize_from_one_dev_to_two(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(
+ hostname='test_compute1',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=2),
+ )
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=2)
+ test_compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # boot a VM on test_compute0 with a single PCI dev
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none', host="test_compute0")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # resize the server to a flavor requesting two devices
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:2'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ # on the source host the PCI allocation is now held by the migration
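+ # UUID; _move_server_allocation below swaps the consumer key in the
+ # expected placement view from the instance UUID to the migration UUID
+ # (and back again when called with revert=True)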
+ self._move_server_allocation(
+ test_compute0_placement_pci_view['allocations'], server['id'])
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # on the dest we now have two devices allocated
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # now revert the resize
+ self._revert_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ # on the host the allocation should move back to the instance UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # so the dest should be freed
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=2)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ }
+ del test_compute1_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # now resize again and confirm it
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+
+ # the source host now needs to be freed up
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ }
+ test_compute0_placement_pci_view["allocations"] = {}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # and the dest is now allocated
+ self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ test_compute1_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_same_host_resize_with_pci(self):
+ """Start a single compute with 3 PCI devs and resize and instance
+ from one dev to two devs
+ """
+ self.flags(allow_resize_to_same_host=True)
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # Boot a server with a single PCI device.
+ # To stabilize the test we reserve 81.01 and 81.02 in placement so
+ # we can be sure that the instance will use 81.00, otherwise the
+ # allocation will be random between 00, 01, and 02
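+ # (the reservation is set on the per-device resource providers, which
+ # the PCI-in-Placement reporting names as <hostname>_<PCI address>)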
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 1)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 1)
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=pci_flavor_id, networks='none')
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "allocations"][server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+ # remove the reservations, so we can resize on the same host and
+ # consume 01 and 02
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 0)
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:02.0", self.PCI_RC, 0)
+
+ # Resize the server to use 2 PCI devices
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:2'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=0)
+ # the source host side of the allocation is now held by the migration
+ # UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"], server['id'])
+ # but we have the dest host side of the allocations on the same host
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # revert the resize so the instance should go back to use a single
+ # device
+ self._revert_resize(server)
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=2)
+ # the migration allocation is moved back to the instance UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"],
+ server["id"],
+ revert=True,
+ )
+ # and the "dest" side of the allocation is dropped
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:01.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view[
+ "usages"]["0000:81:02.0"][self.PCI_RC] = 0
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # resize again but now confirm the same host resize and assert that
+ # only the new flavor usage remains
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off',
+ return_value='{}',
+ ):
+ self._resize_server(server, pci_flavor_id)
+ self._confirm_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ test_compute0_placement_pci_view["allocations"][server['id']] = {
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ }
+ self.assert_no_pci_healing("test_compute0")
def _confirm_resize(self, server, host='host1'):
# NOTE(sbauza): Unfortunately, _cleanup_resize() in libvirt checks the
@@ -1124,7 +2498,6 @@ class PCIServersTest(_PCIServersTestBase):
self.flags(host=orig_host)
def test_cold_migrate_server_with_pci(self):
-
host_devices = {}
orig_create = nova.virt.libvirt.guest.Guest.create
@@ -1153,6 +2526,41 @@ class PCIServersTest(_PCIServersTestBase):
for hostname in ('test_compute0', 'test_compute1'):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
self.start_compute(hostname=hostname, pci_info=pci_info)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ test_compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# boot an instance with a PCI device on each host
extra_spec = {
@@ -1160,8 +2568,16 @@ class PCIServersTest(_PCIServersTestBase):
}
flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # force the allocation on test_compute0 to 81:00 to make it easy
+ # to assert the placement allocation
+ self._reserve_placement_resource(
+ "test_compute0_0000:81:01.0", self.PCI_RC, 1)
server_a = self._create_server(
flavor_id=flavor_id, networks='none', host='test_compute0')
+ # force the allocation on test_compute1 to 81:00 to make it easy
+ # to assert the placement allocation
+ self._reserve_placement_resource(
+ "test_compute1_0000:81:01.0", self.PCI_RC, 1)
server_b = self._create_server(
flavor_id=flavor_id, networks='none', host='test_compute1')
@@ -1173,6 +2589,25 @@ class PCIServersTest(_PCIServersTestBase):
for hostname in ('test_compute0', 'test_compute1'):
self.assertPCIDeviceCounts(hostname, total=2, free=1)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server_a['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ test_compute1_placement_pci_view[
+ "usages"]["0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server_b['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # remove the resource reservation from test_compute1 to be able to
+ # migrate server_a there
+ self._reserve_placement_resource(
+ "test_compute1_0000:81:01.0", self.PCI_RC, 0)
+
# TODO(stephenfin): The mock of 'migrate_disk_and_power_off' should
# probably be less...dumb
with mock.patch(
@@ -1190,19 +2625,390 @@ class PCIServersTest(_PCIServersTestBase):
server_a['OS-EXT-SRV-ATTR:host'], server_b['OS-EXT-SRV-ATTR:host'],
)
self.assertPCIDeviceCounts('test_compute0', total=2, free=1)
+ # on the source host the allocation is now held by the migration UUID
+ self._move_server_allocation(
+ test_compute0_placement_pci_view["allocations"], server_a['id'])
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ # server_a now has an allocation on test_compute1 on 81:01
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:01.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server_a['id']] = {"0000:81:01.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
# now, confirm the migration and check our counts once again
self._confirm_resize(server_a)
self.assertPCIDeviceCounts('test_compute0', total=2, free=2)
+ # the source host now has no allocations as the migration allocation
+ # is removed by confirm resize
+ test_compute0_placement_pci_view["usages"] = {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ }
+ test_compute0_placement_pci_view["allocations"] = {}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
self.assertPCIDeviceCounts('test_compute1', total=2, free=0)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_request_two_pci_but_host_has_one(self):
+ # simulate a single type-PCI device on the host
+ self.start_compute(pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('compute1', total=1, free=1)
+
+ alias = [jsonutils.dumps(x) for x in (
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ 'name': 'a1',
+ },
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ 'name': 'a2',
+ },
+ )]
+ self.flags(group='pci', alias=alias)
+ # request two PCI devices, each of which individually matches the
+ # single available device on the host
+ extra_spec = {'pci_passthrough:alias': 'a1:1,a2:1'}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ # so we expect that the boot fails with no valid host error as only
+ # one of the requested PCI devices can be allocated
+ server = self._create_server(
+ flavor_id=flavor_id, networks="none", expected_state='ERROR')
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+
+ def _create_two_computes(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1))
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.start_compute(
+ hostname='test_compute1',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=1),
+ )
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
+ test_compute1_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ return (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ )
+
+ def _create_two_computes_and_an_instance_on_the_first(self):
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
+ # boot a VM on test_compute0 with a single PCI dev
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none', host="test_compute0")
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ return (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ )
+
+ def test_evacuate(self):
+ (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes_and_an_instance_on_the_first()
+
+ # kill test_compute0 and evacuate the instance
+ self.computes['test_compute0'].stop()
+ self.api.put_service(
+ self.computes["test_compute0"].service_ref.uuid,
+ {"forced_down": True},
+ )
+ self._evacuate_server(server)
+ # the source allocation should be kept as the source is dead, but the
+ # server now has allocations on both hosts as evacuation does not use
+ # migration allocations.
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=0)
+ self.assert_placement_pci_inventory(
+ "test_compute0",
+ test_compute0_placement_pci_view["inventories"],
+ test_compute0_placement_pci_view["traits"]
+ )
+ self.assert_placement_pci_usages(
+ "test_compute0", test_compute0_placement_pci_view["usages"]
+ )
+ self.assert_placement_pci_allocations(
+ {
+ server['id']: {
+ "test_compute0": {
+ "VCPU": 2,
+ "MEMORY_MB": 2048,
+ "DISK_GB": 20,
+ },
+ "test_compute0_0000:81:00.0": {self.PCI_RC: 1},
+ "test_compute1": {
+ "VCPU": 2,
+ "MEMORY_MB": 2048,
+ "DISK_GB": 20,
+ },
+ "test_compute1_0000:81:00.0": {self.PCI_RC: 1},
+ },
+ }
+ )
+
+ # dest allocation should be created
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_inventory(
+ "test_compute1",
+ test_compute1_placement_pci_view["inventories"],
+ test_compute1_placement_pci_view["traits"]
+ )
+ self.assert_placement_pci_usages(
+ "test_compute1", test_compute0_placement_pci_view["usages"]
+ )
+
+ # recover test_compute0 and check that it is cleaned
+ self.restart_compute_service('test_compute0')
+
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view = {
+ "inventories": {"0000:81:00.0": {self.PCI_RC: 1}},
+ "traits": {"0000:81:00.0": []},
+ "usages": {"0000:81:00.0": {self.PCI_RC: 0}},
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # and test_compute1 is not changed (except that the instance now has
+ # an allocation only on this compute)
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_unshelve_after_offload(self):
+ (
+ server,
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes_and_an_instance_on_the_first()
+
+ # shelve offload the server
+ self._shelve_server(server)
+
+ # source allocation should be freed
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ test_compute0_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 0
+ del test_compute0_placement_pci_view["allocations"][server['id']]
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should not be touched
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ # disable test_compute0 and unshelve the instance
+ self.api.put_service(
+ self.computes["test_compute0"].service_ref.uuid,
+ {"status": "disabled"},
+ )
+ self._unshelve_server(server)
+
+ # test_compute0 should be unchanged
+ self.assertPCIDeviceCounts('test_compute0', total=1, free=1)
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ # test_compute1 should be allocated
+ self.assertPCIDeviceCounts('test_compute1', total=1, free=0)
+ test_compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ test_compute1_placement_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(
+ "test_compute1", **test_compute1_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_reschedule(self):
+ (
+ test_compute0_placement_pci_view,
+ test_compute1_placement_pci_view,
+ ) = self._create_two_computes()
+
+ # try to boot a VM with a single device but inject a fault on the first
+ # compute so that the VM is re-scheduled to the other one
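+ # (the fake below fails only the first _create_guest call with a
+ # libvirt internal error; the PCI claim must survive the reschedule on
+ # whichever compute ends up hosting the VM)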
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+
+ calls = []
+ orig_guest_create = (
+ nova.virt.libvirt.driver.LibvirtDriver._create_guest)
+
+ def fake_guest_create(*args, **kwargs):
+ if not calls:
+ calls.append(1)
+ raise fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "internal error",
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR,
+ )
+ else:
+ return orig_guest_create(*args, **kwargs)
+
+ with mock.patch(
+ 'nova.virt.libvirt.driver.LibvirtDriver._create_guest',
+ new=fake_guest_create
+ ):
+ server = self._create_server(
+ flavor_id=pci_flavor_id, networks='none')
+
+ compute_pci_view_map = {
+ 'test_compute0': test_compute0_placement_pci_view,
+ 'test_compute1': test_compute1_placement_pci_view,
+ }
+ allocated_compute = server['OS-EXT-SRV-ATTR:host']
+ not_allocated_compute = (
+ "test_compute0"
+ if allocated_compute == "test_compute1"
+ else "test_compute1"
+ )
+
+ allocated_pci_view = compute_pci_view_map.pop(
+ server['OS-EXT-SRV-ATTR:host'])
+ not_allocated_pci_view = list(compute_pci_view_map.values())[0]
+
+ self.assertPCIDeviceCounts(allocated_compute, total=1, free=0)
+ allocated_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ allocated_pci_view["allocations"][
+ server['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+ self.assert_placement_pci_view(allocated_compute, **allocated_pci_view)
+
+ self.assertPCIDeviceCounts(not_allocated_compute, total=1, free=1)
+ self.assert_placement_pci_view(
+ not_allocated_compute, **not_allocated_pci_view)
+ self.assert_no_pci_healing("test_compute0")
+ self.assert_no_pci_healing("test_compute1")
+
+ def test_multi_create(self):
+ self.start_compute(
+ hostname='test_compute0',
+ pci_info=fakelibvirt.HostPCIDevicesInfo(num_pci=3))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=3)
+ test_compute0_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ "0000:81:02.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ "0000:81:01.0": [],
+ "0000:81:02.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ "0000:81:02.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ extra_spec = {'pci_passthrough:alias': f'{self.ALIAS_NAME}:1'}
+ pci_flavor_id = self._create_flavor(extra_spec=extra_spec)
+ body = self._build_server(flavor_id=pci_flavor_id, networks='none')
+ body.update(
+ {
+ "min_count": "2",
+ }
+ )
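+ # min_count=2 turns this into a multi-create request, so a single API
+ # call boots two servers that each consume one PCI device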
+ self.api.post_server({'server': body})
+
+ servers = self.api.get_servers(detail=False)
+ for server in servers:
+ self._wait_for_state_change(server, 'ACTIVE')
+
+ self.assertEqual(2, len(servers))
+ self.assertPCIDeviceCounts('test_compute0', total=3, free=1)
+ # we have no way to influence which instance takes which device, so
+ # we need to look at the nova DB to properly assert the placement
+ # allocation
+ devices = objects.PciDeviceList.get_by_compute_node(
+ self.ctxt,
+ objects.ComputeNode.get_by_nodename(self.ctxt, 'test_compute0').id,
+ )
+ for dev in devices:
+ if dev.instance_uuid:
+ test_compute0_placement_pci_view["usages"][
+ dev.address][self.PCI_RC] = 1
+ test_compute0_placement_pci_view["allocations"][
+ dev.instance_uuid] = {dev.address: {self.PCI_RC: 1}}
+
+ self.assert_placement_pci_view(
+ "test_compute0", **test_compute0_placement_pci_view)
+
+ self.assert_no_pci_healing("test_compute0")
class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PCI_PROD_ID,
@@ -1219,6 +3025,11 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
)]
expected_state = 'ACTIVE'
+ def setUp(self):
+ super().setUp()
+ self.flags(group="pci", report_in_placement=True)
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
def test_create_server_with_pci_dev_and_numa(self):
"""Validate behavior of 'preferred' PCI NUMA policy.
@@ -1231,6 +3042,20 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=0)
self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": [],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
# boot one instance with no PCI device to "fill up" NUMA node 0
extra_spec = {
@@ -1239,13 +3064,26 @@ class PCIServersWithPreferredNUMATest(_PCIServersTestBase):
flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
self._create_server(flavor_id=flavor_id)
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
# now boot one with a PCI device, which should succeed thanks to the
# use of the PCI policy
extra_spec['pci_passthrough:alias'] = '%s:1' % self.ALIAS_NAME
flavor_id = self._create_flavor(extra_spec=extra_spec)
- self._create_server(
+ server_with_pci = self._create_server(
flavor_id=flavor_id, expected_state=self.expected_state)
+ if self.expected_state == 'ACTIVE':
+ compute1_placement_pci_view["usages"][
+ "0000:81:00.0"][self.PCI_RC] = 1
+ compute1_placement_pci_view["allocations"][
+ server_with_pci['id']] = {"0000:81:00.0": {self.PCI_RC: 1}}
+
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
@@ -1261,12 +3099,105 @@ class PCIServersWithRequiredNUMATest(PCIServersWithPreferredNUMATest):
)]
expected_state = 'ERROR'
+ def setUp(self):
+ super().setUp()
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.is_physical_function', return_value=False
+ )
+ )
+
+ def test_create_server_with_pci_dev_and_numa_placement_conflict(self):
+ # fakelibvirt will simulate the devices:
+ # * one type-PCI in 81.00 on numa 0
+ # * one type-PCI in 81.01 on numa 1
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
+ # the device_spec will assign different traits to 81.00 than 81.01
+ # so the two devices become different from placement perspective
+ device_spec = self._to_list_of_json_str(
+ [
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:00.0",
+ "traits": "green",
+ },
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ "address": "0000:81:01.0",
+ "traits": "red",
+ },
+ ]
+ )
+ self.flags(group='pci', device_spec=device_spec)
+ # both numa 0 and numa 1 has 4 PCPUs
+ self.flags(cpu_dedicated_set='0-7', group='compute')
+ self.start_compute(pci_info=pci_info)
+ compute1_placement_pci_view = {
+ "inventories": {
+ "0000:81:00.0": {self.PCI_RC: 1},
+ "0000:81:01.0": {self.PCI_RC: 1},
+ },
+ "traits": {
+ "0000:81:00.0": ["CUSTOM_GREEN"],
+ "0000:81:01.0": ["CUSTOM_RED"],
+ },
+ "usages": {
+ "0000:81:00.0": {self.PCI_RC: 0},
+ "0000:81:01.0": {self.PCI_RC: 0},
+ },
+ "allocations": {},
+ }
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+
+ # boot one instance with no PCI device to "fill up" NUMA node 0,
+ # so the remaining free PCPUs are on numa 1 while we have PCI devices
+ # on both nodes
+ extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ }
+ flavor_id = self._create_flavor(vcpu=4, extra_spec=extra_spec)
+ self._create_server(flavor_id=flavor_id)
+
+ pci_alias = {
+ "resource_class": self.PCI_RC,
+ # this means only 81.00 will match in placement which is on numa 0
+ "traits": "green",
+ "name": "pci-dev",
+ # this forces the scheduler to only accept a solution where the
+ # PCI device is on the same numa node as the pinned CPUs
+ 'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
+ }
+ self.flags(
+ group="pci",
+ alias=self._to_list_of_json_str([pci_alias]),
+ )
+
+ # Ask for dedicated CPUs, that can only be fulfilled on numa 1.
+ # And ask for a PCI alias that can only be fulfilled on numa 0 due to
+ # trait request.
+ # We expect that this makes the scheduling fail.
+ extra_spec = {
+ "hw:cpu_policy": "dedicated",
+ "pci_passthrough:alias": "pci-dev:1",
+ }
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(
+ flavor_id=flavor_id, expected_state="ERROR")
+
+ self.assertIn('fault', server)
+ self.assertIn('No valid host', server['fault']['message'])
+ self.assert_placement_pci_view(
+ "compute1", **compute1_placement_pci_view)
+ self.assert_no_pci_healing("compute1")
+
@ddt.ddt
class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PCI_PROD_ID,
@@ -1296,7 +3227,7 @@ class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
num_pci=1, numa_node=pci_numa_node)
self.start_compute(pci_info=pci_info)
- # request cpu pinning to create a numa toplogy and allow the test to
+ # request cpu pinning to create a numa topology and allow the test to
# force which numa node the vm would have to be pinned to.
extra_spec = {
'hw:cpu_policy': 'dedicated',
@@ -1371,9 +3302,11 @@ class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
}
)]
- self.flags(passthrough_whitelist=self.PCI_PASSTHROUGH_WHITELIST,
- alias=alias,
- group='pci')
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=alias,
+ group='pci'
+ )
self._test_policy(pci_numa_node, status, 'required')
@@ -1451,7 +3384,7 @@ class PCIServersWithSRIOVAffinityPoliciesTest(_PCIServersTestBase):
class PCIServersWithPortNUMAPoliciesTest(_PCIServersTestBase):
ALIAS_NAME = 'a1'
- PCI_PASSTHROUGH_WHITELIST = [jsonutils.dumps(x) for x in (
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
{
'vendor_id': fakelibvirt.PCI_VEND_ID,
'product_id': fakelibvirt.PF_PROD_ID,
@@ -1507,7 +3440,7 @@ class PCIServersWithPortNUMAPoliciesTest(_PCIServersTestBase):
num_pfs=1, num_vfs=2, numa_node=pci_numa_node)
self.start_compute(pci_info=pci_info)
- # request cpu pinning to create a numa toplogy and allow the test to
+ # request cpu pinning to create a numa topology and allow the test to
# force which numa node the vm would have to be pinned to.
extra_spec = {
'hw:cpu_policy': 'dedicated',
@@ -1589,9 +3522,11 @@ class PCIServersWithPortNUMAPoliciesTest(_PCIServersTestBase):
}
)]
- self.flags(passthrough_whitelist=self.PCI_PASSTHROUGH_WHITELIST,
- alias=alias,
- group='pci')
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=alias,
+ group='pci'
+ )
self._test_policy(pci_numa_node, status, 'required')
@@ -1680,3 +3615,568 @@ class PCIServersWithPortNUMAPoliciesTest(_PCIServersTestBase):
],
)
self.assertTrue(self.mock_filter.called)
+
+
+class RemoteManagedServersTest(_PCIServersWithMigrationTestBase):
+
+ ADMIN_API = True
+ microversion = 'latest'
+
+ PCI_DEVICE_SPEC = [jsonutils.dumps(x) for x in (
+ # A PF with access to physnet4.
+ {
+ 'vendor_id': '15b3',
+ 'product_id': 'a2dc',
+ 'physical_network': 'physnet4',
+ 'remote_managed': 'false',
+ },
+ # A VF with access to physnet4.
+ {
+ 'vendor_id': '15b3',
+ 'product_id': '1021',
+ 'physical_network': 'physnet4',
+ 'remote_managed': 'true',
+ },
+ # A PF programmed to forward traffic to an overlay network.
+ {
+ 'vendor_id': '15b3',
+ 'product_id': 'a2d6',
+ 'physical_network': None,
+ 'remote_managed': 'false',
+ },
+ # A VF programmed to forward traffic to an overlay network.
+ {
+ 'vendor_id': '15b3',
+ 'product_id': '101e',
+ 'physical_network': None,
+ 'remote_managed': 'true',
+ },
+ )]
+
+ PCI_ALIAS = []
+
+ NUM_PFS = 1
+ NUM_VFS = 4
+ vf_ratio = NUM_VFS // NUM_PFS
+
+ # Min Libvirt version that supports working with PCI VPD.
+ FAKE_LIBVIRT_VERSION = 7_009_000 # 7.9.0
+ FAKE_QEMU_VERSION = 5_001_000 # 5.1.0
+
+ def setUp(self):
+ super().setUp()
+ self.neutron = self.useFixture(base.LibvirtNeutronFixture(self))
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.pci.utils.get_vf_num_by_pci_address',
+ new=mock.MagicMock(
+ side_effect=lambda addr: self._get_pci_function_number(addr))))
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.pci.utils.get_mac_by_pci_address',
+ new=mock.MagicMock(
+ side_effect=(
+ lambda addr: {
+ "0000:80:00.0": "52:54:00:1e:59:42",
+ "0000:81:00.0": "52:54:00:1e:59:01",
+ "0000:82:00.0": "52:54:00:1e:59:02",
+ }.get(addr)
+ )
+ )
+ ))
+
+ @classmethod
+ def _get_pci_function_number(cls, pci_addr: str):
+ """Get a VF function number based on a PCI address.
+
+ Assume that the PCI ARI capability is enabled (slot bits become a part
+ of a function number).
+ """
+ _, _, slot, function = parse_address(pci_addr)
+ # The number of PFs is extracted to get a VF number.
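+ # e.g. with ARI and NUM_PFS=1, '0000:81:00.4' gives slot 0x00 +
+ # function 0x4 - 1 = VF number 3, which matches the vf_num seen in the
+ # port binding profiles asserted below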
+ return int(slot, 16) + int(function, 16) - cls.NUM_PFS
+
+ def start_compute(
+ self, hostname='test_compute0', host_info=None, pci_info=None,
+ mdev_info=None, vdpa_info=None,
+ libvirt_version=None,
+ qemu_version=None):
+
+ if not pci_info:
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=0, num_pfs=0, num_vfs=0)
+
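+ # Simulate two SmartNIC-style cards matching the device_spec above:
+ # a PF at 0000:81:00.0 with NUM_VFS remote-managed VFs on physnet4,
+ # and a PF at 0000:82:00.0 with NUM_VFS VFs forwarding to an overlay
+ # network.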
+ pci_info.add_device(
+ dev_type='PF',
+ bus=0x81,
+ slot=0x0,
+ function=0,
+ iommu_group=42,
+ numa_node=0,
+ vf_ratio=self.vf_ratio,
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='a2dc',
+ prod_name='BlueField-3 integrated ConnectX-7 controller',
+ driver_name='mlx5_core',
+ vpd_fields={
+ 'name': 'MT43244 BlueField-3 integrated ConnectX-7',
+ 'readonly': {
+ 'serial_number': 'MT0000X00001',
+ },
+ }
+ )
+
+ for idx in range(self.NUM_VFS):
+ pci_info.add_device(
+ dev_type='VF',
+ bus=0x81,
+ slot=0x0,
+ function=idx + 1,
+ iommu_group=idx + 43,
+ numa_node=0,
+ vf_ratio=self.vf_ratio,
+ parent=(0x81, 0x0, 0),
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='1021',
+ prod_name='MT2910 Family [ConnectX-7]',
+ driver_name='mlx5_core',
+ vpd_fields={
+ 'name': 'MT2910 Family [ConnectX-7]',
+ 'readonly': {
+ 'serial_number': 'MT0000X00001',
+ },
+ }
+ )
+
+ pci_info.add_device(
+ dev_type='PF',
+ bus=0x82,
+ slot=0x0,
+ function=0,
+ iommu_group=84,
+ numa_node=0,
+ vf_ratio=self.vf_ratio,
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='a2d6',
+ prod_name='MT42822 BlueField-2 integrated ConnectX-6',
+ driver_name='mlx5_core',
+ vpd_fields={
+ 'name': 'MT42822 BlueField-2 integrated ConnectX-6',
+ 'readonly': {
+ 'serial_number': 'MT0000X00002',
+ },
+ }
+ )
+
+ for idx in range(self.NUM_VFS):
+ pci_info.add_device(
+ dev_type='VF',
+ bus=0x82,
+ slot=0x0,
+ function=idx + 1,
+ iommu_group=idx + 85,
+ numa_node=0,
+ vf_ratio=self.vf_ratio,
+ parent=(0x82, 0x0, 0),
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='101e',
+ prod_name='ConnectX Family mlx5Gen Virtual Function',
+ driver_name='mlx5_core')
+
+ return super().start_compute(
+ hostname=hostname, host_info=host_info, pci_info=pci_info,
+ mdev_info=mdev_info, vdpa_info=vdpa_info,
+ libvirt_version=libvirt_version or self.FAKE_LIBVIRT_VERSION,
+ qemu_version=qemu_version or self.FAKE_QEMU_VERSION)
+
+ def create_remote_managed_tunnel_port(self):
+ dpu_tunnel_port = {
+ 'id': uuids.dpu_tunnel_port,
+ 'network_id': self.neutron.network_3['id'],
+ 'status': 'ACTIVE',
+ 'mac_address': 'fa:16:3e:f0:a4:bb',
+ 'fixed_ips': [
+ {
+ 'ip_address': '192.168.2.8',
+ 'subnet_id': self.neutron.subnet_3['id']
+ }
+ ],
+ 'binding:vif_details': {},
+ 'binding:vif_type': 'ovs',
+ 'binding:vnic_type': 'remote-managed',
+ }
+
+ self.neutron.create_port({'port': dpu_tunnel_port})
+ return dpu_tunnel_port
+
+ def create_remote_managed_physnet_port(self):
+ dpu_physnet_port = {
+ 'id': uuids.dpu_physnet_port,
+ 'network_id': self.neutron.network_4['id'],
+ 'status': 'ACTIVE',
+ 'mac_address': 'd2:0b:fd:99:89:8b',
+ 'fixed_ips': [
+ {
+ 'ip_address': '192.168.4.10',
+ 'subnet_id': self.neutron.subnet_4['id']
+ }
+ ],
+ 'binding:vif_details': {},
+ 'binding:vif_type': 'ovs',
+ 'binding:vnic_type': 'remote-managed',
+ }
+
+ self.neutron.create_port({'port': dpu_physnet_port})
+ return dpu_physnet_port
+
+ def test_create_server_physnet(self):
+ """Create an instance with a tunnel remote-managed port."""
+
+ hostname = self.start_compute()
+ num_pci = (self.NUM_PFS + self.NUM_VFS) * 2
+
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
+
+ dpu_port = self.create_remote_managed_physnet_port()
+
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertNotIn('binding:profile', port)
+
+ self._create_server(networks=[{'port': dpu_port['id']}])
+
+ # Ensure there is one less VF available and that the PF
+ # is no longer usable.
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+
+ # Ensure the binding:profile details sent to Neutron are correct after
+ # a port update.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual({
+ 'card_serial_number': 'MT0000X00001',
+ 'pci_slot': '0000:81:00.4',
+ 'pci_vendor_info': '15b3:1021',
+ 'pf_mac_address': '52:54:00:1e:59:01',
+ 'physical_network': 'physnet4',
+ 'vf_num': 3
+ }, port['binding:profile'])
+
+ def test_create_server_tunnel(self):
+ """Create an instance with a tunnel remote-managed port."""
+
+ hostname = self.start_compute()
+ num_pci = (self.NUM_PFS + self.NUM_VFS) * 2
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci)
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertNotIn('binding:profile', port)
+
+ self._create_server(networks=[{'port': dpu_port['id']}])
+
+ # Ensure there is one less VF available and that the PF
+ # is no longer usable.
+ self.assertPCIDeviceCounts(hostname, total=num_pci, free=num_pci - 2)
+
+ # Ensure the binding:profile details sent to Neutron are correct after
+ # a port update.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual({
+ 'card_serial_number': 'MT0000X00002',
+ 'pci_slot': '0000:82:00.4',
+ 'pci_vendor_info': '15b3:101e',
+ 'pf_mac_address': '52:54:00:1e:59:02',
+ 'physical_network': None,
+ 'vf_num': 3
+ }, port['binding:profile'])
+
+ def _test_common(self, op, *args, **kwargs):
+ self.start_compute()
+ dpu_port = self.create_remote_managed_tunnel_port()
+ server = self._create_server(networks=[{'port': dpu_port['id']}])
+ op(server, *args, **kwargs)
+
+ def test_attach_interface(self):
+ self.start_compute()
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ server = self._create_server(networks='none')
+
+ self._attach_interface(server, dpu_port['id'])
+
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:82:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:02',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00002',
+ },
+ port['binding:profile'],
+ )
+
+ def test_detach_interface(self):
+ self._test_common(self._detach_interface, uuids.dpu_tunnel_port)
+
+ port = self.neutron.show_port(uuids.dpu_tunnel_port)['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual({}, port['binding:profile'])
+
+ def test_shelve(self):
+ self._test_common(self._shelve_server)
+
+ port = self.neutron.show_port(uuids.dpu_tunnel_port)['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual({}, port['binding:profile'])
+
+ def test_suspend(self):
+ self.start_compute()
+ dpu_port = self.create_remote_managed_tunnel_port()
+ server = self._create_server(networks=[{'port': dpu_port['id']}])
+ self._suspend_server(server)
+ # TODO(dmitriis): detachDevice does not properly handle hostdevs
+ # so full suspend/resume testing is problematic.
+
+ def _test_move_operation_with_neutron(self, move_operation, dpu_port):
+ """Test a move operation with a remote-managed port.
+ """
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=0, num_vfs=0)
+
+ compute1_pci_info.add_device(
+ dev_type='PF',
+ bus=0x80,
+ slot=0x0,
+ function=0,
+ iommu_group=84,
+ numa_node=1,
+ vf_ratio=self.vf_ratio,
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='a2d6',
+ prod_name='MT42822 BlueField-2 integrated ConnectX-6',
+ driver_name='mlx5_core',
+ vpd_fields={
+ 'name': 'MT42822 BlueField-2 integrated ConnectX-6',
+ 'readonly': {
+ 'serial_number': 'MT0000X00042',
+ },
+ }
+ )
+ for idx in range(self.NUM_VFS):
+ compute1_pci_info.add_device(
+ dev_type='VF',
+ bus=0x80,
+ slot=0x0,
+ function=idx + 1,
+ iommu_group=idx + 85,
+ numa_node=1,
+ vf_ratio=self.vf_ratio,
+ parent=(0x80, 0x0, 0),
+ vend_id='15b3',
+ vend_name='Mellanox Technologies',
+ prod_id='101e',
+ prod_name='ConnectX Family mlx5Gen Virtual Function',
+ driver_name='mlx5_core',
+ vpd_fields={
+ 'name': 'MT42822 BlueField-2 integrated ConnectX-6',
+ 'readonly': {
+ 'serial_number': 'MT0000X00042',
+ },
+ }
+ )
+
+ self.start_compute(hostname='test_compute0')
+ self.start_compute(hostname='test_compute1',
+ pci_info=compute1_pci_info)
+
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertNotIn('binding:profile', port)
+
+ flavor_id = self._create_flavor(vcpu=4)
+ server = self._create_server(
+ flavor_id=flavor_id,
+ networks=[{'port': dpu_port['id']}],
+ host='test_compute0',
+ )
+
+ self.assertEqual('test_compute0', server['OS-EXT-SRV-ATTR:host'])
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=5)
+
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:82:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:02',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00002',
+ },
+ port['binding:profile'],
+ )
+
+ move_operation(server)
+
+ def test_unshelve_server_with_neutron(self):
+ def move_operation(source_server):
+ self._shelve_server(source_server)
+ # Disable the source compute, to force unshelving on the dest.
+ self.api.put_service(
+ self.computes['test_compute0'].service_ref.uuid,
+ {'status': 'disabled'})
+ self._unshelve_server(source_server)
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ self._test_move_operation_with_neutron(move_operation, dpu_port)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=10)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=3)
+
+ # Ensure the binding:profile details got updated, including the
+ # fields relevant to remote-managed ports.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:80:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:42',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00042',
+ },
+ port['binding:profile'],
+ )
+
+ def test_cold_migrate_server_with_neutron(self):
+ def move_operation(source_server):
+ with mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}'):
+ server = self._migrate_server(source_server)
+ self._confirm_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=10)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=3)
+
+ # Ensure the binding:profile details got updated, including the
+ # fields relevant to remote-managed ports.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:80:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:42',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00042',
+ },
+ port['binding:profile'],
+ )
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ self._test_move_operation_with_neutron(move_operation, dpu_port)
+
+ def test_cold_migrate_server_with_neutron_revert(self):
+ def move_operation(source_server):
+ with mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
+ '.migrate_disk_and_power_off', return_value='{}'):
+ server = self._migrate_server(source_server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=3)
+
+ self._revert_resize(server)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=5)
+
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:82:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:02',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00002',
+ },
+ port['binding:profile'],
+ )
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ self._test_move_operation_with_neutron(move_operation, dpu_port)
+
+ def test_evacuate_server_with_neutron(self):
+ def move_operation(source_server):
+ # Down the source compute to enable the evacuation
+ self.api.put_service(
+ self.computes['test_compute0'].service_ref.uuid,
+ {'forced_down': True})
+ self.computes['test_compute0'].stop()
+ self._evacuate_server(source_server)
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ self._test_move_operation_with_neutron(move_operation, dpu_port)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=8)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=3)
+
+ # Ensure the binding:profile details got updated, including the
+ # fields relevant to remote-managed ports.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:80:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:42',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00042',
+ },
+ port['binding:profile'],
+ )
+
+ def test_live_migrate_server_with_neutron(self):
+ """Live migrate an instance using a remote-managed port.
+
+ This should succeed since we support this via detach and attach of the
+ PCI device similar to how this is done for SR-IOV ports.
+ """
+ def move_operation(source_server):
+ self._live_migrate(source_server, 'completed')
+
+ dpu_port = self.create_remote_managed_tunnel_port()
+ self._test_move_operation_with_neutron(move_operation, dpu_port)
+
+ self.assertPCIDeviceCounts('test_compute0', total=10, free=10)
+ self.assertPCIDeviceCounts('test_compute1', total=5, free=3)
+
+ # Ensure the binding:profile details got updated, including the
+ # fields relevant to remote-managed ports.
+ port = self.neutron.show_port(dpu_port['id'])['port']
+ self.assertIn('binding:profile', port)
+ self.assertEqual(
+ {
+ 'pci_vendor_info': '15b3:101e',
+ 'pci_slot': '0000:80:00.4',
+ 'physical_network': None,
+ 'pf_mac_address': '52:54:00:1e:59:42',
+ 'vf_num': 3,
+ 'card_serial_number': 'MT0000X00042',
+ },
+ port['binding:profile'],
+ )
diff --git a/nova/tests/functional/libvirt/test_power_manage.py b/nova/tests/functional/libvirt/test_power_manage.py
new file mode 100644
index 0000000000..9f80446bd6
--- /dev/null
+++ b/nova/tests/functional/libvirt/test_power_manage.py
@@ -0,0 +1,270 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+import fixtures
+
+from nova import context as nova_context
+from nova import exception
+from nova import objects
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import base
+from nova.virt import hardware
+from nova.virt.libvirt.cpu import api as cpu_api
+
+
+class PowerManagementTestsBase(base.ServersTestBase):
+
+ ADDITIONAL_FILTERS = ['NUMATopologyFilter']
+
+ ADMIN_API = True
+
+ def setUp(self):
+ super(PowerManagementTestsBase, self).setUp()
+
+ self.ctxt = nova_context.get_admin_context()
+
+ # Mock the 'NUMATopologyFilter' filter, as most tests need to inspect
+ # this
+ host_manager = self.scheduler.manager.host_manager
+ numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter']
+ host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes)
+ _p = mock.patch('nova.scheduler.filters'
+ '.numa_topology_filter.NUMATopologyFilter.host_passes',
+ side_effect=host_pass_mock)
+ self.mock_filter = _p.start()
+ self.addCleanup(_p.stop)
+
+ # for the sake of resizing, we need to patch the two methods below
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.libvirt.LibvirtDriver._get_instance_disk_info',
+ return_value=[]))
+ self.useFixture(fixtures.MockPatch('os.rename'))
+
+ self.useFixture(nova_fixtures.PrivsepFixture())
+
+ # Defining the main flavor for 4 vCPUs all pinned
+ self.extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ 'hw:cpu_thread_policy': 'prefer',
+ }
+ self.pcpu_flavor_id = self._create_flavor(
+ vcpu=4, extra_spec=self.extra_spec)
+
+ def _assert_server_cpus_state(self, server, expected='online'):
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ if not inst.numa_topology:
+ self.fail('Instance should have a NUMA topology in order to know '
+ 'its physical CPUs')
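+ # numa_topology.cpu_pinning is the set of host pCPUs pinned to the
+ # instance's vCPUs; those are the cores whose power state should
+ # follow the instance lifecycle.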
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ self._assert_cpu_set_state(instance_pcpus, expected=expected)
+ return instance_pcpus
+
+ def _assert_cpu_set_state(self, cpu_set, expected='online'):
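+ # Core() wraps the per-CPU sysfs state (the online flag and the
+ # scaling governor); the SysFileSystemFixture used by the concrete
+ # test classes fakes that sysfs tree, so these assertions read the
+ # fake values.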
+ for i in cpu_set:
+ core = cpu_api.Core(i)
+ if expected == 'online':
+ self.assertTrue(core.online, f'{i} is not online')
+ elif expected == 'offline':
+ self.assertFalse(core.online, f'{i} is online')
+ elif expected == 'powersave':
+ self.assertEqual('powersave', core.governor)
+ elif expected == 'performance':
+ self.assertEqual('performance', core.governor)
+
+
+class PowerManagementTests(PowerManagementTestsBase):
+ """Test suite for a single host with 9 dedicated cores and 1 used for OS"""
+
+ def setUp(self):
+ super(PowerManagementTests, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ # All dedicated cores are shut down at startup, let's check.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
+ def test_hardstop_compute_service_if_wrong_opt(self):
+ self.flags(cpu_dedicated_set=None, cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
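+ # Enabling cpu_power_management without a cpu_dedicated_set leaves
+ # the driver with no way of knowing which cores it may power down,
+ # so starting the compute service is expected to fail hard with
+ # InvalidConfiguration.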
+ self.assertRaises(exception.InvalidConfiguration,
+ self.start_compute, host_info=self.host_info,
+ hostname='compute2')
+
+ def test_create_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # Let's verify that the pinned CPUs are now online
+ self._assert_server_cpus_state(server, expected='online')
+
+ # Verify that the unused CPUs are still offline
+ inst = objects.Instance.get_by_uuid(self.ctxt, server['id'])
+ instance_pcpus = inst.numa_topology.cpu_pinning
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ unused_cpus = cpu_dedicated_set - instance_pcpus
+ self._assert_cpu_set_state(unused_cpus, expected='offline')
+
+ def test_stop_start_server(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+
+ server = self._stop_server(server)
+ # Let's verify that the pinned CPUs are now stopped...
+ self._assert_server_cpus_state(server, expected='offline')
+
+ server = self._start_server(server)
+ # ...and now, they should be back.
+ self._assert_server_cpus_state(server, expected='online')
+
+ def test_resize(self):
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ server_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+
+ new_flavor_id = self._create_flavor(
+ vcpu=5, extra_spec=self.extra_spec)
+ self._resize_server(server, new_flavor_id)
+ server2_pcpus = self._assert_server_cpus_state(server,
+ expected='online')
+ # Even if the resize is not confirmed yet, the original guest is
+ # already destroyed so its cores are now offline.
+ self._assert_cpu_set_state(server_pcpus, expected='offline')
+
+ # let's revert the resize
+ self._revert_resize(server)
+ # So now the original CPUs will be online again, while the cores
+ # picked for the new flavor should be back offline.
+ self._assert_cpu_set_state(server_pcpus, expected='online')
+ self._assert_cpu_set_state(server2_pcpus, expected='offline')
+
+ def test_changing_strategy_fails(self):
+ # As a reminder, all dedicated cores have been shut down before.
+ # Now we want to change the strategy and then restart the service.
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ # See, this is not possible as we would have offline CPUs.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementTestsGovernor(PowerManagementTestsBase):
+ """Test suite for speific governor usage (same 10-core host)"""
+
+ def setUp(self):
+ super(PowerManagementTestsGovernor, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining the CPUs to be pinned.
+ self.flags(cpu_dedicated_set='1-9', cpu_shared_set=None,
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+
+ self.flags(allow_resize_to_same_host=True)
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ def test_create(self):
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ # With the governor strategy, cores are still online but run with a
+ # powersave governor.
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='powersave')
+
+ # Now, start an instance
+ server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # Once cores are pinned to the running instance, their governor is
+ # switched to performance.
+ self._assert_server_cpus_state(server, expected='performance')
+
+ def test_changing_strategy_fails(self):
+ # Arbitrarily set a core governor strategy to be performance
+ cpu_api.Core(1).set_high_governor()
+ # and then forget about it while changing the strategy.
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ # This time, this isn't acceptable as some core would keep running
+ # with the performance governor while Nova would only online/offline
+ # cores.
+ self.assertRaises(exception.InvalidConfiguration,
+ self.restart_compute_service, hostname='compute1')
+
+
+class PowerManagementMixedInstances(PowerManagementTestsBase):
+ """Test suite for a single host with 6 dedicated cores, 3 shared and one
+ OS-restricted.
+ """
+
+ def setUp(self):
+ super(PowerManagementMixedInstances, self).setUp()
+
+ self.useFixture(nova_fixtures.SysFileSystemFixture())
+
+ # Defining 6 CPUs to be dedicated, not all of them in a series.
+ self.flags(cpu_dedicated_set='1-3,5-7', cpu_shared_set='4,8-9',
+ group='compute')
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_power_management=True, group='libvirt')
+
+ self.host_info = fakelibvirt.HostInfo(cpu_nodes=1, cpu_sockets=1,
+ cpu_cores=5, cpu_threads=2)
+ self.compute1 = self.start_compute(host_info=self.host_info,
+ hostname='compute1')
+
+ # Make sure only the 6 dedicated cores are offline now
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+
+ # cores 4 and 8-9 should be online
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ def test_standard_server_works_and_passes(self):
+
+ std_flavor_id = self._create_flavor(vcpu=2)
+ self._create_server(flavor_id=std_flavor_id, expected_state='ACTIVE')
+
+ # Since this is an instance with floating vCPUs on the shared set, we
+ # can only look up the host CPUs and see they haven't changed state.
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set()
+ self._assert_cpu_set_state(cpu_dedicated_set, expected='offline')
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
+
+ # We can now try to boot an instance with pinned CPUs to test the mix
+ pinned_server = self._create_server(
+ flavor_id=self.pcpu_flavor_id,
+ expected_state='ACTIVE')
+ # We'll see that its CPUs are now online
+ self._assert_server_cpus_state(pinned_server, expected='online')
+ # but it doesn't change the shared set
+ self._assert_cpu_set_state({4, 8, 9}, expected='online')
diff --git a/nova/tests/functional/libvirt/test_report_cpu_traits.py b/nova/tests/functional/libvirt/test_report_cpu_traits.py
index 2386ec5251..99e68b7b5c 100644
--- a/nova/tests/functional/libvirt/test_report_cpu_traits.py
+++ b/nova/tests/functional/libvirt/test_report_cpu_traits.py
@@ -13,11 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import os_resource_classes as orc
import os_traits as ost
-
from nova import conf
from nova.db import constants as db_const
from nova import test
@@ -190,7 +190,6 @@ class LibvirtReportNoSevTraitsTests(LibvirtReportTraitsTestBase):
class LibvirtReportSevTraitsTests(LibvirtReportTraitsTestBase):
STUB_INIT_HOST = False
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, True)
@test.patch_open(SEV_KERNEL_PARAM_FILE, "1\n")
@mock.patch.object(
fakelibvirt.virConnect, '_domain_capability_features',
@@ -198,7 +197,8 @@ class LibvirtReportSevTraitsTests(LibvirtReportTraitsTestBase):
def setUp(self):
super(LibvirtReportSevTraitsTests, self).setUp()
self.flags(num_memory_encrypted_guests=16, group='libvirt')
- self.start_compute()
+ with test.patch_exists(SEV_KERNEL_PARAM_FILE, True):
+ self.start_compute()
def test_sev_trait_on_off(self):
"""Test that the compute service reports the SEV trait in the list of
diff --git a/nova/tests/functional/libvirt/test_reshape.py b/nova/tests/functional/libvirt/test_reshape.py
index 5c73ffbf5f..1f924739e3 100644
--- a/nova/tests/functional/libvirt/test_reshape.py
+++ b/nova/tests/functional/libvirt/test_reshape.py
@@ -12,7 +12,7 @@
# under the License.
import io
-import mock
+from unittest import mock
from oslo_config import cfg
from oslo_log import log as logging
@@ -30,17 +30,7 @@ LOG = logging.getLogger(__name__)
class VGPUReshapeTests(base.ServersTestBase):
- @mock.patch('nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
- return_value={'total': 128,
- 'used': 44,
- 'free': 84})
- @mock.patch('nova.virt.libvirt.driver.libvirt_utils.is_valid_hostname',
- return_value=True)
- @mock.patch('nova.virt.libvirt.driver.libvirt_utils.file_open',
- side_effect=[io.BytesIO(b''), io.BytesIO(b''),
- io.BytesIO(b'')])
- def test_create_servers_with_vgpu(
- self, mock_file_open, mock_valid_hostname, mock_get_fs_info):
+ def test_create_servers_with_vgpu(self):
"""Verify that vgpu reshape works with libvirt driver
1) create two servers with an old tree where the VGPU resource is on
@@ -49,7 +39,8 @@ class VGPUReshapeTests(base.ServersTestBase):
3) check that the allocations of the servers are still valid
4) create another server now against the new tree
"""
-
+ self.mock_file_open.side_effect = [
+ io.BytesIO(b''), io.BytesIO(b''), io.BytesIO(b'')]
# NOTE(gibi): We cannot simply ask the virt driver to create an old
# RP tree with vgpu on the root RP as that code path does not exist
# any more. So we have to hack a "bit". We will create a compute
@@ -81,11 +72,11 @@ class VGPUReshapeTests(base.ServersTestBase):
# ignore the content of the above HostMdevDeviceInfo
self.flags(enabled_mdev_types='', group='devices')
- hostname = self.start_compute(
+ self.hostname = self.start_compute(
hostname='compute1',
mdev_info=fakelibvirt.HostMdevDevicesInfo(devices=mdevs),
)
- self.compute = self.computes[hostname]
+ self.compute = self.computes[self.hostname]
# create the VGPU resource in placement manually
compute_rp_uuid = self.placement.get(
@@ -167,7 +158,7 @@ class VGPUReshapeTests(base.ServersTestBase):
allocations[compute_rp_uuid]['resources'])
# restart compute which will trigger a reshape
- self.compute = self.restart_compute_service(self.compute)
+ self.compute = self.restart_compute_service(self.hostname)
# verify that the inventory, usages and allocation are correct after
# the reshape
diff --git a/nova/tests/functional/libvirt/test_uefi.py b/nova/tests/functional/libvirt/test_uefi.py
index 1eee1ab5e1..40becf425e 100644
--- a/nova/tests/functional/libvirt/test_uefi.py
+++ b/nova/tests/functional/libvirt/test_uefi.py
@@ -14,6 +14,7 @@
# under the License.
import datetime
+import re
from lxml import etree
from oslo_log import log as logging
@@ -47,6 +48,8 @@ class UEFIServersTest(base.ServersTestBase):
orig_create = nova.virt.libvirt.guest.Guest.create
def fake_create(cls, xml, host):
+ xml = re.sub('type arch.*machine',
+ 'type machine', xml)
tree = etree.fromstring(xml)
self.assertXmlEqual(
"""
diff --git a/nova/tests/functional/libvirt/test_vgpu.py b/nova/tests/functional/libvirt/test_vgpu.py
index f25ce44221..686582120a 100644
--- a/nova/tests/functional/libvirt/test_vgpu.py
+++ b/nova/tests/functional/libvirt/test_vgpu.py
@@ -49,11 +49,11 @@ class VGPUTestBase(base.ServersTestBase):
def setUp(self):
super(VGPUTestBase, self).setUp()
- self.useFixture(fixtures.MockPatch(
- 'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
- return_value={'total': 128,
- 'used': 44,
- 'free': 84}))
+ libvirt_driver.LibvirtDriver._get_local_gb_info.return_value = {
+ 'total': 128,
+ 'used': 44,
+ 'free': 84,
+ }
self.useFixture(fixtures.MockPatch(
'nova.privsep.libvirt.create_mdev',
side_effect=self._create_mdev))
@@ -113,8 +113,8 @@ class VGPUTestBase(base.ServersTestBase):
parent=libvirt_parent)})
return uuid
- def start_compute(self, hostname):
- hostname = super().start_compute(
+ def start_compute_with_vgpu(self, hostname):
+ hostname = self.start_compute(
pci_info=fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
),
@@ -197,7 +197,7 @@ class VGPUTests(VGPUTestBase):
enabled_mdev_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
group='devices')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
def assert_vgpu_usage_for_compute(self, compute, expected):
self.assert_mdev_usage(compute, expected_amount=expected)
@@ -211,7 +211,7 @@ class VGPUTests(VGPUTestBase):
def test_resize_servers_with_vgpu(self):
# Add another compute for the sake of resizing
- self.compute2 = self.start_compute('host2')
+ self.compute2 = self.start_compute_with_vgpu('host2')
server = self._create_server(
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
flavor_id=self.flavor, host=self.compute1.host,
@@ -337,7 +337,7 @@ class VGPUMultipleTypesTests(VGPUTestBase):
# Prepare traits for later on
self._create_trait('CUSTOM_NVIDIA_11')
self._create_trait('CUSTOM_NVIDIA_12')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
def test_create_servers_with_vgpu(self):
self._create_server(
@@ -369,13 +369,12 @@ class VGPUMultipleTypesTests(VGPUTestBase):
def test_create_servers_with_specific_type(self):
# Regenerate the PCI addresses so both pGPUs now support nvidia-12
- connection = self.computes[
- self.compute1.host].driver._host.get_connection()
- connection.pci_info = fakelibvirt.HostPCIDevicesInfo(
+ pci_info = fakelibvirt.HostPCIDevicesInfo(
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
multiple_gpu_types=True)
# Make a restart to update the Resource Providers
- self.compute1 = self.restart_compute_service(self.compute1)
+ self.compute1 = self.restart_compute_service(
+ self.compute1.host, pci_info=pci_info, keep_hypervisor_state=False)
pgpu1_rp_uuid = self._get_provider_uuid_by_name(
self.compute1.host + '_' + fakelibvirt.MDEVCAP_DEV1_PCI_ADDR)
pgpu2_rp_uuid = self._get_provider_uuid_by_name(
@@ -451,7 +450,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
group='mdev_nvidia-12')
self.flags(mdev_class='CUSTOM_NOTVGPU', group='mdev_mlx5_core')
- self.compute1 = self.start_compute('host1')
+ self.compute1 = self.start_compute_with_vgpu('host1')
# Regenerate the PCI addresses so they can support both mlx5 and
# nvidia-12 types
connection = self.computes[
@@ -460,7 +459,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
generic_types=True)
# Make a restart to update the Resource Providers
- self.compute1 = self.restart_compute_service(self.compute1)
+ self.compute1 = self.restart_compute_service('host1')
def test_create_servers_with_different_mdev_classes(self):
physdev1_rp_uuid = self._get_provider_uuid_by_name(
@@ -498,7 +497,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
def test_resize_servers_with_mlx5(self):
# Add another compute for the sake of resizing
- self.compute2 = self.start_compute('host2')
+ self.compute2 = self.start_compute_with_vgpu('host2')
# Regenerate the PCI addresses so they can support both mlx5 and
# nvidia-12 types
connection = self.computes[
@@ -507,7 +506,7 @@ class DifferentMdevClassesTests(VGPUTestBase):
num_pci=0, num_pfs=0, num_vfs=0, num_mdevcap=2,
generic_types=True)
# Make a restart to update the Resource Providers
- self.compute2 = self.restart_compute_service(self.compute2)
+ self.compute2 = self.restart_compute_service('host2')
# Use the new flavor for booting
server = self._create_server(
diff --git a/nova/tests/functional/libvirt/test_vpmem.py b/nova/tests/functional/libvirt/test_vpmem.py
index d1cad0e376..cb524fe8b6 100644
--- a/nova/tests/functional/libvirt/test_vpmem.py
+++ b/nova/tests/functional/libvirt/test_vpmem.py
@@ -12,9 +12,11 @@
# under the License.
import fixtures
+from unittest import mock
from oslo_config import cfg
from oslo_log import log as logging
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
from nova.tests import fixtures as nova_fixtures
@@ -75,6 +77,7 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
'nova.privsep.libvirt.get_pmem_namespaces',
return_value=self.fake_pmem_namespaces))
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
return_value={'total': 128,
@@ -99,7 +102,9 @@ class VPMEMTestBase(integrated_helpers.LibvirtProviderUsageBaseTestCase):
cpu_cores=2, cpu_threads=2),
hostname=hostname)
self.mock_conn.return_value = fake_connection
- compute = self._start_compute(host=hostname)
+ with mock.patch('nova.virt.node.get_local_node_uuid') as m:
+ m.return_value = str(getattr(uuids, 'node_%s' % hostname))
+ compute = self._start_compute(host=hostname)
# Ensure populating the existing pmems correctly.
vpmems = compute.driver._vpmems_by_name
diff --git a/nova/tests/functional/libvirt/test_vtpm.py b/nova/tests/functional/libvirt/test_vtpm.py
index c07c38f02d..3b5ae9a60f 100644
--- a/nova/tests/functional/libvirt/test_vtpm.py
+++ b/nova/tests/functional/libvirt/test_vtpm.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from castellan.common.objects import passphrase
from castellan.key_manager import key_manager
@@ -128,7 +128,7 @@ class VTPMServersTest(base.ServersTestBase):
# the presence of users on the host, none of which makes sense here
_p = mock.patch(
'nova.virt.libvirt.driver.LibvirtDriver._check_vtpm_support')
- self.mock_conn = _p.start()
+ _p.start()
self.addCleanup(_p.stop)
self.key_mgr = crypto._get_key_manager()
diff --git a/nova/tests/functional/notification_sample_tests/notification_sample_base.py b/nova/tests/functional/notification_sample_tests/notification_sample_base.py
index 72291e55cd..d987ff127c 100644
--- a/nova/tests/functional/notification_sample_tests/notification_sample_base.py
+++ b/nova/tests/functional/notification_sample_tests/notification_sample_base.py
@@ -12,9 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
import os
import time
+from unittest import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
diff --git a/nova/tests/functional/notification_sample_tests/test_compute_task.py b/nova/tests/functional/notification_sample_tests/test_compute_task.py
index 3de1c7d4e1..05d2d32fde 100644
--- a/nova/tests/functional/notification_sample_tests/test_compute_task.py
+++ b/nova/tests/functional/notification_sample_tests/test_compute_task.py
@@ -10,6 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
+from nova import objects
from nova.tests import fixtures
from nova.tests.functional.notification_sample_tests \
import notification_sample_base
@@ -53,6 +56,10 @@ class TestComputeTaskNotificationSample(
},
actual=self.notifier.versioned_notifications[1])
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_rebuild_fault(self):
server = self._boot_a_server(
extra_params={'networks': [{'port': self.neutron.port_1['id']}]},
diff --git a/nova/tests/functional/notification_sample_tests/test_instance.py b/nova/tests/functional/notification_sample_tests/test_instance.py
index 710b2a71fb..5a52c2dad6 100644
--- a/nova/tests/functional/notification_sample_tests/test_instance.py
+++ b/nova/tests/functional/notification_sample_tests/test_instance.py
@@ -11,8 +11,8 @@
# under the License.
import time
+from unittest import mock
-import mock
from nova import exception
from nova.tests import fixtures
@@ -46,18 +46,18 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.compute2 = self.start_service('compute', host='host2')
actions = [
- self._test_live_migration_rollback,
- self._test_live_migration_abort,
- self._test_live_migration_success,
- self._test_evacuate_server,
- self._test_live_migration_force_complete
+ (self._test_live_migration_rollback, 'ACTIVE'),
+ (self._test_live_migration_abort, 'ACTIVE'),
+ (self._test_live_migration_success, 'ACTIVE'),
+ (self._test_evacuate_server, 'SHUTOFF'),
+ (self._test_live_migration_force_complete, 'ACTIVE'),
]
- for action in actions:
+ for action, expected_state in actions:
self.notifier.reset()
action(server)
# Ensure that instance is in active state after an action
- self._wait_for_state_change(server, 'ACTIVE')
+ self._wait_for_state_change(server, expected_state)
@mock.patch('nova.compute.manager.ComputeManager.'
'_live_migration_cleanup_flags', return_value=[True, False])
@@ -193,7 +193,7 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.admin_api.delete_migration(server['id'], migrations[0]['id'])
self._wait_for_notification('instance.live_migration_abort.start')
self._wait_for_state_change(server, 'ACTIVE')
- # NOTE(gibi): the intance.live_migration_rollback notification emitted
+ # NOTE(gibi): the instance.live_migration_rollback notification emitted
# after the instance.live_migration_abort notification so we have to
# wait for the rollback to ensure we can assert both notifications
# below
@@ -275,6 +275,12 @@ class TestInstanceNotificationSampleWithMultipleCompute(
self.admin_api.put_service(service_id, {'forced_down': False})
def _test_live_migration_force_complete(self, server):
+ # In this scenario an evacuation happened earlier, which left the
+ # server stopped, so start it again before live migrating.
+ self._start_server(server)
+ self._wait_for_state_change(server, 'ACTIVE')
+ self.notifier.reset()
+
post = {
'os-migrateLive': {
'host': 'host2',
@@ -1231,7 +1237,7 @@ class TestInstanceNotificationSample(
'nova_object.data': {},
'nova_object.name': 'ImageMetaPropsPayload',
'nova_object.namespace': 'nova',
- 'nova_object.version': '1.8',
+ 'nova_object.version': '1.12',
},
'image.size': 58145823,
'image.tags': [],
@@ -1327,7 +1333,7 @@ class TestInstanceNotificationSample(
'nova_object.data': {},
'nova_object.name': 'ImageMetaPropsPayload',
'nova_object.namespace': 'nova',
- 'nova_object.version': '1.8',
+ 'nova_object.version': '1.12',
},
'image.size': 58145823,
'image.tags': [],
@@ -1500,8 +1506,8 @@ class TestInstanceNotificationSample(
self.api.delete_server_volume(server['id'], volume_id)
self._wait_for_notification('instance.volume_detach.end')
- def _volume_swap_server(self, server, attachement_id, volume_id):
- self.api.put_server_volume(server['id'], attachement_id, volume_id)
+ def _volume_swap_server(self, server, attachment_id, volume_id):
+ self.api.put_server_volume(server['id'], attachment_id, volume_id)
def test_volume_swap_server(self):
server = self._boot_a_server(
diff --git a/nova/tests/functional/notification_sample_tests/test_keypair.py b/nova/tests/functional/notification_sample_tests/test_keypair.py
index b2481f1b2a..01c59b0f36 100644
--- a/nova/tests/functional/notification_sample_tests/test_keypair.py
+++ b/nova/tests/functional/notification_sample_tests/test_keypair.py
@@ -16,7 +16,12 @@ from nova.tests.functional.notification_sample_tests \
class TestKeypairNotificationSample(
notification_sample_base.NotificationSampleTestBase):
+ api_major_version = 'v2.1'
+ microversion = 'latest'
+
def test_keypair_create_delete(self):
+ # Keypair generation is no longer supported starting with the 2.92
+ # microversion, so pin this test to 2.91.
+ self.api.microversion = '2.91'
keypair_req = {
"keypair": {
"name": "my-key",
diff --git a/nova/tests/functional/notification_sample_tests/test_libvirt.py b/nova/tests/functional/notification_sample_tests/test_libvirt.py
index 8106edd44a..feed05a64c 100644
--- a/nova/tests/functional/notification_sample_tests/test_libvirt.py
+++ b/nova/tests/functional/notification_sample_tests/test_libvirt.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
import nova.conf
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1554631.py b/nova/tests/functional/regressions/test_bug_1554631.py
index 2db5e37b91..25a4613e72 100644
--- a/nova/tests/functional/regressions/test_bug_1554631.py
+++ b/nova/tests/functional/regressions/test_bug_1554631.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from cinderclient import exceptions as cinder_exceptions
-import mock
from nova import test
from nova.tests import fixtures as nova_fixtures
diff --git a/nova/tests/functional/regressions/test_bug_1595962.py b/nova/tests/functional/regressions/test_bug_1595962.py
index ebdf82f21a..9232eea335 100644
--- a/nova/tests/functional/regressions/test_bug_1595962.py
+++ b/nova/tests/functional/regressions/test_bug_1595962.py
@@ -13,10 +13,10 @@
# under the License.
import time
+from unittest import mock
import fixtures
import io
-import mock
from nova import test
from nova.tests import fixtures as nova_fixtures
@@ -47,6 +47,7 @@ class TestSerialConsoleLiveMigrate(test.TestCase):
'nova.virt.libvirt.guest.libvirt',
fakelibvirt))
self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
self.admin_api = api_fixture.admin_api
self.api = api_fixture.api
diff --git a/nova/tests/functional/regressions/test_bug_1628606.py b/nova/tests/functional/regressions/test_bug_1628606.py
new file mode 100644
index 0000000000..0fccd78cce
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1628606.py
@@ -0,0 +1,60 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client
+from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
+from unittest import mock
+
+
+class PostLiveMigrationFail(
+ test.TestCase, integrated_helpers.InstanceHelperMixin):
+ """Regression test for bug 1628606
+ """
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+ self.useFixture(nova_fixtures.HostNameWeigherFixture())
+
+ self.start_service('conductor')
+ self.start_service('scheduler')
+
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+
+ self.api = api_fixture.admin_api
+ self.api.microversion = 'latest'
+
+ self.src = self._start_compute(host='host1')
+ self.dest = self._start_compute(host='host2')
+
+ @mock.patch(
+ 'nova.compute.manager.ComputeManager'
+ '._post_live_migration_remove_source_vol_connections')
+ def test_post_live_migration(self, mock_migration):
+ server = self._create_server(networks=[])
+ self.assertEqual(self.src.host, server['OS-EXT-SRV-ATTR:host'])
+
+ error = client.OpenStackApiException(
+ "Failed to remove source vol connection post live migration")
+ mock_migration.side_effect = error
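+ # The failure is injected after the guest has already switched to the
+ # destination, so the instance should end up in ERROR while still
+ # being reported on the destination host.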
+
+ server = self._live_migrate(
+ server, migration_expected_state='error',
+ server_expected_state='ERROR')
+
+ self.assertEqual(self.dest.host, server['OS-EXT-SRV-ATTR:host'])
diff --git a/nova/tests/functional/regressions/test_bug_1669054.py b/nova/tests/functional/regressions/test_bug_1669054.py
index 6180dbfbaa..b20e1530cc 100644
--- a/nova/tests/functional/regressions/test_bug_1669054.py
+++ b/nova/tests/functional/regressions/test_bug_1669054.py
@@ -59,7 +59,8 @@ class ResizeEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# Now try to evacuate the server back to the original source compute.
server = self._evacuate_server(
server, {'onSharedStorage': 'False'},
- expected_host=self.compute.host, expected_migration_status='done')
+ expected_host=self.compute.host, expected_migration_status='done',
+ expected_state='ACTIVE')
# Assert the RequestSpec.ignore_hosts field is not populated.
reqspec = objects.RequestSpec.get_by_instance_uuid(
diff --git a/nova/tests/functional/regressions/test_bug_1713783.py b/nova/tests/functional/regressions/test_bug_1713783.py
index 9a6a79d7a2..8088ccfe06 100644
--- a/nova/tests/functional/regressions/test_bug_1713783.py
+++ b/nova/tests/functional/regressions/test_bug_1713783.py
@@ -13,9 +13,11 @@
# limitations under the License.
import time
+from unittest import mock
from oslo_log import log as logging
+from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
@@ -81,6 +83,10 @@ class FailedEvacuateStateTests(test.TestCase,
created_server = self.api.post_server({'server': server_req})
return self._wait_for_state_change(created_server, 'ACTIVE')
+ @mock.patch.object(
+ objects.service, 'get_minimum_version_all_cells',
+ new=mock.Mock(return_value=62)
+ )
def test_evacuate_no_valid_host(self):
# Boot a server
server = self._boot_a_server()
diff --git a/nova/tests/functional/regressions/test_bug_1732947.py b/nova/tests/functional/regressions/test_bug_1732947.py
index 3637f40bc2..db518fa8ce 100644
--- a/nova/tests/functional/regressions/test_bug_1732947.py
+++ b/nova/tests/functional/regressions/test_bug_1732947.py
@@ -28,7 +28,9 @@ class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase):
original image.
"""
api_major_version = 'v2.1'
- microversion = 'latest'
+ # We need a microversion < 2.93 to get the old BFV rebuild behavior
+ # that was the environment for this regression.
+ microversion = '2.92'
def _setup_scheduler_service(self):
# Add the IsolatedHostsFilter to the list of enabled filters since it
diff --git a/nova/tests/functional/regressions/test_bug_1764883.py b/nova/tests/functional/regressions/test_bug_1764883.py
index aa86770584..59bbed4f46 100644
--- a/nova/tests/functional/regressions/test_bug_1764883.py
+++ b/nova/tests/functional/regressions/test_bug_1764883.py
@@ -95,7 +95,8 @@ class TestEvacuationWithSourceReturningDuringRebuild(
# Evacuate the instance from the source_host
server = self._evacuate_server(
- server, expected_migration_status='done')
+ server, expected_migration_status='done',
+ expected_state='ACTIVE')
host = server['OS-EXT-SRV-ATTR:host']
migrations = self.api.get_migrations()
diff --git a/nova/tests/functional/regressions/test_bug_1781286.py b/nova/tests/functional/regressions/test_bug_1781286.py
index 7b2d603092..c123fd9214 100644
--- a/nova/tests/functional/regressions/test_bug_1781286.py
+++ b/nova/tests/functional/regressions/test_bug_1781286.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
-import mock
+from unittest import mock
+
from oslo_db import exception as oslo_db_exc
from nova.compute import manager as compute_manager
@@ -67,11 +67,11 @@ class RescheduleBuildAvailabilityZoneUpCall(
def wrap_bari(*args, **kwargs):
# Poison the AZ query to blow up as if the cell conductor does not
# have access to the API DB.
- self.useFixture(
- fixtures.MockPatch(
- 'nova.objects.AggregateList.get_by_host',
- side_effect=oslo_db_exc.CantStartEngineError))
- return original_bari(*args, **kwargs)
+ with mock.patch(
+ 'nova.objects.AggregateList.get_by_host',
+ side_effect=oslo_db_exc.CantStartEngineError
+ ):
+ return original_bari(*args, **kwargs)
self.stub_out('nova.compute.manager.ComputeManager.'
'build_and_run_instance', wrap_bari)
@@ -81,10 +81,6 @@ class RescheduleBuildAvailabilityZoneUpCall(
# compute service we have to wait for the notification that the build
# is complete and then stop the mock so we can use the API again.
self.notifier.wait_for_versioned_notifications('instance.create.end')
- # Note that we use stopall here because we actually called
- # build_and_run_instance twice so we have more than one instance of
- # the mock that needs to be stopped.
- mock.patch.stopall()
server = self._wait_for_state_change(server, 'ACTIVE')
# We should have rescheduled and the instance AZ should be set from the
# Selection object. Since neither compute host is in an AZ, the server
@@ -128,19 +124,20 @@ class RescheduleMigrateAvailabilityZoneUpCall(
self.rescheduled = None
def wrap_prep_resize(_self, *args, **kwargs):
- # Poison the AZ query to blow up as if the cell conductor does not
- # have access to the API DB.
- self.agg_mock = self.useFixture(
- fixtures.MockPatch(
- 'nova.objects.AggregateList.get_by_host',
- side_effect=oslo_db_exc.CantStartEngineError)).mock
if self.rescheduled is None:
# Track the first host that we rescheduled from.
self.rescheduled = _self.host
# Trigger a reschedule.
raise exception.ComputeResourcesUnavailable(
reason='test_migrate_reschedule_blocked_az_up_call')
- return original_prep_resize(_self, *args, **kwargs)
+ # Poison the AZ query to blow up as if the cell conductor does not
+ # have access to the API DB.
+ with mock.patch(
+ 'nova.objects.AggregateList.get_by_host',
+ side_effect=oslo_db_exc.CantStartEngineError,
+ ) as agg_mock:
+ self.agg_mock = agg_mock
+ return original_prep_resize(_self, *args, **kwargs)
self.stub_out('nova.compute.manager.ComputeManager._prep_resize',
wrap_prep_resize)
diff --git a/nova/tests/functional/regressions/test_bug_1823370.py b/nova/tests/functional/regressions/test_bug_1823370.py
index 5e69905f5f..af134070cd 100644
--- a/nova/tests/functional/regressions/test_bug_1823370.py
+++ b/nova/tests/functional/regressions/test_bug_1823370.py
@@ -66,4 +66,5 @@ class MultiCellEvacuateTestCase(integrated_helpers._IntegratedTestBase):
# higher than host3.
self._evacuate_server(
server, {'onSharedStorage': 'False'}, expected_host='host3',
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
diff --git a/nova/tests/functional/regressions/test_bug_1830747.py b/nova/tests/functional/regressions/test_bug_1830747.py
index 4cd8c3b1af..a28c896b99 100644
--- a/nova/tests/functional/regressions/test_bug_1830747.py
+++ b/nova/tests/functional/regressions/test_bug_1830747.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.conductor import api as conductor_api
from nova import context as nova_context
diff --git a/nova/tests/functional/regressions/test_bug_1831771.py b/nova/tests/functional/regressions/test_bug_1831771.py
index 2ec448b249..11e3ec7682 100644
--- a/nova/tests/functional/regressions/test_bug_1831771.py
+++ b/nova/tests/functional/regressions/test_bug_1831771.py
@@ -13,8 +13,8 @@
# under the License.
import collections
+from unittest import mock
-import mock
from nova.compute import task_states
from nova.compute import vm_states
diff --git a/nova/tests/functional/regressions/test_bug_1843090.py b/nova/tests/functional/regressions/test_bug_1843090.py
index ed02d59cb4..72793cc0bc 100644
--- a/nova/tests/functional/regressions/test_bug_1843090.py
+++ b/nova/tests/functional/regressions/test_bug_1843090.py
@@ -9,7 +9,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.compute
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1843708.py b/nova/tests/functional/regressions/test_bug_1843708.py
index 2ae725a5eb..2eda92125b 100644
--- a/nova/tests/functional/regressions/test_bug_1843708.py
+++ b/nova/tests/functional/regressions/test_bug_1843708.py
@@ -15,6 +15,7 @@
from nova import context
from nova import objects
from nova.tests.functional import integrated_helpers
+from nova.tests.unit import fake_crypto
class RebuildWithKeypairTestCase(integrated_helpers._IntegratedTestBase):
@@ -26,14 +27,19 @@ class RebuildWithKeypairTestCase(integrated_helpers._IntegratedTestBase):
microversion = 'latest'
def test_rebuild_with_keypair(self):
+ pub_key1 = fake_crypto.get_ssh_public_key()
+
keypair_req = {
'keypair': {
'name': 'test-key1',
'type': 'ssh',
+ 'public_key': pub_key1,
},
}
keypair1 = self.api.post_keypair(keypair_req)
+ pub_key2 = fake_crypto.get_ssh_public_key()
keypair_req['keypair']['name'] = 'test-key2'
+ keypair_req['keypair']['public_key'] = pub_key2
keypair2 = self.api.post_keypair(keypair_req)
server = self._build_server(networks='none')
diff --git a/nova/tests/functional/regressions/test_bug_1845291.py b/nova/tests/functional/regressions/test_bug_1845291.py
index 101774416a..e5e9c953a6 100644
--- a/nova/tests/functional/regressions/test_bug_1845291.py
+++ b/nova/tests/functional/regressions/test_bug_1845291.py
@@ -9,7 +9,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1849165.py b/nova/tests/functional/regressions/test_bug_1849165.py
index f2a7f82ee9..1d4cf2eece 100644
--- a/nova/tests/functional/regressions/test_bug_1849165.py
+++ b/nova/tests/functional/regressions/test_bug_1849165.py
@@ -9,7 +9,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova
from nova.tests.functional import integrated_helpers
diff --git a/nova/tests/functional/regressions/test_bug_1853009.py b/nova/tests/functional/regressions/test_bug_1853009.py
index 2ec69482a2..5266e6166b 100644
--- a/nova/tests/functional/regressions/test_bug_1853009.py
+++ b/nova/tests/functional/regressions/test_bug_1853009.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova import objects
diff --git a/nova/tests/functional/regressions/test_bug_1862633.py b/nova/tests/functional/regressions/test_bug_1862633.py
index 5cfcc75ab2..021093cf59 100644
--- a/nova/tests/functional/regressions/test_bug_1862633.py
+++ b/nova/tests/functional/regressions/test_bug_1862633.py
@@ -9,8 +9,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from neutronclient.common import exceptions as neutron_exception
+from unittest import mock
from nova import test
from nova.tests import fixtures as nova_fixtures
diff --git a/nova/tests/functional/regressions/test_bug_1879878.py b/nova/tests/functional/regressions/test_bug_1879878.py
index 3a21c5c11d..c50f8ac92e 100644
--- a/nova/tests/functional/regressions/test_bug_1879878.py
+++ b/nova/tests/functional/regressions/test_bug_1879878.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from nova.compute import resource_tracker as rt
from nova import context as nova_context
diff --git a/nova/tests/functional/regressions/test_bug_1888395.py b/nova/tests/functional/regressions/test_bug_1888395.py
index 36eb0e0f52..c50b78e2f6 100644
--- a/nova/tests/functional/regressions/test_bug_1888395.py
+++ b/nova/tests/functional/regressions/test_bug_1888395.py
@@ -23,14 +23,8 @@ from nova.tests.fixtures import libvirt as fakelibvirt
from nova.tests.functional.libvirt import base as libvirt_base
-class TestLiveMigrationWithoutMultiplePortBindings(
+class TestLiveMigrationWithoutMultiplePortBindingsBase(
libvirt_base.ServersTestBase):
- """Regression test for bug 1888395.
-
- This regression test asserts that Live migration works when
- neutron does not support the binding-extended api extension
- and the legacy single port binding workflow is used.
- """
ADMIN_API = True
microversion = 'latest'
@@ -72,6 +66,16 @@ class TestLiveMigrationWithoutMultiplePortBindings(
'nova.tests.fixtures.libvirt.Domain.migrateToURI3',
self._migrate_stub))
+
+class TestLiveMigrationWithoutMultiplePortBindings(
+ TestLiveMigrationWithoutMultiplePortBindingsBase):
+ """Regression test for bug 1888395.
+
+ This regression test asserts that Live migration works when
+ neutron does not support the binding-extended api extension
+ and the legacy single port binding workflow is used.
+ """
+
def _migrate_stub(self, domain, destination, params, flags):
"""Stub out migrateToURI3."""
@@ -108,7 +112,7 @@ class TestLiveMigrationWithoutMultiplePortBindings(
networks=[{'port': self.neutron.port_1['id']}])
self.assertFalse(
- self.neutron_api.supports_port_binding_extension(self.ctxt))
+ self.neutron_api.has_port_binding_extension(self.ctxt))
# TODO(sean-k-mooney): extend _live_migrate to support passing a host
self.api.post_server_action(
server['id'],
@@ -124,3 +128,25 @@ class TestLiveMigrationWithoutMultiplePortBindings(
server, {'OS-EXT-SRV-ATTR:host': 'end_host', 'status': 'ACTIVE'})
msg = "NotImplementedError: Cannot load 'vif_type' in the base class"
self.assertNotIn(msg, self.stdlog.logger.output)
+
+
+class TestLiveMigrationRollbackWithoutMultiplePortBindings(
+ TestLiveMigrationWithoutMultiplePortBindingsBase):
+
+ def _migrate_stub(self, domain, destination, params, flags):
+ source = self.computes['start_host']
+ conn = source.driver._host.get_connection()
+ dom = conn.lookupByUUIDString(self.server['id'])
+ dom.fail_job()
+
+ def test_live_migration_rollback(self):
+ self.server = self._create_server(
+ host='start_host',
+ networks=[{'port': self.neutron.port_1['id']}])
+
+ self.assertFalse(
+ self.neutron_api.has_port_binding_extension(self.ctxt))
+ # NOTE(artom) The live migration will still fail (we fail it in
+ # _migrate_stub()), but the server should correctly rollback to ACTIVE.
+ self._live_migrate(self.server, migration_expected_state='failed',
+ server_expected_state='ACTIVE')
diff --git a/nova/tests/functional/regressions/test_bug_1889108.py b/nova/tests/functional/regressions/test_bug_1889108.py
index 0e847e81ab..9ec67e4bf7 100644
--- a/nova/tests/functional/regressions/test_bug_1889108.py
+++ b/nova/tests/functional/regressions/test_bug_1889108.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import test
from nova.tests import fixtures as nova_fixtures
diff --git a/nova/tests/functional/regressions/test_bug_1890244.py b/nova/tests/functional/regressions/test_bug_1890244.py
new file mode 100644
index 0000000000..bf969eebe7
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1890244.py
@@ -0,0 +1,96 @@
+# Copyright 2017 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from nova import context
+from nova import objects
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
+
+
+class IgnoreDeletedServerGroupsTest(
+ test.TestCase, integrated_helpers.InstanceHelperMixin,
+):
+ """Regression test for bug 1890244
+
+ If instances are created as members of server groups, it
+ should be possible to evacuate them if the server groups are
+ deleted prior to the host failure.
+ """
+
+ def setUp(self):
+ super().setUp()
+ # Stub out external dependencies.
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+ # Start nova controller services.
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+ self.api = api_fixture.admin_api
+ self.start_service('conductor')
+ # Use a custom weigher to make sure that we have a predictable
+ # scheduling sort order.
+ self.useFixture(nova_fixtures.HostNameWeigherFixture())
+ self.start_service('scheduler')
+ # Start two computes, one where the server will be created and another
+ # where we'll evacuate it to.
+ self.src = self._start_compute('host1')
+ self.dest = self._start_compute('host2')
+ self.notifier = self.useFixture(
+ nova_fixtures.NotificationFixture(self)
+ )
+
+ def test_evacuate_after_group_delete(self):
+ # Create an anti-affinity group for the server.
+ body = {
+ 'server_group': {
+ 'name': 'test-group',
+ 'policies': ['anti-affinity']
+ }
+ }
+ group_id = self.api.api_post(
+ '/os-server-groups', body).body['server_group']['id']
+
+ # Create a server in the group which should land on host1 due to our
+ # custom weigher.
+ body = {'server': self._build_server()}
+ body['os:scheduler_hints'] = {'group': group_id}
+ server = self.api.post_server(body)
+ server = self._wait_for_state_change(server, 'ACTIVE')
+ self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])
+
+ # Down the source compute to enable the evacuation
+ self.api.microversion = '2.11' # Cap for the force-down call.
+ self.api.force_down_service('host1', 'nova-compute', True)
+ self.api.microversion = 'latest'
+ self.src.stop()
+
+ # assert the server currently has a server group
+ reqspec = objects.RequestSpec.get_by_instance_uuid(
+ context.get_admin_context(), server['id'])
+ self.assertIsNotNone(reqspec.instance_group)
+ self.assertIn('group', reqspec.scheduler_hints)
+ # then delete it so that we need to clean it up on evac
+ self.api.api_delete(f'/os-server-groups/{group_id}')
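+ # Evacuating now exercises the path that has to cope with a
+ # RequestSpec still referencing a server group that no longer exists;
+ # the stale group should be dropped instead of failing the request.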
+
+ # Initiate evacuation
+ server = self._evacuate_server(
+ server, expected_host='host2', expected_migration_status='done'
+ )
+ reqspec = objects.RequestSpec.get_by_instance_uuid(
+ context.get_admin_context(), server['id'])
+ self.assertIsNone(reqspec.instance_group)
+ self.assertNotIn('group', reqspec.scheduler_hints)
diff --git a/nova/tests/functional/regressions/test_bug_1893284.py b/nova/tests/functional/regressions/test_bug_1893284.py
index b7ca848c4d..ccb12f50b7 100644
--- a/nova/tests/functional/regressions/test_bug_1893284.py
+++ b/nova/tests/functional/regressions/test_bug_1893284.py
@@ -25,7 +25,7 @@ class TestServersPerUserQuota(test.TestCase,
tracking usages in a separate database table. As part of that change,
per-user quota functionality was broken for server creates.
- When mulitple users in the same project have per-user quota, they are meant
+ When multiple users in the same project have per-user quota, they are meant
to be allowed to create resources such that may not exceed their
per-user quota nor their project quota.
diff --git a/nova/tests/functional/regressions/test_bug_1896463.py b/nova/tests/functional/regressions/test_bug_1896463.py
index dc74791e0e..3cfece8d36 100644
--- a/nova/tests/functional/regressions/test_bug_1896463.py
+++ b/nova/tests/functional/regressions/test_bug_1896463.py
@@ -216,7 +216,7 @@ class TestEvacuateResourceTrackerRace(
self._run_periodics()
self._wait_for_server_parameter(
- server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'ACTIVE'})
+ server, {'OS-EXT-SRV-ATTR:host': 'host2', 'status': 'SHUTOFF'})
self._assert_pci_device_allocated(server['id'], self.compute1_id)
self._assert_pci_device_allocated(server['id'], self.compute2_id)
diff --git a/nova/tests/functional/regressions/test_bug_1899835.py b/nova/tests/functional/regressions/test_bug_1899835.py
index 4713763f0f..ad4d315659 100644
--- a/nova/tests/functional/regressions/test_bug_1899835.py
+++ b/nova/tests/functional/regressions/test_bug_1899835.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova import objects
diff --git a/nova/tests/functional/regressions/test_bug_1902925.py b/nova/tests/functional/regressions/test_bug_1902925.py
index f0e823e2a4..59105c6cc6 100644
--- a/nova/tests/functional/regressions/test_bug_1902925.py
+++ b/nova/tests/functional/regressions/test_bug_1902925.py
@@ -28,6 +28,11 @@ class ComputeVersion5xPinnedRpcTests(integrated_helpers._IntegratedTestBase):
self.compute1 = self._start_compute(host='host1')
def _test_rebuild_instance_with_compute_rpc_pin(self, version_cap):
+ # With the latest microversion (>= 2.93) the 'reimage_boot_volume'
+ # parameter is passed as True, which requires compute RPC 6.1 and is
+ # not compatible with the pinned RPC versions used in these tests, so
+ # they would fail. Pin the microversion to 2.92 instead.
+ self.api.microversion = '2.92'
self.flags(compute=version_cap, group='upgrade_levels')
server_req = self._build_server(networks='none')
diff --git a/nova/tests/functional/regressions/test_bug_1914777.py b/nova/tests/functional/regressions/test_bug_1914777.py
index d8c9f5e15f..470c852669 100644
--- a/nova/tests/functional/regressions/test_bug_1914777.py
+++ b/nova/tests/functional/regressions/test_bug_1914777.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context as nova_context
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1922053.py b/nova/tests/functional/regressions/test_bug_1922053.py
index 612be27b2b..70bb3d4cab 100644
--- a/nova/tests/functional/regressions/test_bug_1922053.py
+++ b/nova/tests/functional/regressions/test_bug_1922053.py
@@ -1,3 +1,4 @@
+
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -27,6 +28,7 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
ADMIN_API = True
microversion = 'latest'
+ expected_state = 'SHUTOFF'
def _create_test_server(self, compute_host):
return self._create_server(host=compute_host, networks='none')
@@ -59,7 +61,8 @@ class ForceUpWithDoneEvacuations(integrated_helpers._IntegratedTestBase):
server = self._evacuate_server(
server,
expected_host='compute2',
- expected_migration_status='done'
+ expected_migration_status='done',
+ expected_state=self.expected_state
)
# Assert that the request to force up the host is rejected
@@ -97,6 +100,7 @@ class ForceUpWithDoneEvacuationsv252(ForceUpWithDoneEvacuations):
"""
microversion = '2.52'
+ expected_state = 'ACTIVE'
def _create_test_server(self, compute_host):
return self._create_server(az='nova:compute', networks='none')
diff --git a/nova/tests/functional/regressions/test_bug_1928063.py b/nova/tests/functional/regressions/test_bug_1928063.py
index b1b1d36e16..94d7b8122c 100644
--- a/nova/tests/functional/regressions/test_bug_1928063.py
+++ b/nova/tests/functional/regressions/test_bug_1928063.py
@@ -11,7 +11,7 @@
# under the License.
import copy
-import mock
+from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
@@ -30,7 +30,6 @@ class TestSEVInstanceReboot(base.ServersTestBase):
"""
microversion = 'latest'
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, True)
@test.patch_open(SEV_KERNEL_PARAM_FILE, "1\n")
@mock.patch.object(
fakelibvirt.virConnect, '_domain_capability_features',
@@ -40,7 +39,8 @@ class TestSEVInstanceReboot(base.ServersTestBase):
# Configure the compute to allow SEV based instances and then start
self.flags(num_memory_encrypted_guests=16, group='libvirt')
- self.start_compute()
+ with test.patch_exists(SEV_KERNEL_PARAM_FILE, True):
+ self.start_compute()
# Create a SEV enabled image for the test
sev_image = copy.deepcopy(self.glance.image1)
diff --git a/nova/tests/functional/regressions/test_bug_1937084.py b/nova/tests/functional/regressions/test_bug_1937084.py
index 3ef432ae5e..bec3c9f5cb 100644
--- a/nova/tests/functional/regressions/test_bug_1937084.py
+++ b/nova/tests/functional/regressions/test_bug_1937084.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1937375.py b/nova/tests/functional/regressions/test_bug_1937375.py
index 860d80acd7..13a1f5c4c9 100644
--- a/nova/tests/functional/regressions/test_bug_1937375.py
+++ b/nova/tests/functional/regressions/test_bug_1937375.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova import exception
diff --git a/nova/tests/functional/regressions/test_bug_1944619.py b/nova/tests/functional/regressions/test_bug_1944619.py
new file mode 100644
index 0000000000..430a6e3981
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1944619.py
@@ -0,0 +1,76 @@
+# Copyright 2021, Canonical, Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception as nova_exceptions
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base
+
+
+class TestRollbackWithHWOffloadedOVS(
+ base.LibvirtMigrationMixin,
+ base.ServersTestBase,
+ integrated_helpers.InstanceHelperMixin
+):
+ """Regression test for bug LP#1944619
+
+ Assert the behaviour observed in bug LP#1944619, caused by the live
+ migration cleanup code being used to clean up pre-live-migration
+ failures. When SRIOV devices are in use on a VM, this makes the source
+ host try to re-attach a VIF that was never detached, causing a failure.
+
+ The exception mocked in pre_live_migration reproduces an arbitrary error
+ that might make the pre-live-migration step fail, while
+ rollback_live_migration_at_source reproduces the device re-attach failure.
+ """
+
+ api_major_version = 'v2.1'
+ microversion = 'latest'
+ ADMIN_API = True
+
+ def setUp(self):
+ super().setUp()
+
+ self.start_compute(
+ hostname='src',
+ host_info=fakelibvirt.HostInfo(
+ cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1))
+ self.start_compute(
+ hostname='dest',
+ host_info=fakelibvirt.HostInfo(
+ cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1))
+
+ self.src = self.computes['src']
+ self.dest = self.computes['dest']
+
+ def test_rollback_pre_live_migration(self):
+ self.server = self._create_server(host='src', networks='none')
+
+ lib_path = "nova.virt.libvirt.driver.LibvirtDriver"
+ funtion_path = "pre_live_migration"
+ mock_lib_path_prelive = "%s.%s" % (lib_path, funtion_path)
+ with mock.patch(mock_lib_path_prelive,
+ side_effect=nova_exceptions.DestinationDiskExists(
+ path='/var/non/existent')) as mlpp:
+ funtion_path = "rollback_live_migration_at_source"
+ mock_lib_path_rollback = "%s.%s" % (lib_path, funtion_path)
+ with mock.patch(mock_lib_path_rollback) as mlpr:
+ # Live migrate the instance to another host
+ self._live_migrate(self.server,
+ migration_expected_state='failed',
+ server_expected_state='ACTIVE')
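+ # With the fix, a failure in pre_live_migration must not trigger the
+ # source-side rollback that re-attaches VIFs; only the mocked
+ # pre_live_migration call is expected below.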
+ mlpr.assert_not_called()
+ mlpp.assert_called_once()
diff --git a/nova/tests/functional/regressions/test_bug_1951656.py b/nova/tests/functional/regressions/test_bug_1951656.py
new file mode 100644
index 0000000000..d705ff6fe3
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1951656.py
@@ -0,0 +1,73 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import uuidutils
+
+
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import test_vgpu
+from nova.virt.libvirt import utils as libvirt_utils
+
+
+class VGPUTestsLibvirt7_7(test_vgpu.VGPUTestBase):
+
+ def _create_mdev(self, physical_device, mdev_type, uuid=None):
+ # We need to fake the newly created sysfs object by adding a new
+ # FakeMdevDevice to the existing persisted Connection object, so
+ # that when we ask for the existing mdevs we see it.
+ if not uuid:
+ uuid = uuidutils.generate_uuid()
+ mdev_name = libvirt_utils.mdev_uuid2name(uuid)
+ libvirt_parent = self.pci2libvirt_address(physical_device)
+
+ # Libvirt 7.7 now creates mdevs with a parent_addr suffix.
+ new_mdev_name = '_'.join([mdev_name, libvirt_parent])
+
+ # Here we get the right compute thanks to self._current_host, which
+ # was modified just before.
+ connection = self.computes[
+ self._current_host].driver._host.get_connection()
+ connection.mdev_info.devices.update(
+ {mdev_name: fakelibvirt.FakeMdevDevice(dev_name=new_mdev_name,
+ type_id=mdev_type,
+ parent=libvirt_parent)})
+ return uuid
+
+ def setUp(self):
+ super(VGPUTestsLibvirt7_7, self).setUp()
+ extra_spec = {"resources:VGPU": "1"}
+ self.flavor = self._create_flavor(extra_spec=extra_spec)
+
+ # Start compute1 supporting only nvidia-11
+ self.flags(
+ enabled_mdev_types=fakelibvirt.NVIDIA_11_VGPU_TYPE,
+ group='devices')
+
+ self.compute1 = self.start_compute_with_vgpu('host1')
+
+ def test_create_servers_with_vgpu(self):
+
+ # Create a single instance against a specific compute node.
+ self._create_server(
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ flavor_id=self.flavor, host=self.compute1.host,
+ networks='auto', expected_state='ACTIVE')
+
+ self.assert_mdev_usage(self.compute1, expected_amount=1)
+
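+ # Boot a second guest on the same host and check that a second mdev,
+ # named with the libvirt 7.7 parent_addr suffix, is accounted for.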
+ self._create_server(
+ image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
+ flavor_id=self.flavor, host=self.compute1.host,
+ networks='auto', expected_state='ACTIVE')
+
+ self.assert_mdev_usage(self.compute1, expected_amount=2)
diff --git a/nova/tests/functional/regressions/test_bug_1978983.py b/nova/tests/functional/regressions/test_bug_1978983.py
new file mode 100644
index 0000000000..51465900da
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1978983.py
@@ -0,0 +1,71 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional import fixtures as func_fixtures
+from nova.tests.functional import integrated_helpers
+
+
+class EvacuateServerWithTaskState(
+ test.TestCase, integrated_helpers.InstanceHelperMixin,
+):
+ """Regression test for bug 1978983
+ If the instance task state is powering-off, or otherwise not None,
+ the instance should still be allowed to evacuate.
+ """
+
+ def setUp(self):
+ super().setUp()
+ # Stub out external dependencies.
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+ self.useFixture(nova_fixtures.HostNameWeigherFixture())
+
+ # Start nova controller services.
+ self.start_service('conductor')
+ self.start_service('scheduler')
+
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+ self.api = api_fixture.admin_api
+ self.api.microversion = 'latest'
+
+ self.src = self._start_compute(host='host1')
+ self.dest = self._start_compute(host='host2')
+
+ def test_evacuate_instance(self):
+ """Evacuating a server
+ """
+ server = self._create_server(networks=[])
+
+ server = self._wait_for_state_change(server, 'ACTIVE')
+ self.assertEqual(self.src.host, server['OS-EXT-SRV-ATTR:host'])
+
+ # stop host1 compute service
+ self.src.stop()
+ self.api.put_service_force_down(self.src.service_ref.uuid, True)
+
+ # poweroff instance
+ self._stop_server(server, wait_for_stop=False)
+ server = self._wait_for_server_parameter(
+ server, {'OS-EXT-STS:task_state': 'powering-off'})
+
+ # evacuate instance
+ server = self._evacuate_server(
+ server, expected_host=self.dest.host
+ )
+ self.assertEqual(self.dest.host, server['OS-EXT-SRV-ATTR:host'])
diff --git a/nova/tests/functional/regressions/test_bug_1980720.py b/nova/tests/functional/regressions/test_bug_1980720.py
new file mode 100644
index 0000000000..ad2e6e6ba2
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1980720.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2022 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base
+from unittest import mock
+
+
+class LibvirtDriverTests(
+ base.ServersTestBase,
+ integrated_helpers.InstanceHelperMixin
+):
+ api_major_version = 'v2.1'
+ microversion = 'latest'
+
+ def setUp(self):
+ super(LibvirtDriverTests, self).setUp()
+ self.cinder = self.useFixture(nova_fixtures.CinderFixture(self))
+ self.start_compute()
+
+ def _create_server_with_block_device(self):
+ server_request = self._build_server(
+ networks=[],
+ )
+ # removing imageRef is required as we want
+ # to boot from volume
+ server_request.pop('imageRef')
+ server_request['block_device_mapping_v2'] = [{
+ 'boot_index': 0,
+ 'source_type': 'volume',
+ 'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL_QUIESCE,
+ 'destination_type': 'volume'}]
+
+ server = self.api.post_server({
+ 'server': server_request,
+ })
+ self._wait_for_state_change(server, 'ACTIVE')
+ return server
+
+ def test_snapshot_quiesce_fail(self):
+ server = self._create_server_with_block_device()
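+ # Simulate the guest agent being unresponsive during fsFreeze so the
+ # snapshot quiesce fails; the API should surface this as a 409.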
+ with mock.patch.object(
+ nova_fixtures.libvirt.Domain, 'fsFreeze'
+ ) as mock_obj:
+ ex = nova_fixtures.libvirt.libvirtError("Error")
+ ex.err = (nova_fixtures.libvirt.VIR_ERR_AGENT_UNRESPONSIVE,)
+
+ mock_obj.side_effect = ex
+ excep = self.assertRaises(
+ client.OpenStackApiException,
+ self._snapshot_server, server, "snapshot-1"
+ )
+ self.assertEqual(409, excep.response.status_code)
diff --git a/nova/tests/functional/regressions/test_bug_1983753.py b/nova/tests/functional/regressions/test_bug_1983753.py
new file mode 100644
index 0000000000..3658d6aeb8
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1983753.py
@@ -0,0 +1,177 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import fixtures
+
+from oslo_serialization import jsonutils
+
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional.libvirt import test_pci_sriov_servers
+
+
+class TestPciResize(test_pci_sriov_servers._PCIServersTestBase):
+ # these tests use multiple different configs so the whitelist is set by
+ # each testcase individually
+ PCI_DEVICE_SPEC = []
+ PCI_ALIAS = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ "name": "a-pci-dev",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ "device_type": "type-PF",
+ "name": "a-pf",
+ },
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ "device_type": "type-VF",
+ "name": "a-vf",
+ },
+ ]
+ ]
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.virt.libvirt.driver.LibvirtDriver.'
+ 'migrate_disk_and_power_off',
+ return_value='{}'
+ )
+ )
+ # These tests should not depend on the host's sysfs
+ self.useFixture(
+ fixtures.MockPatch('nova.pci.utils.is_physical_function'))
+ self.useFixture(
+ fixtures.MockPatch(
+ 'nova.pci.utils.get_function_by_ifname',
+ return_value=(None, False)
+ )
+ )
+
+ def _test_resize_from_two_devs_to_one_dev(self, num_pci_on_dest):
+ # The fake libvirt will emulate on the host:
+ # * two type-PCI in slot 0, 1
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=2)
+ # the config matches the PCI dev
+ compute1_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ ]
+ self.flags(group='pci', passthrough_whitelist=compute1_device_spec)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+
+ # create a server that requests two PCI devs
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:2"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=2, free=0)
+
+ # start another compute with a different number of PCI devs available
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pci=num_pci_on_dest)
+ # the config matches the PCI dev
+ compute2_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PCI_PROD_ID,
+ },
+ ]
+ ]
+ self.flags(group='pci', passthrough_whitelist=compute2_device_spec)
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts(
+ "compute2", total=num_pci_on_dest, free=num_pci_on_dest)
+
+ # resize the server to request only one PCI dev instead of the current
+ # two. This should fit on compute2, which has at least one dev
+ extra_spec = {"pci_passthrough:alias": "a-pci-dev:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ self._resize_server(server, flavor_id=flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts("compute1", total=2, free=2)
+ self.assertPCIDeviceCounts(
+ "compute2", total=num_pci_on_dest, free=num_pci_on_dest - 1)
+
+ def test_resize_from_two_devs_to_one_dev_dest_has_two_devs(self):
+ self._test_resize_from_two_devs_to_one_dev(num_pci_on_dest=2)
+
+ def test_resize_from_two_devs_to_one_dev_dest_has_one_dev(self):
+ self._test_resize_from_two_devs_to_one_dev(num_pci_on_dest=1)
+
+ def test_resize_from_vf_to_pf(self):
+ # The fake libvirt will emulate on the host:
+ # * one type-PF in slot 0 with one VF
+ compute1_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=1, num_vfs=1)
+ # the config matches only the VF
+ compute1_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.VF_PROD_ID,
+ },
+ ]
+ ]
+ self.flags(group='pci', passthrough_whitelist=compute1_device_spec)
+ self.start_compute(hostname="compute1", pci_info=compute1_pci_info)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+
+ # create a server that requests one VF
+ extra_spec = {"pci_passthrough:alias": "a-vf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ server = self._create_server(flavor_id=flavor_id, networks=[])
+ self.assertPCIDeviceCounts("compute1", total=1, free=0)
+
+ # start another compute with a single PF dev available
+ # The fake libvirt will emulate on the host:
+ # * one type-PF in slot 0 with 1 VF
+ compute2_pci_info = fakelibvirt.HostPCIDevicesInfo(
+ num_pfs=1, num_vfs=1)
+ # the config matches the PF dev but not the VF
+ compute2_device_spec = [
+ jsonutils.dumps(x)
+ for x in [
+ {
+ "vendor_id": fakelibvirt.PCI_VEND_ID,
+ "product_id": fakelibvirt.PF_PROD_ID,
+ },
+ ]
+ ]
+ self.flags(group='pci', passthrough_whitelist=compute2_device_spec)
+ self.start_compute(hostname="compute2", pci_info=compute2_pci_info)
+ self.assertPCIDeviceCounts("compute2", total=1, free=1)
+
+ # resize the server to request one PF dev instead of the current VF
+ # dev. This should fit on compute2, which has exactly one PF dev.
+ extra_spec = {"pci_passthrough:alias": "a-pf:1"}
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ self._resize_server(server, flavor_id=flavor_id)
+ self._confirm_resize(server)
+ self.assertPCIDeviceCounts("compute1", total=1, free=1)
+ self.assertPCIDeviceCounts("compute2", total=1, free=0)
diff --git a/nova/tests/functional/regressions/test_bug_1995153.py b/nova/tests/functional/regressions/test_bug_1995153.py
new file mode 100644
index 0000000000..f4e61d06df
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1995153.py
@@ -0,0 +1,107 @@
+# Copyright (C) 2023 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import fixtures
+from unittest import mock
+
+from oslo_serialization import jsonutils
+from oslo_utils import units
+
+from nova.objects import fields
+from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.functional import integrated_helpers
+from nova.tests.functional.libvirt import base
+
+
+class Bug1995153RegressionTest(
+ base.ServersTestBase,
+ integrated_helpers.InstanceHelperMixin
+):
+
+ ADDITIONAL_FILTERS = ['NUMATopologyFilter', 'PciPassthroughFilter']
+
+ ALIAS_NAME = 'a1'
+ PCI_DEVICE_SPEC = [jsonutils.dumps(
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ }
+ )]
+ # We set the numa_affinity policy to required to ensure strict affinity
+ # between PCI devices and the guest CPU and memory is enforced.
+ PCI_ALIAS = [jsonutils.dumps(
+ {
+ 'vendor_id': fakelibvirt.PCI_VEND_ID,
+ 'product_id': fakelibvirt.PCI_PROD_ID,
+ 'name': ALIAS_NAME,
+ 'device_type': fields.PciDeviceType.STANDARD,
+ 'numa_policy': fields.PCINUMAAffinityPolicy.REQUIRED,
+ }
+ )]
+
+ def setUp(self):
+ super(Bug1995153RegressionTest, self).setUp()
+ self.flags(
+ device_spec=self.PCI_DEVICE_SPEC,
+ alias=self.PCI_ALIAS,
+ group='pci'
+ )
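+ # Wrap PciPassthroughFilter.host_passes so the test can later assert
+ # that the filter was actually invoked during scheduling.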
+ host_manager = self.scheduler.manager.host_manager
+ pci_filter_class = host_manager.filter_cls_map['PciPassthroughFilter']
+ host_pass_mock = mock.Mock(wraps=pci_filter_class().host_passes)
+ self.mock_filter = self.useFixture(fixtures.MockPatch(
+ 'nova.scheduler.filters.pci_passthrough_filter'
+ '.PciPassthroughFilter.host_passes',
+ side_effect=host_pass_mock)).mock
+
+ def test_socket_policy_bug_1995153(self):
+ """Previously, the numa_usage_from_instance_numa() method in
+ hardware.py saved the host NUMATopology object with NUMACells that have
+ no `socket` set. This was an omission in the original implementation of
+ the `socket` PCI NUMA affinity policy. The consequence was that any
+ code path that called into numa_usage_from_instance_numa() would
+ clobber the host NUMA topology in the database with a socket-less
+ version. Booting an instance with a NUMA topology would do that, for
+ example. If then a second instance was booted with the `socket` PCI
+ NUMA affinity policy, it would read the socket-less host NUMATopology
+ from the database, and error out with a NotImplementedError. This was
+ bug 1995153. Demonstrate that this is fixed.
+ """
+ host_info = fakelibvirt.HostInfo(
+ cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2,
+ kB_mem=(16 * units.Gi) // units.Ki)
+ self.flags(cpu_dedicated_set='0-3', group='compute')
+ pci_info = fakelibvirt.HostPCIDevicesInfo(num_pci=1, numa_node=1)
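+ # Place the PCI device on NUMA node 1 so that PCI NUMA affinity is
+ # actually exercised when scheduling with the `socket` policy.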
+
+ self.start_compute(host_info=host_info, pci_info=pci_info)
+
+ extra_spec = {
+ 'hw:cpu_policy': 'dedicated',
+ 'pci_passthrough:alias': '%s:1' % self.ALIAS_NAME,
+ 'hw:pci_numa_affinity_policy': 'socket'
+ }
+ # Boot a first instance with a guest NUMA topology to run the
+ # numa_usage_from_instance_numa() and update the host NUMATopology in
+ # the database.
+ self._create_server(
+ flavor_id=self._create_flavor(
+ extra_spec={'hw:cpu_policy': 'dedicated'}))
+
+ # Boot an instance with the `socket` PCI NUMA affinity policy and
+ # assert that it boots correctly now.
+ flavor_id = self._create_flavor(extra_spec=extra_spec)
+ self._create_server(flavor_id=flavor_id)
+ self.assertTrue(self.mock_filter.called)
diff --git a/nova/tests/functional/test_aggregates.py b/nova/tests/functional/test_aggregates.py
index 8dfb345578..1ffa3ada92 100644
--- a/nova/tests/functional/test_aggregates.py
+++ b/nova/tests/functional/test_aggregates.py
@@ -935,11 +935,11 @@ class TestAggregateMultiTenancyIsolationFilter(
# Start nova services.
self.start_service('conductor')
- self.admin_api = self.useFixture(
- nova_fixtures.OSAPIFixture(api_version='v2.1')).admin_api
- self.api = self.useFixture(
- nova_fixtures.OSAPIFixture(api_version='v2.1',
- project_id=uuids.non_admin)).api
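+ # Reuse a single OSAPIFixture and point the non-admin client at a
+ # different project instead of spawning a second API fixture.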
+ api_fixture = self.useFixture(
+ nova_fixtures.OSAPIFixture(api_version='v2.1'))
+ self.admin_api = api_fixture.admin_api
+ self.api = api_fixture.api
+ self.api.project_id = uuids.non_admin
# Add the AggregateMultiTenancyIsolation to the list of enabled
# filters since it is not enabled by default.
enabled_filters = CONF.filter_scheduler.enabled_filters
@@ -1037,15 +1037,15 @@ class AggregateMultiTenancyIsolationColdMigrateTest(
self.glance = self.useFixture(nova_fixtures.GlanceFixture(self))
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
- # Intentionally keep these separate since we want to create the
- # server with the non-admin user in a different project.
- admin_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ # Intentionally define a different project id for the two clients since
+ # we want to create the server with the non-admin user in a different
+ # project.
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1', project_id=uuids.admin_project))
- self.admin_api = admin_api_fixture.admin_api
+ self.admin_api = api_fixture.admin_api
self.admin_api.microversion = 'latest'
- user_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1', project_id=uuids.user_project))
- self.api = user_api_fixture.api
+ self.api = api_fixture.api
+ self.api.project_id = uuids.user_project
self.api.microversion = 'latest'
self.start_service('conductor')
diff --git a/nova/tests/functional/test_availability_zones.py b/nova/tests/functional/test_availability_zones.py
index 991f86148d..c376423303 100644
--- a/nova/tests/functional/test_availability_zones.py
+++ b/nova/tests/functional/test_availability_zones.py
@@ -10,12 +10,16 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from nova.api.openstack.compute import hosts
+from nova.compute import instance_actions
from nova import context
from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client as api_client
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
+from nova.tests.unit.api.openstack import fakes
class TestAvailabilityZoneScheduling(
@@ -36,6 +40,9 @@ class TestAvailabilityZoneScheduling(
self.api = api_fixture.admin_api
self.api.microversion = 'latest'
+ self.controller = hosts.HostController()
+ self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
+
self.start_service('conductor')
self.start_service('scheduler')
@@ -68,18 +75,18 @@ class TestAvailabilityZoneScheduling(
self.api.api_post(
'/os-aggregates/%s/action' % aggregate['id'], add_host_body)
- def _create_server(self, name):
+ def _create_server(self, name, zone=None):
# Create a server, it doesn't matter which host it ends up in.
server = super(TestAvailabilityZoneScheduling, self)._create_server(
flavor_id=self.flavor1,
- networks='none',)
- original_host = server['OS-EXT-SRV-ATTR:host']
- # Assert the server has the AZ set (not None or 'nova').
- expected_zone = 'zone1' if original_host == 'host1' else 'zone2'
- self.assertEqual(expected_zone, server['OS-EXT-AZ:availability_zone'])
+ networks='none',
+ az=zone,
+ )
return server
- def _assert_instance_az(self, server, expected_zone):
+ def _assert_instance_az_and_host(
+ self, server, expected_zone, expected_host=None):
+ # Check AZ
# Check the API.
self.assertEqual(expected_zone, server['OS-EXT-AZ:availability_zone'])
# Check the DB.
@@ -88,6 +95,51 @@ class TestAvailabilityZoneScheduling(
ctxt, self.cell_mappings[test.CELL1_NAME]) as cctxt:
instance = objects.Instance.get_by_uuid(cctxt, server['id'])
self.assertEqual(expected_zone, instance.availability_zone)
+ # Check host
+ if expected_host:
+ self.assertEqual(expected_host, server['OS-EXT-SRV-ATTR:host'])
+
+ def _assert_request_spec_az(self, ctxt, server, az):
+ request_spec = objects.RequestSpec.get_by_instance_uuid(
+ ctxt, server['id'])
+ self.assertEqual(request_spec.availability_zone, az)
+
+ def _assert_server_with_az_unshelved_to_specified_az(self, server, az):
+ """Ensure a server with an az constraints is unshelved in the
+ corresponding az.
+ """
+ host_to_disable = 'host1' if az == 'zone1' else 'host2'
+ self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
+ compute_service_id = self.api.get_services(
+ host=host_to_disable, binary='nova-compute')[0]['id']
+ self.api.put_service(compute_service_id, {'status': 'disabled'})
+
+ req = {
+ 'unshelve': None
+ }
+
+ self.api.post_server_action(server['id'], req)
+
+ server = self._wait_for_action_fail_completion(
+ server, instance_actions.UNSHELVE, 'schedule_instances')
+ self.assertIn('Error', server['result'])
+ self.assertIn('No valid host', server['details'])
+
+ def _shelve_unshelve_server(self, ctxt, server, req):
+ self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
+
+ self.api.post_server_action(server['id'], req)
+ server = self._wait_for_server_parameter(
+ server,
+ {'status': 'ACTIVE', },
+ )
+ return self.api.get_server(server['id'])
+
+ def other_az_than(self, az):
+ return 'zone2' if az == 'zone1' else 'zone1'
+
+ def other_host_than(self, host):
+ return 'host2' if host == 'host1' else 'host1'
def test_live_migrate_implicit_az(self):
"""Tests live migration of an instance with an implicit AZ.
@@ -111,7 +163,8 @@ class TestAvailabilityZoneScheduling(
still not restricted to its current zone even if it says it is in one.
"""
server = self._create_server('test_live_migrate_implicit_az')
- original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+ expected_zone = self.other_az_than(original_az)
# Attempt to live migrate the instance; again, we don't specify a host
# because there are only two hosts so the scheduler would only be able
@@ -132,8 +185,379 @@ class TestAvailabilityZoneScheduling(
# the database because the API will return the AZ from the host
# aggregate if instance.host is not None.
server = self.api.get_server(server['id'])
- expected_zone = 'zone2' if original_host == 'host1' else 'zone1'
- self._assert_instance_az(server, expected_zone)
+ self._assert_instance_az_and_host(server, expected_zone)
+
+ def test_create_server(self):
+ """Create a server without an AZ constraint and make sure asking a new
+ request spec will not have the request_spec.availability_zone set.
+ """
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+ self._assert_request_spec_az(ctxt, server, None)
+
+ def test_create_server_to_zone(self):
+ """Create a server with an AZ constraint and make sure asking a new
+ request spec will have the request_spec.availability_zone to the
+ required zone.
+ """
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone2')
+
+ server = self.api.get_server(server['id'])
+ self._assert_instance_az_and_host(server, 'zone2')
+ self._assert_request_spec_az(ctxt, server, 'zone2')
+
+ def test_cold_migrate_cross_az(self):
+ """Test a cold migration cross AZ.
+ """
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+ expected_host = self.other_host_than(original_host)
+ expected_zone = self.other_az_than(original_az)
+
+ self._migrate_server(server)
+ self._confirm_resize(server)
+
+ server = self.api.get_server(server['id'])
+ self._assert_instance_az_and_host(server, expected_zone, expected_host)
+
+# The next tests check the following behavior
+# +----------+---------------------------+-------+----------------------------+
+# | Boot | Unshelve after offload AZ | Host | Result |
+# +==========+===========================+=======+============================+
+# | No AZ | No AZ or AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | Host1 | Verify that host1 in AZ1, |
+# | | | | or (1). Schedule to |
+# | | | | host1, reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | Host1 | If host1 is in AZ1, |
+# | | | | then schedule to host1, |
+# | | | | reqspec.AZ="AZ1", otherwise|
+# | | | | reject the request (1) |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | No | Schedule to AZ2, |
+# | | | | reqspec.AZ="AZ2" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | Host1 | If host1 in AZ2 then |
+# | | | | schedule to host1, |
+# | | | | reqspec.AZ="AZ2", |
+# | | | | otherwise reject (1) |
+# +----------+---------------------------+-------+----------------------------+
+#
+# (1) Check at the api and return an error.
+#
+#
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+
+ def test_unshelve_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+
+ req = {
+ 'unshelve': None
+ }
+
+ self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_request_spec_az(ctxt, server, None)
+
+ def test_unshelve_unpin_az_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+
+ req = {
+ 'unshelve': {'availability_zone': None}
+ }
+
+ self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_request_spec_az(ctxt, server, None)
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_host_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+ dest_hostname = self.other_host_than(original_host)
+ expected_zone = self.other_az_than(original_az)
+
+ req = {
+ 'unshelve': {'host': dest_hostname}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, expected_zone, dest_hostname)
+ self._assert_request_spec_az(ctxt, server, None)
+
+ def test_unshelve_to_host_and_unpin_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+ dest_hostname = self.other_host_than(original_host)
+ expected_zone = self.other_az_than(original_az)
+
+ req = {
+ 'unshelve': {
+ 'host': dest_hostname,
+ 'availability_zone': None,
+ }
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, expected_zone, dest_hostname)
+ self._assert_request_spec_az(ctxt, server, None)
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_az_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+ dest_hostname = 'host2' if original_host == 'host1' else 'host1'
+ dest_az = self.other_az_than(original_az)
+
+ req = {
+ 'unshelve': {'availability_zone': dest_az}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, dest_az, dest_hostname)
+ self._assert_request_spec_az(ctxt, server, dest_az)
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, dest_az)
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | Host1 | Verify that host1 in AZ1, |
+# | | | | or (3). Schedule to |
+# | | | | host1, reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_az_and_host_server_without_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+ dest_hostname = 'host2' if original_host == 'host1' else 'host1'
+ dest_az = self.other_az_than(original_az)
+
+ req = {
+ 'unshelve': {'host': dest_hostname, 'availability_zone': dest_az}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, dest_az, dest_hostname)
+ self._assert_request_spec_az(ctxt, server, dest_az)
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, dest_az)
+
+ def test_unshelve_to_wrong_az_and_host_server_without_az_constraint(self):
+ server = self._create_server('server01')
+ original_host = server['OS-EXT-SRV-ATTR:host']
+ original_az = server['OS-EXT-AZ:availability_zone']
+ dest_hostname = 'host2' if original_host == 'host1' else 'host1'
+
+ req = {
+ 'unshelve': {'host': dest_hostname,
+ 'availability_zone': original_az}
+ }
+
+ self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
+ exc = self.assertRaises(
+ api_client.OpenStackApiException,
+ self.api.post_server_action,
+ server['id'],
+ req
+ )
+
+ self.assertEqual(409, exc.response.status_code)
+ self.assertIn(
+ 'Host \\\"{}\\\" is not in the availability zone \\\"{}\\\".'
+ .format(dest_hostname, original_az),
+ exc.response.text
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_a_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone2')
+
+ req = {
+ 'unshelve': None
+ }
+
+ self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_request_spec_az(ctxt, server, 'zone2')
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, 'zone2')
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_unpin_az_a_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone2')
+
+ req = {
+ 'unshelve': {'availability_zone': None}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_request_spec_az(ctxt, server, None)
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | Host1 | If host1 is in AZ1, |
+# | | | | then schedule to host1, |
+# | | | | reqspec.AZ="AZ1", otherwise|
+# | | | | reject the request (3) |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_host_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'host': 'host1'}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, 'zone1', 'host1')
+ self._assert_request_spec_az(ctxt, server, 'zone1')
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, 'zone1')
+
+ def test_unshelve_to_host_wrong_az_server_with_az_constraint(self):
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'host': 'host2'}
+ }
+
+ self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
+ exc = self.assertRaises(
+ api_client.OpenStackApiException,
+ self.api.post_server_action,
+ server['id'],
+ req
+ )
+
+ self.assertEqual(409, exc.response.status_code)
+ self.assertIn(
+ 'Host \\\"host2\\\" is not in the availability '
+ 'zone \\\"zone1\\\".',
+ exc.response.text
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_host_and_unpin_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'host': 'host2',
+ 'availability_zone': None,
+ }
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, 'zone2', 'host2')
+ self._assert_request_spec_az(ctxt, server, None)
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | No | Schedule to AZ2, |
+# | | | | reqspec.AZ="AZ2" |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_az_a_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'availability_zone': 'zone2'}
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, 'zone2', 'host2')
+ self._assert_request_spec_az(ctxt, server, 'zone2')
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, 'zone2')
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | Host1 | If host1 in AZ2 then |
+# | | | | schedule to host1, |
+# | | | | reqspec.AZ="AZ2", |
+# | | | | otherwise reject (3) |
+# +----------+---------------------------+-------+----------------------------+
+ def test_unshelve_to_host_and_az_a_server_with_az_constraint(self):
+ ctxt = context.get_admin_context()
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'host': 'host2',
+ 'availability_zone': 'zone2',
+ }
+ }
+
+ server = self._shelve_unshelve_server(ctxt, server, req)
+ self._assert_instance_az_and_host(server, 'zone2', 'host2')
+ self._assert_request_spec_az(ctxt, server, 'zone2')
+ self._assert_server_with_az_unshelved_to_specified_az(
+ server, 'zone2')
+
+ def test_unshelve_to_host_and_wrong_az_a_server_with_az_constraint(self):
+ server = self._create_server('server01', 'zone1')
+
+ req = {
+ 'unshelve': {'host': 'host2',
+ 'availability_zone': 'zone1',
+ }
+ }
+
+ self._shelve_server(server, expected_state='SHELVED_OFFLOADED')
+ exc = self.assertRaises(
+ api_client.OpenStackApiException,
+ self.api.post_server_action,
+ server['id'],
+ req
+ )
+
+ self.assertEqual(409, exc.response.status_code)
+ self.assertIn(
+ 'Host \\\"host2\\\" is not in the availability '
+ 'zone \\\"zone1\\\".',
+ exc.response.text
+ )
def test_resize_revert_across_azs(self):
"""Creates two compute service hosts in separate AZs. Creates a server
@@ -152,9 +576,9 @@ class TestAvailabilityZoneScheduling(
# Now the server should be in the other AZ.
new_zone = 'zone2' if original_host == 'host1' else 'zone1'
- self._assert_instance_az(server, new_zone)
+ self._assert_instance_az_and_host(server, new_zone)
# Revert the resize and the server should be back in the original AZ.
self.api.post_server_action(server['id'], {'revertResize': None})
server = self._wait_for_state_change(server, 'ACTIVE')
- self._assert_instance_az(server, original_az)
+ self._assert_instance_az_and_host(server, original_az)
diff --git a/nova/tests/functional/test_boot_from_volume.py b/nova/tests/functional/test_boot_from_volume.py
index 45555b002d..6396954bf4 100644
--- a/nova/tests/functional/test_boot_from_volume.py
+++ b/nova/tests/functional/test_boot_from_volume.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+import fixtures
+from unittest import mock
from nova import context
from nova import objects
@@ -50,6 +51,9 @@ class BootFromVolumeTest(integrated_helpers._IntegratedTestBase):
self.flags(allow_resize_to_same_host=True)
super(BootFromVolumeTest, self).setUp()
self.admin_api = self.api_fixture.admin_api
+ self.useFixture(nova_fixtures.CinderFixture(self))
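+ # Avoid blocking on external instance events that the fixtures used
+ # here never deliver.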
+ self.useFixture(fixtures.MockPatch(
+ 'nova.compute.manager.ComputeVirtAPI.wait_for_instance_event'))
def test_boot_from_volume_larger_than_local_gb(self):
# Verify no local disk is being used currently
@@ -138,6 +142,42 @@ class BootFromVolumeTest(integrated_helpers._IntegratedTestBase):
image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
post_data = {'rebuild': {'imageRef': image_uuid}}
self.api.post_server_action(server_id, post_data)
+
+ def test_rebuild_volume_backed_larger_than_local_gb(self):
+ # Verify no local disk is being used currently
+ self._verify_zero_local_gb_used()
+
+ # Create a flavor with a disk larger than the available host local disk
+ flavor_id = self._create_flavor(memory_mb=64, vcpu=1, disk=8192,
+ ephemeral=0)
+
+ # Boot a server with a flavor disk larger than the available local
+ # disk. It should succeed for boot from volume.
+ server = self._build_server(image_uuid='', flavor_id=flavor_id)
+ volume_uuid = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
+ bdm = {'boot_index': 0,
+ 'uuid': volume_uuid,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'}
+ server['block_device_mapping_v2'] = [bdm]
+ created_server = self.api.post_server({"server": server})
+ server_id = created_server['id']
+ self._wait_for_state_change(created_server, 'ACTIVE')
+
+ # Check that hypervisor local disk reporting is still 0
+ self._verify_zero_local_gb_used()
+ # Check that instance has not been saved with 0 root_gb
+ self._verify_instance_flavor_not_zero(server_id)
+ # Check that request spec has not been saved with 0 root_gb
+ self._verify_request_spec_flavor_not_zero(server_id)
+
+ # Rebuild
+ # The image_uuid is from CinderFixture for the
+ # volume representing IMAGE_BACKED_VOL.
+ self.api.microversion = '2.93'
+ image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
+ post_data = {'rebuild': {'imageRef': image_uuid}}
+ self.api.post_server_action(server_id, post_data)
self._wait_for_state_change(created_server, 'ACTIVE')
# Check that hypervisor local disk reporting is still 0
diff --git a/nova/tests/functional/test_cold_migrate.py b/nova/tests/functional/test_cold_migrate.py
index e07820ba2a..b78db14a14 100644
--- a/nova/tests/functional/test_cold_migrate.py
+++ b/nova/tests/functional/test_cold_migrate.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.compute import api as compute_api
from nova import context as nova_context
diff --git a/nova/tests/functional/test_compute_mgr.py b/nova/tests/functional/test_compute_mgr.py
index 38b7f9d7a6..d8892843b4 100644
--- a/nova/tests/functional/test_compute_mgr.py
+++ b/nova/tests/functional/test_compute_mgr.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova import context
from nova.network import model as network_model
diff --git a/nova/tests/functional/test_cross_cell_migrate.py b/nova/tests/functional/test_cross_cell_migrate.py
index a1186ca7a5..92cf805945 100644
--- a/nova/tests/functional/test_cross_cell_migrate.py
+++ b/nova/tests/functional/test_cross_cell_migrate.py
@@ -11,7 +11,7 @@
# under the License.
import datetime
-import mock
+from unittest import mock
from oslo_db import exception as oslo_db_exc
from oslo_utils import fixture as osloutils_fixture
diff --git a/nova/tests/functional/test_ephemeral_encryption.py b/nova/tests/functional/test_ephemeral_encryption.py
new file mode 100644
index 0000000000..ba5e411902
--- /dev/null
+++ b/nova/tests/functional/test_ephemeral_encryption.py
@@ -0,0 +1,381 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils.fixture import uuidsentinel
+
+from nova import context
+from nova import objects
+from nova.tests.functional import integrated_helpers
+
+
+class _TestEphemeralEncryptionBase(
+ integrated_helpers.ProviderUsageBaseTestCase
+):
+ # NOTE(lyarwood): A dict of test flavors defined per test class,
+ # keyed by flavor name and providing an additional dict containing an 'id'
+ # and optional 'extra_specs' dict. For example:
+ # {
+ # 'name': {
+ # 'id': uuidsentinel.flavor_id
+ # 'extra_specs': {
+ # 'hw:foo': 'bar'
+ # }
+ # }
+ # }
+ flavors = {}
+
+ def setUp(self):
+ super().setUp()
+
+ self.ctxt = context.get_admin_context()
+
+ # Create the required test flavors
+ for name, details in self.flavors.items():
+ flavor = self.admin_api.post_flavor({
+ 'flavor': {
+ 'name': name,
+ 'id': details['id'],
+ 'ram': 512,
+ 'vcpus': 1,
+ 'disk': 1024,
+ }
+ })
+ # Add the optional extra_specs
+ if details.get('extra_specs'):
+ self.admin_api.post_extra_spec(
+ flavor['id'], {'extra_specs': details['extra_specs']})
+
+ # We only need a single compute for these tests
+ self._start_compute(host='compute1')
+
+ def _assert_ephemeral_encryption_enabled(
+ self, server_id, encryption_format=None):
+ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.ctxt, server_id)
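+ # Only the local disks (not volumes) are expected to carry the
+ # ephemeral encryption attributes.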
+ for bdm in bdms:
+ if bdm.is_local:
+ self.assertTrue(bdm.encrypted)
+ if encryption_format:
+ self.assertEqual(
+ encryption_format, bdm.encryption_format)
+
+ def _assert_ephemeral_encryption_disabled(self, server_id):
+ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ self.ctxt, server_id)
+ for bdm in bdms:
+ if bdm.is_local:
+ self.assertFalse(bdm.encrypted)
+
+
+class TestEphemeralEncryptionAvailable(_TestEphemeralEncryptionBase):
+
+ compute_driver = 'fake.EphEncryptionDriver'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ }
+
+ def test_image_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(server_id)
+
+ def test_image_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(server_id)
+
+ def test_flavor_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_and_image_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(server_id)
+
+ def test_flavor_and_image_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_requested_and_image_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+ def test_flavor_disabled_and_image_requested(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+
+class TestEphemeralEncryptionUnavailable(_TestEphemeralEncryptionBase):
+
+ compute_driver = 'fake.MediumFakeDriver'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ }
+
+ def test_requested_but_unavailable(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_flavor,
+ image_uuid=uuidsentinel.eph_encryption,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_image_disabled(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_disabled,
+ flavor_id=uuidsentinel.no_eph_encryption,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+ def test_flavor_disabled(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_disabled_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_disabled(server_id)
+
+
+class TestEphemeralEncryptionLUKS(TestEphemeralEncryptionAvailable):
+
+ compute_driver = 'fake.EphEncryptionDriverLUKS'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ 'eph_encryption_luks': {
+ 'id': uuidsentinel.eph_encryption_luks_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'luks'
+ }
+ },
+ 'eph_encryption_plain': {
+ 'id': uuidsentinel.eph_encryption_plain_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'plain'
+ }
+ },
+
+ }
+
+ def test_image_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='luks')
+
+ def test_flavor_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='luks')
+
+ def test_flavor_and_image_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='luks')
+
+ def test_image_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_flavor_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_image_requested_luks_flavor_requested_plain(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+ def test_image_requested_plain_flavor_requested_luks(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+
+class TestEphemeralEncryptionPLAIN(_TestEphemeralEncryptionBase):
+
+ compute_driver = 'fake.EphEncryptionDriverPLAIN'
+ flavors = {
+ 'no_eph_encryption': {
+ 'id': uuidsentinel.no_eph_encryption
+ },
+ 'eph_encryption': {
+ 'id': uuidsentinel.eph_encryption_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True'
+ }
+ },
+ 'eph_encryption_disabled': {
+ 'id': uuidsentinel.eph_encryption_disabled_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'False'
+ }
+ },
+ 'eph_encryption_luks': {
+ 'id': uuidsentinel.eph_encryption_luks_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'luks'
+ }
+ },
+ 'eph_encryption_plain': {
+ 'id': uuidsentinel.eph_encryption_plain_flavor,
+ 'extra_specs': {
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'plain'
+ }
+ },
+ }
+
+ def test_image_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='plain')
+
+ def test_flavor_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='plain')
+
+ def test_flavor_and_image_requested_plain(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ networks=[])
+ server_id = self._assert_build_request_success(server_request)
+ self._assert_ephemeral_encryption_enabled(
+ server_id, encryption_format='plain')
+
+ def test_image_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.no_eph_encryption,
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_flavor_requested_luks(self):
+ server_request = self._build_server(
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ self._assert_build_request_schedule_failure(server_request)
+
+ def test_image_requested_plain_flavor_requested_luks(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_plain,
+ flavor_id=uuidsentinel.eph_encryption_luks_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
+
+ def test_image_requested_luks_flavor_requested_plain(self):
+ server_request = self._build_server(
+ image_uuid=uuidsentinel.eph_encryption_luks,
+ flavor_id=uuidsentinel.eph_encryption_plain_flavor,
+ networks=[])
+ self._assert_bad_build_request_error(server_request)
diff --git a/nova/tests/functional/test_images.py b/nova/tests/functional/test_images.py
index 340e883da9..e7e9f2a6c9 100644
--- a/nova/tests/functional/test_images.py
+++ b/nova/tests/functional/test_images.py
@@ -12,7 +12,6 @@
from oslo_utils.fixture import uuidsentinel as uuids
-from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
@@ -70,10 +69,9 @@ class ImagesTest(integrated_helpers._IntegratedTestBase):
server = self.api.post_server({"server": server})
server = self._wait_for_state_change(server, 'ACTIVE')
- # Create an admin API fixture with a unique project ID.
- admin_api = self.useFixture(
- nova_fixtures.OSAPIFixture(
- project_id=uuids.admin_project)).admin_api
+ # Use an admin API with a unique project ID.
+ admin_api = self.api_fixture.alternative_admin_api
+ admin_api.project_id = uuids.admin_project
# Create a snapshot of the server using the admin project.
name = 'admin-created-snapshot'
diff --git a/nova/tests/functional/test_instance_actions.py b/nova/tests/functional/test_instance_actions.py
index 054def5183..060133ce93 100644
--- a/nova/tests/functional/test_instance_actions.py
+++ b/nova/tests/functional/test_instance_actions.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_policy import policy as oslo_policy
from nova import exception
@@ -58,6 +59,15 @@ class InstanceActionsTestV221(InstanceActionsTestV21):
self.assertEqual('delete', actions[0]['action'])
self.assertEqual('create', actions[1]['action'])
+ def test_get_instance_actions_shelve_deleted(self):
+ server = self._create_server()
+ self._shelve_server(server)
+ self._delete_server(server)
+ actions = self.api.get_instance_actions(server['id'])
+ self.assertEqual('delete', actions[0]['action'])
+ self.assertEqual('shelve', actions[1]['action'])
+ self.assertEqual('create', actions[2]['action'])
+
class HypervisorError(Exception):
"""This is just used to make sure the exception type is in the events."""
diff --git a/nova/tests/functional/test_ip_allocation.py b/nova/tests/functional/test_ip_allocation.py
new file mode 100644
index 0000000000..a899641abe
--- /dev/null
+++ b/nova/tests/functional/test_ip_allocation.py
@@ -0,0 +1,53 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.functional import integrated_helpers
+
+
+class IPAllocationTests(integrated_helpers._IntegratedTestBase):
+ """Test behavior with various IP allocation policies.
+
+ This mainly exists to test the 'deferred' and 'none' policies.
+ """
+ compute_driver = 'fake.MediumFakeDriver'
+ microversion = 'latest'
+ ADMIN_API = True
+
+ def setUp(self):
+ super().setUp()
+
+ # add a port with an ip_allocation of 'none'
+ port = {
+ 'name': '',
+ 'description': '',
+ 'network_id': self.neutron.network_1['id'],
+ 'admin_state_up': True,
+ 'status': 'ACTIVE',
+ 'mac_address': 'ee:94:88:57:d5:7a',
+ # The ip_allocation is 'none', so fixed_ips should be empty
+ 'fixed_ips': [],
+ 'tenant_id': self.neutron.tenant_id,
+ 'project_id': self.neutron.tenant_id,
+ 'device_id': '',
+ 'binding:profile': {},
+ 'binding:vnic_type': 'normal',
+ 'binding:vif_type': 'ovs',
+ 'binding:vif_details': {},
+ 'ip_allocation': 'none',
+ }
+ created_port = self.neutron.create_port({'port': port})
+ self.port_id = created_port['port']['id']
+
+ def test_boot_with_none_policy(self):
+ """Create a port with the 'none' policy."""
+ self._create_server(
+ networks=[{'port': self.port_id}])
diff --git a/nova/tests/functional/test_monkey_patch.py b/nova/tests/functional/test_monkey_patch.py
deleted file mode 100644
index b471d333cf..0000000000
--- a/nova/tests/functional/test_monkey_patch.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 2020 Red Hat, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# NOTE(artom) This file exists to test eventlet monkeypatching. How and what
-# eventlet monkeypatches can be controlled by environment variables that
-# are processed by eventlet at import-time (for exmaple, EVENTLET_NO_GREENDNS).
-# Nova manages all of this in nova.monkey_patch. Therefore, nova.monkey_patch
-# must be the first thing to import eventlet. As nova.tests.functional.__init__
-# imports nova.monkey_patch, we're OK here.
-
-import socket
-import traceback
-
-from nova import test
-
-
-class TestMonkeyPatch(test.TestCase):
-
- def test_greendns_is_disabled(self):
- """Try to resolve a fake fqdn. If we see greendns mentioned in the
- traceback of the raised exception, it means we've not actually disabled
- greendns. See the TODO and NOTE in nova.monkey_patch to understand why
- greendns needs to be disabled.
- """
- raised = False
- try:
- socket.gethostbyname('goat.fake')
- except Exception:
- tb = traceback.format_exc()
- # NOTE(artom) If we've correctly disabled greendns, we expect the
- # traceback to not contain any reference to it.
- self.assertNotIn('greendns.py', tb)
- raised = True
- self.assertTrue(raised)
diff --git a/nova/tests/functional/test_nova_manage.py b/nova/tests/functional/test_nova_manage.py
index b6fddc84b2..888b43cea0 100644
--- a/nova/tests/functional/test_nova_manage.py
+++ b/nova/tests/functional/test_nova_manage.py
@@ -15,9 +15,9 @@ import collections
import datetime
from io import StringIO
import os.path
+from unittest import mock
import fixtures
-import mock
from neutronclient.common import exceptions as neutron_client_exc
import os_resource_classes as orc
from oslo_serialization import jsonutils
diff --git a/nova/tests/functional/test_policy.py b/nova/tests/functional/test_policy.py
index 28339bdd0f..bb72915336 100644
--- a/nova/tests/functional/test_policy.py
+++ b/nova/tests/functional/test_policy.py
@@ -60,7 +60,7 @@ class HostStatusPolicyTestCase(test.TestCase,
overwrite=False)
# Create a server as a normal non-admin user.
# In microversion 2.36 the /images proxy API was deprecated, so
- # specifiy the image_uuid directly.
+ # specify the image_uuid directly.
kwargs = {'image_uuid': self.image_uuid}
if networks:
# Starting with microversion 2.37 the networks field is required.
diff --git a/nova/tests/functional/test_report_client.py b/nova/tests/functional/test_report_client.py
index 7f9e3bbb79..a5da9f87b4 100644
--- a/nova/tests/functional/test_report_client.py
+++ b/nova/tests/functional/test_report_client.py
@@ -12,13 +12,14 @@
# under the License.
import copy
+from unittest import mock
+
import ddt
from keystoneauth1 import exceptions as kse
-import mock
+import microversion_parse
import os_resource_classes as orc
import os_traits as ot
from oslo_utils.fixture import uuidsentinel as uuids
-import pkg_resources
from nova.cmd import status
from nova.compute import provider_tree
@@ -39,9 +40,6 @@ from nova.tests.functional import fixtures as func_fixtures
CONF = conf.CONF
-CMD_STATUS_MIN_MICROVERSION = pkg_resources.parse_version(
- status.MIN_PLACEMENT_MICROVERSION)
-
class VersionCheckingReportClient(report.SchedulerReportClient):
"""This wrapper around SchedulerReportClient checks microversions for
@@ -57,14 +55,18 @@ class VersionCheckingReportClient(report.SchedulerReportClient):
if not microversion:
return
- seen_microversion = pkg_resources.parse_version(microversion)
- if seen_microversion > CMD_STATUS_MIN_MICROVERSION:
+ min_microversion = microversion_parse.parse_version_string(
+ status.MIN_PLACEMENT_MICROVERSION)
+ got_microversion = microversion_parse.parse_version_string(
+ microversion)
+ if got_microversion > min_microversion:
raise ValueError(
"Report client is using microversion %s, but nova.cmd.status "
"is only requiring %s. See "
"I4369f7fb1453e896864222fa407437982be8f6b5 for an example of "
"how to bump the minimum requirement." %
- (microversion, status.MIN_PLACEMENT_MICROVERSION))
+ (got_microversion, min_microversion)
+ )
def get(self, *args, **kwargs):
self._check_microversion(kwargs)
@@ -1361,6 +1363,17 @@ class SchedulerReportClientTests(test.TestCase):
resp = self.client._reshape(self.context, inventories, allocs)
self.assertEqual(204, resp.status_code)
+ # Trigger generation conflict
+ # We can do this by simply sending back the same reshape, as that
+ # will not work because the previous reshape updated the generations
+ self.assertRaises(
+ exception.PlacementReshapeConflict,
+ self.client._reshape,
+ self.context,
+ inventories,
+ allocs,
+ )
+
def test_update_from_provider_tree_reshape(self):
"""Run update_from_provider_tree with reshaping."""
exp_ptree = self._set_up_provider_tree()
@@ -1517,3 +1530,44 @@ class SchedulerReportClientTests(test.TestCase):
self.context, self.compute_name)
self.assertProviderTree(orig_exp_ptree, ptree)
self.assertAllocations(orig_exp_allocs, allocs)
+
+ def test_update_from_provider_tree_reshape_conflict_retry(self):
+ exp_ptree = self._set_up_provider_tree()
+
+ ptree = self.client.get_provider_tree_and_ensure_root(
+ self.context, self.compute_uuid)
+ allocs = self.client.get_allocations_for_provider_tree(
+ self.context, self.compute_name)
+ self.assertProviderTree(exp_ptree, ptree)
+ self.assertAllocations({}, allocs)
+
+ exp_allocs = self._set_up_provider_tree_allocs()
+
+ # we prepare inventory and allocation changes to trigger a reshape
+ for rp_uuid in ptree.get_provider_uuids():
+ # Add a new resource class to the inventories
+ ptree.update_inventory(
+ rp_uuid, dict(ptree.data(rp_uuid).inventory,
+ CUSTOM_FOO={'total': 10}))
+ exp_ptree[rp_uuid]['inventory']['CUSTOM_FOO'] = {'total': 10}
+ for c_uuid, alloc in allocs.items():
+ for rp_uuid, res in alloc['allocations'].items():
+ res['resources']['CUSTOM_FOO'] = 1
+ exp_allocs[c_uuid]['allocations'][rp_uuid][
+ 'resources']['CUSTOM_FOO'] = 1
+
+ # As the inventory update happens in the same request as the allocation
+ # update, the allocation update will hit a generation conflict.
+ # So we expect that it is signalled with an exception so that the
+ # upper layer can re-drive the reshape process with a fresh tree that
+ # now has the inventories.
+ self.assertRaises(
+ exception.PlacementReshapeConflict,
+ self.client.update_from_provider_tree,
+ self.context,
+ ptree,
+ allocations=allocs,
+ )
+ # We also expect that the internal cache is cleared so that the
+ # re-drive has a chance to load fresh data from placement.
+ self.assertEqual(0, len(self.client._provider_tree.roots))
diff --git a/nova/tests/functional/test_routed_networks.py b/nova/tests/functional/test_routed_networks.py
index 19c5d3c59f..616780a219 100644
--- a/nova/tests/functional/test_routed_networks.py
+++ b/nova/tests/functional/test_routed_networks.py
@@ -11,7 +11,7 @@
# under the License.
import copy
-import mock
+from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/functional/test_server_faults.py b/nova/tests/functional/test_server_faults.py
index 91f813f070..edc3c3b377 100644
--- a/nova/tests/functional/test_server_faults.py
+++ b/nova/tests/functional/test_server_faults.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import test
from nova.tests import fixtures as nova_fixtures
diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py
index 08e47b3971..01e3547f7e 100644
--- a/nova/tests/functional/test_server_group.py
+++ b/nova/tests/functional/test_server_group.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from nova.compute import instance_actions
@@ -64,12 +65,12 @@ class ServerGroupTestBase(test.TestCase,
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ self.api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
- self.api = api_fixture.api
+ self.api = self.api_fixture.api
self.api.microversion = self.microversion
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = self.microversion
self.start_service('conductor')
@@ -102,7 +103,10 @@ class ServerGroupFakeDriver(fake.SmallFakeDriver):
"""
vcpus = 1000
- memory_mb = 8192
+ # The test cases were built with a default RAM allocation ratio of
+ # 1.5 and 8192 MB of RAM, so to maintain the same capacity with the
+ # new default allocation ratio of 1.0 we use 8192 * 1.5 = 12288.
+ memory_mb = 12288
local_gb = 100000
@@ -174,13 +178,8 @@ class ServerGroupTestV21(ServerGroupTestBase):
# Create an API using project 'openstack1'.
# This is a non-admin API.
- #
- # NOTE(sdague): this is actually very much *not* how this
- # fixture should be used. This actually spawns a whole
- # additional API server. Should be addressed in the future.
- api_openstack1 = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version=self.api_major_version,
- project_id=PROJECT_ID_ALT)).api
+ api_openstack1 = self.api_fixture.alternative_api
+ api_openstack1.project_id = PROJECT_ID_ALT
api_openstack1.microversion = self.microversion
# Create a server group in project 'openstack'
@@ -445,7 +444,8 @@ class ServerGroupTestV21(ServerGroupTestBase):
evacuated_server = self._evacuate_server(
servers[1], {'onSharedStorage': 'False'},
- expected_migration_status='done')
+ expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated to another host
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -622,7 +622,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
compute3 = self.start_service('compute', host='host3')
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# check that the server is evacuated
self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
@@ -801,7 +802,8 @@ class ServerGroupTestV215(ServerGroupTestV21):
self._set_forced_down(host, True)
evacuated_server = self._evacuate_server(
- servers[1], expected_migration_status='done')
+ servers[1], expected_migration_status='done',
+ expected_state='ACTIVE')
# Note(gibi): need to get the server again as the state of the instance
# goes to ACTIVE first then the host of the instance changes to the
@@ -871,6 +873,54 @@ class ServerGroupTestV264(ServerGroupTestV215):
self.assertEqual(2, hosts.count(host))
+class ServerGroupTestV295(ServerGroupTestV264):
+ microversion = '2.95'
+
+ def _evacuate_with_soft_anti_affinity_policies(self, group):
+ created_group = self.api.post_server_groups(group)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
+
+ # Note(gibi): need to get the server again as the state of the instance
+ # goes to ACTIVE first and only later does the host of the instance
+ # change to the new host
+ evacuated_server = self.admin_api.get_server(evacuated_server['id'])
+
+ return [evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host']]
+
+ def test_evacuate_with_anti_affinity(self):
+ created_group = self.api.post_server_groups(self.anti_affinity)
+ servers = self._boot_servers_to_group(created_group)
+
+ host = self._get_compute_service_by_host_name(
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # Set forced_down on the host to ensure nova considers the host down.
+ self._set_forced_down(host, True)
+
+ # Start additional host to test evacuation
+ compute3 = self.start_service('compute', host='host3')
+
+ evacuated_server = self._evacuate_server(
+ servers[1], expected_migration_status='done')
+
+ # check that the server is evacuated
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[1]['OS-EXT-SRV-ATTR:host'])
+ # check that policy is kept
+ self.assertNotEqual(evacuated_server['OS-EXT-SRV-ATTR:host'],
+ servers[0]['OS-EXT-SRV-ATTR:host'])
+
+ compute3.kill()
+
+
class ServerGroupTestMultiCell(ServerGroupTestBase):
NUMBER_OF_CELLS = 2
diff --git a/nova/tests/functional/test_server_rescue.py b/nova/tests/functional/test_server_rescue.py
index fa96c10344..8f5b912943 100644
--- a/nova/tests/functional/test_server_rescue.py
+++ b/nova/tests/functional/test_server_rescue.py
@@ -10,6 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
+
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
@@ -23,7 +27,37 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
self.useFixture(nova_fixtures.CinderFixture(self))
self._start_compute(host='host1')
- def _create_bfv_server(self):
+ def _create_image(self, metadata=None):
+ image = {
+ 'id': uuids.stable_rescue_image,
+ 'name': 'fake-image-rescue-property',
+ 'created_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'updated_at': datetime.datetime(2011, 1, 1, 1, 2, 3),
+ 'deleted_at': None,
+ 'deleted': False,
+ 'status': 'active',
+ 'is_public': False,
+ 'container_format': 'raw',
+ 'disk_format': 'raw',
+ 'size': '25165824',
+ 'min_ram': 0,
+ 'min_disk': 0,
+ 'protected': False,
+ 'visibility': 'public',
+ 'tags': ['tag1', 'tag2'],
+ 'properties': {
+ 'kernel_id': 'nokernel',
+ 'ramdisk_id': 'nokernel',
+ 'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi',
+ },
+ }
+ if metadata:
+ image['properties'].update(metadata)
+ return self.glance.create(None, image)
+
+ def _create_bfv_server(self, metadata=None):
+ image = self._create_image(metadata=metadata)
server_request = self._build_server(networks=[])
server_request.pop('imageRef')
server_request['block_device_mapping_v2'] = [{
@@ -33,7 +67,7 @@ class BFVRescue(integrated_helpers.ProviderUsageBaseTestCase):
'destination_type': 'volume'}]
server = self.api.post_server({'server': server_request})
self._wait_for_state_change(server, 'ACTIVE')
- return server
+ return server, image
class DisallowBFVRescuev286(BFVRescue):
@@ -43,10 +77,10 @@ class DisallowBFVRescuev286(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -60,10 +94,10 @@ class DisallowBFVRescuev286WithTrait(BFVRescue):
microversion = '2.86'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Cannot rescue a volume-backed instance',
ex.response.text)
@@ -77,10 +111,10 @@ class DisallowBFVRescuev287WithoutTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_not_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
ex = self.assertRaises(client.OpenStackApiException,
self.api.post_server_action, server['id'], {'rescue': {
- 'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ 'rescue_image_ref': image['id']}})
self.assertEqual(400, ex.response.status_code)
self.assertIn('Host unable to rescue a volume-backed instance',
ex.response.text)
@@ -94,7 +128,41 @@ class AllowBFVRescuev287WithTrait(BFVRescue):
microversion = '2.87'
def test_bfv_rescue_supported(self):
- server = self._create_bfv_server()
+ server, image = self._create_bfv_server()
self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
+ self._wait_for_state_change(server, 'RESCUE')
+
+
+class DisallowBFVRescuev287WithoutRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests fail with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are not set on the image.
+ """
+ compute_driver = 'fake.MediumFakeDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_failed(self):
+ server, image = self._create_bfv_server()
+ # try rescue without hw_rescue_device and hw_rescue_bus properties set
+ ex = self.assertRaises(client.OpenStackApiException,
+ self.api.post_server_action, server['id'], {'rescue': {
'rescue_image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6'}})
+ self.assertEqual(400, ex.response.status_code)
+ self.assertIn('Cannot rescue a volume-backed instance',
+ ex.response.text)
+
+
+class AllowBFVRescuev287WithRescueImageProperties(BFVRescue):
+ """Asserts that BFV rescue requests pass with microversion 2.87 (or later)
+ when the required hw_rescue_device and hw_rescue_bus image properties
+ are set on the image.
+ """
+ compute_driver = 'fake.RescueBFVDriver'
+ microversion = '2.87'
+
+ def test_bfv_rescue_done(self):
+ server, image = self._create_bfv_server()
+ self.api.post_server_action(server['id'], {'rescue': {
+ 'rescue_image_ref': image['id']}})
self._wait_for_state_change(server, 'RESCUE')
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index e77d4bf1ea..5887c99081 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -17,10 +17,11 @@ import collections
import copy
import datetime
import time
+from unittest import mock
import zlib
+from cinderclient import exceptions as cinder_exception
from keystoneauth1 import adapter
-import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import base64
@@ -764,7 +765,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
LOG.info('Attaching volume %s to server %s', volume_id, server_id)
# The fake driver doesn't implement get_device_name_for_instance, so
- # we'll just raise the exception directly here, instead of simuluating
+ # we'll just raise the exception directly here, instead of simulating
# an instance with 26 disk devices already attached.
with mock.patch.object(self.compute.driver,
'get_device_name_for_instance') as mock_get:
@@ -1253,9 +1254,7 @@ class ServerTestV269(integrated_helpers._IntegratedTestBase):
def test_get_servers_detail_filters(self):
# We get the results only from the up cells, this ignoring the down
# cells if list_records_by_skipping_down_cells config option is True.
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = '2.69'
servers = self.admin_api.get_servers(
search_opts={'hostname': "cell3-inst0"})
@@ -1263,9 +1262,7 @@ class ServerTestV269(integrated_helpers._IntegratedTestBase):
self.assertEqual(self.up_cell_insts[2], servers[0]['id'])
def test_get_servers_detail_all_tenants_with_down_cells(self):
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.admin_api = api_fixture.admin_api
+ self.admin_api = self.api_fixture.admin_api
self.admin_api.microversion = '2.69'
servers = self.admin_api.get_servers(search_opts={'all_tenants': True})
# 4 servers from the up cells and 4 servers from the down cells
@@ -1518,15 +1515,97 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase):
'volume-backed server', str(resp))
+class ServerRebuildTestCaseV293(integrated_helpers._IntegratedTestBase):
+ api_major_version = 'v2.1'
+
+ def setUp(self):
+ super(ServerRebuildTestCaseV293, self).setUp()
+ self.cinder = nova_fixtures.CinderFixture(self)
+ self.useFixture(self.cinder)
+
+ def _bfv_server(self):
+ server_req_body = {
+ # There is no imageRef because this is boot from volume.
+ 'server': {
+ 'flavorRef': '1', # m1.tiny from DefaultFlavorsFixture,
+ 'name': 'test_volume_backed_rebuild_different_image',
+ 'networks': [],
+ 'block_device_mapping_v2': [{
+ 'boot_index': 0,
+ 'uuid':
+ nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
+ 'source_type': 'volume',
+ 'destination_type': 'volume'
+ }]
+ }
+ }
+ server = self.api.post_server(server_req_body)
+ return self._wait_for_state_change(server, 'ACTIVE')
+
+ def _test_rebuild(self, server):
+ self.api.microversion = '2.93'
+ # Now rebuild the server with a different image than was used to create
+ # our fake volume.
+ rebuild_image_ref = self.glance.auto_disk_config_enabled_image['id']
+ rebuild_req_body = {'rebuild': {'imageRef': rebuild_image_ref}}
+
+ with mock.patch.object(self.compute.manager.virtapi,
+ 'wait_for_instance_event'):
+ self.api.api_post('/servers/%s/action' % server['id'],
+ rebuild_req_body,
+ check_response_status=[202])
+
+ def test_volume_backed_rebuild_root_v293(self):
+ server = self._bfv_server()
+ self._test_rebuild(server)
+
+ def test_volume_backed_rebuild_root_create_failed(self):
+ server = self._bfv_server()
+ error = cinder_exception.ClientException(code=500)
+ with mock.patch.object(volume.cinder.API, 'attachment_create',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+ def test_volume_backed_rebuild_root_instance_deleted(self):
+ server = self._bfv_server()
+ error = exception.InstanceNotFound(instance_id=server['id'])
+ with mock.patch.object(self.compute.manager, '_detach_root_volume',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+ def test_volume_backed_rebuild_root_delete_old_failed(self):
+ server = self._bfv_server()
+ error = cinder_exception.ClientException(code=500)
+ with mock.patch.object(volume.cinder.API, 'attachment_delete',
+ side_effect=error):
+ # We expect this to fail because we are doing cast-as-call
+ self.assertRaises(client.OpenStackApiException,
+ self._test_rebuild, server)
+ server = self.api.get_server(server['id'])
+ self.assertIn('Failed to rebuild volume backed instance',
+ server['fault']['message'])
+ self.assertEqual('ERROR', server['status'])
+
+
class ServersTestV280(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2.1'
def setUp(self):
super(ServersTestV280, self).setUp()
- api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version='v2.1'))
- self.api = api_fixture.api
- self.admin_api = api_fixture.admin_api
+ self.api = self.api_fixture.api
+ self.admin_api = self.api_fixture.admin_api
self.api.microversion = '2.80'
self.admin_api.microversion = '2.80'
@@ -1585,9 +1664,8 @@ class ServersTestV280(integrated_helpers._IntegratedTestBase):
project_id_1 = '4906260553374bf0a5d566543b320516'
project_id_2 = 'c850298c1b6b4796a8f197ac310b2469'
- new_api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
- api_version=self.api_major_version, project_id=project_id_1))
- new_admin_api = new_api_fixture.admin_api
+ new_admin_api = self.api_fixture.alternative_admin_api
+ new_admin_api.project_id = project_id_1
new_admin_api.microversion = '2.80'
post = {
@@ -2182,7 +2260,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
}
server = self._evacuate_server(
- server, extra_post_args=post, expected_host=dest_hostname)
+ server, extra_post_args=post, expected_host=dest_hostname,
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -2359,7 +2438,8 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
# stay ACTIVE and task_state will be set to None.
server = self._evacuate_server(
server, expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state='ACTIVE')
# Run the periodics to show those don't modify allocations.
self._run_periodics()
@@ -2519,6 +2599,57 @@ class ServerMovingTests(integrated_helpers.ProviderUsageBaseTestCase):
self._delete_and_check_allocations(server)
+ def test_shelve_unshelve_to_host(self):
+ source_hostname = self.compute1.host
+ dest_hostname = self.compute2.host
+ source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
+ dest_rp_uuid = \
+ self._get_provider_uuid_by_host(dest_hostname)
+
+ server = self._boot_then_shelve_and_check_allocations(
+ source_hostname, source_rp_uuid)
+
+ self._shelve_offload_and_check_allocations(server, source_rp_uuid)
+
+ req = {
+ 'unshelve': {'host': dest_hostname}
+ }
+
+ self.api.post_server_action(server['id'], req)
+ self._wait_for_server_parameter(
+ server, {'OS-EXT-SRV-ATTR:host': dest_hostname, 'status': 'ACTIVE'}
+ )
+
+ self.assertFlavorMatchesUsage(dest_rp_uuid, self.flavor1)
+
+ # the server has an allocation on only the dest node
+ self.assertFlavorMatchesAllocation(
+ self.flavor1, server['id'], dest_rp_uuid)
+
+ self._delete_and_check_allocations(server)
+
+ def test_shelve_unshelve_to_host_instance_not_offloaded(self):
+ source_hostname = self.compute1.host
+ dest_hostname = self.compute2.host
+ source_rp_uuid = self._get_provider_uuid_by_host(source_hostname)
+
+ server = self._boot_then_shelve_and_check_allocations(
+ source_hostname, source_rp_uuid)
+
+ req = {
+ 'unshelve': {'host': dest_hostname}
+ }
+
+ ex = self.assertRaises(
+ client.OpenStackApiException,
+ self.api.post_server_action,
+ server['id'], req
+ )
+ self.assertEqual(409, ex.response.status_code)
+ self.assertIn(
+ "The server status must be SHELVED_OFFLOADED",
+ ex.response.text)
+
def _shelve_offload_and_check_allocations(self, server, source_rp_uuid):
req = {
'shelveOffload': {}
@@ -5195,7 +5326,8 @@ class ServerMovingTestsWithNestedResourceRequests(
server = self._evacuate_server(
server, extra_post_args=post, expected_migration_status='error',
- expected_host=source_hostname)
+ expected_host=source_hostname,
+ expected_state='ACTIVE')
self.assertIn('Unable to move instance %s to host host2. The instance '
'has complex allocations on the source host so move '
@@ -5401,7 +5533,8 @@ class ServerMovingTestsFromFlatToNested(
self._evacuate_server(
server, extra_post_args=post, expected_host='host1',
- expected_migration_status='error')
+ expected_migration_status='error',
+ expected_state='ACTIVE')
# We expect that the evacuation will fail as force evacuate tries to
# blindly copy the source allocation to the destination but on the
@@ -6393,3 +6526,41 @@ class PortAndFlavorAccelsServerCreateTest(AcceleratorServerBase):
binding_profile = neutronapi.get_binding_profile(updated_port)
self.assertNotIn('arq_uuid', binding_profile)
self.assertNotIn('pci_slot', binding_profile)
+
+
+class PortBindingShelvedServerTest(integrated_helpers._IntegratedTestBase):
+ """Tests for servers with ports."""
+
+ compute_driver = 'fake.SmallFakeDriver'
+
+ def setUp(self):
+ super(PortBindingShelvedServerTest, self).setUp()
+ self.flavor_id = self._create_flavor(
+ disk=10, ephemeral=20, swap=5 * 1024)
+
+ def test_shelve_offload_with_port(self):
+ # Do not wait before offloading
+ self.flags(shelved_offload_time=0)
+
+ server = self._create_server(
+ flavor_id=self.flavor_id,
+ networks=[{'port': self.neutron.port_1['id']}])
+
+ port = self.neutron.show_port(self.neutron.port_1['id'])['port']
+
+ # Assert that the port is actually associated with the instance
+ self.assertEqual(port['device_id'], server['id'])
+ self.assertEqual(port['binding:host_id'], 'compute')
+ self.assertEqual(port['binding:status'], 'ACTIVE')
+
+ # Do shelve
+ server = self._shelve_server(server, 'SHELVED_OFFLOADED')
+
+ # Retrieve the updated port
+ port = self.neutron.show_port(self.neutron.port_1['id'])['port']
+
+ # Assert that the port is still associated with the instance
+ # but the binding is no longer on the compute host
+ self.assertEqual(port['device_id'], server['id'])
+ self.assertIsNone(port['binding:host_id'])
+ self.assertNotIn('binding:status', port)
diff --git a/nova/tests/functional/test_servers_provider_tree.py b/nova/tests/functional/test_servers_provider_tree.py
index 0eff6c6bda..da562c4f19 100644
--- a/nova/tests/functional/test_servers_provider_tree.py
+++ b/nova/tests/functional/test_servers_provider_tree.py
@@ -14,8 +14,8 @@
# under the License.
-import mock
+from unittest import mock
+
import os_traits
from oslo_log import log as logging
from oslo_utils.fixture import uuidsentinel as uuids
@@ -82,7 +82,7 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
},
'MEMORY_MB': {
'total': 8192,
- 'allocation_ratio': 1.5,
+ 'allocation_ratio': 1.0,
'max_unit': 8192,
'min_unit': 1,
'reserved': 512,
@@ -90,7 +90,7 @@ class ProviderTreeTests(integrated_helpers.ProviderUsageBaseTestCase):
},
'VCPU': {
'total': 10,
- 'allocation_ratio': 16.0,
+ 'allocation_ratio': 4.0,
'max_unit': 10,
'min_unit': 1,
'reserved': 0,
diff --git a/nova/tests/functional/test_servers_resource_request.py b/nova/tests/functional/test_servers_resource_request.py
index a8df84a5bc..9c91af7218 100644
--- a/nova/tests/functional/test_servers_resource_request.py
+++ b/nova/tests/functional/test_servers_resource_request.py
@@ -14,9 +14,9 @@
import copy
import logging
+from unittest import mock
from keystoneauth1 import adapter
-import mock
from neutronclient.common import exceptions as neutron_exception
import os_resource_classes as orc
from oslo_config import cfg
@@ -146,12 +146,13 @@ class ExtendedResourceRequestNeutronFixture(ResourceRequestNeutronFixture):
# port_resource_request_groups.py
{
"updated": "2021-08-02T10:00:00-00:00",
- "name": constants.RESOURCE_REQUEST_GROUPS_EXTENSION,
+ "name": "Port Resource Request Groups",
"links": [],
"alias": "port-resource-request-groups",
- "description":
+ "description": (
"Support requesting multiple groups of resources and "
"traits from the same RP subtree in resource_request"
+ ),
}
)
return extensions
@@ -458,7 +459,7 @@ class PortResourceRequestBasedSchedulingTestBase(
def _create_sriov_networking_rp_tree(self, hostname, compute_rp_uuid):
# Create a matching RP tree in placement for the PCI devices added to
- # the passthrough_whitelist config during setUp() and PCI devices
+ # the device_spec config during setUp() and PCI devices
# present in the FakeDriverWithPciResources virt driver.
#
# * PF1 represents the PCI device 0000:01:00, it will be mapped to
@@ -1067,7 +1068,7 @@ class PortResourceRequestBasedSchedulingTest(
def test_interface_attach_sriov_with_qos_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the sriov interface is attached.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1114,7 +1115,7 @@ class PortResourceRequestBasedSchedulingTest(
):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the sriov interface is attached.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1361,7 +1362,7 @@ class PortResourceRequestBasedSchedulingTest(
does not have resource request can be allocated to PF2 or PF3.
For the detailed compute host config see the FakeDriverWithPciResources
- class. For the necessary passthrough_whitelist config see the setUp of
+ class. For the necessary device_spec config see the setUp of
the PortResourceRequestBasedSchedulingTestBase class.
"""
@@ -1922,7 +1923,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_migrate_server_with_qos_port_pci_update_fail_not_reschedule(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is migrated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -1942,7 +1943,7 @@ class ServerMoveWithPortResourceRequestTest(
non_qos_port, qos_port, qos_sriov_port)
# The compute manager on host2 will raise from
- # update_pci_request_spec_with_allocated_interface_name which will
+ # update_pci_request_with_placement_allocations which will
# intentionally not trigger a re-schedule even if there is host3 as an
# alternate.
self.api.post_server_action(server['id'], {'migrate': None})
@@ -2161,7 +2162,8 @@ class ServerMoveWithPortResourceRequestTest(
# simply fail and the server remains on the source host
server = self._evacuate_server(
server, expected_host='host1', expected_task_state=None,
- expected_migration_status='failed')
+ expected_migration_status='failed',
+ expected_state="ACTIVE")
# As evacuation failed the resource allocation should be untouched
self._check_allocation(
@@ -2185,7 +2187,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_evacuate_with_qos_port_pci_update_fail(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is evacuated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2206,7 +2208,7 @@ class ServerMoveWithPortResourceRequestTest(
self.compute1_service_id, {'forced_down': 'true'})
# The compute manager on host2 will raise from
- # update_pci_request_spec_with_allocated_interface_name
+ # update_pci_request_with_placement_allocations
server = self._evacuate_server(
server, expected_host='host1', expected_state='ERROR',
expected_task_state=None, expected_migration_status='failed')
@@ -2362,7 +2364,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_live_migrate_with_qos_port_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is live migrated to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2503,7 +2505,7 @@ class ServerMoveWithPortResourceRequestTest(
def test_unshelve_offloaded_server_with_qos_port_pci_update_fails(self):
# Update the name of the network device RP of PF2 on host2 to something
# unexpected. This will cause
- # update_pci_request_spec_with_allocated_interface_name() to raise
+ # update_pci_request_with_placement_allocations() to raise
# when the instance is unshelved to the host2.
rsp = self.placement.put(
'/resource_providers/%s'
@@ -2536,7 +2538,7 @@ class ServerMoveWithPortResourceRequestTest(
self.api.post_server_action(server['id'], {'unshelve': None})
# Unshelve fails on host2 due to
- # update_pci_request_spec_with_allocated_interface_name fails so the
+ # update_pci_request_with_placement_allocations fails so the
# instance goes back to shelve offloaded state
self.notifier.wait_for_versioned_notifications(
'instance.unshelve.start')
@@ -2978,6 +2980,7 @@ class ExtendedResourceRequestOldCompute(
super().setUp()
self.neutron = self.useFixture(
ExtendedResourceRequestNeutronFixture(self))
+ self.api.microversion = '2.72'
@mock.patch.object(
objects.service, 'get_minimum_version_all_cells',
diff --git a/nova/tests/functional/test_service.py b/nova/tests/functional/test_service.py
index 65b41594bd..21e9a519ee 100644
--- a/nova/tests/functional/test_service.py
+++ b/nova/tests/functional/test_service.py
@@ -10,8 +10,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
from unittest import mock
+import fixtures
+from oslo_utils.fixture import uuidsentinel as uuids
+
from nova import context as nova_context
from nova import exception
from nova.objects import service
@@ -19,6 +23,7 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.functional import integrated_helpers
+from nova.virt import node
class ServiceTestCase(test.TestCase,
@@ -137,3 +142,83 @@ class TestOldComputeCheck(
return_value=old_version):
self.assertRaises(
exception.TooOldComputeService, self._start_compute, 'host1')
+
+
+class TestComputeStartupChecks(test.TestCase):
+ STUB_COMPUTE_ID = False
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.RealPolicyFixture())
+ self.useFixture(nova_fixtures.NeutronFixture(self))
+ self.useFixture(nova_fixtures.GlanceFixture(self))
+ self.useFixture(func_fixtures.PlacementFixture())
+
+ self._local_uuid = str(uuids.node)
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.get_local_node_uuid',
+ functools.partial(self.local_uuid, True)))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.read_local_node_uuid',
+ self.local_uuid))
+ self.useFixture(fixtures.MockPatch(
+ 'nova.virt.node.write_local_node_uuid',
+ mock.DEFAULT))
+ self.flags(compute_driver='fake.FakeDriverWithoutFakeNodes')
+
+ def local_uuid(self, get=False):
+ if get and not self._local_uuid:
+ # Simulate the get_local_node_uuid behavior of calling write once
+ self._local_uuid = str(uuids.node)
+ node.write_local_node_uuid(self._local_uuid)
+ return self._local_uuid
+
+ def test_compute_node_identity_greenfield(self):
+ # Level-set test case to show that starting and re-starting without
+ # any error cases works as expected.
+
+ # Start with no local compute_id
+ self._local_uuid = None
+ self.start_service('compute')
+
+ # Start should have generated and written a compute id
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ # Starting again should succeed and not cause another write
+ self.start_service('compute')
+ node.write_local_node_uuid.assert_called_once_with(str(uuids.node))
+
+ def test_compute_node_identity_deleted(self):
+ self.start_service('compute')
+
+ # Simulate the compute_id file being deleted
+ self._local_uuid = None
+
+ # Should refuse to start because it's not our first time and the file
+ # being missing is a hard error.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('lost that state', str(exc))
+
+ def test_compute_node_hostname_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Starting with a different hostname should trigger the abort
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute', host='other')
+ self.assertIn('hypervisor_hostname', str(exc))
+
+ def test_compute_node_uuid_changed(self):
+ # Start our compute once to create the node record
+ self.start_service('compute')
+
+ # Simulate a changed local compute_id file
+ self._local_uuid = str(uuids.othernode)
+
+ # We should fail to create the compute node record again, but with a
+ # useful error message about why.
+ exc = self.assertRaises(exception.InvalidConfiguration,
+ self.start_service, 'compute')
+ self.assertIn('Duplicate compute node record', str(exc))
diff --git a/nova/tests/functional/test_unified_limits.py b/nova/tests/functional/test_unified_limits.py
new file mode 100644
index 0000000000..64d59b47d7
--- /dev/null
+++ b/nova/tests/functional/test_unified_limits.py
@@ -0,0 +1,217 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_limit import fixture as limit_fixture
+from oslo_serialization import base64
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova import context as nova_context
+from nova.limit import local as local_limit
+from nova.objects import flavor as flavor_obj
+from nova.objects import instance_group as group_obj
+from nova.tests.functional.api import client
+from nova.tests.functional import integrated_helpers
+
+
+class UnifiedLimitsTest(integrated_helpers._IntegratedTestBase):
+
+ def setUp(self):
+ super(UnifiedLimitsTest, self).setUp()
+ # Use different project_ids for non-admin and admin.
+ self.api.project_id = 'fake'
+ self.admin_api.project_id = 'admin'
+
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 10,
+ local_limit.SERVER_GROUP_MEMBERS: 1,
+ 'servers': 4,
+ 'class:VCPU': 8,
+ 'class:MEMORY_MB': 32768,
+ 'class:DISK_GB': 250}
+ projlimits = {self.api.project_id: {'servers': 2,
+ 'class:VCPU': 4,
+ 'class:MEMORY_MB': 16384,
+ 'class:DISK_GB': 100}}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, projlimits))
+ self.ctx = nova_context.get_admin_context()
+
+ def _setup_services(self):
+ # Use driver with lots of resources so we don't get NoValidHost while
+ # testing quotas. Need to do this before services are started.
+ self.flags(compute_driver='fake.FakeDriver')
+ super(UnifiedLimitsTest, self)._setup_services()
+
+ def test_servers(self):
+ # First test the project limit using the non-admin project.
+ for i in range(2):
+ self._create_server(api=self.api)
+
+ # Attempt to create a third server should fail.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server, api=self.api)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('servers', e.response.text)
+
+ # Then test the default limit using the admin project.
+ for i in range(4):
+ self._create_server(api=self.admin_api)
+
+ # Attempt to create a fifth server should fail.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server,
+ api=self.admin_api)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('servers', e.response.text)
+
+ def test_vcpu(self):
+ # First test the project limit using the non-admin project.
+ # m1.large has vcpus=4 and our project limit is 4, should succeed.
+ flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.large')
+ self._create_server(api=self.api, flavor_id=flavor.flavorid)
+
+ # m1.small has vcpus=1, should fail because we are at quota.
+ flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.small')
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server, api=self.api,
+ flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:VCPU', e.response.text)
+
+ # Then test the default limit of 8 using the admin project.
+ flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.large')
+ for i in range(2):
+ self._create_server(api=self.admin_api, flavor_id=flavor.flavorid)
+
+ # Attempt to create another server with vcpus=1 should fail because we
+ # are at quota.
+ flavor = flavor_obj.Flavor.get_by_name(self.ctx, 'm1.small')
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server,
+ api=self.admin_api, flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:VCPU', e.response.text)
+
+ def test_memory_mb(self):
+ # First test the project limit using the non-admin project.
+ flavor = flavor_obj.Flavor(
+ context=self.ctx, memory_mb=16384, vcpus=1, root_gb=1,
+ flavorid='9', name='m1.custom')
+ flavor.create()
+ self._create_server(api=self.api, flavor_id=flavor.flavorid)
+
+ # Attempt to create another should fail as we are at quota.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server, api=self.api,
+ flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:MEMORY_MB', e.response.text)
+
+ # Then test the default limit of 32768 using the admin project.
+ for i in range(2):
+ self._create_server(api=self.admin_api, flavor_id=flavor.flavorid)
+
+ # Attempt to create another server should fail because we are at quota.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server,
+ api=self.admin_api, flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:MEMORY_MB', e.response.text)
+
+ def test_disk_gb(self):
+ # First test the project limit using the non-admin project.
+ flavor = flavor_obj.Flavor(
+ context=self.ctx, memory_mb=1, vcpus=1, root_gb=100,
+ flavorid='9', name='m1.custom')
+ flavor.create()
+ self._create_server(api=self.api, flavor_id=flavor.flavorid)
+
+ # Attempt to create another should fail as we are at quota.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server, api=self.api,
+ flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:DISK_GB', e.response.text)
+
+ # Then test the default limit of 250 using the admin project.
+ for i in range(2):
+ self._create_server(api=self.admin_api, flavor_id=flavor.flavorid)
+
+ # Attempt to create another server should fail because we are at quota.
+ e = self.assertRaises(
+ client.OpenStackApiException, self._create_server,
+ api=self.admin_api, flavor_id=flavor.flavorid)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('class:DISK_GB', e.response.text)
+
+ def test_no_injected_files(self):
+ self._create_server()
+
+ def test_max_injected_files(self):
+ # Quota is 5.
+ files = []
+ contents = base64.encode_as_text('some content')
+ for i in range(5):
+ files.append(('/my/path%d' % i, contents))
+ server = self._build_server()
+ personality = [
+ {'path': item[0], 'contents': item[1]} for item in files]
+ server['personality'] = personality
+ self.api.post_server({'server': server})
+
+ def test_max_injected_file_content_bytes(self):
+ # Quota is 10 * 1024
+ # Apparently the quota is checked against the base64 encoded string
+ # even though the api-ref claims the limit is for the decoded data.
+ # Subtract 3072 characters to account for that.
+ content = base64.encode_as_bytes(
+ ''.join(['a' for i in range(10 * 1024 - 3072)]))
+ server = self._build_server()
+ personality = [{'path': '/test/path', 'contents': content}]
+ server['personality'] = personality
+ self.api.post_server({'server': server})
+
+ def test_max_injected_file_path_bytes(self):
+ # Quota is 255.
+ path = ''.join(['a' for i in range(255)])
+ contents = base64.encode_as_text('some content')
+ server = self._build_server()
+ personality = [{'path': path, 'contents': contents}]
+ server['personality'] = personality
+ self.api.post_server({'server': server})
+
+ def test_server_group_members(self):
+ # Create a server group.
+ instance_group = group_obj.InstanceGroup(
+ self.ctx, policy="anti-affinity")
+ instance_group.name = "foo"
+ instance_group.project_id = self.ctx.project_id
+ instance_group.user_id = self.ctx.user_id
+ instance_group.uuid = uuids.instance_group
+ instance_group.create()
+
+ # Quota for server group members is 1.
+ server = self._build_server()
+ hints = {'group': uuids.instance_group}
+ req = {'server': server, 'os:scheduler_hints': hints}
+ self.admin_api.post_server(req)
+
+ # Attempt to create another server in the group should fail because we
+ # are at quota.
+ e = self.assertRaises(
+ client.OpenStackApiException, self.admin_api.post_server, req)
+ self.assertEqual(403, e.response.status_code)
+ self.assertIn('server_group_members', e.response.text)
diff --git a/nova/tests/unit/accelerator/test_cyborg.py b/nova/tests/unit/accelerator/test_cyborg.py
index c8f3944514..2d814c74a1 100644
--- a/nova/tests/unit/accelerator/test_cyborg.py
+++ b/nova/tests/unit/accelerator/test_cyborg.py
@@ -13,7 +13,7 @@
# under the License.
import itertools
-import mock
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
from requests.models import Response
@@ -190,7 +190,7 @@ class CyborgTestCase(test.NoDBTestCase):
},
"attach_handle_type": "PCI",
"state": "Bound",
- # Devic eprofile name is common to all bound ARQs
+ # Device profile name is common to all bound ARQs
"device_profile_name": arqs[0]["device_profile_name"],
**common
}
@@ -367,7 +367,7 @@ class CyborgTestCase(test.NoDBTestCase):
# If only some ARQs are resolved, return just the resolved ones
unbound_arqs, _ = self._get_arqs_and_request_groups()
_, bound_arqs = self._get_bound_arqs()
- # Create a amixture of unbound and bound ARQs
+ # Create a mixture of unbound and bound ARQs
arqs = [unbound_arqs[0], bound_arqs[0]]
instance_uuid = bound_arqs[0]['instance_uuid']
@@ -487,7 +487,7 @@ class CyborgTestCase(test.NoDBTestCase):
self.assertEqual(bound_arqs, ret_arqs)
def test_get_arq_pci_device_profile(self):
- """Test extractin arq pci device info"""
+ """Test extracting arq pci device info"""
arq = {'uuid': uuids.arq_uuid,
'device_profile_name': "smart_nic",
'device_profile_group_id': '5',
diff --git a/nova/tests/unit/api/openstack/compute/admin_only_action_common.py b/nova/tests/unit/api/openstack/compute/admin_only_action_common.py
index 37fd1012b7..f332d9f32f 100644
--- a/nova/tests/unit/api/openstack/compute/admin_only_action_common.py
+++ b/nova/tests/unit/api/openstack/compute/admin_only_action_common.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_admin_password.py b/nova/tests/unit/api/openstack/compute/test_admin_password.py
index 90a4a2983b..67e4c743d5 100644
--- a/nova/tests/unit/api/openstack/compute/test_admin_password.py
+++ b/nova/tests/unit/api/openstack/compute/test_admin_password.py
@@ -13,7 +13,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+
+from unittest import mock
+
import webob
from nova.api.openstack.compute import admin_password as admin_password_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_aggregates.py b/nova/tests/unit/api/openstack/compute/test_aggregates.py
index fb096861eb..21d644f0be 100644
--- a/nova/tests/unit/api/openstack/compute/test_aggregates.py
+++ b/nova/tests/unit/api/openstack/compute/test_aggregates.py
@@ -15,7 +15,8 @@
"""Tests for the aggregates admin api."""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_api.py b/nova/tests/unit/api/openstack/compute/test_api.py
index ca54be9a74..d1bb6babb7 100644
--- a/nova/tests/unit/api/openstack/compute/test_api.py
+++ b/nova/tests/unit/api/openstack/compute/test_api.py
@@ -143,7 +143,7 @@ class APITest(test.NoDBTestCase):
self.assertEqual(resp.headers[key], str(value))
def test_quota_error_mapping(self):
- self._do_test_exception_mapping(exception.QuotaError, 'too many used')
+ self._do_test_exception_mapping(exception.OverQuota, 'too many used')
def test_non_nova_notfound_exception_mapping(self):
class ExceptionWithCode(Exception):
diff --git a/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py b/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
index 526cb6011d..e4719ea052 100644
--- a/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
+++ b/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from webob import exc
from nova.api.openstack import common
diff --git a/nova/tests/unit/api/openstack/compute/test_availability_zone.py b/nova/tests/unit/api/openstack/compute/test_availability_zone.py
index f355eb436a..a408e0d1aa 100644
--- a/nova/tests/unit/api/openstack/compute/test_availability_zone.py
+++ b/nova/tests/unit/api/openstack/compute/test_availability_zone.py
@@ -13,9 +13,9 @@
# under the License.
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
diff --git a/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py b/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
index 55a8b03216..c8ad907b10 100644
--- a/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
+++ b/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
@@ -13,13 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
from ironicclient import exc as ironic_exc
-import mock
from webob import exc
-from nova.api.openstack.compute import baremetal_nodes \
- as b_nodes_v21
+from nova.api.openstack.compute import baremetal_nodes as b_nodes_v21
from nova import context
from nova import exception
from nova import test
diff --git a/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py b/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
index 429096d51d..a1f3d1e63d 100644
--- a/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
+++ b/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
import webob
from nova.api.openstack import api_version_request
diff --git a/nova/tests/unit/api/openstack/compute/test_console_output.py b/nova/tests/unit/api/openstack/compute/test_console_output.py
index 1a76a445fc..a9dc830255 100644
--- a/nova/tests/unit/api/openstack/compute/test_console_output.py
+++ b/nova/tests/unit/api/openstack/compute/test_console_output.py
@@ -14,8 +14,8 @@
# under the License.
import string
+from unittest import mock
-import mock
import webob
from nova.api.openstack.compute import console_output \
diff --git a/nova/tests/unit/api/openstack/compute/test_create_backup.py b/nova/tests/unit/api/openstack/compute/test_create_backup.py
index f7280a5a37..9728002e88 100644
--- a/nova/tests/unit/api/openstack/compute/test_create_backup.py
+++ b/nova/tests/unit/api/openstack/compute/test_create_backup.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import timeutils
import webob
@@ -40,10 +41,6 @@ class CreateBackupTestsV21(admin_only_action_common.CommonMixin,
self.controller = getattr(self.create_backup, self.controller_name)()
self.compute_api = self.controller.compute_api
- patch_get = mock.patch.object(self.compute_api, 'get')
- self.mock_get = patch_get.start()
- self.addCleanup(patch_get.stop)
-
@mock.patch.object(common, 'check_img_metadata_properties_quota')
@mock.patch.object(api.API, 'backup')
def test_create_backup_with_metadata(self, mock_backup, mock_check_image):
diff --git a/nova/tests/unit/api/openstack/compute/test_deferred_delete.py b/nova/tests/unit/api/openstack/compute/test_deferred_delete.py
index db6f774c51..8a1c8efd57 100644
--- a/nova/tests/unit/api/openstack/compute/test_deferred_delete.py
+++ b/nova/tests/unit/api/openstack/compute/test_deferred_delete.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack.compute import deferred_delete as dd_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_disk_config.py b/nova/tests/unit/api/openstack/compute/test_disk_config.py
index bf3be1d0a3..c5ee59722a 100644
--- a/nova/tests/unit/api/openstack/compute/test_disk_config.py
+++ b/nova/tests/unit/api/openstack/compute/test_disk_config.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from nova.api.openstack import compute
diff --git a/nova/tests/unit/api/openstack/compute/test_evacuate.py b/nova/tests/unit/api/openstack/compute/test_evacuate.py
index 6620d7a180..bd88bb8d6e 100644
--- a/nova/tests/unit/api/openstack/compute/test_evacuate.py
+++ b/nova/tests/unit/api/openstack/compute/test_evacuate.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import testtools
import webob
@@ -415,3 +416,32 @@ class EvacuateTestV268(EvacuateTestV229):
def test_forced_evacuate_with_no_host_provided(self):
# not applicable for v2.68, which removed the 'force' parameter
pass
+
+
+class EvacuateTestV295(EvacuateTestV268):
+ def setUp(self):
+ super(EvacuateTestV268, self).setUp()
+ self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True,
+ version='2.95')
+ self.req = fakes.HTTPRequest.blank('', version='2.95')
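+ # Pin the minimum compute service version to 62, which is treated
+ # here as new enough for the 2.95 evacuate behaviour;
+ # test_evacuate_version_error lowers it to 61 to exercise the check.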
+ self.mock_get_min_ver = self.useFixture(fixtures.MockPatch(
+ 'nova.objects.service.get_minimum_version_all_cells',
+ return_value=62)).mock
+
+ def test_evacuate_version_error(self):
+ self.mock_get_min_ver.return_value = 61
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self._get_evacuate_response,
+ {'host': 'my-host', 'adminPass': 'foo'})
+
+ def test_evacuate_unsupported_rpc(self):
+ def fake_evacuate(*args, **kwargs):
+ raise exception.UnsupportedRPCVersion(
+ api="fakeapi",
+ required="x.xx")
+
+ self.stub_out('nova.compute.api.API.evacuate', fake_evacuate)
+ self._check_evacuate_failure(webob.exc.HTTPConflict,
+ {'host': 'my-host',
+ 'onSharedStorage': 'False',
+ 'adminPass': 'MyNewPass'})
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
index 8c25a2efc2..ea9ca2f632 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_access.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from webob import exc
from nova.api.openstack import api_version_request as api_version
@@ -353,14 +353,37 @@ class FlavorAccessTestV21(test.NoDBTestCase):
mock_verify.assert_called_once_with(
req.environ['nova.context'], 'proj2')
+ @mock.patch('nova.objects.Flavor.remove_access')
@mock.patch('nova.api.openstack.identity.verify_project_id',
side_effect=exc.HTTPBadRequest(
explanation="Project ID proj2 is not a valid project."))
- def test_remove_tenant_access_with_invalid_tenant(self, mock_verify):
+ def test_remove_tenant_access_with_invalid_tenant(self,
+ mock_verify,
+ mock_remove_access):
"""Tests the case that the tenant does not exist in Keystone."""
req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
use_admin_context=True)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
+ self.flavor_action_controller._remove_tenant_access(
+ req, '2', body=body)
+ mock_verify.assert_called_once_with(
+ req.environ['nova.context'], 'proj2')
+ mock_remove_access.assert_called_once_with('proj2')
+
+ @mock.patch('nova.api.openstack.identity.verify_project_id',
+ side_effect=exc.HTTPBadRequest(
+ explanation="Nova was unable to find Keystone "
+ "service endpoint."))
+ def test_remove_tenant_access_missing_keystone_endpoint(self,
+ mock_verify):
+ """Tests the case that Keystone identity service endpoint
+ version 3.0 was not found.
+ """
+ req = fakes.HTTPRequest.blank(self._prefix + '/flavors/2/action',
+ use_admin_context=True)
+ body = {'removeTenantAccess': {'tenant': 'proj2'}}
+
self.assertRaises(exc.HTTPBadRequest,
self.flavor_action_controller._remove_tenant_access,
req, '2', body=body)
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_manage.py b/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
index f8412c772c..948f255f34 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_flavors.py b/nova/tests/unit/api/openstack/compute/test_flavors.py
index 4390b32012..c7fbf5c468 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavors.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavors.py
@@ -13,9 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
from urllib import parse as urlparse
-import mock
import webob
from nova.api.openstack import common
diff --git a/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py b/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
index e68bf7e306..8355ce59b5 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import testtools
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py b/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py
index e25302ee9a..71ca209672 100644
--- a/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py
+++ b/nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import floating_ip_pools \
as fipp_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_floating_ips.py b/nova/tests/unit/api/openstack/compute/test_floating_ips.py
index 2cb89dfe76..7093c5a80d 100644
--- a/nova/tests/unit/api/openstack/compute/test_floating_ips.py
+++ b/nova/tests/unit/api/openstack/compute/test_floating_ips.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_hosts.py b/nova/tests/unit/api/openstack/compute/test_hosts.py
index 7adc698093..f1cdde2917 100644
--- a/nova/tests/unit/api/openstack/compute/test_hosts.py
+++ b/nova/tests/unit/api/openstack/compute/test_hosts.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
import testtools
import webob.exc
diff --git a/nova/tests/unit/api/openstack/compute/test_hypervisors.py b/nova/tests/unit/api/openstack/compute/test_hypervisors.py
index facc5389be..a908988811 100644
--- a/nova/tests/unit/api/openstack/compute/test_hypervisors.py
+++ b/nova/tests/unit/api/openstack/compute/test_hypervisors.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
import netaddr
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -368,25 +368,23 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.index(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
- else compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- self.assertDictEqual(expected, result['hypervisors'][0])
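+ # host_api is presumably replaced with a mock during setUp; clear any
+ # side_effect set there so the return_value below drives
+ # compute_node_get_all.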
+ m_get = self.controller.host_api.compute_node_get_all
+ m_get.side_effect = None
+ m_get.return_value = compute_nodes
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
- _test(self)
+ req = self._get_request(True)
+ result = self.controller.index(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
+ else compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ self.assertDictEqual(expected, result['hypervisors'][0])
def test_index_compute_host_not_mapped(self):
"""Tests that we don't fail index if a host is not mapped."""
@@ -402,25 +400,22 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.index(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
- else compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- self.assertDictEqual(expected, result['hypervisors'][0])
+ self.controller.host_api.compute_node_get_all.return_value = (
+ compute_nodes)
+ self.controller.host_api.service_get_by_compute_host = (
+ fake_service_get_by_compute_host)
- _test(self)
+ req = self._get_request(True)
+ result = self.controller.index(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].uuid if self.expect_uuid_for_id
+ else compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ self.assertDictEqual(expected, result['hypervisors'][0])
def test_detail(self):
req = self._get_request(True)
@@ -444,32 +439,30 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.ComputeHostNotFound(host=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.detail(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- # we don't care about all of the details, just make sure we get
- # the subset we care about and there are more keys than what index
- # would return
- hypervisor = result['hypervisors'][0]
- self.assertTrue(
- set(expected.keys()).issubset(set(hypervisor.keys())))
- self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
- self.assertEqual(compute_nodes[0].hypervisor_hostname,
- hypervisor['hypervisor_hostname'])
-
- _test(self)
+ m_get = self.controller.host_api.compute_node_get_all
+ m_get.side_effect = None
+ m_get.return_value = compute_nodes
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
+
+ req = self._get_request(True)
+ result = self.controller.detail(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ # we don't care about all of the details, just make sure we get
+ # the subset we care about and there are more keys than what index
+ # would return
+ hypervisor = result['hypervisors'][0]
+ self.assertTrue(
+ set(expected.keys()).issubset(set(hypervisor.keys())))
+ self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
+ self.assertEqual(compute_nodes[0].hypervisor_hostname,
+ hypervisor['hypervisor_hostname'])
def test_detail_compute_host_not_mapped(self):
"""Tests that if a service is deleted but the compute node is not we
@@ -487,32 +480,28 @@ class HypervisorsTestV21(test.NoDBTestCase):
return TEST_SERVICES[0]
raise exception.HostMappingNotFound(name=host)
- @mock.patch.object(self.controller.host_api, 'compute_node_get_all',
- return_value=compute_nodes)
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- fake_service_get_by_compute_host)
- def _test(self, compute_node_get_all):
- req = self._get_request(True)
- result = self.controller.detail(req)
- self.assertEqual(1, len(result['hypervisors']))
- expected = {
- 'id': compute_nodes[0].id,
- 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
- 'state': 'up',
- 'status': 'enabled',
- }
- # we don't care about all of the details, just make sure we get
- # the subset we care about and there are more keys than what index
- # would return
- hypervisor = result['hypervisors'][0]
- self.assertTrue(
- set(expected.keys()).issubset(set(hypervisor.keys())))
- self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
- self.assertEqual(compute_nodes[0].hypervisor_hostname,
- hypervisor['hypervisor_hostname'])
-
- _test(self)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ fake_service_get_by_compute_host)
+ self.controller.host_api.compute_node_get_all.return_value = (
+ compute_nodes)
+ req = self._get_request(True)
+ result = self.controller.detail(req)
+ self.assertEqual(1, len(result['hypervisors']))
+ expected = {
+ 'id': compute_nodes[0].id,
+ 'hypervisor_hostname': compute_nodes[0].hypervisor_hostname,
+ 'state': 'up',
+ 'status': 'enabled',
+ }
+ # we don't care about all of the details, just make sure we get
+ # the subset we care about and there are more keys than what index
+ # would return
+ hypervisor = result['hypervisors'][0]
+ self.assertTrue(
+ set(expected.keys()).issubset(set(hypervisor.keys())))
+ self.assertGreater(len(hypervisor.keys()), len(expected.keys()))
+ self.assertEqual(compute_nodes[0].hypervisor_hostname,
+ hypervisor['hypervisor_hostname'])
def test_show(self):
req = self._get_request(True)
@@ -525,21 +514,16 @@ class HypervisorsTestV21(test.NoDBTestCase):
"""Tests that if a service is deleted but the compute node is not we
don't fail when listing hypervisors.
"""
-
- @mock.patch.object(self.controller.host_api, 'compute_node_get',
- return_value=self.TEST_HYPERS_OBJ[0])
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host')
- def _test(self, mock_service, mock_compute_node_get):
- req = self._get_request(True)
- mock_service.side_effect = exception.HostMappingNotFound(
- name='foo')
- hyper_id = self._get_hyper_id()
- self.assertRaises(exc.HTTPNotFound, self.controller.show,
- req, hyper_id)
- self.assertTrue(mock_service.called)
- mock_compute_node_get.assert_called_once_with(mock.ANY, hyper_id)
- _test(self)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='foo'))
+ req = self._get_request(True)
+ hyper_id = self._get_hyper_id()
+ self.assertRaises(
+ exc.HTTPNotFound, self.controller.show, req, hyper_id)
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
+ self.controller.host_api.compute_node_get.assert_called_once_with(
+ mock.ANY, hyper_id)
def test_show_noid(self):
req = self._get_request(True)
@@ -611,20 +595,15 @@ class HypervisorsTestV21(test.NoDBTestCase):
mock.ANY, self.TEST_HYPERS_OBJ[0].host)
def test_uptime_hypervisor_not_mapped_service_get(self):
- @mock.patch.object(self.controller.host_api, 'compute_node_get')
- @mock.patch.object(self.controller.host_api, 'get_host_uptime')
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host',
- side_effect=exception.HostMappingNotFound(
- name='dummy'))
- def _test(mock_get, _, __):
- req = self._get_request(True)
- hyper_id = self._get_hyper_id()
- self.assertRaises(exc.HTTPNotFound,
- self.controller.uptime, req, hyper_id)
- self.assertTrue(mock_get.called)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='dummy'))
- _test()
+ req = self._get_request(True)
+ hyper_id = self._get_hyper_id()
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.uptime, req, hyper_id)
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
def test_uptime_hypervisor_not_mapped(self):
with mock.patch.object(self.controller.host_api, 'get_host_uptime',
@@ -644,30 +623,26 @@ class HypervisorsTestV21(test.NoDBTestCase):
self.assertEqual(dict(hypervisors=self.INDEX_HYPER_DICTS), result)
def test_search_non_exist(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.search,
- req, 'a')
- self.assertEqual(1, mock_node_search.call_count)
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
+
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
+ self.assertEqual(1, m_search.call_count)
def test_search_unmapped(self):
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = [mock.MagicMock()]
- @mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor')
- @mock.patch.object(self.controller.host_api,
- 'service_get_by_compute_host')
- def _test(mock_service, mock_search):
- mock_search.return_value = [mock.MagicMock()]
- mock_service.side_effect = exception.HostMappingNotFound(
- name='foo')
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound, self.controller.search,
- req, 'a')
- self.assertTrue(mock_service.called)
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.HostMappingNotFound(name='foo'))
- _test()
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound, self.controller.search, req, 'a')
+ self.assertTrue(
+ self.controller.host_api.service_get_by_compute_host.called)
@mock.patch.object(objects.InstanceList, 'get_by_host',
side_effect=fake_instance_get_all_by_host)
@@ -702,15 +677,12 @@ class HypervisorsTestV21(test.NoDBTestCase):
def test_servers_compute_host_not_found(self):
req = self._get_request(True)
- with test.nested(
- mock.patch.object(
- self.controller.host_api, 'instance_get_all_by_host',
- side_effect=fake_instance_get_all_by_host,
- ),
- mock.patch.object(
- self.controller.host_api, 'service_get_by_compute_host',
- side_effect=exception.ComputeHostNotFound(host='foo'),
- ),
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.ComputeHostNotFound(host='foo'))
+ with mock.patch.object(
+ self.controller.host_api,
+ 'instance_get_all_by_host',
+ side_effect=fake_instance_get_all_by_host,
):
# The result should be empty since every attempt to fetch the
# service for a hypervisor "failed"
@@ -718,24 +690,25 @@ class HypervisorsTestV21(test.NoDBTestCase):
self.assertEqual({'hypervisors': []}, result)
def test_servers_non_id(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound,
- self.controller.servers,
- req, '115')
- self.assertEqual(1, mock_node_search.call_count)
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
+
+ req = self._get_request(True)
+ self.assertRaises(exc.HTTPNotFound,
+ self.controller.servers,
+ req, '115')
+ self.assertEqual(1, m_search.call_count)
def test_servers_with_non_integer_hypervisor_id(self):
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=[]) as mock_node_search:
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = []
- req = self._get_request(True)
- self.assertRaises(exc.HTTPNotFound,
- self.controller.servers, req, 'abc')
- self.assertEqual(1, mock_node_search.call_count)
+ req = self._get_request(True)
+ self.assertRaises(
+ exc.HTTPNotFound, self.controller.servers, req, 'abc')
+ self.assertEqual(1, m_search.call_count)
def test_servers_with_no_servers(self):
with mock.patch.object(self.controller.host_api,
@@ -1089,15 +1062,13 @@ class HypervisorsTestV253(HypervisorsTestV252):
use_admin_context=True,
url='/os-hypervisors?with_servers=1')
- with test.nested(
- mock.patch.object(
- self.controller.host_api, 'instance_get_all_by_host',
- side_effect=fake_instance_get_all_by_host,
- ),
- mock.patch.object(
- self.controller.host_api, 'service_get_by_compute_host',
- side_effect=exception.ComputeHostNotFound(host='foo'),
- ),
+ self.controller.host_api.service_get_by_compute_host.side_effect = (
+ exception.ComputeHostNotFound(host='foo'))
+
+ with mock.patch.object(
+ self.controller.host_api,
+ "instance_get_all_by_host",
+ side_effect=fake_instance_get_all_by_host,
):
# The result should be empty since every attempt to fetch the
# service for a hypervisor "failed"
@@ -1157,11 +1128,13 @@ class HypervisorsTestV253(HypervisorsTestV252):
use_admin_context=True,
url='/os-hypervisors?with_servers=yes&'
'hypervisor_hostname_pattern=shenzhen')
- with mock.patch.object(self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=objects.ComputeNodeList()) as s:
- self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
- s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = objects.ComputeNodeList()
+
+ self.assertRaises(exc.HTTPNotFound, self.controller.index, req)
+ m_search.assert_called_once_with(
+ req.environ['nova.context'], 'shenzhen')
def test_detail_with_hostname_pattern(self):
"""Test listing hypervisors with details and using the
@@ -1170,13 +1143,14 @@ class HypervisorsTestV253(HypervisorsTestV252):
req = self._get_request(
use_admin_context=True,
url='/os-hypervisors?hypervisor_hostname_pattern=shenzhen')
- with mock.patch.object(
- self.controller.host_api,
- 'compute_node_search_by_hypervisor',
- return_value=objects.ComputeNodeList(objects=[TEST_HYPERS_OBJ[0]])
- ) as s:
- result = self.controller.detail(req)
- s.assert_called_once_with(req.environ['nova.context'], 'shenzhen')
+ m_search = self.controller.host_api.compute_node_search_by_hypervisor
+ m_search.side_effect = None
+ m_search.return_value = objects.ComputeNodeList(
+ objects=[TEST_HYPERS_OBJ[0]])
+
+ result = self.controller.detail(req)
+ m_search.assert_called_once_with(
+ req.environ['nova.context'], 'shenzhen')
expected = {'hypervisors': [self.DETAIL_HYPERS_DICTS[0]]}
@@ -1483,15 +1457,11 @@ class HypervisorsTestV288(HypervisorsTestV275):
self.controller.uptime, req)
def test_uptime_old_version(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- return_value='fake uptime',
- ):
- req = self._get_request(use_admin_context=True, version='2.87')
- hyper_id = self._get_hyper_id()
+ req = self._get_request(use_admin_context=True, version='2.87')
+ hyper_id = self._get_hyper_id()
- # no exception == pass
- self.controller.uptime(req, hyper_id)
+ # no exception == pass
+ self.controller.uptime(req, hyper_id)
def test_uptime_noid(self):
# the separate 'uptime' API has been removed, so skip this test
@@ -1526,34 +1496,36 @@ class HypervisorsTestV288(HypervisorsTestV275):
pass
def test_show_with_uptime_notimplemented(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- side_effect=NotImplementedError,
- ) as mock_get_uptime:
- req = self._get_request(use_admin_context=True)
- hyper_id = self._get_hyper_id()
+ self.controller.host_api.get_host_uptime.side_effect = (
+ NotImplementedError())
- result = self.controller.show(req, hyper_id)
+ req = self._get_request(use_admin_context=True)
+ hyper_id = self._get_hyper_id()
- expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
- expected_dict.update({'uptime': None})
- self.assertEqual({'hypervisor': expected_dict}, result)
- self.assertEqual(1, mock_get_uptime.call_count)
+ result = self.controller.show(req, hyper_id)
+
+ expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
+ expected_dict.update({'uptime': None})
+ self.assertEqual({'hypervisor': expected_dict}, result)
+ self.assertEqual(
+ 1, self.controller.host_api.get_host_uptime.call_count)
def test_show_with_uptime_hypervisor_down(self):
- with mock.patch.object(
- self.controller.host_api, 'get_host_uptime',
- side_effect=exception.ComputeServiceUnavailable(host='dummy')
- ) as mock_get_uptime:
- req = self._get_request(use_admin_context=True)
- hyper_id = self._get_hyper_id()
+ self.controller.host_api.get_host_uptime.side_effect = (
+ exception.ComputeServiceUnavailable(host='dummy'))
- result = self.controller.show(req, hyper_id)
+ req = self._get_request(use_admin_context=True)
+ hyper_id = self._get_hyper_id()
- expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
- expected_dict.update({'uptime': None})
- self.assertEqual({'hypervisor': expected_dict}, result)
- self.assertEqual(1, mock_get_uptime.call_count)
+ result = self.controller.show(req, hyper_id)
+
+ expected_dict = copy.deepcopy(self.DETAIL_HYPERS_DICTS[0])
+ expected_dict.update({'uptime': None})
+ self.assertEqual({'hypervisor': expected_dict}, result)
+ self.assertEqual(
+ 1,
+ self.controller.host_api.get_host_uptime.call_count
+ )
def test_show_old_version(self):
# ensure things still work as expected here
diff --git a/nova/tests/unit/api/openstack/compute/test_image_metadata.py b/nova/tests/unit/api/openstack/compute/test_image_metadata.py
index 2e1c26a712..4072d6f489 100644
--- a/nova/tests/unit/api/openstack/compute/test_image_metadata.py
+++ b/nova/tests/unit/api/openstack/compute/test_image_metadata.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_images.py b/nova/tests/unit/api/openstack/compute/test_images.py
index fad4fcb5a2..734e755dd5 100644
--- a/nova/tests/unit/api/openstack/compute/test_images.py
+++ b/nova/tests/unit/api/openstack/compute/test_images.py
@@ -19,9 +19,9 @@ and as a WSGI layer
"""
import copy
+from unittest import mock
from urllib import parse as urlparse
-import mock
import webob
from nova.api.openstack.compute import images as images_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_instance_actions.py b/nova/tests/unit/api/openstack/compute/test_instance_actions.py
index 04e9ae443e..df13e1d89d 100644
--- a/nova/tests/unit/api/openstack/compute/test_instance_actions.py
+++ b/nova/tests/unit/api/openstack/compute/test_instance_actions.py
@@ -15,9 +15,9 @@
import copy
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_policy import policy as oslo_policy
from oslo_utils.fixture import uuidsentinel as uuids
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_keypairs.py b/nova/tests/unit/api/openstack/compute/test_keypairs.py
index 657973ffbd..590639d5ed 100644
--- a/nova/tests/unit/api/openstack/compute/test_keypairs.py
+++ b/nova/tests/unit/api/openstack/compute/test_keypairs.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack.compute import keypairs as keypairs_v21
@@ -37,6 +38,8 @@ keypair_data = {
FAKE_UUID = 'b48316c5-71e8-45e4-9884-6c78055b9b13'
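+# Keypair name containing characters ('@', '.', a space) that are only
+# accepted starting with microversion 2.92.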
+keypair_name_2_92_compatible = 'my-key@ my.host'
+
def fake_keypair(name):
return dict(test_keypair.fake_keypair,
@@ -110,16 +113,22 @@ class KeypairsTestV21(test.TestCase):
self.assertGreater(len(res_dict['keypair']['private_key']), 0)
self._assert_keypair_type(res_dict)
- def _test_keypair_create_bad_request_case(self,
- body,
- exception):
- self.assertRaises(exception,
- self.controller.create, self.req, body=body)
+ def _test_keypair_create_bad_request_case(
+ self, body, exception, error_msg=None
+ ):
+ if error_msg:
+ self.assertRaisesRegex(exception, error_msg,
+ self.controller.create,
+ self.req, body=body)
+ else:
+ self.assertRaises(exception,
+ self.controller.create, self.req, body=body)
def test_keypair_create_with_empty_name(self):
body = {'keypair': {'name': ''}}
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ 'is too short')
def test_keypair_create_with_name_too_long(self):
body = {
@@ -128,7 +137,8 @@ class KeypairsTestV21(test.TestCase):
}
}
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ 'is too long')
def test_keypair_create_with_name_leading_trailing_spaces(self):
body = {
@@ -136,8 +146,10 @@ class KeypairsTestV21(test.TestCase):
'name': ' test '
}
}
+ expected_msg = 'Can not start or end with whitespace.'
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ expected_msg)
def test_keypair_create_with_name_leading_trailing_spaces_compat_mode(
self):
@@ -152,8 +164,21 @@ class KeypairsTestV21(test.TestCase):
'name': 'test/keypair'
}
}
+ expected_msg = 'Only expected characters'
self._test_keypair_create_bad_request_case(body,
- webob.exc.HTTPBadRequest)
+ self.validation_error,
+ expected_msg)
+
+ def test_keypair_create_with_special_characters(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible
+ }
+ }
+ expected_msg = 'Only expected characters'
+ self._test_keypair_create_bad_request_case(body,
+ self.validation_error,
+ expected_msg)
def test_keypair_import_bad_key(self):
body = {
@@ -167,8 +192,10 @@ class KeypairsTestV21(test.TestCase):
def test_keypair_create_with_invalid_keypair_body(self):
body = {'alpha': {'name': 'create_test'}}
+ expected_msg = "'keypair' is a required property"
self._test_keypair_create_bad_request_case(body,
- self.validation_error)
+ self.validation_error,
+ expected_msg)
def test_keypair_import(self):
body = {
@@ -228,50 +255,6 @@ class KeypairsTestV21(test.TestCase):
self.controller.create, self.req, body=body)
self.assertIn('Quota exceeded, too many key pairs.', ex.explanation)
- @mock.patch('nova.objects.Quotas.check_deltas')
- def test_keypair_create_over_quota_during_recheck(self, mock_check):
- # Simulate a race where the first check passes and the recheck fails.
- # First check occurs in compute/api.
- exc = exception.OverQuota(overs='key_pairs', usages={'key_pairs': 100})
- mock_check.side_effect = [None, exc]
- body = {
- 'keypair': {
- 'name': 'FAKE',
- },
- }
-
- self.assertRaises(webob.exc.HTTPForbidden,
- self.controller.create, self.req, body=body)
-
- ctxt = self.req.environ['nova.context']
- self.assertEqual(2, mock_check.call_count)
- call1 = mock.call(ctxt, {'key_pairs': 1}, ctxt.user_id)
- call2 = mock.call(ctxt, {'key_pairs': 0}, ctxt.user_id)
- mock_check.assert_has_calls([call1, call2])
-
- # Verify we removed the key pair that was added after the first
- # quota check passed.
- key_pairs = objects.KeyPairList.get_by_user(ctxt, ctxt.user_id)
- names = [key_pair.name for key_pair in key_pairs]
- self.assertNotIn('create_test', names)
-
- @mock.patch('nova.objects.Quotas.check_deltas')
- def test_keypair_create_no_quota_recheck(self, mock_check):
- # Disable recheck_quota.
- self.flags(recheck_quota=False, group='quota')
-
- body = {
- 'keypair': {
- 'name': 'create_test',
- },
- }
- self.controller.create(self.req, body=body)
-
- ctxt = self.req.environ['nova.context']
- # check_deltas should have been called only once.
- mock_check.assert_called_once_with(ctxt, {'key_pairs': 1},
- ctxt.user_id)
-
def test_keypair_create_duplicate(self):
self.stub_out("nova.objects.KeyPair.create",
db_key_pair_create_duplicate)
@@ -514,3 +497,82 @@ class KeypairsTestV275(test.TestCase):
version='2.75', use_admin_context=True)
self.assertRaises(exception.ValidationError, self.controller.delete,
req, 1)
+
+
+class KeypairsTestV292(test.TestCase):
+ wsgi_api_version = '2.92'
+ wsgi_old_api_version = '2.91'
+
+ def setUp(self):
+ super(KeypairsTestV292, self).setUp()
+ self.controller = keypairs_v21.KeypairController()
+ self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ self.old_req = fakes.HTTPRequest.blank(
+ '', version=self.wsgi_old_api_version)
+
+ def test_keypair_create_no_longer_supported(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible,
+ }
+ }
+ self.assertRaises(exception.ValidationError, self.controller.create,
+ self.req, body=body)
+
+ def test_keypair_create_works_with_old_version(self):
+ body = {
+ 'keypair': {
+ 'name': 'fake',
+ }
+ }
+ res_dict = self.controller.create(self.old_req, body=body)
+ self.assertEqual('fake', res_dict['keypair']['name'])
+ self.assertGreater(len(res_dict['keypair']['private_key']), 0)
+
+ def test_keypair_import_works_with_new_version(self):
+ body = {
+ 'keypair': {
+ 'name': 'fake',
+ 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
+ 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
+ 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
+ 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
+ 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
+ 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
+ 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
+ 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
+ 'bHkXa6OciiJDvkRzJXzf',
+ }
+ }
+ res_dict = self.controller.create(self.req, body=body)
+ self.assertEqual('fake', res_dict['keypair']['name'])
+ self.assertNotIn('private_key', res_dict['keypair'])
+
+ def test_keypair_create_refuses_special_chars_with_old_version(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible,
+ }
+ }
+ self.assertRaises(exception.ValidationError, self.controller.create,
+ self.old_req, body=body)
+
+ def test_keypair_import_with_special_characters(self):
+ body = {
+ 'keypair': {
+ 'name': keypair_name_2_92_compatible,
+ 'public_key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBYIznA'
+ 'x9D7118Q1VKGpXy2HDiKyUTM8XcUuhQpo0srqb9rboUp4'
+ 'a9NmCwpWpeElDLuva707GOUnfaBAvHBwsRXyxHJjRaI6Y'
+ 'Qj2oLJwqvaSaWUbyT1vtryRqy6J3TecN0WINY71f4uymi'
+ 'MZP0wby4bKBcYnac8KiCIlvkEl0ETjkOGUq8OyWRmn7lj'
+ 'j5SESEUdBP0JnuTFKddWTU/wD6wydeJaUhBTqOlHn0kX1'
+ 'GyqoNTE1UEhcM5ZRWgfUZfTjVyDF2kGj3vJLCJtJ8LoGc'
+ 'j7YaN4uPg1rBle+izwE/tLonRrds+cev8p6krSSrxWOwB'
+ 'bHkXa6OciiJDvkRzJXzf',
+ }
+ }
+
+ res_dict = self.controller.create(self.req, body=body)
+ self.assertEqual(keypair_name_2_92_compatible,
+ res_dict['keypair']['name'])
diff --git a/nova/tests/unit/api/openstack/compute/test_limits.py b/nova/tests/unit/api/openstack/compute/test_limits.py
index 31033e111d..1748023aa8 100644
--- a/nova/tests/unit/api/openstack/compute/test_limits.py
+++ b/nova/tests/unit/api/openstack/compute/test_limits.py
@@ -19,8 +19,9 @@ Tests dealing with HTTP rate-limiting.
from http import client as httplib
from io import StringIO
+from unittest import mock
-import mock
+from oslo_limit import fixture as limit_fixture
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
@@ -29,8 +30,10 @@ from nova.api.openstack.compute import views
from nova.api.openstack import wsgi
import nova.context
from nova import exception
+from nova.limit import local as local_limit
+from nova.limit import placement as placement_limit
+from nova import objects
from nova.policies import limits as l_policies
-from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
@@ -48,12 +51,12 @@ class BaseLimitTestSuite(test.NoDBTestCase):
return {k: dict(limit=v, in_use=v // 2)
for k, v in self.absolute_limits.items()}
- mock_get_project_quotas = mock.patch.object(
+ patcher_get_project_quotas = mock.patch.object(
nova.quota.QUOTAS,
"get_project_quotas",
- side_effect = stub_get_project_quotas)
- mock_get_project_quotas.start()
- self.addCleanup(mock_get_project_quotas.stop)
+ side_effect=stub_get_project_quotas)
+ self.mock_get_project_quotas = patcher_get_project_quotas.start()
+ self.addCleanup(patcher_get_project_quotas.stop)
patcher = self.mock_can = mock.patch('nova.context.RequestContext.can')
self.mock_can = patcher.start()
self.addCleanup(patcher.stop)
@@ -150,16 +153,14 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in self.absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
- response = request.get_response(self.controller)
+ response = request.get_response(self.controller)
- body = jsonutils.loads(response.body)
- self.assertEqual(expected, body)
- get_project_quotas.assert_called_once_with(context, tenant_id,
- usages=True)
+ body = jsonutils.loads(response.body)
+ self.assertEqual(expected, body)
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, tenant_id, usages=True)
def _do_test_used_limits(self, reserved):
request = self._get_index_request(tenant_id=None)
@@ -182,8 +183,7 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
def stub_get_project_quotas(context, project_id, usages=True):
return limits
- self.stub_out('nova.quota.QUOTAS.get_project_quotas',
- stub_get_project_quotas)
+ self.mock_get_project_quotas.side_effect = stub_get_project_quotas
res = request.get_response(self.controller)
body = jsonutils.loads(res.body)
@@ -207,15 +207,15 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
user_id=user_id,
project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
- self.assertEqual(2, self.mock_can.call_count)
- self.mock_can.assert_called_with(
- l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME,
- target={"project_id": tenant_id})
- mock_get_quotas.assert_called_once_with(context,
- tenant_id, usages=True)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
+
+ fake_req.get_response(self.controller)
+ self.assertEqual(2, self.mock_can.call_count)
+ self.mock_can.assert_called_with(
+ l_policies.OTHER_PROJECT_LIMIT_POLICY_NAME)
+ self.mock_get_project_quotas.assert_called_once_with(context,
+ tenant_id, usages=True)
def _test_admin_can_fetch_used_limits_for_own_project(self, req_get):
project_id = "123456"
@@ -227,11 +227,12 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
- mock_get_quotas.assert_called_once_with(context,
- project_id, usages=True)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
+
+ fake_req.get_response(self.controller)
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, project_id, usages=True)
def test_admin_can_fetch_used_limits_for_own_project(self):
req_get = {}
@@ -251,7 +252,7 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
req_get = {'tenant_id': -1}
self._test_admin_can_fetch_used_limits_for_own_project(req_get)
- def test_admin_can_fetch_used_limits_with_unkown_param(self):
+ def test_admin_can_fetch_used_limits_with_unknown_param(self):
req_get = {'tenant_id': '123', 'unknown': 'unknown'}
self._test_admin_can_fetch_used_limits_for_own_project(req_get)
@@ -259,12 +260,13 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
project_id = "123456"
fake_req = self._get_index_request(project_id=project_id)
context = fake_req.environ["nova.context"]
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
- fake_req.get_response(self.controller)
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
- mock_get_quotas.assert_called_once_with(context,
- project_id, usages=True)
+ fake_req.get_response(self.controller)
+
+ self.mock_get_project_quotas.assert_called_once_with(
+ context, project_id, usages=True)
def test_used_ram_added(self):
fake_req = self._get_index_request()
@@ -272,28 +274,26 @@ class LimitsControllerTestV21(BaseLimitTestSuite):
def stub_get_project_quotas(context, project_id, usages=True):
return {'ram': {'limit': 512, 'in_use': 256}}
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- side_effect=stub_get_project_quotas
- ) as mock_get_quotas:
+ self.mock_get_project_quotas.side_effect = stub_get_project_quotas
- res = fake_req.get_response(self.controller)
- body = jsonutils.loads(res.body)
- abs_limits = body['limits']['absolute']
- self.assertIn('totalRAMUsed', abs_limits)
- self.assertEqual(256, abs_limits['totalRAMUsed'])
- self.assertEqual(1, mock_get_quotas.call_count)
+ res = fake_req.get_response(self.controller)
+ body = jsonutils.loads(res.body)
+ abs_limits = body['limits']['absolute']
+ self.assertIn('totalRAMUsed', abs_limits)
+ self.assertEqual(256, abs_limits['totalRAMUsed'])
+ self.assertEqual(1, self.mock_get_project_quotas.call_count)
def test_no_ram_quota(self):
fake_req = self._get_index_request()
- with mock.patch.object(quota.QUOTAS, 'get_project_quotas',
- return_value={}) as mock_get_quotas:
+ self.mock_get_project_quotas.side_effect = None
+ self.mock_get_project_quotas.return_value = {}
- res = fake_req.get_response(self.controller)
- body = jsonutils.loads(res.body)
- abs_limits = body['limits']['absolute']
- self.assertNotIn('totalRAMUsed', abs_limits)
- self.assertEqual(1, mock_get_quotas.call_count)
+ res = fake_req.get_response(self.controller)
+ body = jsonutils.loads(res.body)
+ abs_limits = body['limits']['absolute']
+ self.assertNotIn('totalRAMUsed', abs_limits)
+ self.assertEqual(1, self.mock_get_project_quotas.call_count)
class FakeHttplibSocket(object):
@@ -395,25 +395,24 @@ class LimitsControllerTestV236(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- response = self.controller.index(self.req)
- expected_response = {
- "limits": {
- "rate": [],
- "absolute": {
- "maxTotalRAMSize": 512,
- "maxTotalInstances": 5,
- "maxTotalCores": 21,
- "maxTotalKeypairs": 10,
- "totalRAMUsed": 256,
- "totalCoresUsed": 10,
- "totalInstancesUsed": 2,
- },
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+
+ response = self.controller.index(self.req)
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxTotalRAMSize": 512,
+ "maxTotalInstances": 5,
+ "maxTotalCores": 21,
+ "maxTotalKeypairs": 10,
+ "totalRAMUsed": 256,
+ "totalCoresUsed": 10,
+ "totalInstancesUsed": 2,
},
- }
- self.assertEqual(expected_response, response)
+ },
+ }
+ self.assertEqual(expected_response, response)
class LimitsControllerTestV239(BaseLimitTestSuite):
@@ -433,21 +432,20 @@ class LimitsControllerTestV239(BaseLimitTestSuite):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- response = self.controller.index(self.req)
- # staring from version 2.39 there is no 'maxImageMeta' field
- # in response after removing 'image-metadata' proxy API
- expected_response = {
- "limits": {
- "rate": [],
- "absolute": {
- "maxServerMeta": 1,
- },
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+
+ response = self.controller.index(self.req)
+ # starting from version 2.39 there is no 'maxImageMeta' field
+ # in response after removing 'image-metadata' proxy API
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ "maxServerMeta": 1,
},
- }
- self.assertEqual(expected_response, response)
+ },
+ }
+ self.assertEqual(expected_response, response)
class LimitsControllerTestV275(BaseLimitTestSuite):
@@ -459,21 +457,170 @@ class LimitsControllerTestV275(BaseLimitTestSuite):
absolute_limits = {
"metadata_items": 1,
}
- req = fakes.HTTPRequest.blank("/?unkown=fake",
+ req = fakes.HTTPRequest.blank("/?unknown=fake",
version='2.74')
def _get_project_quotas(context, project_id, usages=True):
return {k: dict(limit=v, in_use=v // 2)
for k, v in absolute_limits.items()}
- with mock.patch('nova.quota.QUOTAS.get_project_quotas') as \
- get_project_quotas:
- get_project_quotas.side_effect = _get_project_quotas
- self.controller.index(req)
+ self.mock_get_project_quotas.side_effect = _get_project_quotas
+ self.controller.index(req)
def test_index_additional_query_param(self):
- req = fakes.HTTPRequest.blank("/?unkown=fake",
+ req = fakes.HTTPRequest.blank("/?unknown=fake",
version='2.75')
self.assertRaises(
exception.ValidationError,
self.controller.index, req=req)
+
+
+class NoopLimitsControllerTest(test.NoDBTestCase):
+ quota_driver = "nova.quota.NoopQuotaDriver"
+
+ def setUp(self):
+ super(NoopLimitsControllerTest, self).setUp()
+ self.flags(driver=self.quota_driver, group="quota")
+ self.controller = limits_v21.LimitsController()
+ # remove policy checks
+ patcher = self.mock_can = mock.patch('nova.context.RequestContext.can')
+ self.mock_can = patcher.start()
+ self.addCleanup(patcher.stop)
+
+ def test_index_v21(self):
+ req = fakes.HTTPRequest.blank("/")
+ response = self.controller.index(req)
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ 'maxImageMeta': -1,
+ 'maxPersonality': -1,
+ 'maxPersonalitySize': -1,
+ 'maxSecurityGroupRules': -1,
+ 'maxSecurityGroups': -1,
+ 'maxServerGroupMembers': -1,
+ 'maxServerGroups': -1,
+ 'maxServerMeta': -1,
+ 'maxTotalCores': -1,
+ 'maxTotalFloatingIps': -1,
+ 'maxTotalInstances': -1,
+ 'maxTotalKeypairs': -1,
+ 'maxTotalRAMSize': -1,
+ 'totalCoresUsed': -1,
+ 'totalFloatingIpsUsed': -1,
+ 'totalInstancesUsed': -1,
+ 'totalRAMUsed': -1,
+ 'totalSecurityGroupsUsed': -1,
+ 'totalServerGroupsUsed': -1,
+ },
+ },
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_index_v275(self):
+ req = fakes.HTTPRequest.blank("/?tenant_id=faketenant",
+ version='2.75')
+ response = self.controller.index(req)
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ 'maxServerGroupMembers': -1,
+ 'maxServerGroups': -1,
+ 'maxServerMeta': -1,
+ 'maxTotalCores': -1,
+ 'maxTotalInstances': -1,
+ 'maxTotalKeypairs': -1,
+ 'maxTotalRAMSize': -1,
+ 'totalCoresUsed': -1,
+ 'totalInstancesUsed': -1,
+ 'totalRAMUsed': -1,
+ 'totalServerGroupsUsed': -1,
+ },
+ },
+ }
+ self.assertEqual(expected_response, response)
+
+
+class UnifiedLimitsControllerTest(NoopLimitsControllerTest):
+ quota_driver = "nova.quota.UnifiedLimitsDriver"
+
+ def setUp(self):
+ super(UnifiedLimitsControllerTest, self).setUp()
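+ # Register local API limits through the oslo.limit fixture; these
+ # values drive the max* fields asserted in the responses below.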
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 12,
+ local_limit.SERVER_GROUP_MEMBERS: 10}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_index_v21(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
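+ # The mocked project limits surface as the maxTotal* values and the
+ # mocked counts as the total*Used values asserted below.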
+ req = fakes.HTTPRequest.blank("/")
+ response = self.controller.index(req)
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ 'maxImageMeta': 128,
+ 'maxPersonality': 5,
+ 'maxPersonalitySize': 10240,
+ 'maxSecurityGroupRules': -1,
+ 'maxSecurityGroups': -1,
+ 'maxServerGroupMembers': 10,
+ 'maxServerGroups': 12,
+ 'maxServerMeta': 128,
+ 'maxTotalCores': 2,
+ 'maxTotalFloatingIps': -1,
+ 'maxTotalInstances': 1,
+ 'maxTotalKeypairs': 100,
+ 'maxTotalRAMSize': 3,
+ 'totalCoresUsed': 5,
+ 'totalFloatingIpsUsed': 0,
+ 'totalInstancesUsed': 4,
+ 'totalRAMUsed': 6,
+ 'totalSecurityGroupsUsed': 0,
+ 'totalServerGroupsUsed': 9,
+ },
+ },
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_index_v275(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ req = fakes.HTTPRequest.blank("/?tenant_id=faketenant",
+ version='2.75')
+ response = self.controller.index(req)
+ expected_response = {
+ "limits": {
+ "rate": [],
+ "absolute": {
+ 'maxServerGroupMembers': 10,
+ 'maxServerGroups': 12,
+ 'maxServerMeta': 128,
+ 'maxTotalCores': 2,
+ 'maxTotalInstances': 1,
+ 'maxTotalKeypairs': 100,
+ 'maxTotalRAMSize': 3,
+ 'totalCoresUsed': 5,
+ 'totalInstancesUsed': 4,
+ 'totalRAMUsed': 6,
+ 'totalServerGroupsUsed': 9,
+ },
+ },
+ }
+ self.assertEqual(expected_response, response)
diff --git a/nova/tests/unit/api/openstack/compute/test_lock_server.py b/nova/tests/unit/api/openstack/compute/test_lock_server.py
index a605e2bcdb..bf49bf2b73 100644
--- a/nova/tests/unit/api/openstack/compute/test_lock_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_lock_server.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack import api_version_request
from nova.api.openstack import common
@@ -114,7 +114,7 @@ class LockServerTestsV273(LockServerTestsV21):
self.controller._lock, self.req, instance.uuid, body=body)
self.assertIn("256 is not of type 'string'", str(exp))
- def test_lock_with_invalid_paramater(self):
+ def test_lock_with_invalid_parameter(self):
# This will fail from 2.73 since we have a schema check that allows
# only locked_reason
instance = fake_instance.fake_instance_obj(
diff --git a/nova/tests/unit/api/openstack/compute/test_microversions.py b/nova/tests/unit/api/openstack/compute/test_microversions.py
index c5b1ddb5e5..9f5dd90889 100644
--- a/nova/tests/unit/api/openstack/compute/test_microversions.py
+++ b/nova/tests/unit/api/openstack/compute/test_microversions.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from nova.api.openstack import api_version_request as api_version
diff --git a/nova/tests/unit/api/openstack/compute/test_migrate_server.py b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
index 683759eccc..8d1c853206 100644
--- a/nova/tests/unit/api/openstack/compute/test_migrate_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
import webob
@@ -530,9 +531,8 @@ class MigrateServerTestsV256(MigrateServerTestsV234):
self.req, fakes.FAKE_UUID, body=body)
def _test_migrate_exception(self, exc_info, expected_result):
- @mock.patch.object(self.compute_api, 'get')
@mock.patch.object(self.compute_api, 'resize', side_effect=exc_info)
- def _test(mock_resize, mock_get):
+ def _test(mock_resize):
instance = objects.Instance(uuid=uuids.instance)
self.assertRaises(expected_result,
self.controller._migrate,
diff --git a/nova/tests/unit/api/openstack/compute/test_migrations.py b/nova/tests/unit/api/openstack/compute/test_migrations.py
index a06d395bea..19bc42a9de 100644
--- a/nova/tests/unit/api/openstack/compute/test_migrations.py
+++ b/nova/tests/unit/api/openstack/compute/test_migrations.py
@@ -13,9 +13,9 @@
# under the License.
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_multinic.py b/nova/tests/unit/api/openstack/compute/test_multinic.py
index ceaaebf373..17a872fed2 100644
--- a/nova/tests/unit/api/openstack/compute/test_multinic.py
+++ b/nova/tests/unit/api/openstack/compute/test_multinic.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
import webob
from nova.api.openstack.compute import multinic as multinic_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_networks.py b/nova/tests/unit/api/openstack/compute/test_networks.py
index 595353e7b1..bcbce58483 100644
--- a/nova/tests/unit/api/openstack/compute/test_networks.py
+++ b/nova/tests/unit/api/openstack/compute/test_networks.py
@@ -26,7 +26,7 @@ from nova import test
from nova.tests.unit.api.openstack import fakes
-# NOTE(stephenfin): obviously these aren't complete reponses, but this is all
+# NOTE(stephenfin): obviously these aren't complete responses, but this is all
# we care about
FAKE_NETWORKS = [
{
diff --git a/nova/tests/unit/api/openstack/compute/test_quota_classes.py b/nova/tests/unit/api/openstack/compute/test_quota_classes.py
index bdb33a7e1a..463f8344c0 100644
--- a/nova/tests/unit/api/openstack/compute/test_quota_classes.py
+++ b/nova/tests/unit/api/openstack/compute/test_quota_classes.py
@@ -12,12 +12,19 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
import copy
+from unittest import mock
+
+from oslo_limit import fixture as limit_fixture
import webob
from nova.api.openstack.compute import quota_classes \
as quota_classes_v21
from nova import exception
+from nova.limit import local as local_limit
+from nova.limit import placement as placement_limit
+from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -156,3 +163,220 @@ class QuotaClassSetsTestV257(QuotaClassSetsTestV250):
for resource in quota_classes_v21.FILTERED_QUOTAS_2_57:
self.quota_resources.pop(resource, None)
self.filtered_quotas.extend(quota_classes_v21.FILTERED_QUOTAS_2_57)
+
+
+class NoopQuotaClassesTest(test.NoDBTestCase):
+ quota_driver = "nova.quota.NoopQuotaDriver"
+
+ def setUp(self):
+ super(NoopQuotaClassesTest, self).setUp()
+ self.flags(driver=self.quota_driver, group="quota")
+ self.controller = quota_classes_v21.QuotaClassSetsController()
+
+ def test_show_v21(self):
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.show(req, "test_class")
+ expected_response = {
+ 'quota_class_set': {
+ 'id': 'test_class',
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_show_v257(self):
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ response = self.controller.show(req, "default")
+ expected_response = {
+ 'quota_class_set': {
+ 'id': 'default',
+ 'cores': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_update_v21_still_rejects_badrequests(self):
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_class_set': {'instances': 50, 'cores': 50,
+ 'ram': 51200, 'unsupported': 12}}
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ req, 'test_class', body=body)
+
+ @mock.patch.object(objects.Quotas, "update_class")
+ def test_update_v21(self, mock_update):
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_class_set': {'ram': 51200}}
+ response = self.controller.update(req, 'default', body=body)
+ expected_response = {
+ 'quota_class_set': {
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1
+ }
+ }
+ self.assertEqual(expected_response, response)
+ mock_update.assert_called_once_with(req.environ['nova.context'],
+ "default", "ram", 51200)
+
+ @mock.patch.object(objects.Quotas, "update_class")
+ def test_update_v257(self, mock_update):
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ body = {'quota_class_set': {'ram': 51200}}
+ response = self.controller.update(req, 'default', body=body)
+ expected_response = {
+ 'quota_class_set': {
+ 'cores': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ mock_update.assert_called_once_with(req.environ['nova.context'],
+ "default", "ram", 51200)
+
+
+class UnifiedLimitsQuotaClassesTest(NoopQuotaClassesTest):
+ quota_driver = "nova.quota.UnifiedLimitsDriver"
+
+ def setUp(self):
+ super(UnifiedLimitsQuotaClassesTest, self).setUp()
+ # Set server_groups so that all config options get a different value,
+ # but otherwise test as much as possible with the default config.
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group='quota')
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 12,
+ local_limit.SERVER_GROUP_MEMBERS: 10}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ def test_show_v21(self, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.show(req, "test_class")
+ expected_response = {
+ 'quota_class_set': {
+ 'id': 'test_class',
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'ram': 3,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ def test_show_v257(self, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ response = self.controller.show(req, "default")
+ expected_response = {
+ 'quota_class_set': {
+ 'id': 'default',
+ 'cores': 2,
+ 'instances': 1,
+ 'ram': 3,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_update_still_rejects_badrequests(self):
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_class_set': {'instances': 50, 'cores': 50,
+ 'ram': 51200, 'unsupported': 12}}
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ req, 'test_class', body=body)
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ @mock.patch.object(objects.Quotas, "update_class")
+ def test_update_v21(self, mock_update, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_class_set': {'ram': 51200}}
+ response = self.controller.update(req, 'default', body=body)
+ expected_response = {
+ 'quota_class_set': {
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'security_group_rules': -1,
+ 'security_groups': -1
+ }
+ }
+ self.assertEqual(expected_response, response)
+ # TODO(johngarbutt) we should be proxying to keystone
+ self.assertEqual(0, mock_update.call_count)
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ @mock.patch.object(objects.Quotas, "update_class")
+ def test_update_v257(self, mock_update, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ body = {'quota_class_set': {'ram': 51200}}
+ response = self.controller.update(req, 'default', body=body)
+ expected_response = {
+ 'quota_class_set': {
+ 'cores': 2,
+ 'instances': 1,
+ 'ram': 3,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ # TODO(johngarbutt) we should be proxying to keystone
+ self.assertEqual(0, mock_update.call_count)
diff --git a/nova/tests/unit/api/openstack/compute/test_quotas.py b/nova/tests/unit/api/openstack/compute/test_quotas.py
index 545bd51e13..0a1bbd08d8 100644
--- a/nova/tests/unit/api/openstack/compute/test_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_quotas.py
@@ -14,12 +14,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
+from oslo_limit import fixture as limit_fixture
+from oslo_utils.fixture import uuidsentinel as uuids
import webob
from nova.api.openstack.compute import quota_sets as quotas_v21
from nova.db import constants as db_const
from nova import exception
+from nova.limit import local as local_limit
+from nova.limit import placement as placement_limit
+from nova import objects
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -660,3 +666,475 @@ class QuotaSetsTestV275(QuotaSetsTestV257):
query_string=query_string)
self.assertRaises(exception.ValidationError, self.controller.delete,
req, 1234)
+
+
+class NoopQuotaSetsTest(test.NoDBTestCase):
+ quota_driver = "nova.quota.NoopQuotaDriver"
+ expected_detail = {'in_use': -1, 'limit': -1, 'reserved': -1}
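+ # The noop driver does not track usage, so the detailed view reports -1
+ # for in_use, limit and reserved alike.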
+
+ def setUp(self):
+ super(NoopQuotaSetsTest, self).setUp()
+ self.flags(driver=self.quota_driver, group="quota")
+ self.controller = quotas_v21.QuotaSetsController()
+ self.stub_out('nova.api.openstack.identity.verify_project_id',
+ lambda ctx, project_id: True)
+
+ def test_show_v21(self):
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.show(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_show_v257(self):
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ response = self.controller.show(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1}}
+ self.assertEqual(expected_response, response)
+
+ def test_detail_v21(self):
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.detail(req, uuids.project_id)
+
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': self.expected_detail,
+ 'fixed_ips': self.expected_detail,
+ 'floating_ips': self.expected_detail,
+ 'injected_file_content_bytes': self.expected_detail,
+ 'injected_file_path_bytes': self.expected_detail,
+ 'injected_files': self.expected_detail,
+ 'instances': self.expected_detail,
+ 'key_pairs': self.expected_detail,
+ 'metadata_items': self.expected_detail,
+ 'ram': self.expected_detail,
+ 'security_group_rules': self.expected_detail,
+ 'security_groups': self.expected_detail,
+ 'server_group_members': self.expected_detail,
+ 'server_groups': self.expected_detail,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_detail_v21_user(self):
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ response = self.controller.detail(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': self.expected_detail,
+ 'fixed_ips': self.expected_detail,
+ 'floating_ips': self.expected_detail,
+ 'injected_file_content_bytes': self.expected_detail,
+ 'injected_file_path_bytes': self.expected_detail,
+ 'injected_files': self.expected_detail,
+ 'instances': self.expected_detail,
+ 'key_pairs': self.expected_detail,
+ 'metadata_items': self.expected_detail,
+ 'ram': self.expected_detail,
+ 'security_group_rules': self.expected_detail,
+ 'security_groups': self.expected_detail,
+ 'server_group_members': self.expected_detail,
+ 'server_groups': self.expected_detail,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_update_still_rejects_badrequests(self):
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_set': {'instances': 50, 'cores': 50,
+ 'ram': 51200, 'unsupported': 12}}
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ req, uuids.project_id, body=body)
+
+ @mock.patch.object(objects.Quotas, "create_limit")
+ def test_update_v21(self, mock_create):
+ req = fakes.HTTPRequest.blank("")
+ body = {'quota_set': {'server_groups': 2}}
+ response = self.controller.update(req, uuids.project_id, body=body)
+ expected_response = {
+ 'quota_set': {
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ mock_create.assert_called_once_with(req.environ['nova.context'],
+ uuids.project_id, "server_groups",
+ 2, user_id=None)
+
+ @mock.patch.object(objects.Quotas, "create_limit")
+ def test_update_v21_user(self, mock_create):
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ body = {'quota_set': {'key_pairs': 52}}
+ response = self.controller.update(req, uuids.project_id, body=body)
+ expected_response = {
+ 'quota_set': {
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ mock_create.assert_called_once_with(req.environ['nova.context'],
+ uuids.project_id, "key_pairs", 52,
+ user_id="42")
+
+ def test_defaults_v21(self):
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.defaults(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': -1,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': -1,
+ 'injected_file_path_bytes': -1,
+ 'injected_files': -1,
+ 'instances': -1,
+ 'key_pairs': -1,
+ 'metadata_items': -1,
+ 'ram': -1,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': -1,
+ 'server_groups': -1,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch('nova.objects.Quotas.destroy_all_by_project')
+ def test_quotas_delete(self, mock_destroy_all_by_project):
+ req = fakes.HTTPRequest.blank("")
+ self.controller.delete(req, "1234")
+ mock_destroy_all_by_project.assert_called_once_with(
+ req.environ['nova.context'], "1234")
+
+ @mock.patch('nova.objects.Quotas.destroy_all_by_project_and_user')
+ def test_user_quotas_delete(self, mock_destroy_all_by_user):
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ self.controller.delete(req, "1234")
+ mock_destroy_all_by_user.assert_called_once_with(
+ req.environ['nova.context'], "1234", "42")
+
+
+class UnifiedLimitsQuotaSetsTest(NoopQuotaSetsTest):
+ quota_driver = "nova.quota.UnifiedLimitsDriver"
+ # this matches what the db driver returns
+ expected_detail = {'in_use': 0, 'limit': -1, 'reserved': 0}
+
+ def setUp(self):
+ super(UnifiedLimitsQuotaSetsTest, self).setUp()
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 12,
+ local_limit.SERVER_GROUP_MEMBERS: 10}
+ self.limit_fixture = self.useFixture(
+ limit_fixture.LimitFixture(reglimits, {}))
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ def test_show_v21(self, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.show(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ def test_show_v257(self, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("", version='2.57')
+ response = self.controller.show(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': 2,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'server_group_members': 10,
+ 'server_groups': 12}}
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_detail_v21(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.detail(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': {
+ 'in_use': 5, 'limit': 2, 'reserved': 0},
+ 'fixed_ips': self.expected_detail,
+ 'floating_ips': self.expected_detail,
+ 'injected_file_content_bytes': {
+ 'in_use': 0, 'limit': 10240, 'reserved': 0},
+ 'injected_file_path_bytes': {
+ 'in_use': 0, 'limit': 255, 'reserved': 0},
+ 'injected_files': {
+ 'in_use': 0, 'limit': 5, 'reserved': 0},
+ 'instances': {
+ 'in_use': 4, 'limit': 1, 'reserved': 0},
+ 'key_pairs': {
+ 'in_use': 0, 'limit': 100, 'reserved': 0},
+ 'metadata_items': {
+ 'in_use': 0, 'limit': 128, 'reserved': 0},
+ 'ram': {
+ 'in_use': 6, 'limit': 3, 'reserved': 0},
+ 'security_group_rules': self.expected_detail,
+ 'security_groups': self.expected_detail,
+ 'server_group_members': {
+ 'in_use': 0, 'limit': 10, 'reserved': 0},
+ 'server_groups': {
+ 'in_use': 9, 'limit': 12, 'reserved': 0},
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_detail_v21_user(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ response = self.controller.detail(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': {
+ 'in_use': 5, 'limit': 2, 'reserved': 0},
+ 'fixed_ips': self.expected_detail,
+ 'floating_ips': self.expected_detail,
+ 'injected_file_content_bytes': {
+ 'in_use': 0, 'limit': 10240, 'reserved': 0},
+ 'injected_file_path_bytes': {
+ 'in_use': 0, 'limit': 255, 'reserved': 0},
+ 'injected_files': {
+ 'in_use': 0, 'limit': 5, 'reserved': 0},
+ 'instances': {
+ 'in_use': 4, 'limit': 1, 'reserved': 0},
+ 'key_pairs': {
+ 'in_use': 0, 'limit': 100, 'reserved': 0},
+ 'metadata_items': {
+ 'in_use': 0, 'limit': 128, 'reserved': 0},
+ 'ram': {
+ 'in_use': 6, 'limit': 3, 'reserved': 0},
+ 'security_group_rules': self.expected_detail,
+ 'security_groups': self.expected_detail,
+ 'server_group_members': {
+ 'in_use': 0, 'limit': 10, 'reserved': 0},
+ 'server_groups': {
+ 'in_use': 9, 'limit': 12, 'reserved': 0},
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.Quotas, "create_limit")
+ def test_update_v21(self, mock_create, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("")
+ # TODO(johngarbutt) still need to implement get_settable_quotas
+ body = {'quota_set': {'server_groups': 2}}
+ response = self.controller.update(req, uuids.project_id, body=body)
+ expected_response = {
+ 'quota_set': {
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ self.assertEqual(0, mock_create.call_count)
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.Quotas, "create_limit")
+ def test_update_v21_user(self, mock_create, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ body = {'quota_set': {'key_pairs': 52}}
+ response = self.controller.update(req, uuids.project_id, body=body)
+ expected_response = {
+ 'quota_set': {
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+ self.assertEqual(0, mock_create.call_count)
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ def test_defaults_v21(self, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.defaults(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 3,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ def test_defaults_v21_different_limit_values(self):
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 7,
+ local_limit.INJECTED_FILES: 6,
+ local_limit.INJECTED_FILES_CONTENT: 4,
+ local_limit.INJECTED_FILES_PATH: 5,
+ local_limit.KEY_PAIRS: 1,
+ local_limit.SERVER_GROUPS: 3,
+ local_limit.SERVER_GROUP_MEMBERS: 2}
+ self.limit_fixture.reglimits = reglimits
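+ # No registered limits are provided for the placement-backed resources,
+ # so instances, cores and ram fall back to 0 in the defaults below.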
+
+ req = fakes.HTTPRequest.blank("")
+ response = self.controller.defaults(req, uuids.project_id)
+ expected_response = {
+ 'quota_set': {
+ 'id': uuids.project_id,
+ 'cores': 0,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 4,
+ 'injected_file_path_bytes': 5,
+ 'injected_files': 6,
+ 'instances': 0,
+ 'key_pairs': 1,
+ 'metadata_items': 7,
+ 'ram': 0,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 2,
+ 'server_groups': 3,
+ }
+ }
+ self.assertEqual(expected_response, response)
+
+ @mock.patch('nova.objects.Quotas.destroy_all_by_project')
+ def test_quotas_delete(self, mock_destroy_all_by_project):
+ req = fakes.HTTPRequest.blank("")
+ self.controller.delete(req, "1234")
+ # Ensure destroy isn't called for unified limits
+ self.assertEqual(0, mock_destroy_all_by_project.call_count)
+
+ @mock.patch('nova.objects.Quotas.destroy_all_by_project_and_user')
+ def test_user_quotas_delete(self, mock_destroy_all_by_user):
+ req = fakes.HTTPRequest.blank("?user_id=42")
+ self.controller.delete(req, "1234")
+ # Ensure destroy isn't called for unified limits
+ self.assertEqual(0, mock_destroy_all_by_user.call_count)
diff --git a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
index 6427b1abf0..961f4a02c9 100644
--- a/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
+++ b/nova/tests/unit/api/openstack/compute/test_remote_consoles.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack import api_version_request
@@ -103,6 +104,18 @@ class ConsolesExtensionTestV21(test.NoDBTestCase):
'get_vnc_console',
exception.InstanceNotFound(instance_id=fakes.FAKE_UUID))
+ def test_get_vnc_console_instance_invalid_state(self):
+ body = {'os-getVNCConsole': {'type': 'novnc'}}
+ self._check_console_failure(
+ self.controller.get_vnc_console,
+ webob.exc.HTTPConflict,
+ body,
+ 'get_vnc_console',
+ exception.InstanceInvalidState(
+ attr='fake-attr', state='fake-state', method='fake-method',
+ instance_uuid=fakes.FAKE_UUID)
+ )
+
def test_get_vnc_console_invalid_type(self):
body = {'os-getVNCConsole': {'type': 'invalid'}}
self._check_console_failure(
@@ -446,7 +459,7 @@ class ConsolesExtensionTestV26(test.NoDBTestCase):
self.req, fakes.FAKE_UUID, body=body)
self.assertTrue(mock_handler.called)
- def test_create_console_not_found(self,):
+ def test_create_console_not_found(self):
mock_handler = mock.MagicMock()
mock_handler.side_effect = exception.InstanceNotFound(
instance_id='xxx')
diff --git a/nova/tests/unit/api/openstack/compute/test_rescue.py b/nova/tests/unit/api/openstack/compute/test_rescue.py
index 28b8217d1a..8a87f52222 100644
--- a/nova/tests/unit/api/openstack/compute/test_rescue.py
+++ b/nova/tests/unit/api/openstack/compute/test_rescue.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import ddt
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/api/openstack/compute/test_security_groups.py b/nova/tests/unit/api/openstack/compute/test_security_groups.py
index 71cdcbc871..4a85a9997d 100644
--- a/nova/tests/unit/api/openstack/compute/test_security_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_security_groups.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
 from neutronclient.common import exceptions as n_exc
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
diff --git a/nova/tests/unit/api/openstack/compute/test_server_actions.py b/nova/tests/unit/api/openstack/compute/test_server_actions.py
index d07924abe8..08f7a31573 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_actions.py
@@ -13,9 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
import webob
@@ -66,11 +67,11 @@ class ServerActionsControllerTestV21(test.TestCase):
self.controller = self._get_controller()
self.compute_api = self.controller.compute_api
- # We don't care about anything getting as far as hitting the compute
- # RPC API so we just mock it out here.
- mock_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi')
- mock_rpcapi.start()
- self.addCleanup(mock_rpcapi.stop)
+ # In most cases we don't care about anything getting as far as
+ # hitting the compute RPC API, so we just mock it out here.
+ patcher_rpcapi = mock.patch.object(self.compute_api, 'compute_rpcapi')
+ self.mock_rpcapi = patcher_rpcapi.start()
+ self.addCleanup(patcher_rpcapi.stop)
# The project_id here matches what is used by default in
# fake_compute_get which need to match for policy checks.
self.req = fakes.HTTPRequest.blank('',
@@ -1079,21 +1080,23 @@ class ServerActionsControllerTestV21(test.TestCase):
snapshot = dict(id=_fake_id('d'))
+ self.mock_rpcapi.quiesce_instance.side_effect = (
+ exception.InstanceQuiesceNotSupported(
+ instance_id="fake", reason="test"
+ )
+ )
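+ # Quiesce being unsupported is tolerated by the snapshot path; the test
+ # only asserts later that quiesce_instance was attempted.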
+
with test.nested(
mock.patch.object(
self.controller.compute_api.volume_api, 'get_absolute_limits',
return_value={'totalSnapshotsUsed': 0,
'maxTotalSnapshots': 10}),
- mock.patch.object(self.controller.compute_api.compute_rpcapi,
- 'quiesce_instance',
- side_effect=exception.InstanceQuiesceNotSupported(
- instance_id='fake', reason='test')),
mock.patch.object(self.controller.compute_api.volume_api, 'get',
return_value=volume),
mock.patch.object(self.controller.compute_api.volume_api,
'create_snapshot_force',
return_value=snapshot),
- ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create):
+ ) as (mock_get_limits, mock_vol_get, mock_vol_create):
if mock_vol_create_side_effect:
mock_vol_create.side_effect = mock_vol_create_side_effect
@@ -1125,7 +1128,7 @@ class ServerActionsControllerTestV21(test.TestCase):
for k in extra_properties.keys():
self.assertEqual(properties[k], extra_properties[k])
- mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
+ self.mock_rpcapi.quiesce_instance.assert_called_once()
mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
mock.ANY, mock.ANY)
@@ -1189,21 +1192,23 @@ class ServerActionsControllerTestV21(test.TestCase):
snapshot = dict(id=_fake_id('d'))
+ self.mock_rpcapi.quiesce_instance.side_effect = (
+ exception.InstanceQuiesceNotSupported(
+ instance_id="fake", reason="test"
+ )
+ )
+
with test.nested(
mock.patch.object(
self.controller.compute_api.volume_api, 'get_absolute_limits',
return_value={'totalSnapshotsUsed': 0,
'maxTotalSnapshots': 10}),
- mock.patch.object(self.controller.compute_api.compute_rpcapi,
- 'quiesce_instance',
- side_effect=exception.InstanceQuiesceNotSupported(
- instance_id='fake', reason='test')),
mock.patch.object(self.controller.compute_api.volume_api, 'get',
return_value=volume),
mock.patch.object(self.controller.compute_api.volume_api,
'create_snapshot_force',
return_value=snapshot),
- ) as (mock_get_limits, mock_quiesce, mock_vol_get, mock_vol_create):
+ ) as (mock_get_limits, mock_vol_get, mock_vol_create):
response = self.controller._action_create_image(self.req,
FAKE_UUID, body=body)
@@ -1218,7 +1223,7 @@ class ServerActionsControllerTestV21(test.TestCase):
for key, val in extra_metadata.items():
self.assertEqual(properties[key], val)
- mock_quiesce.assert_called_once_with(mock.ANY, mock.ANY)
+ self.mock_rpcapi.quiesce_instance.assert_called_once()
mock_vol_get.assert_called_once_with(mock.ANY, volume['id'])
mock_vol_create.assert_called_once_with(mock.ANY, volume['id'],
mock.ANY, mock.ANY)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py b/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py
index d215f3e903..12d8bbb318 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_diagnostics.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/api/openstack/compute/test_server_external_events.py b/nova/tests/unit/api/openstack/compute/test_server_external_events.py
index 2ca97fc6d8..e366d0acdd 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_external_events.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_external_events.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures as fx
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_external_events \
@@ -192,7 +193,7 @@ class ServerExternalEventsTestV21(test.NoDBTestCase):
self.api.create, self.req, body=body)
def test_create_unknown_events(self):
- self.event_1['name'] = 'unkown_event'
+ self.event_1['name'] = 'unknown_event'
body = {'events': self.event_1}
self.assertRaises(self.invalid_error,
self.api.create, self.req, body=body)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
index 6b08be6fd9..fe7a60f956 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
@@ -13,14 +13,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
+from oslo_limit import fixture as limit_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
import webob
from nova.api.openstack.compute import server_groups as sg_v21
from nova import context
+from nova import exception
+from nova.limit import local as local_limit
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -116,14 +120,41 @@ class ServerGroupQuotasTestV21(test.TestCase):
self.controller.create,
self.req, body={'server_group': sgroup})
+ def _test_create_server_group_during_recheck(self, mock_method):
+ self._setup_quotas()
+ sgroup = server_group_template()
+ policies = ['anti-affinity']
+ sgroup['policies'] = policies
+ e = self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create,
+ self.req, body={'server_group': sgroup})
+ self.assertEqual(2, mock_method.call_count)
+ return e
+
@mock.patch('nova.objects.Quotas.check_deltas')
- def test_create_server_group_recheck_disabled(self, mock_check):
+ def test_create_server_group_during_recheck(self, mock_check):
+ """Simulate a race where this request initially has enough quota to
+ progress partially through the create path but then fails the quota
+ recheck because a parallel request filled up the quota first.
+ """
+ # First quota check succeeds, second (recheck) fails.
+ mock_check.side_effect = [None,
+ exception.OverQuota(overs='server_groups')]
+ e = self._test_create_server_group_during_recheck(mock_check)
+ expected = 'Quota exceeded, too many server groups.'
+ self.assertEqual(expected, str(e))
+
+ def _test_create_server_group_recheck_disabled(self):
self.flags(recheck_quota=False, group='quota')
self._setup_quotas()
sgroup = server_group_template()
policies = ['anti-affinity']
sgroup['policies'] = policies
self.controller.create(self.req, body={'server_group': sgroup})
+
+ @mock.patch('nova.objects.Quotas.check_deltas')
+ def test_create_server_group_recheck_disabled(self, mock_check):
+ self._test_create_server_group_recheck_disabled()
ctxt = self.req.environ['nova.context']
mock_check.assert_called_once_with(ctxt, {'server_groups': 1},
ctxt.project_id, ctxt.user_id)
@@ -170,3 +201,76 @@ class ServerGroupQuotasTestV21(test.TestCase):
else:
status_int = resp.status_int
self.assertEqual(204, status_int)
+
+
+class ServerGroupQuotasUnifiedLimitsTestV21(ServerGroupQuotasTestV21):
+
+ def setUp(self):
+ super(ServerGroupQuotasUnifiedLimitsTestV21, self).setUp()
+ self.flags(driver='nova.quota.UnifiedLimitsDriver', group='quota')
+ self.req = fakes.HTTPRequest.blank('')
+ self.controller = sg_v21.ServerGroupController()
+ self.limit_fixture = self.useFixture(
+ limit_fixture.LimitFixture({'server_groups': 10}, {}))
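+ # With unified limits, server group creation is enforced through
+ # nova.limit.local.enforce_db_limit rather than Quotas.check_deltas,
+ # which is why the overridden tests below mock that function instead.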
+
+ @mock.patch('nova.limit.local.enforce_db_limit')
+ def test_create_server_group_during_recheck(self, mock_enforce):
+ """Simulate a race where this request initially has enough quota to
+ progress partially through the create path but then fails the quota
+ recheck because a parallel request filled up the quota first.
+ """
+ # First quota check succeeds, second (recheck) fails.
+ mock_enforce.side_effect = [
+ None,
+ exception.ServerGroupLimitExceeded(message='oslo.limit message')]
+ # Run the test using the unified limits enforce method.
+ e = self._test_create_server_group_during_recheck(mock_enforce)
+ expected = 'oslo.limit message'
+ self.assertEqual(expected, str(e))
+
+ @mock.patch('nova.limit.local.enforce_db_limit')
+ def test_create_server_group_recheck_disabled(self, mock_enforce):
+ # Run the test using the unified limits enforce method.
+ self._test_create_server_group_recheck_disabled()
+ ctxt = self.req.environ['nova.context']
+ mock_enforce.assert_called_once_with(ctxt, 'server_groups',
+ entity_scope=ctxt.project_id,
+ delta=1)
+
+ def test_create_group_fails_with_zero_quota(self):
+ self.limit_fixture.reglimits = {'server_groups': 0}
+ sgroup = {'name': 'test', 'policies': ['anti-affinity']}
+ exc = self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create,
+ self.req, body={'server_group': sgroup})
+ msg = ("Resource %s is over limit" % local_limit.SERVER_GROUPS)
+ self.assertIn(msg, str(exc))
+
+ def test_create_only_one_group_when_limit_is_one(self):
+ self.limit_fixture.reglimits = {'server_groups': 1}
+ policies = ['anti-affinity']
+ sgroup = {'name': 'test', 'policies': policies}
+ res_dict = self.controller.create(
+ self.req, body={'server_group': sgroup})
+ self.assertEqual(res_dict['server_group']['name'], 'test')
+ self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
+ self.assertEqual(res_dict['server_group']['policies'], policies)
+
+ # prove we can't create two, as limited to one
+ sgroup2 = {'name': 'test2', 'policies': policies}
+ exc = self.assertRaises(webob.exc.HTTPForbidden,
+ self.controller.create,
+ self.req, body={'server_group': sgroup2})
+ msg = ("Resource %s is over limit" % local_limit.SERVER_GROUPS)
+ self.assertIn(msg, str(exc))
+
+ # delete first one
+ self.controller.delete(self.req, res_dict['server_group']['id'])
+
+ # prove we can now create the second one
+ res_dict2 = self.controller.create(
+ self.req, body={'server_group': sgroup2})
+ self.assertEqual(res_dict2['server_group']['name'], 'test2')
+ self.assertTrue(
+ uuidutils.is_uuid_like(res_dict2['server_group']['id']))
+ self.assertEqual(res_dict2['server_group']['policies'], policies)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_groups.py b/nova/tests/unit/api/openstack/compute/test_server_groups.py
index a0d1712343..9d99c3ae6d 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_groups.py
@@ -14,7 +14,8 @@
# under the License.
import copy
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
import webob
@@ -86,7 +87,8 @@ class ServerGroupTestV21(test.NoDBTestCase):
def setUp(self):
super(ServerGroupTestV21, self).setUp()
self._setup_controller()
- self.req = fakes.HTTPRequest.blank('')
+ self.member_req = fakes.HTTPRequest.member_req('')
+ self.reader_req = fakes.HTTPRequest.reader_req('')
self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.foo_req = fakes.HTTPRequest.blank('', project_id='foo')
self.policy = self.useFixture(fixtures.RealPolicyFixture())
@@ -113,20 +115,20 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_create_server_group_with_no_policies(self):
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
sgroup = server_group_template()
sgroup['policies'] = policies
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'test')
self.assertTrue(uuidutils.is_uuid_like(res_dict['server_group']['id']))
self.assertEqual(res_dict['server_group']['policies'], policies)
def test_create_server_group_with_new_policy_before_264(self):
- req = fakes.HTTPRequest.blank('', version='2.63')
+ req = fakes.HTTPRequest.member_req('', version='2.63')
policy = 'anti-affinity'
rules = {'max_server_per_host': 3}
# 'policy' isn't an acceptable request key before 2.64
@@ -161,7 +163,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.create(self.admin_req, body={'server_group': sgroup})
# test as non-admin
- self.controller.create(self.req, body={'server_group': sgroup})
+ self.controller.create(self.member_req, body={'server_group': sgroup})
def _create_instance(self, ctx, cell):
with context.target_cell(ctx, cell) as cctx:
@@ -288,7 +290,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
path = path or '/os-server-groups?all_projects=True'
if limited:
path += limited
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ reader_req = fakes.HTTPRequest.reader_req(path, version=api_version)
admin_req = fakes.HTTPRequest.blank(path, use_admin_context=True,
version=api_version)
@@ -297,7 +299,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertEqual(all, res_dict)
# test as non-admin
- res_dict = self.controller.index(req)
+ res_dict = self.controller.index(reader_req)
self.assertEqual(tenant_specific, res_dict)
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
@@ -346,25 +348,27 @@ class ServerGroupTestV21(test.NoDBTestCase):
return_get_by_project = return_server_groups()
mock_get_by_project.return_value = return_get_by_project
path = '/os-server-groups'
- req = fakes.HTTPRequest.blank(path, version=api_version)
+ req = fakes.HTTPRequest.reader_req(path, version=api_version)
res_dict = self.controller.index(req)
self.assertEqual(expected, res_dict)
def test_display_members(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
self.assertEqual(3, len(result_members))
for member in members:
self.assertIn(member, result_members)
def test_display_members_with_nonexistent_group(self):
- self.assertRaises(webob.exc.HTTPNotFound,
- self.controller.show, self.req, uuidsentinel.group)
+ self.assertRaises(
+ webob.exc.HTTPNotFound,
+ self.controller.show, self.reader_req, uuidsentinel.group)
def test_display_active_members_only(self):
- ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
+ ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID,
+ roles=['member', 'reader'])
(ig_uuid, instances, members) = self._create_groups_and_instances(ctx)
# delete an instance
@@ -378,7 +382,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.assertRaises(exception.InstanceNotFound,
objects.Instance.get_by_uuid,
ctx, instances[1].uuid)
- res_dict = self.controller.show(self.req, ig_uuid)
+ res_dict = self.controller.show(self.reader_req, ig_uuid)
result_members = res_dict['server_group']['members']
# check that only the active instance is displayed
self.assertEqual(2, len(result_members))
@@ -392,7 +396,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.show(self.admin_req, ig_uuid)
# test as non-admin, same project
- self.controller.show(self.req, ig_uuid)
+ self.controller.show(self.reader_req, ig_uuid)
# test as non-admin, different project
self.assertRaises(webob.exc.HTTPNotFound,
@@ -405,7 +409,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
sgroup = server_group_template(name='good* $%name',
policies=['affinity'])
- res_dict = self.controller.create(self.req,
+ res_dict = self.controller.create(self.member_req,
body={'server_group': sgroup})
self.assertEqual(res_dict['server_group']['name'], 'good* $%name')
@@ -413,99 +417,99 @@ class ServerGroupTestV21(test.NoDBTestCase):
# blank name
sgroup = server_group_template(name='', policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with length 256
sgroup = server_group_template(name='1234567890' * 26,
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# non-string name
sgroup = server_group_template(name=12, policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with leading spaces
sgroup = server_group_template(name=' leading spaces',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with trailing spaces
sgroup = server_group_template(name='trailing space ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with all spaces
sgroup = server_group_template(name=' ',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with unprintable character
sgroup = server_group_template(name='bad\x00name',
policies=['test_policy'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# name with out of range char U0001F4A9
sgroup = server_group_template(name=u"\U0001F4A9",
policies=['affinity'])
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
# blank policy
sgroup = server_group_template(name='fake-name', policies='')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as integer
sgroup = server_group_template(name='fake-name', policies=7)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as string
sgroup = server_group_template(name='fake-name', policies='invalid')
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
# policy as None
sgroup = server_group_template(name='fake-name', policies=None)
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_conflicting_policies(self):
sgroup = server_group_template()
policies = ['anti-affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_duplicate_policies(self):
sgroup = server_group_template()
policies = ['affinity', 'affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_not_supported(self):
sgroup = server_group_template()
policies = ['storage-affinity', 'anti-affinity', 'rack-affinity']
sgroup['policies'] = policies
self.assertRaises(self.validation_error, self.controller.create,
- self.req, body={'server_group': sgroup})
+ self.member_req, body={'server_group': sgroup})
def test_create_server_group_with_no_body(self):
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=None)
+ self.controller.create, self.member_req, body=None)
def test_create_server_group_with_no_server_group(self):
body = {'no-instanceGroup': None}
self.assertRaises(self.validation_error,
- self.controller.create, self.req, body=body)
+ self.controller.create, self.member_req, body=body)
def test_list_server_group_by_tenant(self):
self._test_list_server_group_by_tenant(
@@ -527,7 +531,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.controller.index(self.admin_req)
# test as non-admin
- self.controller.index(self.req)
+ self.controller.index(self.reader_req)
def test_list_server_group_multiple_param(self):
self._test_list_server_group(api_version=self.wsgi_api_version,
@@ -597,7 +601,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
self.stub_out('nova.objects.InstanceGroup.get_by_uuid',
return_server_group)
- resp = self.controller.delete(self.req, uuidsentinel.sg1_id)
+ resp = self.controller.delete(self.member_req, uuidsentinel.sg1_id)
mock_destroy.assert_called_once_with()
# NOTE: on v2.1, http status code is set as wsgi_code of API
@@ -610,7 +614,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
def test_delete_non_existing_server_group(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
- self.req, 'invalid')
+ self.member_req, 'invalid')
def test_delete_server_group_rbac_default(self):
ctx = context.RequestContext('fake_user', fakes.FAKE_PROJECT_ID)
@@ -621,7 +625,7 @@ class ServerGroupTestV21(test.NoDBTestCase):
# test as non-admin
ig_uuid = self._create_groups_and_instances(ctx)[0]
- self.controller.delete(self.req, ig_uuid)
+ self.controller.delete(self.member_req, ig_uuid)
class ServerGroupTestV213(ServerGroupTestV21):
@@ -648,7 +652,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
def _create_server_group_normal(self, policies=None, policy=None,
rules=None):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template()
sgroup['rules'] = rules or {}
sgroup['policy'] = policy
@@ -673,7 +677,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], {})
def _display_server_group(self, uuid):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('', version=self.wsgi_api_version)
group = self.controller.show(req, uuid)
return group
@@ -689,7 +693,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertEqual(res_dict['server_group']['rules'], rules)
def test_create_affinity_server_group_with_invalid_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(webob.exc.HTTPBadRequest,
@@ -697,7 +701,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self.assertIn("Only anti-affinity policy supports rules", str(result))
def test_create_anti_affinity_server_group_with_invalid_rules(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# Negative tests: the key is unknown, or the value is not a
# positive integer
invalid_rules = [{'unknown_key': '3'},
@@ -717,7 +721,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
return_value=32)
def test_create_server_group_with_low_version_compute_service(self,
mock_get_v):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(policy='anti-affinity',
rules={'max_server_per_host': 3})
result = self.assertRaises(
@@ -733,7 +737,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
self._create_server_group_normal(policy=policy)
def test_policies_since_264(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policies' isn't allowed in requests >= 2.64
sgroup = server_group_template(policies=['anti-affinity'])
self.assertRaises(
@@ -741,14 +745,14 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_create_server_group_without_policy(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# 'policy' is a required key in requests >= 2.64
sgroup = server_group_template()
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
def test_create_server_group_with_illegal_policies(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
# blank policy
sgroup = server_group_template(policy='')
self.assertRaises(self.validation_error, self.controller.create,
@@ -770,7 +774,7 @@ class ServerGroupTestV264(ServerGroupTestV213):
req, body={'server_group': sgroup})
def test_additional_params(self):
- req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.member_req('', version=self.wsgi_api_version)
sgroup = server_group_template(unknown='unknown')
self.assertRaises(self.validation_error, self.controller.create,
req, body={'server_group': sgroup})
@@ -785,7 +789,7 @@ class ServerGroupTestV275(ServerGroupTestV264):
path='/os-server-groups?dummy=False&all_projects=True')
def test_list_server_group_additional_param(self):
- req = fakes.HTTPRequest.blank('/os-server-groups?dummy=False',
- version=self.wsgi_api_version)
+ req = fakes.HTTPRequest.reader_req('/os-server-groups?dummy=False',
+ version=self.wsgi_api_version)
self.assertRaises(self.validation_error, self.controller.index,
req)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_metadata.py b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
index a454597305..9b420dde17 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/api/openstack/compute/test_server_migrations.py b/nova/tests/unit/api/openstack/compute/test_server_migrations.py
index 8d798d434c..c5d8556751 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_migrations.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_migrations.py
@@ -15,8 +15,8 @@
import copy
import datetime
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_server_password.py b/nova/tests/unit/api/openstack/compute/test_server_password.py
index e34ceb90e9..2751eee709 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_password.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_password.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import server_password \
as server_password_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_server_reset_state.py b/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
index 3462cf21ac..3a0c9ca1e2 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_reset_state.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import uuidutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_server_start_stop.py b/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
index 60d12d0c43..f604652622 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_server_tags.py b/nova/tests/unit/api/openstack/compute/test_server_tags.py
index b121c75c3a..4e4609d778 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_tags.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_tags.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from webob import exc
from nova.api.openstack.compute import server_tags
diff --git a/nova/tests/unit/api/openstack/compute/test_server_topology.py b/nova/tests/unit/api/openstack/compute/test_server_topology.py
index 3d8f6dc908..63d5f7a5c1 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_topology.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_topology.py
@@ -11,7 +11,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from webob import exc
diff --git a/nova/tests/unit/api/openstack/compute/test_servers.py b/nova/tests/unit/api/openstack/compute/test_servers.py
index 31739ed7ab..8903de0c3c 100644
--- a/nova/tests/unit/api/openstack/compute/test_servers.py
+++ b/nova/tests/unit/api/openstack/compute/test_servers.py
@@ -17,13 +17,14 @@
import collections
import copy
import datetime
+from unittest import mock
+
import ddt
import functools
from urllib import parse as urlparse
import fixtures
import iso8601
-import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import base64
from oslo_serialization import jsonutils
@@ -2087,10 +2088,10 @@ class ServersControllerTestV216(_ServersControllerTest):
return server_dict
- @mock.patch('nova.compute.api.API.get_instance_host_status')
- def _verify_host_status_policy_behavior(self, func, mock_get_host_status):
+ def _verify_host_status_policy_behavior(self, func):
# Set policy to disallow both host_status cases and verify we don't
# call the get_instance_host_status compute RPC API.
+ self.mock_get_instance_host_status.reset_mock()
rules = {
'os_compute_api:servers:show:host_status': '!',
'os_compute_api:servers:show:host_status:unknown-only': '!',
@@ -2098,7 +2099,7 @@ class ServersControllerTestV216(_ServersControllerTest):
orig_rules = policy.get_rules()
policy.set_rules(oslo_policy.Rules.from_dict(rules), overwrite=False)
func()
- mock_get_host_status.assert_not_called()
+ self.mock_get_instance_host_status.assert_not_called()
# Restore the original rules.
policy.set_rules(orig_rules)
@@ -2638,15 +2639,13 @@ class ServersControllerTestV275(ControllerTest):
microversion = '2.75'
- @mock.patch('nova.compute.api.API.get_all')
- def test_get_servers_additional_query_param_old_version(self, mock_get):
+ def test_get_servers_additional_query_param_old_version(self):
req = fakes.HTTPRequest.blank(self.path_with_query % 'unknown=1',
use_admin_context=True,
version='2.74')
self.controller.index(req)
- @mock.patch('nova.compute.api.API.get_all')
- def test_get_servers_ignore_sort_key_old_version(self, mock_get):
+ def test_get_servers_ignore_sort_key_old_version(self):
req = fakes.HTTPRequest.blank(
self.path_with_query % 'sort_key=deleted',
use_admin_context=True, version='2.74')
@@ -3584,13 +3583,13 @@ class ServersControllerRebuildTestV263(ControllerTest):
},
}
- @mock.patch('nova.compute.api.API.get')
- def _rebuild_server(self, mock_get, certs=None,
- conf_enabled=True, conf_certs=None):
+ def _rebuild_server(self, certs=None, conf_enabled=True, conf_certs=None):
ctx = self.req.environ['nova.context']
- mock_get.return_value = fakes.stub_instance_obj(ctx,
- vm_state=vm_states.ACTIVE, trusted_certs=certs,
- project_id=self.req_project_id, user_id=self.req_user_id)
+ self.mock_get.side_effect = None
+ self.mock_get.return_value = fakes.stub_instance_obj(
+ ctx, vm_state=vm_states.ACTIVE, trusted_certs=certs,
+ project_id=self.req_project_id, user_id=self.req_user_id
+ )
self.flags(default_trusted_certificate_ids=conf_certs, group='glance')
@@ -3743,10 +3742,10 @@ class ServersControllerRebuildTestV271(ControllerTest):
}
}
- @mock.patch('nova.compute.api.API.get')
- def _rebuild_server(self, mock_get):
+ def _rebuild_server(self):
ctx = self.req.environ['nova.context']
- mock_get.return_value = fakes.stub_instance_obj(ctx,
+ self.mock_get.side_effect = None
+ self.mock_get.return_value = fakes.stub_instance_obj(ctx,
vm_state=vm_states.ACTIVE, project_id=self.req_project_id,
user_id=self.req_user_id)
server = self.controller._action_rebuild(
@@ -8023,7 +8022,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
version=self.microversion)
def test_get_server_list_detail_with_down_cells(self):
- # Fake out 1 partially constructued instance and one full instance.
+ # Fake out 1 partially constructed instance and one full instance.
self.instances = [
self.instance,
objects.Instance(
@@ -8151,7 +8150,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_list_with_down_cells(self):
- # Fake out 1 partially constructued instance and one full instance.
+ # Fake out 1 partially constructed instance and one full instance.
self.instances = [
self.instance,
objects.Instance(
@@ -8203,7 +8202,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_with_down_cells(self):
- # Fake out 1 partially constructued instance.
+ # Fake out 1 partially constructed instance.
self.instance = objects.Instance(
context=self.ctxt,
uuid=self.uuid,
@@ -8266,7 +8265,7 @@ class ServersViewBuilderTestV269(_ServersViewBuilderTest):
self.assertThat(output, matchers.DictMatches(expected))
def test_get_server_without_image_avz_user_id_set_from_down_cells(self):
- # Fake out 1 partially constructued instance.
+ # Fake out 1 partially constructed instance.
self.instance = objects.Instance(
context=self.ctxt,
uuid=self.uuid,
diff --git a/nova/tests/unit/api/openstack/compute/test_services.py b/nova/tests/unit/api/openstack/compute/test_services.py
index 5d83bc5a91..f237acc15a 100644
--- a/nova/tests/unit/api/openstack/compute/test_services.py
+++ b/nova/tests/unit/api/openstack/compute/test_services.py
@@ -14,9 +14,9 @@
import copy
import datetime
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
-import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
import webob.exc
diff --git a/nova/tests/unit/api/openstack/compute/test_shelve.py b/nova/tests/unit/api/openstack/compute/test_shelve.py
index 68e523be47..bfa8d2d055 100644
--- a/nova/tests/unit/api/openstack/compute/test_shelve.py
+++ b/nova/tests/unit/api/openstack/compute/test_shelve.py
@@ -12,10 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
-import mock
+from unittest import mock
import ddt
+import fixtures
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import webob
@@ -134,13 +134,17 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
'availability_zone': 'us-east'
}}
self.req.body = jsonutils.dump_as_bytes(body)
- self.req.api_version_request = (api_version_request.
- APIVersionRequest('2.76'))
- with mock.patch.object(self.controller.compute_api,
- 'unshelve') as mock_unshelve:
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.76')
+ )
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve'
+ ) as mock_unshelve:
self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
mock_unshelve.assert_called_once_with(
- self.req.environ['nova.context'], instance, new_az=None)
+ self.req.environ['nova.context'],
+ instance,
+ )
@mock.patch('nova.compute.api.API.unshelve')
@mock.patch('nova.api.openstack.common.get_instance')
@@ -158,7 +162,9 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
APIVersionRequest('2.76'))
self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
mock_unshelve.assert_called_once_with(
- self.req.environ['nova.context'], instance, new_az=None)
+ self.req.environ['nova.context'],
+ instance,
+ )
@mock.patch('nova.compute.api.API.unshelve')
@mock.patch('nova.api.openstack.common.get_instance')
@@ -193,6 +199,238 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
'availability_zone': None
}}
self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID, body=body)
+
+ def test_unshelve_with_additional_param(self):
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ 'additional_param': 1
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ exc = self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve, self.req,
+ fakes.FAKE_UUID, body=body)
+ self.assertIn("Additional properties are not allowed", str(exc))
+
+
+class UnshelveServerControllerTestV291(test.NoDBTestCase):
+ """Server controller test for microversion 2.91
+
+ Add host parameter to unshelve a shelved-offloaded server of
+ 2.91 microversion.
+ """
+ wsgi_api_version = '2.91'
+
+ def setUp(self):
+ super(UnshelveServerControllerTestV291, self).setUp()
+ self.controller = shelve_v21.ShelveController()
+ self.req = fakes.HTTPRequest.blank(
+ '/%s/servers/a/action' % fakes.FAKE_PROJECT_ID,
+ use_admin_context=True, version=self.wsgi_api_version)
+
+ def fake_get_instance(self):
+ ctxt = self.req.environ['nova.context']
+ return fake_instance.fake_instance_obj(
+ ctxt, uuid=fakes.FAKE_UUID, vm_state=vm_states.SHELVED_OFFLOADED)
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_pre_2_91(self, mock_get_instance):
+ """Make sure specifying an AZ before microversion 2.91
+ is still working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.77'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve'
+ ) as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az='us-east',
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_without_parameters_2_91(self, mock_get_instance):
+ """Make sure not specifying parameters with microversion 2.91
+ is working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': None
+ }
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_2_91(self, mock_get_instance):
+ """Make sure specifying an AZ with microversion 2.91
+ is working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az='us-east',
+ host=None,
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_none_2_91(self, mock_get_instance):
+ """Make sure specifying an AZ to none (unpin server)
+ is working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': None,
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az=None,
+ host=None,
+ )
+
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_host_2_91(self, mock_get_instance):
+ """Make sure specifying a host with microversion 2.91
+ is working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'host': 'server02',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+ with mock.patch.object(
+ self.controller.compute_api, 'unshelve') as mock_unshelve:
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ mock_unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ host='server02',
+ )
+
+ @mock.patch('nova.compute.api.API.unshelve')
+ @mock.patch('nova.api.openstack.common.get_instance')
+ def test_unshelve_with_az_and_host_with_v2_91(
+ self, mock_get_instance, mock_unshelve):
+ """Make sure specifying a host and an availability_zone with
+ microversion 2.91 is working.
+ """
+ instance = self.fake_get_instance()
+ mock_get_instance.return_value = instance
+
+ body = {
+ 'unshelve': {
+ 'availability_zone': 'us-east',
+ 'host': 'server01',
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.req.api_version_request = (
+ api_version_request.APIVersionRequest('2.91'))
+
+ self.controller._unshelve(self.req, fakes.FAKE_UUID, body=body)
+ self.controller.compute_api.unshelve.assert_called_once_with(
+ self.req.environ['nova.context'],
+ instance,
+ new_az='us-east',
+ host='server01',
+ )
+
+ def test_invalid_az_name_with_int(self):
+ body = {
+ 'unshelve': {
+ 'host': 1234
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID,
+ body=body)
+
+ def test_no_az_value(self):
+ body = {
+ 'unshelve': {
+ 'host': None
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID, body=body)
+
+ def test_invalid_host_fqdn_with_int(self):
+ body = {
+ 'unshelve': {
+ 'host': 1234
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
+ self.assertRaises(
+ exception.ValidationError,
+ self.controller._unshelve,
+ self.req,
+ fakes.FAKE_UUID,
+ body=body)
+
+ def test_no_host(self):
+ body = {
+ 'unshelve': {
+ 'host': None
+ }}
+ self.req.body = jsonutils.dump_as_bytes(body)
self.assertRaises(exception.ValidationError,
self.controller._unshelve,
self.req, fakes.FAKE_UUID,
@@ -201,7 +439,7 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
def test_unshelve_with_additional_param(self):
body = {
'unshelve': {
- 'availability_zone': 'us-east',
+ 'host': 'server01',
'additional_param': 1
}}
self.req.body = jsonutils.dump_as_bytes(body)
@@ -209,4 +447,4 @@ class UnshelveServerControllerTestV277(test.NoDBTestCase):
exception.ValidationError,
self.controller._unshelve, self.req,
fakes.FAKE_UUID, body=body)
- self.assertIn("Additional properties are not allowed", str(exc))
+ self.assertIn("Invalid input for field/attribute unshelve.", str(exc))
diff --git a/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py b/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
index 5794fdf061..a7dcfae558 100644
--- a/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
+++ b/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_snapshots.py b/nova/tests/unit/api/openstack/compute/test_snapshots.py
index b23ed50865..2e133506a3 100644
--- a/nova/tests/unit/api/openstack/compute/test_snapshots.py
+++ b/nova/tests/unit/api/openstack/compute/test_snapshots.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import webob
from nova.api.openstack.compute import volumes as volumes_v21
diff --git a/nova/tests/unit/api/openstack/compute/test_suspend_server.py b/nova/tests/unit/api/openstack/compute/test_suspend_server.py
index 6eeb2b4549..a44297362c 100644
--- a/nova/tests/unit/api/openstack/compute/test_suspend_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_suspend_server.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_tenant_networks.py b/nova/tests/unit/api/openstack/compute/test_tenant_networks.py
index d05c85c508..c6de561b11 100644
--- a/nova/tests/unit/api/openstack/compute/test_tenant_networks.py
+++ b/nova/tests/unit/api/openstack/compute/test_tenant_networks.py
@@ -13,8 +13,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
import webob
diff --git a/nova/tests/unit/api/openstack/compute/test_volumes.py b/nova/tests/unit/api/openstack/compute/test_volumes.py
index a24c104c93..5b4a2d8b1a 100644
--- a/nova/tests/unit/api/openstack/compute/test_volumes.py
+++ b/nova/tests/unit/api/openstack/compute/test_volumes.py
@@ -15,10 +15,10 @@
# under the License.
import datetime
+from unittest import mock
import urllib
import fixtures
-import mock
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -1889,8 +1889,7 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
req, '5')
def _test_assisted_delete_instance_conflict(self, api_error):
- # unset the stub on volume_snapshot_delete from setUp
- self.mock_volume_snapshot_delete.stop()
+ self.mock_volume_snapshot_delete.side_effect = api_error
params = {
'delete_info': jsonutils.dumps({'volume_id': '1'}),
}
@@ -1899,10 +1898,9 @@ class AssistedSnapshotDeleteTestCaseV21(test.NoDBTestCase):
urllib.parse.urlencode(params),
version=self.microversion)
req.method = 'DELETE'
- with mock.patch.object(compute_api.API, 'volume_snapshot_delete',
- side_effect=api_error):
- self.assertRaises(
- webob.exc.HTTPBadRequest, self.controller.delete, req, '5')
+
+ self.assertRaises(
+ webob.exc.HTTPBadRequest, self.controller.delete, req, '5')
def test_assisted_delete_instance_invalid_state(self):
api_error = exception.InstanceInvalidState(
diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py
index 8cf90ddebe..9ac970f787 100644
--- a/nova/tests/unit/api/openstack/fakes.py
+++ b/nova/tests/unit/api/openstack/fakes.py
@@ -240,6 +240,9 @@ class HTTPRequest(os_wsgi.Request):
def blank(cls, *args, **kwargs):
defaults = {'base_url': 'http://localhost/v2'}
use_admin_context = kwargs.pop('use_admin_context', False)
+ roles = kwargs.pop('roles', [])
+ if use_admin_context:
+ roles.append('admin')
project_id = kwargs.pop('project_id', FAKE_PROJECT_ID)
version = kwargs.pop('version', os_wsgi.DEFAULT_API_VERSION)
defaults.update(kwargs)
@@ -247,10 +250,19 @@ class HTTPRequest(os_wsgi.Request):
out.environ['nova.context'] = FakeRequestContext(
user_id='fake_user',
project_id=project_id,
- is_admin=use_admin_context)
+ is_admin=use_admin_context,
+ roles=roles)
out.api_version_request = api_version.APIVersionRequest(version)
return out
+ @classmethod
+ def member_req(cls, *args, **kwargs):
+ return cls.blank(*args, roles=['member', 'reader'], **kwargs)
+
+ @classmethod
+ def reader_req(cls, *args, **kwargs):
+ return cls.blank(*args, roles=['reader'], **kwargs)
+
class HTTPRequestV21(HTTPRequest):
pass
diff --git a/nova/tests/unit/api/openstack/test_common.py b/nova/tests/unit/api/openstack/test_common.py
index 4666413e27..7fe98bd52e 100644
--- a/nova/tests/unit/api/openstack/test_common.py
+++ b/nova/tests/unit/api/openstack/test_common.py
@@ -17,7 +17,8 @@
Test suites for 'common' code used throughout the OpenStack HTTP API.
"""
-import mock
+from unittest import mock
+
from testtools import matchers
import webob
import webob.exc
diff --git a/nova/tests/unit/api/openstack/test_faults.py b/nova/tests/unit/api/openstack/test_faults.py
index 1bd56a87c5..c7dd5c0a9d 100644
--- a/nova/tests/unit/api/openstack/test_faults.py
+++ b/nova/tests/unit/api/openstack/test_faults.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
import webob
import webob.dec
diff --git a/nova/tests/unit/api/openstack/test_requestlog.py b/nova/tests/unit/api/openstack/test_requestlog.py
index 0ea91439cc..7e79e1b079 100644
--- a/nova/tests/unit/api/openstack/test_requestlog.py
+++ b/nova/tests/unit/api/openstack/test_requestlog.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import fixtures as fx
import testtools
diff --git a/nova/tests/unit/api/openstack/test_wsgi.py b/nova/tests/unit/api/openstack/test_wsgi.py
index e0cf8f6fd8..76554e1fcb 100644
--- a/nova/tests/unit/api/openstack/test_wsgi.py
+++ b/nova/tests/unit/api/openstack/test_wsgi.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
import testscenarios
import webob
diff --git a/nova/tests/unit/api/openstack/test_wsgi_app.py b/nova/tests/unit/api/openstack/test_wsgi_app.py
index 247886b9dd..0eb7011c11 100644
--- a/nova/tests/unit/api/openstack/test_wsgi_app.py
+++ b/nova/tests/unit/api/openstack/test_wsgi_app.py
@@ -11,9 +11,9 @@
# under the License.
import tempfile
+from unittest import mock
import fixtures
-import mock
from oslo_config import fixture as config_fixture
from oslotest import base
@@ -104,3 +104,18 @@ document_root = /tmp
'disable_compute_service_check_for_ffu', True,
group='workarounds')
wsgi_app._setup_service('myhost', 'api')
+
+ def test__get_config_files_empty_env(self):
+ env = {}
+ result = wsgi_app._get_config_files(env)
+ expected = ['/etc/nova/api-paste.ini', '/etc/nova/nova.conf']
+ self.assertEqual(result, expected)
+
+ def test__get_config_files_with_env(self):
+ env = {
+ "OS_NOVA_CONFIG_DIR": "/nova",
+ "OS_NOVA_CONFIG_FILES": "api.conf",
+ }
+ result = wsgi_app._get_config_files(env)
+ expected = ['/nova/api.conf']
+ self.assertEqual(result, expected)
diff --git a/nova/tests/unit/api/test_auth.py b/nova/tests/unit/api/test_auth.py
index 3be245b90e..3bc5f51b04 100644
--- a/nova/tests/unit/api/test_auth.py
+++ b/nova/tests/unit/api/test_auth.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_middleware import request_id
from oslo_serialization import jsonutils
import webob
diff --git a/nova/tests/unit/api/test_wsgi.py b/nova/tests/unit/api/test_wsgi.py
index b2701dc723..b8f215c730 100644
--- a/nova/tests/unit/api/test_wsgi.py
+++ b/nova/tests/unit/api/test_wsgi.py
@@ -20,8 +20,8 @@ Test WSGI basics and provide some helper functions for other WSGI tests.
"""
import sys
+from unittest import mock
-import mock
import routes
import webob
diff --git a/nova/tests/unit/api/validation/extra_specs/test_validators.py b/nova/tests/unit/api/validation/extra_specs/test_validators.py
index 969fb9b648..a8911aadad 100644
--- a/nova/tests/unit/api/validation/extra_specs/test_validators.py
+++ b/nova/tests/unit/api/validation/extra_specs/test_validators.py
@@ -28,7 +28,7 @@ class TestValidators(test.NoDBTestCase):
"""
namespaces = {
'accel', 'aggregate_instance_extra_specs', 'capabilities', 'hw',
- 'hw_rng', 'hw_video', 'os', 'pci_passthrough', 'powervm', 'quota',
+ 'hw_rng', 'hw_video', 'os', 'pci_passthrough', 'quota',
'resources(?P<group>([a-zA-Z0-9_-]{1,64})?)',
'trait(?P<group>([a-zA-Z0-9_-]{1,64})?)', 'vmware',
}
@@ -74,6 +74,10 @@ class TestValidators(test.NoDBTestCase):
('hw:pci_numa_affinity_policy', 'preferred'),
('hw:pci_numa_affinity_policy', 'socket'),
('hw:cpu_policy', 'mixed'),
+ ('hw:viommu_model', 'auto'),
+ ('hw:viommu_model', 'intel'),
+ ('hw:viommu_model', 'smmuv3'),
+ ('hw:viommu_model', 'virtio'),
)
for key, value in valid_specs:
validators.validate(key, value)
@@ -92,6 +96,7 @@ class TestValidators(test.NoDBTestCase):
('hw:pci_numa_affinity_policy', 'requird'),
('hw:pci_numa_affinity_policy', 'prefrred'),
('hw:pci_numa_affinity_policy', 'socet'),
+ ('hw:viommu_model', 'autt'),
)
for key, value in invalid_specs:
with testtools.ExpectedException(exception.ValidationError):
@@ -101,9 +106,7 @@ class TestValidators(test.NoDBTestCase):
valid_specs = (
('hw:numa_nodes', '1'),
('os:monitors', '1'),
- ('powervm:shared_weight', '1'),
('os:monitors', '8'),
- ('powervm:shared_weight', '255'),
)
for key, value in valid_specs:
validators.validate(key, value)
@@ -113,9 +116,7 @@ class TestValidators(test.NoDBTestCase):
('hw:serial_port_count', '!'), # NaN
('hw:numa_nodes', '0'), # has min
('os:monitors', '0'), # has min
- ('powervm:shared_weight', '-1'), # has min
('os:monitors', '9'), # has max
- ('powervm:shared_weight', '256'), # has max
)
for key, value in invalid_specs:
with testtools.ExpectedException(exception.ValidationError):
diff --git a/nova/tests/unit/cmd/test_baseproxy.py b/nova/tests/unit/cmd/test_baseproxy.py
index 34f911cd83..25f3905f24 100644
--- a/nova/tests/unit/cmd/test_baseproxy.py
+++ b/nova/tests/unit/cmd/test_baseproxy.py
@@ -13,9 +13,9 @@
# limitations under the License.
from io import StringIO
+from unittest import mock
import fixtures
-import mock
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
diff --git a/nova/tests/unit/cmd/test_common.py b/nova/tests/unit/cmd/test_common.py
index cabb54f9d4..a32073c297 100644
--- a/nova/tests/unit/cmd/test_common.py
+++ b/nova/tests/unit/cmd/test_common.py
@@ -19,9 +19,9 @@
from io import StringIO
import sys
+from unittest import mock
import fixtures
-import mock
from nova.cmd import common as cmd_common
from nova import exception
diff --git a/nova/tests/unit/cmd/test_compute.py b/nova/tests/unit/cmd/test_compute.py
index acfcea50d2..e465b026aa 100644
--- a/nova/tests/unit/cmd/test_compute.py
+++ b/nova/tests/unit/cmd/test_compute.py
@@ -13,8 +13,8 @@
# limitations under the License.
import contextlib
+from unittest import mock
-import mock
from nova.cmd import compute
from nova import context
diff --git a/nova/tests/unit/cmd/test_manage.py b/nova/tests/unit/cmd/test_manage.py
index 309c2fc829..10c1a77c94 100644
--- a/nova/tests/unit/cmd/test_manage.py
+++ b/nova/tests/unit/cmd/test_manage.py
@@ -17,11 +17,11 @@ import datetime
from io import StringIO
import sys
import textwrap
+from unittest import mock
import warnings
import ddt
import fixtures
-import mock
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
@@ -40,7 +40,6 @@ from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_requests
-
CONF = conf.CONF
@@ -2945,11 +2944,54 @@ class TestNovaManagePlacement(test.NoDBTestCase):
neutron.update_port.assert_called_once_with(
uuidsentinel.port_id, body=expected_update_body)
- def test_audit_with_wrong_provider_uuid(self):
+ @mock.patch.object(manage.PlacementCommands,
+ '_check_orphaned_allocations_for_provider')
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
+ def test_audit_with_provider_uuid(
+ self, get_resource_providers, check_orphaned_allocs,
+ ):
+ rps = [
+ {
+ "generation": 1,
+ "uuid": uuidsentinel.rp1,
+ "links": None,
+ "name": "rp1",
+ "parent_provider_uuid": None,
+ "root_provider_uuid": uuidsentinel.rp1,
+ },
+ ]
+ get_resource_providers.return_value = fake_requests.FakeResponse(
+ 200, content=jsonutils.dumps({"resource_providers": rps}))
+
+ # we found one orphaned allocation per RP and we had no faults
+ check_orphaned_allocs.side_effect = ((1, 0),)
+
+ ret = self.cli.audit(
+ verbose=True, delete=False,
+ provider_uuid=uuidsentinel.fake_uuid)
+
+ # We found orphaned allocations but we left them
+ self.assertEqual(3, ret)
+
+ get_resource_providers.assert_called_once_with(
+ f'/resource_providers?uuid={uuidsentinel.fake_uuid}',
+ global_request_id=mock.ANY,
+ version='1.14')
+
+ # Only the specified RP is checked
+ check_orphaned_allocs.assert_has_calls([
+ mock.call(mock.ANY, mock.ANY, mock.ANY, rps[0], False),
+ ])
+
+ output = self.output.getvalue()
+ self.assertIn('Processed 1 allocation', output)
+
+ def test_audit_with_invalid_provider_uuid(self):
with mock.patch.object(
- self.cli, '_get_resource_provider',
- side_effect=exception.ResourceProviderNotFound(
- name_or_uuid=uuidsentinel.fake_uuid)):
+ self.cli, '_get_resource_provider',
+ side_effect=exception.ResourceProviderNotFound(
+ name_or_uuid=uuidsentinel.fake_uuid),
+ ):
ret = self.cli.audit(
provider_uuid=uuidsentinel.fake_uuid)
self.assertEqual(127, ret)
@@ -3005,6 +3047,11 @@ class TestNovaManagePlacement(test.NoDBTestCase):
expected_ret = 0
self.assertEqual(expected_ret, ret)
+ get_resource_providers.assert_called_once_with(
+ '/resource_providers',
+ global_request_id=mock.ANY,
+ version='1.14')
+
call1 = mock.call(mock.ANY, mock.ANY, mock.ANY, rps[0], delete)
call2 = mock.call(mock.ANY, mock.ANY, mock.ANY, rps[1], delete)
if errors:
@@ -3952,3 +3999,262 @@ class LibvirtCommandsTestCase(test.NoDBTestCase):
output = self.output.getvalue()
self.assertEqual(3, ret)
self.assertIn(uuidsentinel.instance, output)
+
+
+class ImagePropertyCommandsTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ super().setUp()
+ self.output = StringIO()
+ self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
+ self.commands = manage.ImagePropertyCommands()
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_show_image_properties(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.STOPPED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.show(
+ instance_uuid=uuidsentinel.instance,
+ image_property='hw_disk_bus')
+ self.assertEqual(0, ret, 'return code')
+ self.assertIn('virtio', self.output.getvalue(), 'command output')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock())
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_show_image_properties_instance_not_found(
+ self,
+ mock_get_instance
+ ):
+ mock_get_instance.side_effect = exception.InstanceNotFound(
+ instance_id=uuidsentinel.instance)
+ ret = self.commands.show(
+ instance_uuid=uuidsentinel.instance,
+ image_property='hw_disk_bus')
+ self.assertEqual(2, ret, 'return code')
+
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_show_image_properties_instance_mapping_not_found(
+ self,
+ mock_get_instance_mapping
+ ):
+ mock_get_instance_mapping.side_effect = \
+ exception.InstanceMappingNotFound(
+ uuid=uuidsentinel.instance)
+ ret = self.commands.show(
+ instance_uuid=uuidsentinel.instance,
+ image_property='hw_disk_bus')
+ self.assertEqual(2, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_show_image_properties_image_property_not_found(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.STOPPED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.show(
+ instance_uuid=uuidsentinel.instance,
+ image_property='foo')
+ self.assertEqual(3, ret, 'return code')
+
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_show_image_properties_unknown_failure(
+ self,
+ mock_get_instance_mapping,
+ ):
+ mock_get_instance_mapping.side_effect = Exception()
+ ret = self.commands.show(
+ instance_uuid=uuidsentinel.instance,
+ image_property='hw_disk_bus')
+ self.assertEqual(1, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.Instance.save')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties(
+ self, mock_instance_save, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ instance = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.STOPPED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ mock_get_instance.return_value = instance
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_cdrom_bus=sata']
+ )
+ self.assertEqual(0, ret, 'return code')
+ self.assertIn('image_hw_cdrom_bus', instance.system_metadata)
+ self.assertEqual(
+ 'sata',
+ instance.system_metadata.get('image_hw_cdrom_bus'),
+ 'image_hw_cdrom_bus'
+ )
+ self.assertEqual(
+ 'virtio',
+ instance.system_metadata.get('image_hw_disk_bus'),
+ 'image_hw_disk_bus'
+ )
+ mock_instance_save.assert_called_once()
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock())
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_instance_not_found(self, mock_get_instance):
+ mock_get_instance.side_effect = exception.InstanceNotFound(
+ instance_id=uuidsentinel.instance)
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_disk_bus=virtio'])
+ self.assertEqual(2, ret, 'return code')
+
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid')
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_instance_mapping_not_found(
+ self,
+ mock_get_instance_mapping
+ ):
+ mock_get_instance_mapping.side_effect = \
+ exception.InstanceMappingNotFound(
+ uuid=uuidsentinel.instance)
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_disk_bus=virtio'])
+ self.assertEqual(2, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_instance_invalid_state(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.ACTIVE,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_cdrom_bus=sata']
+ )
+ self.assertEqual(3, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_invalid_input(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.SHELVED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_cdrom_bus'])
+ self.assertEqual(4, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_invalid_property_name(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.SHELVED_OFFLOADED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['foo=bar'])
+ self.assertEqual(5, ret, 'return code')
+
+ @mock.patch('nova.objects.Instance.get_by_uuid')
+ @mock.patch('nova.context.target_cell')
+ @mock.patch('nova.objects.InstanceMapping.get_by_instance_uuid',
+ new=mock.Mock(cell_mapping=mock.sentinel.cm))
+ @mock.patch('nova.context.get_admin_context',
+ new=mock.Mock(return_value=mock.sentinel.ctxt))
+ def test_set_image_properties_invalid_property_value(
+ self, mock_target_cell, mock_get_instance
+ ):
+ mock_target_cell.return_value.__enter__.return_value = \
+ mock.sentinel.cctxt
+ mock_get_instance.return_value = objects.Instance(
+ uuid=uuidsentinel.instance,
+ vm_state=obj_fields.InstanceState.STOPPED,
+ system_metadata={
+ 'image_hw_disk_bus': 'virtio',
+ }
+ )
+ ret = self.commands.set(
+ instance_uuid=uuidsentinel.instance,
+ image_properties=['hw_disk_bus=bar'])
+ self.assertEqual(6, ret, 'return code')
diff --git a/nova/tests/unit/cmd/test_nova_api.py b/nova/tests/unit/cmd/test_nova_api.py
index f13712eabd..a4f7d82105 100644
--- a/nova/tests/unit/cmd/test_nova_api.py
+++ b/nova/tests/unit/cmd/test_nova_api.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.cmd import api
from nova import config
diff --git a/nova/tests/unit/cmd/test_policy.py b/nova/tests/unit/cmd/test_policy.py
index 4c990a8ff1..29dd5610f6 100644
--- a/nova/tests/unit/cmd/test_policy.py
+++ b/nova/tests/unit/cmd/test_policy.py
@@ -18,9 +18,9 @@
"""
from io import StringIO
+from unittest import mock
import fixtures
-import mock
from nova.cmd import policy
import nova.conf
@@ -128,20 +128,21 @@ class TestPolicyCheck(test.NoDBTestCase):
self.assertEqual(set(expected_rules), set(passing_rules))
def test_filter_rules_non_admin(self):
- context = nova_context.RequestContext()
- rule_conditions = [base_policies.PROJECT_READER_OR_SYSTEM_READER]
+ context = nova_context.RequestContext(roles=['reader'])
+ rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(context, expected_rules=expected_rules)
def test_filter_rules_admin(self):
- self._check_filter_rules()
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context)
def test_filter_rules_instance_non_admin(self):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- context = nova_context.RequestContext()
+ context = nova_context.RequestContext(roles=['reader'])
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str == base_policies.RULE_ANY]
self._check_filter_rules(context, instance, expected_rules)
@@ -150,13 +151,15 @@ class TestPolicyCheck(test.NoDBTestCase):
db_context = nova_context.RequestContext(user_id='fake-user',
project_id='fake-project')
instance = fake_instance.fake_instance_obj(db_context)
- self._check_filter_rules(target=instance)
+ context = nova_context.RequestContext(roles=['admin'])
+ self._check_filter_rules(context, target=instance)
def test_filter_rules_instance_owner(self):
db_context = nova_context.RequestContext(user_id='fake-user',
- project_id='fake-project')
+ project_id='fake-project',
+ roles=['reader'])
instance = fake_instance.fake_instance_obj(db_context)
- rule_conditions = [base_policies.PROJECT_READER_OR_SYSTEM_READER]
+ rule_conditions = [base_policies.PROJECT_READER_OR_ADMIN]
expected_rules = [r.name for r in ia_policies.list_rules() if
r.check_str in rule_conditions]
self._check_filter_rules(db_context, instance, expected_rules)
diff --git a/nova/tests/unit/cmd/test_scheduler.py b/nova/tests/unit/cmd/test_scheduler.py
index e207c7343f..2927492abc 100644
--- a/nova/tests/unit/cmd/test_scheduler.py
+++ b/nova/tests/unit/cmd/test_scheduler.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.cmd import scheduler
from nova import config
diff --git a/nova/tests/unit/cmd/test_status.py b/nova/tests/unit/cmd/test_status.py
index d0343212f2..f5fcc168ee 100644
--- a/nova/tests/unit/cmd/test_status.py
+++ b/nova/tests/unit/cmd/test_status.py
@@ -18,13 +18,12 @@ Unit tests for the nova-status CLI interfaces.
# NOTE(cdent): Additional tests of nova-status may be found in
# nova/tests/functional/test_nova_status.py. Those tests use the external
-# PlacementFixture, which is only available in functioanl tests.
+# PlacementFixture, which is only available in functional tests.
from io import StringIO
+from unittest import mock
import fixtures
-import mock
-
from keystoneauth1 import exceptions as ks_exc
from keystoneauth1 import loading as keystone
from keystoneauth1 import session
@@ -40,7 +39,6 @@ from nova import exception
# in the tests, we don't use them in the actual CLI.
from nova import objects
from nova.objects import service
-from nova import policy
from nova import test
from nova.tests import fixtures as nova_fixtures
@@ -394,60 +392,6 @@ class TestUpgradeCheckCinderAPI(test.NoDBTestCase):
self.assertEqual(upgradecheck.Code.SUCCESS, result.code)
-class TestUpgradeCheckPolicy(test.NoDBTestCase):
-
- new_default_status = upgradecheck.Code.WARNING
-
- def setUp(self):
- super(TestUpgradeCheckPolicy, self).setUp()
- self.cmd = status.UpgradeCommands()
- self.rule_name = "system_admin_api"
-
- def tearDown(self):
- super(TestUpgradeCheckPolicy, self).tearDown()
- # Check if policy is reset back after the upgrade check
- self.assertIsNone(policy._ENFORCER)
-
- def test_policy_rule_with_new_defaults(self):
- new_default = "role:admin and system_scope:all"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
- self.assertEqual(self.new_default_status,
- self.cmd._check_policy().code)
-
- def test_policy_rule_with_old_defaults(self):
- new_default = "is_admin:True"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
-
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
- def test_policy_rule_with_both_defaults(self):
- new_default = "(role:admin and system_scope:all) or is_admin:True"
- rule = {self.rule_name: new_default}
- self.policy.set_rules(rule, overwrite=False)
-
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
- def test_policy_checks_with_fresh_init_and_no_policy_override(self):
- self.policy = self.useFixture(nova_fixtures.OverridePolicyFixture(
- rules_in_file={}))
- policy.reset()
- self.assertEqual(upgradecheck.Code.SUCCESS,
- self.cmd._check_policy().code)
-
-
-class TestUpgradeCheckPolicyEnableScope(TestUpgradeCheckPolicy):
-
- new_default_status = upgradecheck.Code.SUCCESS
-
- def setUp(self):
- super(TestUpgradeCheckPolicyEnableScope, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
-
-
class TestUpgradeCheckOldCompute(test.NoDBTestCase):
def setUp(self):
@@ -474,7 +418,7 @@ class TestUpgradeCheckOldCompute(test.NoDBTestCase):
"nova.objects.service.get_minimum_version_all_cells",
return_value=too_old):
result = self.cmd._check_old_computes()
- self.assertEqual(upgradecheck.Code.WARNING, result.code)
+ self.assertEqual(upgradecheck.Code.FAILURE, result.code)
class TestCheckMachineTypeUnset(test.NoDBTestCase):
diff --git a/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py b/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py
index aed34ea30c..a563a7e346 100644
--- a/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py
+++ b/nova/tests/unit/compute/monitors/cpu/test_virt_driver.py
@@ -15,7 +15,7 @@
"""Tests for Compute Driver CPU resource monitor."""
-import mock
+from unittest import mock
from nova.compute.monitors.cpu import virt_driver
from nova import objects
diff --git a/nova/tests/unit/compute/monitors/test_monitors.py b/nova/tests/unit/compute/monitors/test_monitors.py
index 34b4a34d20..d43f90206c 100644
--- a/nova/tests/unit/compute/monitors/test_monitors.py
+++ b/nova/tests/unit/compute/monitors/test_monitors.py
@@ -15,7 +15,7 @@
"""Tests for resource monitors."""
-import mock
+from unittest import mock
from nova.compute import monitors
from nova import test
diff --git a/nova/tests/unit/compute/provider_config_data/v1/validation_error_test_data.yaml b/nova/tests/unit/compute/provider_config_data/v1/validation_error_test_data.yaml
index 278b77cae6..ac0b61a207 100644
--- a/nova/tests/unit/compute/provider_config_data/v1/validation_error_test_data.yaml
+++ b/nova/tests/unit/compute/provider_config_data/v1/validation_error_test_data.yaml
@@ -31,7 +31,7 @@ property__source_file_present_value:
schema_version: '1.0'
__source_file: "present"
expected_messages:
- - "{} is not allowed for"
+ - "should not be valid under {}"
- "validating 'not' in schema['properties']['__source_file']"
property__source_file_present_null:
config:
@@ -39,7 +39,7 @@ property__source_file_present_null:
schema_version: '1.0'
__source_file: null
expected_messages:
- - "{} is not allowed for"
+ - "should not be valid under {}"
- "validating 'not' in schema['properties']['__source_file']"
provider_invalid_uuid:
config:
diff --git a/nova/tests/unit/compute/test_api.py b/nova/tests/unit/compute/test_api.py
index 64064cf636..9d6e9ba4bd 100644
--- a/nova/tests/unit/compute/test_api.py
+++ b/nova/tests/unit/compute/test_api.py
@@ -15,12 +15,13 @@
import contextlib
import datetime
+from unittest import mock
import ddt
import fixtures
import iso8601
-import mock
import os_traits as ot
+from oslo_limit import exception as limit_exceptions
from oslo_messaging import exceptions as oslo_exceptions
from oslo_serialization import jsonutils
from oslo_utils import fixture as utils_fixture
@@ -42,6 +43,7 @@ from nova import context
from nova.db.main import api as db
from nova import exception
from nova.image import glance as image_api
+from nova.limit import placement as placement_limit
from nova.network import constants
from nova.network import model
from nova.network import neutron as neutron_api
@@ -206,6 +208,10 @@ class _ComputeAPIUnitTestMixIn(object):
list_obj.obj_reset_changes()
return list_obj
+ @mock.patch(
+ 'nova.network.neutron.API.is_remote_managed_port',
+ new=mock.Mock(return_value=False),
+ )
@mock.patch('nova.objects.Quotas.check_deltas')
@mock.patch('nova.conductor.conductor_api.ComputeTaskAPI.build_instances')
@mock.patch('nova.compute.api.API._record_action_start')
@@ -521,6 +527,36 @@ class _ComputeAPIUnitTestMixIn(object):
instance, fake_bdm)
@mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name')
+ @mock.patch.object(
+ objects.BlockDeviceMapping, 'get_by_volume_and_instance')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_volume')
+ def test_attach_volume_reserve_bdm_timeout(
+ self, mock_get_by_volume, mock_get_by_volume_and_instance,
+ mock_reserve):
+ mock_get_by_volume.side_effect = exception.VolumeBDMNotFound(
+ volume_id='fake-volume-id')
+
+ fake_bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
+ mock_get_by_volume_and_instance.return_value = fake_bdm
+ instance = self._create_instance_obj()
+ volume = fake_volume.fake_volume(1, 'test-vol', 'test-vol',
+ None, None, None, None, None)
+
+ mock_reserve.side_effect = oslo_exceptions.MessagingTimeout()
+
+ mock_volume_api = mock.patch.object(self.compute_api, 'volume_api',
+ mock.MagicMock(spec=cinder.API))
+
+ with mock_volume_api as mock_v_api:
+ mock_v_api.get.return_value = volume
+ self.assertRaises(oslo_exceptions.MessagingTimeout,
+ self.compute_api.attach_volume,
+ self.context, instance, volume['id'])
+ mock_get_by_volume_and_instance.assert_called_once_with(
+ self.context, volume['id'], instance.uuid)
+ fake_bdm.destroy.assert_called_once_with()
+
+ @mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_volume')
@mock.patch.object(compute_rpcapi.ComputeAPI, 'attach_volume')
def test_attach_volume_attachment_create_fails(
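Editor's note: the new timeout test added above asserts a cleanup contract — if the RPC call that reserves a device name times out, the just-created BlockDeviceMapping must be destroyed so no orphaned record is left behind. A hedged, simplified sketch of that pattern (helper name and callable shape assumed, not the actual compute API code):

    # Hedged sketch: destroy the half-created BDM if reserving the device
    # name over RPC times out, then re-raise the timeout (as the test asserts).
    from oslo_messaging import exceptions as oslo_exceptions

    def reserve_with_cleanup(reserve_call, bdm):
        try:
            return reserve_call()
        except oslo_exceptions.MessagingTimeout:
            bdm.destroy()  # no orphaned mapping is left behind
            raise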
@@ -931,6 +967,31 @@ class _ComputeAPIUnitTestMixIn(object):
return snapshot_id
+ def _test_delete(self, delete_type, **attrs):
+ delete_time = datetime.datetime(
+ 1955, 11, 5, 9, 30, tzinfo=iso8601.UTC)
+ timeutils.set_time_override(delete_time)
+ self.addCleanup(timeutils.clear_time_override)
+
+ with test.nested(
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'confirm_resize'),
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'terminate_instance'),
+ mock.patch.object(
+ self.compute_api.compute_rpcapi, 'soft_delete_instance'),
+ ) as (
+ mock_confirm, mock_terminate, mock_soft_delete
+ ):
+ self._do_delete(
+ delete_type,
+ mock_confirm,
+ mock_terminate,
+ mock_soft_delete,
+ delete_time,
+ **attrs
+ )
+
@mock.patch.object(compute_utils,
'notify_about_instance_action')
@mock.patch.object(objects.Migration, 'get_by_instance_and_status')
@@ -950,12 +1011,13 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid', return_value=[])
@mock.patch.object(objects.Instance, 'save')
- def _test_delete(self, delete_type, mock_save, mock_bdm_get, mock_elevated,
- mock_get_cn, mock_up, mock_record, mock_inst_update,
- mock_deallocate, mock_inst_meta, mock_inst_destroy,
- mock_notify_legacy, mock_get_inst,
- mock_save_im, mock_image_delete, mock_mig_get,
- mock_notify, **attrs):
+ def _do_delete(
+ self, delete_type, mock_confirm, mock_terminate, mock_soft_delete,
+ delete_time, mock_save, mock_bdm_get, mock_elevated, mock_get_cn,
+ mock_up, mock_record, mock_inst_update, mock_deallocate,
+ mock_inst_meta, mock_inst_destroy, mock_notify_legacy, mock_get_inst,
+ mock_save_im, mock_image_delete, mock_mig_get, mock_notify, **attrs
+ ):
expected_save_calls = [mock.call()]
expected_record_calls = []
expected_elevated_calls = []
@@ -965,17 +1027,11 @@ class _ComputeAPIUnitTestMixIn(object):
deltas = {'instances': -1,
'cores': -inst.flavor.vcpus,
'ram': -inst.flavor.memory_mb}
- delete_time = datetime.datetime(1955, 11, 5, 9, 30,
- tzinfo=iso8601.UTC)
- self.useFixture(utils_fixture.TimeFixture(delete_time))
task_state = (delete_type == 'soft_delete' and
task_states.SOFT_DELETING or task_states.DELETING)
updates = {'progress': 0, 'task_state': task_state}
if delete_type == 'soft_delete':
updates['deleted_at'] = delete_time
- rpcapi = self.compute_api.compute_rpcapi
- mock_confirm = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'confirm_resize')).mock
def _reset_task_state(context, instance, migration, src_host,
cast=False):
@@ -990,11 +1046,6 @@ class _ComputeAPIUnitTestMixIn(object):
snapshot_id = self._set_delete_shelved_part(inst,
mock_image_delete)
- mock_terminate = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'terminate_instance')).mock
- mock_soft_delete = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'soft_delete_instance')).mock
-
if inst.task_state == task_states.RESIZE_FINISH:
self._test_delete_resizing_part(inst, deltas)
@@ -1082,7 +1133,7 @@ class _ComputeAPIUnitTestMixIn(object):
mock_mig_get.assert_called_once_with(
self.context, instance_uuid, 'finished')
mock_confirm.assert_called_once_with(
- self.context, inst, migration, migration['source_compute'],
+ self.context, inst, migration, migration.source_compute,
cast=False)
if instance_host is not None:
mock_get_cn.assert_called_once_with(self.context,
@@ -1203,10 +1254,12 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.notify_about_instance_usage')
@mock.patch('nova.objects.Service.get_by_compute_host')
+ @mock.patch('nova.compute.api.API._record_action_start')
@mock.patch('nova.compute.api.API._local_delete')
def test_delete_error_state_with_no_host(
- self, mock_local_delete, mock_service_get, _mock_notify,
- _mock_save, mock_bdm_get, mock_lookup, _mock_del_booting):
+ self, mock_local_delete, mock_record, mock_service_get,
+ _mock_notify, _mock_save, mock_bdm_get, mock_lookup,
+ _mock_del_booting):
# Instance in error state with no host should be a local delete
# for non API cells
inst = self._create_instance_obj(params=dict(vm_state=vm_states.ERROR,
@@ -1218,6 +1271,8 @@ class _ComputeAPIUnitTestMixIn(object):
mock_local_delete.assert_called_once_with(
self.context, inst, mock_bdm_get.return_value,
'delete', self.compute_api._do_delete)
+ mock_record.assert_called_once_with(self.context, inst,
+ instance_actions.DELETE)
mock_terminate.assert_not_called()
mock_service_get.assert_not_called()
@@ -1317,10 +1372,6 @@ class _ComputeAPIUnitTestMixIn(object):
self.context, instance_uuid, constraint='constraint',
hard_delete=False)
- def _fake_do_delete(context, instance, bdms,
- rservations=None, local=False):
- pass
-
@mock.patch.object(compute_utils, 'notify_about_instance_action')
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@mock.patch.object(cinder.API, 'detach')
@@ -1342,9 +1393,11 @@ class _ComputeAPIUnitTestMixIn(object):
mock_elevated.return_value = self.context
mock_detach.side_effect = exception.VolumeNotFound('volume_id')
+        # A lambda is used as a no-op stand-in for the delete function
+        # called by compute_api._local_delete
self.compute_api._local_delete(self.context, inst, bdms,
'delete',
- self._fake_do_delete)
+ lambda *args, **kwargs: None)
mock_notify_legacy.assert_has_calls([
mock.call(self.compute_api.notifier, self.context,
@@ -1380,8 +1433,11 @@ class _ComputeAPIUnitTestMixIn(object):
inst._context = self.context
mock_elevated.return_value = self.context
bdms = []
+        # A lambda is used as a no-op stand-in for the delete function
+        # called by compute_api._local_delete
self.compute_api._local_delete(self.context, inst, bdms,
- 'delete', self._fake_do_delete)
+ 'delete',
+ lambda *args, **kwargs: None)
mock_del_arqs.assert_called_once_with(self.context, inst)
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@@ -1817,6 +1873,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.context, objects.Migration(),
test_migration.fake_db_migration())
fake_reqspec = objects.RequestSpec()
+ fake_reqspec.is_bfv = False
fake_reqspec.flavor = fake_inst.flavor
fake_numa_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
@@ -2036,7 +2093,8 @@ class _ComputeAPIUnitTestMixIn(object):
filter_properties = {'ignore_hosts': [fake_inst['host']]}
if request_spec:
- fake_spec = objects.RequestSpec()
+ fake_spec = objects.RequestSpec(
+ pci_requests=objects.InstancePCIRequests(requests=[]))
if requested_destination:
cell1 = objects.CellMapping(uuid=uuids.cell1, name='cell1')
fake_spec.requested_destination = objects.Destination(
@@ -2176,6 +2234,8 @@ class _ComputeAPIUnitTestMixIn(object):
def test_resize_allow_cross_cell_resize_true(self):
self._test_resize(allow_cross_cell_resize=True)
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.servicegroup.api.API.service_is_up',
new=mock.Mock(return_value=True))
@mock.patch('nova.compute.flavors.get_flavor_by_flavor_id')
@@ -2391,6 +2451,8 @@ class _ComputeAPIUnitTestMixIn(object):
do_test()
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.servicegroup.api.API.service_is_up',
new=mock.Mock(return_value=True))
@mock.patch.object(objects.Instance, 'save')
@@ -2449,6 +2511,8 @@ class _ComputeAPIUnitTestMixIn(object):
mock_record.assert_not_called()
mock_resize.assert_not_called()
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.servicegroup.api.API.service_is_up',
new=mock.Mock(return_value=True))
@mock.patch.object(flavors, 'get_flavor_by_flavor_id')
@@ -2476,6 +2540,8 @@ class _ComputeAPIUnitTestMixIn(object):
fake_inst, flavor_id='flavor-id')
self.assertFalse(mock_save.called)
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.servicegroup.api.API.service_is_up',
new=mock.Mock(return_value=True))
@mock.patch.object(flavors, 'get_flavor_by_flavor_id')
@@ -2509,6 +2575,32 @@ class _ComputeAPIUnitTestMixIn(object):
else:
self.fail("Exception not raised")
+ @mock.patch.object(placement_limit, 'enforce_num_instances_and_flavor')
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
+ @mock.patch('nova.servicegroup.api.API.service_is_up',
+ new=mock.Mock(return_value=True))
+ @mock.patch.object(flavors, 'get_flavor_by_flavor_id')
+ def test_resize_instance_quota_exceeds_with_multiple_resources_ul(
+ self, mock_get_flavor, mock_enforce):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ mock_enforce.side_effect = limit_exceptions.ProjectOverLimit(
+ self.context.project_id, [limit_exceptions.OverLimitInfo(
+ resource_name='servers', limit=1, current_usage=1, delta=1)])
+ mock_get_flavor.return_value = self._create_flavor(id=333,
+ vcpus=3,
+ memory_mb=1536)
+
+ self.assertRaises(limit_exceptions.ProjectOverLimit,
+ self.compute_api.resize,
+ self.context, self._create_instance_obj(),
+ 'fake_flavor_id')
+
+ mock_get_flavor.assert_called_once_with('fake_flavor_id',
+ read_deleted="no")
+ mock_enforce.assert_called_once_with(
+ self.context, "fake", mock_get_flavor.return_value, False, 1, 1)
+
# TODO(huaqiang): Remove in Wallaby
@mock.patch('nova.servicegroup.api.API.service_is_up',
new=mock.Mock(return_value=True))
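Editor's note: the unified-limits resize test above builds its failure out of oslo.limit's public exception classes; for reference, a minimal stand-alone construction of the same error, mirroring the test's own usage (project id value is illustrative):

    # Illustration only: the ProjectOverLimit shape the test expects when a
    # resize would exceed the 'servers' limit under unified limits.
    from oslo_limit import exception as limit_exceptions

    over_limit = limit_exceptions.ProjectOverLimit(
        'fake-project-id',
        [limit_exceptions.OverLimitInfo(
            resource_name='servers', limit=1, current_usage=1, delta=1)])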
@@ -2565,9 +2657,6 @@ class _ComputeAPIUnitTestMixIn(object):
rpcapi = self.compute_api.compute_rpcapi
- mock_pause = self.useFixture(
- fixtures.MockPatchObject(rpcapi, 'pause_instance')).mock
-
with mock.patch.object(rpcapi, 'pause_instance') as mock_pause:
self.compute_api.pause(self.context, instance)
@@ -3346,7 +3435,7 @@ class _ComputeAPIUnitTestMixIn(object):
raise exception.InstanceQuiesceNotSupported(
instance_id=instance['uuid'], reason='unsupported')
if quiesce_fails:
- raise oslo_exceptions.MessagingTimeout('quiece timeout')
+ raise oslo_exceptions.MessagingTimeout('quiesce timeout')
quiesced[0] = True
def fake_unquiesce_instance(context, instance, mapping=None):
@@ -3407,7 +3496,9 @@ class _ComputeAPIUnitTestMixIn(object):
'device_type': None, 'snapshot_id': '1-snapshot',
'device_name': '/dev/vda',
'destination_type': 'volume', 'delete_on_termination': False,
- 'tag': None, 'volume_type': None})
+ 'tag': None, 'volume_type': None,
+ 'encrypted': None, 'encryption_format': None,
+ 'encryption_secret_uuid': None, 'encryption_options': None})
limits_patcher = mock.patch.object(
self.compute_api.volume_api, 'get_absolute_limits',
@@ -3470,7 +3561,9 @@ class _ComputeAPIUnitTestMixIn(object):
'device_type': None, 'snapshot_id': None,
'device_name': '/dev/vdh',
'destination_type': 'local', 'delete_on_termination': True,
- 'tag': None, 'volume_type': None})
+ 'tag': None, 'volume_type': None,
+ 'encrypted': False, 'encryption_format': None,
+ 'encryption_secret_uuid': None, 'encryption_options': None})
quiesced = [False, False]
@@ -3508,7 +3601,7 @@ class _ComputeAPIUnitTestMixIn(object):
self._test_snapshot_volume_backed(quiesce_required=True,
quiesce_unsupported=False)
- def test_snaphost_volume_backed_with_quiesce_failure(self):
+ def test_snapshot_volume_backed_with_quiesce_failure(self):
self.assertRaises(oslo_exceptions.MessagingTimeout,
self._test_snapshot_volume_backed,
quiesce_required=True,
@@ -3915,6 +4008,158 @@ class _ComputeAPIUnitTestMixIn(object):
_checks_for_create_and_rebuild.assert_called_once_with(
self.context, None, image, flavor, {}, [], None)
+ @ddt.data(True, False)
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_image_arch')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_volume_backed(self, reimage_boot_vol,
+ _record_action_start, _checks_for_create_and_rebuild,
+ _check_auto_disk_config,
+ _check_image_arch, mock_get_image,
+ mock_get_bdms, get_flavor,
+ instance_save, req_spec_get_by_inst_uuid, request_save):
+ """Test a scenario where the instance is volume backed and we rebuild
+        with the following cases:
+
+ 1) reimage_boot_volume=True
+ 2) reimage_boot_volume=False
+
+ """
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata={}, image_ref=uuids.image_ref,
+ expected_attrs=['system_metadata'], node='fake')
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=None, image_id=None,
+ source_type='volume', destination_type='volume',
+ volume_type=None, snapshot_id=None,
+ volume_id=uuids.volume_id, volume_size=None)])
+ mock_get_bdms.return_value = bdms
+ get_flavor.return_value = test_flavor.fake_flavor
+ flavor = instance.get_flavor()
+ image = {
+ "id": uuids.image_ref,
+ "min_ram": 10, "min_disk": 1,
+ "properties": {
+ 'architecture': fields_obj.Architecture.X86_64}}
+ mock_get_image.return_value = (None, image)
+ fake_spec = objects.RequestSpec(id=1, force_nodes=None)
+ req_spec_get_by_inst_uuid.return_value = fake_spec
+ fake_volume = {'id': uuids.volume_id, 'status': 'in-use'}
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ admin_pass = "new password"
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume), \
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm), \
+ mock.patch.object(self.compute_api.compute_task_api,
+ 'rebuild_instance') as rebuild_instance:
+ if reimage_boot_vol:
+ self.compute_api.rebuild(self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=True)
+ rebuild_instance.assert_called_once_with(self.context,
+ instance=instance, new_pass=admin_pass,
+ image_ref=uuids.image_ref,
+ orig_image_ref=None, orig_sys_metadata={},
+ injected_files=[], bdms=bdms,
+ preserve_ephemeral=False, host=None,
+ request_spec=fake_spec,
+ reimage_boot_volume=True,
+ target_state=None)
+ _check_auto_disk_config.assert_called_once_with(
+ image=image, auto_disk_config=None)
+ _checks_for_create_and_rebuild.assert_called_once_with(
+ self.context, None, image, flavor, {}, [], root_bdm)
+ mock_get_bdms.assert_called_once_with(
+ self.context, instance.uuid)
+ else:
+ self.assertRaises(
+ exception.NovaException,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=False,
+ target_state=None)
+
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(objects.Instance, 'get_flavor')
+ @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
+ @mock.patch.object(compute_api.API, '_get_image')
+ @mock.patch.object(compute_api.API, '_check_image_arch')
+ @mock.patch.object(compute_api.API, '_check_auto_disk_config')
+ @mock.patch.object(compute_api.API, '_checks_for_create_and_rebuild')
+ @mock.patch.object(compute_api.API, '_record_action_start')
+ def test_rebuild_volume_backed_fails(self, _record_action_start,
+ _checks_for_create_and_rebuild, _check_auto_disk_config,
+ _check_image_arch, mock_get_image,
+ mock_get_bdms, get_flavor,
+ instance_save, req_spec_get_by_inst_uuid, request_save):
+        """Test a scenario where we don't pass the parameter required to
+        rebuild the boot volume.
+ """
+ instance = fake_instance.fake_instance_obj(
+ self.context, vm_state=vm_states.ACTIVE, cell_name='fake-cell',
+ launched_at=timeutils.utcnow(),
+ system_metadata={}, image_ref=uuids.image_ref,
+ expected_attrs=['system_metadata'], node='fake')
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=None, image_id=None,
+ source_type='volume', destination_type='volume',
+ volume_type=None, snapshot_id=None,
+ volume_id=uuids.volume_id, volume_size=None)])
+ mock_get_bdms.return_value = bdms
+ get_flavor.return_value = test_flavor.fake_flavor
+ image = {
+ "id": uuids.image_ref,
+ "min_ram": 10, "min_disk": 1,
+ "properties": {
+ 'architecture': fields_obj.Architecture.X86_64}}
+ mock_get_image.return_value = (None, image)
+ fake_spec = objects.RequestSpec(id=1, force_nodes=None)
+ req_spec_get_by_inst_uuid.return_value = fake_spec
+ fake_volume = {'id': uuids.volume_id, 'status': 'in-use'}
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ admin_pass = "new password"
+ with mock.patch.object(self.compute_api.volume_api, 'get',
+ return_value=fake_volume), \
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm):
+ self.assertRaises(exception.NovaException,
+ self.compute_api.rebuild,
+ self.context,
+ instance,
+ uuids.image_ref,
+ admin_pass,
+ reimage_boot_volume=False,
+ target_state=None)
+
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.Instance, 'get_flavor')
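Editor's note: both rebuild tests above exercise the same guard — a volume-backed server may only be rebuilt when the caller explicitly opts in to re-imaging the boot volume. A hedged, simplified sketch of that check (assumed shape, not the actual compute API implementation):

    # Hedged sketch: reject a volume-backed rebuild unless the caller asked
    # to re-image the root volume, as both tests above assert.
    def check_volume_backed_rebuild(is_volume_backed, reimage_boot_volume):
        if is_volume_backed and not reimage_boot_volume:
            raise RuntimeError(  # stands in for exception.NovaException
                'Rebuilding a volume-backed server requires '
                'reimage_boot_volume=True')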
@@ -3963,7 +4208,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4036,7 +4282,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=None,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
# assert the request spec was modified so the scheduler picks
# the existing instance host/node
req_spec_save.assert_called_once_with()
@@ -4104,7 +4351,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4163,7 +4411,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4227,7 +4476,8 @@ class _ComputeAPIUnitTestMixIn(object):
orig_image_ref=uuids.image_ref,
orig_sys_metadata=orig_system_metadata, bdms=bdms,
preserve_ephemeral=False, host=instance.host,
- request_spec=fake_spec)
+ request_spec=fake_spec, reimage_boot_volume=False,
+ target_state=None)
_check_auto_disk_config.assert_called_once_with(
image=image, auto_disk_config=None)
@@ -4329,6 +4579,8 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.objects.Quotas.get_all_by_project_and_user',
new=mock.MagicMock())
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.objects.Quotas.count_as_dict')
@mock.patch('nova.objects.Quotas.limit_check_project_and_user')
@mock.patch('nova.objects.Instance.save')
@@ -4371,6 +4623,8 @@ class _ComputeAPIUnitTestMixIn(object):
@mock.patch('nova.objects.Quotas.get_all_by_project_and_user',
new=mock.MagicMock())
+ @mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.objects.Quotas.count_as_dict')
@mock.patch('nova.objects.Quotas.limit_check_project_and_user')
@mock.patch('nova.objects.Instance.save')
@@ -4740,7 +4994,7 @@ class _ComputeAPIUnitTestMixIn(object):
def test_validate_vol_az_for_create_vol_az_matches_default_cpu_az(self):
"""Tests the scenario that the instance is not being created in a
specific zone and the volume's zone matches
- CONF.default_availabilty_zone so None is returned indicating the
+ CONF.default_availability_zone so None is returned indicating the
RequestSpec.availability_zone does not need to be updated.
"""
self.flags(cross_az_attach=False, group='cinder')
@@ -5548,7 +5802,10 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
- rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({})
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
with test.nested(
mock.patch.object(self.compute_api.placementclient,
@@ -5600,6 +5857,7 @@ class _ComputeAPIUnitTestMixIn(object):
# Assert that the instance task state as set in the compute API
self.assertEqual(task_states.RESCUING, instance.task_state)
+ @mock.patch('nova.objects.instance.Instance.image_meta')
@mock.patch('nova.objects.compute_node.ComputeNode'
'.get_by_host_and_nodename')
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -5608,7 +5866,8 @@ class _ComputeAPIUnitTestMixIn(object):
'.get_by_instance_uuid')
def test_rescue_bfv_without_required_trait(self, mock_get_bdms,
mock_is_volume_backed,
- mock_get_cn):
+ mock_get_cn,
+ mock_image_meta):
instance = self._create_instance_obj()
bdms = objects.BlockDeviceMappingList(objects=[
objects.BlockDeviceMapping(
@@ -5616,6 +5875,12 @@ class _ComputeAPIUnitTestMixIn(object):
destination_type='volume', volume_type=None,
snapshot_id=None, volume_id=uuids.volume_id,
volume_size=None)])
+
+ instance.image_meta = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
with test.nested(
mock.patch.object(self.compute_api.placementclient,
'get_provider_traits'),
@@ -5653,6 +5918,124 @@ class _ComputeAPIUnitTestMixIn(object):
mock_get_traits.assert_called_once_with(
self.context, uuids.cn)
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.objects.compute_node.ComputeNode'
+ '.get_by_host_and_nodename')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_with_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed, mock_get_cn,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {'hw_rescue_device': 'disk',
+ 'hw_rescue_bus': 'scsi'}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.placementclient,
+ 'get_provider_traits'),
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(self.compute_api, '_record_action_start'),
+ mock.patch.object(self.compute_api.compute_rpcapi,
+ 'rescue_instance')
+ ) as (
+ mock_get_traits, mock_get_volume, mock_check_attached,
+ mock_instance_save, mock_record_start, mock_rpcapi_rescue
+ ):
+ # Mock out the returned compute node, image_meta, bdms and volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_get_cn.return_value = mock.Mock(uuid=uuids.cn)
+
+ # Ensure the required trait is returned, allowing BFV rescue
+ mock_trait_info = mock.Mock(traits=[ot.COMPUTE_RESCUE_BFV])
+ mock_get_traits.return_value = mock_trait_info
+
+ # Try to rescue the instance
+ self.compute_api.rescue(self.context, instance,
+ rescue_image_ref=uuids.rescue_image_id,
+ allow_bfv_rescue=True)
+
+ # Assert all of the calls made in the compute API
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+ mock_get_cn.assert_called_once_with(
+ self.context, instance.host, instance.node)
+ mock_get_traits.assert_called_once_with(self.context, uuids.cn)
+ mock_instance_save.assert_called_once_with(
+ expected_task_state=[None])
+ mock_record_start.assert_called_once_with(
+ self.context, instance, instance_actions.RESCUE)
+ mock_rpcapi_rescue.assert_called_once_with(
+ self.context, instance=instance, rescue_password=None,
+ rescue_image_ref=uuids.rescue_image_id, clean_shutdown=True)
+
+            # Assert that the instance task state was set in the compute API
+ self.assertEqual(task_states.RESCUING, instance.task_state)
+
+ @mock.patch('nova.objects.image_meta.ImageMeta.from_image_ref')
+ @mock.patch('nova.compute.utils.is_volume_backed_instance',
+ return_value=True)
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList'
+ '.get_by_instance_uuid')
+ def test_rescue_bfv_without_required_image_properties(
+ self, mock_get_bdms, mock_is_volume_backed,
+ mock_image_meta_obj_from_ref):
+ instance = self._create_instance_obj()
+ bdms = objects.BlockDeviceMappingList(objects=[
+ objects.BlockDeviceMapping(
+ boot_index=0, image_id=uuids.image_id, source_type='image',
+ destination_type='volume', volume_type=None,
+ snapshot_id=None, volume_id=uuids.volume_id,
+ volume_size=None)])
+ rescue_image_meta_obj = image_meta_obj.ImageMeta.from_dict({
+ 'properties': {}
+ })
+
+ with test.nested(
+ mock.patch.object(self.compute_api.volume_api, 'get'),
+ mock.patch.object(self.compute_api.volume_api, 'check_attached'),
+ ) as (
+ mock_get_volume, mock_check_attached
+ ):
+ # Mock out the returned bdms, volume and image_meta
+ mock_get_bdms.return_value = bdms
+ mock_get_volume.return_value = mock.sentinel.volume
+ mock_image_meta_obj_from_ref.return_value = rescue_image_meta_obj
+
+            # Assert that any attempt to rescue a bfv instance with a rescue
+            # image that does not define the required hw_rescue_device and
+            # hw_rescue_bus properties fails and raises InstanceNotRescuable
+ self.assertRaises(exception.InstanceNotRescuable,
+ self.compute_api.rescue, self.context, instance,
+ rescue_image_ref=None, allow_bfv_rescue=True)
+
+ # Assert the calls made in the compute API prior to the failure
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ mock_get_volume.assert_called_once_with(
+ self.context, uuids.volume_id)
+ mock_check_attached.assert_called_once_with(
+ self.context, mock.sentinel.volume)
+ mock_is_volume_backed.assert_called_once_with(
+ self.context, instance, bdms)
+
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
@mock.patch('nova.objects.block_device.BlockDeviceMappingList'
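Editor's note: taken together, the two new rescue tests encode the preconditions for boot-from-volume rescue — the compute node must expose the COMPUTE_RESCUE_BFV trait and the rescue image must carry hw_rescue_device/hw_rescue_bus properties. A hypothetical condensed form of that validation (assumed simplification, not the production check):

    # Hypothetical condensed validation implied by the two BFV rescue tests.
    def can_bfv_rescue(node_traits, rescue_image_props, allow_bfv_rescue=True):
        has_trait = 'COMPUTE_RESCUE_BFV' in node_traits
        stable_rescue = ('hw_rescue_device' in rescue_image_props and
                         'hw_rescue_bus' in rescue_image_props)
        return allow_bfv_rescue and has_trait and stable_rescue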
@@ -5837,6 +6220,41 @@ class _ComputeAPIUnitTestMixIn(object):
'volume_id': 'volume_id'}]
self._test_check_and_transform_bdm(block_device_mapping)
+ def test_update_ephemeral_encryption_bdms(self):
+ flavor = self._create_flavor(
+ extra_specs={
+ 'hw:ephemeral_encryption': True,
+ 'hw:ephemeral_encryption_format': 'luks',
+ }
+ )
+ block_device_mapping = [
+ {'device_name': '/dev/sda1',
+ 'source_type': 'snapshot', 'destination_type': 'volume',
+ 'snapshot_id': uuids.snapshot_id,
+ 'delete_on_termination': False,
+ 'boot_index': 0},
+ {'device_name': '/dev/sdb2',
+ 'source_type': 'image', 'destination_type': 'local',
+ 'image_id': uuids.image_id, 'delete_on_termination': False},
+ {'device_name': '/dev/sdb3',
+ 'source_type': 'blank', 'destination_type': 'local',
+ 'guest_format': 'ext3', 'delete_on_termination': False}]
+
+ block_device_mapping = (
+ block_device_obj.block_device_make_list_from_dicts(
+ self.context,
+ map(fake_block_device.AnonFakeDbBlockDeviceDict,
+ block_device_mapping)))
+
+ self.compute_api._update_ephemeral_encryption_bdms(
+ flavor, {}, block_device_mapping)
+
+ for bdm in block_device_mapping:
+ if bdm.is_local:
+ self.assertTrue(bdm.encrypted)
+ else:
+ self.assertFalse(bdm.encrypted)
+
def test_bdm_validate_set_size_and_instance(self):
swap_size = 42
ephemeral_size = 24
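Editor's note: the ephemeral-encryption test above pins the intended mapping from flavor extra specs to block device mappings — only local (ephemeral) mappings pick up the encryption flag. A rough, assumed sketch of that helper's effect (attribute handling simplified):

    # Rough sketch: hw:ephemeral_encryption* extra specs mark only local
    # mappings as encrypted; volume-backed mappings are left untouched.
    def update_ephemeral_encryption_bdms(extra_specs, bdms):
        if not extra_specs.get('hw:ephemeral_encryption'):
            return
        enc_format = extra_specs.get('hw:ephemeral_encryption_format')
        for bdm in bdms:
            if bdm.is_local:  # destination_type == 'local'
                bdm.encrypted = True
                bdm.encryption_format = enc_format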
@@ -6296,8 +6714,9 @@ class _ComputeAPIUnitTestMixIn(object):
self.assertEqual(uuids.inst, result.uuid)
mock_get_inst.assert_called_once()
+ @mock.patch('nova.compute.api.LOG.exception')
@mock.patch.object(objects.Instance, 'get_by_uuid')
- def test_get_instance_from_cell_failure(self, mock_get_inst):
+ def test_get_instance_from_cell_failure(self, mock_get_inst, mock_log_exp):
# Make sure InstanceNotFound is bubbled up and not treated like
# other errors
mock_get_inst.side_effect = exception.InstanceNotFound(
@@ -6310,6 +6729,15 @@ class _ComputeAPIUnitTestMixIn(object):
self.compute_api._get_instance_from_cell, self.context,
im, [], False)
self.assertIn('could not be found', str(exp))
+        # Make sure other unexpected NovaExceptions are logged for debugging
+ mock_get_inst.side_effect = exception.NovaException()
+ exp = self.assertRaises(
+ exception.NovaException, self.compute_api._get_instance_from_cell,
+ self.context, im, [], False)
+ msg = (f'Cell {cell_mapping.uuid} is not responding or returned an '
+ 'exception, hence instance info is not available.')
+ self.assertIn(msg, str(exp))
+ mock_log_exp.assert_called_once_with(mock_get_inst.side_effect)
@mock.patch('nova.compute.api.API._save_user_id_in_instance_mapping')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
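Editor's note: the added assertions above distinguish InstanceNotFound, which is re-raised untouched, from any other NovaException, which is logged and surfaced with a cell-unreachable hint. A hedged sketch of the control flow being tested (stand-in exception types, not the real helper):

    # Hedged sketch: not-found errors bubble up unchanged; anything else is
    # logged and re-raised with a hint that the cell may be down.
    def get_instance_from_cell(lookup, context, instance_uuid, cell_uuid, log):
        try:
            return lookup(context, instance_uuid)
        except LookupError:        # stands in for exception.InstanceNotFound
            raise
        except Exception as exc:   # stands in for other NovaException types
            log(exc)
            raise RuntimeError(
                f'Cell {cell_uuid} is not responding or returned an '
                'exception, hence instance info is not available.') from exc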
@@ -7070,7 +7498,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
# be no conflict.
self.compute_api._validate_numa_rebuild(instance, image, flavor)
- def test__validate_numa_rebuild_add_numa_toplogy(self):
+ def test__validate_numa_rebuild_add_numa_topology(self):
"""Assert that a rebuild of an instance with a new image
that requests a NUMA topology when the original instance did not
have a NUMA topology is invalid.
@@ -7093,7 +7521,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
exception.ImageNUMATopologyRebuildConflict,
self.compute_api._validate_numa_rebuild, instance, image, flavor)
- def test__validate_numa_rebuild_remove_numa_toplogy(self):
+ def test__validate_numa_rebuild_remove_numa_topology(self):
"""Assert that a rebuild of an instance with a new image
that does not request a NUMA topology when the original image did
is invalid if it would alter the instances topology as a result.
@@ -7124,7 +7552,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
self.compute_api._validate_numa_rebuild, instance,
image, flavor)
- def test__validate_numa_rebuild_alter_numa_toplogy(self):
+ def test__validate_numa_rebuild_alter_numa_topology(self):
"""Assert that a rebuild of an instance with a new image
that requests a different NUMA topology than the original image
is invalid.
@@ -7159,57 +7587,6 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
image, flavor)
@mock.patch('nova.pci.request.get_pci_requests_from_flavor')
- def test_pmu_image_and_flavor_conflict(self, mock_request):
- """Tests that calling _validate_flavor_image_nostatus()
- with an image that conflicts with the flavor raises but no
- exception is raised if there is no conflict.
- """
- image = {'id': uuids.image_id, 'status': 'foo',
- 'properties': {'hw_pmu': False}}
- flavor = objects.Flavor(
- vcpus=1, memory_mb=512, root_gb=1, extra_specs={'hw:pmu': "true"})
- self.assertRaises(
- exception.ImagePMUConflict,
- self.compute_api._validate_flavor_image_nostatus,
- self.context, image, flavor, None)
-
- @mock.patch('nova.pci.request.get_pci_requests_from_flavor')
- def test_pmu_image_and_flavor_same_value(self, mock_request):
- # assert that if both the image and flavor are set to the same value
- # no exception is raised and the function returns nothing.
- flavor = objects.Flavor(
- vcpus=1, memory_mb=512, root_gb=1, extra_specs={'hw:pmu': "true"})
-
- image = {'id': uuids.image_id, 'status': 'foo',
- 'properties': {'hw_pmu': True}}
- self.assertIsNone(self.compute_api._validate_flavor_image_nostatus(
- self.context, image, flavor, None))
-
- @mock.patch('nova.pci.request.get_pci_requests_from_flavor')
- def test_pmu_image_only(self, mock_request):
- # assert that if only the image metadata is set then it is valid
- flavor = objects.Flavor(
- vcpus=1, memory_mb=512, root_gb=1, extra_specs={})
-
- # ensure string to bool conversion works for image metadata
- # property by using "yes".
- image = {'id': uuids.image_id, 'status': 'foo',
- 'properties': {'hw_pmu': "yes"}}
- self.assertIsNone(self.compute_api._validate_flavor_image_nostatus(
- self.context, image, flavor, None))
-
- @mock.patch('nova.pci.request.get_pci_requests_from_flavor')
- def test_pmu_flavor_only(self, mock_request):
- # assert that if only the flavor extra_spec is set then it is valid
- # and test the string to bool conversion of "on" works.
- flavor = objects.Flavor(
- vcpus=1, memory_mb=512, root_gb=1, extra_specs={'hw:pmu': "on"})
-
- image = {'id': uuids.image_id, 'status': 'foo', 'properties': {}}
- self.assertIsNone(self.compute_api._validate_flavor_image_nostatus(
- self.context, image, flavor, None))
-
- @mock.patch('nova.pci.request.get_pci_requests_from_flavor')
def test_pci_validated(self, mock_request):
"""Tests that calling _validate_flavor_image_nostatus() with
validate_pci=True results in get_pci_requests_from_flavor() being
@@ -7243,6 +7620,37 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
requested_networks)
mock_get.assert_called_once_with(self.context, ['nova-compute'])
+ @mock.patch(
+ 'nova.network.neutron.API.is_remote_managed_port',
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch('nova.objects.service.get_minimum_version_all_cells',
+ return_value=60)
+ def test_check_support_vnic_remote_managed_version_before_61(
+ self, mock_get):
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=uuids.port)])
+ self.assertRaisesRegex(exception.ForbiddenWithRemoteManagedPorts,
+ 'Remote-managed ports are not supported until an upgrade is fully'
+ ' finished.',
+ self.compute_api._check_support_vnic_remote_managed,
+ self.context,
+ requested_networks)
+ mock_get.assert_called_once_with(self.context, ['nova-compute'])
+
+ @mock.patch(
+ 'nova.network.neutron.API.is_remote_managed_port',
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch('nova.objects.service.get_minimum_version_all_cells',
+ return_value=61)
+ def test_check_support_vnic_remote_managed_version_61(self, mock_get):
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=uuids.port)])
+ self.compute_api._check_support_vnic_remote_managed(self.context,
+ requested_networks)
+ mock_get.assert_called_once_with(self.context, ['nova-compute'])
+
def test_validate_and_build_base_options_translate_neutron_secgroup(self):
"""Tests that _check_requested_secgroups will return a uuid for a
requested Neutron security group and that will be returned from
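Editor's note: the two version-gate tests above pin the behaviour that remote-managed ports are rejected until every nova-compute reports service version 61 or newer. A hypothetical reduction of that check (constant and function names assumed for illustration):

    # Hypothetical reduction of the version gate the two tests assert.
    MIN_REMOTE_MANAGED_PORTS_VERSION = 61

    def check_support_vnic_remote_managed(min_service_version,
                                          has_remote_managed_port):
        if (has_remote_managed_port and
                min_service_version < MIN_REMOTE_MANAGED_PORTS_VERSION):
            raise RuntimeError(
                'Remote-managed ports are not supported until an upgrade is '
                'fully finished.')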
@@ -7638,8 +8046,9 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
@mock.patch.object(compute_utils, 'notify_about_instance_usage')
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@mock.patch.object(objects.Instance, 'destroy')
+ @mock.patch('nova.compute.api.API._record_action_start')
def _test_delete_volume_backed_instance(
- self, vm_state, mock_instance_destroy, bdm_destroy,
+ self, vm_state, mock_record, mock_instance_destroy, bdm_destroy,
notify_about_instance_usage, mock_save, mock_elevated,
bdm_get_by_instance_uuid, mock_lookup, _mock_del_booting,
notify_about_instance_action):
@@ -7668,6 +8077,8 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
'detach') as mock_detach:
self.compute_api.delete(self.context, inst)
+ mock_record.assert_called_once_with(self.context, inst,
+ instance_actions.DELETE)
mock_deallocate.assert_called_once_with(self.context, inst)
mock_detach.assert_called_once_with(self.context, volume_id,
inst.uuid)
@@ -7685,16 +8096,13 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
self.assertTrue(hasattr(self.compute_api, 'host'))
self.assertEqual(CONF.host, self.compute_api.host)
- @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
def test_placement_client_init(self, mock_report_client):
"""Tests to make sure that the construction of the placement client
- only happens once per API class instance.
+ uses the singleton helper, and happens only when needed.
"""
- self.assertIsNone(self.compute_api._placementclient)
- # Access the property twice to make sure SchedulerReportClient is
- # only loaded once.
- for x in range(2):
- self.compute_api.placementclient
+ self.assertFalse(mock_report_client.called)
+ self.compute_api.placementclient
mock_report_client.assert_called_once_with()
def test_validate_host_for_cold_migrate_same_host_fails(self):
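Editor's note: the rewritten test above reflects the move to a module-level singleton helper for the placement client, constructed lazily on first access rather than per API instance. A minimal, assumed sketch of that pattern (the factory argument is illustrative; the real helper takes no arguments):

    # Minimal sketch of a lazily constructed module-level singleton: nothing
    # is built until first access, and repeated access returns the same object.
    _placement_client = None

    def report_client_singleton(factory):
        global _placement_client
        if _placement_client is None:
            _placement_client = factory()
        return _placement_client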
@@ -7966,7 +8374,7 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
@mock.patch('nova.objects.service.get_minimum_version_all_cells',
return_value=54)
def test_block_accelerators_until_service(self, mock_get_min):
- """Support operating server with acclerators until compute service
+ """Support operating server with accelerators until compute service
more than the version of 53.
"""
extra_specs = {'accel:device_profile': 'mydp'}
diff --git a/nova/tests/unit/compute/test_claims.py b/nova/tests/unit/compute/test_claims.py
index 8997511e73..dcdef56fbe 100644
--- a/nova/tests/unit/compute/test_claims.py
+++ b/nova/tests/unit/compute/test_claims.py
@@ -15,9 +15,9 @@
"""Tests for resource tracker claims."""
+from unittest import mock
import uuid
-import mock
from nova.compute import claims
from nova import context
@@ -169,7 +169,8 @@ class ClaimTestCase(test.NoDBTestCase):
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
requests = objects.InstancePCIRequests(requests=[request])
self._claim(requests=requests)
- mock_pci_supports_requests.assert_called_once_with([request])
+ mock_pci_supports_requests.assert_called_once_with(
+ [request], provider_mapping=None)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=False)
@@ -181,7 +182,8 @@ class ClaimTestCase(test.NoDBTestCase):
exception.ComputeResourcesUnavailable,
'Claim pci failed.',
self._claim, requests=requests)
- mock_pci_supports_requests.assert_called_once_with([request])
+ mock_pci_supports_requests.assert_called_once_with(
+ [request], provider_mapping=None)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests')
def test_pci_pass_no_requests(self, mock_pci_supports_requests):
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index f65f1abdb7..36bcd368dc 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -22,10 +22,10 @@ import fixtures as std_fixtures
from itertools import chain
import operator
import sys
+from unittest import mock
from castellan import key_manager
import ddt
-import mock
from neutronclient.common import exceptions as neutron_exceptions
from oslo_log import log as logging
import oslo_messaging as messaging
@@ -168,7 +168,7 @@ class BaseTestCase(test.TestCase):
'uuid': uuids.fake_compute_node,
'vcpus_used': 0,
'deleted': 0,
- 'hypervisor_type': 'powervm',
+ 'hypervisor_type': 'libvirt',
'created_at': '2013-04-01T00:27:06.000000',
'local_gb_used': 0,
'updated_at': '2013-04-03T00:35:41.000000',
@@ -178,7 +178,7 @@ class BaseTestCase(test.TestCase):
'current_workload': 0,
'vcpus': 16,
'mapped': 1,
- 'cpu_info': 'ppc64,powervm,3940',
+ 'cpu_info': 'ppc64,libvirt,3940',
'running_vms': 0,
'free_disk_gb': 259,
'service_id': 7,
@@ -1389,13 +1389,14 @@ class ComputeVolumeTestCase(BaseTestCase):
@mock.patch.object(nova.virt.block_device, 'convert_snapshots')
@mock.patch.object(nova.virt.block_device, 'convert_volumes')
@mock.patch.object(nova.virt.block_device, 'convert_ephemerals')
+ @mock.patch.object(nova.virt.block_device, 'convert_local_images')
@mock.patch.object(nova.virt.block_device, 'convert_swap')
@mock.patch.object(nova.virt.block_device, 'attach_block_devices')
def test_prep_block_device_with_blanks(self, attach_block_devices,
- convert_swap, convert_ephemerals,
- convert_volumes, convert_snapshots,
- convert_images, convert_blanks,
- get_swap):
+ convert_swap, convert_local_images,
+ convert_ephemerals, convert_volumes,
+ convert_snapshots, convert_images,
+ convert_blanks, get_swap):
instance = self._create_fake_instance_obj()
instance['root_device_name'] = '/dev/vda'
root_volume = objects.BlockDeviceMapping(
@@ -1426,6 +1427,7 @@ class ComputeVolumeTestCase(BaseTestCase):
return bdm
convert_swap.return_value = []
+ convert_local_images.return_value = []
convert_ephemerals.return_value = []
convert_volumes.return_value = [blank_volume1, blank_volume2]
convert_snapshots.return_value = []
@@ -1438,6 +1440,7 @@ class ComputeVolumeTestCase(BaseTestCase):
'root_device_name': '/dev/vda',
'swap': [],
'ephemerals': [],
+ 'image': [],
'block_device_mapping': bdms
}
@@ -1452,6 +1455,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.assertIsNotNone(bdm.device_name)
convert_swap.assert_called_once_with(bdms)
+ convert_local_images.assert_called_once_with(bdms)
convert_ephemerals.assert_called_once_with(bdms)
bdm_args = tuple(bdms)
convert_volumes.assert_called_once_with(bdm_args)
@@ -2726,7 +2730,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
- limits={}, request_spec=None, accel_uuids=[])
+ limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_driver(self):
@@ -2756,7 +2761,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=[], recreate=False, on_shared_storage=False,
preserve_ephemeral=False, migration=None, scheduled_node=None,
- limits={}, request_spec=None, accel_uuids=[])
+ limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2808,7 +2814,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata,
bdms=bdms, recreate=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={},
- on_shared_storage=False, request_spec=None, accel_uuids=[])
+ on_shared_storage=False, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.assertTrue(called['rebuild'])
self.compute.terminate_instance(self.context, instance, [])
@@ -2827,7 +2834,8 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata=sys_metadata, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits=None,
- request_spec=None, accel_uuids=[])
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
self.compute.terminate_instance(self.context, instance, [])
def test_rebuild_launched_at_time(self):
@@ -2848,7 +2856,7 @@ class ComputeTestCase(BaseTestCase,
new_pass="new_password", orig_sys_metadata={}, bdms=[],
recreate=False, on_shared_storage=False, preserve_ephemeral=False,
migration=None, scheduled_node=None, limits={}, request_spec=None,
- accel_uuids=[])
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
instance.refresh()
self.assertEqual(cur_time,
instance['launched_at'].replace(tzinfo=None))
@@ -2881,7 +2889,8 @@ class ComputeTestCase(BaseTestCase,
injected_files=injected_files, new_pass="new_password",
orig_sys_metadata=sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
- scheduled_node=None, limits={}, request_spec=None, accel_uuids=[])
+ scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
self.compute.terminate_instance(self.context, instance, [])
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@@ -3212,6 +3221,7 @@ class ComputeTestCase(BaseTestCase,
expected = {
'swap': None,
'ephemerals': [],
+ 'image': [],
'root_device_name': None,
'block_device_mapping': driver_bdms
}
@@ -3240,6 +3250,7 @@ class ComputeTestCase(BaseTestCase,
expected = {
'swap': None,
'ephemerals': [],
+ 'image': [],
'root_device_name': None,
'block_device_mapping': driver_bdms
}
@@ -3273,7 +3284,11 @@ class ComputeTestCase(BaseTestCase,
'delete_on_termination': True,
'guest_format': None,
'volume_size': 2,
- 'boot_index': -1
+ 'boot_index': -1,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': None,
})
swap = fake_block_device.FakeDbBlockDeviceDict({
'id': 3,
@@ -3308,16 +3323,25 @@ class ComputeTestCase(BaseTestCase,
'device_type': 'disk',
'disk_bus': 'virtio',
'guest_format': None,
- 'size': 1
+ 'size': 1,
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None,
},
{
'device_name': '/dev/vdc',
'device_type': 'disk',
'disk_bus': 'virtio',
'guest_format': None,
- 'size': 2
+ 'size': 2,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': None,
}
],
+ 'image': [],
'block_device_mapping': [],
'root_device_name': None
}
@@ -4593,7 +4617,9 @@ class ComputeTestCase(BaseTestCase,
'limits': {},
'request_spec': None,
'on_shared_storage': False,
- 'accel_uuids': ()}),
+ 'accel_uuids': (),
+ 'reimage_boot_volume': False,
+ 'target_state': None}),
("set_admin_password", task_states.UPDATING_PASSWORD,
{'new_pass': None}),
("rescue_instance", task_states.RESCUING,
@@ -5111,7 +5137,8 @@ class ComputeTestCase(BaseTestCase,
injected_files=[], new_pass=password,
orig_sys_metadata=orig_sys_metadata, bdms=[], recreate=False,
on_shared_storage=False, preserve_ephemeral=False, migration=None,
- scheduled_node=None, limits={}, request_spec=None, accel_uuids=[])
+ scheduled_node=None, limits={}, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
inst_ref.refresh()
@@ -5645,6 +5672,7 @@ class ComputeTestCase(BaseTestCase,
pagesize=2048,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([1, 2]),
siblings=[set([1]), set([2])],
mempages=[objects.NUMAPagesTopology(
@@ -5660,6 +5688,7 @@ class ComputeTestCase(BaseTestCase,
pagesize=2048,
memory_usage=0,
cpu_usage=0,
+ socket=0,
siblings=[set([3]), set([4])],
mempages=[objects.NUMAPagesTopology(
size_kb=2048, total=256, used=0)])
@@ -5714,13 +5743,15 @@ class ComputeTestCase(BaseTestCase,
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0a:00.1',
- request_id=uuids.req1)])
+ request_id=uuids.req1,
+ compute_node_id=1)])
new_pci_devices = objects.PciDeviceList(
objects=[objects.PciDevice(vendor_id='1377',
product_id='0047',
address='0000:0b:00.1',
- request_id=uuids.req2)])
+ request_id=uuids.req2,
+ compute_node_id=2)])
if expected_pci_addr == old_pci_devices[0].address:
expected_pci_device = old_pci_devices[0]
@@ -6066,10 +6097,9 @@ class ComputeTestCase(BaseTestCase,
return fake_network.fake_get_instance_nw_info(self)
self.stub_out('nova.network.neutron.API.get_instance_nw_info', stupid)
- self.useFixture(
- std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
- lambda *args: True))
+ self.useFixture(std_fixtures.MonkeyPatch(
+ 'nova.network.neutron.API.has_port_binding_extension',
+ lambda *args: True))
# creating instance testdata
instance = self._create_fake_instance_obj({'host': 'dummy'})
c = context.get_admin_context()
@@ -6107,7 +6137,7 @@ class ComputeTestCase(BaseTestCase,
mock_pre.assert_called_once_with(
test.MatchType(nova.context.RequestContext),
test.MatchType(objects.Instance),
- {'swap': None, 'ephemerals': [],
+ {'swap': None, 'ephemerals': [], 'image': [],
'root_device_name': None,
'block_device_mapping': []},
mock.ANY, mock.ANY, mock.ANY)
@@ -6304,9 +6334,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual('completed', migration.status)
mock_pre.assert_called_once_with(c, instance, False, None,
dest, migrate_data)
- mock_migrate.assert_called_once_with(c, instance,
- {'source_compute': instance[
- 'host'], 'dest_compute': dest})
+ mock_migrate.assert_called_once_with(c, instance, mock.ANY)
mock_post.assert_called_once_with(c, instance, False, dest)
mock_clear.assert_called_once_with(mock.ANY)
@@ -6389,7 +6417,6 @@ class ComputeTestCase(BaseTestCase,
migration_obj = objects.Migration(uuid=uuids.migration,
source_node=instance.node,
status='completed')
- migration = {'source_compute': srchost, 'dest_compute': dest, }
migrate_data = objects.LibvirtLiveMigrateData(
is_shared_instance_path=False,
is_shared_block_storage=False,
@@ -6412,7 +6439,7 @@ class ComputeTestCase(BaseTestCase,
self.assertIn('cleanup', result)
self.assertTrue(result['cleanup'])
- mock_migrate.assert_called_once_with(c, instance, migration)
+ mock_migrate.assert_called_once_with(c, instance, mock.ANY)
mock_post.assert_called_once_with(c, instance, False, dest)
mock_clear.assert_called_once_with(mock.ANY)
@@ -6476,13 +6503,11 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual(2, mock_notify.call_count)
post_live_migration.assert_has_calls([
mock.call(c, instance, {'swap': None, 'ephemerals': [],
- 'root_device_name': None,
+ 'image': [], 'root_device_name': None,
'block_device_mapping': []},
migrate_data)])
- migration = {'source_compute': srchost,
- 'dest_compute': dest, }
migrate_instance_start.assert_has_calls([
- mock.call(c, instance, migration)])
+ mock.call(c, instance, mock.ANY)])
post_live_migration_at_destination.assert_has_calls([
mock.call(c, instance, False, dest)])
post_live_migration_at_source.assert_has_calls(
@@ -6709,7 +6734,7 @@ class ComputeTestCase(BaseTestCase,
mock_setup.assert_called_once_with(c, instance, self.compute.host,
teardown=True)
mock_rollback.assert_called_once_with(c, instance, [],
- {'swap': None, 'ephemerals': [],
+ {'swap': None, 'ephemerals': [], 'image': [],
'root_device_name': None,
'block_device_mapping': []},
destroy_disks=True, migrate_data=None)
@@ -7385,7 +7410,7 @@ class ComputeTestCase(BaseTestCase,
fake_instance.fake_db_instance(uuid=uuids.migration_instance_5,
vm_state=vm_states.ACTIVE,
task_state=None),
- # The expceted migration result will be None instead of error
+ # The expected migration result will be None instead of error
# since _poll_unconfirmed_resizes will not change it
# when the instance vm state is RESIZED and task state
# is deleting, see bug 1301696 for more detail
@@ -7442,12 +7467,11 @@ class ComputeTestCase(BaseTestCase,
# raise exception for uuids.migration_instance_4 to check
# migration status does not get set to 'error' on confirm_resize
# failure.
- if instance['uuid'] == uuids.migration_instance_4:
+ if instance.uuid == uuids.migration_instance_4:
raise test.TestingException('bomb')
self.assertIsNotNone(migration)
for migration2 in migrations:
- if (migration2['instance_uuid'] ==
- migration['instance_uuid']):
+ if migration2['instance_uuid'] == migration.instance_uuid:
migration2['status'] = 'confirmed'
self.stub_out(
@@ -8139,7 +8163,7 @@ class ComputeTestCase(BaseTestCase,
self.compute._default_block_device_names(instance, {}, bdms)
self.assertEqual('/dev/vda', instance.root_device_name)
- mock_def.assert_called_once_with(instance, '/dev/vda', [], [],
+ mock_def.assert_called_once_with(instance, '/dev/vda', [], [], [],
[bdm for bdm in bdms])
@mock.patch.object(objects.BlockDeviceMapping, 'save')
@@ -8153,7 +8177,7 @@ class ComputeTestCase(BaseTestCase,
self.compute._default_block_device_names(instance, {}, bdms)
- mock_def.assert_called_once_with(instance, '/dev/vda', [], [],
+ mock_def.assert_called_once_with(instance, '/dev/vda', [], [], [],
[bdm for bdm in bdms])
@mock.patch.object(objects.Instance, 'save')
@@ -8175,7 +8199,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual('/dev/vda', instance.root_device_name)
mock_default_dev.assert_called_once_with(instance, mock.ANY, bdms[0])
mock_default_name.assert_called_once_with(instance, '/dev/vda', [], [],
- [bdm for bdm in bdms])
+ [], [bdm for bdm in bdms])
def test_default_block_device_names_with_blank_volumes(self):
instance = self._create_fake_instance_obj()
@@ -8235,7 +8259,7 @@ class ComputeTestCase(BaseTestCase,
self.assertEqual('/dev/vda', instance.root_device_name)
self.assertTrue(object_save.called)
default_device_names.assert_called_once_with(instance,
- '/dev/vda', [bdms[-2]], [bdms[-1]],
+ '/dev/vda', [], [bdms[-2]], [bdms[-1]],
[bdm for bdm in bdms[:-2]])
def test_reserve_block_device_name(self):
@@ -8619,16 +8643,13 @@ class ComputeAPITestCase(BaseTestCase):
def test_create_instance_sets_system_metadata(self):
# Make sure image properties are copied into system metadata.
- with mock.patch.object(
- self.compute_api.compute_task_api, 'schedule_and_build_instances',
- ) as mock_sbi:
- ref, resv_id = self.compute_api.create(
- self.context,
- flavor=self.default_flavor,
- image_href='f5000000-0000-0000-0000-000000000000')
+ ref, resv_id = self.compute_api.create(
+ self.context,
+ flavor=self.default_flavor,
+ image_href='f5000000-0000-0000-0000-000000000000')
- build_call = mock_sbi.call_args_list[0]
- instance = build_call[1]['build_requests'][0].instance
+ build_call = self.schedule_and_build_instances_mock.call_args_list[0]
+ instance = build_call[1]['build_requests'][0].instance
image_props = {'image_kernel_id': uuids.kernel_id,
'image_ramdisk_id': uuids.ramdisk_id,
@@ -8638,16 +8659,14 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(value, instance.system_metadata[key])
def test_create_saves_flavor(self):
- with mock.patch.object(
- self.compute_api.compute_task_api, 'schedule_and_build_instances',
- ) as mock_sbi:
- ref, resv_id = self.compute_api.create(
- self.context,
- flavor=self.default_flavor,
- image_href=uuids.image_href_id)
+ ref, resv_id = self.compute_api.create(
+ self.context,
+ flavor=self.default_flavor,
+ image_href=uuids.image_href_id)
+
+ build_call = self.schedule_and_build_instances_mock.call_args_list[0]
+ instance = build_call[1]['build_requests'][0].instance
- build_call = mock_sbi.call_args_list[0]
- instance = build_call[1]['build_requests'][0].instance
self.assertIn('flavor', instance)
self.assertEqual(self.default_flavor.flavorid,
instance.flavor.flavorid)
@@ -8655,19 +8674,18 @@ class ComputeAPITestCase(BaseTestCase):
def test_create_instance_associates_security_groups(self):
# Make sure create associates security groups.
- with test.nested(
- mock.patch.object(self.compute_api.compute_task_api,
- 'schedule_and_build_instances'),
- mock.patch('nova.network.security_group_api.validate_name',
- return_value=uuids.secgroup_id),
- ) as (mock_sbi, mock_secgroups):
+ with mock.patch(
+ "nova.network.security_group_api.validate_name",
+ return_value=uuids.secgroup_id,
+ ) as mock_secgroups:
self.compute_api.create(
self.context,
flavor=self.default_flavor,
image_href=uuids.image_href_id,
security_groups=['testgroup'])
- build_call = mock_sbi.call_args_list[0]
+ build_call = (
+ self.schedule_and_build_instances_mock.call_args_list[0])
reqspec = build_call[1]['request_spec'][0]
self.assertEqual(1, len(reqspec.security_groups))
@@ -8692,28 +8710,29 @@ class ComputeAPITestCase(BaseTestCase):
len(db.instance_get_all(self.context)))
mock_secgroups.assert_called_once_with(mock.ANY, 'invalid_sec_group')
+ @mock.patch(
+ 'nova.network.neutron.API.is_remote_managed_port',
+ new=mock.Mock(return_value=False),
+ )
def test_create_instance_associates_requested_networks(self):
# Make sure create adds the requested networks to the RequestSpec
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.port_instance)])
- with test.nested(
- mock.patch.object(
- self.compute_api.compute_task_api,
- 'schedule_and_build_instances'),
- mock.patch.object(
- self.compute_api.network_api,
- 'create_resource_requests',
- return_value=(None, [], objects.RequestLevelParams())),
- ) as (mock_sbi, _mock_create_resreqs):
+ with mock.patch.object(
+ self.compute_api.network_api,
+ "create_resource_requests",
+ return_value=(None, [], objects.RequestLevelParams()),
+ ):
self.compute_api.create(
self.context,
flavor=self.default_flavor,
image_href=uuids.image_href_id,
requested_networks=requested_networks)
- build_call = mock_sbi.call_args_list[0]
+ build_call = (
+ self.schedule_and_build_instances_mock.call_args_list[0])
reqspec = build_call[1]['request_spec'][0]
self.assertEqual(1, len(reqspec.requested_networks))
@@ -8856,7 +8875,7 @@ class ComputeAPITestCase(BaseTestCase):
group.create()
get_group_mock.return_value = group
- self.assertRaises(exception.QuotaError, self.compute_api.create,
+ self.assertRaises(exception.OverQuota, self.compute_api.create,
self.context, self.default_flavor, self.fake_image['id'],
scheduler_hints={'group': group.uuid},
check_server_group_quota=True)
@@ -9827,6 +9846,10 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(refs[i]['display_name'], name)
self.assertEqual(refs[i]['hostname'], name)
+ @mock.patch(
+ 'nova.network.neutron.API.is_remote_managed_port',
+ new=mock.Mock(return_value=False),
+ )
@mock.patch("nova.objects.service.get_minimum_version_all_cells")
@mock.patch(
"nova.network.neutron.API.has_extended_resource_request_extension")
@@ -10209,8 +10232,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.get_console_output,
self.context, instance)
- @mock.patch.object(compute_utils, 'notify_about_instance_action')
- def test_attach_interface(self, mock_notify):
+ def test_attach_interface(self):
instance = self._create_fake_instance_obj()
nwinfo = [fake_network_cache_model.new_vif()]
network_id = nwinfo[0]['network']['id']
@@ -10230,8 +10252,12 @@ class ComputeAPITestCase(BaseTestCase):
mock.patch.object(
self.compute,
"_claim_pci_device_for_interface_attach",
- return_value=None)
- ) as (cap, mock_lock, mock_create_resource_req, mock_claim_pci):
+ return_value=None),
+ mock.patch.object(compute_utils, 'notify_about_instance_action'),
+ ) as (
+ cap, mock_lock, mock_create_resource_req, mock_claim_pci,
+ mock_notify
+ ):
mock_create_resource_req.return_value = (
None, [], mock.sentinel.req_lvl_params)
vif = self.compute.attach_interface(self.context,
@@ -10492,7 +10518,7 @@ class ComputeAPITestCase(BaseTestCase):
pci_reqs = mock_claim_pci.mock_calls[0][1][1]
self.assertEqual([pci_req], pci_reqs.requests)
- # after the pci claim we also need to allocate that pci to the instace
+ # after the pci claim we also need to allocate that pci to the instance
mock_allocate_pci.assert_called_once_with(self.context, instance)
# and as this changes the instance we have to save it.
mock_save.assert_called_once_with()
@@ -10739,8 +10765,13 @@ class ComputeAPITestCase(BaseTestCase):
supports_attach_interface=True),
mock.patch.object(self.compute.network_api,
'create_resource_requests'),
- mock.patch.object(self.compute.rt, 'claim_pci_devices',
- return_value=[]),
+ mock.patch.object(
+ self.compute.rt,
+ 'claim_pci_devices',
+ side_effect=exception.PciDeviceRequestFailed(
+ requests=instance.pci_requests
+ )
+ ),
mock.patch.object(
self.compute, '_allocate_port_resource_for_instance'),
mock.patch(
@@ -10816,7 +10847,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name'),
+ 'update_pci_request_with_placement_allocations'),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
mock_update_pci
@@ -10886,7 +10917,7 @@ class ComputeAPITestCase(BaseTestCase):
new=mock.NonCallableMock()),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock()),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
@@ -10931,7 +10962,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock()),
) as (
mock_get_nodename, mock_get_alloc_candidates, mock_add_res,
@@ -10998,7 +11029,7 @@ class ComputeAPITestCase(BaseTestCase):
'add_resources_to_instance_allocation'),
mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name'),
+ 'update_pci_request_with_placement_allocations'),
mock.patch(
'nova.scheduler.client.report.SchedulerReportClient.'
'remove_resources_from_instance_allocation'),
@@ -11049,8 +11080,7 @@ class ComputeAPITestCase(BaseTestCase):
mock_remove_res.assert_called_once_with(
self.context, instance.uuid, mock.sentinel.resources)
- @mock.patch.object(compute_utils, 'notify_about_instance_action')
- def test_detach_interface(self, mock_notify):
+ def test_detach_interface(self):
nwinfo, port_id = self.test_attach_interface()
instance = self._create_fake_instance_obj()
instance.info_cache = objects.InstanceInfoCache.new(
@@ -11083,10 +11113,13 @@ class ComputeAPITestCase(BaseTestCase):
mock.patch('nova.pci.request.get_instance_pci_request_from_vif',
return_value=pci_req),
mock.patch.object(self.compute.rt, 'unclaim_pci_devices'),
- mock.patch.object(instance, 'save')
+ mock.patch.object(instance, 'save'),
+ mock.patch.object(compute_utils, 'notify_about_instance_action'),
) as (
- mock_remove_alloc, mock_deallocate, mock_lock,
- mock_get_pci_req, mock_unclaim_pci, mock_instance_save):
+ mock_remove_alloc, mock_deallocate, mock_lock,
+ mock_get_pci_req, mock_unclaim_pci, mock_instance_save,
+ mock_notify
+ ):
self.compute.detach_interface(self.context, instance, port_id)
mock_deallocate.assert_called_once_with(
@@ -11561,12 +11594,60 @@ class ComputeAPITestCase(BaseTestCase):
instance.uuid, None)
@mock.patch.object(context.RequestContext, 'elevated')
+ @mock.patch.object(cinder.API, 'detach')
+ @mock.patch.object(cinder.API, 'terminate_connection')
+ @mock.patch.object(compute_manager.ComputeManager,
+ '_get_instance_block_device_info')
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_legacy_volume_detach(
+ self, mock_get_connector, mock_info, mock_terminate, mock_detach,
+ mock_elevated,
+ ):
+ # test _shutdown_instance with legacy BDMs without a volume
+ # attachment ID
+ admin = context.get_admin_context()
+ mock_elevated.return_value = admin
+ instance = self._create_fake_instance_obj()
+ connector = 'fake-connector'
+ mock_get_connector.return_value = connector
+
+ vol_a_bdm = block_device_obj.BlockDeviceMapping(
+ instance_uuid=instance['uuid'],
+ source_type='volume', destination_type='volume',
+ delete_on_termination=False,
+ volume_id=uuids.volume_a_id,
+ attachment_id=None)
+ vol_b_bdm = block_device_obj.BlockDeviceMapping(
+ instance_uuid=instance['uuid'],
+ source_type='volume', destination_type='volume',
+ delete_on_termination=False,
+ volume_id=uuids.volume_b_id,
+ attachment_id=None)
+ bdms = [vol_a_bdm, vol_b_bdm]
+
+ self.compute._shutdown_instance(admin, instance, bdms)
+
+ # we should only get the connector once, regardless of the number of
+ # volumes
+ mock_get_connector.assert_called_once_with(instance)
+ # but we should have separate terminate and detach calls
+ mock_terminate.assert_has_calls([
+ mock.call(admin, uuids.volume_a_id, connector),
+ mock.call(admin, uuids.volume_b_id, connector),
+ ])
+ mock_detach.assert_has_calls([
+ mock.call(admin, uuids.volume_a_id, instance.uuid),
+ mock.call(admin, uuids.volume_b_id, instance.uuid),
+ ])
+
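The assertions above pin down the legacy (pre-attachment-id) detach path: one connector lookup per instance, then a terminate_connection plus a detach call per volume. A minimal sketch of that flow, inferred only from these assertions; shutdown_legacy_volumes and its parameters are hypothetical stand-ins for the compute manager and its collaborators, not the real _shutdown_instance code:

def shutdown_legacy_volumes(driver, volume_api, context, instance, bdms):
    # one connector lookup for the whole instance, however many volumes
    # are attached
    connector = driver.get_volume_connector(instance)
    for bdm in bdms:
        if bdm.attachment_id is not None:
            # new-style (cinder v3) attachments are torn down with
            # attachment_delete instead; see the next test
            continue
        # legacy flow: terminate the export, then mark the volume detached
        volume_api.terminate_connection(context, bdm.volume_id, connector)
        volume_api.detach(context, bdm.volume_id, instance.uuid)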
+ @mock.patch.object(context.RequestContext, 'elevated')
@mock.patch.object(cinder.API, 'attachment_delete')
@mock.patch.object(compute_manager.ComputeManager,
'_get_instance_block_device_info')
- def test_shutdown_with_attachment_delete(self, mock_info,
- mock_attach_delete,
- mock_elevated):
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_attachment_delete(
+ self, mock_get_connector, mock_info, mock_attach_delete, mock_elevated,
+ ):
# test _shutdown_instance with volume bdm containing an
# attachment id. This should use the v3 cinder api.
admin = context.get_admin_context()
@@ -11586,14 +11667,18 @@ class ComputeAPITestCase(BaseTestCase):
self.compute._shutdown_instance(admin, instance, bdms)
mock_attach_delete.assert_called_once_with(admin, attachment_id)
+ # we shouldn't try to get a connector for a cinder v3-style attachment
+ mock_get_connector.assert_not_called()
@mock.patch.object(compute_manager.LOG, 'debug')
@mock.patch.object(cinder.API, 'attachment_delete')
@mock.patch.object(compute_manager.ComputeManager,
'_get_instance_block_device_info')
- def test_shutdown_with_attachment_not_found(self, mock_info,
- mock_attach_delete,
- mock_debug_log):
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
+ def test_shutdown_with_attachment_not_found(
+ self, mock_get_connector, mock_info, mock_attach_delete,
+ mock_debug_log,
+ ):
# test _shutdown_instance with attachment_delete throwing
# a VolumeAttachmentNotFound exception. This should not
# cause _shutdown_instance to fail. Only a debug log
@@ -11619,6 +11704,8 @@ class ComputeAPITestCase(BaseTestCase):
# get last call to LOG.debug and verify correct exception is in there
self.assertIsInstance(mock_debug_log.call_args[0][1],
exception.VolumeAttachmentNotFound)
+ # we shouldn't try to get a connector for a cinder v3-style attachment
+ mock_get_connector.assert_not_called()
def test_terminate_with_volumes(self):
# Make sure that volumes get detached during instance termination.
@@ -11878,7 +11965,7 @@ class ComputeAPITestCase(BaseTestCase):
force=False)
@mock.patch('nova.compute.utils.notify_about_instance_action')
- def _test_evacuate(self, mock_notify, force=None):
+ def _test_evacuate(self, mock_notify, force=None, target_state=None):
instance = self._create_fake_instance_obj(services=True)
self.assertIsNone(instance.task_state)
@@ -11893,17 +11980,16 @@ class ComputeAPITestCase(BaseTestCase):
instance.save()
@mock.patch.object(objects.Service, 'get_by_compute_host')
- @mock.patch.object(self.compute_api.compute_task_api,
- 'rebuild_instance')
@mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
@mock.patch.object(objects.RequestSpec,
'get_by_instance_uuid')
@mock.patch.object(self.compute_api.servicegroup_api, 'service_is_up')
- def do_test(service_is_up, get_by_instance_uuid, get_all_by_host,
- rebuild_instance, get_service):
+ def do_test(
+ service_is_up, get_by_instance_uuid, get_all_by_host, get_service
+ ):
service_is_up.return_value = False
get_by_instance_uuid.return_value = fake_spec
- rebuild_instance.side_effect = fake_rebuild_instance
+ self.rebuild_instance_mock.side_effect = fake_rebuild_instance
get_all_by_host.return_value = objects.ComputeNodeList(
objects=[objects.ComputeNode(
host='fake_dest_host',
@@ -11916,12 +12002,13 @@ class ComputeAPITestCase(BaseTestCase):
host='fake_dest_host',
on_shared_storage=True,
admin_password=None,
- force=force)
+ force=force,
+ target_state=target_state)
if force is False:
host = None
else:
host = 'fake_dest_host'
- rebuild_instance.assert_called_once_with(
+ self.rebuild_instance_mock.assert_called_once_with(
ctxt,
instance=instance,
new_pass=None,
@@ -11933,7 +12020,8 @@ class ComputeAPITestCase(BaseTestCase):
recreate=True,
on_shared_storage=True,
request_spec=fake_spec,
- host=host)
+ host=host,
+ target_state=target_state)
do_test()
instance.refresh()
@@ -11965,6 +12053,9 @@ class ComputeAPITestCase(BaseTestCase):
def test_evacuate_with_forced_host(self):
self._test_evacuate(force=True)
+ def test_evacuate_with_target_state(self):
+ self._test_evacuate(target_state="stopped")
+
@mock.patch('nova.servicegroup.api.API.service_is_up',
return_value=False)
def test_fail_evacuate_with_non_existing_destination(self, _service_is_up):
@@ -13039,16 +13130,13 @@ class ComputeAPIAggrTestCase(BaseTestCase):
hosts = aggregate.hosts if 'hosts' in aggregate else None
self.assertIn(values[0][1][0], hosts)
- @mock.patch('nova.scheduler.client.report.SchedulerReportClient')
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
def test_placement_client_init(self, mock_report_client):
"""Tests to make sure that the construction of the placement client
- only happens once per AggregateAPI class instance.
+ uses the singleton helper, and happens only when needed.
"""
- self.assertIsNone(self.api._placement_client)
- # Access the property twice to make sure SchedulerReportClient is
- # only loaded once.
- for x in range(2):
- self.api.placement_client
+ self.assertFalse(mock_report_client.called)
+ self.api.placement_client
mock_report_client.assert_called_once_with()
@@ -13427,7 +13515,8 @@ class EvacuateHostTestCase(BaseTestCase):
super(EvacuateHostTestCase, self).tearDown()
def _rebuild(self, on_shared_storage=True, migration=None,
- send_node=False, vm_states_is_stopped=False):
+ send_node=False, vm_states_is_stopped=False,
+ expect_error=False):
network_api = self.compute.network_api
ctxt = context.get_admin_context()
@@ -13441,7 +13530,7 @@ class EvacuateHostTestCase(BaseTestCase):
return_value=mock.sentinel.mapping)
@mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@mock.patch('nova.compute.utils.notify_about_instance_rebuild')
@mock.patch.object(network_api, 'setup_networks_on_host')
@@ -13461,7 +13550,8 @@ class EvacuateHostTestCase(BaseTestCase):
image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
on_shared_storage=on_shared_storage, migration=migration,
preserve_ephemeral=False, scheduled_node=node, limits=limits,
- request_spec=None, accel_uuids=[])
+ request_spec=None, accel_uuids=[], reimage_boot_volume=False,
+ target_state=None)
if vm_states_is_stopped:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
@@ -13473,6 +13563,11 @@ class EvacuateHostTestCase(BaseTestCase):
action='power_off', phase='start'),
mock.call(ctxt, self.inst, self.inst.host,
action='power_off', phase='end')])
+ elif expect_error:
+ mock_notify_rebuild.assert_has_calls([
+ mock.call(ctxt, self.inst, self.compute.host,
+ phase='error', exception=mock.ANY, bdms=bdms)])
+ return
else:
mock_notify_rebuild.assert_has_calls([
mock.call(ctxt, self.inst, self.inst.host, phase='start',
@@ -13527,14 +13622,15 @@ class EvacuateHostTestCase(BaseTestCase):
mock.patch.object(self.compute, '_get_compute_info',
side_effect=fake_get_compute_info)
) as (mock_inst, mock_get):
- self._rebuild()
+ self.assertRaises(exception.InstanceFaultRollback,
+ self._rebuild, expect_error=True)
# Should be on destination host
instance = db.instance_get(self.context, self.inst.id)
- self.assertEqual(instance['host'], self.compute.host)
- self.assertIsNone(instance['node'])
- self.assertTrue(mock_inst.called)
- self.assertTrue(mock_get.called)
+ self.assertEqual('fake_host_2', instance['host'])
+ self.assertEqual('fakenode2', instance['node'])
+ mock_inst.assert_not_called()
+ mock_get.assert_called_once_with(mock.ANY, self.compute.host)
def test_rebuild_on_host_node_passed(self):
patch_get_info = mock.patch.object(self.compute, '_get_compute_info')
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 4d7967b37e..73c9d32197 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -17,6 +17,7 @@ import copy
import datetime
import fixtures as std_fixtures
import time
+from unittest import mock
from cinderclient import exceptions as cinder_exception
from cursive import exception as cursive_exception
@@ -24,7 +25,6 @@ import ddt
from eventlet import event as eventlet_event
from eventlet import timeout as eventlet_timeout
from keystoneauth1 import exceptions as keystone_exception
-import mock
import netaddr
from oslo_log import log as logging
import oslo_messaging as messaging
@@ -57,6 +57,7 @@ from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import network_request as net_req_obj
+from nova.objects import service as service_obj
from nova.pci import request as pci_request
from nova.scheduler.client import report
from nova import test
@@ -76,6 +77,7 @@ from nova.virt import driver as virt_driver
from nova.virt import event as virtevent
from nova.virt import fake as fake_driver
from nova.virt import hardware
+from nova.virt import node as virt_node
from nova.volume import cinder
@@ -86,6 +88,11 @@ fake_host_list = [mock.sentinel.host1]
@ddt.ddt
class ComputeManagerUnitTestCase(test.NoDBTestCase,
fake_resource_tracker.RTMockMixin):
+ # os-brick>=5.1 now uses external file system locks instead of internal
+ # locks so we need to set up locking
+ REQUIRES_LOCKING = True
+ STUB_COMPUTE_ID = False
+
def setUp(self):
super(ComputeManagerUnitTestCase, self).setUp()
self.compute = manager.ComputeManager()
@@ -344,6 +351,46 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, mock.sentinel.node, startup=True)
log_mock.exception.assert_called_once()
+ def test_update_available_resource_for_node_pci_placement_failed_startup(
+ self
+ ):
+ """If the PCI placement translation failed during startup then the
+ exception is raised up to kill the service
+ """
+ rt = self._mock_rt(spec_set=['update_available_resource'])
+ rt.update_available_resource.side_effect = (
+ exception.PlacementPciException(error='error'))
+
+ self.assertRaises(
+ exception.PlacementPciException,
+ self.compute._update_available_resource_for_node,
+ self.context,
+ mock.sentinel.node,
+ startup=True,
+ )
+ rt.update_available_resource.assert_called_once_with(
+ self.context, mock.sentinel.node, startup=True)
+
+ @mock.patch('nova.compute.manager.LOG')
+ def test_update_available_resource_for_node_pci_placement_failed_later(
+ self, mock_log
+ ):
+ """If the PCI placement translation failed later (not at startup)
+ during a periodic then the exception is just logged
+ """
+ rt = self._mock_rt(spec_set=['update_available_resource'])
+ rt.update_available_resource.side_effect = (
+ exception.PlacementPciException(error='error'))
+
+ self.compute._update_available_resource_for_node(
+ self.context, mock.sentinel.node, startup=False)
+ rt.update_available_resource.assert_called_once_with(
+ self.context, mock.sentinel.node, startup=False)
+ mock_log.exception.assert_called_once_with(
+ 'Error updating PCI resources for node %(node)s.',
+ {'node': mock.sentinel.node}
+ )
+
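A compact sketch of the startup-versus-periodic behaviour these two tests describe; update_for_node and its parameters are illustrative stand-ins rather than the real _update_available_resource_for_node signature:

def update_for_node(rt, log, context, nodename, startup=False):
    try:
        rt.update_available_resource(context, nodename, startup=startup)
    except Exception:  # PlacementPciException in the tests above
        if startup:
            # at startup a failed PCI/placement translation is fatal, so
            # the exception propagates and the service stops
            raise
        # during a later periodic run we only log and keep the service up
        log.exception('Error updating PCI resources for node %(node)s.',
                      {'node': nodename})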
@mock.patch.object(manager, 'LOG')
@mock.patch.object(manager.ComputeManager,
'_update_available_resource_for_node')
@@ -862,6 +909,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
return instance_obj._make_instance_list(
self.context, objects.InstanceList(), db_list, None)
+ @mock.patch.object(manager.ComputeManager,
+ '_ensure_existing_node_identity')
@mock.patch.object(manager.ComputeManager, '_get_nodes')
@mock.patch.object(manager.ComputeManager,
'_error_out_instances_whose_build_was_interrupted')
@@ -880,17 +929,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm, mock_validate_pinning,
mock_destroy, mock_admin_ctxt, mock_host_get,
mock_init_host,
- mock_error_interrupted, mock_get_nodes):
+ mock_error_interrupted, mock_get_nodes,
+ mock_existing_node):
mock_admin_ctxt.return_value = self.context
inst_list = _make_instance_list(startup_instances)
mock_host_get.return_value = inst_list
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
+ mock_existing_node.assert_not_called()
mock_validate_pinning.assert_called_once_with(inst_list)
mock_validate_vtpm.assert_called_once_with(inst_list)
mock_destroy.assert_called_once_with(
@@ -933,8 +984,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"""
mock_get_nodes.return_value = {
uuids.cn_uuid1: objects.ComputeNode(
- uuid=uuids.cn_uuid1, hypervisor_hostname='node1')}
- self.compute.init_host()
+ uuid=uuids.cn_uuid1, hypervisor_hostname='node1',
+ host=self.compute.host)}
+ self.compute.init_host(None)
mock_error_interrupted.assert_called_once_with(
test.MatchType(nova.context.RequestContext), set(),
@@ -944,16 +996,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
@mock.patch('nova.objects.InstanceList')
@mock.patch('nova.objects.MigrationList.get_by_filters')
- def test_cleanup_host(self, mock_miglist_get, mock_instance_list):
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids')
+ def test_cleanup_host(self, mock_cnlist_get, mock_miglist_get,
+ mock_instance_list):
# just testing whether the cleanup_host method
# when fired will invoke the underlying driver's
# equivalent method.
mock_miglist_get.return_value = []
mock_instance_list.get_by_host.return_value = []
+ mock_cnlist_get.return_value = []
with mock.patch.object(self.compute, 'driver') as mock_driver:
- self.compute.init_host()
+ self.compute.init_host(None)
mock_driver.init_host.assert_called_once_with(host='fake-mini')
self.compute.cleanup_host()
@@ -1042,7 +1097,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
'remove_provider_tree_from_instance_allocation')
) as (mock_get_net, mock_remove_allocation):
- self.compute.init_host()
+ self.compute.init_host(None)
mock_remove_allocation.assert_called_once_with(
self.context, deleted_instance.uuid, uuids.our_node_uuid)
@@ -1095,11 +1150,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
uuids.evac_instance: evacuating_instance
}
our_node = objects.ComputeNode(
- host='fake-host', uuid=uuids.our_node_uuid,
+ host=self.compute.host, uuid=uuids.our_node_uuid,
hypervisor_hostname='fake-node')
mock_get_nodes.return_value = {uuids.our_node_uuid: our_node}
- self.compute.init_host()
+ self.compute.init_host(None)
mock_init_instance.assert_called_once_with(
self.context, active_instance)
@@ -1107,23 +1162,49 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, {active_instance.uuid, evacuating_instance.uuid},
mock_get_nodes.return_value.keys())
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
- def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_host_and_node):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn1 = objects.ComputeNode(uuid=uuids.cn1)
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [cn1, cn2]
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_uuid):
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host',
+ uuids.node_fake_node2: 'host'}
+ # NOTE(danms): The fake driver, by default, uses
+ # uuidsentinel.node_$node_name, so we can predict the uuids it will
+ # return here.
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host')
+ mock_get_by_uuid.return_value = [cn1, cn2]
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn1: cn1, uuids.cn2: cn2}, nodes)
+ self.assertEqual({uuids.node_fake_node1: cn1,
+ uuids.node_fake_node2: cn2}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_by_uuid.assert_called_once_with(self.context,
+ [uuids.node_fake_node1,
+ uuids.node_fake_node2])
+
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
+ def test_get_nodes_mismatch(self, mock_driver_get_nodes, mock_get_by_uuid):
+ # Virt driver reports a (hypervisor_) hostname of 'host1'
+ mock_driver_get_nodes.return_value = {uuids.node_fake_node1: 'host1',
+ uuids.node_fake_node2: 'host1'}
+
+ # The database records for our compute nodes (by UUID) show a
+ # hypervisor_hostname of 'host2'
+ cn1 = objects.ComputeNode(uuid=uuids.node_fake_node1,
+ hypervisor_hostname='host2')
+ cn2 = objects.ComputeNode(uuid=uuids.node_fake_node2,
+ hypervisor_hostname='host2')
+ mock_get_by_uuid.return_value = [cn1, cn2]
+
+ # Possible hostname (as reported by the virt driver) rename,
+ # which should abort our startup
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._get_nodes, self.context)
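These two tests outline the uuid-keyed node lookup: the virt driver reports {uuid: nodename}, the database is queried by those uuids, missing rows only produce the warning quoted below, and a hypervisor hostname that differs between driver and database aborts startup. A rough reconstruction built from the assertions only; the generic names and the RuntimeError standing in for exception.InvalidConfiguration are assumptions:

def get_nodes(driver, db, log, context, host):
    driver_nodes = driver.get_nodenames_by_uuid()      # {uuid: nodename}
    db_nodes = db.get_all_by_uuids(context, list(driver_nodes))
    if not db_nodes:
        log.warning(
            "Compute nodes %s for host %s were not found in the database. "
            "If this is the first time this service is starting on this "
            "host, then you can ignore this warning.",
            list(driver_nodes), host)
        return {}
    for node in db_nodes:
        if node.hypervisor_hostname != driver_nodes[node.uuid]:
            # likely a hypervisor hostname rename: refuse to start
            raise RuntimeError('hostname mismatch')  # InvalidConfiguration
    return {node.uuid: node for node in db_nodes}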
@mock.patch.object(manager.LOG, 'warning')
@mock.patch.object(
@@ -1145,37 +1226,35 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
"is starting on this host, then you can ignore this warning.")
@mock.patch.object(manager.LOG, 'warning')
- @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
- @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+ @mock.patch.object(fake_driver.FakeDriver, 'get_nodenames_by_uuid')
def test_get_nodes_node_not_found(
- self, mock_driver_get_nodes, mock_get_by_host_and_node,
+ self, mock_driver_get_nodes, mock_get_all_by_uuids,
mock_log_warning):
- mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
- cn2 = objects.ComputeNode(uuid=uuids.cn2)
- mock_get_by_host_and_node.side_effect = [
- exception.ComputeHostNotFound(host='fake-node1'), cn2]
+ mock_driver_get_nodes.return_value = {uuids.node_1: 'fake-node1'}
+ mock_get_all_by_uuids.return_value = []
nodes = self.compute._get_nodes(self.context)
- self.assertEqual({uuids.cn2: cn2}, nodes)
+ self.assertEqual({}, nodes)
mock_driver_get_nodes.assert_called_once_with()
- mock_get_by_host_and_node.assert_has_calls([
- mock.call(self.context, self.compute.host, 'fake-node1'),
- mock.call(self.context, self.compute.host, 'fake-node2'),
- ])
+ mock_get_all_by_uuids.assert_called_once_with(self.context,
+ [uuids.node_1])
mock_log_warning.assert_called_once_with(
- "Compute node %s not found in the database. If this is the first "
- "time this service is starting on this host, then you can ignore "
- "this warning.", 'fake-node1')
+ "Compute nodes %s for host %s were not found in the database. "
+ "If this is the first time this service is starting on this host, "
+ "then you can ignore this warning.", [uuids.node_1], 'fake-mini')
def test_init_host_disk_devices_configuration_failure(self):
self.flags(max_disk_devices_to_attach=0, group='compute')
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration')
def test_init_host_pinning_configuration_validation_failure(self,
@@ -1186,13 +1265,15 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_pinning.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.InstanceList, 'get_by_host',
new=mock.Mock())
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_pinning_configuration',
new=mock.Mock())
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_uuids',
+ new=mock.Mock(return_value=[mock.MagicMock()]))
@mock.patch('nova.compute.manager.ComputeManager.'
'_validate_vtpm_configuration')
def test_init_host_vtpm_configuration_validation_failure(self,
@@ -1203,7 +1284,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock_validate_vtpm.side_effect = exception.InvalidConfiguration
self.assertRaises(exception.InvalidConfiguration,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceList, 'get_by_filters')
@@ -1306,6 +1387,36 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute._init_instance(self.context, instance)
set_error_state.assert_called_once_with(instance)
+ def test_init_instance_vif_plug_fails_missing_pci(self):
+ instance = fake_instance.fake_instance_obj(
+ self.context,
+ uuid=uuids.instance,
+ info_cache=None,
+ power_state=power_state.RUNNING,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ host=self.compute.host,
+ expected_attrs=['info_cache'])
+
+ with test.nested(
+ mock.patch.object(context, 'get_admin_context',
+ return_value=self.context),
+ mock.patch.object(objects.Instance, 'get_network_info',
+ return_value=network_model.NetworkInfo()),
+ mock.patch.object(self.compute.driver, 'plug_vifs',
+ side_effect=exception.PciDeviceNotFoundById("pci-addr")),
+ mock.patch("nova.compute.manager.LOG.exception"),
+ ) as (get_admin_context, get_nw_info, plug_vifs, log_exception):
+ # as this does not raise, we are sure that the compute service
+ # continues initializing the rest of the instances
+ self.compute._init_instance(self.context, instance)
+ log_exception.assert_called_once_with(
+ "Virtual interface plugging failed for instance. Probably the "
+ "vnic_type of the bound port has been changed. Nova does not "
+ "support such change.",
+ instance=instance
+ )
+
def _test__validate_pinning_configuration(self, supports_pcpus=True):
instance_1 = fake_instance.fake_instance_obj(
self.context, uuid=uuids.instance_1)
@@ -2449,10 +2560,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertFalse(mock_get_info.called)
self.assertFalse(mock_sync_power_state.called)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch('nova.compute.manager.ComputeManager.'
'_sync_instance_power_state')
def test_query_driver_power_state_and_sync_not_found_driver(
- self, mock_sync_power_state):
+ self, mock_sync_power_state, mock_claim):
error = exception.InstanceNotFound(instance_id=1)
with mock.patch.object(self.compute.driver,
'get_info', side_effect=error) as mock_get_info:
@@ -3460,7 +3572,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_success(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination()
@@ -3468,7 +3580,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_fail(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: True))
self.assertRaises(
test.TestingException,
@@ -3479,7 +3591,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_contains_vifs(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: True))
migrate_data = self._test_check_can_live_migrate_destination()
self.assertIn('vifs', migrate_data)
@@ -3489,7 +3601,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_no_binding_extended(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: False))
migrate_data = self._test_check_can_live_migrate_destination()
self.assertNotIn('vifs', migrate_data)
@@ -3498,7 +3610,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_src_numa_lm_false(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination(src_numa_lm=False)
@@ -3506,7 +3618,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
side_effect=exception.InstanceGroupNotFound(group_uuid='')))
def test_check_can_live_migrate_destination_src_numa_lm_true(self):
self.useFixture(std_fixtures.MonkeyPatch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
lambda *args: True))
self._test_check_can_live_migrate_destination(src_numa_lm=True)
@@ -4996,8 +5108,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.patch.object(self.compute.reportclient,
'remove_provider_tree_from_instance_allocation'),
mock.patch('nova.objects.Instance.get_by_uuid')
- ) as (_get_intances_on_driver, destroy, migration_list, migration_save,
- get_resources, remove_allocation, instance_get_by_uuid):
+ ) as (_get_instances_on_driver, destroy, migration_list,
+ migration_save, get_resources, remove_allocation,
+ instance_get_by_uuid):
migration_list.return_value = [migration_1]
instance_get_by_uuid.return_value = instance_1
get_resources.return_value = mock.sentinel.resources
@@ -5059,15 +5172,18 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
msg = mock_log.warning.call_args_list[0]
self.assertIn('appears to not be owned by this host', msg[0][0])
- def test_init_host_pci_passthrough_whitelist_validation_failure(self):
- # Tests that we fail init_host if there is a pci.passthrough_whitelist
+ def test_init_host_pci_device_spec_validation_failure(self):
+ # Tests that we fail init_host if there is a pci.device_spec
# configured incorrectly.
- self.flags(passthrough_whitelist=[
- # it's invalid to specify both in the same devspec
- jsonutils.dumps({'address': 'foo', 'devname': 'bar'})],
- group='pci')
+ self.flags(
+ device_spec=[
+ # it's invalid to specify both in the same devspec
+ jsonutils.dumps({'address': 'foo', 'devname': 'bar'})
+ ],
+ group='pci'
+ )
self.assertRaises(exception.PciDeviceInvalidDeviceName,
- self.compute.init_host)
+ self.compute.init_host, None)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
def test_error_out_instance_on_exception_not_implemented_err(self,
@@ -5257,7 +5373,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None, None,
- recreate, False, False, None, scheduled_node, {}, None, [])
+ recreate, False, False, None, scheduled_node, {}, None, [], False,
+ None)
mock_set.assert_called_once_with(None, 'failed')
mock_notify_about_instance_usage.assert_called_once_with(
mock.ANY, instance, 'rebuild.error', fault=mock_rebuild.side_effect
@@ -5350,7 +5467,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self, mock_rebuild_claim, mock_set_migration_status,
mock_validate_policy, mock_image_meta, mock_notify_exists,
mock_notify_legacy, mock_notify, mock_instance_save,
- mock_setup_networks, mock_setup_intance_network, mock_get_bdms,
+ mock_setup_networks, mock_setup_instance_network, mock_get_bdms,
mock_mutate_migration, mock_appy_migration, mock_drop_migration,
mock_context_elevated):
self.flags(api_servers=['http://localhost/image/v2'], group='glance')
@@ -5368,7 +5485,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
None, recreate=True, on_shared_storage=None,
preserve_ephemeral=False, migration=None,
scheduled_node='fake-node',
- limits={}, request_spec=request_spec, accel_uuids=[])
+ limits={}, request_spec=request_spec, accel_uuids=[],
+ reimage_boot_volume=False,
+ target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5407,7 +5526,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.context, instance, None, None, None, None, None, None,
recreate=True, on_shared_storage=None, preserve_ephemeral=False,
migration=None, scheduled_node='fake-node', limits={},
- request_spec=request_spec, accel_uuids=[])
+ request_spec=request_spec, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None)
mock_validate_policy.assert_called_once_with(
elevated_context, instance, {'group': [uuids.group]})
@@ -5433,7 +5553,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None,
None, None, None, None, False,
- False, False, migration, None, {}, None, [])
+ False, False, migration, None, {}, None, [], False,
+ None)
self.assertFalse(mock_get.called)
self.assertEqual(node, instance.node)
self.assertEqual('done', migration.status)
@@ -5455,7 +5576,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.compute.rebuild_instance(
self.context, instance, None, None, None, None, None,
None, True, False, False, mock.sentinel.migration, None, {},
- None, [])
+ None, [], False, None)
mock_get.assert_called_once_with(mock.ANY, self.compute.host)
mock_rt.finish_evacuation.assert_called_once_with(
instance, 'new-node', mock.sentinel.migration)
@@ -5537,7 +5658,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
recreate, on_shared_storage,
preserve_ephemeral, {}, {},
self.allocations,
- mock.sentinel.mapping, [])
+ mock.sentinel.mapping, [],
+ False, None)
mock_notify_usage.assert_has_calls(
[mock.call(self.context, instance, "rebuild.start",
@@ -5555,8 +5677,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
provider_mappings=mock.sentinel.mapping)
mock_get_nw_info.assert_called_once_with(self.context, instance)
- def test_rebuild_default_impl(self):
- def _detach(context, bdms):
+ @ddt.data((False, False), (False, True), (True, False), (True, True))
+ @ddt.unpack
+ def test_rebuild_default_impl(self, is_vol_backed, reimage_boot_vol):
+ fake_image_meta = mock.MagicMock(id='fake_id')
+
+ def _detach(context, bdms, detach_root_bdm=True):
# NOTE(rpodolyaka): check that instance has been powered off by
# the time we detach block devices, exact calls arguments will be
# checked below
@@ -5582,13 +5708,20 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
mock.patch.object(self.compute, '_power_off_instance',
return_value=None),
mock.patch.object(self.compute, '_get_accel_info',
- return_value=[])
+ return_value=[]),
+ mock.patch.object(compute_utils, 'is_volume_backed_instance',
+ return_value=is_vol_backed),
+ mock.patch.object(self.compute, '_rebuild_volume_backed_instance'),
+ mock.patch.object(compute_utils, 'get_root_bdm')
) as(
mock_destroy,
mock_spawn,
mock_save,
mock_power_off,
- mock_accel_info
+ mock_accel_info,
+ mock_is_volume_backed,
+ mock_rebuild_vol_backed_inst,
+ mock_get_root,
):
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = None
@@ -5598,9 +5731,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
instance.device_metadata = None
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
+ fake_block_device_info = {
+ 'block_device_mapping': [
+ {'attachment_id': '341a8917-f74d-4473-8ee7-4ca05e5e0ab3',
+ 'volume_id': 'b7c93bb9-dfe4-41af-aa56-e6b28342fd8f',
+ 'connection_info': {'driver_volume_type': 'iscsi',
+ 'data': {'target_discovered': False,
+ 'target_portal': '127.0.0.1:3260',
+ 'target_iqn': 'iqn.2010-10.org.openstack:volume-'
+ 'b7c93bb9-dfe4-41af-aa56-e6b28342fd8f',
+ 'target_lun': 0}}}]}
self.compute._rebuild_default_impl(self.context,
instance,
- None,
+ fake_image_meta,
[],
admin_password='new_pass',
bdms=[],
@@ -5609,16 +5752,151 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
attach_block_devices=_attach,
network_info=None,
evacuate=False,
- block_device_info=None,
- preserve_ephemeral=False)
+ block_device_info=
+ fake_block_device_info,
+ preserve_ephemeral=False,
+ reimage_boot_volume=
+ reimage_boot_vol)
self.assertTrue(mock_save.called)
self.assertTrue(mock_spawn.called)
mock_destroy.assert_called_once_with(
self.context, instance,
- network_info=None, block_device_info=None)
+ network_info=None, block_device_info=fake_block_device_info)
mock_power_off.assert_called_once_with(
instance, clean_shutdown=True)
+ if is_vol_backed and reimage_boot_vol:
+ mock_rebuild_vol_backed_inst.assert_called_once_with(
+ self.context, instance, [], fake_image_meta.id)
+ else:
+ mock_rebuild_vol_backed_inst.assert_not_called()
+
+ @mock.patch('nova.volume.cinder.API.attachment_delete')
+ @mock.patch('nova.volume.cinder.API.attachment_create',
+ return_value={'id': uuids.new_attachment_id})
+ @mock.patch.object(nova.compute.manager.ComputeVirtAPI,
+ 'wait_for_instance_event')
+ def test__rebuild_volume_backed_instance(
+ self, wait_inst_event, attach_create, attach_delete):
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ bdms = [root_bdm]
+ events = [('volume-reimaged', root_bdm.volume_id)]
+ image_size_gb = 1
+ deadline = CONF.reimage_timeout_per_gb * image_size_gb
+
+ with test.nested(
+ mock.patch.object(objects.Instance, 'save',
+ return_value=None),
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm),
+ mock.patch.object(self.compute, 'volume_api'),
+ mock.patch.object(self.compute.image_api, 'get'),
+ ) as (
+ mock_save,
+ mock_get_root_bdm,
+ mock_vol_api,
+ mock_get_img
+ ):
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ # 1024 ** 3 = 1073741824
+ mock_get_img.return_value = {'size': 1073741824}
+ self.compute._rebuild_volume_backed_instance(
+ self.context, instance, bdms, uuids.image_id)
+ mock_vol_api.attachment_create.assert_called_once_with(
+ self.context, uuids.volume_id, instance.uuid)
+ mock_vol_api.attachment_delete.assert_called_once_with(
+ self.context, uuids.old_attachment_id)
+ mock_vol_api.reimage_volume.assert_called_once_with(
+ self.context, uuids.volume_id, uuids.image_id,
+ reimage_reserved=True)
+ mock_get_img.assert_called_once_with(
+ self.context, uuids.image_id)
+ mock_get_root_bdm.assert_called_once_with(
+ self.context, instance, bdms)
+ wait_inst_event.assert_called_once_with(
+ instance, events, deadline=deadline,
+ error_callback=self.compute._reimage_failed_callback)
+
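The attachment swap and the wait/deadline arithmetic asserted above can be summarised in a small sketch; reimage_root_volume, timeout_per_gb, virtapi and the whole-GiB rounding are illustrative assumptions, not the real manager attributes or exact rounding:

def reimage_root_volume(volume_api, image_api, virtapi, context, instance,
                        root_bdm, image_id, timeout_per_gb,
                        error_callback=None):
    # swap to a fresh attachment before asking Cinder to rewrite the volume
    new_attachment = volume_api.attachment_create(
        context, root_bdm.volume_id, instance.uuid)
    volume_api.attachment_delete(context, root_bdm.attachment_id)
    root_bdm.attachment_id = new_attachment['id']
    root_bdm.save()

    volume_api.reimage_volume(
        context, root_bdm.volume_id, image_id, reimage_reserved=True)

    # the deadline scales with image size: timeout_per_gb seconds per GiB
    # (1073741824 bytes == 1 GiB in the test above)
    image = image_api.get(context, image_id)
    size_gb = max(1, image['size'] // (1024 ** 3))
    deadline = timeout_per_gb * size_gb

    # completion is signalled by an external 'volume-reimaged' event
    events = [('volume-reimaged', root_bdm.volume_id)]
    virtapi.wait_for_instance_event(
        instance, events, deadline=deadline, error_callback=error_callback)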
+ @mock.patch('nova.volume.cinder.API.attachment_delete')
+ @mock.patch('nova.volume.cinder.API.attachment_create',
+ return_value={'id': uuids.new_attachment_id})
+ @mock.patch.object(nova.compute.manager.ComputeVirtAPI,
+ 'wait_for_instance_event')
+ def test__rebuild_volume_backed_instance_image_not_found(
+ self, wait_inst_event, attach_create, attach_delete):
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ bdms = [root_bdm]
+
+ with test.nested(
+ mock.patch.object(objects.Instance, 'save',
+ return_value=None),
+ mock.patch.object(compute_utils, 'get_root_bdm',
+ return_value=root_bdm),
+ mock.patch.object(self.compute, 'volume_api'),
+ mock.patch.object(self.compute.image_api, 'get'),
+ ) as(
+ mock_save,
+ mock_get_root_bdm,
+ mock_vol_api,
+ mock_get_img
+ ):
+ mock_get_img.side_effect = exception.ImageNotFound(
+ image_id=uuids.image_id)
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ mock_get_img.return_value = {'size': 1}
+ self.assertRaises(
+ exception.BuildAbortException,
+ self.compute._rebuild_volume_backed_instance,
+ self.context, instance, bdms, uuids.image_id)
+ mock_vol_api.attachment_create.assert_called_once_with(
+ self.context, uuids.volume_id, instance.uuid)
+ mock_vol_api.attachment_delete.assert_called_once_with(
+ self.context, uuids.old_attachment_id)
+ mock_get_img.assert_called_once_with(
+ self.context, uuids.image_id)
+
+ @mock.patch.object(objects.Instance, 'save', return_value=None)
+ @mock.patch.object(fake_driver.SmallFakeDriver, 'detach_volume')
+ @mock.patch.object(cinder.API, 'roll_detaching')
+ def test__detach_root_volume(self, mock_roll_detach, mock_detach,
+ mock_save):
+ exception_list = [
+ '',
+ exception.DiskNotFound(location="not\\here"),
+ exception.DeviceDetachFailed(device="fake_dev", reason="unknown"),
+ ]
+ mock_detach.side_effect = exception_list
+ fake_conn_info = '{}'
+ fake_device = 'fake_vda'
+ root_bdm = mock.MagicMock(
+ volume_id=uuids.volume_id, connection_info=fake_conn_info,
+ device_name=fake_device, attachment_id=uuids.old_attachment_id,
+ save=mock.MagicMock())
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.task_state = task_states.REBUILDING
+ instance.save(expected_task_state=[task_states.REBUILDING])
+ self.compute._detach_root_volume(self.context, instance, root_bdm)
+ self.compute._detach_root_volume(self.context, instance, root_bdm)
+ self.assertRaises(exception.DeviceDetachFailed,
+ self.compute._detach_root_volume,
+ self.context, instance, root_bdm)
+ mock_roll_detach.assert_called_with(self.context, uuids.volume_id)
+ self.assertRaises(Exception, self.compute._detach_root_volume, # noqa
+ self.context, instance, root_bdm)
+ mock_roll_detach.assert_called_with(self.context, uuids.volume_id)
def test_do_rebuild_instance_check_trusted_certs(self):
"""Tests the scenario that we're rebuilding an instance with
@@ -5640,7 +5918,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
request_spec=objects.RequestSpec(),
allocations=self.allocations,
request_group_resource_providers_mapping=mock.sentinel.mapping,
- accel_uuids=[])
+ accel_uuids=[], reimage_boot_volume=False, target_state=None)
self.assertIn('Trusted image certificates provided on host', str(ex))
def test_reverts_task_state_instance_not_found(self):
@@ -6084,6 +6362,171 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
self.assertEqual({'one-image': 'cached',
'two-image': 'existing'}, r)
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_version(self, mock_read, mock_write):
+ # Make sure an up-to-date service bypasses the persistence
+ service_ref = service_obj.Service()
+ self.assertEqual(service_obj.SERVICE_VERSION, service_ref.version)
+ mock_read.return_value = 'not none'
+ mock_write.assert_not_called()
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_ironic(self, mock_node):
+ # Make sure an old service for ironic does not write a local node uuid
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_not_needed_preprovisioned(self,
+ mock_read_node,
+ mock_write_node):
+ # Make sure an old service does not write a uuid if one is present
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = str(uuids.SOME_UUID)
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_no_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, we have no pre-provisioned node uuid
+ # and we find no nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = []
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_multi_node(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, we have no pre-provisioned node uuid
+ # and we find multiple nodes in the database, we do not write a local
+ # node uuid *and* we abort startup since something is likely wrong.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [1, 2]
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_not_called()
+
+ @mock.patch.object(virt_node, 'write_local_node_uuid')
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ @mock.patch('nova.objects.ComputeNodeList.get_all_by_host')
+ def test_ensure_node_uuid_upgrade_writes_node_uuid(self, mock_get_cn,
+ mock_read_node,
+ mock_write_node):
+ # If we are not a new service, there is no pre-provisioned local
+ # compute node uuid, and we find exactly one compute node in the
+ # database for our host, we persist that.
+ with mock.patch.object(service_obj, 'SERVICE_VERSION', new=60):
+ service_ref = service_obj.Service()
+ self.assertEqual(60, service_ref.version)
+ mock_read_node.return_value = None
+ mock_get_cn.return_value = [
+ objects.ComputeNode(uuid=str(uuids.compute)),
+ ]
+ self.compute._ensure_existing_node_identity(service_ref)
+ mock_get_cn.assert_called_once_with(mock.ANY, self.compute.host)
+ mock_write_node.assert_called_once_with(str(uuids.compute))
+
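Taken together, the node-identity tests in this block pin down a small decision tree. The sketch below reconstructs it from those assertions only; the parameter names and the RuntimeError placeholder for exception.InvalidConfiguration are assumptions:

def ensure_existing_node_identity(service_version, identity_version,
                                  is_ironic, read_uuid, write_uuid,
                                  get_host_nodes):
    if service_version >= identity_version:
        # an already-upgraded service must have a local node uuid on disk,
        # unless the (ironic) driver manages its nodes elsewhere
        if read_uuid() is None and not is_ironic:
            raise RuntimeError('missing node uuid')  # InvalidConfiguration
        return
    # older service record: nothing to migrate for ironic, or if a uuid
    # file is already present
    if is_ironic or read_uuid() is not None:
        return
    nodes = get_host_nodes()
    if len(nodes) != 1:
        # zero or multiple compute nodes for this host: refuse to guess
        raise RuntimeError('ambiguous identity')  # InvalidConfiguration
    write_uuid(nodes[0].uuid)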
+ @mock.patch.object(virt_node, 'read_local_node_uuid')
+ def test_ensure_node_uuid_missing_file_ironic(self, mock_read):
+ mock_service = mock.MagicMock(
+ version=service_obj.NODE_IDENTITY_VERSION)
+ mock_read.return_value = None
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._ensure_existing_node_identity,
+ mock_service)
+ mock_read.assert_called_once_with()
+
+ # Now make sure that ironic causes this exact configuration to pass
+ self.flags(compute_driver='ironic')
+ self.compute._ensure_existing_node_identity(mock_service)
+
+ def test_ensure_node_uuid_called_by_init_host(self):
+ # test_init_host() above ensures that we do not call
+ # _ensure_existing_node_identity() in the service_ref=None case.
+ # Since testing init_host() requires a billion mocks, this
+ # tests that we do call it when expected, but make it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_ensure_existing_node_identity') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host,
+ mock.sentinel.service_ref)
+ m.assert_called_once_with(mock.sentinel.service_ref)
+
+ def test_check_for_host_rename_ironic(self):
+ self.flags(compute_driver='ironic')
+ # Even with a node owned by another host, we take the early exit
+ # because of our virt driver
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.compute._check_for_host_rename(nodes)
+
+ def test_check_for_host_rename_renamed_only(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_renamed_one(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host),
+ uuids.node2: mock.MagicMock(uuid=uuids.node2,
+ host='not-this-host')}
+ self.assertRaises(exception.InvalidConfiguration,
+ self.compute._check_for_host_rename, nodes)
+
+ def test_check_for_host_rename_not_renamed(self):
+ nodes = {uuids.node1: mock.MagicMock(uuid=uuids.node1,
+ host=self.compute.host)}
+ with mock.patch.object(manager.LOG, 'debug') as mock_debug:
+ self.compute._check_for_host_rename(nodes)
+ mock_debug.assert_called_once_with(
+ 'Verified node %s matches my host %s',
+ uuids.node1, self.compute.host)
+
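And the host-rename guard itself, as these four tests characterise it: an early exit for the ironic driver, a fatal error if any node record claims a different host, and a debug log per verified node. A hedged sketch with generic parameters and RuntimeError standing in for exception.InvalidConfiguration:

def check_for_host_rename(is_ironic, log, my_host, nodes):
    if is_ironic:
        # ironic nodes are not tied to this host's name, skip the check
        return
    for node in nodes.values():
        if node.host != my_host:
            # the node records point at another host, which usually means
            # this host was renamed: abort startup
            raise RuntimeError('host renamed')  # InvalidConfiguration
        log.debug('Verified node %s matches my host %s',
                  node.uuid, my_host)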
+ @mock.patch('nova.compute.manager.ComputeManager._get_nodes')
+ def test_check_for_host_rename_called_by_init_host(self, mock_nodes):
+ # Since testing init_host() requires a billion mocks, this
+ # tests that we do call it when expected, but make it raise
+ # to avoid running the rest of init_host().
+ with mock.patch.object(self.compute,
+ '_check_for_host_rename') as m:
+ m.side_effect = test.TestingException
+ self.assertRaises(test.TestingException,
+ self.compute.init_host, None)
+ m.assert_called_once_with(mock_nodes.return_value)
+
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def setUp(self):
@@ -6126,6 +6569,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver)
self.compute.rt = fake_rt
+ self.compute.driver._set_nodes([self.node])
+ self.compute.rt.compute_nodes = {self.node: objects.ComputeNode()}
self.allocations = {
uuids.provider1: {
@@ -6415,6 +6860,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_get_arqs.assert_called_once_with(
self.instance.uuid, only_resolved=True)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -6426,7 +6872,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_spawn_called_with_accel_info(self, mock_ins_usage,
mock_ins_create, mock_dev_tag, mock_certs, mock_req_group_map,
- mock_get_allocations, mock_ins_save, mock_spawn):
+ mock_get_allocations, mock_ins_save, mock_spawn, mock_claim):
accel_info = [{'k1': 'v1', 'k2': 'v2'}]
@@ -6700,13 +7146,15 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.security_groups, self.block_device_mapping,
request_spec={}, host_lists=[fake_host_list])
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_shutdown_instance')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_rescheduled_exception_with_non_ascii_exception(self,
- mock_notify, mock_save, mock_spawn, mock_build, mock_shutdown):
+ mock_notify, mock_save, mock_spawn, mock_build, mock_shutdown,
+ mock_claim):
exc = exception.NovaException(u's\xe9quence')
mock_build.return_value = self.network_info
@@ -6722,7 +7170,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.accel_uuids)
mock_save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(expected_task_state='block_device_mapping'),
])
mock_notify.assert_has_calls([
@@ -7228,6 +7675,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.assertEqual(10, mock_failed.call_count)
mock_succeeded.assert_not_called()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_shutdown_instance')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
@mock.patch.object(fake_driver.FakeDriver, 'spawn')
@@ -7235,7 +7683,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def _test_instance_exception(self, exc, raised_exc,
mock_notify, mock_save, mock_spawn,
- mock_build, mock_shutdown):
+ mock_build, mock_shutdown, mock_claim):
"""This method test the instance related InstanceNotFound
and reschedule on exception errors. The test cases get from
arguments.
@@ -7258,7 +7706,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock_save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(expected_task_state='block_device_mapping')])
mock_notify.assert_has_calls([
mock.call(self.context, self.instance, 'create.start',
@@ -7369,11 +7816,12 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
'_shutdown_instance'),
mock.patch.object(self.compute,
'_validate_instance_group_policy'),
+ mock.patch.object(self.compute.rt, 'instance_claim'),
mock.patch('nova.compute.utils.notify_about_instance_create')
) as (spawn, save,
_build_networks_for_instance, _notify_about_instance_usage,
_shutdown_instance, _validate_instance_group_policy,
- mock_notify):
+ mock_claim, mock_notify):
self.assertRaises(exception.BuildAbortException,
self.compute._build_and_run_instance, self.context,
@@ -7404,7 +7852,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
save.assert_has_calls([
mock.call(),
- mock.call(),
mock.call(
expected_task_state=task_states.BLOCK_DEVICE_MAPPING)])
@@ -7466,11 +7913,12 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
request_spec={}, host_lists=[fake_host_list])
mock_nil.assert_called_once_with(self.instance)
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_build_resources')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
def test_build_resources_buildabort_reraise(self, mock_notify, mock_save,
- mock_build):
+ mock_build, mock_claim):
exc = exception.BuildAbortException(
instance_uuid=self.instance.uuid, reason='')
mock_build.side_effect = exc
@@ -7484,7 +7932,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.node, self.limits, self.filter_properties,
request_spec=[], accel_uuids=self.accel_uuids)
- mock_save.assert_called_once_with()
mock_notify.assert_has_calls([
mock.call(self.context, self.instance, 'create.start',
extra_usage_info={'image_name': self.image.get('name')}),
@@ -7585,6 +8032,27 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
instance, hints)
mock_get.assert_called_once_with(self.context, uuids.group_hint)
+ @mock.patch('nova.objects.InstanceGroup.get_by_hint')
+ def test_validate_instance_group_policy_deleted_group(self, mock_get):
+ """Tests that _validate_instance_group_policy handles the case
+ where the scheduler hint has a group but that group has been deleted.
+ This test is a reproducer for bug: #1890244
+ """
+ instance = objects.Instance(uuid=uuids.instance)
+ hints = {'group': [uuids.group_hint]}
+ mock_get.side_effect = exception.InstanceGroupNotFound(
+ group_uuid=uuids.group_hint
+ )
+ # This implicitly asserts that no exception is raised since
+ # uncaught exceptions would be treated as a test failure.
+ self.compute._validate_instance_group_policy(
+ self.context, instance, hints
+ )
+ # and this just asserts that we did in fact invoke the method
+ # that raises, to ensure that if we refactor in the future this
+ # test will fail if the function we mock is no longer called.
+ mock_get.assert_called_once_with(self.context, uuids.group_hint)
+
@mock.patch('nova.objects.InstanceGroup.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_uuids_by_host')
@mock.patch('nova.objects.InstanceGroup.get_by_hint')
@@ -7669,6 +8137,42 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
@mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'prepare_networks_before_block_device_mapping')
+ @mock.patch.object(virt_driver.ComputeDriver,
+ 'clean_networks_preparation')
+ def test_failed_prepare_for_spawn(self, mock_clean, mock_prepnet,
+ mock_prepspawn, mock_failedspawn):
+ mock_prepspawn.side_effect = exception.ComputeResourcesUnavailable(
+ reason="asdf")
+ with mock.patch.object(self.compute,
+ '_build_networks_for_instance',
+ return_value=self.network_info
+ ) as _build_networks_for_instance:
+
+ try:
+ with self.compute._build_resources(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.image, self.block_device_mapping,
+ self.resource_provider_mapping, self.accel_uuids):
+ pass
+ except Exception as e:
+ self.assertIsInstance(e,
+ exception.ComputeResourcesUnavailable)
+
+ _build_networks_for_instance.assert_has_calls(
+ [mock.call(self.context, self.instance,
+ self.requested_networks, self.security_groups,
+ self.resource_provider_mapping,
+ self.network_arqs)])
+
+ mock_prepnet.assert_not_called()
+ mock_clean.assert_called_once_with(self.instance, self.network_info)
+ mock_prepspawn.assert_called_once_with(self.instance)
+ mock_failedspawn.assert_called_once_with(self.instance)
+
+ @mock.patch.object(virt_driver.ComputeDriver, 'failed_spawn_cleanup')
+ @mock.patch.object(virt_driver.ComputeDriver, 'prepare_for_spawn')
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
def test_build_resources_aborts_on_failed_network_alloc(self, mock_build,
mock_prepspawn,
@@ -8082,10 +8586,11 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
ctxt, instance, req_networks)
warning_mock.assert_not_called()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch('nova.compute.utils.notify_about_instance_create')
@mock.patch.object(manager.ComputeManager, '_instance_update')
def test_launched_at_in_create_end_notification(self,
- mock_instance_update, mock_notify_instance_create):
+ mock_instance_update, mock_notify_instance_create, mock_claim):
def fake_notify(*args, **kwargs):
if args[2] == 'create.end':
@@ -8125,6 +8630,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self.flags(default_access_ip_network_name='test1')
instance = fake_instance.fake_db_instance()
+ @mock.patch.object(self.compute.rt, 'instance_claim')
@mock.patch.object(db, 'instance_update_and_get_original',
return_value=({}, instance))
@mock.patch.object(self.compute.driver, 'spawn')
@@ -8133,7 +8639,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(db, 'instance_extra_update_by_uuid')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
def _check_access_ip(mock_notify, mock_extra, mock_networks,
- mock_spawn, mock_db_update):
+ mock_spawn, mock_db_update, mock_claim):
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
@@ -8154,8 +8660,10 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
_check_access_ip()
+ @mock.patch('nova.compute.resource_tracker.ResourceTracker.instance_claim')
@mock.patch.object(manager.ComputeManager, '_instance_update')
- def test_create_error_on_instance_delete(self, mock_instance_update):
+ def test_create_error_on_instance_delete(self, mock_instance_update,
+ mock_claim):
def fake_notify(*args, **kwargs):
if args[2] == 'create.error':
@@ -8169,7 +8677,7 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(self.compute,
'_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save',
- side_effect=[None, None, None, exc]),
+ side_effect=[None, None, exc]),
mock.patch.object(self.compute, '_notify_about_instance_usage',
side_effect=fake_notify)
) as (mock_spawn, mock_networks, mock_save, mock_notify):
@@ -8198,7 +8706,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(
self.compute, '_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save'),
- ) as (mock_spawn, mock_networks, mock_save):
+ mock.patch.object(self.compute.rt, 'instance_claim'),
+ ) as (mock_spawn, mock_networks, mock_save, mock_claim):
self.compute._build_and_run_instance(
self.context,
self.instance, self.image, self.injected_files,
@@ -8229,11 +8738,17 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
# resource request and therefore no matching request group exists in
# the request spec.
self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(),
objects.InstancePCIRequest(
+ request_id=uuids.req0,
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
requester_id=uuids.port1,
spec=[{'vendor_id': '1377', 'product_id': '0047'}]),
- objects.InstancePCIRequest(requester_id=uuids.port2),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ requester_id=uuids.port2,
+ ),
])
with test.nested(
mock.patch.object(self.compute.driver, 'spawn'),
@@ -8242,7 +8757,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
mock.patch.object(self.instance, 'save'),
mock.patch('nova.scheduler.client.report.'
'SchedulerReportClient._get_resource_provider'),
- ) as (mock_spawn, mock_networks, mock_save, mock_get_rp):
+ mock.patch.object(self.compute.rt, 'instance_claim'),
+ ) as (mock_spawn, mock_networks, mock_save, mock_get_rp, mock_claim):
mock_get_rp.return_value = {
'uuid': uuids.rp1,
'name': 'compute1:sriov-agent:ens3'
@@ -8278,8 +8794,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
with mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_resource_provider') as (mock_get_rp):
mock_get_rp.return_value = None
@@ -8301,8 +8822,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
with mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'_get_resource_provider') as (mock_get_rp):
mock_get_rp.return_value = {
@@ -8326,8 +8852,13 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
requester_id=uuids.port1,
provider_uuids=[uuids.rp1, uuids.rp2])])
- self.instance.pci_requests = objects.InstancePCIRequests(requests=[
- objects.InstancePCIRequest(requester_id=uuids.port1)])
+ self.instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port1, request_id=uuids.req1
+ )
+ ]
+ )
self.assertRaises(
exception.BuildAbortException,
@@ -8563,11 +9094,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@mock.patch.object(compute_utils, 'notify_usage_exists')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def do_test(get_by_instance_uuid,
- migration_save,
notify_usage_exists,
migrate_instance_start,
setup_networks_on_host,
@@ -8639,7 +9168,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'migrate_instance_finish',
side_effect=_migrate_instance_finish)
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.instance, 'save')
@mock.patch.object(self.compute, '_set_instance_info')
@mock.patch.object(db, 'instance_fault_create')
@@ -8653,7 +9181,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
fault_create,
set_instance_info,
instance_save,
- migration_save,
setup_networks_on_host,
migrate_instance_finish,
get_instance_nw_info,
@@ -8697,11 +9224,9 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@mock.patch.object(compute_utils, 'notify_usage_exists')
@mock.patch.object(db, 'instance_extra_update_by_uuid')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def do_revert_resize(mock_get_by_instance_uuid,
- mock_migration_save,
mock_extra_update,
mock_notify_usage_exists,
mock_migrate_instance_start,
@@ -8748,7 +9273,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(compute_utils, 'notify_about_instance_action')
@mock.patch.object(self.compute, "_set_instance_info")
@mock.patch.object(self.instance, 'save')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(db, 'instance_fault_create')
@mock.patch.object(db, 'instance_extra_update_by_uuid')
@@ -8772,7 +9296,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
mock_extra_update,
mock_fault_create,
mock_fault_from_exc,
- mock_mig_save,
mock_inst_save,
mock_set,
mock_notify_about_instance_action,
@@ -8866,7 +9389,6 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.compute, '_delete_scheduler_instance_info')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.Migration.get_by_id')
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, 'network_api')
@mock.patch.object(self.compute.driver, 'confirm_migration')
@@ -8875,7 +9397,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch.object(self.instance, 'save')
def do_confirm_resize(mock_save, mock_drop, mock_delete,
mock_confirm, mock_nwapi, mock_notify,
- mock_mig_save, mock_mig_get, mock_inst_get,
+ mock_mig_get, mock_inst_get,
mock_delete_scheduler_info):
self._mock_rt()
@@ -8958,16 +9480,16 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
instance_get_by_uuid.assert_called_once()
def test_confirm_resize_calls_virt_driver_with_old_pci(self):
- @mock.patch.object(self.migration, 'save')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, 'network_api')
@mock.patch.object(self.compute.driver, 'confirm_migration')
@mock.patch.object(self.compute, '_delete_allocation_after_move')
@mock.patch.object(self.instance, 'drop_migration_context')
@mock.patch.object(self.instance, 'save')
- def do_confirm_resize(mock_save, mock_drop, mock_delete,
- mock_confirm, mock_nwapi, mock_notify,
- mock_mig_save):
+ def do_confirm_resize(
+ mock_save, mock_drop, mock_delete, mock_confirm, mock_nwapi,
+ mock_notify
+ ):
# Mock virt driver confirm_resize() to save the provided
# network_info, we will check it later.
updated_nw_info = []
@@ -8983,10 +9505,12 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self._mock_rt()
old_devs = objects.PciDeviceList(
objects=[objects.PciDevice(
+ compute_node_id=1,
address='0000:04:00.2',
request_id=uuids.pcidev1)])
new_devs = objects.PciDeviceList(
objects=[objects.PciDevice(
+ compute_node_id=2,
address='0000:05:00.3',
request_id=uuids.pcidev1)])
self.instance.migration_context = objects.MigrationContext(
@@ -9135,9 +9659,15 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertEqual(driver_console.get_connection_info.return_value,
console)
+ @mock.patch('nova.utils.pass_context')
@mock.patch('nova.compute.manager.ComputeManager.'
'_do_live_migration')
- def _test_max_concurrent_live(self, mock_lm):
+ def _test_max_concurrent_live(self, mock_lm, mock_pass_context):
+ # pass_context wraps the function, which doesn't work with a mock,
+ # so we simply mock it too.
+ def _mock_pass_context(runner, func, *args, **kwargs):
+ return runner(func, *args, **kwargs)
+ mock_pass_context.side_effect = _mock_pass_context
@mock.patch('nova.objects.Migration.save')
def _do_it(mock_mig_save):
@@ -9539,7 +10069,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertEqual('error', self.migration.status)
mock_rollback_live_mig.assert_called_once_with(
self.context, self.instance, 'dest-host',
- migrate_data=migrate_data, source_bdms=source_bdms)
+ migrate_data=migrate_data, source_bdms=source_bdms,
+ pre_live_migration=True)
@mock.patch('nova.compute.rpcapi.ComputeAPI.pre_live_migration')
@mock.patch('nova.compute.manager.ComputeManager._rollback_live_migration')
@@ -9574,7 +10105,8 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.assertEqual('error', self.migration.status)
mock_rollback_live_mig.assert_called_once_with(
self.context, self.instance, 'dest-host',
- migrate_data=migrate_data, source_bdms=source_bdms)
+ migrate_data=migrate_data, source_bdms=source_bdms,
+ pre_live_migration=True)
@mock.patch('nova.compute.rpcapi.ComputeAPI.pre_live_migration')
@mock.patch('nova.compute.manager.ComputeManager._rollback_live_migration')
@@ -9956,6 +10488,27 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
self.instance,
migration)
+ def test_post_live_migration_update_host(self):
+ @mock.patch.object(self.compute, '_get_compute_info')
+ def _test_post_live_migration(_get_compute_info):
+ dest_host = 'dest'
+ cn = objects.ComputeNode(hypervisor_hostname=dest_host)
+ _get_compute_info.return_value = cn
+ instance = fake_instance.fake_instance_obj(self.context,
+ node='src',
+ uuid=uuids.instance)
+ with mock.patch.object(self.compute, "_post_live_migration"
+ ) as plm, mock.patch.object(instance, "save") as save:
+ error = ValueError("some failure")
+ plm.side_effect = error
+ self.assertRaises(
+ ValueError, self.compute._post_live_migration_update_host,
+ self.context, instance, dest_host)
+ save.assert_called_once()
+ self.assertEqual(instance.host, dest_host)
+
+ _test_post_live_migration()
+
def test_post_live_migration_cinder_pre_344_api(self):
# Because live migration has
# succeeded,_post_live_migration_remove_source_vol_connections()
@@ -10420,19 +10973,34 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
action='live_migration_abort', phase='end')]
)
+ @mock.patch.object(objects.Instance, 'save')
+ @mock.patch.object(manager.ComputeManager, '_revert_allocation')
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
@mock.patch.object(objects.Migration, 'get_by_id')
@mock.patch('nova.compute.utils.notify_about_instance_action')
def test_live_migration_abort_queued(self, mock_notify_action,
- mock_get_migration, mock_notify):
+ mock_get_migration, mock_notify,
+ mock_revert_allocations,
+ mock_instance_save):
instance = objects.Instance(id=123, uuid=uuids.instance)
migration = self._get_migration(10, 'queued', 'live-migration')
+ migration.dest_compute = uuids.dest
+ migration.dest_node = uuids.dest
migration.save = mock.MagicMock()
mock_get_migration.return_value = migration
fake_future = mock.MagicMock()
self.compute._waiting_live_migrations[instance.uuid] = (
migration, fake_future)
- self.compute.live_migration_abort(self.context, instance, migration.id)
+ with mock.patch.object(
+ self.compute.network_api,
+ 'setup_networks_on_host') as mock_setup_net:
+ self.compute.live_migration_abort(
+ self.context, instance, migration.id)
+ mock_setup_net.assert_called_once_with(
+ self.context, instance, host=migration.dest_compute,
+ teardown=True)
+ mock_revert_allocations.assert_called_once_with(
+ self.context, instance, migration)
mock_notify.assert_has_calls(
[mock.call(self.context, instance,
'live.migration.abort.start'),
@@ -10772,7 +11340,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_usage_exists')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
@@ -10806,7 +11374,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
@mock.patch('nova.compute.resource_tracker.ResourceTracker.resize_claim')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.compute.utils.notify_usage_exists')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
@@ -10940,40 +11508,94 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase,
_test()
def test__update_migrate_vifs_profile_with_pci(self):
- # Define two migrate vifs with only one pci that is required
- # to be updated. Make sure method under test updated the correct one
+ # Define three migrate vifs with two pci devs that are required
+ # to be updated, one VF and one PF.
+ # Make sure the method under test updates the correct devs with the
+ # correct values.
nw_vifs = network_model.NetworkInfo(
- [network_model.VIF(
- id=uuids.port0,
- vnic_type='direct',
- type=network_model.VIF_TYPE_HW_VEB,
- profile={'pci_slot': '0000:04:00.3',
- 'pci_vendor_info': '15b3:1018',
- 'physical_network': 'default'}),
- network_model.VIF(
- id=uuids.port1,
- vnic_type='normal',
- type=network_model.VIF_TYPE_OVS,
- profile={'some': 'attribute'})])
- pci_dev = objects.PciDevice(request_id=uuids.pci_req,
- address='0000:05:00.4',
- vendor_id='15b3',
- product_id='1018')
- port_id_to_pci_dev = {uuids.port0: pci_dev}
- mig_vifs = migrate_data_obj.VIFMigrateData.\
- create_skeleton_migrate_vifs(nw_vifs)
- self.compute._update_migrate_vifs_profile_with_pci(mig_vifs,
- port_id_to_pci_dev)
+ [
+ network_model.VIF(
+ id=uuids.port0,
+ vnic_type='direct',
+ type=network_model.VIF_TYPE_HW_VEB,
+ profile={
+ 'pci_slot': '0000:04:00.3',
+ 'pci_vendor_info': '15b3:1018',
+ 'physical_network': 'default',
+ },
+ ),
+ network_model.VIF(
+ id=uuids.port1,
+ vnic_type='normal',
+ type=network_model.VIF_TYPE_OVS,
+ profile={'some': 'attribute'},
+ ),
+ network_model.VIF(
+ id=uuids.port2,
+ vnic_type='direct-physical',
+ type=network_model.VIF_TYPE_HOSTDEV,
+ profile={
+ 'pci_slot': '0000:01:00',
+ 'pci_vendor_info': '8086:154d',
+ 'physical_network': 'physnet2',
+ },
+ ),
+ ]
+ )
+
+ pci_vf_dev = objects.PciDevice(
+ request_id=uuids.pci_req,
+ address='0000:05:00.4',
+ parent_addr='0000:05:00',
+ vendor_id='15b3',
+ product_id='1018',
+ compute_node_id=13,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ )
+ pci_pf_dev = objects.PciDevice(
+ request_id=uuids.pci_req2,
+ address='0000:01:00',
+ parent_addr='0000:02:00',
+ vendor_id='8086',
+ product_id='154d',
+ compute_node_id=13,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'},
+ )
+ port_id_to_pci_dev = {
+ uuids.port0: pci_vf_dev,
+ uuids.port2: pci_pf_dev,
+ }
+ mig_vifs = (
+ migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs(
+ nw_vifs)
+ )
+
+ self.compute._update_migrate_vifs_profile_with_pci(
+ mig_vifs, port_id_to_pci_dev)
+
# Make sure method under test updated the correct one.
- changed_mig_vif = mig_vifs[0]
+ changed_vf_mig_vif = mig_vifs[0]
unchanged_mig_vif = mig_vifs[1]
+ changed_pf_mig_vif = mig_vifs[2]
# Migrate vifs profile was updated with pci_dev.address
# for port ID uuids.port0.
- self.assertEqual(changed_mig_vif.profile['pci_slot'],
- pci_dev.address)
+ self.assertEqual(changed_vf_mig_vif.profile['pci_slot'],
+ pci_vf_dev.address)
+ # MAC is not added as this is a VF
+ self.assertNotIn('device_mac_address', changed_vf_mig_vif.profile)
# Migrate vifs profile was unchanged for port ID uuids.port1.
# i.e 'profile' attribute does not exist.
self.assertNotIn('profile', unchanged_mig_vif)
+ # Migrate vifs profile was updated with pci_dev.address
+ # for port ID uuids.port2.
+ self.assertEqual(changed_pf_mig_vif.profile['pci_slot'],
+ pci_pf_dev.address)
+ # MAC is updated as this is a PF
+ self.assertEqual(
+ 'b4:96:91:34:f4:36',
+ changed_pf_mig_vif.profile['device_mac_address']
+ )
def test_get_updated_nw_info_with_pci_mapping(self):
old_dev = objects.PciDevice(address='0000:04:00.2')
diff --git a/nova/tests/unit/compute/test_flavors.py b/nova/tests/unit/compute/test_flavors.py
index 82434a9473..ba0eabc77d 100644
--- a/nova/tests/unit/compute/test_flavors.py
+++ b/nova/tests/unit/compute/test_flavors.py
@@ -196,7 +196,7 @@ class TestCreateFlavor(test.TestCase):
def test_rxtx_factor_must_be_within_sql_float_range(self):
# We do * 10 since this is an approximation and we need to make sure
- # the difference is noticeble.
+ # the difference is noticeable.
over_rxtx_factor = db_const.SQL_SP_FLOAT_MAX * 10
self.assertInvalidInput('flavor1', 64, 1, 120,
diff --git a/nova/tests/unit/compute/test_host_api.py b/nova/tests/unit/compute/test_host_api.py
index e4c310deb0..7f9e862057 100644
--- a/nova/tests/unit/compute/test_host_api.py
+++ b/nova/tests/unit/compute/test_host_api.py
@@ -14,8 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
import oslo_messaging as messaging
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/compute/test_instance_list.py b/nova/tests/unit/compute/test_instance_list.py
index e6e195e9cc..6544ddc801 100644
--- a/nova/tests/unit/compute/test_instance_list.py
+++ b/nova/tests/unit/compute/test_instance_list.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import instance_list
diff --git a/nova/tests/unit/compute/test_keypairs.py b/nova/tests/unit/compute/test_keypairs.py
index 7860f3d529..8822cb4522 100644
--- a/nova/tests/unit/compute/test_keypairs.py
+++ b/nova/tests/unit/compute/test_keypairs.py
@@ -14,13 +14,16 @@
# under the License.
"""Tests for keypair API."""
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from oslo_config import cfg
+from oslo_limit import fixture as limit_fixture
from nova.compute import api as compute_api
from nova import context
from nova import exception
+from nova.limit import local as local_limit
from nova.objects import keypair as keypair_obj
from nova import quota
from nova.tests.unit.compute import test_compute
@@ -119,25 +122,7 @@ class CreateImportSharedTestMixIn(object):
exc = self.assertRaises(exc_class, func, self.ctxt, self.ctxt.user_id,
name, *args)
- self.assertEqual(expected_message, str(exc))
-
- def assertInvalidKeypair(self, expected_message, name):
- msg = 'Keypair data is invalid: %s' % expected_message
- self.assertKeypairRaises(exception.InvalidKeypair, msg, name)
-
- def test_name_too_short(self):
- msg = ('Keypair name must be string and between 1 '
- 'and 255 characters long')
- self.assertInvalidKeypair(msg, '')
-
- def test_name_too_long(self):
- msg = ('Keypair name must be string and between 1 '
- 'and 255 characters long')
- self.assertInvalidKeypair(msg, 'x' * 256)
-
- def test_invalid_chars(self):
- msg = "Keypair name contains unsafe characters"
- self.assertInvalidKeypair(msg, '* BAD CHARACTERS! *')
+ self.assertIn(expected_message, str(exc))
def test_already_exists(self):
def db_key_pair_create_duplicate(context, keypair):
@@ -155,9 +140,51 @@ class CreateImportSharedTestMixIn(object):
return_value={'user': {
'key_pairs': CONF.quota.key_pairs}})
def test_quota_limit(self, mock_count_as_dict):
- msg = "Maximum number of key pairs exceeded"
+ msg = "Quota exceeded, too many key pairs."
self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo')
+ def _test_quota_during_recheck(self, mock_method, msg):
+ # Skip for import key pair due to bug 1959732.
+ if self.func_name == 'import_key_pair':
+ self.skipTest('bug/1959732: import_key_pair missing quota recheck')
+
+ self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo')
+ self.assertEqual(2, mock_method.call_count)
+
+ @mock.patch('nova.objects.Quotas.check_deltas')
+ def test_quota_during_recheck(self, mock_check):
+ """Simulate a race where this request initially has enough quota to
+ progress partially through the create path but then fails the quota
+ recheck because a parallel request filled up the quota first.
+ """
+ # First quota check succeeds, second (recheck) fails.
+ mock_check.side_effect = [None,
+ exception.OverQuota(overs='key_pairs')]
+ msg = "Quota exceeded, too many key pairs."
+ self._test_quota_during_recheck(mock_check, msg)
+
+ def test_quota_unified_limits(self):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 0}, {}))
+ msg = ("Resource %s is over limit" % local_limit.KEY_PAIRS)
+ self.assertKeypairRaises(exception.KeypairLimitExceeded, msg, 'foo')
+
+ @mock.patch('nova.limit.local.enforce_db_limit')
+ def test_quota_during_recheck_unified_limits(self, mock_enforce):
+ """Simulate a race where this request initially has enough quota to
+ progress partially through the create path but then fails the quota
+ recheck because a parallel request filled up the quota first.
+ """
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 100}, {}))
+ # First quota check succeeds, second (recheck) fails.
+ mock_enforce.side_effect = [
+ None, exception.KeypairLimitExceeded('oslo.limit message')]
+ msg = 'oslo.limit message'
+ self._test_quota_during_recheck(mock_enforce, msg)
+
class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
func_name = 'create_key_pair'
@@ -192,6 +219,27 @@ class CreateKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
self.assertRaises(processutils.ProcessExecutionError,
self._check_success)
+ def test_success_unified_limits(self):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 1}, {}))
+ self._check_success()
+
+ @mock.patch('nova.objects.Quotas.check_deltas')
+ def test_quota_recheck_disabled(self, mock_check):
+ self.flags(recheck_quota=False, group="quota")
+ self._check_success()
+ self.assertEqual(1, mock_check.call_count)
+
+ @mock.patch('nova.limit.local.enforce_db_limit')
+ def test_quota_recheck_disabled_unified_limits(self, mock_enforce):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.flags(recheck_quota=False, group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 1}, {}))
+ self._check_success()
+ self.assertEqual(1, mock_enforce.call_count)
+
class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
func_name = 'import_key_pair'
@@ -240,6 +288,27 @@ class ImportKeypairTestCase(KeypairAPITestCase, CreateImportSharedTestMixIn):
msg = u'Keypair data is invalid: failed to generate fingerprint'
self.assertEqual(msg, str(exc))
+ def test_success_unified_limits(self):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 1}, {}))
+ self._check_success()
+
+ @mock.patch('nova.objects.Quotas.check_deltas')
+ def test_quota_recheck_disabled(self, mock_check):
+ self.flags(recheck_quota=False, group="quota")
+ self._check_success()
+ self.assertEqual(1, mock_check.call_count)
+
+ @mock.patch('nova.limit.local.enforce_db_limit')
+ def test_quota_recheck_disabled_unified_limits(self, mock_enforce):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ self.flags(recheck_quota=False, group="quota")
+ self.useFixture(limit_fixture.LimitFixture(
+ {'server_key_pairs': 1}, {}))
+ self._check_success()
+ self.assertEqual(1, mock_enforce.call_count)
+
class GetKeypairTestCase(KeypairAPITestCase):
def test_success(self):
diff --git a/nova/tests/unit/compute/test_multi_cell_list.py b/nova/tests/unit/compute/test_multi_cell_list.py
index 6bb67a76b8..5906f69de2 100644
--- a/nova/tests/unit/compute/test_multi_cell_list.py
+++ b/nova/tests/unit/compute/test_multi_cell_list.py
@@ -13,7 +13,8 @@
from contextlib import contextmanager
import copy
import datetime
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import multi_cell_list
diff --git a/nova/tests/unit/compute/test_pci_placement_translator.py b/nova/tests/unit/compute/test_pci_placement_translator.py
new file mode 100644
index 0000000000..0592186e54
--- /dev/null
+++ b/nova/tests/unit/compute/test_pci_placement_translator.py
@@ -0,0 +1,291 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import ddt
+from oslo_utils.fixture import uuidsentinel as uuids
+from unittest import mock
+
+from nova.compute import pci_placement_translator as ppt
+from nova.compute import provider_tree
+from nova import exception
+from nova.objects import fields
+from nova.objects import pci_device
+from nova.pci import devspec
+from nova import test
+
+
+def dev(v, p):
+ return pci_device.PciDevice(vendor_id=v, product_id=p)
+
+
+# NOTE(gibi): Most of the nova.compute.pci_placement_translator module is
+# covered with functional tests in
+# nova.tests.functional.libvirt.test_pci_in_placement
+@ddt.ddt
+class TestTranslator(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ patcher = mock.patch(
+ "nova.compute.pci_placement_translator."
+ "_is_placement_tracking_enabled")
+ self.addCleanup(patcher.stop)
+ patcher.start()
+
+ def test_translator_skips_devices_without_matching_spec(self):
+ """As every PCI device in the PciTracker is created by matching a
+ PciDeviceSpec, the translator should always be able to look up the spec
+ for a device. But if it cannot then the device will be skipped and a
+ warning will be emitted.
+ """
+ pci_tracker = mock.Mock()
+ pci_tracker.pci_devs = pci_device.PciDeviceList(
+ objects=[
+ pci_device.PciDevice(
+ address="0000:81:00.0",
+ status=fields.PciDeviceStatus.AVAILABLE,
+ instance_uuid=None,
+ )
+ ]
+ )
+ # So we have a device but there is no spec for it
+ pci_tracker.dev_filter.get_devspec = mock.Mock(return_value=None)
+ pci_tracker.dev_filter.specs = []
+ # we expect that the provider_tree is not touched as the device
+ # without a spec is skipped; we assert that with the NonCallableMock
+ provider_tree = mock.NonCallableMock()
+
+ ppt.update_provider_tree_for_pci(
+ provider_tree, "fake-node", pci_tracker, {}, [])
+
+ self.assertIn(
+ "Device spec is not found for device 0000:81:00.0 in "
+ "[pci]device_spec. Ignoring device in Placement resource view. "
+ "This should not happen. Please file a bug.",
+ self.stdlog.logger.output
+ )
+
+ @ddt.unpack
+ @ddt.data(
+ (None, set()),
+ ("", set()),
+ ("a", {"CUSTOM_A"}),
+ ("a,b", {"CUSTOM_A", "CUSTOM_B"}),
+ ("HW_GPU_API_VULKAN", {"HW_GPU_API_VULKAN"}),
+ ("CUSTOM_FOO", {"CUSTOM_FOO"}),
+ ("custom_bar", {"CUSTOM_BAR"}),
+ ("custom-bar", {"CUSTOM_CUSTOM_BAR"}),
+ ("CUSTOM_a", {"CUSTOM_A"}),
+ ("a@!#$b123X", {"CUSTOM_A_B123X"}),
+ # Note that both trait names are normalized to the same trait
+ ("a!@b,a###b", {"CUSTOM_A_B"}),
+ )
+ def test_trait_normalization(self, trait_names, expected_traits):
+ self.assertEqual(
+ expected_traits,
+ ppt.get_traits(trait_names)
+ )
+
+ @ddt.unpack
+ @ddt.data(
+ (dev(v='1234', p='5678'), None, "CUSTOM_PCI_1234_5678"),
+ (dev(v='1234', p='5678'), "", "CUSTOM_PCI_1234_5678"),
+ (dev(v='1234', p='5678'), "PGPU", "PGPU"),
+ (dev(v='1234', p='5678'), "pgpu", "PGPU"),
+ (dev(v='1234', p='5678'), "foobar", "CUSTOM_FOOBAR"),
+ (dev(v='1234', p='5678'), "custom_foo", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "CUSTOM_foo", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "custom_FOO", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "CUSTOM_FOO", "CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "custom-foo", "CUSTOM_CUSTOM_FOO"),
+ (dev(v='1234', p='5678'), "a###b", "CUSTOM_A_B"),
+ (dev(v='123a', p='567b'), "", "CUSTOM_PCI_123A_567B"),
+ )
+ def test_resource_class_normalization(self, pci_dev, rc_name, expected_rc):
+ self.assertEqual(
+ expected_rc,
+ ppt.get_resource_class(
+ rc_name, pci_dev.vendor_id, pci_dev.product_id
+ ),
+ )
+
+ def test_dependent_device_pf_then_vf(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ pf = pci_device.PciDevice(
+ address="0000:81:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+
+ pv._add_dev(pf, {"resource_class": "foo"})
+ ex = self.assertRaises(
+ exception.PlacementPciDependentDeviceException,
+ pv._add_dev,
+ vf,
+ {"resource_class": "bar"}
+ )
+
+ self.assertEqual(
+ "Configuring both 0000:81:00.1 and 0000:81:00.0 in "
+ "[pci]device_spec is not supported. Either the parent PF or its "
+ "children VFs can be configured.",
+ str(ex),
+ )
+
+ def test_dependent_device_vf_then_pf(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ pf = pci_device.PciDevice(
+ address="0000:81:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ vf2 = pci_device.PciDevice(
+ address="0000:81:00.2",
+ parent_addr=pf.address,
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+
+ pv._add_dev(vf, {"resource_class": "foo"})
+ pv._add_dev(vf2, {"resource_class": "foo"})
+ ex = self.assertRaises(
+ exception.PlacementPciDependentDeviceException,
+ pv._add_dev,
+ pf,
+ {"resource_class": "bar"}
+ )
+
+ self.assertEqual(
+ "Configuring both 0000:81:00.0 and 0000:81:00.1,0000:81:00.2 in "
+ "[pci]device_spec is not supported. Either the parent PF or its "
+ "children VFs can be configured.",
+ str(ex),
+ )
+
+ def test_mixed_rc_for_sibling_vfs(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ vf1, vf2, vf3, vf4 = [
+ pci_device.PciDevice(
+ address="0000:81:00.%d" % f,
+ parent_addr="0000:71:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ for f in range(0, 4)
+ ]
+
+ pv._add_dev(vf1, {"resource_class": "a", "traits": "foo,bar,baz"})
+ # order is irrelevant
+ pv._add_dev(vf2, {"resource_class": "a", "traits": "foo,baz,bar"})
+ # but a missing trait is rejected
+ ex = self.assertRaises(
+ exception.PlacementPciMixedTraitsException,
+ pv._add_dev,
+ vf3,
+ {"resource_class": "a", "traits": "foo,bar"},
+ )
+ self.assertEqual(
+ "VFs from the same PF cannot be configured with different set of "
+ "'traits' in [pci]device_spec. We got "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_FOO for "
+ "0000:81:00.2 and "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_FOO "
+ "for 0000:81:00.0,0000:81:00.1.",
+ str(ex),
+ )
+ # as well as an additional trait
+ ex = self.assertRaises(
+ exception.PlacementPciMixedTraitsException,
+ pv._add_dev,
+ vf4,
+ {"resource_class": "a", "traits": "foo,bar,baz,extra"}
+ )
+ self.assertEqual(
+ "VFs from the same PF cannot be configured with different set of "
+ "'traits' in [pci]device_spec. We got "
+ "COMPUTE_MANAGED_PCI_DEVICE,CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_EXTRA,"
+ "CUSTOM_FOO for 0000:81:00.3 and COMPUTE_MANAGED_PCI_DEVICE,"
+ "CUSTOM_BAR,CUSTOM_BAZ,CUSTOM_FOO for 0000:81:00.0,0000:81:00.1.",
+ str(ex),
+ )
+
+ def test_translator_maps_pci_device_to_rp(self):
+ pv = ppt.PlacementView(
+ "fake-node", instances_under_same_host_resize=[])
+ vf = pci_device.PciDevice(
+ address="0000:81:00.1",
+ parent_addr="0000:71:00.0",
+ dev_type=fields.PciDeviceType.SRIOV_VF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ )
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+
+ pv._add_dev(vf, {})
+ pv._add_dev(pf, {})
+ pv.update_provider_tree(pt)
+
+ self.assertEqual(
+ pt.data("fake-node_0000:71:00.0").uuid, vf.extra_info["rp_uuid"]
+ )
+ self.assertEqual(
+ pt.data("fake-node_0000:72:00.0").uuid, pf.extra_info["rp_uuid"]
+ )
+
+ def test_update_provider_tree_for_pci_update_pools(self):
+ pt = provider_tree.ProviderTree()
+ pt.new_root("fake-node", uuids.compute_rp)
+ pf = pci_device.PciDevice(
+ address="0000:72:00.0",
+ parent_addr=None,
+ dev_type=fields.PciDeviceType.SRIOV_PF,
+ vendor_id="dead",
+ product_id="beef",
+ status=fields.PciDeviceStatus.AVAILABLE,
+ )
+ pci_tracker = mock.Mock()
+ pci_tracker.pci_devs = [pf]
+ pci_tracker.dev_filter.specs = [devspec.PciDeviceSpec({})]
+
+ ppt.update_provider_tree_for_pci(pt, 'fake-node', pci_tracker, {}, [])
+
+ pci_tracker.stats.populate_pools_metadata_from_assigned_devices.\
+ assert_called_once_with()
diff --git a/nova/tests/unit/compute/test_provider_config.py b/nova/tests/unit/compute/test_provider_config.py
index b9070bd218..384d465054 100644
--- a/nova/tests/unit/compute/test_provider_config.py
+++ b/nova/tests/unit/compute/test_provider_config.py
@@ -13,13 +13,14 @@
import copy
import ddt
import fixtures
+import importlib.metadata
import microversion_parse
import os
-
from unittest import mock
from oslo_utils.fixture import uuidsentinel
from oslotest import base
+from packaging import version
from nova.compute import provider_config
from nova import exception as nova_exc
@@ -118,6 +119,17 @@ class SchemaValidationTestCasesV1(SchemaValidationMixin):
@ddt.unpack
@ddt.file_data('provider_config_data/v1/validation_error_test_data.yaml')
def test_validation_errors(self, config, expected_messages):
+ # TODO(stephenfin): Drop this once we no longer support jsonschema 3.x
+ jsonschema_version = importlib.metadata.version('jsonschema')
+ if version.parse(jsonschema_version) < version.parse('4.0.0'):
+ if expected_messages == [
+ "should not be valid under {}",
+ "validating 'not' in schema['properties']['__source_file']",
+ ]:
+ expected_messages = [
+ "{} is not allowed for",
+ "validating 'not' in schema['properties']['__source_file']", # noqa: E501
+ ]
self.run_test_validation_errors(config, expected_messages)
@ddt.unpack
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index 51545df59f..919dcb8334 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -12,12 +12,14 @@
import copy
import datetime
+import ddt
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
-import mock
import os_resource_classes as orc
import os_traits
from oslo_config import cfg
+from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import units
@@ -62,11 +64,13 @@ _VIRT_DRIVER_AVAIL_RESOURCES = {
'hypervisor_hostname': _NODENAME,
'cpu_info': '',
'numa_topology': None,
+ 'uuid': uuids.cn1,
}
_COMPUTE_NODE_FIXTURES = [
objects.ComputeNode(
id=1,
+ deleted=False,
uuid=uuids.cn1,
host=_HOSTNAME,
vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
@@ -177,6 +181,7 @@ _NUMA_HOST_TOPOLOGIES = {
memory=_2MB,
cpu_usage=0,
memory_usage=0,
+ socket=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([1]), set([2])],
pinned_cpus=set()),
@@ -187,6 +192,7 @@ _NUMA_HOST_TOPOLOGIES = {
memory=_2MB,
cpu_usage=0,
memory_usage=0,
+ socket=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([3]), set([4])],
pinned_cpus=set())]),
@@ -584,7 +590,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_disabled(self, get_mock, migr_mock, get_cn_mock, pci_mock,
@@ -617,7 +623,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
@@ -641,8 +647,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'flavor',
'migration_context',
'resources'])
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
- _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
migr_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
_NODENAME)
@@ -669,7 +674,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
@@ -728,7 +733,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_reserved_disk_ram_and_cpu(
@@ -745,7 +750,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6GB avail - 1 GB reserved
@@ -770,7 +775,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_no_migrations(self, get_mock, migr_mock,
@@ -795,7 +800,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6 - 1 used
@@ -821,7 +826,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -861,7 +866,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5,
@@ -887,7 +892,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -924,7 +929,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -950,7 +955,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -985,7 +990,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1011,7 +1016,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
return_value=None)
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1054,7 +1059,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# 6 total - 1G existing - 5G new flav - 1G old flav
@@ -1082,7 +1087,7 @@ class TestUpdateAvailableResources(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1119,7 +1124,7 @@ class TestUpdateAvailableResources(BaseTestCase):
update_mock = self._update_available_resources()
- get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+ get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
@@ -1145,7 +1150,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1197,7 +1202,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@@ -1238,7 +1243,7 @@ class TestUpdateAvailableResources(BaseTestCase):
new=mock.Mock(return_value=False))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
new=mock.Mock(return_value=objects.PciDeviceList()))
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_check_resources_startup_fail(self, mock_get_instances,
@@ -1271,7 +1276,7 @@ class TestInitComputeNode(BaseTestCase):
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.Service.get_by_compute_host')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_no_op_init_compute_node(self, update_mock, get_mock, service_mock,
@@ -1294,14 +1299,14 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_loaded(self, update_mock, get_mock, create_mock,
pci_mock):
self._setup_rt()
- def fake_get_node(_ctx, host, node):
+ def fake_get_node(_ctx, uuid):
res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
return res
@@ -1311,85 +1316,67 @@ class TestInitComputeNode(BaseTestCase):
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.cn1)
self.assertFalse(create_mock.called)
self.assertFalse(update_mock.called)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_rebalanced(self, update_mock, get_mock, create_mock,
- pci_mock, get_by_hypervisor_mock):
+ pci_mock):
self._setup_rt()
self.driver_mock.rebalances_nodes = True
cn = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
cn.host = "old-host"
- def fake_get_all(_ctx, nodename):
- return [cn]
+ def fake_get_node(_ctx, uuid):
+ return cn
- get_mock.side_effect = exc.NotFound
- get_by_hypervisor_mock.side_effect = fake_get_all
+ get_mock.side_effect = fake_get_node
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- get_by_hypervisor_mock.assert_called_once_with(mock.sentinel.ctx,
- _NODENAME)
+ get_mock.assert_called_once_with(mock.sentinel.ctx, uuids.cn1)
create_mock.assert_not_called()
update_mock.assert_called_once_with(mock.sentinel.ctx, cn)
self.assertEqual(_HOSTNAME, self.rt.compute_nodes[_NODENAME].host)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
- self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock)
+ create_mock):
+ self._test_compute_node_created(update_mock, get_mock, create_mock)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty_rebalance(self, update_mock,
get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = []
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
- @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_too_many(self, update_mock, get_mock,
- create_mock,
- get_by_hypervisor_mock):
- get_by_hypervisor_mock.return_value = ["fake_node_1", "fake_node_2"]
+ create_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=True)
def _test_compute_node_created(self, update_mock, get_mock, create_mock,
- get_by_hypervisor_mock,
rebalances_nodes=False):
self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0)
@@ -1450,13 +1437,9 @@ class TestInitComputeNode(BaseTestCase):
self.rt._init_compute_node(mock.sentinel.ctx, resources))
cn = self.rt.compute_nodes[_NODENAME]
- get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
- _NODENAME)
- if rebalances_nodes:
- get_by_hypervisor_mock.assert_called_once_with(
- mock.sentinel.ctx, _NODENAME)
- else:
- get_by_hypervisor_mock.assert_not_called()
+ get_mock.assert_called_once_with(mock.sentinel.ctx,
+ uuids.compute_node_uuid)
+
create_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn))
setup_pci.assert_called_once_with(mock.sentinel.ctx, cn, resources)
@@ -1464,7 +1447,7 @@ class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_setup_pci_tracker')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
side_effect=exc.ComputeHostNotFound(host=_HOSTNAME))
@mock.patch('nova.objects.ComputeNode.create',
side_effect=(test.TestingException, None))
@@ -1487,14 +1470,14 @@ class TestInitComputeNode(BaseTestCase):
self.assertTrue(self.rt._init_compute_node(ctxt, resources))
self.assertIn(_NODENAME, self.rt.compute_nodes)
mock_get.assert_has_calls([mock.call(
- ctxt, _HOSTNAME, _NODENAME)] * 2)
+ ctxt, uuids.cn_uuid)] * 2)
self.assertEqual(2, mock_create.call_count)
mock_setup_pci.assert_called_once_with(
ctxt, test.MatchType(objects.ComputeNode), resources)
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_node_removed(self, update_mock, get_mock,
@@ -1510,7 +1493,83 @@ class TestInitComputeNode(BaseTestCase):
self.assertNotIn(_NODENAME, self.rt.stats)
self.assertNotIn(_NODENAME, self.rt.old_resources)
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'fake-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Host is the same, no _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_not_called()
+
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_update')
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_setup_pci_tracker')
+ def test_undelete_node_move_host(self, mock_pci, mock_update):
+ self._setup_rt()
+ node = mock.MagicMock()
+ node.deleted = True
+ node.uuid = str(uuids.host1)
+ node.host = 'old-host'
+ context_mock = mock.MagicMock()
+ resources = {'hypervisor_hostname': 'fake-host',
+ 'uuid': str(uuids.host1)}
+ with mock.patch.object(self.rt, '_get_compute_node') as getcn:
+ getcn.return_value = node
+
+ # _init_compute_node() should return False to indicate that
+ # it found an existing node
+ self.assertFalse(
+ self.rt._init_compute_node(context_mock, resources))
+
+ # Node should be undeleted and saved
+ self.assertFalse(node.deleted)
+ self.assertIsNone(node.deleted_at)
+ node.save.assert_called_once_with()
+
+ # Our host changed, so we should have the updated value and have
+ # called _update()
+ self.assertEqual('fake-host', node.host)
+ mock_update.assert_called()
+
+ @mock.patch.object(resource_tracker.ResourceTracker,
+ '_get_compute_node',
+ return_value=None)
+ @mock.patch('nova.objects.compute_node.ComputeNode.create')
+ def test_create_failed_conflict(self, mock_create, mock_getcn):
+ self._setup_rt()
+ resources = {'hypervisor_hostname': 'node1',
+ 'uuid': uuids.node1}
+ mock_create.side_effect = exc.DuplicateRecord(target='foo')
+ self.assertRaises(exc.InvalidConfiguration,
+ self.rt._init_compute_node,
+ mock.MagicMock,
+ resources)
+
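The three tests added above (test_undelete_node, test_undelete_node_move_host and
test_create_failed_conflict) pin down how _init_compute_node is now expected to behave
when the node is looked up by UUID. The snippet below is a rough sketch reconstructed
only from those assertions, not the actual ResourceTracker code; the function name and
simplified signature are illustrative:

    def init_compute_node_sketch(rt, context, resources):
        # Look the record up by its stable UUID instead of (host, nodename).
        node = rt._get_compute_node(context, resources['uuid'])
        if node is not None:
            if node.deleted:
                # test_undelete_node: a soft-deleted record is resurrected
                node.deleted = False
                node.deleted_at = None
                node.save()
            if node.host != resources['hypervisor_hostname']:
                # test_undelete_node_move_host: adopting the node on a new
                # host also refreshes placement via _update()
                node.host = resources['hypervisor_hostname']
                rt._update(context, node)
            return False  # an existing node was found, nothing was created
        # test_create_failed_conflict: a DuplicateRecord raised by create()
        # is surfaced as InvalidConfiguration rather than retried.
        ...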
+@ddt.ddt
class TestUpdateComputeNode(BaseTestCase):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait', new=mock.Mock())
@@ -1577,9 +1636,14 @@ class TestUpdateComputeNode(BaseTestCase):
self.rt._update(mock.sentinel.ctx, new_compute)
save_mock.assert_called_once_with()
+ @mock.patch('nova.objects.ComputeNode.save', new=mock.Mock())
+ @mock.patch(
+ 'nova.pci.stats.PciDeviceStats.has_remote_managed_device_pools',
+ return_value=True)
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait')
- def test_existing_node_capabilities_as_traits(self, mock_sync_disabled):
+ def test_existing_node_capabilities_as_traits(
+ self, mock_sync_disabled, mock_has_remote_managed_device_pools):
"""The capabilities_as_traits() driver method returns traits
information for a node/provider.
"""
@@ -1587,6 +1651,15 @@ class TestUpdateComputeNode(BaseTestCase):
rc = self.rt.reportclient
rc.set_traits_for_provider = mock.MagicMock()
+ # TODO(dmitriis): Remove once the PCI tracker is always created
+ # upon the resource tracker initialization.
+ with mock.patch.object(
+ objects.PciDeviceList, 'get_by_compute_node',
+ return_value=objects.PciDeviceList()
+ ):
+ self.rt.pci_tracker = pci_manager.PciDevTracker(
+ mock.sentinel.ctx, _COMPUTE_NODE_FIXTURES[0])
+
# Emulate a driver that has implemented the update_provider_tree()
# virt driver method
self.driver_mock.update_provider_tree = mock.Mock()
@@ -1694,12 +1767,18 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertEqual(exp_inv, ptree.data(new_compute.uuid).inventory)
mock_sync_disabled.assert_called_once()
+ @ddt.data(
+ exc.ResourceProviderUpdateConflict(
+ uuid='uuid', generation=42, error='error'),
+ exc.PlacementReshapeConflict(error='error'),
+ )
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_resource_change', return_value=False)
- def test_update_retry_success(self, mock_resource_change,
- mock_sync_disabled):
+ def test_update_retry_success(
+ self, exc, mock_resource_change, mock_sync_disabled
+ ):
self._setup_rt()
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
@@ -1713,9 +1792,7 @@ class TestUpdateComputeNode(BaseTestCase):
self.driver_mock.update_provider_tree.side_effect = lambda *a: None
ufpt_mock = self.rt.reportclient.update_from_provider_tree
- ufpt_mock.side_effect = (
- exc.ResourceProviderUpdateConflict(
- uuid='uuid', generation=42, error='error'), None)
+ ufpt_mock.side_effect = (exc, None)
self.rt._update(mock.sentinel.ctx, new_compute)
@@ -1753,7 +1830,221 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertEqual(4, ufpt_mock.call_count)
self.assertEqual(4, mock_sync_disabled.call_count)
# The retry is restricted to _update_to_placement
- self.assertEqual(1, mock_resource_change.call_count)
+ self.assertEqual(0, mock_resource_change.call_count)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting(self, mock_update_provider_tree_for_pci):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and that call did not change any allocations so
+ update_from_provider_tree called without triggering reshape
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting did not touch allocations
+ mock_update_provider_tree_for_pci.return_value = False
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(mock.sentinel.ctx, ptree, allocations=None)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_reshape(
+ self, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and that call changed allocations so
+ update_from_provider_tree called with allocations to trigger reshape
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting changed some allocations
+ mock_update_provider_tree_for_pci.return_value = True
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(
+ mock.sentinel.ctx, ptree, allocations=mock_get_allocs.return_value)
+
+ @ddt.data(True, False)
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_driver_reshape(
+ self, pci_reshape, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker first called the
+ driver.update_provider_tree and that needed reshape so the allocations
+ are pulled. Then independently of update_provider_tree_for_pci the
+ update_from_provider_tree is called with the allocations to trigger
+ reshape in placement
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that the driver requests reshape
+ self.driver_mock.update_provider_tree.side_effect = [
+ exc.ReshapeNeeded, None]
+ mock_update_provider_tree_for_pci.return_value = pci_reshape
+
+ self.rt._update(mock.sentinel.ctx, compute_obj, startup=True)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(
+ mock.sentinel.ctx, ptree, allocations=mock_get_allocs.return_value)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch(
+ 'nova.compute.pci_placement_translator.update_provider_tree_for_pci')
+ def test_update_pci_reporting_same_host_resize(
+ self, mock_update_provider_tree_for_pci
+ ):
+ """Assert that resource tracker calls update_provider_tree_for_pci
+ and with the list of instances that are being resized to the same
+ host.
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ ptree = self._setup_ptree(compute_obj)
+ # simulate that pci reporting did not touch allocations
+ mock_update_provider_tree_for_pci.return_value = False
+ self.rt.tracked_migrations = {
+ uuids.inst1: objects.Migration(
+ migration_type="resize",
+ source_node="fake-node",
+ dest_node="fake-node",
+ instance_uuid=uuids.inst1,
+ ),
+ uuids.inst2: objects.Migration(
+ migration_type="evacuation",
+ source_node="fake-node",
+ dest_node="fake-node",
+ instance_uuid=uuids.inst2,
+ ),
+ uuids.inst3: objects.Migration(
+ migration_type="resize",
+ source_node="fake-node1",
+ dest_node="fake-node2",
+ instance_uuid=uuids.inst3,
+ ),
+ }
+
+ self.rt._update(mock.sentinel.ctx, compute_obj)
+
+ mock_get_allocs = (
+ self.report_client_mock.get_allocations_for_provider_tree)
+ mock_get_allocs.assert_called_once_with(
+ mock.sentinel.ctx, compute_obj.hypervisor_hostname)
+ mock_update_provider_tree_for_pci.assert_called_once_with(
+ ptree,
+ compute_obj.hypervisor_hostname,
+ self.rt.pci_tracker,
+ mock_get_allocs.return_value,
+ [uuids.inst1],
+ )
+ upt = self.rt.reportclient.update_from_provider_tree
+ upt.assert_called_once_with(mock.sentinel.ctx, ptree, allocations=None)
+
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_sync_compute_service_disabled_trait',
+ new=mock.Mock()
+ )
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker._resource_change',
+ new=mock.Mock(return_value=False)
+ )
+ def test_update_pci_reporting_allocation_in_use_error_propagated(self):
+ """Assert that if the pci placement reporting code tries to remove
+ inventory with allocation from placement due to invalid hypervisor
+ or [pci]device_spec reconfiguration then the InventoryInUse error from
+ placement is propagated and makes the compute startup fail.
+ """
+ compute_obj = _COMPUTE_NODE_FIXTURES[0].obj_clone()
+ self._setup_rt()
+ self.rt.reportclient.update_from_provider_tree.side_effect = (
+ exc.InventoryInUse(
+ resource_class="FOO", resource_provider="bar"))
+
+ self.assertRaises(
+ exc.PlacementPciException,
+ self.rt._update,
+ mock.sentinel.ctx,
+ compute_obj,
+ startup=True,
+ )
@mock.patch('nova.objects.Service.get_by_compute_host',
return_value=objects.Service(disabled=True))
@@ -1807,6 +2098,10 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertIn('Unable to find services table record for nova-compute',
mock_log_error.call_args[0][0])
+ @mock.patch(
+ 'nova.compute.resource_tracker.ResourceTracker.'
+ '_update_to_placement',
+ new=mock.Mock())
def test_update_compute_node_save_fails_restores_old_resources(self):
"""Tests the scenario that compute_node.save() fails and the
old_resources value for the node is restored to its previous value
@@ -1938,14 +2233,19 @@ class TestInstanceClaim(BaseTestCase):
self.rt.compute_nodes = {}
self.assertTrue(self.rt.disabled(_NODENAME))
- with mock.patch.object(self.instance, 'save'):
- claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance,
- _NODENAME, self.allocations, None)
+ # Reset all changes to the instance to make sure that we can detect
+ # any manipulation after the failure.
+ self.instance.obj_reset_changes(recursive=True)
- self.assertEqual(self.rt.host, self.instance.host)
- self.assertEqual(self.rt.host, self.instance.launched_on)
- self.assertEqual(_NODENAME, self.instance.node)
- self.assertIsInstance(claim, claims.NopClaim)
+ with mock.patch.object(self.instance, 'save') as mock_save:
+ self.assertRaises(exc.ComputeResourcesUnavailable,
+ self.rt.instance_claim,
+ mock.sentinel.ctx, self.instance,
+ _NODENAME, self.allocations, None)
+ mock_save.assert_not_called()
+
+ # Make sure the instance was not touched by the failed claim process
+ self.assertEqual(set(), self.instance.obj_what_changed())
@mock.patch('nova.compute.utils.is_volume_backed_instance')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@@ -2111,26 +2411,45 @@ class TestInstanceClaim(BaseTestCase):
# PCI devices on the host and sends an updated pci_device_pools
# attribute of the ComputeNode object.
+ self.flags(
+ group="pci",
+ device_spec=[
+ jsonutils.dumps({"vendor_id": "0001", "product_id": "0002"})
+ ],
+ )
+ pci_dev = pci_device.PciDevice.create(
+ None,
+ dev_dict={
+ "compute_node_id": 1,
+ "address": "0000:81:00.0",
+ "product_id": "0002",
+ "vendor_id": "0001",
+ "numa_node": 0,
+ "dev_type": obj_fields.PciDeviceType.STANDARD,
+ "status": obj_fields.PciDeviceStatus.AVAILABLE,
+ "parent_addr": None,
+ },
+ )
+
+ pci_dev.instance_uuid = None
+ pci_devs = [pci_dev]
+
# TODO(jaypipes): Remove once the PCI tracker is always created
# upon the resource tracker being initialized...
with mock.patch.object(
objects.PciDeviceList, 'get_by_compute_node',
- return_value=objects.PciDeviceList()
+ return_value=objects.PciDeviceList(objects=pci_devs)
):
self.rt.pci_tracker = pci_manager.PciDevTracker(
mock.sentinel.ctx, _COMPUTE_NODE_FIXTURES[0])
- pci_dev = pci_device.PciDevice.create(
- None, fake_pci_device.dev_dict)
- pci_devs = [pci_dev]
- self.rt.pci_tracker.pci_devs = objects.PciDeviceList(objects=pci_devs)
-
request = objects.InstancePCIRequest(count=1,
- spec=[{'vendor_id': 'v', 'product_id': 'p'}])
+ spec=[{'vendor_id': '0001', 'product_id': '0002'}])
pci_requests = objects.InstancePCIRequests(
requests=[request],
instance_uuid=self.instance.uuid)
self.instance.pci_requests = pci_requests
+ self.instance.pci_devices = objects.PciDeviceList()
check_bfv_mock.return_value = False
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
@@ -2142,7 +2461,20 @@ class TestInstanceClaim(BaseTestCase):
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
- 'pci_device_pools': objects.PciDevicePoolList(),
+ 'pci_device_pools': objects.PciDevicePoolList(
+ objects=[
+ objects.PciDevicePool(
+ vendor_id='0001',
+ product_id='0002',
+ numa_node=0,
+ tags={
+ 'dev_type': 'type-PCI',
+ 'address': '0000:81:00.0'
+ },
+ count=0
+ )
+ ]
+ ),
'stats': {
'io_workload': 0,
'num_instances': 1,
@@ -2159,7 +2491,8 @@ class TestInstanceClaim(BaseTestCase):
self.allocations, None)
cn = self.rt.compute_nodes[_NODENAME]
update_mock.assert_called_once_with(self.elevated, cn)
- pci_stats_mock.assert_called_once_with([request])
+ pci_stats_mock.assert_called_once_with(
+ [request], provider_mapping=None)
self.assertTrue(obj_base.obj_equal_prims(expected, cn))
@mock.patch('nova.compute.utils.is_volume_backed_instance',
@@ -2363,7 +2696,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2467,7 +2800,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid',
return_value=_COMPUTE_NODE_FIXTURES[0])
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error',
return_value=[])
@@ -2639,7 +2972,7 @@ class TestResize(BaseTestCase):
@mock.patch('nova.pci.manager.PciDevTracker.claim_instance')
@mock.patch('nova.pci.request.get_pci_requests_from_flavor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node')
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2809,7 +3142,7 @@ class TestResize(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -2941,7 +3274,7 @@ class TestRebuild(BaseTestCase):
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
- @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+ @mock.patch('nova.objects.ComputeNode.get_by_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.ComputeNode.save')
@@ -4046,7 +4379,7 @@ class ProviderConfigTestCases(BaseTestCase):
# add the same trait in p_tree and provider config
# for testing ignoring CUSTOM trait code logic.
- # If a programmer accidently forgets to ignore (substract)
+ # If a programmer accidentally forgets to ignore (subtract)
# existing custom traits, this test case will fail as we only expect
# "EXCEPTION_TRAIT" showed in ValueError exception rather than
# "EXCEPTION_TRAIT,CUSTOM_IGNORE_TRAIT"
@@ -4056,7 +4389,7 @@ class ProviderConfigTestCases(BaseTestCase):
expected = ("Provider config 'test_provider_config.yaml' attempts to "
"define a trait that is owned by the virt driver or "
- "specified via the placment api. Invalid traits '" +
+ "specified via the placement api. Invalid traits '" +
ex_trait + "' must be removed from "
"'test_provider_config.yaml'.")
@@ -4192,9 +4525,9 @@ class TestCleanComputeNodeCache(BaseTestCase):
invalid_nodename = "invalid-node"
self.rt.compute_nodes[_NODENAME] = self.compute
self.rt.compute_nodes[invalid_nodename] = mock.sentinel.compute
- with mock.patch.object(
- self.rt.reportclient, "invalidate_resource_provider",
- ) as mock_invalidate:
- self.rt.clean_compute_node_cache([self.compute])
- mock_remove.assert_called_once_with(invalid_nodename)
- mock_invalidate.assert_called_once_with(invalid_nodename)
+ mock_invalidate = self.rt.reportclient.invalidate_resource_provider
+
+ self.rt.clean_compute_node_cache([self.compute])
+
+ mock_remove.assert_called_once_with(invalid_nodename)
+ mock_invalidate.assert_called_once_with(invalid_nodename)
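TestUpdateComputeNode is now decorated with @ddt.ddt so the placement-conflict retry
test runs once for ResourceProviderUpdateConflict and once for PlacementReshapeConflict.
For reference, a minimal, self-contained example of that data-driven pattern (generic
code, not taken from nova; the class and method names are invented):

    import unittest
    from unittest import mock

    import ddt

    @ddt.ddt
    class RetryExample(unittest.TestCase):

        @ddt.data(ValueError('conflict'), KeyError('reshape'))
        def test_retries_once(self, error):
            # ddt generates one test per datum and passes it as the extra
            # positional argument, just like the exception instances fed to
            # test_update_retry_success above.
            target = mock.Mock(side_effect=[error, 'ok'])
            result = None
            for _ in range(2):
                try:
                    result = target()
                    break
                except (ValueError, KeyError):
                    continue
            self.assertEqual('ok', result)
            self.assertEqual(2, target.call_count)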
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
index f062d5f45e..6f78678a92 100644
--- a/nova/tests/unit/compute/test_rpcapi.py
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -16,7 +16,8 @@
Unit Tests for nova.compute.rpcapi
"""
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -834,7 +835,9 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
bdms=[], instance=self.fake_instance_obj, host='new_host',
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
preserve_ephemeral=True, migration=None, node=None,
- limits=None, request_spec=None, accel_uuids=[], version='6.0')
+ limits=None, request_spec=None, accel_uuids=[],
+ reimage_boot_volume=False, target_state=None,
+ version='6.2')
def test_rebuild_instance_old_rpcapi(self):
# With rpcapi < 5.12, accel_uuids must be dropped in the client call.
@@ -861,20 +864,95 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
'migration': None,
'limits': None
}
+ # Pass reimage_boot_volume to the client call...
compute_api.rebuild_instance(
ctxt, instance=self.fake_instance_obj,
accel_uuids=['938af7f9-f136-4e5a-bdbe-3b6feab54311'],
- node=None, host=None, **rebuild_args)
+ node=None, host=None, reimage_boot_volume=False,
+ target_state=None, **rebuild_args)
- mock_client.can_send_version.assert_has_calls([mock.call('6.0'),
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2'),
+ mock.call('6.1'),
+ mock.call('6.0'),
mock.call('5.12')])
mock_client.prepare.assert_called_with(
server=self.fake_instance_obj.host, version='5.0')
+ # ...and assert that it does not show up on the wire before 6.1
mock_cctx.cast.assert_called_with( # No accel_uuids
ctxt, 'rebuild_instance',
instance=self.fake_instance_obj,
scheduled_node=None, **rebuild_args)
+ def test_rebuild_instance_vol_backed_old_rpcapi(self):
+ # With rpcapi < 6.1, if reimage_boot_volume is True then we
+ # should raise an error.
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ compute_api = compute_rpcapi.ComputeAPI()
+ compute_api.router.client = mock.Mock()
+ mock_client = mock.MagicMock()
+ compute_api.router.client.return_value = mock_client
+ # Force can_send_version to [False, False, True, True], so that the
+ # 6.0 version is used.
+ mock_client.can_send_version.side_effect = [False, False, True, True]
+ mock_cctx = mock.MagicMock()
+ mock_client.prepare.return_value = mock_cctx
+ rebuild_args = {
+ 'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': uuids.image_ref,
+ 'orig_image_ref': uuids.orig_image_ref,
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'request_spec': None,
+ 'migration': None,
+ 'limits': None,
+ 'accel_uuids': [],
+ 'reimage_boot_volume': True,
+ 'target_state': None,
+ }
+ self.assertRaises(
+ exception.NovaException, compute_api.rebuild_instance,
+ ctxt, instance=self.fake_instance_obj,
+ node=None, host=None, **rebuild_args)
+ mock_client.can_send_version.assert_has_calls([mock.call('6.2')])
+
+ def test_rebuild_instance_evacuate_old_rpcapi(self):
+ # With rpcapi < 6.2, an evacuation request should raise an error.
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ compute_api = compute_rpcapi.ComputeAPI()
+ compute_api.router.client = mock.Mock()
+ mock_client = mock.MagicMock()
+ compute_api.router.client.return_value = mock_client
+ # Force can_send_version to return False.
+ mock_client.can_send_version.return_value = False
+ mock_cctx = mock.MagicMock()
+ mock_client.prepare.return_value = mock_cctx
+ rebuild_args = {
+ 'new_pass': 'admin_password',
+ 'injected_files': 'files_to_inject',
+ 'image_ref': uuids.image_ref,
+ 'orig_image_ref': uuids.orig_image_ref,
+ 'orig_sys_metadata': 'orig_sys_meta',
+ 'bdms': {},
+ 'recreate': False,
+ 'on_shared_storage': False,
+ 'preserve_ephemeral': False,
+ 'request_spec': None,
+ 'migration': None,
+ 'limits': None,
+ 'accel_uuids': [],
+ 'reimage_boot_volume': True,
+ 'target_state': 'stopped',
+ }
+ self.assertRaises(
+ exception.UnsupportedRPCVersion,
+ compute_api.rebuild_instance,
+ ctxt, instance=self.fake_instance_obj,
+ node=None, host=None, **rebuild_args)
+
def test_reserve_block_device_name(self):
self.flags(long_rpc_timeout=1234)
self._test_compute_api('reserve_block_device_name', 'call',
@@ -1237,7 +1315,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_version_cap_all_cells_no_access(self, mock_allcells, mock_minver,
mock_log_error):
"""Tests a scenario where nova-compute is configured with a connection
- to the API database and fails trying to get the minium nova-compute
+ to the API database and fails trying to get the minimum nova-compute
service version across all cells because nova-compute is configured to
not allow direct database access.
"""
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index a50b4ca4de..62321bddec 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import eventlet
-import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -24,6 +25,7 @@ from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
+from nova import context
from nova.db.main import api as db
from nova import exception
from nova.network import neutron as neutron_api
@@ -207,6 +209,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
instance = self._shelve_offload(clean_shutdown=False)
mock_power_off.assert_called_once_with(instance, 0, 0)
+ @mock.patch.object(neutron_api.API, 'unbind_ports')
@mock.patch.object(compute_utils, 'EventReporter')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(nova.compute.manager.ComputeManager,
@@ -223,7 +226,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
def _shelve_offload(self, mock_notify, mock_notify_instance_usage,
mock_get_power_state, mock_update_resource_tracker,
mock_delete_alloc, mock_terminate, mock_get_bdms,
- mock_event, clean_shutdown=True):
+ mock_event, mock_unbind_ports, clean_shutdown=True):
host = 'fake-mini'
instance = self._create_fake_instance_obj(params={'host': host})
instance.task_state = task_states.SHELVING
@@ -276,10 +279,13 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
instance.uuid,
graceful_exit=False)
+ mock_unbind_ports.assert_called_once_with(
+ self.context, mock.ANY, detach=False)
+
return instance
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name',
+ 'update_pci_request_with_placement_allocations',
new=mock.NonCallableMock())
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.utils.notify_about_instance_action')
@@ -629,7 +635,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch('nova.network.neutron.API.setup_instance_network_on_host')
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
def test_unshelve_with_resource_request(
self, mock_update_pci, mock_setup_network):
requested_res = [objects.RequestGroup(
@@ -640,7 +646,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.compute.unshelve_instance(
self.context, instance, image=None,
- filter_properties={}, node='fake-node', request_spec=request_spec,
+ filter_properties={}, node='fakenode2', request_spec=request_spec,
accel_uuids=[])
mock_update_pci.assert_called_once_with(
@@ -653,7 +659,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
@mock.patch('nova.network.neutron.API.setup_instance_network_on_host',
new=mock.NonCallableMock())
@mock.patch('nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
def test_unshelve_with_resource_request_update_raises(
self, mock_update_pci):
requested_res = [objects.RequestGroup(
@@ -694,7 +700,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.assertRaises(test.TestingException,
self.compute.unshelve_instance, self.context, instance,
image=shelved_image, filter_properties={},
- node='fake-node', request_spec=fake_spec, accel_uuids=[])
+ node='fakenode2', request_spec=fake_spec, accel_uuids=[])
self.assertEqual(instance.image_ref, initial_image_ref)
@mock.patch.object(objects.InstanceList, 'get_by_filters')
@@ -849,9 +855,67 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
exclude_states = set()
return vm_state - exclude_states
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
+ 'aggregate_add_host')
+ @mock.patch('nova.availability_zones.get_availability_zones')
+ def _create_host_inside_az(
+ self,
+ ctxt,
+ host,
+ az,
+ mock_az,
+ mock_aggregate,
+ ):
+
+ self.api = compute_api.AggregateAPI()
+ mock_az.return_value = [az]
+
+ cells = objects.CellMappingList.get_all(ctxt)
+ cell = cells[0]
+ with context.target_cell(ctxt, cell) as cctxt:
+ s = objects.Service(context=cctxt,
+ host=host,
+ binary='nova-compute',
+ topic='compute',
+ report_count=0)
+ s.create()
+
+ hm = objects.HostMapping(context=ctxt,
+ cell_mapping=cell,
+ host=host)
+ hm.create()
+
+ self._init_aggregate_with_host(None, 'fake_aggregate1',
+ az, host)
+
+ def _create_request_spec_for_initial_az(self, az):
+ fake_spec = objects.RequestSpec()
+ fake_spec.availability_zone = az
+ return fake_spec
+
+ def _assert_unshelving_and_request_spec_az_and_host(
+ self,
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ ):
+ mock_get_by_instance_uuid.assert_called_once_with(context,
+ instance.uuid)
+
+ mock_unshelve.assert_called_once_with(context, instance, fake_spec)
+
+ self.assertEqual(instance.task_state, task_states.UNSHELVING)
+ self.assertEqual(fake_spec.availability_zone, fake_zone)
+ if fake_host:
+ self.assertEqual(fake_spec.requested_destination.host, fake_host)
+
def _test_shelve(self, vm_state=vm_states.ACTIVE, boot_from_volume=False,
clean_shutdown=True):
- # Ensure instance can be shelved.
+
params = dict(task_state=None, vm_state=vm_state, display_name='vm01')
fake_instance = self._create_fake_instance_obj(params=params)
instance = fake_instance
@@ -988,12 +1052,14 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
return instance
+ @mock.patch.object(objects.RequestSpec, 'save')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
- def test_unshelve(self, get_by_instance_uuid):
+ def test_unshelve(self, get_by_instance_uuid, fake_save):
# Ensure instance can be unshelved.
instance = self._get_specify_state_instance(vm_states.SHELVED)
fake_spec = objects.RequestSpec()
+ fake_spec.availability_zone = None
get_by_instance_uuid.return_value = fake_spec
with mock.patch.object(self.compute_api.compute_task_api,
'unshelve_instance') as unshelve:
@@ -1116,24 +1182,558 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
mock_get.assert_called_once_with(self.context, uuids.volume_id)
- @mock.patch.object(compute_api.API, '_validate_unshelve_az')
+# Next tests attempt to check the following behavior
+# +----------+---------------------------+-------+----------------------------+
+# | Boot | Unshelve after offload AZ | Host | Result |
+# +==========+===========================+=======+============================+
+# | No AZ | No AZ or AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | Host1 | Verify that host1 in AZ1, |
+# | | | | or (1). Schedule to |
+# | | | | host1, reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | Host1 | If host1 is in AZ1, |
+# | | | | then schedule to host1, |
+# | | | | reqspec.AZ="AZ1", otherwise|
+# | | | | reject the request (1) |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | No | Schedule to AZ2, |
+# | | | | reqspec.AZ="AZ2" |
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | Host1 | If host1 in AZ2 then |
+# | | | | schedule to host1, |
+# | | | | reqspec.AZ="AZ2", |
+# | | | | otherwise reject (1) |
+# +----------+---------------------------+-------+----------------------------+
+#
+# (1) Check at the api and return an error.
+#
+#
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
@mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
@mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
- def test_specified_az_unshelve(self, get_by_instance_uuid,
- mock_save, mock_validate_unshelve_az):
- # Ensure instance can be unshelved.
+ def test_unshelve_without_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
instance = self._get_specify_state_instance(
vm_states.SHELVED_OFFLOADED)
- new_az = "west_az"
- fake_spec = objects.RequestSpec()
- fake_spec.availability_zone = "fake-old-az"
- get_by_instance_uuid.return_value = fake_spec
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | No AZ or AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance, host=fake_host)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_newaz(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance, new_az=fake_zone)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | No AZ | AZ="AZ1" | Host1 | Verify that host1 in AZ1, |
+# | | | | or (1). Schedule to |
+# | | | | host1, reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_newaz_and_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=fake_zone, host=fake_host
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_without_az_to_newaz_and_host_invalid(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(None)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ exc = self.assertRaises(
+ nova.exception.UnshelveHostNotInAZ,
+ self.compute_api.unshelve,
+ context,
+ instance,
+ new_az='avail_zone1',
+ host='fake_mini'
+ )
+
+ self.assertIn(
+ exc.message,
+ 'Host "fake_mini" is not in the availability zone "avail_zone1".'
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | No | Schedule to AZ1, |
+# | | | | reqspec.AZ="AZ1" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | No | Free scheduling, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_unpin_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(context, instance, new_az=None)
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | No AZ | Host1 | If host1 is in AZ1, |
+# | | | | then schedule to host1, |
+# | | | | reqspec.AZ="AZ1", otherwise|
+# | | | | reject the request (1) |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_host_in_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
- self.compute_api.unshelve(self.context, instance, new_az=new_az)
+ self.compute_api.unshelve(context, instance, host=fake_host)
- mock_save.assert_called_once_with()
- self.assertEqual(new_az, fake_spec.availability_zone)
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_invalid_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ exc = self.assertRaises(
+ nova.exception.UnshelveHostNotInAZ,
+ self.compute_api.unshelve,
+ context,
+ instance,
+ host='fake_mini'
+ )
+
+ self.assertIn(
+ exc.message,
+ 'Host "fake_mini" is not in the availability zone "avail_zone1".'
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ=null | Host1 | Schedule to host1, |
+# | | | | reqspec.AZ=None |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_host_unpin_az(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
- mock_validate_unshelve_az.assert_called_once_with(
- self.context, instance, new_az)
+ fake_spec = self._create_request_spec_for_initial_az(fake_zone)
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=None, host=fake_host
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ None,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | No | Schedule to AZ2, |
+# | | | | reqspec.AZ="AZ2" |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_newaz(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az('az1')
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=fake_zone
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ None,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+# +----------+---------------------------+-------+----------------------------+
+# | AZ1 | AZ="AZ2" | Host1 | If host1 in AZ2 then |
+# | | | | schedule to host1, |
+# | | | | reqspec.AZ="AZ2", |
+# | | | | otherwise reject (1) |
+# +----------+---------------------------+-------+----------------------------+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_newaz_and_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az('az1')
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.unshelve(
+ context, instance, new_az=fake_zone, host=fake_host
+ )
+
+ self._assert_unshelving_and_request_spec_az_and_host(
+ context,
+ instance,
+ fake_spec,
+ fake_zone,
+ fake_host,
+ mock_get_by_instance_uuid,
+ mock_unshelve
+ )
+
+ @mock.patch.object(nova.conductor.ComputeTaskAPI, 'unshelve_instance')
+ @mock.patch.object(objects.RequestSpec, 'save')
+ @mock.patch.object(objects.ComputeNodeList, 'get_all_by_host')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve_with_az_to_newaz_and_invalid_host(
+ self,
+ mock_get_by_instance_uuid,
+ mock_get_all_by_host,
+ mock_save,
+ mock_unshelve
+ ):
+
+ context = self.context.elevated()
+ fake_host = 'fake_host1'
+ fake_zone = 'avail_zone1'
+ self._create_host_inside_az(self.context, fake_host, fake_zone)
+
+ instance = self._get_specify_state_instance(
+ vm_states.SHELVED_OFFLOADED)
+
+ fake_spec = self._create_request_spec_for_initial_az('az1')
+ mock_get_by_instance_uuid.return_value = fake_spec
+
+ exc = self.assertRaises(
+ nova.exception.UnshelveHostNotInAZ,
+ self.compute_api.unshelve,
+ context,
+ instance,
+ new_az=fake_zone,
+ host='fake_mini'
+ )
+
+ self.assertIn(
+ exc.message,
+ 'Host "fake_mini" is not in the availability zone "avail_zone1".'
+ )
diff --git a/nova/tests/unit/compute/test_utils.py b/nova/tests/unit/compute/test_utils.py
index 6c3cbc1b57..dd10ecd7df 100644
--- a/nova/tests/unit/compute/test_utils.py
+++ b/nova/tests/unit/compute/test_utils.py
@@ -19,8 +19,8 @@
import copy
import datetime
import string
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
@@ -1558,47 +1558,86 @@ class PciRequestUpdateTestCase(test.NoDBTestCase):
def test_no_pci_request(self):
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, [], provider_mapping)
- def test_pci_request_from_flavor(self):
- pci_requests = [objects.InstancePCIRequest(requester_id=None)]
+ def test_pci_request_from_flavor_no_mapping(self):
+ req = objects.InstancePCIRequest(
+ requester_id=None,
+ request_id=uuids.req1,
+ alias_name="a-dev",
+ spec=[{}],
+ )
+ pci_requests = [req]
+
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
+ self.context, mock.sentinel.report_client, pci_requests,
+ provider_mapping)
+
+ self.assertNotIn('rp_uuids', req.spec[0])
+
+ def test_pci_request_from_flavor_with_mapping(self):
+ req = objects.InstancePCIRequest(
+ requester_id=None,
+ request_id=uuids.req1,
+ alias_name="a-dev",
+ spec=[{}],
+ )
+ pci_requests = [req]
+
+ provider_mapping = {
+ f"{uuids.req1}-0": [uuids.rp1],
+ f"{uuids.req1}-1": [uuids.rp2],
+ }
+
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
+ self.assertEqual(
+ {uuids.rp1, uuids.rp2}, set(req.spec[0]["rp_uuids"].split(','))
+ )
+
def test_pci_request_has_no_mapping(self):
pci_requests = [objects.InstancePCIRequest(requester_id=uuids.port_1)]
provider_mapping = {}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
def test_pci_request_ambiguous_mapping(self):
- pci_requests = [objects.InstancePCIRequest(requester_id=uuids.port_1)]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, request_id=uuids.req1
+ )
+ ]
provider_mapping = {uuids.port_1: [uuids.rp1, uuids.rp2]}
self.assertRaises(
exception.AmbiguousResourceProviderForPCIRequest,
(compute_utils.
- update_pci_request_spec_with_allocated_interface_name),
+ update_pci_request_with_placement_allocations),
self.context, mock.sentinel.report_client, pci_requests,
provider_mapping)
def test_unexpected_provider_name(self):
report_client = mock.Mock(spec=report.SchedulerReportClient)
report_client.get_resource_provider_name.return_value = 'unexpected'
- pci_requests = [objects.InstancePCIRequest(
- requester_id=uuids.port_1, spec=[{}])]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, spec=[{}], request_id=uuids.req1
+ )
+ ]
+
provider_mapping = {uuids.port_1: [uuids.rp1]}
self.assertRaises(
exception.UnexpectedResourceProviderNameForPCIRequest,
(compute_utils.
- update_pci_request_spec_with_allocated_interface_name),
+ update_pci_request_with_placement_allocations),
self.context, report_client, pci_requests,
provider_mapping)
@@ -1610,11 +1649,14 @@ class PciRequestUpdateTestCase(test.NoDBTestCase):
report_client = mock.Mock(spec=report.SchedulerReportClient)
report_client.get_resource_provider_name.return_value = (
'host:agent:enp0s31f6')
- pci_requests = [objects.InstancePCIRequest(
- requester_id=uuids.port_1, spec=[{}],)]
+ pci_requests = [
+ objects.InstancePCIRequest(
+ requester_id=uuids.port_1, spec=[{}], request_id=uuids.req1
+ )
+ ]
provider_mapping = {uuids.port_1: [uuids.rp1]}
- compute_utils.update_pci_request_spec_with_allocated_interface_name(
+ compute_utils.update_pci_request_with_placement_allocations(
self.context, report_client, pci_requests, provider_mapping)
report_client.get_resource_provider_name.assert_called_once_with(
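The flavor-based cases above key the provider mapping by '<request_id>-<group index>' and expect the allocated providers to end up comma-separated in spec[0]['rp_uuids']. A rough standalone sketch of that bookkeeping with plain dicts rather than Nova's objects (the shapes are illustrative, not the real implementation):

import uuid

req_id = str(uuid.uuid4())
rp1, rp2 = str(uuid.uuid4()), str(uuid.uuid4())

# Illustrative stand-in for an InstancePCIRequest coming from a flavor alias.
pci_request = {'request_id': req_id, 'spec': [{}]}

# Flavor-based PCI requests are split into numbered placement request groups,
# so the mapping keys take the form '<request_id>-<index>'.
provider_mapping = {
    f'{req_id}-0': [rp1],
    f'{req_id}-1': [rp2],
}

# Gather every provider allocated to any group of this request and record
# them on the spec as a comma-separated string.
rps = [
    rp
    for key, providers in provider_mapping.items()
    if key.startswith(f'{req_id}-')
    for rp in providers
]
if rps:
    pci_request['spec'][0]['rp_uuids'] = ','.join(rps)

assert set(pci_request['spec'][0]['rp_uuids'].split(',')) == {rp1, rp2}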
diff --git a/nova/tests/unit/compute/test_virtapi.py b/nova/tests/unit/compute/test_virtapi.py
index 0012a684f7..71c9097525 100644
--- a/nova/tests/unit/compute/test_virtapi.py
+++ b/nova/tests/unit/compute/test_virtapi.py
@@ -13,8 +13,9 @@
# under the License.
import collections
+from unittest import mock
-import mock
+import eventlet.timeout
import os_traits
from oslo_utils.fixture import uuidsentinel as uuids
@@ -186,16 +187,159 @@ class ComputeVirtAPITest(VirtAPIBaseTest):
do_test()
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
def test_wait_for_instance_event_timeout(self):
+ instance = mock.Mock()
+ instance.vm_state = mock.sentinel.vm_state
+ instance.task_state = mock.sentinel.task_state
+
+ mock_log = mock.Mock()
+
+ @mock.patch.object(compute_manager, 'LOG', new=mock_log)
@mock.patch.object(self.virtapi._compute, '_event_waiter',
- side_effect=test.TestingException())
- @mock.patch('eventlet.timeout.Timeout')
- def do_test(mock_timeout, mock_waiter):
- with self.virtapi.wait_for_instance_event('instance',
- [('foo', 'bar')]):
+ side_effect=eventlet.timeout.Timeout())
+ def do_test(mock_waiter):
+ with self.virtapi.wait_for_instance_event(
+ instance, [('foo', 'bar')]):
pass
- self.assertRaises(test.TestingException, do_test)
+ self.assertRaises(eventlet.timeout.Timeout, do_test)
+ mock_log.warning.assert_called_once_with(
+ 'Timeout waiting for %(events)s for instance with vm_state '
+ '%(vm_state)s and task_state %(task_state)s. '
+ 'Event states are: %(event_states)s',
+ {
+ 'events': ['foo-bar'],
+ 'vm_state': mock.sentinel.vm_state,
+ 'task_state': mock.sentinel.task_state,
+ 'event_states':
+ 'foo-bar: timed out after 1.23 seconds',
+ },
+ instance=instance
+ )
+
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
+ def test_wait_for_instance_event_one_received_one_timed_out(self):
+ instance = mock.Mock()
+ instance.vm_state = mock.sentinel.vm_state
+ instance.task_state = mock.sentinel.task_state
+
+ mock_log = mock.Mock()
+
+ calls = []
+
+ def fake_event_waiter(*args, **kwargs):
+ calls.append((args, kwargs))
+ if len(calls) == 1:
+ event = mock.Mock(status="completed")
+ return event
+ else:
+ raise eventlet.timeout.Timeout()
+
+ @mock.patch.object(compute_manager, 'LOG', new=mock_log)
+ @mock.patch.object(self.virtapi._compute, '_event_waiter',
+ side_effect=fake_event_waiter)
+ def do_test(mock_waiter):
+ with self.virtapi.wait_for_instance_event(
+ instance, [('foo', 'bar'), ('missing', 'event')]):
+ pass
+
+ self.assertRaises(eventlet.timeout.Timeout, do_test)
+ mock_log.warning.assert_called_once_with(
+ 'Timeout waiting for %(events)s for instance with vm_state '
+ '%(vm_state)s and task_state %(task_state)s. '
+ 'Event states are: %(event_states)s',
+ {
+ 'events': ['foo-bar', 'missing-event'],
+ 'vm_state': mock.sentinel.vm_state,
+ 'task_state': mock.sentinel.task_state,
+ 'event_states':
+ 'foo-bar: received after waiting 1.23 seconds, '
+ 'missing-event: timed out after 1.23 seconds',
+ },
+ instance=instance
+ )
+
+ @mock.patch(
+ 'oslo_utils.timeutils.StopWatch.elapsed',
+ new=mock.Mock(return_value=1.23))
+ def test_wait_for_instance_event_multiple_events(self):
+ instance = mock.Mock()
+ instance.vm_state = mock.sentinel.vm_state
+ instance.task_state = mock.sentinel.task_state
+
+ mock_log = mock.Mock()
+
+ calls = []
+
+ def fake_event_waiter(*args, **kwargs):
+ calls.append((args, kwargs))
+ if len(calls) == 1:
+ event = mock.Mock(status="completed")
+ return event
+ else:
+ raise eventlet.timeout.Timeout()
+
+ def fake_prepare_for_instance_event(instance, name, tag):
+ m = mock.MagicMock()
+ m.instance = instance
+ m.name = name
+ m.tag = tag
+ m.event_name = '%s-%s' % (name, tag)
+ m.wait.side_effect = fake_event_waiter
+ if name == 'received-but-not-waited':
+ m.ready.return_value = True
+ if name == 'missing-but-not-waited':
+ m.ready.return_value = False
+ return m
+
+ self.virtapi._compute.instance_events.prepare_for_instance_event.\
+ side_effect = fake_prepare_for_instance_event
+
+ @mock.patch.object(compute_manager, 'LOG', new=mock_log)
+ def do_test():
+ with self.virtapi.wait_for_instance_event(
+ instance,
+ [
+ ('received', 'event'),
+ ('early', 'event'),
+ ('missing', 'event'),
+ ('received-but-not-waited', 'event'),
+ ('missing-but-not-waited', 'event'),
+ ]
+ ):
+ self.virtapi.exit_wait_early([('early', 'event')])
+
+ self.assertRaises(eventlet.timeout.Timeout, do_test)
+ mock_log.warning.assert_called_once_with(
+ 'Timeout waiting for %(events)s for instance with vm_state '
+ '%(vm_state)s and task_state %(task_state)s. '
+ 'Event states are: %(event_states)s',
+ {
+ 'events':
+ [
+ 'received-event',
+ 'early-event',
+ 'missing-event',
+ 'received-but-not-waited-event',
+ 'missing-but-not-waited-event'
+ ],
+ 'vm_state': mock.sentinel.vm_state,
+ 'task_state': mock.sentinel.task_state,
+ 'event_states':
+ 'received-event: received after waiting 1.23 seconds, '
+ 'early-event: received early, '
+ 'missing-event: timed out after 1.23 seconds, '
+ 'received-but-not-waited-event: received but not '
+ 'processed, '
+ 'missing-but-not-waited-event: expected but not received'
+ },
+ instance=instance
+ )
def test_wait_for_instance_event_exit_early(self):
# Wait for two events, exit early skipping one.
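The warnings asserted above summarize how each awaited event finished: received, received early, timed out, or expected but not received. A simplified sketch of building that kind of per-event summary with a plain wait loop; this is illustrative only and not the ComputeVirtAPI implementation:

import time


def wait_for_events(waiters, deadline):
    # Illustrative only: wait for several named events, recording how each
    # one finished so a timeout still yields a useful per-event summary.
    states = {}
    start = time.monotonic()
    timed_out = False
    for name, waiter in waiters.items():
        if timed_out:
            states[name] = 'expected but not received'
            continue
        remaining = deadline - (time.monotonic() - start)
        if remaining <= 0 or not waiter(remaining):
            states[name] = 'timed out after %.2f seconds' % (
                time.monotonic() - start)
            timed_out = True
        else:
            states[name] = 'received after waiting %.2f seconds' % (
                time.monotonic() - start)
    return states


states = wait_for_events(
    {'foo-bar': lambda timeout: True, 'missing-event': lambda timeout: False},
    deadline=0.1,
)
print(', '.join('%s: %s' % (name, state) for name, state in states.items()))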
diff --git a/nova/tests/unit/conductor/tasks/test_base.py b/nova/tests/unit/conductor/tasks/test_base.py
index a7151c4cd0..cf9e8f9cfd 100644
--- a/nova/tests/unit/conductor/tasks/test_base.py
+++ b/nova/tests/unit/conductor/tasks/test_base.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.conductor.tasks import base
from nova import test
diff --git a/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py b/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py
index 127d763477..c4b6c217b6 100644
--- a/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_cross_cell_migrate.py
@@ -11,8 +11,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_messaging import exceptions as messaging_exceptions
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -439,7 +439,7 @@ class CrossCellMigrationTaskTestCase(test.NoDBTestCase):
what we need.
"""
with mock.patch.object(
- self.task.network_api, 'supports_port_binding_extension',
+ self.task.network_api, 'has_port_binding_extension',
return_value=True) as mock_neutron_check:
self.task._perform_external_api_checks()
mock_neutron_check.assert_called_once_with(self.task.context)
@@ -447,7 +447,7 @@ class CrossCellMigrationTaskTestCase(test.NoDBTestCase):
def test_perform_external_api_checks_old_neutron(self):
"""Tests the case that neutron API is old."""
with mock.patch.object(
- self.task.network_api, 'supports_port_binding_extension',
+ self.task.network_api, 'has_port_binding_extension',
return_value=False):
ex = self.assertRaises(exception.MigrationPreCheckError,
self.task._perform_external_api_checks)
diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py
index 88f00d0d84..4e888139f6 100644
--- a/nova/tests/unit/conductor/tasks/test_live_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -345,6 +346,36 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
mock.call(self.destination)],
mock_get_info.call_args_list)
+ @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
+ def test_skip_hypervisor_version_check_on_lm_raise_ex(self, mock_get_info):
+ host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
+ host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
+ self.flags(group='workarounds',
+ skip_hypervisor_version_check_on_lm=False)
+ mock_get_info.side_effect = [objects.ComputeNode(**host1),
+ objects.ComputeNode(**host2)]
+ self.assertRaises(exception.DestinationHypervisorTooOld,
+ self.task._check_compatible_with_source_hypervisor,
+ self.destination)
+ self.assertEqual([mock.call(self.instance_host),
+ mock.call(self.destination)],
+ mock_get_info.call_args_list)
+
+ @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
+ def test_skip_hypervisor_version_check_on_lm_do_not_raise_ex(
+ self, mock_get_info
+ ):
+ host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
+ host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
+ self.flags(group='workarounds',
+ skip_hypervisor_version_check_on_lm=True)
+ mock_get_info.side_effect = [objects.ComputeNode(**host1),
+ objects.ComputeNode(**host2)]
+ self.task._check_compatible_with_source_hypervisor(self.destination)
+ self.assertEqual([mock.call(self.instance_host),
+ mock.call(self.destination)],
+ mock_get_info.call_args_list)
+
@mock.patch.object(compute_rpcapi.ComputeAPI,
'check_can_live_migrate_destination')
def test_check_requested_destination(self, mock_check):
@@ -353,7 +384,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
with test.nested(
mock.patch.object(self.task.network_api,
- 'supports_port_binding_extension',
+ 'has_port_binding_extension',
return_value=False),
mock.patch.object(self.task, '_check_can_migrate_pci')):
self.assertIsNone(self.task._check_requested_destination())
@@ -387,7 +418,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
with test.nested(
mock.patch.object(self.task.network_api,
- 'supports_port_binding_extension',
+ 'has_port_binding_extension',
return_value=False),
mock.patch.object(self.task, '_check_can_migrate_pci')):
ex = self.assertRaises(exception.MigrationPreCheckError,
@@ -730,7 +761,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
@mock.patch(
'nova.compute.utils.'
- 'update_pci_request_spec_with_allocated_interface_name')
+ 'update_pci_request_with_placement_allocations')
@mock.patch('nova.scheduler.utils.fill_provider_mapping')
@mock.patch.object(live_migrate.LiveMigrationTask,
'_call_livem_checks_on_host')
@@ -813,7 +844,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
"""
@mock.patch.object(self.task.network_api,
- 'supports_port_binding_extension')
+ 'has_port_binding_extension')
@mock.patch.object(live_migrate,
'supports_vif_related_pci_allocations')
def _test(instance_pci_reqs,
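The two new tests in this file cover a workarounds flag that relaxes the usual rule that the destination hypervisor must not be older than the source. A minimal sketch of that kind of flag-guarded check; the names below are placeholders, not Nova's code:

class DestinationHypervisorTooOld(Exception):
    """Stand-in for the exception the strict check raises."""


def check_compatible(source_version, dest_version, skip_version_check=False):
    # With the workaround enabled the version comparison is simply skipped;
    # otherwise an older destination is rejected.
    if not skip_version_check and dest_version < source_version:
        raise DestinationHypervisorTooOld(
            'destination version %d < source version %d'
            % (dest_version, source_version))


try:
    check_compatible(7, 6)
except DestinationHypervisorTooOld:
    print('rejected without the workaround')

check_compatible(7, 6, skip_version_check=True)
print('accepted with the workaround enabled')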
diff --git a/nova/tests/unit/conductor/tasks/test_migrate.py b/nova/tests/unit/conductor/tasks/test_migrate.py
index 145e54f884..46cb033c5c 100644
--- a/nova/tests/unit/conductor/tasks/test_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_migrate.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index 9445db1b62..971570dfb5 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -16,9 +16,12 @@
"""Tests for the conductor service."""
import copy
+import ddt
+from unittest import mock
-import mock
+from keystoneauth1 import exceptions as ks_exc
from oslo_db import exception as db_exc
+from oslo_limit import exception as limit_exceptions
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -44,12 +47,14 @@ from nova.db.api import models as api_models
from nova.db.main import api as main_db_api
from nova import exception as exc
from nova.image import glance as image_api
+from nova.limit import placement as placement_limit
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import request_spec
from nova.scheduler.client import query
+from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests import fixtures
@@ -383,7 +388,9 @@ class _BaseTaskTestCase(object):
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host',
- 'request_spec': None}
+ 'request_spec': None,
+ 'reimage_boot_volume': False,
+ 'target_state': None}
if update_args:
rebuild_args.update(update_args)
compute_rebuild_args = copy.deepcopy(rebuild_args)
@@ -2261,6 +2268,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
rs.instance_group = None
rs.retry = None
rs.limits = None
+ rs.is_bfv = False
rs.create()
params['request_specs'] = [rs]
params['image'] = {'fake_data': 'should_pass_silently'}
@@ -2399,7 +2407,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
'1', None, None, dp_name)
arq_uuid = arq_in_list[0]['uuid']
- # muliti device request
+ # multi device request
mock_create.return_value = [arq_in_list[0], arq_in_list[0]]
rp_map = {"request_group_0" + str(port_id): rp_uuid}
request_tuples = [('123', '1.2.3.4', port_id,
@@ -2871,6 +2879,74 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
'image'):
self.assertIn(key, request_spec_dict)
+ @mock.patch.object(placement_limit, 'enforce_num_instances_and_flavor')
+ @mock.patch('nova.compute.utils.notify_about_compute_task_error')
+ @mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
+ def test_schedule_and_build_over_quota_during_recheck_ul(self, mock_select,
+ mock_notify,
+ mock_enforce):
+ self.flags(driver="nova.quota.UnifiedLimitsDriver",
+ cores=1,
+ group="quota")
+ mock_select.return_value = [[fake_selection1]]
+ # Simulate a race where the first check passes and the recheck fails.
+ # First check occurs in compute/api.
+ project_id = self.params['context'].project_id
+ mock_enforce.side_effect = limit_exceptions.ProjectOverLimit(
+ project_id, [limit_exceptions.OverLimitInfo('cores', 2, 3, 0)])
+
+ original_save = objects.Instance.save
+
+ def fake_save(inst, *args, **kwargs):
+ # Make sure the context is targeted to the cell that the instance
+ # was created in.
+ self.assertIsNotNone(
+ inst._context.db_connection, 'Context is not targeted')
+ original_save(inst, *args, **kwargs)
+
+ self.stub_out('nova.objects.Instance.save', fake_save)
+
+ # This is needed to register the compute node in a cell.
+ self.start_service('compute', host='host1')
+ self.assertRaises(
+ limit_exceptions.ProjectOverLimit,
+ self.conductor.schedule_and_build_instances, **self.params)
+
+ mock_enforce.assert_called_once_with(
+ self.params['context'], project_id, mock.ANY, False, 0, 0)
+
+ # Verify we set the instance to ERROR state and set the fault message.
+ instances = objects.InstanceList.get_all(self.ctxt)
+ self.assertEqual(1, len(instances))
+ instance = instances[0]
+ self.assertEqual(vm_states.ERROR, instance.vm_state)
+ self.assertIsNone(instance.task_state)
+ self.assertIn('ProjectOverLimit', instance.fault.message)
+ # Verify we removed the build objects.
+ build_requests = objects.BuildRequestList.get_all(self.ctxt)
+ # Verify that the instance is mapped to a cell
+ inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
+ self.ctxt, instance.uuid)
+ self.assertIsNotNone(inst_mapping.cell_mapping)
+
+ self.assertEqual(0, len(build_requests))
+
+ @api_db_api.context_manager.reader
+ def request_spec_get_all(context):
+ return context.session.query(api_models.RequestSpec).all()
+
+ request_specs = request_spec_get_all(self.ctxt)
+ self.assertEqual(0, len(request_specs))
+
+ mock_notify.assert_called_once_with(
+ test.MatchType(context.RequestContext), 'build_instances',
+ instance.uuid, test.MatchType(dict), 'error',
+ test.MatchType(limit_exceptions.ProjectOverLimit))
+ request_spec_dict = mock_notify.call_args_list[0][0][3]
+ for key in ('instance_type', 'num_instances', 'instance_properties',
+ 'image'):
+ self.assertIn(key, request_spec_dict)
+
@mock.patch('nova.compute.rpcapi.ComputeAPI.build_and_run_instance')
@mock.patch('nova.objects.quotas.Quotas.check_deltas')
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
@@ -4676,6 +4752,68 @@ class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
mock.sentinel.migration)
can_send_version.assert_called_once_with('1.23')
+ def test_evacuate_old_rpc_with_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': 'stopped'})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version', return_value=False):
+ self.assertRaises(exc.UnsupportedRPCVersion,
+ self.conductor.rebuild_instance,
+ self.context, inst_obj, **rebuild_args)
+
+ def test_evacuate_old_rpc_without_target_state(self):
+ inst_obj = self._create_fake_instance_obj()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host,
+ 'target_state': None})
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version',
+ return_value=False) as can_send_version:
+ self.conductor.rebuild_instance(
+ self.context, inst_obj, **rebuild_args)
+ can_send_version.assert_has_calls([
+ mock.call('1.25'), mock.call('1.24'),
+ mock.call('1.12')])
+
+ def test_rebuild_instance_volume_backed(self):
+ inst_obj = self._create_fake_instance_obj()
+ version = '1.25'
+ cctxt_mock = mock.MagicMock()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host})
+ rebuild_args['reimage_boot_volume'] = True
+
+ @mock.patch.object(self.conductor.client, 'prepare',
+ return_value=cctxt_mock)
+ @mock.patch.object(self.conductor.client, 'can_send_version',
+ return_value=True)
+ def _test(mock_can_send_ver, prepare_mock):
+ self.conductor.rebuild_instance(
+ self.context, inst_obj, **rebuild_args)
+ prepare_mock.assert_called_once_with(version=version)
+ kw = {'instance': inst_obj, **rebuild_args}
+ cctxt_mock.cast.assert_called_once_with(
+ self.context, 'rebuild_instance', **kw)
+ _test()
+
+ def test_rebuild_instance_volume_backed_old_service(self):
+ """Tests rebuild_instance_volume_backed when the service is too old"""
+ inst_obj = mock.MagicMock()
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': inst_obj.host})
+ rebuild_args['reimage_boot_volume'] = True
+ with mock.patch.object(
+ self.conductor.client, 'can_send_version',
+ return_value=False) as can_send_version:
+ self.assertRaises(exc.NovaException,
+ self.conductor.rebuild_instance,
+ self.context, inst_obj,
+ **rebuild_args)
+ can_send_version.assert_has_calls([mock.call('1.25'),
+ mock.call('1.24')])
+
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Compute task API Tests."""
@@ -4798,3 +4936,35 @@ class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
logtext)
self.assertIn('host3\' because it is not up', logtext)
self.assertIn('image1 failed 1 times', logtext)
+
+
+@ddt.ddt
+class TestConductorTaskManager(test.NoDBTestCase):
+ def test_placement_client_startup(self):
+ self.assertIsNone(report.PLACEMENTCLIENT)
+ conductor_manager.ComputeTaskManager()
+ self.assertIsNotNone(report.PLACEMENTCLIENT)
+
+ @ddt.data(ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized,
+ test.TestingException)
+ def test_placement_client_startup_fatals(self, exc):
+ self.assertRaises(exc,
+ self._test_placement_client_startup_exception, exc)
+
+ @ddt.data(ks_exc.EndpointNotFound,
+ ks_exc.DiscoveryFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ ks_exc.ConnectFailure)
+ def test_placement_client_startup_non_fatal(self, exc):
+ self._test_placement_client_startup_exception(exc)
+
+ @mock.patch.object(report, 'LOG')
+ def _test_placement_client_startup_exception(self, exc, mock_log):
+ with mock.patch.object(report.SchedulerReportClient, '_create_client',
+ side_effect=exc):
+ try:
+ conductor_manager.ComputeTaskManager()
+ finally:
+ mock_log.error.assert_called_once()
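The new TestConductorTaskManager class uses the third-party ddt library to run one test body against several exception classes. A self-contained example of that pattern, assuming ddt is installed as it is in Nova's test requirements:

import unittest

import ddt


@ddt.ddt
class ExampleTest(unittest.TestCase):

    # ddt.data generates a separate test case per value, so a single body
    # covers every input, much like the fatal and non-fatal exception lists.
    @ddt.data(1, 2, 3)
    def test_square_is_positive(self, value):
        self.assertGreater(value * value, 0)


if __name__ == '__main__':
    unittest.main()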
diff --git a/nova/tests/unit/console/rfb/test_auth.py b/nova/tests/unit/console/rfb/test_auth.py
index c4026b6637..1d66b2684f 100644
--- a/nova/tests/unit/console/rfb/test_auth.py
+++ b/nova/tests/unit/console/rfb/test_auth.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.console.rfb import auth
from nova.console.rfb import authnone
diff --git a/nova/tests/unit/console/rfb/test_authnone.py b/nova/tests/unit/console/rfb/test_authnone.py
index e628106e3b..3ca44dce89 100644
--- a/nova/tests/unit/console/rfb/test_authnone.py
+++ b/nova/tests/unit/console/rfb/test_authnone.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.console.rfb import auth
from nova.console.rfb import authnone
diff --git a/nova/tests/unit/console/rfb/test_authvencrypt.py b/nova/tests/unit/console/rfb/test_authvencrypt.py
index f7fc31939e..de9bccb44a 100644
--- a/nova/tests/unit/console/rfb/test_authvencrypt.py
+++ b/nova/tests/unit/console/rfb/test_authvencrypt.py
@@ -14,8 +14,8 @@
import ssl
import struct
+from unittest import mock
-import mock
from nova.console.rfb import auth
from nova.console.rfb import authvencrypt
diff --git a/nova/tests/unit/console/securityproxy/test_rfb.py b/nova/tests/unit/console/securityproxy/test_rfb.py
index 3eb8ba6acf..17cf8f7c57 100644
--- a/nova/tests/unit/console/securityproxy/test_rfb.py
+++ b/nova/tests/unit/console/securityproxy/test_rfb.py
@@ -15,7 +15,7 @@
"""Tests the Console Security Proxy Framework."""
-import mock
+from unittest import mock
from nova.console.rfb import auth
from nova.console.rfb import authnone
diff --git a/nova/tests/unit/console/test_serial.py b/nova/tests/unit/console/test_serial.py
index bc87ca6ca2..44d88e6e83 100644
--- a/nova/tests/unit/console/test_serial.py
+++ b/nova/tests/unit/console/test_serial.py
@@ -15,8 +15,7 @@
"""Tests for Serial Console."""
import socket
-
-import mock
+from unittest import mock
from nova.console import serial
from nova import exception
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index e05ae520d9..639623bbb5 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -17,8 +17,8 @@
import copy
import io
import socket
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import nova.conf
@@ -302,8 +302,6 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
- 'host': 'node1',
- 'port': '10000',
'internal_access_path': 'xxx',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
@@ -589,12 +587,12 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
- def test_reject_open_redirect(self):
+ def test_reject_open_redirect(self, url='//example.com/%2F..'):
# This will test the behavior when an attempt is made to cause an open
# redirect. It should be rejected.
mock_req = mock.MagicMock()
mock_req.makefile().readline.side_effect = [
- b'GET //example.com/%2F.. HTTP/1.1\r\n',
+ f'GET {url} HTTP/1.1\r\n'.encode('utf-8'),
b''
]
@@ -619,41 +617,34 @@ class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
result = output.readlines()
# Verify no redirect happens and instead a 400 Bad Request is returned.
- self.assertIn('400 URI must not start with //', result[0].decode())
+ # NOTE: As of python 3.10.6 there is a fix for this vulnerability,
+            # which causes a 301 Moved Permanently response to be returned
+            # instead, redirecting to a sanitized version of the URL with the
+            # extra leading '/' characters removed.
+ # See https://github.com/python/cpython/issues/87389 for details.
+ # We will consider either response to be valid for this test. This will
+ # also help if and when the above fix gets backported to older versions
+ # of python.
+ errmsg = result[0].decode()
+ expected_nova = '400 URI must not start with //'
+ expected_cpython = '301 Moved Permanently'
+
+ self.assertTrue(expected_nova in errmsg or expected_cpython in errmsg)
+
+ # If we detect the cpython fix, verify that the redirect location is
+ # now the same url but with extra leading '/' characters removed.
+ if expected_cpython in errmsg:
+ location = result[3].decode()
+ if location.startswith('Location: '):
+ location = location[len('Location: '):]
+ location = location.rstrip('\r\n')
+ self.assertTrue(
+ location.startswith('/example.com/%2F..'),
+ msg='Redirect location is not the expected sanitized URL',
+ )
def test_reject_open_redirect_3_slashes(self):
- # This will test the behavior when an attempt is made to cause an open
- # redirect. It should be rejected.
- mock_req = mock.MagicMock()
- mock_req.makefile().readline.side_effect = [
- b'GET ///example.com/%2F.. HTTP/1.1\r\n',
- b''
- ]
-
- # Collect the response data to verify at the end. The
- # SimpleHTTPRequestHandler writes the response data by calling the
- # request socket sendall() method.
- self.data = b''
-
- def fake_sendall(data):
- self.data += data
-
- mock_req.sendall.side_effect = fake_sendall
-
- client_addr = ('8.8.8.8', 54321)
- mock_server = mock.MagicMock()
- # This specifies that the server will be able to handle requests other
- # than only websockets.
- mock_server.only_upgrade = False
-
- # Constructing a handler will process the mock_req request passed in.
- websocketproxy.NovaProxyRequestHandler(
- mock_req, client_addr, mock_server)
-
- # Verify no redirect happens and instead a 400 Bad Request is returned.
- self.data = self.data.decode()
- self.assertIn('Error code: 400', self.data)
- self.assertIn('Message: URI must not start with //', self.data)
+ self.test_reject_open_redirect(url='///example.com/%2F..')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_no_compute_rpcapi_with_invalid_token(self, mock_validate):
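The rewritten assertion above accepts either Nova's own 400 rejection or, on interpreters carrying the cpython fix, a 301 redirect to a sanitized path. A small sketch of such a tolerant check over a raw response; this mirrors only the test's logic and is not part of the proxy code:

def is_acceptable(status_line, location=None):
    # Either Nova rejected the request outright ...
    if '400 URI must not start with //' in status_line:
        return True
    # ... or a patched cpython answered with a redirect, in which case the
    # Location header must have had the extra leading slashes stripped.
    if '301 Moved Permanently' in status_line:
        return bool(location) and location.startswith('/example.com/%2F..')
    return False


assert is_acceptable('HTTP/1.1 400 URI must not start with //')
assert is_acceptable('HTTP/1.1 301 Moved Permanently',
                     location='/example.com/%2F..')
assert not is_acceptable('HTTP/1.1 302 Found', location='//evil.example.com/')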
diff --git a/nova/tests/unit/db/api/test_api.py b/nova/tests/unit/db/api/test_api.py
index 251407612f..6113791a8e 100644
--- a/nova/tests/unit/db/api/test_api.py
+++ b/nova/tests/unit/db/api/test_api.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.db.api import api as db_api
from nova import test
diff --git a/nova/tests/unit/db/api/test_migrations.py b/nova/tests/unit/db/api/test_migrations.py
index 1b14d569db..7c99f2f44a 100644
--- a/nova/tests/unit/db/api/test_migrations.py
+++ b/nova/tests/unit/db/api/test_migrations.py
@@ -21,10 +21,10 @@ test will then use that DB and username/password combo to run the tests. Refer
to the 'tools/test-setup.sh' for an example of how to configure this.
"""
+from unittest import mock
+
from alembic import command as alembic_api
from alembic import script as alembic_script
-from migrate.versioning import api as migrate_api
-import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
@@ -126,47 +126,6 @@ class TestModelsSyncPostgreSQL(
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-class NovaModelsMigrationsLegacySync(NovaModelsMigrationsSync):
- """Test that the models match the database after old migrations are run."""
-
- def db_sync(self, engine):
- # the 'nova.db.migration.db_sync' method will not use the legacy
- # sqlalchemy-migrate-based migration flow unless the database is
- # already controlled with sqlalchemy-migrate, so we need to manually
- # enable version controlling with this tool to test this code path
- repository = migration._find_migrate_repo(database='api')
- migrate_api.version_control(
- engine, repository, migration.MIGRATE_INIT_VERSION['api'])
-
- # now we can apply migrations as expected and the legacy path will be
- # followed
- super().db_sync(engine)
-
-
-class TestModelsLegacySyncSQLite(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- testtools.TestCase,
-):
- pass
-
-
-class TestModelsLegacySyncMySQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- testtools.TestCase,
-):
- FIXTURE = test_fixtures.MySQLOpportunisticFixture
-
-
-class TestModelsLegacySyncPostgreSQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- testtools.TestCase,
-):
- FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-
-
class NovaMigrationsWalk(
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
):
@@ -179,7 +138,7 @@ class NovaMigrationsWalk(
super().setUp()
self.engine = enginefacade.writer.get_engine()
self.config = migration._find_alembic_conf('api')
- self.init_version = migration.ALEMBIC_INIT_VERSION['api']
+ self.init_version = 'd67eeaabee36'
def _migrate_up(self, connection, revision):
if revision == self.init_version: # no tests for the initial revision
diff --git a/nova/tests/unit/db/main/test_api.py b/nova/tests/unit/db/main/test_api.py
index c9a9e83154..98f9c854d9 100644
--- a/nova/tests/unit/db/main/test_api.py
+++ b/nova/tests/unit/db/main/test_api.py
@@ -18,10 +18,10 @@
import copy
import datetime
+from unittest import mock
from dateutil import parser as dateutil_parser
import iso8601
-import mock
import netaddr
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
@@ -167,27 +167,25 @@ class DbTestCase(test.TestCase):
class HelperTestCase(test.TestCase):
@mock.patch('sqlalchemy.orm.joinedload')
def test_joinedload_helper(self, mock_jl):
- query = db._joinedload_all('foo.bar.baz')
+ query = db._joinedload_all(
+ models.SecurityGroup, 'instances.info_cache'
+ )
# We call sqlalchemy.orm.joinedload() on the first element
- mock_jl.assert_called_once_with('foo')
+ mock_jl.assert_called_once_with(models.SecurityGroup.instances)
# Then first.joinedload(second)
column2 = mock_jl.return_value
- column2.joinedload.assert_called_once_with('bar')
-
- # Then second.joinedload(third)
- column3 = column2.joinedload.return_value
- column3.joinedload.assert_called_once_with('baz')
+ column2.joinedload.assert_called_once_with(models.Instance.info_cache)
- self.assertEqual(column3.joinedload.return_value, query)
+ self.assertEqual(column2.joinedload.return_value, query)
@mock.patch('sqlalchemy.orm.joinedload')
def test_joinedload_helper_single(self, mock_jl):
- query = db._joinedload_all('foo')
+ query = db._joinedload_all(models.SecurityGroup, 'instances')
# We call sqlalchemy.orm.joinedload() on the first element
- mock_jl.assert_called_once_with('foo')
+ mock_jl.assert_called_once_with(models.SecurityGroup.instances)
# We should have gotten back just the result of the joinedload()
# call if there were no other elements
@@ -279,33 +277,21 @@ class DecoratorTestCase(test.TestCase):
'No DB access allowed in ',
mock_log.error.call_args[0][0])
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_writer_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_writer_disable_db_access(self):
@db.pick_context_manager_writer
def func(context, value):
pass
self._test_pick_context_manager_disable_db_access(func)
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_reader_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_reader_disable_db_access(self):
@db.pick_context_manager_reader
def func(context, value):
pass
self._test_pick_context_manager_disable_db_access(func)
- @mock.patch.object(db, 'LOG')
- @mock.patch.object(db, 'DISABLE_DB_ACCESS', return_value=True)
- def test_pick_context_manager_reader_allow_async_disable_db_access(
- self, mock_DISABLE_DB_ACCESS, mock_log,
- ):
+ def test_pick_context_manager_reader_allow_async_disable_db_access(self):
@db.pick_context_manager_reader_allow_async
def func(context, value):
pass
@@ -1683,28 +1669,40 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
instances = db.instance_get_all_by_filters_sort(self.ctxt, filters)
self.assertEqual([], instances)
- @mock.patch('sqlalchemy.orm.undefer')
@mock.patch('sqlalchemy.orm.joinedload')
- def test_instance_get_all_by_filters_extra_columns(self,
- mock_joinedload,
- mock_undefer):
+ def test_instance_get_all_by_filters_extra_columns(self, mock_joinedload):
db.instance_get_all_by_filters_sort(
self.ctxt, {},
- columns_to_join=['info_cache', 'extra.pci_requests'])
- mock_joinedload.assert_called_once_with('info_cache')
- mock_undefer.assert_called_once_with('extra.pci_requests')
+ columns_to_join=['info_cache', 'extra.pci_requests'],
+ )
+ mock_joinedload.assert_has_calls(
+ [
+ mock.call(models.Instance.info_cache),
+ mock.ANY,
+ mock.call(models.Instance.extra),
+ mock.ANY,
+ mock.ANY,
+ ]
+ )
- @mock.patch('sqlalchemy.orm.undefer')
@mock.patch('sqlalchemy.orm.joinedload')
- def test_instance_get_active_by_window_extra_columns(self,
- mock_joinedload,
- mock_undefer):
+ def test_instance_get_active_by_window_extra_columns(
+ self, mock_joinedload,
+ ):
now = datetime.datetime(2013, 10, 10, 17, 16, 37, 156701)
db.instance_get_active_by_window_joined(
self.ctxt, now,
- columns_to_join=['info_cache', 'extra.pci_requests'])
- mock_joinedload.assert_called_once_with('info_cache')
- mock_undefer.assert_called_once_with('extra.pci_requests')
+ columns_to_join=['info_cache', 'extra.pci_requests'],
+ )
+ mock_joinedload.assert_has_calls(
+ [
+ mock.call(models.Instance.info_cache),
+ mock.ANY,
+ mock.call(models.Instance.extra),
+ mock.ANY,
+ mock.ANY,
+ ]
+ )
def test_instance_get_all_by_filters_with_meta(self):
self.create_instance_with_args()
@@ -3349,7 +3347,7 @@ class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
self._assertEqualListsOfObjects(expected, actions)
def test_instance_actions_get_are_in_order(self):
- """Ensure retrived actions are in order."""
+ """Ensure retrieved actions are in order."""
uuid1 = uuidsentinel.uuid1
extra = {
@@ -3608,7 +3606,7 @@ class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
self._assertActionEventSaved(event, action['id'])
def test_instance_action_events_get_are_in_order(self):
- """Ensure retrived action events are in order."""
+ """Ensure retrieved action events are in order."""
uuid1 = uuidsentinel.uuid1
action = db.action_start(self.ctxt,
@@ -5653,7 +5651,6 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
super(ArchiveTestCase, self).setUp()
self.engine = db.get_engine()
self.metadata = sa.MetaData()
- self.conn = self.engine.connect()
self.instance_id_mappings = models.InstanceIdMapping.__table__
self.shadow_instance_id_mappings = sqlalchemyutils.get_table(
self.engine, "shadow_instance_id_mappings")
@@ -5681,17 +5678,18 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Ensure shadow tables are empty
This method ensures that all the shadow tables in the schema,
- except for specificially named exceptions, are empty. This
+ except for specifically named exceptions, are empty. This
makes sure that archiving isn't moving unexpected content.
"""
metadata = sa.MetaData()
metadata.reflect(bind=self.engine)
- for table in metadata.tables:
- if table.startswith("shadow_") and table not in exceptions:
- rows = self.conn.exec_driver_sql(
- "SELECT * FROM %s" % table
- ).fetchall()
- self.assertEqual(rows, [], "Table %s not empty" % table)
+ with self.engine.connect() as conn, conn.begin():
+ for table in metadata.tables:
+ if table.startswith("shadow_") and table not in exceptions:
+ rows = conn.exec_driver_sql(
+ "SELECT * FROM %s" % table
+ ).fetchall()
+ self.assertEqual(rows, [], "Table %s not empty" % table)
def test_shadow_tables(self):
"""Validate shadow table schema.
@@ -5744,57 +5742,72 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
self._assert_shadow_tables_empty_except()
def test_archive_deleted_rows(self):
- # Add 6 rows to table
- for uuidstr in self.uuidstrs:
- ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ # Add 6 rows to table
+ for uuidstr in self.uuidstrs:
+ ins_stmt = self.instance_id_mappings.insert().values(
+ uuid=uuidstr,
+ )
+ conn.execute(ins_stmt)
+
# Set 4 to deleted
- update_statement = self.instance_id_mappings.update().\
- where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
- .values(deleted=1, deleted_at=timeutils.utcnow())
- self.conn.execute(update_statement)
+ with self.engine.connect() as conn, conn.begin():
+ update_statement = self.instance_id_mappings.update().where(
+ self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4])
+ ).values(deleted=1, deleted_at=timeutils.utcnow())
+ conn.execute(update_statement)
+
qiim = sql.select(self.instance_id_mappings).where(
self.instance_id_mappings.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qiim).fetchall()
- # Verify we have 6 in main
- self.assertEqual(len(rows), 6)
qsiim = sql.select(self.shadow_instance_id_mappings).where(
self.shadow_instance_id_mappings.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we have 0 in shadow
- self.assertEqual(len(rows), 0)
- # Archive 2 rows
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 6 in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 6)
+ # Verify we have 0 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 0)
+
results = db.archive_deleted_rows(max_rows=2)
expected = dict(instance_id_mappings=2)
self._assertEqualObjects(expected, results[0])
- rows = self.conn.execute(qiim).fetchall()
- # Verify we have 4 left in main
- self.assertEqual(len(rows), 4)
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we have 2 in shadow
- self.assertEqual(len(rows), 2)
+
+ with self.engine.connect() as conn, conn.begin():
+            # After archiving 2 rows above, verify we have 4 left in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 4)
+ # Verify we have 2 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 2)
# Archive 2 more rows
results = db.archive_deleted_rows(max_rows=2)
expected = dict(instance_id_mappings=2)
self._assertEqualObjects(expected, results[0])
- rows = self.conn.execute(qiim).fetchall()
- # Verify we have 2 left in main
- self.assertEqual(len(rows), 2)
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we have 4 in shadow
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 2 left in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we have 4 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 4)
+
# Try to archive more, but there are no deleted rows left.
results = db.archive_deleted_rows(max_rows=2)
expected = dict()
self._assertEqualObjects(expected, results[0])
- rows = self.conn.execute(qiim).fetchall()
- # Verify we still have 2 left in main
- self.assertEqual(len(rows), 2)
- rows = self.conn.execute(qsiim).fetchall()
- # Verify we still have 4 in shadow
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we still have 2 left in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we still have 4 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 4)
# Ensure only deleted rows were deleted
self._assert_shadow_tables_empty_except(
@@ -5804,34 +5817,45 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = self.instances.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt)
- ins_stmt = self.instance_actions.insert().\
- values(instance_uuid=uuidstr)
- result = self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
+
+ ins_stmt = self.instance_actions.insert().values(
+ instance_uuid=uuidstr,
+ )
+ with self.engine.connect() as conn, conn.begin():
+ result = conn.execute(ins_stmt)
+
instance_action_uuid = result.inserted_primary_key[0]
- ins_stmt = self.instance_actions_events.insert().\
- values(action_id=instance_action_uuid)
- self.conn.execute(ins_stmt)
+ ins_stmt = self.instance_actions_events.insert().values(
+ action_id=instance_action_uuid,
+ )
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
# Set 1 to deleted before 2017-01-01
deleted_at = timeutils.parse_strtime('2017-01-01T00:00:00.0')
- update_statement = self.instances.update().\
- where(self.instances.c.uuid.in_(self.uuidstrs[0:1]))\
- .values(deleted=1, deleted_at=deleted_at)
- self.conn.execute(update_statement)
+ update_statement = self.instances.update().where(
+ self.instances.c.uuid.in_(self.uuidstrs[0:1])
+ ).values(deleted=1, deleted_at=deleted_at)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
# Set 1 to deleted before 2017-01-02
deleted_at = timeutils.parse_strtime('2017-01-02T00:00:00.0')
- update_statement = self.instances.update().\
- where(self.instances.c.uuid.in_(self.uuidstrs[1:2]))\
- .values(deleted=1, deleted_at=deleted_at)
- self.conn.execute(update_statement)
+ update_statement = self.instances.update().where(
+ self.instances.c.uuid.in_(self.uuidstrs[1:2])
+ ).values(deleted=1, deleted_at=deleted_at)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
# Set 2 to deleted now
- update_statement = self.instances.update().\
- where(self.instances.c.uuid.in_(self.uuidstrs[2:4]))\
- .values(deleted=1, deleted_at=timeutils.utcnow())
- self.conn.execute(update_statement)
+ update_statement = self.instances.update().where(
+ self.instances.c.uuid.in_(self.uuidstrs[2:4])
+ ).values(deleted=1, deleted_at=timeutils.utcnow())
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
qiim = sql.select(self.instances).where(
self. instances.c.uuid.in_(self.uuidstrs)
)
@@ -5839,9 +5863,11 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
self.shadow_instances.c.uuid.in_(self.uuidstrs)
)
- # Verify we have 6 in main
- rows = self.conn.execute(qiim).fetchall()
- self.assertEqual(len(rows), 6)
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 6 in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 6)
+
# Make sure 'before' comparison is for < not <=, nothing deleted
before_date = dateutil_parser.parse('2017-01-01', fuzzy=True)
_, uuids, _ = db.archive_deleted_rows(max_rows=1, before=before_date)
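The ArchiveTestCase changes in this file drop the long-lived self.conn in favour of short-lived connections with explicit transactions, the SQLAlchemy 2.0-style pattern. A minimal standalone example of that pattern against an in-memory SQLite engine (table and data are illustrative only):

import sqlalchemy as sa

# In-memory SQLite engine just for illustration.
engine = sa.create_engine('sqlite://')
metadata = sa.MetaData()
items = sa.Table(
    'items', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('name', sa.String(32)),
)
metadata.create_all(engine)

# Open a connection and an explicit transaction for each unit of work,
# instead of holding one connection open for the whole test.
with engine.connect() as conn, conn.begin():
    conn.execute(items.insert().values(name='a'))
    conn.execute(items.insert().values(name='b'))

with engine.connect() as conn, conn.begin():
    rows = conn.execute(sa.select(items)).fetchall()
    assert len(rows) == 2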
@@ -5875,22 +5901,25 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
expected = {}
self._assertEqualObjects(expected, results[0])
- # Verify we have 4 left in main
- rows = self.conn.execute(qiim).fetchall()
- self.assertEqual(len(rows), 4)
- # Verify we have 2 in shadow
- rows = self.conn.execute(qsiim).fetchall()
- self.assertEqual(len(rows), 2)
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 4 left in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 4)
+ # Verify we have 2 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 2)
# Archive everything else, make sure default operation without
# before argument didn't break
results = db.archive_deleted_rows(max_rows=1000)
- # Verify we have 2 left in main
- rows = self.conn.execute(qiim).fetchall()
- self.assertEqual(len(rows), 2)
- # Verify we have 4 in shadow
- rows = self.conn.execute(qsiim).fetchall()
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 2 left in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we have 4 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 4)
def test_archive_deleted_rows_for_every_uuid_table(self):
tablenames = []
@@ -5918,94 +5947,117 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
for uuidstr in self.uuidstrs:
ins_stmt = main_table.insert().values(uuid=uuidstr)
try:
- self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
except (db_exc.DBError, sqla_exc.OperationalError):
# This table has constraints that require a table-specific
# insert, so skip it.
return 2
# Set 4 to deleted
- update_statement = main_table.update().\
- where(main_table.c.uuid.in_(self.uuidstrs[:4]))\
- .values(deleted=1, deleted_at=timeutils.utcnow())
- self.conn.execute(update_statement)
+ update_statement = main_table.update().where(
+ main_table.c.uuid.in_(self.uuidstrs[:4])
+ ).values(deleted=1, deleted_at=timeutils.utcnow())
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
qmt = sql.select(main_table).where(
main_table.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qmt).fetchall()
- # Verify we have 6 in main
- self.assertEqual(len(rows), 6)
qst = sql.select(shadow_table).where(
shadow_table.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qst).fetchall()
- # Verify we have 0 in shadow
- self.assertEqual(len(rows), 0)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 6 in main
+ rows = conn.execute(qmt).fetchall()
+ self.assertEqual(len(rows), 6)
+ # Verify we have 0 in shadow
+ rows = conn.execute(qst).fetchall()
+ self.assertEqual(len(rows), 0)
+
# Archive 2 rows
db._archive_deleted_rows_for_table(
self.metadata, self.engine, tablename, max_rows=2, before=None,
task_log=False,
)
- # Verify we have 4 left in main
- rows = self.conn.execute(qmt).fetchall()
- self.assertEqual(len(rows), 4)
- # Verify we have 2 in shadow
- rows = self.conn.execute(qst).fetchall()
- self.assertEqual(len(rows), 2)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 4 left in main
+ rows = conn.execute(qmt).fetchall()
+ self.assertEqual(len(rows), 4)
+ # Verify we have 2 in shadow
+ rows = conn.execute(qst).fetchall()
+ self.assertEqual(len(rows), 2)
+
# Archive 2 more rows
db._archive_deleted_rows_for_table(
self.metadata, self.engine, tablename, max_rows=2, before=None,
task_log=False,
)
- # Verify we have 2 left in main
- rows = self.conn.execute(qmt).fetchall()
- self.assertEqual(len(rows), 2)
- # Verify we have 4 in shadow
- rows = self.conn.execute(qst).fetchall()
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 2 left in main
+ rows = conn.execute(qmt).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we have 4 in shadow
+ rows = conn.execute(qst).fetchall()
+ self.assertEqual(len(rows), 4)
+
# Try to archive more, but there are no deleted rows left.
db._archive_deleted_rows_for_table(
self.metadata, self.engine, tablename, max_rows=2, before=None,
task_log=False,
)
- # Verify we still have 2 left in main
- rows = self.conn.execute(qmt).fetchall()
- self.assertEqual(len(rows), 2)
- # Verify we still have 4 in shadow
- rows = self.conn.execute(qst).fetchall()
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we still have 2 left in main
+ rows = conn.execute(qmt).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we still have 4 in shadow
+ rows = conn.execute(qst).fetchall()
+ self.assertEqual(len(rows), 4)
+
return 0
def test_archive_deleted_rows_shadow_insertions_equals_deletions(self):
# Add 2 rows to table
for uuidstr in self.uuidstrs[:2]:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
+
# Set both to deleted
- update_statement = self.instance_id_mappings.update().\
- where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:2]))\
- .values(deleted=1)
- self.conn.execute(update_statement)
+ update_statement = self.instance_id_mappings.update().where(
+ self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:2])
+ ).values(deleted=1)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
qiim = sql.select(self.instance_id_mappings).where(
self. instance_id_mappings.c.uuid.in_(self.uuidstrs[:2])
)
- rows = self.conn.execute(qiim).fetchall()
- # Verify we have 2 in main
- self.assertEqual(len(rows), 2)
-
qsiim = sql.select(self.shadow_instance_id_mappings).where(
self.shadow_instance_id_mappings.c.uuid.in_(self.uuidstrs[:2])
)
- shadow_rows = self.conn.execute(qsiim).fetchall()
- # Verify we have 0 in shadow
- self.assertEqual(len(shadow_rows), 0)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 2 in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we have 0 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 0)
# Archive the rows
db.archive_deleted_rows(max_rows=2)
- main_rows = self.conn.execute(qiim).fetchall()
- shadow_rows = self.conn.execute(qsiim).fetchall()
- # Verify the insertions into shadow is same as deletions from main
- self.assertEqual(len(shadow_rows), len(rows) - len(main_rows))
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we now have 0 in main
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 0)
+ # Verify we now have 2 in shadow
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 2)
def test_archive_deleted_rows_for_migrations(self):
# migrations.instance_uuid depends on instances.uuid
@@ -6015,13 +6067,18 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
instance_uuid = uuidsentinel.instance
ins_stmt = self.instances.insert().values(
- uuid=instance_uuid,
- deleted=1,
- deleted_at=timeutils.utcnow())
- self.conn.execute(ins_stmt)
- ins_stmt = self.migrations.insert().values(instance_uuid=instance_uuid,
- deleted=0)
- self.conn.execute(ins_stmt)
+ uuid=instance_uuid,
+ deleted=1,
+ deleted_at=timeutils.utcnow(),
+ )
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
+
+ ins_stmt = self.migrations.insert().values(
+ instance_uuid=instance_uuid, deleted=0,
+ )
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
# Archiving instances should result in migrations related to the
# instances also being archived.
num = db._archive_deleted_rows_for_table(
@@ -6037,70 +6094,86 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
# Add 6 rows to each table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
ins_stmt2 = self.instances.insert().values(uuid=uuidstr)
- self.conn.execute(ins_stmt2)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt2)
# Set 4 of each to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1, deleted_at=timeutils.utcnow())
- self.conn.execute(update_statement)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
update_statement2 = self.instances.update().\
where(self.instances.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1, deleted_at=timeutils.utcnow())
- self.conn.execute(update_statement2)
- # Verify we have 6 in each main table
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement2)
+
qiim = sql.select(self.instance_id_mappings).where(
self.instance_id_mappings.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qiim).fetchall()
- self.assertEqual(len(rows), 6)
qi = sql.select(self.instances).where(
self.instances.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(rows), 6)
- # Verify we have 0 in each shadow table
qsiim = sql.select(self.shadow_instance_id_mappings).where(
self.shadow_instance_id_mappings.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qsiim).fetchall()
- self.assertEqual(len(rows), 0)
qsi = sql.select(self.shadow_instances).where(
self.shadow_instances.c.uuid.in_(self.uuidstrs)
)
- rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(rows), 0)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 6 in each main table
+ rows = conn.execute(qiim).fetchall()
+ self.assertEqual(len(rows), 6)
+ rows = conn.execute(qi).fetchall()
+ self.assertEqual(len(rows), 6)
+ # Verify we have 0 in each shadow table
+ rows = conn.execute(qsiim).fetchall()
+ self.assertEqual(len(rows), 0)
+ rows = conn.execute(qsi).fetchall()
+ self.assertEqual(len(rows), 0)
+
# Archive 7 rows, which should be 4 in one table and 3 in the other.
db.archive_deleted_rows(max_rows=7)
- # Verify we have 5 left in the two main tables combined
- iim_rows = self.conn.execute(qiim).fetchall()
- i_rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(iim_rows) + len(i_rows), 5)
- # Verify we have 7 in the two shadow tables combined.
- siim_rows = self.conn.execute(qsiim).fetchall()
- si_rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(siim_rows) + len(si_rows), 7)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 5 left in the two main tables combined
+ iim_rows = conn.execute(qiim).fetchall()
+ i_rows = conn.execute(qi).fetchall()
+ self.assertEqual(len(iim_rows) + len(i_rows), 5)
+ # Verify we have 7 in the two shadow tables combined.
+ siim_rows = conn.execute(qsiim).fetchall()
+ si_rows = conn.execute(qsi).fetchall()
+ self.assertEqual(len(siim_rows) + len(si_rows), 7)
# Archive the remaining deleted rows.
db.archive_deleted_rows(max_rows=1)
- # Verify we have 4 total left in both main tables.
- iim_rows = self.conn.execute(qiim).fetchall()
- i_rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(iim_rows) + len(i_rows), 4)
- # Verify we have 8 in shadow
- siim_rows = self.conn.execute(qsiim).fetchall()
- si_rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(siim_rows) + len(si_rows), 8)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 4 total left in both main tables.
+ iim_rows = conn.execute(qiim).fetchall()
+ i_rows = conn.execute(qi).fetchall()
+ self.assertEqual(len(iim_rows) + len(i_rows), 4)
+ # Verify we have 8 in shadow
+ siim_rows = conn.execute(qsiim).fetchall()
+ si_rows = conn.execute(qsi).fetchall()
+ self.assertEqual(len(siim_rows) + len(si_rows), 8)
+
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows(max_rows=500)
- # Verify we have 4 total left in both main tables.
- iim_rows = self.conn.execute(qiim).fetchall()
- i_rows = self.conn.execute(qi).fetchall()
- self.assertEqual(len(iim_rows) + len(i_rows), 4)
- # Verify we have 8 in shadow
- siim_rows = self.conn.execute(qsiim).fetchall()
- si_rows = self.conn.execute(qsi).fetchall()
- self.assertEqual(len(siim_rows) + len(si_rows), 8)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 4 total left in both main tables.
+ iim_rows = conn.execute(qiim).fetchall()
+ i_rows = conn.execute(qi).fetchall()
+ self.assertEqual(len(iim_rows) + len(i_rows), 4)
+ # Verify we have 8 in shadow
+ siim_rows = conn.execute(qsiim).fetchall()
+ si_rows = conn.execute(qsi).fetchall()
+ self.assertEqual(len(siim_rows) + len(si_rows), 8)
+
self._assert_shadow_tables_empty_except(
'shadow_instances',
'shadow_instance_id_mappings'
@@ -6112,34 +6185,47 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
ins_stmt = self.task_log.insert().values(
id=i, task_name='instance_usage_audit', state='DONE',
host='host', message='message')
- self.conn.execute(ins_stmt)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(ins_stmt)
+
# Set 1 to updated before 2017-01-01
updated_at = timeutils.parse_strtime('2017-01-01T00:00:00.0')
update_statement = self.task_log.update().where(
- self.task_log.c.id == 1).values(updated_at=updated_at)
- self.conn.execute(update_statement)
+ self.task_log.c.id == 1
+ ).values(updated_at=updated_at)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
# Set 1 to updated before 2017-01-02
updated_at = timeutils.parse_strtime('2017-01-02T00:00:00.0')
update_statement = self.task_log.update().where(
- self.task_log.c.id == 2).values(updated_at=updated_at)
- self.conn.execute(update_statement)
+ self.task_log.c.id == 2
+ ).values(updated_at=updated_at)
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
# Set 2 to updated now
update_statement = self.task_log.update().where(
- self.task_log.c.id.in_(range(3, 5))).values(
- updated_at=timeutils.utcnow())
- self.conn.execute(update_statement)
- # Verify we have 6 in main
+ self.task_log.c.id.in_(range(3, 5))
+ ).values(updated_at=timeutils.utcnow())
+ with self.engine.connect() as conn, conn.begin():
+ conn.execute(update_statement)
+
qtl = sql.select(self.task_log).where(
self.task_log.c.id.in_(range(1, 7))
)
- rows = self.conn.execute(qtl).fetchall()
- self.assertEqual(len(rows), 6)
- # Verify we have 0 in shadow
qstl = sql.select(self.shadow_task_log).where(
self.shadow_task_log.c.id.in_(range(1, 7))
)
- rows = self.conn.execute(qstl).fetchall()
- self.assertEqual(len(rows), 0)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 6 in main
+ rows = conn.execute(qtl).fetchall()
+ self.assertEqual(len(rows), 6)
+ # Verify we have 0 in shadow
+ rows = conn.execute(qstl).fetchall()
+ self.assertEqual(len(rows), 0)
+
# Make sure 'before' comparison is for < not <=
before_date = dateutil_parser.parse('2017-01-01', fuzzy=True)
_, _, rows = db.archive_deleted_rows(
@@ -6161,22 +6247,27 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
results = db.archive_deleted_rows(max_rows=2, task_log=True)
expected = dict(task_log=2)
self._assertEqualObjects(expected, results[0])
- # Verify we have 2 left in main
- rows = self.conn.execute(qtl).fetchall()
- self.assertEqual(len(rows), 2)
- # Verify we have 4 in shadow
- rows = self.conn.execute(qstl).fetchall()
- self.assertEqual(len(rows), 4)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 2 left in main
+ rows = conn.execute(qtl).fetchall()
+ self.assertEqual(len(rows), 2)
+ # Verify we have 4 in shadow
+ rows = conn.execute(qstl).fetchall()
+ self.assertEqual(len(rows), 4)
+
# Archive the rest
results = db.archive_deleted_rows(max_rows=100, task_log=True)
expected = dict(task_log=2)
self._assertEqualObjects(expected, results[0])
- # Verify we have 0 left in main
- rows = self.conn.execute(qtl).fetchall()
- self.assertEqual(len(rows), 0)
- # Verify we have 6 in shadow
- rows = self.conn.execute(qstl).fetchall()
- self.assertEqual(len(rows), 6)
+
+ with self.engine.connect() as conn, conn.begin():
+ # Verify we have 0 left in main
+ rows = conn.execute(qtl).fetchall()
+ self.assertEqual(len(rows), 0)
+ # Verify we have 6 in shadow
+ rows = conn.execute(qstl).fetchall()
+ self.assertEqual(len(rows), 6)
class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
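
The archive tests above stop reusing a single long-lived self.conn and instead open a short-lived connection with an explicit transaction per statement, which is the SQLAlchemy 2.0-style usage pattern. A minimal standalone sketch of that pattern, using a throwaway in-memory engine rather than the fixture-provided one:

    # sketch only: a stand-in engine and table, not the test fixtures
    import sqlalchemy as sa

    engine = sa.create_engine('sqlite://')
    metadata = sa.MetaData()
    demo = sa.Table('demo', metadata, sa.Column('uuid', sa.String(36)))
    metadata.create_all(engine)

    # writes: connection plus explicit transaction, committed on exit
    with engine.connect() as conn, conn.begin():
        conn.execute(demo.insert().values(uuid='fake-uuid'))

    # reads: same pattern; fetchall() drains the result before the
    # connection goes back to the pool
    with engine.connect() as conn, conn.begin():
        rows = conn.execute(sa.select(demo)).fetchall()
        assert len(rows) == 1
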
diff --git a/nova/tests/unit/db/main/test_migrations.py b/nova/tests/unit/db/main/test_migrations.py
index f5ce3697b3..579888cfd2 100644
--- a/nova/tests/unit/db/main/test_migrations.py
+++ b/nova/tests/unit/db/main/test_migrations.py
@@ -25,11 +25,11 @@ test will then use that DB and username/password combo to run the tests. Refer
to the 'tools/test-setup.sh' for an example of how to configure this.
"""
+from unittest import mock
+
from alembic import command as alembic_api
from alembic import script as alembic_script
import fixtures
-from migrate.versioning import api as migrate_api
-import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
@@ -173,47 +173,6 @@ class TestModelsSyncPostgreSQL(
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-class NovaModelsMigrationsLegacySync(NovaModelsMigrationsSync):
- """Test that the models match the database after old migrations are run."""
-
- def db_sync(self, engine):
- # the 'nova.db.migration.db_sync' method will not use the legacy
- # sqlalchemy-migrate-based migration flow unless the database is
- # already controlled with sqlalchemy-migrate, so we need to manually
- # enable version controlling with this tool to test this code path
- repository = migration._find_migrate_repo(database='main')
- migrate_api.version_control(
- engine, repository, migration.MIGRATE_INIT_VERSION['main'])
-
- # now we can apply migrations as expected and the legacy path will be
- # followed
- super().db_sync(engine)
-
-
-class TestModelsLegacySyncSQLite(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- base.BaseTestCase,
-):
- pass
-
-
-class TestModelsLegacySyncMySQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- base.BaseTestCase,
-):
- FIXTURE = test_fixtures.MySQLOpportunisticFixture
-
-
-class TestModelsLegacySyncPostgreSQL(
- NovaModelsMigrationsLegacySync,
- test_fixtures.OpportunisticDBTestMixin,
- base.BaseTestCase,
-):
- FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
-
-
class NovaMigrationsWalk(
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
):
@@ -226,7 +185,7 @@ class NovaMigrationsWalk(
super().setUp()
self.engine = enginefacade.writer.get_engine()
self.config = migration._find_alembic_conf('main')
- self.init_version = migration.ALEMBIC_INIT_VERSION['main']
+ self.init_version = '8f2f1571d55b'
def assertIndexExists(self, connection, table_name, index):
self.assertTrue(
@@ -240,6 +199,12 @@ class NovaMigrationsWalk(
'Index %s on table %s should not exist' % (index, table_name),
)
+ def assertColumnExists(self, connection, table_name, column):
+ self.assertTrue(
+ oslodbutils.column_exists(connection, table_name, column),
+ 'Column %s on table %s should exist' % (column, table_name),
+ )
+
def _migrate_up(self, connection, revision):
if revision == self.init_version: # no tests for the initial revision
alembic_api.upgrade(self.config, revision)
@@ -280,6 +245,42 @@ class NovaMigrationsWalk(
# no check for the MySQL-specific change
+ def _check_ccb0fa1a2252(self, connection):
+ for prefix in ('', 'shadow_'):
+ table_name = prefix + 'block_device_mapping'
+ table = oslodbutils.get_table(connection, table_name)
+
+ self.assertColumnExists(connection, table_name, 'encrypted')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_secret_uuid')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_format')
+ self.assertColumnExists(
+ connection, table_name, 'encryption_options')
+
+ # Only check for the expected types if we're using sqlite because
+ # other databases' types may be different. For example, Boolean
+ # may be represented as an integer in MySQL
+ if connection.engine.name != 'sqlite':
+ return
+
+ self.assertIsInstance(table.c.encrypted.type, sa.types.Boolean)
+ self.assertIsInstance(
+ table.c.encryption_secret_uuid.type, sa.types.String)
+ self.assertIsInstance(
+ table.c.encryption_format.type, sa.types.String)
+ self.assertIsInstance(
+ table.c.encryption_options.type, sa.types.String)
+
+ def _check_960aac0e09ea(self, connection):
+ self.assertIndexNotExists(
+ connection, 'console_auth_tokens',
+ 'console_auth_tokens_token_hash_idx',
+ )
+ self.assertIndexNotExists(
+ connection, 'instances', 'uuid',
+ )
+
def test_single_base_revision(self):
"""Ensure we only have a single base revision.
diff --git a/nova/tests/unit/db/test_migration.py b/nova/tests/unit/db/test_migration.py
index 6657bc48e0..17a099a8cc 100644
--- a/nova/tests/unit/db/test_migration.py
+++ b/nova/tests/unit/db/test_migration.py
@@ -12,14 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import glob
-import os
+from unittest import mock
import urllib
from alembic.runtime import migration as alembic_migration
-from migrate import exceptions as migrate_exceptions
-from migrate.versioning import api as migrate_api
-import mock
from nova.db.api import api as api_db_api
from nova.db.main import api as main_db_api
@@ -56,7 +52,7 @@ class TestDBURL(test.NoDBTestCase):
class TestDBSync(test.NoDBTestCase):
- def test_db_sync_invalid_databse(self):
+ def test_db_sync_invalid_database(self):
"""We only have two databases."""
self.assertRaises(
exception.Invalid, migration.db_sync, database='invalid')
@@ -68,17 +64,9 @@ class TestDBSync(test.NoDBTestCase):
migration.db_sync, '402')
@mock.patch.object(migration, '_upgrade_alembic')
- @mock.patch.object(migration, '_init_alembic_on_legacy_database')
- @mock.patch.object(migration, '_is_database_under_alembic_control')
- @mock.patch.object(migration, '_is_database_under_migrate_control')
@mock.patch.object(migration, '_find_alembic_conf')
- @mock.patch.object(migration, '_find_migrate_repo')
@mock.patch.object(migration, '_get_engine')
- def _test_db_sync(
- self, has_migrate, has_alembic, mock_get_engine, mock_find_repo,
- mock_find_conf, mock_is_migrate, mock_is_alembic, mock_init,
- mock_upgrade,
- ):
+ def test_db_sync(self, mock_get_engine, mock_find_conf, mock_upgrade):
# return an encoded URL to mimic sqlalchemy
mock_get_engine.return_value.url = (
@@ -86,13 +74,10 @@ class TestDBSync(test.NoDBTestCase):
'read_default_file=%2Fetc%2Fmy.cnf.d%2Fnova.cnf'
'&read_default_group=nova'
)
- mock_is_migrate.return_value = has_migrate
- mock_is_alembic.return_value = has_alembic
migration.db_sync()
mock_get_engine.assert_called_once_with('main', context=None)
- mock_find_repo.assert_called_once_with('main')
mock_find_conf.assert_called_once_with('main')
mock_find_conf.return_value.set_main_option.assert_called_once_with(
'sqlalchemy.url',
@@ -100,93 +85,25 @@ class TestDBSync(test.NoDBTestCase):
'read_default_file=%%2Fetc%%2Fmy.cnf.d%%2Fnova.cnf' # ...
'&read_default_group=nova'
)
- mock_is_migrate.assert_called_once_with(
- mock_get_engine.return_value, mock_find_repo.return_value)
-
- if has_migrate:
- mock_is_alembic.assert_called_once_with(
- mock_get_engine.return_value)
- else:
- mock_is_alembic.assert_not_called()
-
- # we should only attempt the upgrade of the remaining
- # sqlalchemy-migrate-based migrations and fake apply of the initial
- # alembic migrations if sqlalchemy-migrate is in place but alembic
- # hasn't been used yet
- if has_migrate and not has_alembic:
- mock_init.assert_called_once_with(
- mock_get_engine.return_value, 'main',
- mock_find_repo.return_value, mock_find_conf.return_value)
- else:
- mock_init.assert_not_called()
- # however, we should always attempt to upgrade the requested migration
- # to alembic
mock_upgrade.assert_called_once_with(
- mock_get_engine.return_value, mock_find_conf.return_value, None)
-
- def test_db_sync_new_deployment(self):
- """Mimic a new deployment without existing sqlalchemy-migrate cruft."""
- has_migrate = False
- has_alembic = False
- self._test_db_sync(has_migrate, has_alembic)
-
- def test_db_sync_with_existing_migrate_database(self):
- """Mimic a deployment currently managed by sqlalchemy-migrate."""
- has_migrate = True
- has_alembic = False
- self._test_db_sync(has_migrate, has_alembic)
-
- def test_db_sync_with_existing_alembic_database(self):
- """Mimic a deployment that's already switched to alembic."""
- has_migrate = True
- has_alembic = True
- self._test_db_sync(has_migrate, has_alembic)
+ mock_get_engine.return_value, mock_find_conf.return_value, None,
+ )
@mock.patch.object(alembic_migration.MigrationContext, 'configure')
-@mock.patch.object(migrate_api, 'db_version')
-@mock.patch.object(migration, '_is_database_under_alembic_control')
-@mock.patch.object(migration, '_is_database_under_migrate_control')
@mock.patch.object(migration, '_get_engine')
-@mock.patch.object(migration, '_find_migrate_repo')
class TestDBVersion(test.NoDBTestCase):
- def test_db_version_invalid_databse(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
+ def test_db_version_invalid_database(
+ self, mock_get_engine, mock_m_context_configure,
):
"""We only have two databases."""
self.assertRaises(
exception.Invalid, migration.db_version, database='invalid')
- def test_db_version_migrate(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
- ):
- """Database is controlled by sqlalchemy-migrate."""
- mock_is_migrate.return_value = True
- mock_is_alembic.return_value = False
-
- ret = migration.db_version('main')
- self.assertEqual(mock_migrate_version.return_value, ret)
-
- mock_find_repo.assert_called_once_with('main')
- mock_get_engine.assert_called_once_with('main', context=None)
- mock_is_migrate.assert_called_once()
- mock_is_alembic.assert_called_once()
- mock_migrate_version.assert_called_once_with(
- mock_get_engine.return_value, mock_find_repo.return_value)
- mock_m_context_configure.assert_not_called()
-
- def test_db_version_alembic(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
- ):
+ def test_db_version(self, mock_get_engine, mock_m_context_configure):
"""Database is controlled by alembic."""
- mock_is_migrate.return_value = False
- mock_is_alembic.return_value = True
-
ret = migration.db_version('main')
mock_m_context = mock_m_context_configure.return_value
self.assertEqual(
@@ -194,31 +111,9 @@ class TestDBVersion(test.NoDBTestCase):
ret
)
- mock_find_repo.assert_called_once_with('main')
mock_get_engine.assert_called_once_with('main', context=None)
- mock_is_migrate.assert_called_once()
- mock_is_alembic.assert_called_once()
- mock_migrate_version.assert_not_called()
mock_m_context_configure.assert_called_once()
- def test_db_version_not_controlled(
- self, mock_find_repo, mock_get_engine, mock_is_migrate,
- mock_is_alembic, mock_migrate_version, mock_m_context_configure,
- ):
- """Database is not controlled."""
- mock_is_migrate.return_value = False
- mock_is_alembic.return_value = False
-
- ret = migration.db_version()
- self.assertIsNone(ret)
-
- mock_find_repo.assert_called_once_with('main')
- mock_get_engine.assert_called_once_with('main', context=None)
- mock_is_migrate.assert_called_once()
- mock_is_alembic.assert_called_once()
- mock_migrate_version.assert_not_called()
- mock_m_context_configure.assert_not_called()
-
class TestGetEngine(test.NoDBTestCase):
@@ -237,77 +132,3 @@ class TestGetEngine(test.NoDBTestCase):
engine = migration._get_engine('api')
self.assertEqual('engine', engine)
mock_get_engine.assert_called_once_with()
-
-
-class TestDatabaseUnderVersionControl(test.NoDBTestCase):
-
- @mock.patch.object(migrate_api, 'db_version')
- def test__is_database_under_migrate_control__true(self, mock_db_version):
- ret = migration._is_database_under_migrate_control('engine', 'repo')
- self.assertTrue(ret)
-
- mock_db_version.assert_called_once_with('engine', 'repo')
-
- @mock.patch.object(migrate_api, 'db_version')
- def test__is_database_under_migrate_control__false(self, mock_db_version):
- mock_db_version.side_effect = \
- migrate_exceptions.DatabaseNotControlledError()
-
- ret = migration._is_database_under_migrate_control('engine', 'repo')
- self.assertFalse(ret)
-
- mock_db_version.assert_called_once_with('engine', 'repo')
-
- @mock.patch.object(alembic_migration.MigrationContext, 'configure')
- def test__is_database_under_alembic_control__true(self, mock_configure):
- context = mock_configure.return_value
- context.get_current_revision.return_value = 'foo'
- engine = mock.MagicMock()
-
- ret = migration._is_database_under_alembic_control(engine)
- self.assertTrue(ret)
-
- context.get_current_revision.assert_called_once_with()
-
- @mock.patch.object(alembic_migration.MigrationContext, 'configure')
- def test__is_database_under_alembic_control__false(self, mock_configure):
- context = mock_configure.return_value
- context.get_current_revision.return_value = None
- engine = mock.MagicMock()
-
- ret = migration._is_database_under_alembic_control(engine)
- self.assertFalse(ret)
-
- context.get_current_revision.assert_called_once_with()
-
-
-class ProjectTestCase(test.NoDBTestCase):
-
- def test_no_migrations_have_downgrade(self):
- topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
- # Walk both the nova_api and nova (cell) database migrations.
- includes_downgrade = []
- for directory in (
- os.path.join(topdir, 'db', 'main', 'legacy_migrations'),
- os.path.join(topdir, 'db', 'api', 'legacy_migrations'),
- ):
- py_glob = os.path.join(directory, 'versions', '*.py')
- for path in glob.iglob(py_glob):
- has_upgrade = False
- has_downgrade = False
- with open(path, "r") as f:
- for line in f:
- if 'def upgrade(' in line:
- has_upgrade = True
- if 'def downgrade(' in line:
- has_downgrade = True
-
- if has_upgrade and has_downgrade:
- fname = os.path.basename(path)
- includes_downgrade.append(fname)
-
- helpful_msg = (
- "The following migrations have a downgrade "
- "which is not supported:"
- "\n\t%s" % '\n\t'.join(sorted(includes_downgrade)))
- self.assertFalse(includes_downgrade, helpful_msg)
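
With the sqlalchemy-migrate code paths deleted, db_version is alembic-only and the surviving TestDBVersion case just checks alembic's MigrationContext. A minimal sketch of that lookup against a throwaway engine (the URL is a stand-in):

    import sqlalchemy as sa
    from alembic.runtime import migration as alembic_migration


    def current_db_revision(url='sqlite://'):
        engine = sa.create_engine(url)
        with engine.connect() as connection:
            m_context = alembic_migration.MigrationContext.configure(connection)
            # returns None for a database alembic has never stamped
            return m_context.get_current_revision()


    print(current_db_revision())  # -> None for the in-memory database
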
diff --git a/nova/tests/unit/fake_policy.py b/nova/tests/unit/fake_policy.py
index bfc90e119e..2f8c483554 100644
--- a/nova/tests/unit/fake_policy.py
+++ b/nova/tests/unit/fake_policy.py
@@ -44,6 +44,7 @@ policy_data = """
"os_compute_api:servers:trigger_crash_dump": "",
"os_compute_api:servers:show:host_status": "",
"os_compute_api:servers:show": "",
+    "os_compute_api:servers:show:flavor-extra-specs": "",
"os_compute_api:servers:show:host_status:unknown-only": "",
"os_compute_api:servers:allow_all_filters": "",
"os_compute_api:servers:migrations:force_complete": "",
diff --git a/nova/tests/unit/fixtures/test_libvirt.py b/nova/tests/unit/fixtures/test_libvirt.py
index eab9c54a13..448f8f6720 100644
--- a/nova/tests/unit/fixtures/test_libvirt.py
+++ b/nova/tests/unit/fixtures/test_libvirt.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from lxml import etree
-import mock
from oslo_utils import uuidutils
from nova.objects import fields as obj_fields
diff --git a/nova/tests/unit/image/test_glance.py b/nova/tests/unit/image/test_glance.py
index 4f35f060e4..935a271d44 100644
--- a/nova/tests/unit/image/test_glance.py
+++ b/nova/tests/unit/image/test_glance.py
@@ -18,6 +18,7 @@ import copy
import datetime
import io
from io import StringIO
+from unittest import mock
import urllib.parse as urlparse
import cryptography
@@ -28,7 +29,6 @@ import glanceclient.exc
from glanceclient.v1 import images
from glanceclient.v2 import schemas
from keystoneauth1 import loading as ks_loading
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
import testtools
diff --git a/nova/db/api/legacy_migrations/versions/__init__.py b/nova/tests/unit/limit/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/db/api/legacy_migrations/versions/__init__.py
+++ b/nova/tests/unit/limit/__init__.py
diff --git a/nova/tests/unit/limit/test_local.py b/nova/tests/unit/limit/test_local.py
new file mode 100644
index 0000000000..8bf163d69f
--- /dev/null
+++ b/nova/tests/unit/limit/test_local.py
@@ -0,0 +1,256 @@
+# Copyright 2022 StackHPC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+from unittest import mock
+
+from oslo_config import cfg
+from oslo_limit import exception as limit_exceptions
+from oslo_limit import fixture as limit_fixture
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova import context
+from nova import exception
+from nova.limit import local as local_limit
+from nova.limit import utils as limit_utils
+from nova import objects
+from nova import test
+
+CONF = cfg.CONF
+
+
+class TestLocalLimits(test.NoDBTestCase):
+ def setUp(self):
+ super(TestLocalLimits, self).setUp()
+ self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
+ self.context = context.RequestContext()
+
+ def test_enforce_api_limit_metadata(self):
+ # default max is 128
+ self.useFixture(limit_fixture.LimitFixture(
+ {local_limit.SERVER_METADATA_ITEMS: 128}, {}))
+ local_limit.enforce_api_limit(local_limit.SERVER_METADATA_ITEMS, 128)
+
+ e = self.assertRaises(exception.MetadataLimitExceeded,
+ local_limit.enforce_api_limit,
+ local_limit.SERVER_METADATA_ITEMS, 129)
+ msg = ("Resource %s is over limit" % local_limit.SERVER_METADATA_ITEMS)
+ self.assertIn(msg, str(e))
+
+ def test_enforce_api_limit_skip(self):
+ self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
+ local_limit.enforce_api_limit(local_limit.SERVER_METADATA_ITEMS, 200)
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_api_limit_session_init_error(self, mock_util):
+ mock_util.side_effect = limit_exceptions.SessionInitError('error')
+
+ e = self.assertRaises(exception.KeystoneConnectionFailed,
+ local_limit.enforce_api_limit,
+ local_limit.SERVER_METADATA_ITEMS, 42)
+ expected = ('Failed to connect to keystone while enforcing '
+ 'server_metadata_items quota limit.')
+ self.assertIn(expected, str(e))
+
+ def test_enforce_api_limit_raises_for_invalid_entity(self):
+ e = self.assertRaises(ValueError,
+ local_limit.enforce_api_limit,
+ local_limit.KEY_PAIRS, 42)
+ expected = '%s is not a valid API limit: %s' % (
+ local_limit.KEY_PAIRS, local_limit.API_LIMITS)
+ self.assertEqual(expected, str(e))
+
+ def test_enforce_api_limit_no_registered_limit_found(self):
+ self.useFixture(limit_fixture.LimitFixture({}, {}))
+ e = self.assertRaises(exception.MetadataLimitExceeded,
+ local_limit.enforce_api_limit,
+ local_limit.SERVER_METADATA_ITEMS, 42)
+ msg = ("Resource %s is over limit" % local_limit.SERVER_METADATA_ITEMS)
+ self.assertIn(msg, str(e))
+
+ def test_enforce_injected_files(self):
+ reglimits = {local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+
+ local_limit.enforce_api_limit(local_limit.INJECTED_FILES, 5)
+ local_limit.enforce_api_limit(local_limit.INJECTED_FILES_CONTENT,
+ 10 * 1024)
+ local_limit.enforce_api_limit(local_limit.INJECTED_FILES_PATH, 255)
+
+ e = self.assertRaises(exception.OnsetFileLimitExceeded,
+ local_limit.enforce_api_limit,
+ local_limit.INJECTED_FILES, 6)
+ msg = ("Resource %s is over limit" % local_limit.INJECTED_FILES)
+ self.assertIn(msg, str(e))
+ e = self.assertRaises(exception.OnsetFileContentLimitExceeded,
+ local_limit.enforce_api_limit,
+ local_limit.INJECTED_FILES_CONTENT,
+ 10 * 1024 + 1)
+ msg = (
+ "Resource %s is over limit" % local_limit.INJECTED_FILES_CONTENT)
+ self.assertIn(msg, str(e))
+ e = self.assertRaises(exception.OnsetFilePathLimitExceeded,
+ local_limit.enforce_api_limit,
+ local_limit.INJECTED_FILES_PATH, 256)
+ msg = ("Resource %s is over limit" % local_limit.INJECTED_FILES_PATH)
+ self.assertIn(msg, str(e))
+
+ @mock.patch.object(objects.KeyPairList, "get_count_by_user")
+ def test_enforce_db_limit_keypairs(self, mock_count):
+ self.useFixture(limit_fixture.LimitFixture(
+ {local_limit.KEY_PAIRS: 100}, {}))
+
+ mock_count.return_value = 99
+ local_limit.enforce_db_limit(self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, 1)
+ mock_count.assert_called_once_with(self.context, uuids.user_id)
+
+ self.assertRaises(exception.KeypairLimitExceeded,
+ local_limit.enforce_db_limit,
+ self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, 2)
+
+ mock_count.return_value = 100
+ local_limit.enforce_db_limit(self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, 0)
+ mock_count.return_value = 101
+ self.assertRaises(exception.KeypairLimitExceeded,
+ local_limit.enforce_db_limit,
+ self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, 0)
+
+ def test_enforce_db_limit_skip(self):
+ self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
+ local_limit.enforce_db_limit(self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, 1)
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_db_limit_session_init_error(self, mock_util):
+ mock_util.side_effect = limit_exceptions.SessionInitError(
+ test.TestingException())
+
+ e = self.assertRaises(exception.KeystoneConnectionFailed,
+ local_limit.enforce_db_limit, self.context,
+ local_limit.KEY_PAIRS, uuids.user_id, 42)
+ expected = ('Failed to connect to keystone while enforcing '
+ 'server_key_pairs quota limit.')
+ self.assertEqual(expected, str(e))
+
+ def test_enforce_db_limit_raise_on_invalid(self):
+ e = self.assertRaises(ValueError, local_limit.enforce_db_limit,
+ self.context, local_limit.INJECTED_FILES,
+ uuids.user_id, 1)
+ fmt = '%s does not have a DB count function defined: %s'
+ expected = fmt % (
+ local_limit.INJECTED_FILES, local_limit.DB_COUNT_FUNCTION.keys())
+ self.assertEqual(expected, str(e))
+
+ @mock.patch.object(objects.KeyPairList, "get_count_by_user")
+ def test_enforce_db_limit_no_registered_limit_found(self, mock_count):
+ self.useFixture(limit_fixture.LimitFixture({}, {}))
+ mock_count.return_value = 5
+ e = self.assertRaises(exception.KeypairLimitExceeded,
+ local_limit.enforce_db_limit, self.context,
+ local_limit.KEY_PAIRS, uuids.user_id, 42)
+ msg = ("Resource %s is over limit" % local_limit.KEY_PAIRS)
+ self.assertIn(msg, str(e))
+
+ def test_enforce_db_limit_raise_bad_delta(self):
+ e = self.assertRaises(ValueError, local_limit.enforce_db_limit,
+ self.context, local_limit.KEY_PAIRS,
+ uuids.user_id, -1)
+ self.assertEqual("delta must be a positive integer", str(e))
+
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_enforce_db_limit_server_groups(self, mock_count):
+ self.useFixture(limit_fixture.LimitFixture(
+ {local_limit.SERVER_GROUPS: 10}, {}))
+
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ local_limit.enforce_db_limit(self.context, local_limit.SERVER_GROUPS,
+ uuids.project_id, 1)
+ mock_count.assert_called_once_with(self.context, uuids.project_id)
+
+ self.assertRaises(exception.ServerGroupLimitExceeded,
+ local_limit.enforce_db_limit,
+ self.context, local_limit.SERVER_GROUPS,
+ uuids.project_id, 2)
+
+ @mock.patch.object(objects.InstanceGroup, "get_by_uuid")
+ def test_enforce_db_limit_server_group_members(self, mock_get):
+ self.useFixture(limit_fixture.LimitFixture(
+ {local_limit.SERVER_GROUP_MEMBERS: 10}, {}))
+
+ mock_get.return_value = objects.InstanceGroup(members=[])
+ local_limit.enforce_db_limit(self.context,
+ local_limit.SERVER_GROUP_MEMBERS,
+ uuids.server_group, 10)
+ mock_get.assert_called_once_with(self.context, uuids.server_group)
+
+ self.assertRaises(exception.GroupMemberLimitExceeded,
+ local_limit.enforce_db_limit,
+ self.context, local_limit.SERVER_GROUP_MEMBERS,
+ uuids.server_group, 11)
+
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_get_in_use(self, mock_count):
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ usages = local_limit.get_in_use(self.context, uuids.project_id)
+ expected_usages = {
+ 'injected_file_content_bytes': 0,
+ 'injected_file_path_bytes': 0,
+ 'injected_files': 0,
+ 'key_pairs': 0,
+ 'metadata_items': 0,
+ 'server_group_members': 0,
+ 'server_groups': 9
+ }
+ self.assertEqual(expected_usages, usages)
+
+
+class GetLegacyLimitsTest(test.NoDBTestCase):
+ def setUp(self):
+ super(GetLegacyLimitsTest, self).setUp()
+ self.new = {"server_metadata_items": 1,
+ "server_injected_files": 2,
+ "server_injected_file_content_bytes": 3,
+ "server_injected_file_path_bytes": 4,
+ "server_key_pairs": 5,
+ "server_groups": 6,
+ "server_group_members": 7}
+ self.legacy = {"metadata_items": 1,
+ "injected_files": 2,
+ "injected_file_content_bytes": 3,
+ "injected_file_path_bytes": 4,
+ "key_pairs": 5,
+ "server_groups": 6,
+ "server_group_members": 7}
+ self.resources = list(local_limit.API_LIMITS | local_limit.DB_LIMITS)
+ self.resources.sort()
+ self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
+
+ def test_convert_keys_to_legacy_name(self):
+ limits = local_limit._convert_keys_to_legacy_name(self.new)
+ self.assertEqual(self.legacy, limits)
+
+ def test_get_legacy_default_limits(self):
+ reglimits = copy.deepcopy(self.new)
+ reglimits.pop('server_key_pairs')
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+ limits = local_limit.get_legacy_default_limits()
+ expected = copy.deepcopy(self.legacy)
+ expected['key_pairs'] = 0
+ self.assertEqual(expected, limits)
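
The local-limit tests rely on oslo.limit's LimitFixture to supply registered limits, then call into nova.limit.local directly. A condensed usage sketch of that pattern, mirroring the metadata case above; the limit value of 3 is arbitrary:

    from oslo_limit import fixture as limit_fixture

    from nova import exception
    from nova.limit import local as local_limit
    from nova.limit import utils as limit_utils
    from nova import test


    class LocalLimitSketch(test.NoDBTestCase):
        def setUp(self):
            super(LocalLimitSketch, self).setUp()
            self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")

        def test_metadata_items(self):
            self.useFixture(limit_fixture.LimitFixture(
                {local_limit.SERVER_METADATA_ITEMS: 3}, {}))
            # at the limit: passes
            local_limit.enforce_api_limit(
                local_limit.SERVER_METADATA_ITEMS, 3)
            # over the limit: raises the nova-specific exception
            self.assertRaises(
                exception.MetadataLimitExceeded,
                local_limit.enforce_api_limit,
                local_limit.SERVER_METADATA_ITEMS, 4)
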
diff --git a/nova/tests/unit/limit/test_placement.py b/nova/tests/unit/limit/test_placement.py
new file mode 100644
index 0000000000..3640890c74
--- /dev/null
+++ b/nova/tests/unit/limit/test_placement.py
@@ -0,0 +1,353 @@
+# Copyright 2022 StackHPC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from oslo_config import cfg
+from oslo_limit import exception as limit_exceptions
+from oslo_limit import fixture as limit_fixture
+from oslo_limit import limit
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova import context
+from nova import exception
+from nova.limit import placement as placement_limits
+from nova.limit import utils as limit_utils
+from nova import objects
+from nova import quota
+from nova.scheduler.client import report
+from nova import test
+
+CONF = cfg.CONF
+
+
+class TestGetUsage(test.NoDBTestCase):
+ def setUp(self):
+ super(TestGetUsage, self).setUp()
+ self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
+ self.context = context.RequestContext()
+
+ @mock.patch.object(quota, "is_qfd_populated")
+ @mock.patch.object(objects.InstanceMappingList, "get_counts")
+ @mock.patch.object(report.SchedulerReportClient,
+ "get_usages_counts_for_limits")
+ def test_get_usage(self, mock_placement, mock_inst, mock_qfd):
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB",
+ "class:CUSTOM_BAREMETAL"]
+ mock_qfd.return_value = True
+ mock_placement.return_value = {"VCPU": 1, "CUSTOM_BAREMETAL": 2}
+ mock_inst.return_value = {"project": {"instances": 42}}
+
+ usage = placement_limits._get_usage(self.context, uuids.project,
+ resources)
+
+ expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42,
+ 'class:CUSTOM_BAREMETAL': 2}
+ self.assertDictEqual(expected, usage)
+
+ def test_get_usage_bad_resources(self):
+ bad_resource = ["unknown_resource"]
+ self.assertRaises(ValueError, placement_limits._get_usage,
+ self.context, uuids.project, bad_resource)
+ bad_class = ["class:UNKNOWN_CLASS"]
+ self.assertRaises(ValueError, placement_limits._get_usage,
+ self.context, uuids.project, bad_class)
+ no_resources = []
+ self.assertRaises(ValueError, placement_limits._get_usage,
+ self.context, uuids.project, no_resources)
+
+ @mock.patch.object(quota, "is_qfd_populated")
+ def test_get_usage_bad_qfd(self, mock_qfd):
+ mock_qfd.return_value = False
+ resources = ["servers"]
+ e = self.assertRaises(ValueError, placement_limits._get_usage,
+ self.context, uuids.project, resources)
+ self.assertEqual("must first migrate instance mappings", str(e))
+
+ def test_get_usage_unified_limits_disabled(self):
+ self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
+ e = self.assertRaises(NotImplementedError, placement_limits._get_usage,
+ self.context, uuids.project, [])
+ self.assertEqual("Unified limits support is disabled", str(e))
+
+ @mock.patch.object(quota, "is_qfd_populated")
+ @mock.patch.object(objects.InstanceMappingList, "get_counts")
+ @mock.patch.object(report.SchedulerReportClient,
+ 'get_usages_counts_for_limits')
+ def test_get_usage_placement_fail(self, mock_placement, mock_inst,
+ mock_qfd):
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB",
+ "class:CUSTOM_BAREMETAL"]
+ mock_qfd.return_value = True
+ mock_placement.side_effect = exception.UsagesRetrievalFailed(
+ project_id=uuids.project, user_id=uuids.user)
+ mock_inst.return_value = {"project": {"instances": 42}}
+
+ e = self.assertRaises(
+ exception.UsagesRetrievalFailed, placement_limits._get_usage,
+ self.context, uuids.project, resources)
+
+ expected = ("Failed to retrieve usages from placement while enforcing "
+ "%s quota limits." % ", ".join(resources))
+ self.assertEqual(expected, str(e))
+
+ @mock.patch.object(quota, "is_qfd_populated")
+ @mock.patch.object(objects.InstanceMappingList, "get_counts")
+ @mock.patch.object(report.SchedulerReportClient,
+ "get_usages_counts_for_limits")
+ def test_get_usage_pcpu_as_vcpu(self, mock_placement, mock_inst, mock_qfd):
+ # Test that when configured, PCPU count is merged into VCPU count when
+ # appropriate.
+ self.flags(unified_limits_count_pcpu_as_vcpu=True, group="workarounds")
+ mock_qfd.return_value = True
+ mock_inst.return_value = {"project": {"instances": 42}}
+
+ # PCPU was not specified in the flavor but usage was found in
+ # placement. PCPU count should be merged into VCPU count.
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
+ mock_placement.return_value = {"VCPU": 1, "PCPU": 2}
+
+ usage = placement_limits._get_usage(self.context, uuids.project,
+ resources)
+
+ expected = {'class:MEMORY_MB': 0, 'class:VCPU': 3, 'servers': 42}
+ self.assertDictEqual(expected, usage)
+
+ # PCPU was not specified in the flavor and usage was found in placement
+ # and there was no VCPU usage in placement. The PCPU count should be
+ # returned as VCPU count.
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
+ mock_placement.return_value = {"PCPU": 1}
+
+ usage = placement_limits._get_usage(self.context, uuids.project,
+ resources)
+
+ expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42}
+ self.assertDictEqual(expected, usage)
+
+ # PCPU was not specified in the flavor but only VCPU usage was found in
+ # placement.
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
+ mock_placement.return_value = {"VCPU": 1}
+
+ usage = placement_limits._get_usage(self.context, uuids.project,
+ resources)
+
+ expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42}
+ self.assertDictEqual(expected, usage)
+
+ # PCPU was specified in the flavor, so the counts should be separate.
+ resources = ["servers", "class:VCPU", "class:MEMORY_MB", "class:PCPU"]
+ mock_placement.return_value = {"VCPU": 1, "PCPU": 2}
+
+ usage = placement_limits._get_usage(self.context, uuids.project,
+ resources)
+
+ expected = {'class:MEMORY_MB': 0, 'class:VCPU': 1, 'servers': 42,
+ 'class:PCPU': 2}
+ self.assertDictEqual(expected, usage)
+
+
+class TestGetDeltas(test.NoDBTestCase):
+ def test_get_deltas(self):
+ flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
+ ephemeral_gb=2, root_gb=5)
+
+ deltas = placement_limits._get_deltas_by_flavor(flavor, False, 2)
+
+ expected = {'servers': 2,
+ 'class:VCPU': 20, 'class:MEMORY_MB': 200,
+ 'class:DISK_GB': 14}
+ self.assertDictEqual(expected, deltas)
+
+ def test_get_deltas_recheck(self):
+ flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
+ ephemeral_gb=2, root_gb=5)
+
+ deltas = placement_limits._get_deltas_by_flavor(flavor, False, 0)
+
+ expected = {'servers': 0,
+ 'class:VCPU': 0, 'class:MEMORY_MB': 0,
+ 'class:DISK_GB': 0}
+ self.assertDictEqual(expected, deltas)
+
+ def test_get_deltas_check_baremetal(self):
+ extra_specs = {"resources:VCPU": 0, "resources:MEMORY_MB": 0,
+ "resources:DISK_GB": 0, "resources:CUSTOM_BAREMETAL": 1}
+ flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
+ ephemeral_gb=2, root_gb=5,
+ extra_specs=extra_specs)
+
+ deltas = placement_limits._get_deltas_by_flavor(flavor, True, 1)
+
+ expected = {'servers': 1, 'class:CUSTOM_BAREMETAL': 1}
+ self.assertDictEqual(expected, deltas)
+
+ def test_get_deltas_check_bfv(self):
+ flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
+ ephemeral_gb=2, root_gb=5)
+
+ deltas = placement_limits._get_deltas_by_flavor(flavor, True, 2)
+
+ expected = {'servers': 2,
+ 'class:VCPU': 20, 'class:MEMORY_MB': 200,
+ 'class:DISK_GB': 4}
+ self.assertDictEqual(expected, deltas)
+
+
+class TestEnforce(test.NoDBTestCase):
+ def setUp(self):
+ super(TestEnforce, self).setUp()
+ self.context = context.RequestContext()
+ self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
+
+ placement_limits._ENFORCER = mock.Mock(limit.Enforcer)
+ self.flavor = objects.Flavor(memory_mb=100, vcpus=10, swap=0,
+ ephemeral_gb=2, root_gb=5)
+
+ def test_enforce_num_instances_and_flavor_disabled(self):
+ self.flags(driver="nova.quota.NoopQuotaDriver", group="quota")
+ count = placement_limits.enforce_num_instances_and_flavor(
+ self.context, uuids.project_id, "flavor", False, 0, 42)
+ self.assertEqual(42, count)
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_num_instances_and_flavor(self, mock_limit):
+ mock_enforcer = mock.MagicMock()
+ mock_limit.return_value = mock_enforcer
+
+ count = placement_limits.enforce_num_instances_and_flavor(
+ self.context, uuids.project_id, self.flavor, False, 0, 2)
+
+ self.assertEqual(2, count)
+ mock_limit.assert_called_once_with(mock.ANY)
+ mock_enforcer.enforce.assert_called_once_with(
+ uuids.project_id,
+ {'servers': 2, 'class:VCPU': 20, 'class:MEMORY_MB': 200,
+ 'class:DISK_GB': 14})
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_num_instances_and_flavor_recheck(self, mock_limit):
+ mock_enforcer = mock.MagicMock()
+ mock_limit.return_value = mock_enforcer
+
+ count = placement_limits.enforce_num_instances_and_flavor(
+ self.context, uuids.project_id, self.flavor, False, 0, 0)
+
+ self.assertEqual(0, count)
+ mock_limit.assert_called_once_with(mock.ANY)
+ mock_enforcer.enforce.assert_called_once_with(
+ uuids.project_id,
+ {'servers': 0, 'class:VCPU': 0, 'class:MEMORY_MB': 0,
+ 'class:DISK_GB': 0})
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_num_instances_and_flavor_retry(self, mock_limit):
+ mock_enforcer = mock.MagicMock()
+ mock_limit.return_value = mock_enforcer
+ over_limit_info_list = [
+ limit_exceptions.OverLimitInfo("class:VCPU", 12, 0, 30)
+ ]
+ mock_enforcer.enforce.side_effect = [
+ limit_exceptions.ProjectOverLimit(
+ uuids.project_id, over_limit_info_list),
+ None]
+
+ count = placement_limits.enforce_num_instances_and_flavor(
+ self.context, uuids.project_id, self.flavor, True, 0, 3)
+
+ self.assertEqual(2, count)
+ self.assertEqual(2, mock_enforcer.enforce.call_count)
+ mock_enforcer.enforce.assert_called_with(
+ uuids.project_id,
+ {'servers': 2, 'class:VCPU': 20, 'class:MEMORY_MB': 200,
+ 'class:DISK_GB': 4})
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_num_instances_and_flavor_fails(self, mock_limit):
+ mock_enforcer = mock.MagicMock()
+ mock_limit.return_value = mock_enforcer
+ over_limit_info_list = [
+ limit_exceptions.OverLimitInfo("class:VCPU", 12, 0, 20),
+ limit_exceptions.OverLimitInfo("servers", 2, 1, 2)
+ ]
+ expected = limit_exceptions.ProjectOverLimit(uuids.project_id,
+ over_limit_info_list)
+ mock_enforcer.enforce.side_effect = expected
+
+ # Verify that the oslo.limit ProjectOverLimit gets translated to a
+ # TooManyInstances that the API knows how to handle
+ e = self.assertRaises(
+ exception.TooManyInstances,
+ placement_limits.enforce_num_instances_and_flavor, self.context,
+ uuids.project_id, self.flavor, True, 2, 4)
+
+ self.assertEqual(str(expected), str(e))
+ self.assertEqual(3, mock_enforcer.enforce.call_count)
+
+ @mock.patch('oslo_limit.limit.Enforcer')
+ def test_enforce_num_instances_and_flavor_placement_fail(self, mock_limit):
+ mock_enforcer = mock.MagicMock()
+ mock_limit.return_value = mock_enforcer
+ mock_enforcer.enforce.side_effect = exception.UsagesRetrievalFailed(
+ 'Failed to retrieve usages')
+
+ e = self.assertRaises(
+ exception.UsagesRetrievalFailed,
+ placement_limits.enforce_num_instances_and_flavor, self.context,
+ uuids.project, self.flavor, True, 0, 5)
+
+ expected = str(mock_enforcer.enforce.side_effect)
+ self.assertEqual(expected, str(e))
+
+
+class GetLegacyLimitsTest(test.NoDBTestCase):
+ def setUp(self):
+ super(GetLegacyLimitsTest, self).setUp()
+ self.new = {"servers": 1, "class:VCPU": 2, "class:MEMORY_MB": 3}
+ self.legacy = {"instances": 1, "cores": 2, "ram": 3}
+ self.resources = ["servers", "class:VCPU", "class:MEMORY_MB"]
+ self.resources.sort()
+ self.flags(driver=limit_utils.UNIFIED_LIMITS_DRIVER, group="quota")
+
+ def test_convert_keys_to_legacy_name(self):
+ limits = placement_limits._convert_keys_to_legacy_name(self.new)
+ self.assertEqual(self.legacy, limits)
+
+ def test_get_legacy_default_limits(self):
+ reglimits = {'servers': 1, 'class:VCPU': 2}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+ limits = placement_limits.get_legacy_default_limits()
+ self.assertEqual({'cores': 2, 'instances': 1, 'ram': 0}, limits)
+
+ def test_get_legacy_project_limits(self):
+ reglimits = {'servers': 5, 'class:MEMORY_MB': 7}
+ projlimits = {uuids.project_id: {'servers': 1}}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, projlimits))
+ limits = placement_limits.get_legacy_project_limits(uuids.project_id)
+ self.assertEqual({'instances': 1, 'cores': 0, 'ram': 7}, limits)
+
+ @mock.patch.object(report.SchedulerReportClient,
+ "get_usages_counts_for_limits")
+ @mock.patch.object(objects.InstanceMappingList, "get_counts")
+ @mock.patch.object(quota, "is_qfd_populated")
+ def test_get_legacy_counts(self, mock_qfd, mock_counts, mock_placement):
+ mock_qfd.return_value = True
+ mock_counts.return_value = {"project": {"instances": 1}}
+ mock_placement.return_value = {
+ "VCPU": 2, "CUSTOM_BAREMETAL": 2, "MEMORY_MB": 3,
+ }
+ counts = placement_limits.get_legacy_counts(
+ "context", uuids.project_id)
+ self.assertEqual(self.legacy, counts)
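
The placement-side tests separate flavor-derived deltas from usage counted in placement. A condensed sketch of the delta helper exercised by TestGetDeltas, with arbitrary flavor values; the expected contents follow the rules those tests assert, namely that root_gb is excluded from DISK_GB for boot-from-volume requests:

    from nova.limit import placement as placement_limits
    from nova import objects

    flavor = objects.Flavor(
        memory_mb=512, vcpus=2, swap=0, ephemeral_gb=1, root_gb=10)

    # two local-disk servers
    # expected: servers=2, class:VCPU=4, class:MEMORY_MB=1024, class:DISK_GB=22
    print(placement_limits._get_deltas_by_flavor(flavor, False, 2))

    # two boot-from-volume servers: root_gb is not counted
    # expected: servers=2, class:VCPU=4, class:MEMORY_MB=1024, class:DISK_GB=2
    print(placement_limits._get_deltas_by_flavor(flavor, True, 2))
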
diff --git a/nova/tests/unit/network/test_network_info.py b/nova/tests/unit/network/test_network_info.py
index 0420e2d791..1c604975b0 100644
--- a/nova/tests/unit/network/test_network_info.py
+++ b/nova/tests/unit/network/test_network_info.py
@@ -738,6 +738,52 @@ iface eth0 inet6 static
template = self._setup_injected_network_scenario(use_ipv4=False)
self.assertEqual(expected, template)
+ def test_injection_ipv6_only(self):
+ expected = '''\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet6 static
+ hwaddress ether aa:aa:aa:aa:aa:aa
+ address 1234:567::2
+ netmask 48
+ gateway 1234:567::1
+ dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+'''
+ template = self._setup_injected_network_scenario(use_ipv4=False,
+ use_ipv6=True)
+ self.assertEqual(expected, template)
+
+ def test_injection_ipv6_only_no_gateway(self):
+ expected = '''\
+# Injected by Nova on instance boot
+#
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+auto eth0
+iface eth0 inet6 static
+ hwaddress ether aa:aa:aa:aa:aa:aa
+ address 1234:567::2
+ netmask 48
+ dns-nameservers 2001:4860:4860::8888 2001:4860:4860::8844
+'''
+ template = self._setup_injected_network_scenario(use_ipv4=False,
+ use_ipv6=True,
+ gateway=False)
+ self.assertEqual(expected, template)
+
def test_injection_ipv6_two_interfaces(self):
expected = """\
# Injected by Nova on instance boot
diff --git a/nova/tests/unit/network/test_neutron.py b/nova/tests/unit/network/test_neutron.py
index 5056b70c4e..9aa970aca1 100644
--- a/nova/tests/unit/network/test_neutron.py
+++ b/nova/tests/unit/network/test_neutron.py
@@ -16,11 +16,11 @@
import collections
import copy
+from unittest import mock
from keystoneauth1.fixture import V2Token
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import service_token
-import mock
from neutronclient.common import exceptions
from neutronclient.v2_0 import client
from oslo_config import cfg
@@ -39,9 +39,9 @@ from nova.network import constants
from nova.network import model
from nova.network import neutron as neutronapi
from nova import objects
+from nova.objects import fields as obj_fields
from nova.objects import network_request as net_req_obj
from nova.objects import virtual_interface as obj_vif
-from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova.pci import utils as pci_utils
from nova.pci import whitelist as pci_whitelist
@@ -514,7 +514,11 @@ class TestAPIBase(test.TestCase):
has_dns_extension = False
if kwargs.get('dns_extension'):
has_dns_extension = True
- self.api.extensions[constants.DNS_INTEGRATION] = 1
+ self.api.extensions = {
+ constants.DNS_INTEGRATION: {
+ 'alias': constants.DNS_INTEGRATION,
+ },
+ }
# Net idx is 1-based for compatibility with existing unit tests
nets = self.nets[net_idx - 1]
@@ -1166,35 +1170,14 @@ class TestAPI(TestAPIBase):
mock_get_physnet.assert_called_once_with(
mock.ANY, mock.ANY, self.port_data1[0]['network_id'])
- @mock.patch.object(neutronapi, 'get_client')
- def test_refresh_neutron_extensions_cache(self, mock_get_client):
+ def test_refresh_neutron_extensions_cache(self):
mocked_client = mock.create_autospec(client.Client)
- mock_get_client.return_value = mocked_client
mocked_client.list_extensions.return_value = {
- 'extensions': [{'name': constants.QOS_QUEUE}]}
- self.api._refresh_neutron_extensions_cache(self.context)
+ 'extensions': [{'alias': constants.DNS_INTEGRATION}]}
+ self.api._refresh_neutron_extensions_cache(mocked_client)
self.assertEqual(
- {constants.QOS_QUEUE: {'name': constants.QOS_QUEUE}},
+ {constants.DNS_INTEGRATION: {'alias': constants.DNS_INTEGRATION}},
self.api.extensions)
- mock_get_client.assert_called_once_with(self.context)
- mocked_client.list_extensions.assert_called_once_with()
-
- @mock.patch.object(neutronapi, 'get_client')
- def test_populate_neutron_extension_values_rxtx_factor(
- self, mock_get_client):
- mocked_client = mock.create_autospec(client.Client)
- mock_get_client.return_value = mocked_client
- mocked_client.list_extensions.return_value = {
- 'extensions': [{'name': constants.QOS_QUEUE}]}
- flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
- flavor['rxtx_factor'] = 1
- instance = objects.Instance(system_metadata={})
- instance.flavor = flavor
- port_req_body = {'port': {}}
- self.api._populate_neutron_extension_values(self.context, instance,
- None, port_req_body)
- self.assertEqual(1, port_req_body['port']['rxtx_factor'])
- mock_get_client.assert_called_once_with(self.context)
mocked_client.list_extensions.assert_called_once_with()
def test_allocate_for_instance_1(self):
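
The updated extension tests expect the cache to be keyed by the extension alias rather than its name, with the extension body as the value. A small sketch of that cache shape built from a list_extensions()-style payload; the payload below is a stand-in, not a real neutron response:

    payload = {
        'extensions': [
            {'name': 'DNS Integration', 'alias': 'dns-integration'},
            {'name': 'Port Binding', 'alias': 'binding'},
        ],
    }

    # cache keyed by alias, value is the extension body itself
    extensions = {ext['alias']: ext for ext in payload['extensions']}

    assert 'dns-integration' in extensions
    assert extensions['binding']['name'] == 'Port Binding'
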
@@ -2414,9 +2397,13 @@ class TestAPI(TestAPIBase):
mock_nc.show_port.side_effect = exceptions.PortNotFoundClient
if fip_ext_enabled:
- self.api.extensions = [constants.FIP_PORT_DETAILS]
+ self.api.extensions = {
+ constants.FIP_PORT_DETAILS: {
+ 'alias': constants.FIP_PORT_DETAILS,
+ },
+ }
else:
- self.api.extensions = []
+ self.api.extensions = {}
fip = self.api.get_floating_ip(self.context, uuids.fip_id)
@@ -2489,9 +2476,13 @@ class TestAPI(TestAPIBase):
mock_nc.show_port.side_effect = exceptions.PortNotFoundClient
if fip_ext_enabled:
- self.api.extensions = [constants.FIP_PORT_DETAILS]
+ self.api.extensions = {
+ constants.FIP_PORT_DETAILS: {
+ 'alias': constants.FIP_PORT_DETAILS,
+ },
+ }
else:
- self.api.extensions = []
+ self.api.extensions = {}
fip = self.api.get_floating_ip_by_address(self.context, '172.1.2.3')
@@ -3391,6 +3382,155 @@ class TestAPI(TestAPIBase):
mocked_client.list_ports.assert_called_once_with(
tenant_id=uuids.fake, device_id=uuids.instance)
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_physnet_tunneled_info',
+ new=mock.Mock(return_value=(None, False)))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_preexisting_port_ids',
+ new=mock.Mock(return_value=[]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_subnets_from_port',
+ new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_floating_ips_by_fixed_and_port',
+ new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}]))
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_build_network_info_model_full_vnic_type_change(
+ self, mock_get_client
+ ):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_get_client.return_value = mocked_client
+ fake_inst = objects.Instance()
+ fake_inst.project_id = uuids.fake
+ fake_inst.uuid = uuids.instance
+ fake_ports = [
+ {
+ "id": "port1",
+ "network_id": "net-id",
+ "tenant_id": uuids.fake,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "fixed_ips": [{"ip_address": "1.1.1.1"}],
+ "mac_address": "de:ad:be:ef:00:01",
+ "binding:vif_type": model.VIF_TYPE_BRIDGE,
+ "binding:vnic_type": model.VNIC_TYPE_DIRECT,
+ "binding:vif_details": {},
+ },
+ ]
+ mocked_client.list_ports.return_value = {'ports': fake_ports}
+ fake_inst.info_cache = objects.InstanceInfoCache.new(
+ self.context, uuids.instance)
+ fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
+
+ # build the network info first
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ force_refresh=True,
+ )
+
+ self.assertEqual(1, len(nw_infos))
+ fake_inst.info_cache.network_info = nw_infos
+
+ # change the vnic_type of the port and rebuild the network info
+ fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP
+ with mock.patch(
+ "nova.network.neutron.API._log_error_if_vnic_type_changed"
+ ) as mock_log:
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ force_refresh=True,
+ )
+
+ mock_log.assert_called_once_with(
+ fake_ports[0]["id"], "direct", "macvtap", fake_inst)
+ self.assertEqual(1, len(nw_infos))
+
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_physnet_tunneled_info',
+ new=mock.Mock(return_value=(None, False)))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_preexisting_port_ids',
+ new=mock.Mock(return_value=[]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_subnets_from_port',
+ new=mock.Mock(return_value=[model.Subnet(cidr='1.0.0.0/8')]))
+ @mock.patch.object(
+ neutronapi.API,
+ '_get_floating_ips_by_fixed_and_port',
+ new=mock.Mock(return_value=[{'floating_ip_address': '10.0.0.1'}]))
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_build_network_info_model_single_vnic_type_change(
+ self, mock_get_client
+ ):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_get_client.return_value = mocked_client
+ fake_inst = objects.Instance()
+ fake_inst.project_id = uuids.fake
+ fake_inst.uuid = uuids.instance
+ fake_ports = [
+ {
+ "id": "port1",
+ "network_id": "net-id",
+ "tenant_id": uuids.fake,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "fixed_ips": [{"ip_address": "1.1.1.1"}],
+ "mac_address": "de:ad:be:ef:00:01",
+ "binding:vif_type": model.VIF_TYPE_BRIDGE,
+ "binding:vnic_type": model.VNIC_TYPE_DIRECT,
+ "binding:vif_details": {},
+ },
+ ]
+ fake_nets = [
+ {
+ "id": "net-id",
+ "name": "foo",
+ "tenant_id": uuids.fake,
+ }
+ ]
+ mocked_client.list_ports.return_value = {'ports': fake_ports}
+ fake_inst.info_cache = objects.InstanceInfoCache.new(
+ self.context, uuids.instance)
+ fake_inst.info_cache.network_info = model.NetworkInfo.hydrate([])
+
+ # build the network info first
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ fake_nets,
+ [fake_ports[0]["id"]],
+ refresh_vif_id=fake_ports[0]["id"],
+ )
+
+ self.assertEqual(1, len(nw_infos))
+ fake_inst.info_cache.network_info = nw_infos
+
+ # change the vnic_type of the port and rebuild the network info
+ fake_ports[0]["binding:vnic_type"] = model.VNIC_TYPE_MACVTAP
+ with mock.patch(
+ "nova.network.neutron.API._log_error_if_vnic_type_changed"
+ ) as mock_log:
+ nw_infos = self.api._build_network_info_model(
+ self.context,
+ fake_inst,
+ fake_nets,
+ [fake_ports[0]["id"]],
+ refresh_vif_id=fake_ports[0]["id"],
+ )
+
+ mock_log.assert_called_once_with(
+ fake_ports[0]["id"], "direct", "macvtap", fake_inst)
+ self.assertEqual(1, len(nw_infos))
+
@mock.patch.object(neutronapi, 'get_client')
def test_get_subnets_from_port(self, mock_get_client):
mocked_client = mock.create_autospec(client.Client)
@@ -3473,7 +3613,7 @@ class TestAPI(TestAPIBase):
'provider:network_type': 'vxlan'}]}}
test_ext_list = {'extensions':
[{'name': 'Multi Provider Network',
- 'alias': 'multi-segments'}]}
+ 'alias': 'multi-provider'}]}
mock_client = mock_get_client.return_value
mock_client.list_extensions.return_value = test_ext_list
@@ -3494,7 +3634,7 @@ class TestAPI(TestAPIBase):
'provider:network_type': 'vlan'}}
test_ext_list = {'extensions':
[{'name': 'Multi Provider Network',
- 'alias': 'multi-segments'}]}
+ 'alias': 'multi-provider'}]}
mock_client = mock_get_client.return_value
mock_client.list_extensions.return_value = test_ext_list
@@ -3520,7 +3660,7 @@ class TestAPI(TestAPIBase):
'provider:network_type': 'vlan'}]}}
test_ext_list = {'extensions':
[{'name': 'Multi Provider Network',
- 'alias': 'multi-segments'}]}
+ 'alias': 'multi-provider'}]}
mock_client = mock_get_client.return_value
mock_client.list_extensions.return_value = test_ext_list
@@ -3565,6 +3705,23 @@ class TestAPI(TestAPIBase):
self.assertFalse(tunneled)
self.assertIsNone(physnet_name)
+ def test_is_remote_managed(self):
+ cases = {
+ (model.VNIC_TYPE_NORMAL, False),
+ (model.VNIC_TYPE_DIRECT, False),
+ (model.VNIC_TYPE_MACVTAP, False),
+ (model.VNIC_TYPE_DIRECT_PHYSICAL, False),
+ (model.VNIC_TYPE_BAREMETAL, False),
+ (model.VNIC_TYPE_VIRTIO_FORWARDER, False),
+ (model.VNIC_TYPE_VDPA, False),
+ (model.VNIC_TYPE_ACCELERATOR_DIRECT, False),
+ (model.VNIC_TYPE_ACCELERATOR_DIRECT_PHYSICAL, False),
+ (model.VNIC_TYPE_REMOTE_MANAGED, True),
+ }
+
+ for vnic_type, expected in cases:
+ self.assertEqual(self.api._is_remote_managed(vnic_type), expected)
+
def _test_get_port_vnic_info(
self, mock_get_client, binding_vnic_type, expected_vnic_type,
port_resource_request=None, numa_policy=None
@@ -3711,6 +3868,27 @@ class TestAPI(TestAPIBase):
count = self.api.validate_networks(self.context, requested_networks, 1)
self.assertEqual(1, count)
+ @mock.patch('nova.network.neutron.API._show_port')
+ def test_deferred_ip_port_none_allocation(self, mock_show):
+ """Test behavior when the 'none' IP allocation policy is used."""
+ port = {
+ 'network_id': 'my_netid1',
+ 'device_id': None,
+ 'id': uuids.port,
+ 'fixed_ips': [], # no fixed ip
+ 'ip_allocation': 'none',
+ 'binding:vif_details': {
+ 'connectivity': 'l2',
+ },
+ }
+
+ mock_show.return_value = port
+
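+ # Validation should still succeed even though the port has no fixed
+ # IPs, because its IP allocation policy is 'none'.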
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id=port['id'])])
+ count = self.api.validate_networks(self.context, requested_networks, 1)
+ self.assertEqual(1, count)
+
@mock.patch('oslo_concurrency.lockutils.lock')
def test_get_instance_nw_info_locks_per_instance(self, mock_lock):
instance = objects.Instance(uuid=uuids.fake)
@@ -4356,7 +4534,7 @@ class TestAPI(TestAPIBase):
def test_update_instance_vnic_index(self, mock_get_client,
mock_refresh_extensions):
api = neutronapi.API()
- api.extensions = set([constants.VNIC_INDEX_EXT])
+ api.extensions = set([constants.VNIC_INDEX])
mock_client = mock_get_client.return_value
mock_client.update_port.return_value = 'port'
@@ -4381,7 +4559,7 @@ class TestAPI(TestAPIBase):
self, get_client_mock
):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
# We pass in a port profile which has a migration attribute and also
# a second port profile attribute 'fake_profile' this can be
@@ -4425,7 +4603,7 @@ class TestAPI(TestAPIBase):
value is None.
"""
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
fake_ports = {'ports': [
{'id': uuids.portid,
@@ -4477,16 +4655,16 @@ class TestAPI(TestAPIBase):
'device_owner': 'compute:%s' %
instance.availability_zone}})
+ @mock.patch.object(neutronapi.API, '_get_vf_pci_device_profile',
+ new=mock.Mock(return_value={}))
@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
- def test_update_port_bindings_for_instance_with_pci(self,
- get_client_mock,
- get_pci_device_devspec_mock):
-
+ def test_update_port_bindings_for_instance_with_pci(
+ self, get_client_mock, get_pci_device_devspec_mock):
devspec = mock.Mock()
devspec.get_tags.return_value = {'physical_network': 'physnet1'}
get_pci_device_devspec_mock.return_value = devspec
@@ -4494,17 +4672,21 @@ class TestAPI(TestAPIBase):
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = objects.MigrationContext()
instance.migration_context.old_pci_devices = objects.PciDeviceList(
- objects=[objects.PciDevice(vendor_id='1377',
- product_id='0047',
- address='0000:0a:00.1',
- compute_node_id=1,
- request_id='1234567890')])
+ objects=[objects.PciDevice(
+ vendor_id='1377',
+ product_id='0047',
+ address='0000:0a:00.1',
+ compute_node_id=1,
+ request_id='1234567890',
+ dev_type=obj_fields.PciDeviceType.SRIOV_VF)])
instance.migration_context.new_pci_devices = objects.PciDeviceList(
- objects=[objects.PciDevice(vendor_id='1377',
- product_id='0047',
- address='0000:0b:00.1',
- compute_node_id=2,
- request_id='1234567890')])
+ objects=[objects.PciDevice(
+ vendor_id='1377',
+ product_id='0047',
+ address='0000:0b:00.1',
+ compute_node_id=2,
+ request_id='1234567890',
+ dev_type=obj_fields.PciDeviceType.SRIOV_VF)])
instance.pci_devices = instance.migration_context.old_pci_devices
# Validate that non-direct port aren't updated (fake-port-2).
@@ -4597,7 +4779,7 @@ class TestAPI(TestAPIBase):
def test_update_port_bindings_for_instance_with_pci_no_migration(self,
get_client_mock,
get_pci_device_devspec_mock):
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
devspec = mock.Mock()
devspec.get_tags.return_value = {'physical_network': 'physnet1'}
@@ -4647,7 +4829,7 @@ class TestAPI(TestAPIBase):
def test_update_port_bindings_for_instance_with_same_host_failed_vif_type(
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
list_ports_mock = mock.Mock()
update_port_mock = mock.Mock()
@@ -4692,7 +4874,7 @@ class TestAPI(TestAPIBase):
def test_update_port_bindings_for_instance_with_diff_host_unbound_vif_type(
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
binding_profile = {'fake_profile': 'fake_data',
constants.MIGRATING_ATTR: 'my-dest-host'}
@@ -4775,6 +4957,174 @@ class TestAPI(TestAPIBase):
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False),
)
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
+ def test_update_port_bindings_for_instance_with_sriov_pf(
+ self, get_client_mock, get_pci_device_devspec_mock
+ ):
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ get_pci_device_devspec_mock.return_value = devspec
+
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.migration_context = objects.MigrationContext()
+ instance.migration_context.old_pci_devices = objects.PciDeviceList(
+ objects=[
+ objects.PciDevice(
+ vendor_id='8086',
+ product_id='154d',
+ address='0000:0a:01',
+ compute_node_id=1,
+ request_id=uuids.pci_req,
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'},
+ )
+ ]
+ )
+ instance.pci_devices = instance.migration_context.old_pci_devices
+ instance.migration_context.new_pci_devices = objects.PciDeviceList(
+ objects=[
+ objects.PciDevice(
+ vendor_id='8086',
+ product_id='154d',
+ address='0000:0a:02',
+ compute_node_id=2,
+ request_id=uuids.pci_req,
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:dd'},
+ )
+ ]
+ )
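+ # The instance is treated as already holding the new (destination)
+ # PCI devices taken from the migration context.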
+ instance.pci_devices = instance.migration_context.new_pci_devices
+
+ fake_ports = {
+ 'ports': [
+ {
+ 'id': uuids.port,
+ 'binding:vnic_type': 'direct-physical',
+ constants.BINDING_HOST_ID: 'fake-host-old',
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:01',
+ 'physical_network': 'old_phys_net',
+ 'pci_vendor_info': 'old_pci_vendor_info',
+ },
+ },
+ ]
+ }
+
+ migration = objects.Migration(
+ status='confirmed', migration_type='migration')
+ list_ports_mock = mock.Mock(return_value=fake_ports)
+ get_client_mock.return_value.list_ports = list_ports_mock
+
+ update_port_mock = mock.Mock()
+ get_client_mock.return_value.update_port = update_port_mock
+
+ self.api._update_port_binding_for_instance(
+ self.context, instance, instance.host, migration)
+
+ # Assert that update_port is called with the binding:profile
+ # corresponding to the specified PCI device, including its MAC address.
+ update_port_mock.assert_called_once_with(
+ uuids.port,
+ {
+ 'port': {
+ constants.BINDING_HOST_ID: 'fake-host',
+ 'device_owner': 'compute:%s' % instance.availability_zone,
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:02',
+ 'physical_network': 'physnet1',
+ 'pci_vendor_info': '8086:154d',
+ 'device_mac_address': 'b4:96:91:34:f4:dd',
+ },
+ }
+ },
+ )
+
+ @mock.patch(
+ 'nova.network.neutron.API.has_extended_resource_request_extension',
+ new=mock.Mock(return_value=False),
+ )
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ @mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
+ def test_update_port_bindings_for_instance_with_sriov_pf_no_migration(
+ self, get_client_mock, get_pci_device_devspec_mock
+ ):
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ get_pci_device_devspec_mock.return_value = devspec
+
+ instance = fake_instance.fake_instance_obj(self.context)
+ instance.pci_requests = objects.InstancePCIRequests(
+ instance_uuid=instance.uuid,
+ requests=[
+ objects.InstancePCIRequest(
+ requester_id=uuids.port,
+ request_id=uuids.pci_req,
+ )
+ ],
+ )
+ instance.pci_devices = objects.PciDeviceList(
+ objects=[
+ objects.PciDevice(
+ vendor_id='8086',
+ product_id='154d',
+ address='0000:0a:02',
+ compute_node_id=2,
+ request_id=uuids.pci_req,
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'},
+ )
+ ]
+ )
+
+ fake_ports = {
+ 'ports': [
+ {
+ 'id': uuids.port,
+ 'binding:vnic_type': 'direct-physical',
+ constants.BINDING_HOST_ID: 'fake-host-old',
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:01',
+ 'physical_network': 'old_phys_net',
+ 'pci_vendor_info': 'old_pci_vendor_info',
+ 'device_mac_address': 'b4:96:91:34:f4:dd'
+ },
+ },
+ ]
+ }
+
+ list_ports_mock = mock.Mock(return_value=fake_ports)
+ get_client_mock.return_value.list_ports = list_ports_mock
+
+ update_port_mock = mock.Mock()
+ get_client_mock.return_value.update_port = update_port_mock
+
+ self.api._update_port_binding_for_instance(
+ self.context, instance, instance.host)
+
+ # Assert that update_port is called with the binding:profile
+ # corresponding to the specified PCI device, including its MAC address.
+ update_port_mock.assert_called_once_with(
+ uuids.port,
+ {
+ 'port': {
+ constants.BINDING_HOST_ID: 'fake-host',
+ 'device_owner': 'compute:%s' % instance.availability_zone,
+ constants.BINDING_PROFILE: {
+ 'pci_slot': '0000:0a:02',
+ 'physical_network': 'physnet1',
+ 'pci_vendor_info': '8086:154d',
+ 'device_mac_address': 'b4:96:91:34:f4:36',
+ },
+ }
+ },
+ )
+
+ @mock.patch(
+ 'nova.network.neutron.API.has_extended_resource_request_extension',
+ new=mock.Mock(return_value=False),
+ )
@mock.patch.object(neutronapi, 'get_client', return_value=mock.Mock())
def test_update_port_bindings_for_instance_with_resource_req(
self, get_client_mock):
@@ -4982,7 +5332,7 @@ class TestAPI(TestAPIBase):
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
# We test with an instance host and destination_host where the
# port will be moving.
get_ports = {'ports': [
@@ -5012,7 +5362,7 @@ class TestAPI(TestAPIBase):
destination host and the binding:profile is None in the port.
"""
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
# We test with an instance host and destination_host where the
# port will be moving but with binding:profile set to None.
get_ports = {
@@ -5043,7 +5393,7 @@ class TestAPI(TestAPIBase):
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
port_id = uuids.port_id
get_ports = {'ports': [
{'id': port_id,
@@ -5063,7 +5413,7 @@ class TestAPI(TestAPIBase):
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
get_ports = {'ports': [
{'id': uuids.port_id,
constants.BINDING_HOST_ID: instance.host}]}
@@ -5099,10 +5449,10 @@ class TestAPI(TestAPIBase):
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
migrate_profile = {
constants.MIGRATING_ATTR: 'new-host'}
- # Pass a port with an migration porfile attribute.
+ # Pass a port with a migration profile attribute.
port_id = uuids.port_id
get_ports = {'ports': [
{'id': port_id,
@@ -5111,8 +5461,9 @@ class TestAPI(TestAPIBase):
self.api.list_ports = mock.Mock(return_value=get_ports)
mocked_client = get_client_mock.return_value
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
self.api.setup_networks_on_host(self.context,
instance,
host='new-host',
@@ -5130,10 +5481,10 @@ class TestAPI(TestAPIBase):
which is raised through to the caller.
"""
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
migrate_profile = {
constants.MIGRATING_ATTR: 'new-host'}
- # Pass a port with an migration porfile attribute.
+ # Pass a port with a migration profile attribute.
get_ports = {
'ports': [
{'id': uuids.port1,
@@ -5148,8 +5499,9 @@ class TestAPI(TestAPIBase):
mocked_client = get_client_mock.return_value
mocked_client.delete_port_binding.side_effect = NeutronError
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
ex = self.assertRaises(
exception.PortBindingDeletionFailed,
self.api.setup_networks_on_host,
@@ -5171,15 +5523,15 @@ class TestAPI(TestAPIBase):
self, get_client_mock):
instance = fake_instance.fake_instance_obj(self.context)
- self.api._has_port_binding_extension = mock.Mock(return_value=True)
- # Pass a port without any migration porfile attribute.
+ self.api.has_port_binding_extension = mock.Mock(return_value=True)
+ # Pass a port without any migration profile attribute.
get_ports = {'ports': [
{'id': uuids.port_id,
constants.BINDING_HOST_ID: instance.host}]}
self.api.list_ports = mock.Mock(return_value=get_ports)
update_port_mock = mock.Mock()
get_client_mock.return_value.update_port = update_port_mock
- with mock.patch.object(self.api, 'supports_port_binding_extension',
+ with mock.patch.object(self.api, 'has_port_binding_extension',
return_value=False):
self.api.setup_networks_on_host(self.context,
instance,
@@ -5212,7 +5564,8 @@ class TestAPI(TestAPIBase):
self.assertEqual(['2', '3'], result, "Invalid preexisting ports")
@mock.patch('nova.network.neutron.API._show_port')
- def _test_unbind_ports_get_client(self, mock_neutron, mock_show):
+ @mock.patch('nova.network.neutron.get_client')
+ def test_unbind_ports_get_client(self, mock_neutron, mock_show):
mock_ctx = mock.Mock(is_admin=False)
ports = ["1", "2", "3"]
@@ -5228,23 +5581,18 @@ class TestAPI(TestAPIBase):
self.assertEqual(1, mock_neutron.call_count)
mock_neutron.assert_has_calls(get_client_calls, True)
- @mock.patch('nova.network.neutron.get_client')
- def test_unbind_ports_get_client_binding_extension(self,
- mock_neutron):
- self._test_unbind_ports_get_client(mock_neutron)
-
- @mock.patch('nova.network.neutron.get_client')
- def test_unbind_ports_get_client(self, mock_neutron):
- self._test_unbind_ports_get_client(mock_neutron)
-
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port')
- def _test_unbind_ports(self, mock_neutron, mock_show):
+ @mock.patch('nova.network.neutron.get_client')
+ def test_unbind_ports(self, mock_neutron, mock_show):
mock_client = mock.Mock()
mock_update_port = mock.Mock()
mock_client.update_port = mock_update_port
mock_ctx = mock.Mock(is_admin=False)
ports = ["1", "2", "3"]
mock_show.side_effect = [{"id": "1"}, {"id": "2"}, {"id": "3"}]
+
api = neutronapi.API()
api._unbind_ports(mock_ctx, ports, mock_neutron, mock_client)
@@ -5258,14 +5606,6 @@ class TestAPI(TestAPIBase):
self.assertEqual(3, mock_update_port.call_count)
mock_update_port.assert_has_calls(update_port_calls)
- @mock.patch('nova.network.neutron.get_client')
- def test_unbind_ports_binding_ext(self, mock_neutron):
- self._test_unbind_ports(mock_neutron)
-
- @mock.patch('nova.network.neutron.get_client')
- def test_unbind_ports(self, mock_neutron):
- self._test_unbind_ports(mock_neutron)
-
def test_unbind_ports_no_port_ids(self):
# Tests that None entries in the ports list are filtered out.
mock_client = mock.Mock()
@@ -5279,7 +5619,11 @@ class TestAPI(TestAPIBase):
@mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
- new=mock.Mock()
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch(
+ 'nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=True),
)
@mock.patch('nova.network.neutron.API.get_instance_nw_info')
@mock.patch('nova.network.neutron.excutils')
@@ -5822,9 +6166,13 @@ class TestAPI(TestAPIBase):
mock_nc.list_ports.return_value = {'ports': []}
if fip_ext_enabled:
- self.api.extensions = [constants.FIP_PORT_DETAILS]
+ self.api.extensions = {
+ constants.FIP_PORT_DETAILS: {
+ 'alias': constants.FIP_PORT_DETAILS,
+ },
+ }
else:
- self.api.extensions = []
+ self.api.extensions = {}
fips = self.api.get_floating_ips_by_project(self.context)
@@ -5857,6 +6205,8 @@ class TestAPI(TestAPIBase):
"""Make sure we don't fail for floating IPs without attached ports."""
self._test_get_floating_ips_by_project(False, False)
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=True))
@mock.patch('nova.network.neutron.API._show_port')
def test_unbind_ports_reset_dns_name_by_admin(self, mock_show):
neutron = mock.Mock()
@@ -5867,7 +6217,6 @@ class TestAPI(TestAPIBase):
}
}
port_client = mock.Mock()
- self.api.extensions = [constants.DNS_INTEGRATION]
ports = [uuids.port_id]
mock_show.return_value = {'id': uuids.port}
self.api._unbind_ports(self.context, ports, neutron, port_client)
@@ -5880,6 +6229,8 @@ class TestAPI(TestAPIBase):
uuids.port_id, port_req_body)
neutron.update_port.assert_not_called()
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=True))
@mock.patch('nova.network.neutron.API._show_port')
def test_unbind_ports_reset_dns_name_by_non_admin(self, mock_show):
neutron = mock.Mock()
@@ -5890,7 +6241,6 @@ class TestAPI(TestAPIBase):
}
}
port_client = mock.Mock()
- self.api.extensions = [constants.DNS_INTEGRATION]
ports = [uuids.port_id]
mock_show.return_value = {'id': uuids.port}
self.api._unbind_ports(self.context, ports, neutron, port_client)
@@ -5904,6 +6254,8 @@ class TestAPI(TestAPIBase):
neutron.update_port.assert_called_once_with(
uuids.port_id, non_admin_port_req_body)
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port')
def test_unbind_ports_reset_allocation_in_port_binding(self, mock_show):
neutron = mock.Mock()
@@ -5919,6 +6271,8 @@ class TestAPI(TestAPIBase):
port_client.update_port.assert_called_once_with(
uuids.port_id, port_req_body)
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port')
def test_unbind_ports_reset_binding_profile(self, mock_show):
neutron = mock.Mock()
@@ -5928,20 +6282,22 @@ class TestAPI(TestAPIBase):
'id': uuids.port,
'binding:profile': {'pci_vendor_info': '1377:0047',
'pci_slot': '0000:0a:00.1',
+ 'card_serial_number': 'MT2113X00000',
'physical_network': 'physnet1',
'capabilities': ['switchdev']}
}
self.api._unbind_ports(self.context, ports, neutron, port_client)
port_req_body = {'port': {'binding:host_id': None,
'binding:profile':
- {'physical_network': 'physnet1',
- 'capabilities': ['switchdev']},
+ {'capabilities': ['switchdev']},
'device_id': '',
'device_owner': ''}
}
port_client.update_port.assert_called_once_with(
uuids.port_id, port_req_body)
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._populate_neutron_extension_values')
@mock.patch('nova.network.neutron.API._update_port',
# called twice, fails on the 2nd call and triggers the cleanup
@@ -6014,7 +6370,6 @@ class TestAPI(TestAPIBase):
def test_unbind_ports_port_show_portnotfound(self, mock_log, mock_show):
api = neutronapi.API()
neutron_client = mock.Mock()
- mock_show.return_value = {'id': uuids.port}
api._unbind_ports(self.context, [uuids.port_id],
neutron_client, neutron_client)
mock_show.assert_called_once_with(
@@ -6023,6 +6378,65 @@ class TestAPI(TestAPIBase):
neutron_client=mock.ANY)
mock_log.assert_not_called()
+ @mock.patch(
+ 'nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False),
+ )
+ @mock.patch('nova.network.neutron.API._show_port')
+ @mock.patch.object(neutronapi, 'LOG')
+ def test_unbind_ports_port_show_portnotfound_multiple_ports(
+ self, mock_log, mock_show,
+ ):
+ """Ensure we continue unbinding ports even when one isn't found."""
+ mock_show.side_effect = [
+ exception.PortNotFound(port_id=uuids.port_a),
+ {'id': uuids.port_b},
+ ]
+ api = neutronapi.API()
+ neutron_client = mock.Mock()
+
+ api._unbind_ports(
+ self.context,
+ [uuids.port_a, uuids.port_b],
+ neutron_client,
+ neutron_client,
+ )
+
+ mock_show.assert_has_calls(
+ [
+ mock.call(
+ self.context,
+ uuids.port_a,
+ fields=['binding:profile', 'network_id'],
+ neutron_client=neutron_client,
+ ),
+ mock.call(
+ self.context,
+ uuids.port_b,
+ fields=['binding:profile', 'network_id'],
+ neutron_client=neutron_client,
+ ),
+ ]
+ )
+ # Only the port that exists should be updated
+ neutron_client.update_port.assert_called_once_with(
+ uuids.port_b,
+ {
+ 'port': {
+ 'device_id': '',
+ 'device_owner': '',
+ 'binding:profile': {},
+ 'binding:host_id': None,
+ }
+ }
+ )
+ mock_log.exception.assert_not_called()
+ mock_log.debug.assert_called_with(
+ 'Unable to show port %s as it no longer exists.', uuids.port_a,
+ )
+
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port',
side_effect=Exception)
@mock.patch.object(neutronapi.LOG, 'exception')
@@ -6040,9 +6454,11 @@ class TestAPI(TestAPIBase):
'binding:profile': {}, 'binding:host_id': None}})
self.assertTrue(mock_log.called)
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port')
@mock.patch.object(neutronapi.LOG, 'exception')
- def test_unbind_ports_portnotfound(self, mock_log, mock_show):
+ def test_unbind_ports_port_update_portnotfound(self, mock_log, mock_show):
api = neutronapi.API()
neutron_client = mock.Mock()
neutron_client.update_port = mock.Mock(
@@ -6056,9 +6472,13 @@ class TestAPI(TestAPIBase):
'binding:profile': {}, 'binding:host_id': None}})
mock_log.assert_not_called()
+ @mock.patch('nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.network.neutron.API._show_port')
@mock.patch.object(neutronapi.LOG, 'exception')
- def test_unbind_ports_unexpected_error(self, mock_log, mock_show):
+ def test_unbind_ports_port_update_unexpected_error(
+ self, mock_log, mock_show,
+ ):
api = neutronapi.API()
neutron_client = mock.Mock()
neutron_client.update_port = mock.Mock(
@@ -6140,7 +6560,8 @@ class TestAPI(TestAPIBase):
objects.NetworkRequest(port_id=uuids.portid_4),
objects.NetworkRequest(port_id=uuids.portid_5),
objects.NetworkRequest(port_id=uuids.trusted_port),
- objects.NetworkRequest(port_id=uuids.portid_vdpa)])
+ objects.NetworkRequest(port_id=uuids.portid_vdpa),
+ objects.NetworkRequest(port_id=uuids.portid_remote_managed)])
pci_requests = objects.InstancePCIRequests(requests=[])
# _get_port_vnic_info should be called for every NetworkRequest with a
# port_id attribute (so six times)
@@ -6154,13 +6575,14 @@ class TestAPI(TestAPIBase):
(model.VNIC_TYPE_DIRECT, True, 'netN',
mock.sentinel.resource_request2, None, None),
(model.VNIC_TYPE_VDPA, None, 'netN', None, None, None),
+ (model.VNIC_TYPE_REMOTE_MANAGED, None, 'netN', None, None, None),
]
# _get_physnet_tunneled_info should be called for every NetworkRequest
# (so seven times)
mock_get_physnet_tunneled_info.side_effect = [
('physnet1', False), ('physnet1', False), ('', True),
('physnet1', False), ('physnet2', False), ('physnet3', False),
- ('physnet4', False), ('physnet1', False)
+ ('physnet4', False), ('physnet1', False), ('physnet1', False),
]
api = neutronapi.API()
@@ -6177,13 +6599,16 @@ class TestAPI(TestAPIBase):
mock.sentinel.request_group1,
mock.sentinel.request_group2],
port_resource_requests)
- self.assertEqual(6, len(pci_requests.requests))
+ self.assertEqual(7, len(pci_requests.requests))
has_pci_request_id = [net.pci_request_id is not None for net in
requested_networks.objects]
self.assertEqual(pci_requests.requests[3].spec[0]["dev_type"],
"type-PF")
self.assertEqual(pci_requests.requests[5].spec[0]["dev_type"], "vdpa")
- expected_results = [True, False, False, True, True, True, True, True]
+ self.assertEqual(pci_requests.requests[6].spec[0]["remote_managed"],
+ 'True')
+ expected_results = [True, False, False, True, True, True, True, True,
+ True]
self.assertEqual(expected_results, has_pci_request_id)
# Make sure only the trusted VF has the 'trusted' tag set in the spec.
for pci_req in pci_requests.requests:
@@ -6195,11 +6620,23 @@ class TestAPI(TestAPIBase):
else:
self.assertNotIn(pci_request.PCI_TRUSTED_TAG, spec)
+ # Only remote-managed ports must have the remote_managed tag set
+ # to True.
+ for pci_req in pci_requests.requests:
+ spec = pci_req.spec[0]
+ if pci_req.requester_id == uuids.portid_remote_managed:
+ self.assertEqual('True',
+ spec[pci_request.PCI_REMOTE_MANAGED_TAG])
+ else:
+ self.assertEqual('False',
+ spec[pci_request.PCI_REMOTE_MANAGED_TAG])
+
# Only SRIOV ports and those with a resource_request will have
# pci_req.requester_id.
self.assertEqual(
[uuids.portid_1, uuids.portid_3, uuids.portid_4, uuids.portid_5,
- uuids.trusted_port, uuids.portid_vdpa],
+ uuids.trusted_port, uuids.portid_vdpa,
+ uuids.portid_remote_managed],
[pci_req.requester_id for pci_req in pci_requests.requests])
self.assertCountEqual(
@@ -6671,7 +7108,7 @@ class TestAPI(TestAPIBase):
"""Tests that migrate_instance_start exits early if neutron doesn't
have the binding-extended API extension.
"""
- with mock.patch.object(self.api, 'supports_port_binding_extension',
+ with mock.patch.object(self.api, 'has_port_binding_extension',
return_value=False):
self.api.migrate_instance_start(
self.context, mock.sentinel.instance, {})
@@ -6691,8 +7128,9 @@ class TestAPI(TestAPIBase):
migration = objects.Migration(
source_compute='source', dest_compute='dest')
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
self.api.migrate_instance_start(
self.context, instance, migration)
@@ -6716,8 +7154,9 @@ class TestAPI(TestAPIBase):
migration = objects.Migration(
source_compute='source', dest_compute='dest')
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
self.api.migrate_instance_start(
self.context, instance, migration)
@@ -6743,8 +7182,9 @@ class TestAPI(TestAPIBase):
migration = objects.Migration(
source_compute='source', dest_compute='dest')
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
self.api.migrate_instance_start(
self.context, instance, migration)
@@ -6767,8 +7207,9 @@ class TestAPI(TestAPIBase):
migration = objects.Migration(
source_compute='source', dest_compute='dest')
- with mock.patch.object(self.api, 'supports_port_binding_extension',
- return_value=True):
+ with mock.patch.object(
+ self.api, 'has_port_binding_extension', return_value=True,
+ ):
self.api.migrate_instance_start(
self.context, instance, migration)
@@ -6945,13 +7386,17 @@ class TestAPI(TestAPIBase):
req_lvl_params.same_subtree,
)
- def test_get_segment_ids_for_network_no_segment_ext(self):
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_get_segment_ids_for_network_no_segment_ext(self, mock_client):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_client.return_value = mocked_client
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=False
+ self.api, 'has_segment_extension', return_value=False,
):
self.assertEqual(
[], self.api.get_segment_ids_for_network(self.context,
uuids.network_id))
+ mock_client.assert_called_once_with(self.context, admin=True)
@mock.patch.object(neutronapi, 'get_client')
def test_get_segment_ids_for_network_passes(self, mock_client):
@@ -6960,26 +7405,44 @@ class TestAPI(TestAPIBase):
mock_client.return_value = mocked_client
mocked_client.list_subnets.return_value = subnets
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
):
res = self.api.get_segment_ids_for_network(
self.context, uuids.network_id)
self.assertEqual([uuids.segment_id], res)
+ mock_client.assert_called_once_with(self.context, admin=True)
mocked_client.list_subnets.assert_called_once_with(
network_id=uuids.network_id, fields='segment_id')
@mock.patch.object(neutronapi, 'get_client')
- def test_get_segment_ids_for_network_with_no_segments(self, mock_client):
+ def test_get_segment_ids_for_network_with_segments_none(self, mock_client):
subnets = {'subnets': [{'segment_id': None}]}
mocked_client = mock.create_autospec(client.Client)
mock_client.return_value = mocked_client
mocked_client.list_subnets.return_value = subnets
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
+ ):
+ res = self.api.get_segment_ids_for_network(
+ self.context, uuids.network_id)
+ self.assertEqual([], res)
+ mock_client.assert_called_once_with(self.context, admin=True)
+ mocked_client.list_subnets.assert_called_once_with(
+ network_id=uuids.network_id, fields='segment_id')
+
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_get_segment_ids_for_network_with_no_segments(self, mock_client):
+ subnets = {'subnets': [{}]}
+ mocked_client = mock.create_autospec(client.Client)
+ mock_client.return_value = mocked_client
+ mocked_client.list_subnets.return_value = subnets
+ with mock.patch.object(
+ self.api, 'has_segment_extension', return_value=True,
):
res = self.api.get_segment_ids_for_network(
self.context, uuids.network_id)
self.assertEqual([], res)
+ mock_client.assert_called_once_with(self.context, admin=True)
mocked_client.list_subnets.assert_called_once_with(
network_id=uuids.network_id, fields='segment_id')
@@ -6990,19 +7453,24 @@ class TestAPI(TestAPIBase):
mocked_client.list_subnets.side_effect = (
exceptions.NeutronClientException(status_code=404))
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
):
self.assertRaises(exception.InvalidRoutedNetworkConfiguration,
self.api.get_segment_ids_for_network,
self.context, uuids.network_id)
+ mock_client.assert_called_once_with(self.context, admin=True)
- def test_get_segment_id_for_subnet_no_segment_ext(self):
+ @mock.patch.object(neutronapi, 'get_client')
+ def test_get_segment_id_for_subnet_no_segment_ext(self, mock_client):
+ mocked_client = mock.create_autospec(client.Client)
+ mock_client.return_value = mocked_client
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=False
+ self.api, 'has_segment_extension', return_value=False,
):
self.assertIsNone(
self.api.get_segment_id_for_subnet(self.context,
uuids.subnet_id))
+ mock_client.assert_called_once_with(self.context, admin=True)
@mock.patch.object(neutronapi, 'get_client')
def test_get_segment_id_for_subnet_passes(self, mock_client):
@@ -7011,11 +7479,12 @@ class TestAPI(TestAPIBase):
mock_client.return_value = mocked_client
mocked_client.show_subnet.return_value = subnet
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
):
res = self.api.get_segment_id_for_subnet(
self.context, uuids.subnet_id)
self.assertEqual(uuids.segment_id, res)
+ mock_client.assert_called_once_with(self.context, admin=True)
mocked_client.show_subnet.assert_called_once_with(uuids.subnet_id)
@mock.patch.object(neutronapi, 'get_client')
@@ -7025,11 +7494,12 @@ class TestAPI(TestAPIBase):
mock_client.return_value = mocked_client
mocked_client.show_subnet.return_value = subnet
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
):
self.assertIsNone(
self.api.get_segment_id_for_subnet(self.context,
uuids.subnet_id))
+ mock_client.assert_called_once_with(self.context, admin=True)
@mock.patch.object(neutronapi, 'get_client')
def test_get_segment_id_for_subnet_fails(self, mock_client):
@@ -7038,35 +7508,35 @@ class TestAPI(TestAPIBase):
mocked_client.show_subnet.side_effect = (
exceptions.NeutronClientException(status_code=404))
with mock.patch.object(
- self.api, '_has_segment_extension', return_value=True
+ self.api, 'has_segment_extension', return_value=True,
):
self.assertRaises(exception.InvalidRoutedNetworkConfiguration,
self.api.get_segment_id_for_subnet,
self.context, uuids.subnet_id)
+ mock_client.assert_called_once_with(self.context, admin=True)
@mock.patch.object(neutronapi.LOG, 'debug')
- def test_get_port_pci_slot(self, mock_debug):
+ def test_get_port_pci_dev(self, mock_debug):
fake_port = {'id': uuids.fake_port_id}
request = objects.InstancePCIRequest(requester_id=uuids.fake_port_id,
request_id=uuids.pci_request_id)
bad_request = objects.InstancePCIRequest(
requester_id=uuids.wrong_port_id)
- device = objects.PciDevice(request_id=uuids.pci_request_id,
- address='fake-pci-address')
+ device = objects.PciDevice(request_id=uuids.pci_request_id)
bad_device = objects.PciDevice(request_id=uuids.wrong_request_id)
# Test the happy path
instance = objects.Instance(
pci_requests=objects.InstancePCIRequests(requests=[request]),
pci_devices=objects.PciDeviceList(objects=[device]))
self.assertEqual(
- 'fake-pci-address',
- self.api._get_port_pci_slot(self.context, instance, fake_port))
+ device,
+ self.api._get_port_pci_dev(instance, fake_port))
# Test not finding the request
instance = objects.Instance(
pci_requests=objects.InstancePCIRequests(
requests=[objects.InstancePCIRequest(bad_request)]))
self.assertIsNone(
- self.api._get_port_pci_slot(self.context, instance, fake_port))
+ self.api._get_port_pci_dev(instance, fake_port))
mock_debug.assert_called_with('No PCI request found for port %s',
uuids.fake_port_id, instance=instance)
mock_debug.reset_mock()
@@ -7075,7 +7545,7 @@ class TestAPI(TestAPIBase):
pci_requests=objects.InstancePCIRequests(requests=[request]),
pci_devices=objects.PciDeviceList(objects=[bad_device]))
self.assertIsNone(
- self.api._get_port_pci_slot(self.context, instance, fake_port))
+ self.api._get_port_pci_dev(instance, fake_port))
mock_debug.assert_called_with('No PCI device found for request %s',
uuids.pci_request_id, instance=instance)
@@ -7246,9 +7716,9 @@ class TestInstanceHasExtendedResourceRequest(TestAPIBase):
self.addCleanup(patcher.stop)
self.mock_client = patcher.start().return_value
self.extension = {
- "extensions": [
+ 'extensions': [
{
- "name": constants.RESOURCE_REQUEST_GROUPS_EXTENSION,
+ 'alias': constants.RESOURCE_REQUEST_GROUPS,
}
]
}
@@ -7364,6 +7834,41 @@ class TestAPIModuleMethods(test.NoDBTestCase):
self.assertEqual(networks, [{'id': 1}, {'id': 2}, {'id': 3}])
+ @mock.patch('nova.network.neutron.LOG.info')
+ @mock.patch('nova.network.neutron.LOG.exception')
+ @mock.patch('nova.objects.instance_info_cache.InstanceInfoCache.save')
+ def test_update_instance_cache_with_nw_info_not_found(self, mock_save,
+ mock_log_exc,
+ mock_log_info):
+ """Tests that an attempt to update (save) the instance info cache will
+ not log a traceback but will reraise the exception for caller handling.
+ """
+ # Simulate the oslo.messaging created "<OriginalClass>_Remote" subclass
+ # type we'll be catching.
+ class InstanceNotFound_Remote(exception.InstanceNotFound):
+
+ def __init__(self, message=None, **kwargs):
+ super().__init__(message=message, **kwargs)
+
+ # Simulate a long exception message containing tracebacks because
+ # oslo.messaging appends them.
+ message = 'Instance was not found.\n'.ljust(255, '*')
+ mock_save.side_effect = InstanceNotFound_Remote(message=message,
+ instance_id=uuids.inst)
+ api = neutronapi.API()
+ ctxt = context.get_context()
+ instance = fake_instance.fake_instance_obj(ctxt, uuid=uuids.i)
+
+ self.assertRaises(
+ exception.InstanceNotFound,
+ neutronapi.update_instance_cache_with_nw_info, api, ctxt, instance,
+ nw_info=model.NetworkInfo())
+
+ # Verify we didn't log exception at level ERROR.
+ mock_log_exc.assert_not_called()
+ # Verify exception message was truncated before logging it.
+ self.assertLessEqual(len(mock_log_info.call_args.args[1]), 255)
+
class TestAPIPortbinding(TestAPIBase):
@@ -7390,25 +7895,83 @@ class TestAPIPortbinding(TestAPIBase):
mock_get_client.assert_called_once_with(mock.ANY)
mocked_client.list_extensions.assert_called_once_with()
+ @mock.patch.object(
+ neutronapi.API, '_get_vf_pci_device_profile',
+ new=mock.Mock(return_value={
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ }))
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
- def test_populate_neutron_extension_values_binding_sriov(self,
- mock_get_instance_pci_devs,
- mock_get_pci_device_devspec):
+ @mock.patch('nova.objects.Instance.get_pci_devices')
+ def test_populate_neutron_extension_values_binding_sriov(
+ self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_dev = {'vendor_id': '1377',
'product_id': '0047',
'address': '0000:0a:00.1',
+ 'card_serial_number': None,
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
}
PciDevice = collections.namedtuple('PciDevice',
- ['vendor_id', 'product_id', 'address'])
+ ['vendor_id', 'product_id', 'address',
+ 'card_serial_number', 'dev_type'])
mydev = PciDevice(**pci_dev)
profile = {'pci_vendor_info': '1377:0047',
'pci_slot': '0000:0a:00.1',
'physical_network': 'physnet1',
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ }
+
+ mock_get_instance_pci_devs.return_value = [mydev]
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ mock_get_pci_device_devspec.return_value = devspec
+
+ self.api._populate_neutron_binding_profile(
+ instance, pci_req_id, port_req_body, None)
+
+ self.assertEqual(profile,
+ port_req_body['port'][
+ constants.BINDING_PROFILE])
+
+ @mock.patch.object(
+ neutronapi.API, '_get_vf_pci_device_profile',
+ new=mock.Mock(return_value={
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ 'card_serial_number': 'MT2113X00000',
+ })
+ )
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
+ def test_populate_neutron_extension_values_binding_sriov_card_serial(
+ self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
+ host_id = 'my_host_id'
+ instance = objects.Instance(host=host_id)
+ port_req_body = {'port': {}}
+ pci_req_id = 'my_req_id'
+ pci_dev = {'vendor_id': 'a2d6',
+ 'product_id': '15b3',
+ 'address': '0000:0a:00.1',
+ 'card_serial_number': 'MT2113X00000',
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
+ }
+ PciDevice = collections.namedtuple('PciDevice',
+ ['vendor_id', 'product_id', 'address',
+ 'card_serial_number', 'dev_type'])
+ mydev = PciDevice(**pci_dev)
+ profile = {'pci_vendor_info': 'a2d6:15b3',
+ 'pci_slot': '0000:0a:00.1',
+ 'physical_network': 'physnet1',
+ # card_serial_number is a property of the object obtained
+ # from extra_info.
+ 'card_serial_number': 'MT2113X00000',
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
}
mock_get_instance_pci_devs.return_value = [mydev]
@@ -7460,13 +8023,19 @@ class TestAPIPortbinding(TestAPIBase):
profile,
port_req_body['port'][constants.BINDING_PROFILE])
+ @mock.patch.object(
+ neutronapi.API, '_get_vf_pci_device_profile',
+ new=mock.Mock(return_value={
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ })
+ )
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
- def test_populate_neutron_extension_values_binding_sriov_with_cap(self,
- mock_get_instance_pci_devs,
- mock_get_pci_device_devspec):
+ @mock.patch('nova.objects.Instance.get_pci_devices')
+ def test_populate_neutron_extension_values_binding_sriov_with_cap(
+ self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {
constants.BINDING_PROFILE: {
'capabilities': ['switchdev']}}}
@@ -7474,20 +8043,26 @@ class TestAPIPortbinding(TestAPIBase):
pci_dev = {'vendor_id': '1377',
'product_id': '0047',
'address': '0000:0a:00.1',
+ 'card_serial_number': None,
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
}
PciDevice = collections.namedtuple('PciDevice',
- ['vendor_id', 'product_id', 'address'])
+ ['vendor_id', 'product_id', 'address',
+ 'card_serial_number', 'dev_type'])
mydev = PciDevice(**pci_dev)
profile = {'pci_vendor_info': '1377:0047',
'pci_slot': '0000:0a:00.1',
'physical_network': 'physnet1',
'capabilities': ['switchdev'],
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
}
mock_get_instance_pci_devs.return_value = [mydev]
devspec = mock.Mock()
devspec.get_tags.return_value = {'physical_network': 'physnet1'}
mock_get_pci_device_devspec.return_value = devspec
+
self.api._populate_neutron_binding_profile(
instance, pci_req_id, port_req_body, None)
@@ -7496,11 +8071,145 @@ class TestAPIPortbinding(TestAPIBase):
constants.BINDING_PROFILE])
@mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
+ def test_populate_neutron_extension_values_binding_sriov_pf(
+ self, mock_get_instance_pci_devs, mock_get_devspec
+ ):
+ host_id = 'my_host_id'
+ instance = objects.Instance(host=host_id)
+ port_req_body = {'port': {}}
+
+ pci_dev = objects.PciDevice(
+ request_id=uuids.pci_req,
+ address='0000:01:00',
+ parent_addr='0000:02:00',
+ vendor_id='8086',
+ product_id='154d',
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={'mac_address': 'b4:96:91:34:f4:36'}
+ )
+
+ expected_profile = {
+ 'pci_vendor_info': '8086:154d',
+ 'pci_slot': '0000:01:00',
+ 'physical_network': 'physnet1',
+ 'device_mac_address': 'b4:96:91:34:f4:36',
+ }
+
+ mock_get_instance_pci_devs.return_value = [pci_dev]
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ mock_get_devspec.return_value = devspec
+
+ self.api._populate_neutron_binding_profile(
+ instance, uuids.pci_req, port_req_body, None)
+
+ self.assertEqual(
+ expected_profile,
+ port_req_body['port'][constants.BINDING_PROFILE]
+ )
+
+ @mock.patch.object(
+ pci_utils, 'get_vf_num_by_pci_address',
+ new=mock.MagicMock(side_effect=(lambda vf_a: 1
+ if vf_a == '0000:0a:00.1' else None)))
+ @mock.patch.object(
+ pci_utils, 'get_mac_by_pci_address',
+ new=mock.MagicMock(side_effect=(lambda vf_a: {
+ '0000:0a:00.0': '52:54:00:1e:59:c6'}.get(vf_a)))
+ )
+ def test__get_vf_pci_device_profile(self):
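+ # The VF profile is derived from its parent PF: the MAC address comes
+ # from the parent address and the VF number from the VF's own address
+ # (both resolved via the mocked pci_utils helpers above).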
+ pci_dev = {'vendor_id': 'a2d6',
+ 'product_id': '15b3',
+ 'address': '0000:0a:00.1',
+ 'parent_addr': '0000:0a:00.0',
+ 'card_serial_number': 'MT2113X00000',
+ 'sriov_cap': {
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ },
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
+ }
+ PciDevice = collections.namedtuple('PciDevice',
+ ['vendor_id', 'product_id', 'address',
+ 'card_serial_number', 'sriov_cap',
+ 'dev_type', 'parent_addr'])
+ mydev = PciDevice(**pci_dev)
+ self.assertEqual(self.api._get_vf_pci_device_profile(mydev),
+ {'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ 'card_serial_number': 'MT2113X00000'})
+
+ @mock.patch.object(
+ neutronapi.API, '_get_vf_pci_device_profile',
+ new=mock.MagicMock(side_effect=(
+ lambda dev: {'0000:0a:00.1': {
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'vf_num': 1,
+ 'card_serial_number': 'MT2113X00000',
+ }}.get(dev.address)
+ )))
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ def test__get_pci_device_profile_vf(self, mock_get_pci_device_devspec):
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ mock_get_pci_device_devspec.return_value = devspec
+
+ pci_dev = {'vendor_id': 'a2d6',
+ 'product_id': '15b3',
+ 'address': '0000:0a:00.1',
+ 'card_serial_number': 'MT2113X00000',
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
+ }
+ PciDevice = collections.namedtuple('PciDevice',
+ ['vendor_id', 'product_id', 'address',
+ 'card_serial_number', 'dev_type'])
+ mydev = PciDevice(**pci_dev)
+
+ self.assertEqual({'card_serial_number': 'MT2113X00000',
+ 'pci_slot': '0000:0a:00.1',
+ 'pci_vendor_info': 'a2d6:15b3',
+ 'pf_mac_address': '52:54:00:1e:59:c6',
+ 'physical_network': 'physnet1',
+ 'vf_num': 1},
+ self.api._get_pci_device_profile(mydev))
+
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ def test__get_pci_device_profile_pf(self, mock_get_pci_device_devspec):
+ devspec = mock.Mock()
+ devspec.get_tags.return_value = {'physical_network': 'physnet1'}
+ mock_get_pci_device_devspec.return_value = devspec
+
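+ # For a PF, the profile carries the device MAC address rather than
+ # the VF-specific fields.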
+ pci_dev = objects.PciDevice(
+ request_id=uuids.pci_req,
+ address='0000:0a:00.0',
+ parent_addr='0000:02:00',
+ vendor_id='a2d6',
+ product_id='15b3',
+ dev_type=obj_fields.PciDeviceType.SRIOV_PF,
+ extra_info={
+ 'capabilities': jsonutils.dumps(
+ {'card_serial_number': 'MT2113X00000'}),
+ 'mac_address': 'b4:96:91:34:f4:36',
+ },
+
+ )
+ self.assertEqual(
+ {
+ 'pci_slot': '0000:0a:00.0',
+ 'pci_vendor_info': 'a2d6:15b3',
+ 'physical_network': 'physnet1',
+ 'device_mac_address': 'b4:96:91:34:f4:36',
+ },
+ self.api._get_pci_device_profile(pci_dev),
+ )
+
+ @mock.patch.object(pci_whitelist.Whitelist, 'get_devspec')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
def test_populate_neutron_extension_values_binding_sriov_fail(
self, mock_get_instance_pci_devs, mock_get_pci_device_devspec):
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
port_req_body = {'port': {}}
pci_req_id = 'my_req_id'
pci_objs = [objects.PciDevice(vendor_id='1377',
@@ -7517,7 +8226,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_neutron_binding_profile,
instance, pci_req_id, port_req_body, None)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs', return_value=[])
+ @mock.patch('nova.objects.Instance.get_pci_devices', return_value=[])
def test_populate_neutron_binding_profile_pci_dev_not_found(
self, mock_get_instance_pci_devs):
api = neutronapi.API()
@@ -7528,28 +8237,52 @@ class TestAPIPortbinding(TestAPIBase):
api._populate_neutron_binding_profile,
instance, pci_req_id, port_req_body, None)
mock_get_instance_pci_devs.assert_called_once_with(
- instance, pci_req_id)
+ request_id=pci_req_id)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
- def test_pci_parse_whitelist_called_once(self,
- mock_get_instance_pci_devs):
- white_list = [
- '{"address":"0000:0a:00.1","physical_network":"default"}']
- cfg.CONF.set_override('passthrough_whitelist', white_list, 'pci')
+ @mock.patch.object(
+ pci_utils, 'is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
+ @mock.patch.object(
+ pci_utils, 'get_vf_num_by_pci_address',
+ new=mock.MagicMock(
+ side_effect=(lambda vf_a: {'0000:0a:00.1': 1}.get(vf_a)))
+ )
+ @mock.patch.object(
+ pci_utils, 'get_mac_by_pci_address',
+ new=mock.MagicMock(side_effect=(lambda vf_a: {
+ '0000:0a:00.0': '52:54:00:1e:59:c6'}.get(vf_a)))
+ )
+ @mock.patch('nova.objects.Instance.get_pci_devices')
+ def test_pci_parse_whitelist_called_once(
+ self, mock_get_instance_pci_devs
+ ):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:0a:00.1",
+ "physical_network": "default",
+ }
+ )
+ ]
+ cfg.CONF.set_override(
+ 'device_spec', device_spec, 'pci')
# NOTE(takashin): neutronapi.API must be initialized
- # after the 'passthrough_whitelist' is set in this test case.
+ # after the 'device_spec' is set in this test case.
api = neutronapi.API()
host_id = 'my_host_id'
- instance = {'host': host_id}
+ instance = objects.Instance(host=host_id)
pci_req_id = 'my_req_id'
port_req_body = {'port': {}}
pci_dev = {'vendor_id': '1377',
'product_id': '0047',
'address': '0000:0a:00.1',
+ 'parent_addr': '0000:0a:00.0',
+ 'dev_type': obj_fields.PciDeviceType.SRIOV_VF,
}
- whitelist = pci_whitelist.Whitelist(CONF.pci.passthrough_whitelist)
+ whitelist = pci_whitelist.Whitelist(CONF.pci.device_spec)
with mock.patch.object(pci_whitelist.Whitelist,
'_parse_white_list_from_config',
wraps=whitelist._parse_white_list_from_config
@@ -7575,7 +8308,7 @@ class TestAPIPortbinding(TestAPIBase):
vf.update_device(pci_dev)
return instance, pf, vf
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_pf(self, mock_get_mac_by_pci_address,
mock_get_instance_pci_devs):
@@ -7589,7 +8322,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 0, req)
self.assertEqual(expected_port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_vf(self, mock_get_mac_by_pci_address,
mock_get_instance_pci_devs):
@@ -7601,7 +8334,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 42, port_req_body)
self.assertEqual(port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch.object(pci_utils, 'get_mac_by_pci_address')
def test_populate_pci_mac_address_vf_fail(self,
mock_get_mac_by_pci_address,
@@ -7616,7 +8349,7 @@ class TestAPIPortbinding(TestAPIBase):
self.api._populate_pci_mac_address(instance, 42, port_req_body)
self.assertEqual(port_req_body, req)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
+ @mock.patch('nova.objects.Instance.get_pci_devices')
@mock.patch('nova.network.neutron.LOG.error')
def test_populate_pci_mac_address_no_device(self, mock_log_error,
mock_get_instance_pci_devs):
@@ -7774,7 +8507,7 @@ class TestAPIPortbinding(TestAPIBase):
self.assertEqual(1, mocked_client.create_port_binding.call_count)
self.assertDictEqual({uuids.port: binding['binding']}, result)
- # assert that that if vnic_type and profile are set in VIF object
+ # assert that if vnic_type and profile are set in VIF object
# the provided vnic_type and profile take precedence.
nwinfo = model.NetworkInfo([model.VIF(id=uuids.port,
@@ -7852,6 +8585,9 @@ class TestAPIPortbinding(TestAPIBase):
self.api.delete_port_binding(self.context, port_id,
'fake-host')
+ @mock.patch(
+ 'nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=False))
@mock.patch('nova.accelerator.cyborg._CyborgClient.delete_arqs_by_uuid')
@mock.patch('nova.network.neutron.get_binding_profile')
@mock.patch('nova.network.neutron.API._show_port')
@@ -8241,7 +8977,7 @@ class TestAllocateForInstance(test.NoDBTestCase):
requested_ports_dict = {uuids.port1: {}, uuids.port2: {}}
mock_neutron.list_extensions.return_value = {"extensions": [
- {"name": "asdf"}]}
+ {"alias": "asdf"}]}
port1 = {"port": {"id": uuids.port1, "mac_address": "mac1r"}}
port2 = {"port": {"id": uuids.port2, "mac_address": "mac2r"}}
mock_admin.update_port.side_effect = [port1, port2]
@@ -8324,6 +9060,10 @@ class TestAPINeutronHostnameDNSPortbinding(TestAPIBase):
requested_networks=requested_networks)
@mock.patch(
+ 'nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch(
'nova.network.neutron.API.has_extended_resource_request_extension',
new=mock.Mock(return_value=False)
)
@@ -8336,8 +9076,8 @@ class TestAPINeutronHostnameDNSPortbinding(TestAPIBase):
11, dns_extension=True, bind_host_id=self.instance.get('host'))
@mock.patch(
- "nova.network.neutron.API._has_dns_extension",
- new=mock.Mock(return_value=True)
+ 'nova.network.neutron.API.has_dns_extension',
+ new=mock.Mock(return_value=True),
)
def test_allocate_for_instance_with_requested_port_with_dns_domain(self):
# The port's dns_name attribute should be set by the port update
diff --git a/nova/tests/unit/network/test_os_vif_util.py b/nova/tests/unit/network/test_os_vif_util.py
index e15e4eb92a..338492aef0 100644
--- a/nova/tests/unit/network/test_os_vif_util.py
+++ b/nova/tests/unit/network/test_os_vif_util.py
@@ -696,6 +696,39 @@ class OSVIFUtilTestCase(test.NoDBTestCase):
self.assertObjEqual(expect, actual)
+ def test_nova_to_osvif_ovs_with_vnic_remote_managed(self):
+ vif = model.VIF(
+ id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
+ type=model.VIF_TYPE_OVS,
+ address="22:52:25:62:e2:aa",
+ vnic_type=model.VNIC_TYPE_REMOTE_MANAGED,
+ network=model.Network(
+ id="b82c1929-051e-481d-8110-4669916c7915",
+ label="Demo Net",
+ subnets=[]),
+ profile={'pci_slot': '0000:0a:00.1'}
+ )
+
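+ # A remote-managed VNIC is expected to translate into a VIFHostDevice
+ # handled by the no-op os-vif plugin, using the PCI address from the
+ # binding profile.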
+ actual = os_vif_util.nova_to_osvif_vif(vif)
+
+ expect = osv_objects.vif.VIFHostDevice(
+ id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
+ active=False,
+ address="22:52:25:62:e2:aa",
+ dev_address='0000:0a:00.1',
+ dev_type=os_vif_fields.VIFHostDeviceDevType.ETHERNET,
+ plugin="noop",
+ has_traffic_filtering=False,
+ preserve_on_delete=False,
+ network=osv_objects.network.Network(
+ id="b82c1929-051e-481d-8110-4669916c7915",
+ bridge_interface=None,
+ label="Demo Net",
+ subnets=osv_objects.subnet.SubnetList(
+ objects=[])))
+
+ self.assertObjEqual(expect, actual)
+
def test_nova_to_osvif_ovs_with_vnic_vdpa(self):
vif = model.VIF(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
diff --git a/nova/tests/unit/network/test_security_group.py b/nova/tests/unit/network/test_security_group.py
index b0bde1d9a2..a76dd4bf3c 100644
--- a/nova/tests/unit/network/test_security_group.py
+++ b/nova/tests/unit/network/test_security_group.py
@@ -13,10 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
#
-import mock
from neutronclient.common import exceptions as n_exc
from neutronclient.neutron import v2_0 as neutronv20
from neutronclient.v2_0 import client
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
diff --git a/nova/tests/unit/notifications/objects/test_flavor.py b/nova/tests/unit/notifications/objects/test_flavor.py
index 41fc8a36c3..e3cb9ec4c3 100644
--- a/nova/tests/unit/notifications/objects/test_flavor.py
+++ b/nova/tests/unit/notifications/objects/test_flavor.py
@@ -11,8 +11,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from nova import context
from nova.notifications.objects import flavor as flavor_notification
diff --git a/nova/tests/unit/notifications/objects/test_instance.py b/nova/tests/unit/notifications/objects/test_instance.py
index c2b7315587..8735e972dc 100644
--- a/nova/tests/unit/notifications/objects/test_instance.py
+++ b/nova/tests/unit/notifications/objects/test_instance.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/notifications/objects/test_notification.py b/nova/tests/unit/notifications/objects/test_notification.py
index 38d82d9ae9..de9e6f2762 100644
--- a/nova/tests/unit/notifications/objects/test_notification.py
+++ b/nova/tests/unit/notifications/objects/test_notification.py
@@ -12,8 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_versionedobjects import fixture
@@ -386,7 +386,7 @@ notification_object_data = {
# ImageMetaProps, so when you see a fail here for that reason, you must
# *also* bump the version of ImageMetaPropsPayload. See its docstring for
# more information.
- 'ImageMetaPropsPayload': '1.8-080bdcba9b96122eab57bf39d47348f7',
+ 'ImageMetaPropsPayload': '1.12-b9c64832d7772c1973e913bacbe0e8f9',
'InstanceActionNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
'InstanceActionPayload': '1.8-4fa3da9cbf0761f1f700ae578f36dc2f',
'InstanceActionRebuildNotification':
diff --git a/nova/tests/unit/notifications/objects/test_service.py b/nova/tests/unit/notifications/objects/test_service.py
index 6f0f5c7f7a..297dcac56f 100644
--- a/nova/tests/unit/notifications/objects/test_service.py
+++ b/nova/tests/unit/notifications/objects/test_service.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_utils import timeutils
from nova import context
diff --git a/nova/tests/unit/notifications/test_base.py b/nova/tests/unit/notifications/test_base.py
index 3ee2e36ddc..c0468ec64d 100644
--- a/nova/tests/unit/notifications/test_base.py
+++ b/nova/tests/unit/notifications/test_base.py
@@ -13,9 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
+from unittest import mock
from keystoneauth1 import exceptions as ks_exc
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context as nova_context
diff --git a/nova/tests/unit/objects/test_aggregate.py b/nova/tests/unit/objects/test_aggregate.py
index bdb14f72ad..3f01c9613d 100644
--- a/nova/tests/unit/objects/test_aggregate.py
+++ b/nova/tests/unit/objects/test_aggregate.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_block_device.py b/nova/tests/unit/objects/test_block_device.py
index 80c9e9a1fa..85959a961a 100644
--- a/nova/tests/unit/objects/test_block_device.py
+++ b/nova/tests/unit/objects/test_block_device.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
@@ -250,6 +251,14 @@ class _TestBlockDeviceMappingObject(object):
destination_type='local')
self.assertFalse(bdm.is_volume)
+ def test_is_local(self):
+ self.assertTrue(
+ objects.BlockDeviceMapping(
+ context=self.context, destination_type='local').is_local)
+ self.assertFalse(
+ objects.BlockDeviceMapping(
+ context=self.context, destination_type='volume').is_local)
+
def test_obj_load_attr_not_instance(self):
"""Tests that lazy-loading something other than the instance field
results in an error.
@@ -275,6 +284,11 @@ class _TestBlockDeviceMappingObject(object):
mock_inst_get_by_uuid.assert_called_once_with(
self.context, bdm.instance_uuid)
+ def test_obj_load_attr_encrypted(self):
+ bdm = objects.BlockDeviceMapping(self.context, **self.fake_bdm())
+ del bdm.encrypted
+ self.assertEqual(bdm.fields['encrypted'].default, bdm.encrypted)
+
def test_obj_make_compatible_pre_1_17(self):
values = {'source_type': 'volume', 'volume_id': 'fake-vol-id',
'destination_type': 'volume',
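
The new test_is_local case above implies the property is a straight check of destination_type. A self-contained sketch of that behaviour (an assumption drawn from the test, not the actual BlockDeviceMapping code):

    class BDMSketch:
        # Sketch only: mirrors the behaviour the test asserts.
        def __init__(self, destination_type):
            self.destination_type = destination_type

        @property
        def is_local(self):
            return self.destination_type == 'local'

    assert BDMSketch('local').is_local
    assert not BDMSketch('volume').is_local
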
diff --git a/nova/tests/unit/objects/test_build_request.py b/nova/tests/unit/objects/test_build_request.py
index 2b60888c5d..a55ab34008 100644
--- a/nova/tests/unit/objects/test_build_request.py
+++ b/nova/tests/unit/objects/test_build_request.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import base as o_vo_base
diff --git a/nova/tests/unit/objects/test_cell_mapping.py b/nova/tests/unit/objects/test_cell_mapping.py
index 3182269cc5..936793294b 100644
--- a/nova/tests/unit/objects/test_cell_mapping.py
+++ b/nova/tests/unit/objects/test_cell_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py
index 297edfbd55..84c4e87785 100644
--- a/nova/tests/unit/objects/test_compute_node.py
+++ b/nova/tests/unit/objects/test_compute_node.py
@@ -13,9 +13,10 @@
# under the License.
import copy
+from unittest import mock
-import mock
import netaddr
+from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
@@ -341,6 +342,14 @@ class _TestComputeNodeObject(object):
'uuid': uuidsentinel.fake_compute_node}
mock_create.assert_called_once_with(self.context, param_dict)
+ @mock.patch('nova.db.main.api.compute_node_create')
+ def test_create_duplicate(self, mock_create):
+ mock_create.side_effect = db_exc.DBDuplicateEntry
+ compute = compute_node.ComputeNode(context=self.context)
+ compute.service_id = 456
+ compute.hypervisor_hostname = 'node1'
+ self.assertRaises(exception.DuplicateRecord, compute.create)
+
@mock.patch.object(db, 'compute_node_update')
@mock.patch(
'nova.db.main.api.compute_node_get', return_value=fake_compute_node)
@@ -553,17 +562,15 @@ class _TestComputeNodeObject(object):
def test_update_from_virt_driver_uuid_already_set(self):
"""Tests update_from_virt_driver where the compute node object already
- has a uuid value so the uuid from the virt driver is ignored.
+ has a uuid value so an error is raised.
"""
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
# Emulate the ironic driver which adds a uuid field.
resources['uuid'] = uuidsentinel.node_uuid
compute = compute_node.ComputeNode(uuid=uuidsentinel.something_else)
- compute.update_from_virt_driver(resources)
- expected = fake_compute_with_resources.obj_clone()
- expected.uuid = uuidsentinel.something_else
- self.assertTrue(base.obj_equal_prims(expected, compute))
+ self.assertRaises(exception.InvalidNodeConfiguration,
+ compute.update_from_virt_driver, resources)
def test_update_from_virt_driver_missing_field(self):
# NOTE(pmurray): update_from_virt_driver does not require
@@ -666,8 +673,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
@mock.patch('nova.db.main.api.compute_node_update')
@@ -694,8 +701,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
@mock.patch('nova.db.main.api.compute_node_update')
@@ -722,8 +729,8 @@ class _TestComputeNodeObject(object):
CONF.initial_disk_allocation_ratio, compute.disk_allocation_ratio)
mock_update.assert_called_once_with(
- self.context, 123, {'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5,
+ self.context, 123, {'cpu_allocation_ratio': 4.0,
+ 'ram_allocation_ratio': 1.0,
'disk_allocation_ratio': 1.0})
def test_get_all_by_not_mapped(self):
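
Two behavioural changes are captured in this file: the new test_create_duplicate case expects a low-level duplicate-key error to surface as a Nova-level exception, and the updated assertions reflect initial allocation ratio defaults of 4.0 (CPU), 1.0 (RAM) and 1.0 (disk). A sketch of the duplicate handling the test implies (assumed, including the 'target' keyword; not the actual ComputeNode.create code):

    from oslo_db import exception as db_exc

    from nova.db.main import api as db
    from nova import exception

    def compute_node_create_sketch(context, values):
        # Sketch only: translate DBDuplicateEntry into DuplicateRecord.
        try:
            return db.compute_node_create(context, values)
        except db_exc.DBDuplicateEntry:
            raise exception.DuplicateRecord(target='compute_node')
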
diff --git a/nova/tests/unit/objects/test_console_auth_token.py b/nova/tests/unit/objects/test_console_auth_token.py
index 9c92e798b0..9a0901e12a 100644
--- a/nova/tests/unit/objects/test_console_auth_token.py
+++ b/nova/tests/unit/objects/test_console_auth_token.py
@@ -14,7 +14,7 @@
# under the License.
import copy
-import mock
+from unittest import mock
import urllib.parse as urlparse
from oslo_db.exception import DBDuplicateEntry
diff --git a/nova/tests/unit/objects/test_ec2.py b/nova/tests/unit/objects/test_ec2.py
index 8261fd6173..55230a7599 100644
--- a/nova/tests/unit/objects/test_ec2.py
+++ b/nova/tests/unit/objects/test_ec2.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.db.main import api as db
diff --git a/nova/tests/unit/objects/test_external_event.py b/nova/tests/unit/objects/test_external_event.py
index 915358ba59..58c45c2549 100644
--- a/nova/tests/unit/objects/test_external_event.py
+++ b/nova/tests/unit/objects/test_external_event.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.objects import external_event as external_event_obj
from nova.tests.unit.objects import test_objects
diff --git a/nova/tests/unit/objects/test_fields.py b/nova/tests/unit/objects/test_fields.py
index 39f9de8cfe..461dc0ff6f 100644
--- a/nova/tests/unit/objects/test_fields.py
+++ b/nova/tests/unit/objects/test_fields.py
@@ -15,9 +15,9 @@
import collections
import datetime
import os
+from unittest import mock
import iso8601
-import mock
from oslo_serialization import jsonutils
from oslo_versionedobjects import exception as ovo_exc
@@ -551,7 +551,7 @@ class TestNetworkModel(TestField):
def setUp(self):
super(TestNetworkModel, self).setUp()
model = network_model.NetworkInfo()
- self.field = fields.Field(fields.NetworkModel())
+ self.field = fields.NetworkModelField()
self.coerce_good_values = [(model, model), (model.json(), model)]
self.coerce_bad_values = [[], 'foo']
self.to_primitive_values = [(model, model.json())]
@@ -570,7 +570,7 @@ class TestNetworkVIFModel(TestField):
super(TestNetworkVIFModel, self).setUp()
model = network_model.VIF('6c197bc7-820c-40d5-8aff-7116b993e793')
primitive = jsonutils.dumps(model)
- self.field = fields.Field(fields.NetworkVIFModel())
+ self.field = fields.NetworkVIFModelField()
self.coerce_good_values = [(model, model), (primitive, model)]
self.coerce_bad_values = [[], 'foo']
self.to_primitive_values = [(model, primitive)]
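
The two hunks above replace the generic fields.Field(...) wrapper with dedicated NetworkModelField / NetworkVIFModelField classes. In oslo.versionedobjects such dedicated fields are usually thin AutoTypedField subclasses; a small, runnable illustration of the pattern (the nova classes presumably look the same, with the NetworkModel / NetworkVIFModel field types as AUTO_TYPE):

    from oslo_versionedobjects import fields as ovo_fields

    class ExampleStringField(ovo_fields.AutoTypedField):
        # The field type is baked in, so call sites can write
        # ExampleStringField() instead of Field(String()).
        AUTO_TYPE = ovo_fields.String()
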
diff --git a/nova/tests/unit/objects/test_flavor.py b/nova/tests/unit/objects/test_flavor.py
index 93294d95aa..4172d3fda3 100644
--- a/nova/tests/unit/objects/test_flavor.py
+++ b/nova/tests/unit/objects/test_flavor.py
@@ -13,8 +13,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
diff --git a/nova/tests/unit/objects/test_host_mapping.py b/nova/tests/unit/objects/test_host_mapping.py
index 8917e318af..73eadb7047 100644
--- a/nova/tests/unit/objects/test_host_mapping.py
+++ b/nova/tests/unit/objects/test_host_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_db import exception as db_exc
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/objects/test_image_meta.py b/nova/tests/unit/objects/test_image_meta.py
index 1750caba01..371f7b101a 100644
--- a/nova/tests/unit/objects/test_image_meta.py
+++ b/nova/tests/unit/objects/test_image_meta.py
@@ -108,6 +108,7 @@ class TestImageMetaProps(test.NoDBTestCase):
'hw_video_model': 'vga',
'hw_video_ram': '512',
'hw_qemu_guest_agent': 'yes',
+ 'hw_locked_memory': 'true',
'trait:CUSTOM_TRUSTED': 'required',
# Fill sane values for the rest here
}
@@ -116,6 +117,7 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertEqual('vga', virtprops.hw_video_model)
self.assertEqual(512, virtprops.hw_video_ram)
self.assertTrue(virtprops.hw_qemu_guest_agent)
+ self.assertTrue(virtprops.hw_locked_memory)
self.assertIsNotNone(virtprops.traits_required)
self.assertIn('CUSTOM_TRUSTED', virtprops.traits_required)
@@ -285,6 +287,28 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertEqual([set([0, 1, 2, 3])],
virtprops.hw_numa_cpus)
+ def test_locked_memory_prop(self):
+ props = {'hw_locked_memory': 'true'}
+ virtprops = objects.ImageMetaProps.from_dict(props)
+ self.assertTrue(virtprops.hw_locked_memory)
+
+ def test_obj_make_compatible_hw_locked_memory(self):
+ """Check 'hw_locked_memory' compatibility."""
+ # assert that 'hw_locked_memory' is supported
+ # on a suitably new version
+ obj = objects.ImageMetaProps(
+ hw_locked_memory='true',
+ )
+ primitive = obj.obj_to_primitive('1.33')
+ self.assertIn('hw_locked_memory',
+ primitive['nova_object.data'])
+ self.assertTrue(primitive['nova_object.data']['hw_locked_memory'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.32')
+ self.assertNotIn('hw_locked_memory',
+ primitive['nova_object.data'])
+
def test_get_unnumbered_trait_fields(self):
"""Tests that only valid un-numbered required traits are parsed from
the properties.
@@ -349,6 +373,53 @@ class TestImageMetaProps(test.NoDBTestCase):
self.assertRaises(exception.ObjectActionError,
obj.obj_to_primitive, '1.0')
+ def test_obj_make_compatible_hw_ephemeral_encryption(self):
+ """Check 'hw_ephemeral_encryption(_format)' compatibility."""
+ # assert that 'hw_ephemeral_encryption' and
+ # 'hw_ephemeral_encryption_format' are supported
+ # on a suitably new version
+ new_fields = (
+ 'hw_ephemeral_encryption',
+ 'hw_ephemeral_encryption_format'
+ )
+ eph_format = objects.fields.BlockDeviceEncryptionFormatType.LUKS
+ obj = objects.ImageMetaProps(
+ hw_ephemeral_encryption='yes',
+ hw_ephemeral_encryption_format=eph_format,
+ )
+ primitive = obj.obj_to_primitive('1.32')
+ for field in new_fields:
+ self.assertIn(field, primitive['nova_object.data'])
+ self.assertTrue(
+ primitive['nova_object.data']['hw_ephemeral_encryption'])
+ self.assertEqual(
+ eph_format,
+ primitive['nova_object.data']['hw_ephemeral_encryption_format'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.31')
+ for field in new_fields:
+ self.assertNotIn(field, primitive['nova_object.data'])
+
+ def test_obj_make_compatible_hw_emulation(self):
+ """Check 'hw_emulation_architecture' compatibility."""
+ # assert that 'hw_emulation_architecture' is supported
+ # on a suitably new version
+ obj = objects.ImageMetaProps(
+ hw_emulation_architecture=objects.fields.Architecture.AARCH64,
+ )
+ primitive = obj.obj_to_primitive('1.31')
+ self.assertIn('hw_emulation_architecture',
+ primitive['nova_object.data'])
+ self.assertEqual(
+ objects.fields.Architecture.AARCH64,
+ primitive['nova_object.data']['hw_emulation_architecture'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.29')
+ self.assertNotIn('hw_emulation_architecture',
+ primitive['nova_object.data'])
+
def test_obj_make_compatible_input_bus(self):
"""Check 'hw_input_bus' compatibility."""
# assert that 'hw_input_bus' is supported on a suitably new version
@@ -467,3 +538,19 @@ class TestImageMetaProps(test.NoDBTestCase):
hw_pci_numa_affinity_policy=fields.PCINUMAAffinityPolicy.SOCKET)
self.assertRaises(exception.ObjectActionError,
obj.obj_to_primitive, '1.27')
+
+ def test_obj_make_compatible_viommu_model(self):
+ """Check 'hw_viommu_model' compatibility."""
+ # assert that 'hw_viommu_model' is supported on a suitably new version
+ obj = objects.ImageMetaProps(
+ hw_viommu_model=objects.fields.VIOMMUModel.VIRTIO,
+ )
+ primitive = obj.obj_to_primitive('1.34')
+ self.assertIn('hw_viommu_model', primitive['nova_object.data'])
+ self.assertEqual(
+ objects.fields.VIOMMUModel.VIRTIO,
+ primitive['nova_object.data']['hw_viommu_model'])
+
+ # and is absent on older versions
+ primitive = obj.obj_to_primitive('1.33')
+ self.assertNotIn('hw_viommu_model', primitive['nova_object.data'])
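
Every compatibility test added above follows the same pattern: a property introduced at some ImageMetaProps version must be dropped from the primitive when serializing to an older target version. A sketch of that version gating, with the field/version pairs inferred from the tests (an illustration, not the actual obj_make_compatible implementation):

    from oslo_utils import versionutils

    def drop_too_new_props_sketch(primitive, target_version):
        # Sketch only: strip properties the target version cannot know about.
        target = versionutils.convert_version_to_tuple(target_version)
        if target < (1, 34):
            primitive.pop('hw_viommu_model', None)
        if target < (1, 33):
            primitive.pop('hw_locked_memory', None)
        if target < (1, 32):
            primitive.pop('hw_ephemeral_encryption', None)
            primitive.pop('hw_ephemeral_encryption_format', None)
        if target < (1, 30):
            # The tests only bracket this one between 1.29 and 1.31.
            primitive.pop('hw_emulation_architecture', None)
        return primitive
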
diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py
index e187a4c251..6215d2be60 100644
--- a/nova/tests/unit/objects/test_instance.py
+++ b/nova/tests/unit/objects/test_instance.py
@@ -14,8 +14,8 @@
import collections
import datetime
+from unittest import mock
-import mock
import netaddr
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
@@ -25,6 +25,7 @@ from oslo_versionedobjects import base as ovo_base
from nova.compute import task_states
from nova.compute import vm_states
+from nova import context
from nova.db.main import api as db
from nova.db.main import models as sql_models
from nova import exception
@@ -2015,12 +2016,14 @@ class TestInstanceListObject(test_objects._LocalTest,
# manually here.
engine = db.get_engine()
table = sql_models.Instance.__table__
- with engine.connect() as conn:
- update = table.insert().values(user_id=self.context.user_id,
- project_id=self.context.project_id,
- uuid=uuids.nullinst,
- host='foo',
- hidden=None)
+ with engine.connect() as conn, conn.begin():
+ update = table.insert().values(
+ user_id=self.context.user_id,
+ project_id=self.context.project_id,
+ uuid=uuids.nullinst,
+ host='foo',
+ hidden=None,
+ )
conn.execute(update)
insts = objects.InstanceList.get_by_filters(self.context,
@@ -2071,3 +2074,164 @@ class TestInstanceObjectMisc(test.NoDBTestCase):
self.assertEqual(['metadata', 'system_metadata', 'info_cache',
'security_groups', 'pci_devices', 'tags', 'extra',
'extra.flavor'], result_list)
+
+
+class TestInstanceObjectGetPciDevices(test.NoDBTestCase):
+ def test_lazy_loading_pci_devices(self):
+ user_id = "fake-user"
+ project_id = "fake-project"
+ ctxt = context.RequestContext(user_id, project_id)
+
+ inst = instance.Instance(ctxt, uuid=uuids.instance)
+ with mock.patch(
+ "nova.objects.PciDeviceList.get_by_instance_uuid",
+ return_value=objects.PciDeviceList(),
+ ) as mock_get_pci:
+ self.assertEqual([], inst.get_pci_devices())
+
+ mock_get_pci.assert_called_once_with(ctxt, uuids.instance)
+
+ def test_lazy_loading_pci_requests(self):
+ user_id = "fake-user"
+ project_id = "fake-project"
+ ctxt = context.RequestContext(user_id, project_id)
+
+ devs = [objects.PciDevice(request_id=uuids.req1)]
+ inst = instance.Instance(
+ ctxt,
+ uuid=uuids.instance,
+ pci_devices=objects.PciDeviceList(
+ objects=devs
+ ),
+ )
+
+ with mock.patch(
+ "nova.objects.InstancePCIRequests.get_by_instance_uuid",
+ return_value=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ ]
+ ),
+ ) as mock_get_pci_req:
+ self.assertEqual(
+ devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
+
+ mock_get_pci_req.assert_called_once_with(ctxt, uuids.instance)
+
+ def test_no_filter(self):
+ devs = [objects.PciDevice()]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=devs)
+ )
+
+ self.assertEqual(devs, inst.get_pci_devices())
+
+ def test_no_filter_by_request_id(self):
+ expected_devs = [objects.PciDevice(request_id=uuids.req1)]
+ all_devs = expected_devs + [objects.PciDevice(request_id=uuids.req2)]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs)
+ )
+
+ self.assertEqual(
+ expected_devs, inst.get_pci_devices(request_id=uuids.req1)
+ )
+
+ def test_no_filter_by_source(self):
+ expected_devs = [
+ objects.PciDevice(request_id=uuids.req1),
+ objects.PciDevice(request_id=uuids.req1),
+ ]
+ all_devs = expected_devs + [objects.PciDevice(request_id=uuids.req2)]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ expected_devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
+
+ def test_no_filter_by_request_id_and_source(self):
+ expected_devs = []
+ all_devs = expected_devs + [
+ objects.PciDevice(request_id=uuids.req1),
+ objects.PciDevice(request_id=uuids.req2),
+ objects.PciDevice(request_id=uuids.req1),
+ ]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=all_devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.req1,
+ alias_name="pci-alias-1",
+ ),
+ objects.InstancePCIRequest(
+ request_id=uuids.req2,
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ expected_devs,
+ inst.get_pci_devices(
+ request_id=uuids.req1,
+ source=objects.InstancePCIRequest.NEUTRON_PORT,
+ ),
+ )
+
+ def test_old_pci_dev_and_req(self):
+ """This tests the case when the system has old InstancePCIRequest
+ objects without the request_id filled, and therefore PciDevice
+ objects where the request_id is None too. These requests and
+ devices are always flavor based.
+ """
+ devs = [
+ objects.PciDevice(request_id=None),
+ objects.PciDevice(request_id=None),
+ ]
+
+ inst = instance.Instance(
+ pci_devices=objects.PciDeviceList(objects=devs),
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=None,
+ alias_name="pci-alias-1",
+ ),
+ ]
+ ),
+ )
+
+ self.assertEqual(
+ devs,
+ inst.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS,
+ ),
+ )
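
Taken together, the TestInstanceObjectGetPciDevices cases above define the filtering contract for Instance.get_pci_devices(): lazy-load pci_devices (and pci_requests when filtering by source), optionally narrow by request_id, and treat a device as flavor-based when its request carries an alias_name or has no request_id at all. A rough, self-contained sketch of that logic under those assumptions (not the actual nova.objects.instance code):

    FLAVOR_ALIAS = 'flavor-alias'   # stand-ins for the real enum values
    NEUTRON_PORT = 'neutron-port'

    def get_pci_devices_sketch(instance, request_id=None, source=None):
        # Sketch only: `instance` is anything with pci_devices/pci_requests.
        devs = list(instance.pci_devices)
        if request_id is not None:
            devs = [d for d in devs if d.request_id == request_id]
        if source is not None:
            alias_req_ids = {
                r.request_id for r in instance.pci_requests.requests
                if getattr(r, 'alias_name', None)
            }
            if source == FLAVOR_ALIAS:
                devs = [d for d in devs
                        if d.request_id is None or
                        d.request_id in alias_req_ids]
            else:
                devs = [d for d in devs
                        if d.request_id is not None and
                        d.request_id not in alias_req_ids]
        return devs
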
diff --git a/nova/tests/unit/objects/test_instance_action.py b/nova/tests/unit/objects/test_instance_action.py
index 1743623b1c..8322102021 100644
--- a/nova/tests/unit/objects/test_instance_action.py
+++ b/nova/tests/unit/objects/test_instance_action.py
@@ -14,8 +14,8 @@
import copy
import traceback
+from unittest import mock
-import mock
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_instance_device_metadata.py b/nova/tests/unit/objects/test_instance_device_metadata.py
index 6f998db84e..c04d02dcb7 100644
--- a/nova/tests/unit/objects/test_instance_device_metadata.py
+++ b/nova/tests/unit/objects/test_instance_device_metadata.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from nova import objects
diff --git a/nova/tests/unit/objects/test_instance_fault.py b/nova/tests/unit/objects/test_instance_fault.py
index b19d8663c1..1816801fca 100644
--- a/nova/tests/unit/objects/test_instance_fault.py
+++ b/nova/tests/unit/objects/test_instance_fault.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.db.main import api as db
diff --git a/nova/tests/unit/objects/test_instance_group.py b/nova/tests/unit/objects/test_instance_group.py
index 41efd08a36..5ea566fea7 100644
--- a/nova/tests/unit/objects/test_instance_group.py
+++ b/nova/tests/unit/objects/test_instance_group.py
@@ -13,8 +13,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -317,7 +317,7 @@ class _TestInstanceGroupObject(object):
obj_primitive = obj.obj_to_primitive()
self.assertIn('policy', data(obj_primitive))
self.assertIn('policies', data(obj_primitive))
- # Before 1.10, only has polices which is the list of policy name
+ # Before 1.10, there are only 'policies', which is the list of policy names
obj_primitive = obj.obj_to_primitive('1.10')
self.assertNotIn('policy', data(obj_primitive))
self.assertIn('policies', data(obj_primitive))
diff --git a/nova/tests/unit/objects/test_instance_info_cache.py b/nova/tests/unit/objects/test_instance_info_cache.py
index 2df596f5af..2c4d6a3263 100644
--- a/nova/tests/unit/objects/test_instance_info_cache.py
+++ b/nova/tests/unit/objects/test_instance_info_cache.py
@@ -13,8 +13,9 @@
# under the License.
import datetime
+from unittest import mock
-import mock
+from oslo_db import exception as db_exc
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -82,6 +83,30 @@ class _TestInstanceInfoCacheObject(object):
self.assertEqual(timeutils.normalize_time(fake_updated_at),
timeutils.normalize_time(obj.updated_at))
+ @mock.patch.object(db, 'instance_info_cache_update')
+ def test_save_fkey_constraint_fail(self, mock_update):
+ fake_updated_at = datetime.datetime(2015, 1, 1)
+ nwinfo = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
+ nwinfo_json = nwinfo.json()
+ new_info_cache = fake_info_cache.copy()
+ new_info_cache['id'] = 1
+ new_info_cache['updated_at'] = fake_updated_at
+ new_info_cache['network_info'] = nwinfo_json
+
+ # We should see InstanceNotFound raised for fkey=instance_uuid
+ mock_update.side_effect = db_exc.DBReferenceError(
+ 'table', 'constraint', 'instance_uuid', 'key_table')
+
+ obj = instance_info_cache.InstanceInfoCache(context=self.context)
+ obj.instance_uuid = uuids.info_instance
+ obj.network_info = nwinfo_json
+ self.assertRaises(exception.InstanceNotFound, obj.save)
+
+ # We should see the original exception raised for any other fkey
+ mock_update.side_effect = db_exc.DBReferenceError(
+ 'table', 'constraint', 'otherkey', 'key_table')
+ self.assertRaises(db_exc.DBReferenceError, obj.save)
+
@mock.patch.object(db, 'instance_info_cache_get',
return_value=fake_info_cache)
def test_refresh(self, mock_get):
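
The new test_save_fkey_constraint_fail case above expects the save path to turn a DBReferenceError on the instance_uuid foreign key into InstanceNotFound, while any other reference error is re-raised untouched. A small sketch of that translation (illustration only, with db_update standing in for the real instance_info_cache_update call):

    from oslo_db import exception as db_exc

    from nova import exception

    def save_info_cache_sketch(obj, db_update):
        # Sketch only: map a broken instance_uuid fkey to InstanceNotFound.
        try:
            return db_update(obj._context, obj.instance_uuid,
                             {'network_info': obj.network_info})
        except db_exc.DBReferenceError as err:
            if err.key != 'instance_uuid':
                raise
            raise exception.InstanceNotFound(instance_id=obj.instance_uuid)
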
diff --git a/nova/tests/unit/objects/test_instance_mapping.py b/nova/tests/unit/objects/test_instance_mapping.py
index 2c877c0a1f..865f5b6581 100644
--- a/nova/tests/unit/objects/test_instance_mapping.py
+++ b/nova/tests/unit/objects/test_instance_mapping.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import uuidutils
from sqlalchemy.orm import exc as orm_exc
diff --git a/nova/tests/unit/objects/test_instance_numa.py b/nova/tests/unit/objects/test_instance_numa.py
index f7a9ef7a1d..0d3bd0dba0 100644
--- a/nova/tests/unit/objects/test_instance_numa.py
+++ b/nova/tests/unit/objects/test_instance_numa.py
@@ -11,7 +11,8 @@
# under the License.
import copy
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import base as ovo_base
import testtools
diff --git a/nova/tests/unit/objects/test_instance_pci_requests.py b/nova/tests/unit/objects/test_instance_pci_requests.py
index 9b6003ca49..91b289dbd5 100644
--- a/nova/tests/unit/objects/test_instance_pci_requests.py
+++ b/nova/tests/unit/objects/test_instance_pci_requests.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import base as ovo_base
@@ -112,23 +113,6 @@ class _TestInstancePCIRequests(object):
self.assertIsNone(req.requests[0].requester_id)
self.assertEqual(uuids.requester_id, req.requests[1].requester_id)
- def test_from_request_spec_instance_props(self):
- requests = objects.InstancePCIRequests(
- requests=[objects.InstancePCIRequest(count=1,
- request_id=FAKE_UUID,
- spec=[{'vendor_id': '8086',
- 'device_id': '1502'}])
- ],
- instance_uuid=FAKE_UUID)
- result = jsonutils.to_primitive(requests)
- result = objects.InstancePCIRequests.from_request_spec_instance_props(
- result)
- self.assertEqual(1, len(result.requests))
- self.assertEqual(1, result.requests[0].count)
- self.assertEqual(FAKE_UUID, result.requests[0].request_id)
- self.assertEqual([{'vendor_id': '8086', 'device_id': '1502'}],
- result.requests[0].spec)
-
def test_obj_make_compatible_pre_1_2(self):
topo_obj = objects.InstancePCIRequest(
count=1,
diff --git a/nova/tests/unit/objects/test_keypair.py b/nova/tests/unit/objects/test_keypair.py
index ad405b7e1b..b86bbb44de 100644
--- a/nova/tests/unit/objects/test_keypair.py
+++ b/nova/tests/unit/objects/test_keypair.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils import timeutils
from nova import exception
diff --git a/nova/tests/unit/objects/test_migrate_data.py b/nova/tests/unit/objects/test_migrate_data.py
index bc04c5bd13..7ceaf2a192 100644
--- a/nova/tests/unit/objects/test_migrate_data.py
+++ b/nova/tests/unit/objects/test_migrate_data.py
@@ -94,8 +94,8 @@ class _TestLibvirtLiveMigrateData(object):
target_connect_addr='127.0.0.1',
dst_wants_file_backed_memory=False,
file_backed_memory_discard=False,
- src_supports_numa_live_migraton=True,
- dst_supports_numa_live_migraton=True,
+ src_supports_numa_live_migration=True,
+ dst_supports_numa_live_migration=True,
dst_numa_info=migrate_data.LibvirtLiveMigrateNUMAInfo())
manifest = ovo_base.obj_tree_get_versions(obj.obj_name())
@@ -219,67 +219,6 @@ class TestRemoteHyperVLiveMigrateData(test_objects._RemoteTest,
pass
-class _TestPowerVMLiveMigrateData(object):
- @staticmethod
- def _mk_obj():
- return migrate_data.PowerVMLiveMigrateData(
- host_mig_data=dict(one=2),
- dest_ip='1.2.3.4',
- dest_user_id='a_user',
- dest_sys_name='a_sys',
- public_key='a_key',
- dest_proc_compat='POWER7',
- vol_data=dict(three=4),
- vea_vlan_mappings=dict(five=6),
- old_vol_attachment_ids=dict(seven=8),
- wait_for_vif_plugged=True)
-
- @staticmethod
- def _mk_leg():
- return {
- 'host_mig_data': {'one': '2'},
- 'dest_ip': '1.2.3.4',
- 'dest_user_id': 'a_user',
- 'dest_sys_name': 'a_sys',
- 'public_key': 'a_key',
- 'dest_proc_compat': 'POWER7',
- 'vol_data': {'three': '4'},
- 'vea_vlan_mappings': {'five': '6'},
- 'old_vol_attachment_ids': {'seven': '8'},
- 'wait_for_vif_plugged': True
- }
-
- def test_migrate_data(self):
- obj = self._mk_obj()
- self.assertEqual('a_key', obj.public_key)
- obj.public_key = 'key2'
- self.assertEqual('key2', obj.public_key)
-
- def test_obj_make_compatible(self):
- obj = self._mk_obj()
-
- data = lambda x: x['nova_object.data']
-
- primitive = data(obj.obj_to_primitive())
- self.assertIn('vea_vlan_mappings', primitive)
- primitive = data(obj.obj_to_primitive(target_version='1.0'))
- self.assertNotIn('vea_vlan_mappings', primitive)
- primitive = data(obj.obj_to_primitive(target_version='1.1'))
- self.assertNotIn('old_vol_attachment_ids', primitive)
- primitive = data(obj.obj_to_primitive(target_version='1.2'))
- self.assertNotIn('wait_for_vif_plugged', primitive)
-
-
-class TestPowerVMLiveMigrateData(test_objects._LocalTest,
- _TestPowerVMLiveMigrateData):
- pass
-
-
-class TestRemotePowerVMLiveMigrateData(test_objects._RemoteTest,
- _TestPowerVMLiveMigrateData):
- pass
-
-
class TestVIFMigrateData(test.NoDBTestCase):
def test_get_dest_vif_source_vif_not_set(self):
diff --git a/nova/tests/unit/objects/test_migration.py b/nova/tests/unit/objects/test_migration.py
index 970122a409..6da232b933 100644
--- a/nova/tests/unit/objects/test_migration.py
+++ b/nova/tests/unit/objects/test_migration.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
diff --git a/nova/tests/unit/objects/test_migration_context.py b/nova/tests/unit/objects/test_migration_context.py
index 94e8e9d57f..12becaee38 100644
--- a/nova/tests/unit/objects/test_migration_context.py
+++ b/nova/tests/unit/objects/test_migration_context.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index 33be416167..aab079381c 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -19,9 +19,9 @@ import datetime
import inspect
import os
import pprint
+from unittest import mock
import fixtures
-import mock
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
@@ -1046,7 +1046,7 @@ class TestRegistry(test.NoDBTestCase):
object_data = {
'Aggregate': '1.3-f315cb68906307ca2d1cca84d4753585',
'AggregateList': '1.3-3ea55a050354e72ef3306adefa553957',
- 'BlockDeviceMapping': '1.20-45a6ad666ddf14bbbedece2293af77e2',
+ 'BlockDeviceMapping': '1.21-220abb8aa1450e759b72fce8ec6ff955',
'BlockDeviceMappingList': '1.18-73bcbbae5ef5e8adcedbc821db869306',
'BuildRequest': '1.3-077dee42bed93f8a5b62be77657b7152',
'BuildRequestList': '1.0-cd95608eccb89fbc702c8b52f38ec738',
@@ -1066,20 +1066,20 @@ object_data = {
'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f',
'Flavor': '1.2-4ce99b41327bb230262e5a8f45ff0ce3',
'FlavorList': '1.1-52b5928600e7ca973aa4fc1e46f3934c',
- 'HVSpec': '1.2-de06bcec472a2f04966b855a49c46b41',
'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac',
'HostMappingList': '1.1-18ac2bfb8c1eb5545bed856da58a79bc',
+ 'HVSpec': '1.2-de06bcec472a2f04966b855a49c46b41',
'HyperVLiveMigrateData': '1.4-e265780e6acfa631476c8170e8d6fce0',
'IDEDeviceBus': '1.0-29d4c9f27ac44197f01b6ac1b7e16502',
'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
- 'ImageMetaProps': '1.30-5bfc3dd01bbfdbb28cb3a096c0b2f383',
+ 'ImageMetaProps': '1.34-29b3a6b7fe703f36bfd240d914f16c21',
'Instance': '2.7-d187aec68cad2e4d8b8a03a68e4739ce',
'InstanceAction': '1.2-9a5abc87fdd3af46f45731960651efb5',
'InstanceActionEvent': '1.4-5b1f361bd81989f8bb2c20bb7e8a4cb4',
'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be',
'InstanceActionList': '1.1-a2b2fb6006b47c27076d3a1d48baa759',
'InstanceDeviceMetadata': '1.0-74d78dd36aa32d26d2769a1b57caf186',
- 'InstanceExternalEvent': '1.4-06c2dfcf2d2813c24cd37ee728524f1a',
+ 'InstanceExternalEvent': '1.5-1ec57351a9851c1eb43ccd90662d6dd0',
'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
'InstanceFaultList': '1.2-6bb72de2872fe49ded5eb937a93f2451',
'InstanceGroup': '1.11-852ac511d30913ee88f3c3a869a8f30a',
@@ -1097,27 +1097,27 @@ object_data = {
'LibvirtLiveMigrateBDMInfo': '1.1-5f4a68873560b6f834b74e7861d71aaf',
'LibvirtLiveMigrateData': '1.10-348cf70ea44d3b985f45f64725d6f6a7',
'LibvirtLiveMigrateNUMAInfo': '1.0-0e777677f3459d0ed1634eabbdb6c22f',
+ 'LibvirtVPMEMDevice': '1.0-17ffaf47585199eeb9a2b83d6bde069f',
'MemoryDiagnostics': '1.0-2c995ae0f2223bb0f8e523c5cc0b83da',
'Migration': '1.7-bd45b232fd7c95cd79ae9187e10ef582',
'MigrationContext': '1.2-89f10a83999f852a489962ae37d8a026',
'MigrationList': '1.5-36793f8d65bae421bd5564d09a4de7be',
'MonitorMetric': '1.1-53b1db7c4ae2c531db79761e7acc52ba',
'MonitorMetricList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
- 'NUMACell': '1.5-2592de3c926a7840d763bcc85f81afa7',
- 'NUMAPagesTopology': '1.1-edab9fa2dc43c117a38d600be54b4542',
- 'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
- 'NUMATopologyLimits': '1.1-4235c5da7a76c7e36075f0cd2f5cf922',
'NetworkInterfaceMetadata': '1.2-6f3d480b40fe339067b1c0dd4d656716',
'NetworkMetadata': '1.0-2cb8d21b34f87b0261d3e1d1ae5cf218',
'NetworkRequest': '1.3-3a815ea3df7defa61e0b894dee5288ba',
'NetworkRequestList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'NicDiagnostics': '1.0-895e9ad50e0f56d5258585e3e066aea5',
- 'PCIDeviceBus': '1.0-2b891cb77e42961044689f3dc2718995',
+ 'NUMACell': '1.5-2592de3c926a7840d763bcc85f81afa7',
+ 'NUMAPagesTopology': '1.1-edab9fa2dc43c117a38d600be54b4542',
+ 'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
+ 'NUMATopologyLimits': '1.1-4235c5da7a76c7e36075f0cd2f5cf922',
'PciDevice': '1.7-680e4c590aae154958ccf9677774413b',
+ 'PCIDeviceBus': '1.0-2b891cb77e42961044689f3dc2718995',
'PciDeviceList': '1.3-52ff14355491c8c580bdc0ba34c26210',
'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
'PciDevicePoolList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
- 'PowerVMLiveMigrateData': '1.4-a745f4eda16b45e1bc5686a0c498f27e',
'Quotas': '1.3-3b2b91371f60e788035778fc5f87797d',
'QuotasNoOp': '1.3-d1593cf969c81846bc8192255ea95cce',
'RequestGroup': '1.3-0458d350a8ec9d0673f9be5640a990ce',
@@ -1127,9 +1127,9 @@ object_data = {
'ResourceList': '1.0-4a53826625cc280e15fae64a575e0879',
'ResourceMetadata': '1.0-77509ea1ea0dd750d5864b9bd87d3f9d',
'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e',
- 'SCSIDeviceBus': '1.0-61c1e89a00901069ab1cf2991681533b',
'SchedulerLimits': '1.0-249c4bd8e62a9b327b7026b7f19cc641',
'SchedulerRetries': '1.1-3c9c8b16143ebbb6ad7030e999d14cc0',
+ 'SCSIDeviceBus': '1.0-61c1e89a00901069ab1cf2991681533b',
'SecurityGroup': '1.2-86d67d8d3ab0c971e1dc86e02f9524a8',
'SecurityGroupList': '1.1-c655ed13298e630f4d398152f7d08d71',
'Selection': '1.1-548e3c2f04da2a61ceaf9c4e1589f264',
@@ -1142,16 +1142,14 @@ object_data = {
'TrustedCerts': '1.0-dcf528851e0f868c77ee47e90563cda7',
'USBDeviceBus': '1.0-e4c7dd6032e46cd74b027df5eb2d4750',
'VIFMigrateData': '1.0-cb15282b25a039ab35046ed705eb931d',
- 'VMwareLiveMigrateData': '1.0-a3cc858a2bf1d3806d6f57cfaa1fb98a',
'VirtCPUFeature': '1.0-ea2464bdd09084bd388e5f61d5d4fc86',
'VirtCPUModel': '1.0-5e1864af9227f698326203d7249796b5',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
'VirtualInterface': '1.3-efd3ca8ebcc5ce65fff5a25f31754c54',
'VirtualInterfaceList': '1.0-9750e2074437b3077e46359102779fc6',
+ 'VMwareLiveMigrateData': '1.0-a3cc858a2bf1d3806d6f57cfaa1fb98a',
'VolumeUsage': '1.0-6c8190c46ce1469bb3286a1f21c2e475',
'XenDeviceBus': '1.0-272a4f899b24e31e42b2b9a7ed7e9194',
- # TODO(efried): re-alphabetize this
- 'LibvirtVPMEMDevice': '1.0-17ffaf47585199eeb9a2b83d6bde069f',
}
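
The object_data map above is the registry of expected version/hash fingerprints; this hunk bumps BlockDeviceMapping, ImageMetaProps and InstanceExternalEvent after their schema changes, drops the removed PowerVM object, and re-alphabetizes the entries. When one of these checks fails after a deliberate change, the expected values can be regenerated with the oslo.versionedobjects checker, roughly as follows (assuming the ObjectVersionChecker fixture API):

    from oslo_versionedobjects import fixture

    from nova import objects

    # Sketch only: print 'Name': 'version-hash' pairs for the registry above.
    objects.register_all()
    checker = fixture.ObjectVersionChecker()
    for name, vers_hash in sorted(checker.get_hashes().items()):
        print("'%s': '%s'," % (name, vers_hash))
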
diff --git a/nova/tests/unit/objects/test_pci_device.py b/nova/tests/unit/objects/test_pci_device.py
index 277a7fe7c4..1e971c5a21 100644
--- a/nova/tests/unit/objects/test_pci_device.py
+++ b/nova/tests/unit/objects/test_pci_device.py
@@ -14,8 +14,8 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -161,6 +161,16 @@ class _TestPciDeviceObject(object):
'vendor_id', 'numa_node', 'status', 'uuid',
'extra_info', 'dev_type', 'parent_addr']))
+ def test_pci_device_extra_info_card_serial_number(self):
+ self.dev_dict = copy.copy(dev_dict)
+ self.pci_device = pci_device.PciDevice.create(None, self.dev_dict)
+ self.assertIsNone(self.pci_device.card_serial_number)
+
+ self.dev_dict = copy.copy(dev_dict)
+ self.dev_dict['capabilities'] = {'vpd': {'card_serial_number': '42'}}
+ self.pci_device = pci_device.PciDevice.create(None, self.dev_dict)
+ self.assertEqual(self.pci_device.card_serial_number, '42')
+
def test_update_device(self):
self.pci_device = pci_device.PciDevice.create(None, dev_dict)
self.pci_device.obj_reset_changes()
@@ -457,6 +467,16 @@ class _TestPciDeviceObject(object):
devobj.claim(self.inst.uuid)
self.assertRaises(exception.PciDeviceInvalidStatus, devobj.remove)
+ def test_remove_device_fail_owned_with_unavailable_state(self):
+ # This test creates a PCI device in an invalid state. This should
+ # not happen in any known scenario, but we want to be safe and not
+ # allow removing a device that has an owner. See bug 1969496 for
+ # more details.
+ self._create_fake_instance()
+ devobj = pci_device.PciDevice.create(None, dev_dict)
+ devobj.claim(self.inst.uuid)
+ devobj.status = fields.PciDeviceStatus.UNAVAILABLE
+ self.assertRaises(exception.PciDeviceInvalidOwner, devobj.remove)
+
class TestPciDeviceObject(test_objects._LocalTest,
_TestPciDeviceObject):
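
The new card_serial_number test above establishes that the value comes from the device's VPD capability and is None when the capability is absent. A minimal sketch of such an accessor (assumed; the real PciDevice keeps capabilities JSON-encoded in extra_info):

    from oslo_serialization import jsonutils

    def card_serial_number_sketch(extra_info):
        # Sketch only: extra_info['capabilities'] is a JSON string.
        caps = jsonutils.loads(extra_info.get('capabilities', '{}'))
        return caps.get('vpd', {}).get('card_serial_number')
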
diff --git a/nova/tests/unit/objects/test_quotas.py b/nova/tests/unit/objects/test_quotas.py
index 154c9f278a..15da48f1c4 100644
--- a/nova/tests/unit/objects/test_quotas.py
+++ b/nova/tests/unit/objects/test_quotas.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova.db.main import api as db_api
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index 31797f8133..58b9859234 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -12,8 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
@@ -116,14 +116,19 @@ class _TestRequestSpecObject(object):
else:
self.assertEqual(instance.get(field), getattr(spec, field))
- @mock.patch.object(objects.InstancePCIRequests,
- 'from_request_spec_instance_props')
- def test_from_instance_with_pci_requests(self, pci_from_spec):
- fake_pci_requests = objects.InstancePCIRequests()
- pci_from_spec.return_value = fake_pci_requests
+ def test_from_instance_with_pci_requests(self):
+ fake_pci_requests = objects.InstancePCIRequests(
+ instance_uuid=uuids.instance,
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ spec=[{'vendor_id': '8086'}],
+ ),
+ ],
+ )
instance = dict(
- uuid=uuidutils.generate_uuid(),
+ uuid=uuids.instance,
root_gb=10,
ephemeral_gb=0,
memory_mb=10,
@@ -132,14 +137,15 @@ class _TestRequestSpecObject(object):
project_id=fakes.FAKE_PROJECT_ID,
user_id=fakes.FAKE_USER_ID,
availability_zone='nova',
- pci_requests={
- 'instance_uuid': 'fakeid',
- 'requests': [{'count': 1, 'spec': [{'vendor_id': '8086'}]}]})
+ pci_requests=fake_pci_requests.obj_to_primitive(),
+ )
spec = objects.RequestSpec()
spec._from_instance(instance)
- pci_from_spec.assert_called_once_with(instance['pci_requests'])
- self.assertEqual(fake_pci_requests, spec.pci_requests)
+ self.assertEqual(
+ fake_pci_requests.requests[0].spec,
+ spec.pci_requests.requests[0].spec,
+ )
def test_from_instance_with_numa_stuff(self):
instance = dict(
@@ -424,6 +430,62 @@ class _TestRequestSpecObject(object):
self.assertListEqual([rg], spec.requested_resources)
self.assertEqual(req_lvl_params, spec.request_level_params)
+ def test_from_components_flavor_based_pci_requests(self):
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+ ctxt = context.RequestContext(
+ fakes.FAKE_USER_ID, fakes.FAKE_PROJECT_ID
+ )
+ instance = fake_instance.fake_instance_obj(ctxt)
+ image = {
+ "id": uuids.image_id,
+ "properties": {"mappings": []},
+ "status": "fake-status",
+ "location": "far-away",
+ }
+ flavor = fake_flavor.fake_flavor_obj(ctxt)
+ filter_properties = {"fake": "property"}
+
+ qos_port_rg = request_spec.RequestGroup()
+ req_lvl_params = request_spec.RequestLevelParams()
+
+ pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "1234", "product_id": "fe12"}],
+ )
+ ]
+ )
+ pci_request_group = request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_1234_FE12": 1},
+ same_provider=True,
+ )
+
+ spec = objects.RequestSpec.from_components(
+ ctxt,
+ instance.uuid,
+ image,
+ flavor,
+ instance.numa_topology,
+ pci_requests,
+ filter_properties,
+ None,
+ instance.availability_zone,
+ port_resource_requests=[qos_port_rg],
+ request_level_params=req_lvl_params,
+ )
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(qos_port_rg, spec.requested_resources[0])
+ self.assertEqual(
+ pci_request_group.obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+ self.assertEqual(req_lvl_params, spec.request_level_params)
+
def test_get_scheduler_hint(self):
spec_obj = objects.RequestSpec(scheduler_hints={'foo_single': ['1'],
'foo_mul': ['1', '2']})
@@ -615,6 +677,30 @@ class _TestRequestSpecObject(object):
self.assertIsInstance(req_obj.instance_group, objects.InstanceGroup)
self.assertEqual('fresh', req_obj.instance_group.name)
+ @mock.patch.object(
+ request_spec.RequestSpec, '_get_by_instance_uuid_from_db'
+ )
+ @mock.patch('nova.objects.InstanceGroup.get_by_uuid')
+ def test_get_by_instance_uuid_deleted_group(
+ self, mock_get_ig, get_by_uuid
+ ):
+ fake_spec_obj = fake_request_spec.fake_spec_obj()
+ fake_spec_obj.scheduler_hints['group'] = ['fresh']
+ fake_spec = fake_request_spec.fake_db_spec(fake_spec_obj)
+ get_by_uuid.return_value = fake_spec
+ mock_get_ig.side_effect = exception.InstanceGroupNotFound(
+ group_uuid=uuids.instgroup
+ )
+
+ req_obj = request_spec.RequestSpec.get_by_instance_uuid(
+ self.context, fake_spec['instance_uuid']
+ )
+ # assert that both the instance_group object and scheduler hint
+ # are cleared if the instance_group was deleted since the request
+ # spec was last saved to the db.
+ self.assertIsNone(req_obj.instance_group)
+ self.assertEqual({'hint': ['over-there']}, req_obj.scheduler_hints)
+
@mock.patch('nova.objects.request_spec.RequestSpec.save')
@mock.patch.object(
request_spec.RequestSpec, '_get_by_instance_uuid_from_db')
@@ -1024,6 +1110,183 @@ class TestRemoteRequestSpecObject(test_objects._RemoteTest,
pass
+class TestInstancePCIRequestToRequestGroups(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.flags(group='filter_scheduler', pci_in_placement=True)
+
+ def test_pci_reqs_ignored_if_disabled(self):
+ self.flags(group='filter_scheduler', pci_in_placement=False)
+
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_neutron_based_requests_are_ignored(self):
+ pci_req = objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[],
+ )
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(requests=[pci_req]),
+ )
+ self.assertEqual(
+ objects.InstancePCIRequest.NEUTRON_PORT, pci_req.source
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(0, len(spec.requested_resources))
+
+ def test_rc_from_product_and_vendor(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[{"vendor_id": "fff", "product_id": "dead"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_multi_device_split_to_multiple_groups(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=2,
+ request_id=uuids.req1,
+ spec=[{"vendor_id": "de12", "product_id": "1234"}],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-1",
+ resources={"CUSTOM_PCI_DE12_1234": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[1].obj_to_primitive(),
+ )
+
+ def test_with_rc_and_traits_from_the_pci_req_spec(self):
+ spec = request_spec.RequestSpec(
+ requested_resources=[],
+ pci_requests=objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "de12",
+ "product_id": "1234",
+ "resource_class": "gpu",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req2,
+ spec=[
+ {
+ "vendor_id": "fff",
+ "product_id": "dead",
+ "traits": "foo,bar,CUSTOM_BLUE",
+ }
+ ],
+ alias_name="a-dev",
+ ),
+ ]
+ ),
+ )
+
+ spec.generate_request_groups_from_pci_requests()
+
+ self.assertEqual(2, len(spec.requested_resources))
+ self.assertEqual(
+ request_spec.RequestGroup(
+ requester_id=f"{uuids.req1}-0",
+ resources={"CUSTOM_GPU": 1},
+ use_same_provider=True,
+ ).obj_to_primitive(),
+ spec.requested_resources[0].obj_to_primitive(),
+ )
+ # Note that sets would be serialized to tuples by obj_to_primitive in
+ # random order, so we need to match this spec field by field
+ expected = request_spec.RequestGroup(
+ requester_id=f"{uuids.req2}-0",
+ resources={"CUSTOM_PCI_FFF_DEAD": 1},
+ required_traits={"CUSTOM_FOO", "CUSTOM_BAR", "CUSTOM_BLUE"},
+ use_same_provider=True,
+ )
+ actual = spec.requested_resources[1]
+ for field in request_spec.RequestGroup.fields.keys():
+ self.assertEqual(getattr(expected, field), getattr(actual, field))
+
+
class TestRequestGroupObject(test.NoDBTestCase):
def setUp(self):
super(TestRequestGroupObject, self).setUp()
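
The TestInstancePCIRequestToRequestGroups cases above spell out the translation rules: neutron-port based requests are skipped, each requested device becomes its own group with requester_id "<request_id>-<n>", the resource class is either the normalized resource_class from the spec or CUSTOM_PCI_<VENDOR>_<PRODUCT>, and traits are normalized to placement-style names. A self-contained sketch of that translation under those assumptions (not the actual RequestSpec code; the real normalization presumably leaves standard trait names unprefixed, while this sketch always adds CUSTOM_, which matches the custom-only inputs used in the tests):

    import re

    def _custom_name(value):
        # Sketch only: uppercase, squash odd characters, ensure CUSTOM_ prefix.
        name = re.sub(r'[^A-Z0-9_]', '_', value.upper())
        return name if name.startswith('CUSTOM_') else 'CUSTOM_' + name

    def pci_request_to_groups_sketch(pci_request):
        # Sketch only: one placement request group per requested device.
        spec = pci_request.spec[0]
        rc = _custom_name(
            spec.get('resource_class') or
            'PCI_%s_%s' % (spec['vendor_id'], spec['product_id']))
        traits = {_custom_name(t.strip())
                  for t in spec.get('traits', '').split(',') if t.strip()}
        return [
            {'requester_id': '%s-%s' % (pci_request.request_id, i),
             'resources': {rc: 1},
             'required_traits': traits,
             'use_same_provider': True}
            for i in range(pci_request.count)
        ]
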
diff --git a/nova/tests/unit/objects/test_resource.py b/nova/tests/unit/objects/test_resource.py
index 3ac12eee84..0e43df185b 100644
--- a/nova/tests/unit/objects/test_resource.py
+++ b/nova/tests/unit/objects/test_resource.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/objects/test_security_group.py b/nova/tests/unit/objects/test_security_group.py
index 7d6a3773c5..527e5d84d6 100644
--- a/nova/tests/unit/objects/test_security_group.py
+++ b/nova/tests/unit/objects/test_security_group.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_versionedobjects import fixture as ovo_fixture
diff --git a/nova/tests/unit/objects/test_service.py b/nova/tests/unit/objects/test_service.py
index 84cbd4bf6a..60ab806207 100644
--- a/nova/tests/unit/objects/test_service.py
+++ b/nova/tests/unit/objects/test_service.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
diff --git a/nova/tests/unit/objects/test_tag.py b/nova/tests/unit/objects/test_tag.py
index 29579b1e78..caf039152d 100644
--- a/nova/tests/unit/objects/test_tag.py
+++ b/nova/tests/unit/objects/test_tag.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.objects import tag
from nova.tests.unit.objects import test_objects
diff --git a/nova/tests/unit/objects/test_task_log.py b/nova/tests/unit/objects/test_task_log.py
index 6d93ebab4c..2ac7971c28 100644
--- a/nova/tests/unit/objects/test_task_log.py
+++ b/nova/tests/unit/objects/test_task_log.py
@@ -11,9 +11,9 @@
# under the License.
import datetime
+from unittest import mock
import iso8601
-import mock
from oslo_utils import timeutils
from nova import objects
diff --git a/nova/tests/unit/objects/test_trusted_certs.py b/nova/tests/unit/objects/test_trusted_certs.py
index 3010dd6b5c..9029845ef3 100644
--- a/nova/tests/unit/objects/test_trusted_certs.py
+++ b/nova/tests/unit/objects/test_trusted_certs.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.objects import trusted_certs
from nova.tests.unit.objects import test_objects
diff --git a/nova/tests/unit/objects/test_virtual_interface.py b/nova/tests/unit/objects/test_virtual_interface.py
index a9049bac88..a806668c6b 100644
--- a/nova/tests/unit/objects/test_virtual_interface.py
+++ b/nova/tests/unit/objects/test_virtual_interface.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.db.main import api as db
diff --git a/nova/tests/unit/objects/test_volume_usage.py b/nova/tests/unit/objects/test_volume_usage.py
index a465955ad6..d8df53d5c7 100644
--- a/nova/tests/unit/objects/test_volume_usage.py
+++ b/nova/tests/unit/objects/test_volume_usage.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
diff --git a/nova/tests/unit/pci/fakes.py b/nova/tests/unit/pci/fakes.py
index 93ab33b27f..e0267ff087 100644
--- a/nova/tests/unit/pci/fakes.py
+++ b/nova/tests/unit/pci/fakes.py
@@ -14,8 +14,8 @@
# under the License.
import functools
+from unittest import mock
-import mock
from nova.pci import whitelist
diff --git a/nova/tests/unit/pci/test_devspec.py b/nova/tests/unit/pci/test_devspec.py
index 69d774cd86..4f747e7b7d 100644
--- a/nova/tests/unit/pci/test_devspec.py
+++ b/nova/tests/unit/pci/test_devspec.py
@@ -11,12 +11,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-
-import mock
+from unittest import mock
from nova import exception
from nova import objects
from nova.pci import devspec
+from nova.pci.request import PCI_REMOTE_MANAGED_TAG
from nova import test
dev = {"vendor_id": "8086",
@@ -51,7 +51,7 @@ class PhysicalPciAddressTestCase(test.NoDBTestCase):
for component in invalid_val_addr:
address = dict(self.pci_addr)
address[component] = str(invalid_val_addr[component])
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PhysicalPciAddress, address)
def test_init_by_dict_missing_values(self):
@@ -75,7 +75,7 @@ class PhysicalPciAddressTestCase(test.NoDBTestCase):
"0000:0a:" + str(devspec.MAX_SLOT + 1) + ".5",
"0000:0a:00." + str(devspec.MAX_FUNC + 1)]
for address in invalid_addresses:
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PhysicalPciAddress, address)
def test_init_by_string_missing_values(self):
@@ -121,7 +121,7 @@ class PciAddressGlobSpecTestCase(test.NoDBTestCase):
"0000:0a:" + str(devspec.MAX_SLOT + 1) + ".5",
"0000:0a:00." + str(devspec.MAX_FUNC + 1)]
for address in invalid_addresses:
- self.assertRaises(exception.PciConfigInvalidWhitelist,
+ self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciAddressGlobSpec, address)
def test_match(self):
@@ -207,18 +207,18 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_address_invalid_character(self):
pci_info = {"address": "0000:h4.12:6", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ("Invalid PCI devices Whitelist config: property func ('12:6') "
+ msg = ("Invalid [pci]device_spec config: property func ('12:6') "
"does not parse as a hex number.")
self.assertEqual(msg, str(exc))
def test_max_func(self):
pci_info = {"address": "0000:0a:00.%s" % (devspec.MAX_FUNC + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property func (%x) is '
+ msg = ('Invalid [pci]device_spec config: property func (%x) is '
'greater than the maximum allowable value (%x).'
% (devspec.MAX_FUNC + 1, devspec.MAX_FUNC))
self.assertEqual(msg, str(exc))
@@ -226,9 +226,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_domain(self):
pci_info = {"address": "%x:0a:00.5" % (devspec.MAX_DOMAIN + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property domain (%X) '
+ msg = ('Invalid [pci]device_spec config: property domain (%X) '
'is greater than the maximum allowable value (%X).'
% (devspec.MAX_DOMAIN + 1, devspec.MAX_DOMAIN))
self.assertEqual(msg, str(exc))
@@ -236,9 +236,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_bus(self):
pci_info = {"address": "0000:%x:00.5" % (devspec.MAX_BUS + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property bus (%X) is '
+ msg = ('Invalid [pci]device_spec config: property bus (%X) is '
'greater than the maximum allowable value (%X).'
% (devspec.MAX_BUS + 1, devspec.MAX_BUS))
self.assertEqual(msg, str(exc))
@@ -246,9 +246,9 @@ class PciAddressTestCase(test.NoDBTestCase):
def test_max_slot(self):
pci_info = {"address": "0000:0a:%x.5" % (devspec.MAX_SLOT + 1),
"physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
- msg = ('Invalid PCI devices Whitelist config: property slot (%X) is '
+ msg = ('Invalid [pci]device_spec config: property slot (%X) is '
'greater than the maximum allowable value (%X).'
% (devspec.MAX_SLOT + 1, devspec.MAX_SLOT))
self.assertEqual(msg, str(exc))
@@ -382,10 +382,10 @@ class PciDevSpecTestCase(test.NoDBTestCase):
def test_vendor_id_out_of_range(self):
pci_info = {"vendor_id": "80860", "address": "*:*:*.5",
"product_id": "5057", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
self.assertEqual(
- "Invalid PCI devices Whitelist config: property vendor_id (80860) "
+ "Invalid [pci]device_spec config: property vendor_id (80860) "
"is greater than the maximum allowable value (FFFF).",
str(exc))
@@ -398,10 +398,10 @@ class PciDevSpecTestCase(test.NoDBTestCase):
def test_product_id_out_of_range(self):
pci_info = {"vendor_id": "8086", "address": "*:*:*.5",
"product_id": "50570", "physical_network": "hr_net"}
- exc = self.assertRaises(exception.PciConfigInvalidWhitelist,
+ exc = self.assertRaises(exception.PciConfigInvalidSpec,
devspec.PciDeviceSpec, pci_info)
self.assertEqual(
- "Invalid PCI devices Whitelist config: property product_id "
+ "Invalid [pci]device_spec config: property product_id "
"(50570) is greater than the maximum allowable value (FFFF).",
str(exc))
@@ -449,3 +449,242 @@ class PciDevSpecTestCase(test.NoDBTestCase):
pci_obj = objects.PciDevice.create(None, pci_dev)
self.assertTrue(pci.match_pci_obj(pci_obj))
+
+
+class PciDevSpecRemoteManagedTestCase(test.NoDBTestCase):
+
+ def setUp(self):
+ self.test_dev = {
+ "vendor_id": "8086",
+ "product_id": "5057",
+ "address": "0000:0a:00.0",
+ "capabilities": {"vpd": {"card_serial_number": "MT2113X00000"}},
+ }
+ super().setUp()
+
+ @mock.patch('nova.pci.utils.get_function_by_ifname',
+ new=mock.Mock(return_value=(None, False)))
+ def test_remote_managed_unknown_raises(self):
+ pci_info = {"devname": "nonexdev0", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciDeviceRemoteManagedNotPresent,
+ devspec.PciDeviceSpec, pci_info)
+
+ @mock.patch('nova.pci.utils.get_vf_product_id_by_pf_addr',
+ new=mock.Mock(return_value="5058"))
+ @mock.patch('nova.pci.utils.get_pci_ids_by_pci_addr',
+ new=mock.Mock(return_value=("8086", "5057")))
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=True))
+ def test_remote_managed_pf_raises(self):
+ """Remote-managed PF test case with PF-based VF matching
+
+ 5058 is the expected VF product ID which differs from the
+ one specified in the whitelist. This is to simulate a mistake
+ in the whitelist where a user uses both the PF PCI address and
+ PF product and vendor ID instead of using the VF product ID.
+ """
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciDeviceInvalidPFRemoteManaged,
+ devspec.PciDeviceSpec, pci_info)
+
+ @mock.patch('nova.pci.utils.get_vf_product_id_by_pf_addr',
+ new=mock.Mock(return_value="5058"))
+ @mock.patch('nova.pci.utils.get_pci_ids_by_pci_addr',
+ new=mock.Mock(return_value=("8086", "5057")))
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=True))
+ def test_remote_managed_vf_by_pf(self):
+ """Remote-managed PF test case with PF-based VF matching
+
+ This is to test the supported matching of a VF by using
+ its product and vendor ID and a specific PF PCI address.
+ """
+ # Full match: 5058 is the expected VF product ID which
+ # matches the one specified in the whitelist. This is to
+ # simulate the supported matching of a VF by using its
+ # product and vendor ID and a specific PF PCI address.
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5058", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ devspec.PciDeviceSpec(pci_info)
+
+ # This spec would match both PFs and VFs. Since we care that
+        # remote-managed PFs are not allowed, we have to prohibit this
+        # altogether.
+ pci_info = {"vendor_id": "*", "address": "0000:0a:00.0",
+ "product_id": "*", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciDeviceInvalidPFRemoteManaged,
+ devspec.PciDeviceSpec, pci_info)
+
+ # Don't care about a VF product ID. Like above, this would
+ # match both PFs and VFs (since VFs have the same vendor ID).
+ # Therefore, this case is prohibited to avoid remote-managed PFs.
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "*", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciDeviceInvalidPFRemoteManaged,
+ devspec.PciDeviceSpec, pci_info)
+
+ # Don't care about a VF vendor ID.
+ pci_info = {"vendor_id": "*", "address": "0000:0a:00.0",
+ "product_id": "5058", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ devspec.PciDeviceSpec(pci_info)
+
+ @mock.patch('nova.pci.utils.get_vf_product_id_by_pf_addr',
+ new=mock.Mock(return_value="5058"))
+ @mock.patch('nova.pci.utils.get_pci_ids_by_pci_addr',
+ new=mock.Mock(return_value=("8086", "5057")))
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=True))
+ def test_remote_managed_vf_by_pf_raises(self):
+ """Remote-managed PF test case with PF-based VF matching
+
+        5058 is the expected VF product ID reported for the PF address.
+        The specs below deliberately mismatch the VF vendor and/or
+        product ID, so building the device spec must fail with
+        PciConfigInvalidSpec.
+ """
+ # VF vendor ID and device ID mismatch.
+ pci_info = {"vendor_id": "8080", "address": "0000:0a:00.0",
+ "product_id": "5050", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciConfigInvalidSpec,
+ devspec.PciDeviceSpec, pci_info)
+
+ # VF device ID mismatch.
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5050", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciConfigInvalidSpec,
+ devspec.PciDeviceSpec, pci_info)
+
+ # VF vendor ID mismatch.
+ pci_info = {"vendor_id": "8080", "address": "0000:0a:00.0",
+ "product_id": "5058", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ self.assertRaises(exception.PciConfigInvalidSpec,
+ devspec.PciDeviceSpec, pci_info)
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=True))
+ def test_not_remote_managed_pf_match(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "false"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(self.test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=True))
+ def test_no_remote_managed_specified_pf_match(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(self.test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False))
+ def test_remote_managed_specified_vf_match(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(self.test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False))
+ def test_remote_managed_specified_no_serial_vf_no_match(self):
+ # No card serial number available - must not get a match.
+ test_dev = {
+ "vendor_id": "8086",
+ "product_id": "5057",
+ "address": "0000:0a:00.0",
+ }
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertFalse(pci.match(test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False))
+ def test_remote_managed_specified_empty_serial_vf_no_match(self):
+ # Card serial is an empty string.
+ test_dev = {
+ "vendor_id": "8086",
+ "product_id": "5057",
+ "address": "0000:0a:00.0",
+ "capabilities": {"vpd": {"card_serial_number": ""}},
+ }
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertFalse(pci.match(test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False))
+ def test_not_remote_managed_vf_match(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "false"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(self.test_dev))
+
+ @mock.patch('nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False))
+ def test_no_remote_managed_specified_vf_match(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net"}
+ pci = devspec.PciDeviceSpec(pci_info)
+ self.assertTrue(pci.match(self.test_dev))
+
+ @mock.patch(
+ 'nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
+ def test_remote_managed_vf_match_by_pci_obj(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.2",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+
+ pci = devspec.PciDeviceSpec(pci_info)
+ pci_dev = {
+ "compute_node_id": 1,
+ "address": "0000:0a:00.2",
+ "vendor_id": "8086",
+ "product_id": "5057",
+ "capabilities": {"vpd": {"card_serial_number": "MT2113X00000"}},
+ "status": "available",
+ "parent_addr": "0000:0a:00.1",
+ }
+
+ pci_obj = objects.PciDevice.create(None, pci_dev)
+ self.assertTrue(pci.match_pci_obj(pci_obj))
+
+ @mock.patch(
+ 'nova.pci.utils.is_physical_function',
+ new=mock.Mock(return_value=False)
+ )
+ def test_remote_managed_vf_no_match_by_pci_obj(self):
+ pci_info = {"vendor_id": "8086", "address": "0000:0a:00.0",
+ "product_id": "5057", "physical_network": "hr_net",
+ PCI_REMOTE_MANAGED_TAG: "true"}
+
+ pci = devspec.PciDeviceSpec(pci_info)
+ pci_dev = {
+ "compute_node_id": 1,
+ "address": "0000:0a:00.2",
+ "vendor_id": "8086",
+ "product_id": "5057",
+ "status": "available",
+ "parent_addr": "0000:0a:00.1",
+ }
+
+ pci_obj = objects.PciDevice.create(None, pci_dev)
+ self.assertFalse(pci.match_pci_obj(pci_obj))
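For illustration, the renamed [pci]device_spec entries exercised by the tests above can be built and matched directly. This is a minimal sketch, not part of the change set, assuming a Nova tree on the import path; the data mirrors the test fixtures and, as the tests suggest, matching simply compares the spec fields against the device dict:

    from nova.pci import devspec

    # Spec mirroring the test data above: vendor/product ID, a concrete
    # PCI address and a physical_network tag, with no remote_managed tag.
    spec = devspec.PciDeviceSpec({
        "vendor_id": "8086",
        "product_id": "5057",
        "address": "0000:0a:00.0",
        "physical_network": "hr_net",
    })

    # A device reported by the hypervisor, as a plain dict like in the tests.
    dev = {"vendor_id": "8086", "product_id": "5057", "address": "0000:0a:00.0"}
    assert spec.match(dev)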
diff --git a/nova/tests/unit/pci/test_manager.py b/nova/tests/unit/pci/test_manager.py
index 39d0b116bb..bcd4cecb85 100644
--- a/nova/tests/unit/pci/test_manager.py
+++ b/nova/tests/unit/pci/test_manager.py
@@ -14,13 +14,14 @@
# under the License.
import copy
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
from nova.compute import vm_states
from nova import context
+from nova import exception
from nova import objects
from nova.objects import fields
from nova.pci import manager
@@ -42,6 +43,8 @@ fake_pci_1 = dict(fake_pci, address='0000:00:00.2',
product_id='p1', vendor_id='v1')
fake_pci_2 = dict(fake_pci, address='0000:00:00.3')
+fake_pci_devs = [fake_pci, fake_pci_1, fake_pci_2]
+
fake_pci_3 = dict(fake_pci, address='0000:00:01.1',
dev_type=fields.PciDeviceType.SRIOV_PF,
vendor_id='v2', product_id='p2', numa_node=None)
@@ -53,6 +56,7 @@ fake_pci_5 = dict(fake_pci, address='0000:00:02.2',
dev_type=fields.PciDeviceType.SRIOV_VF,
parent_addr='0000:00:01.1',
vendor_id='v2', product_id='p2', numa_node=None)
+fake_pci_devs_tree = [fake_pci_3, fake_pci_4, fake_pci_5]
fake_db_dev = {
'created_at': None,
@@ -142,14 +146,14 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
requests=pci_reqs)
def _create_tracker(self, fake_devs):
- self.fake_devs = fake_devs
+ self.fake_devs = copy.deepcopy(fake_devs)
self.tracker = manager.PciDevTracker(
self.fake_context, objects.ComputeNode(id=1, numa_topology=None))
def setUp(self):
super(PciDevTrackerTestCase, self).setUp()
self.fake_context = context.get_admin_context()
- self.fake_devs = fake_db_devs[:]
+ self.fake_devs = copy.deepcopy(fake_db_devs)
self.stub_out('nova.db.main.api.pci_device_get_all_by_node',
self._fake_get_pci_devices)
# The fake_pci_whitelist must be called before creating the fake
@@ -157,7 +161,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._create_fake_instance()
- self._create_tracker(fake_db_devs[:])
+ self._create_tracker(fake_db_devs)
def test_pcidev_tracker_create(self):
self.assertEqual(len(self.tracker.pci_devs), 3)
@@ -231,7 +235,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self, mock_debug):
self.flags(
group='pci',
- passthrough_whitelist=[
+ device_spec=[
'{"product_id":"2032", "vendor_id":"8086"}'])
# There are systems where 32 bit PCI domain is used. See bug 1897528
# for example. While nova (and qemu) does not support assigning such
@@ -266,9 +270,8 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
def test_set_hvdev_new_dev(self):
fake_pci_3 = dict(fake_pci, address='0000:00:00.4', vendor_id='v2')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_1),
- copy.deepcopy(fake_pci_2), copy.deepcopy(fake_pci_3)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci, fake_pci_1, fake_pci_2, fake_pci_3]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.assertEqual(len(self.tracker.pci_devs), 4)
self.assertEqual(set([dev.address for
dev in self.tracker.pci_devs]),
@@ -284,11 +287,8 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self._create_tracker(fake_db_devs_tree)
fake_new_device = dict(fake_pci_5, id=12, address='0000:00:02.3')
- fake_pci_devs = [copy.deepcopy(fake_pci_3),
- copy.deepcopy(fake_pci_4),
- copy.deepcopy(fake_pci_5),
- copy.deepcopy(fake_new_device)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci_3, fake_pci_4, fake_pci_5, fake_new_device]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.assertEqual(len(self.tracker.pci_devs), 4)
pf = [dev for dev in self.tracker.pci_devs
@@ -304,15 +304,14 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
def test_set_hvdev_changed(self):
fake_pci_v2 = dict(fake_pci, address='0000:00:00.2', vendor_id='v1')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
- copy.deepcopy(fake_pci_v2)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci, fake_pci_2, fake_pci_v2]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.assertEqual(set([dev.vendor_id for
dev in self.tracker.pci_devs]),
set(['v', 'v1']))
def test_set_hvdev_remove(self):
- self.tracker._set_hvdevs([fake_pci])
+ self.tracker._set_hvdevs(copy.deepcopy([fake_pci]))
self.assertEqual(
len([dev for dev in self.tracker.pci_devs
if dev.status == fields.PciDeviceStatus.REMOVED]),
@@ -324,8 +323,8 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
# from previous scans)
self._create_tracker(fake_db_devs_tree)
- fake_pci_devs = [copy.deepcopy(fake_pci_3), copy.deepcopy(fake_pci_4)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci_3, fake_pci_4]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.assertEqual(
2,
len([dev for dev in self.tracker.pci_devs
@@ -344,8 +343,9 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
# Make sure the device tree is properly maintained when there are
# devices removed from the system that are allocated to vms.
- all_devs = fake_db_devs_tree[:]
- self._create_tracker(all_devs)
+ all_db_devs = fake_db_devs_tree
+ all_pci_devs = fake_pci_devs_tree
+ self._create_tracker(all_db_devs)
# we start with 3 devices
self.assertEqual(
3,
@@ -358,7 +358,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
claimed_dev = self.tracker.claim_instance(
mock.sentinel.context, pci_requests_obj, None)[0]
- self.tracker._set_hvdevs(all_devs)
+ self.tracker._set_hvdevs(copy.deepcopy(all_pci_devs))
# and assert that no devices were removed
self.assertEqual(
0,
@@ -366,10 +366,10 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
if dev.status == fields.PciDeviceStatus.REMOVED]))
# we then try to remove the allocated device from the set reported
# by the driver.
- fake_pci_devs = [dev for dev in all_devs
+ fake_pci_devs = [dev for dev in all_pci_devs
if dev['address'] != claimed_dev.address]
with mock.patch("nova.pci.manager.LOG.warning") as log:
- self.tracker._set_hvdevs(fake_pci_devs)
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
log.assert_called_once()
args = log.call_args_list[0][0] # args of first call
self.assertIn('Unable to remove device with', args[0])
@@ -380,7 +380,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
if dev.status == fields.PciDeviceStatus.REMOVED]))
# free the device that was allocated and update tracker again
self.tracker._free_device(claimed_dev)
- self.tracker._set_hvdevs(fake_pci_devs)
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
# and assert that one device is removed from the tracker
self.assertEqual(
1,
@@ -393,12 +393,249 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.tracker.claim_instance(mock.sentinel.context,
pci_requests_obj, None)
fake_pci_3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v2')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
- copy.deepcopy(fake_pci_3)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci, fake_pci_2, fake_pci_3]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.assertEqual(len(self.tracker.stale), 1)
self.assertEqual(self.tracker.stale['0000:00:00.2']['vendor_id'], 'v2')
+ def _get_device_by_address(self, address):
+ devs = [dev for dev in self.tracker.pci_devs if dev.address == address]
+ if len(devs) == 1:
+ return devs[0]
+ if devs:
+ raise ValueError('ambiguous address', devs)
+ else:
+ raise ValueError('device not found', address)
+
+ def test_set_hvdevs_unavailable_vf_removed(self):
+ # We start with a PF parent and two VF children
+ self._create_tracker([fake_db_dev_3, fake_db_dev_4, fake_db_dev_5])
+ pci_requests_obj = self._create_pci_requests_object(
+ [
+ {
+ 'count': 1,
+ 'spec': [{'dev_type': fields.PciDeviceType.SRIOV_PF}]
+ }
+ ],
+ instance_uuid=uuidsentinel.instance1,
+ )
+ # then claim and allocate the PF that makes the VFs unavailable
+ self.tracker.claim_instance(
+ mock.sentinel.context, pci_requests_obj, None)
+ self.tracker.allocate_instance({'uuid': uuidsentinel.instance1})
+
+ dev3_pf = self._get_device_by_address(fake_db_dev_3['address'])
+ self.assertEqual('allocated', dev3_pf.status)
+ self.assertEqual(uuidsentinel.instance1, dev3_pf.instance_uuid)
+ dev4_vf = self._get_device_by_address(fake_db_dev_4['address'])
+ self.assertEqual('unavailable', dev4_vf.status)
+ dev5_vf = self._get_device_by_address(fake_db_dev_5['address'])
+ self.assertEqual('unavailable', dev5_vf.status)
+
+ # now simulate that one VF (dev_5) is removed from the hypervisor and
+ # the compute is restarted. As the VF is not claimed or allocated we
+ # are free to remove it from the tracker.
+ self.tracker._set_hvdevs(copy.deepcopy([fake_pci_3, fake_pci_4]))
+
+ dev3_pf = self._get_device_by_address(fake_db_dev_3['address'])
+ self.assertEqual('allocated', dev3_pf.status)
+ self.assertEqual(uuidsentinel.instance1, dev3_pf.instance_uuid)
+ dev4_vf = self._get_device_by_address(fake_db_dev_4['address'])
+ self.assertEqual('unavailable', dev4_vf.status)
+ dev5_vf = self._get_device_by_address(fake_db_dev_5['address'])
+ self.assertEqual('removed', dev5_vf.status)
+
+ def test_set_hvdevs_unavailable_pf_removed(self):
+ # We start with one PF parent and one child VF
+ self._create_tracker([fake_db_dev_3, fake_db_dev_4])
+ pci_requests_obj = self._create_pci_requests_object(
+ [
+ {
+ 'count': 1,
+ 'spec': [{'dev_type': fields.PciDeviceType.SRIOV_VF}]
+ }
+ ],
+ instance_uuid=uuidsentinel.instance1,
+ )
+ # Then we claim and allocate the VF that makes the PF unavailable
+ self.tracker.claim_instance(
+ mock.sentinel.context, pci_requests_obj, None)
+ self.tracker.allocate_instance({'uuid': uuidsentinel.instance1})
+
+ dev3_pf = self._get_device_by_address(fake_db_dev_3['address'])
+ self.assertEqual('unavailable', dev3_pf.status)
+ dev4_vf = self._get_device_by_address(fake_db_dev_4['address'])
+ self.assertEqual('allocated', dev4_vf.status)
+ self.assertEqual(uuidsentinel.instance1, dev4_vf.instance_uuid)
+
+ # now simulate that the parent PF is removed from the hypervisor and
+ # the compute is restarted. As the PF is not claimed or allocated we
+ # are free to remove it from the tracker.
+ self.tracker._set_hvdevs(copy.deepcopy([fake_pci_4]))
+
+ dev3_pf = self._get_device_by_address(fake_db_dev_3['address'])
+ self.assertEqual('removed', dev3_pf.status)
+ dev4_vf = self._get_device_by_address(fake_db_dev_4['address'])
+ self.assertEqual('allocated', dev4_vf.status)
+ self.assertEqual(uuidsentinel.instance1, dev4_vf.instance_uuid)
+
+ def test_claim_available_pf_while_child_vf_is_unavailable(self):
+ # NOTE(gibi): this is bug 1969496. The state created here is
+ # inconsistent and should not happen. But it did happen in some cases
+        # where we were not able to track down how it happened.
+
+ # We start with a PF parent and a VF child. The PF is available and
+ # the VF is unavailable.
+ pf = copy.deepcopy(fake_db_dev_3)
+ vf = copy.deepcopy(fake_db_dev_4)
+ vf['status'] = fields.PciDeviceStatus.UNAVAILABLE
+ self._create_tracker([pf, vf])
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf_dev = self._get_device_by_address(vf['address'])
+ self.assertEqual('unavailable', vf_dev.status)
+
+ pci_requests_obj = self._create_pci_requests_object(
+ [
+ {
+ 'count': 1,
+ 'spec': [{'dev_type': fields.PciDeviceType.SRIOV_PF}]
+ }
+ ],
+ instance_uuid=uuidsentinel.instance1,
+ )
+ # now try to claim and allocate the PF. It should work as it is
+ # available
+ self.tracker.claim_instance(
+ mock.sentinel.context, pci_requests_obj, None)
+ self.tracker.allocate_instance({'uuid': uuidsentinel.instance1})
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('allocated', pf_dev.status)
+ vf_dev = self._get_device_by_address(vf['address'])
+ self.assertEqual('unavailable', vf_dev.status)
+
+ self.assertIn(
+ 'Some child device of parent 0000:00:01.1 is in an inconsistent '
+ 'state. If you can reproduce this warning then please report a '
+ 'bug at https://bugs.launchpad.net/nova/+filebug with '
+ 'reproduction steps. Inconsistent children with state: '
+ '0000:00:02.1 - unavailable',
+ self.stdlog.logger.output
+ )
+
+ # Ensure that the claim actually fixes the inconsistency so when the
+        # parent is freed the children become available too.
+ self.tracker.free_instance(
+ mock.sentinel.context, {'uuid': uuidsentinel.instance1})
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf_dev = self._get_device_by_address(vf['address'])
+ self.assertEqual('available', vf_dev.status)
+
+ def test_claim_available_pf_while_children_vfs_are_in_mixed_state(self):
+ # We start with a PF parent and two VF children. The PF is available
+ # and one of the VF is unavailable while the other is available.
+ pf = copy.deepcopy(fake_db_dev_3)
+ vf1 = copy.deepcopy(fake_db_dev_4)
+ vf1['status'] = fields.PciDeviceStatus.UNAVAILABLE
+ vf2 = copy.deepcopy(fake_db_dev_5)
+ vf2['status'] = fields.PciDeviceStatus.AVAILABLE
+ self._create_tracker([pf, vf1, vf2])
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf1_dev = self._get_device_by_address(vf1['address'])
+ self.assertEqual('unavailable', vf1_dev.status)
+ vf2_dev = self._get_device_by_address(vf2['address'])
+ self.assertEqual('available', vf2_dev.status)
+
+ pci_requests_obj = self._create_pci_requests_object(
+ [
+ {
+ 'count': 1,
+ 'spec': [{'dev_type': fields.PciDeviceType.SRIOV_PF}]
+ }
+ ],
+ instance_uuid=uuidsentinel.instance1,
+ )
+ # now try to claim and allocate the PF. It should work as it is
+ # available
+ self.tracker.claim_instance(
+ mock.sentinel.context, pci_requests_obj, None)
+ self.tracker.allocate_instance({'uuid': uuidsentinel.instance1})
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('allocated', pf_dev.status)
+ vf1_dev = self._get_device_by_address(vf1['address'])
+ self.assertEqual('unavailable', vf1_dev.status)
+ vf2_dev = self._get_device_by_address(vf2['address'])
+ self.assertEqual('unavailable', vf2_dev.status)
+
+ self.assertIn(
+ 'Some child device of parent 0000:00:01.1 is in an inconsistent '
+ 'state. If you can reproduce this warning then please report a '
+ 'bug at https://bugs.launchpad.net/nova/+filebug with '
+ 'reproduction steps. Inconsistent children with state: '
+ '0000:00:02.1 - unavailable',
+ self.stdlog.logger.output
+ )
+
+ # Ensure that the claim actually fixes the inconsistency so when the
+        # parent is freed the children become available too.
+ self.tracker.free_instance(
+ mock.sentinel.context, {'uuid': uuidsentinel.instance1})
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf1_dev = self._get_device_by_address(vf1['address'])
+ self.assertEqual('available', vf1_dev.status)
+ vf2_dev = self._get_device_by_address(vf2['address'])
+ self.assertEqual('available', vf2_dev.status)
+
+ def test_claim_available_pf_while_a_child_is_used(self):
+ pf = copy.deepcopy(fake_db_dev_3)
+ vf1 = copy.deepcopy(fake_db_dev_4)
+ vf1['status'] = fields.PciDeviceStatus.UNAVAILABLE
+ vf2 = copy.deepcopy(fake_db_dev_5)
+ vf2['status'] = fields.PciDeviceStatus.CLAIMED
+ self._create_tracker([pf, vf1, vf2])
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf1_dev = self._get_device_by_address(vf1['address'])
+ self.assertEqual('unavailable', vf1_dev.status)
+ vf2_dev = self._get_device_by_address(vf2['address'])
+ self.assertEqual('claimed', vf2_dev.status)
+
+ pci_requests_obj = self._create_pci_requests_object(
+ [
+ {
+ 'count': 1,
+ 'spec': [{'dev_type': fields.PciDeviceType.SRIOV_PF}]
+ }
+ ],
+ instance_uuid=uuidsentinel.instance1,
+ )
+        # now try to claim and allocate the PF. The claim should fail as one
+        # of the children is used.
+ self.assertRaises(
+ exception.PciDeviceVFInvalidStatus,
+ self.tracker.claim_instance,
+ mock.sentinel.context,
+ pci_requests_obj,
+ None,
+ )
+
+ pf_dev = self._get_device_by_address(pf['address'])
+ self.assertEqual('available', pf_dev.status)
+ vf1_dev = self._get_device_by_address(vf1['address'])
+ self.assertEqual('unavailable', vf1_dev.status)
+ vf2_dev = self._get_device_by_address(vf2['address'])
+ self.assertEqual('claimed', vf2_dev.status)
+
def test_update_pci_for_instance_active(self):
pci_requests_obj = self._create_pci_requests_object(fake_pci_requests)
self.tracker.claim_instance(mock.sentinel.context,
@@ -414,8 +651,13 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
pci_requests = copy.deepcopy(fake_pci_requests)
pci_requests[0]['count'] = 4
pci_requests_obj = self._create_pci_requests_object(pci_requests)
- self.tracker.claim_instance(mock.sentinel.context,
- pci_requests_obj, None)
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.tracker.claim_instance,
+ mock.sentinel.context,
+ pci_requests_obj,
+ None
+ )
self.assertEqual(len(self.tracker.claims[self.inst['uuid']]), 0)
devs = self.tracker.update_pci_for_instance(None,
self.inst,
@@ -424,13 +666,13 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.assertIsNone(devs)
def test_pci_claim_instance_with_numa(self):
- fake_db_dev_3 = dict(fake_db_dev_1, id=4, address='0000:00:00.4')
- fake_devs_numa = copy.deepcopy(fake_db_devs)
- fake_devs_numa.append(fake_db_dev_3)
+ fake_pci_3 = dict(fake_pci_1, address='0000:00:00.4')
+ fake_devs_numa = copy.deepcopy(fake_pci_devs)
+ fake_devs_numa.append(fake_pci_3)
self.tracker = manager.PciDevTracker(
mock.sentinel.context,
objects.ComputeNode(id=1, numa_topology=None))
- self.tracker._set_hvdevs(fake_devs_numa)
+ self.tracker._set_hvdevs(copy.deepcopy(fake_devs_numa))
pci_requests = copy.deepcopy(fake_pci_requests)[:1]
pci_requests[0]['count'] = 2
pci_requests_obj = self._create_pci_requests_object(pci_requests)
@@ -450,11 +692,13 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.inst.numa_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([1, 2]), memory=512)])
- claims = self.tracker.claim_instance(
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.tracker.claim_instance,
mock.sentinel.context,
pci_requests_obj,
- self.inst.numa_topology)
- self.assertEqual([], claims)
+ self.inst.numa_topology
+ )
def test_update_pci_for_instance_deleted(self):
pci_requests_obj = self._create_pci_requests_object(fake_pci_requests)
@@ -477,9 +721,8 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
'nova.db.main.api.pci_device_update',
self._fake_pci_device_update)
fake_pci_v3 = dict(fake_pci, address='0000:00:00.2', vendor_id='v3')
- fake_pci_devs = [copy.deepcopy(fake_pci), copy.deepcopy(fake_pci_2),
- copy.deepcopy(fake_pci_v3)]
- self.tracker._set_hvdevs(fake_pci_devs)
+ fake_pci_devs = [fake_pci, fake_pci_2, fake_pci_v3]
+ self.tracker._set_hvdevs(copy.deepcopy(fake_pci_devs))
self.update_called = 0
self.tracker.save(self.fake_context)
self.assertEqual(self.update_called, 3)
@@ -503,7 +746,10 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
def test_clean_usage(self):
inst_2 = copy.copy(self.inst)
inst_2.uuid = uuidsentinel.instance2
- migr = {'instance_uuid': 'uuid2', 'vm_state': vm_states.BUILDING}
+ migr = objects.Migration(
+ instance_uuid='uuid2',
+ vm_state=vm_states.BUILDING,
+ )
pci_requests_obj = self._create_pci_requests_object(
[{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
@@ -564,7 +810,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
free_pci_device_ids = (
[dev.id for dev in self.tracker.pci_stats.get_free_devs()])
self.assertEqual(2, len(free_pci_device_ids))
- allocated_devs = manager.get_instance_pci_devs(self.inst)
+ allocated_devs = self.inst.get_pci_devices()
pci_device = allocated_devs[0]
self.assertNotIn(pci_device.id, free_pci_device_ids)
instance_uuid = self.inst['uuid']
@@ -627,24 +873,3 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
self.assertIsNone(self.tracker.allocations.get(instance_uuid))
free_devs = self.tracker.pci_stats.get_free_devs()
self.assertEqual(len(fake_db_devs), len(free_devs))
-
-
-class PciGetInstanceDevs(test.NoDBTestCase):
-
- def test_get_devs_object(self):
- def _fake_obj_load_attr(foo, attrname):
- if attrname == 'pci_devices':
- self.load_attr_called = True
- foo.pci_devices = objects.PciDeviceList()
-
- self.stub_out(
- 'nova.objects.Instance.obj_load_attr',
- _fake_obj_load_attr)
-
- self.load_attr_called = False
- manager.get_instance_pci_devs(objects.Instance())
- self.assertTrue(self.load_attr_called)
-
- def test_get_devs_no_pci_devices(self):
- inst = objects.Instance(pci_devices=None)
- self.assertEqual([], manager.get_instance_pci_devs(inst))
diff --git a/nova/tests/unit/pci/test_request.py b/nova/tests/unit/pci/test_request.py
index 3c2ba5b61f..4a3f17f6cb 100644
--- a/nova/tests/unit/pci/test_request.py
+++ b/nova/tests/unit/pci/test_request.py
@@ -15,7 +15,8 @@
"""Tests for PCI request."""
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel
@@ -186,6 +187,21 @@ class PciRequestTestCase(test.NoDBTestCase):
self.assertIn("xxx", aliases)
self.assertEqual(policy, aliases["xxx"][0])
+ def test_get_alias_from_config_valid_rc_and_traits(self):
+ fake_alias = jsonutils.dumps({
+ "name": "xxx",
+ "resource_class": "foo",
+ "traits": "bar,baz",
+ })
+ self.flags(alias=[fake_alias], group='pci')
+ aliases = request._get_alias_from_config()
+ self.assertIsNotNone(aliases)
+ self.assertIn("xxx", aliases)
+ self.assertEqual(
+ ("legacy", [{"resource_class": "foo", "traits": "bar,baz"}]),
+ aliases["xxx"],
+ )
+
def test_get_alias_from_config_conflicting_device_type(self):
"""Check behavior when device_type conflicts occur."""
fake_alias_a = jsonutils.dumps({
@@ -255,7 +271,7 @@ class PciRequestTestCase(test.NoDBTestCase):
requests = request._translate_alias_to_requests(
"QuickAssist : 3, IntelNIC: 1")
- self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
+ self.assertEqual(set([p.count for p in requests]), set([1, 3]))
self._verify_result(expect_request, requests)
def test_translate_alias_to_requests_invalid(self):
@@ -292,7 +308,7 @@ class PciRequestTestCase(test.NoDBTestCase):
requests = request._translate_alias_to_requests(
"QuickAssist : 3, IntelNIC: 1", affinity_policy=policy)
- self.assertEqual(set([p['count'] for p in requests]), set([1, 3]))
+ self.assertEqual(set([p.count for p in requests]), set([1, 3]))
self._verify_result(expect_request, requests)
@mock.patch.object(objects.compute_node.ComputeNode,
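For illustration, the alias shape added in test_get_alias_from_config_valid_rc_and_traits corresponds to a JSON-encoded [pci]alias entry that carries a placement resource class and traits. A minimal sketch, reusing only names that appear in the test above:

    from oslo_serialization import jsonutils

    # Alias mirroring the test data: besides the name, it carries a
    # placement resource_class and a comma-separated list of traits.
    fake_alias = jsonutils.dumps({
        "name": "xxx",
        "resource_class": "foo",
        "traits": "bar,baz",
    })

    # In the test this string is injected with self.flags(alias=[fake_alias],
    # group='pci') and parsed via request._get_alias_from_config(), which
    # yields ("legacy", [{"resource_class": "foo", "traits": "bar,baz"}]).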
diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py
index 394a07f9eb..7eb43a05f4 100644
--- a/nova/tests/unit/pci/test_stats.py
+++ b/nova/tests/unit/pci/test_stats.py
@@ -12,13 +12,17 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import collections
+from unittest import mock
-import mock
from oslo_config import cfg
+from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
from nova import objects
from nova.objects import fields
+from nova.pci.request import PCI_REMOTE_MANAGED_TAG
from nova.pci import stats
from nova.pci import whitelist
from nova import test
@@ -97,33 +101,26 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsTestCase, self).setUp()
- self._setup_pci_stats()
-
- def _setup_pci_stats(self, numa_topology=None):
- """Exists for tests that need to setup pci_stats with a specific NUMA
- topology, while still allowing tests that don't care to get the default
- "empty" one.
- """
- if not numa_topology:
- numa_topology = objects.NUMATopology()
- self.pci_stats = stats.PciDeviceStats(numa_topology)
+ self.pci_stats = stats.PciDeviceStats(objects.NUMATopology())
# The following two calls need to be made before adding the devices.
patcher = fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._create_fake_devs()
def test_add_device(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.assertEqual(set([d['vendor_id'] for d in self.pci_stats]),
set(['v1', 'v2', 'v3']))
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_remove_device(self):
+ self.assertEqual(len(self.pci_stats.pools), 4)
self.pci_stats.remove_device(self.fake_dev_2)
- self.assertEqual(len(self.pci_stats.pools), 2)
- self.assertEqual(self.pci_stats.pools[0]['count'], 2)
+ self.assertEqual(len(self.pci_stats.pools), 3)
+ self.assertEqual(self.pci_stats.pools[0]['count'], 1)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
+ self.assertEqual(self.pci_stats.pools[1]['count'], 1)
+ self.assertEqual(self.pci_stats.pools[1]['vendor_id'], 'v1')
def test_remove_device_exception(self):
self.pci_stats.remove_device(self.fake_dev_2)
@@ -152,36 +149,36 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
m = self.pci_stats.to_device_pools_obj()
new_stats = stats.PciDeviceStats(objects.NUMATopology(), m)
- self.assertEqual(len(new_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in new_stats]),
- set([1, 2]))
+ self.assertEqual(len(new_stats.pools), 4)
+ self.assertEqual([d['count'] for d in new_stats], [1, 1, 1, 1])
self.assertEqual(set([d['vendor_id'] for d in new_stats]),
set(['v1', 'v2', 'v3']))
def test_apply_requests(self):
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.pci_stats.apply_requests(pci_requests)
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.pci_stats.apply_requests(pci_requests, {})
self.assertEqual(len(self.pci_stats.pools), 2)
self.assertEqual(self.pci_stats.pools[0]['vendor_id'], 'v1')
self.assertEqual(self.pci_stats.pools[0]['count'], 1)
def test_apply_requests_failed(self):
- self.assertRaises(exception.PciDeviceRequestFailed,
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
self.pci_stats.apply_requests,
- pci_requests_multiple)
+ pci_requests_multiple,
+ {},
+ )
def test_support_requests(self):
- self.assertTrue(self.pci_stats.support_requests(pci_requests))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set((1, 2)))
+ self.assertTrue(self.pci_stats.support_requests(pci_requests, {}))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_failed(self):
self.assertFalse(
- self.pci_stats.support_requests(pci_requests_multiple))
- self.assertEqual(len(self.pci_stats.pools), 3)
- self.assertEqual(set([d['count'] for d in self.pci_stats]),
- set([1, 2]))
+ self.pci_stats.support_requests(pci_requests_multiple, {}))
+ self.assertEqual(len(self.pci_stats.pools), 4)
+ self.assertEqual([d['count'] for d in self.pci_stats], [1, 1, 1, 1])
def test_support_requests_numa(self):
cells = [
@@ -190,14 +187,18 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
objects.InstanceNUMACell(
id=1, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_numa_failed(self):
cells = [
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertFalse(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_no_numa_info(self):
cells = [
@@ -205,12 +206,16 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
pci_requests = self._get_fake_requests(vendor_ids=['v3'])
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
# 'legacy' is the default numa_policy so the result must be same
pci_requests = self._get_fake_requests(vendor_ids=['v3'],
numa_policy = fields.PCINUMAAffinityPolicy.LEGACY)
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_numa_pci_numa_policy_preferred(self):
# numa node 0 has 2 devices with vendor_id 'v1'
@@ -224,7 +229,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(
numa_policy=fields.PCINUMAAffinityPolicy.PREFERRED)
- self.assertTrue(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertTrue(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_support_requests_no_numa_info_pci_numa_policy_required(self):
# pci device with vendor_id 'v3' has numa_node=None.
@@ -236,21 +243,23 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(vendor_ids=['v3'],
numa_policy=fields.PCINUMAAffinityPolicy.REQUIRED)
- self.assertFalse(self.pci_stats.support_requests(pci_requests, cells))
+ self.assertFalse(
+ self.pci_stats.support_requests(pci_requests, {}, cells)
+ )
def test_filter_pools_for_socket_affinity_no_socket(self):
- self._setup_pci_stats(
- objects.NUMATopology(
- cells=[objects.NUMACell(socket=None)]))
+ self.pci_stats.numa_topology = objects.NUMATopology(
+ cells=[objects.NUMACell(socket=None)])
+
self.assertEqual(
[],
self.pci_stats._filter_pools_for_socket_affinity(
self.pci_stats.pools, [objects.InstanceNUMACell()]))
def test_filter_pools_for_socket_affinity(self):
- self._setup_pci_stats(
- objects.NUMATopology(
- cells=[objects.NUMACell(id=1, socket=1)]))
+ self.pci_stats.numa_topology = objects.NUMATopology(
+ cells=[objects.NUMACell(id=1, socket=1)])
+
pools = self.pci_stats._filter_pools_for_socket_affinity(
self.pci_stats.pools, [objects.InstanceNUMACell(id=1)])
self.assertEqual(1, len(pools))
@@ -268,8 +277,11 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
self.assertEqual(0, len(devs))
def test_consume_requests_failed(self):
- self.assertIsNone(self.pci_stats.consume_requests(
- pci_requests_multiple))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests_multiple,
+ )
def test_consume_requests_numa(self):
cells = [
@@ -288,7 +300,12 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
objects.InstanceNUMACell(
id=0, cpuset=set(), pcpuset=set(), memory=0),
]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests, cells))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ cells,
+ )
def test_consume_requests_no_numa_info(self):
cells = [
@@ -320,11 +337,16 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
pci_requests = self._get_fake_requests(vendor_ids=[vendor_id],
numa_policy=policy, count=count)
- devs = self.pci_stats.consume_requests(pci_requests, cells)
if expected is None:
- self.assertIsNone(devs)
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ cells,
+ )
else:
+ devs = self.pci_stats.consume_requests(pci_requests, cells)
self.assertEqual(set(expected),
set([dev.product_id for dev in devs]))
@@ -451,9 +473,9 @@ class PciDeviceStatsTestCase(test.NoDBTestCase):
@mock.patch(
'nova.pci.whitelist.Whitelist._parse_white_list_from_config')
- def test_white_list_parsing(self, mock_whitelist_parse):
- white_list = '{"product_id":"0001", "vendor_id":"8086"}'
- CONF.set_override('passthrough_whitelist', white_list, 'pci')
+ def test_device_spec_parsing(self, mock_whitelist_parse):
+ device_spec = {"product_id": "0001", "vendor_id": "8086"}
+ CONF.set_override('device_spec', jsonutils.dumps(device_spec), 'pci')
pci_stats = stats.PciDeviceStats(objects.NUMATopology())
pci_stats.add_device(self.fake_dev_2)
pci_stats.remove_device(self.fake_dev_2)
@@ -464,11 +486,34 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceStatsWithTagsTestCase, self).setUp()
- white_list = ['{"vendor_id":"1137","product_id":"0071",'
- '"address":"*:0a:00.*","physical_network":"physnet1"}',
- '{"vendor_id":"1137","product_id":"0072"}']
- self.flags(passthrough_whitelist=white_list, group='pci')
- dev_filter = whitelist.Whitelist(white_list)
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "vendor_id": "1137",
+ "product_id": "0071",
+ "address": "*:0a:00.*",
+ "physical_network": "physnet1",
+ }
+ ),
+ jsonutils.dumps({"vendor_id": "1137", "product_id": "0072"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "101e",
+ "remote_managed": "true",
+ }
+ ),
+ jsonutils.dumps({"vendor_id": "15b3", "product_id": "101c"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "1018",
+ "remote_managed": "false",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
self.pci_stats = stats.PciDeviceStats(
objects.NUMATopology(),
dev_filter=dev_filter)
@@ -502,12 +547,64 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.pci_untagged_devices.append(objects.PciDevice.create(None,
pci_dev))
+ self.locally_managed_netdevs = []
+ self.remote_managed_netdevs = []
+ self.remote_managed_netdevs.append(
+ objects.PciDevice.create(
+ None, {
+ 'compute_node_id': 1,
+ 'address': '0000:0c:00.1',
+ 'vendor_id': '15b3',
+ 'product_id': '101e',
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_VF,
+ 'parent_addr': '0000:0c:00.0',
+ 'numa_node': 0,
+ "capabilities": {"vpd": {
+ "card_serial_number": "MT2113X00000"}}
+ }))
+
+ # For testing implicit remote_managed == False tagging.
+ self.locally_managed_netdevs.append(
+ objects.PciDevice.create(
+ None, {
+ 'compute_node_id': 1,
+ 'address': '0000:0d:00.1',
+ 'vendor_id': '15b3',
+ 'product_id': '101c',
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_VF,
+ 'parent_addr': '0000:0d:00.0',
+ 'numa_node': 0}))
+
+ # For testing explicit remote_managed == False tagging.
+ self.locally_managed_netdevs.append(
+ objects.PciDevice.create(
+ None, {
+ 'compute_node_id': 1,
+ 'address': '0000:0e:00.1',
+ 'vendor_id': '15b3',
+ 'product_id': '101c',
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_VF,
+ 'parent_addr': '0000:0e:00.0',
+ 'numa_node': 0}))
+
for dev in self.pci_tagged_devices:
self.pci_stats.add_device(dev)
for dev in self.pci_untagged_devices:
self.pci_stats.add_device(dev)
+ for dev in self.remote_managed_netdevs:
+ self.pci_stats.add_device(dev)
+
+ for dev in self.locally_managed_netdevs:
+ self.pci_stats.add_device(dev)
+
def _assertPoolContent(self, pool, vendor_id, product_id, count, **tags):
self.assertEqual(vendor_id, pool['vendor_id'])
self.assertEqual(product_id, pool['product_id'])
@@ -517,21 +614,68 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.assertEqual(v, pool[k])
def _assertPools(self):
+ nr_tagged = len(self.pci_tagged_devices)
+ nr_untagged = len(self.pci_untagged_devices)
+ nr_remote = len(self.remote_managed_netdevs)
+ nr_local = len(self.locally_managed_netdevs)
+ self.assertEqual(
+ nr_tagged + nr_untagged + nr_remote + nr_local,
+ len(self.pci_stats.pools),
+ )
# Pools are ordered based on the number of keys. 'product_id',
# 'vendor_id' are always part of the keys. When tags are present,
- # they are also part of the keys. In this test class, we have
- # two pools with the second one having the tag 'physical_network'
- # and the value 'physnet1'
- self.assertEqual(2, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
+ # they are also part of the keys.
+
+ # 3 pools for the pci_untagged_devices
+ devs = []
+ j = 0
+ for i in range(j, j + nr_untagged):
+ self._assertPoolContent(self.pci_stats.pools[i], '1137', '0072', 1)
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_untagged_devices, devs)
+ j += nr_untagged
+
+        # 4 pools for the pci_tagged_devices
+ devs = []
+ for i in range(j, j + nr_tagged):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "1137",
+ "0071",
+ 1,
+ physical_network="physnet1",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.pci_tagged_devices, devs)
+ j += nr_tagged
+
+        # one pool for the remote_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_remote):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101e",
+ 1,
+ remote_managed="true",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.remote_managed_netdevs, devs)
+ j += nr_remote
+
+        # two pools for the locally_managed_netdevs
+ devs = []
+ for i in range(j, j + nr_local):
+ self._assertPoolContent(
+ self.pci_stats.pools[i],
+ "15b3",
+ "101c",
+ 1,
+ remote_managed="false",
+ )
+ devs += self.pci_stats.pools[i]['devices']
+ self.assertEqual(self.locally_managed_netdevs, devs)
+ j += nr_local
def test_add_devices(self):
self._create_pci_devices()
@@ -543,15 +687,43 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
spec=[{'physical_network': 'physnet1'}]),
objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': '1137',
- 'product_id': '0072'}])]
+ 'product_id': '0072'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '15b3',
+ 'product_id': '101e',
+ PCI_REMOTE_MANAGED_TAG: 'True'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '15b3',
+ 'product_id': '101c',
+ PCI_REMOTE_MANAGED_TAG: 'False'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '15b3',
+ 'product_id': '101c',
+ PCI_REMOTE_MANAGED_TAG: 'False'}])]
devs = self.pci_stats.consume_requests(pci_requests)
- self.assertEqual(2, len(devs))
- self.assertEqual(set(['0071', '0072']),
+ self.assertEqual(5, len(devs))
+ self.assertEqual(set(['0071', '0072', '101e', '101c']),
set([dev.product_id for dev in devs]))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 2)
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071', 3,
+ self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072', 0)
+ self._assertPoolContent(self.pci_stats.pools[1], '1137', '0072', 1)
+ self._assertPoolContent(self.pci_stats.pools[2], '1137', '0072', 1)
+
+ self._assertPoolContent(self.pci_stats.pools[3], '1137', '0071', 0,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[4], '1137', '0071', 1,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[5], '1137', '0071', 1,
+ physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[6], '1137', '0071', 1,
physical_network='physnet1')
+ self._assertPoolContent(self.pci_stats.pools[7], '15b3', '101e', 0,
+ remote_managed='true')
+ self._assertPoolContent(self.pci_stats.pools[8], '15b3', '101c', 0,
+ remote_managed='false')
+ self._assertPoolContent(self.pci_stats.pools[9], '15b3', '101c', 0,
+ remote_managed='false')
+
def test_add_device_no_devspec(self):
self._create_pci_devices()
pci_dev = {'compute_node_id': 1,
@@ -592,38 +764,779 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
self.pci_stats.remove_device(dev2)
self._assertPools()
- def test_update_device(self):
- # Update device type of one of the device from type-PCI to
+ def test_update_device_splits_the_pool(self):
+        # Update device type of one of the devices from type-VF to
# type-PF. Verify if the existing pool is updated and a new
# pool is created with dev_type type-PF.
- self._create_pci_devices()
- dev1 = self.pci_tagged_devices.pop()
- dev1.dev_type = 'type-PF'
- self.pci_stats.update_device(dev1)
- self.assertEqual(3, len(self.pci_stats.pools))
- self._assertPoolContent(self.pci_stats.pools[0], '1137', '0072',
- len(self.pci_untagged_devices))
- self.assertEqual(self.pci_untagged_devices,
- self.pci_stats.pools[0]['devices'])
- self._assertPoolContent(self.pci_stats.pools[1], '1137', '0071',
- len(self.pci_tagged_devices),
- physical_network='physnet1')
- self.assertEqual(self.pci_tagged_devices,
- self.pci_stats.pools[1]['devices'])
- self._assertPoolContent(self.pci_stats.pools[2], '1137', '0071',
- 1,
- physical_network='physnet1')
- self.assertEqual(dev1,
- self.pci_stats.pools[2]['devices'][0])
+ vfs = []
+ for i in range(3):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="1137",
+ product_id="0071",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(3, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+
+ dev = vfs.pop()
+ dev.dev_type = 'type-PF'
+ dev.parent_addr = None
+ self.pci_stats.update_device(dev)
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(1, self.pci_stats.pools[1]["count"])
+ self.assertEqual([dev], self.pci_stats.pools[1]["devices"])
+
+ def test_only_vfs_from_the_same_parent_are_pooled(self):
+ pf1_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0a:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0a:01.0",
+ numa_node=0
+ )
+ pf1_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ pf2_vfs = []
+ for i in range(2):
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ address="0000:0b:00.%d" % i,
+ vendor_id="15b3",
+ product_id="1018",
+ status="available",
+ dev_type="type-VF",
+ parent_addr="0000:0b:01.0",
+ numa_node=0
+ )
+ pf2_vfs.append(dev)
+ self.pci_stats.add_device(dev)
+
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[0]["count"])
+ self.assertEqual(pf1_vfs, self.pci_stats.pools[0]["devices"])
+ self.assertEqual(2, len(self.pci_stats.pools))
+ self.assertEqual(2, self.pci_stats.pools[1]["count"])
+ self.assertEqual(pf2_vfs, self.pci_stats.pools[1]["devices"])
+
+
+class PciDeviceStatsPlacementSupportTestCase(test.NoDBTestCase):
+
+ def test_device_spec_rc_and_traits_ignored_during_pooling(self):
+ """Assert that resource_class and traits from the device spec are not
+        used as a discriminator for pool creation.
+ """
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "resource_class": "foo",
+ "address": "*:81:00.1",
+ "traits": "gold",
+ }
+ ),
+ jsonutils.dumps(
+ {
+ "resource_class": "baar",
+ "address": "*:81:00.2",
+ "traits": "silver",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+        # the two devices are matched by different device_specs with different
+ # resource_class and traits fields
+ pci_stats.add_device(pci_dev1)
+ pci_stats.add_device(pci_dev2)
+
+ # but they are put in the same pool as all the other fields are
+ # matching
+ self.assertEqual(1, len(pci_stats.pools))
+ self.assertEqual(2, pci_stats.pools[0]["count"])
+
+ def test_filter_pools_for_spec_ignores_rc_and_traits_in_spec(self):
+ """Assert that resource_class and traits are ignored in the pci
+        request spec when matching the request to pools.
+ """
+ pci_stats = stats.PciDeviceStats(objects.NUMATopology())
+ pools = [{"vendor_id": "dead", "product_id": "beef"}]
+
+ matching_pools = pci_stats._filter_pools_for_spec(
+ pools=pools,
+ request=objects.InstancePCIRequest(
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "resource_class": "foo",
+ "traits": "blue",
+ }
+ ]
+ ),
+ )
+
+ self.assertEqual(pools, matching_pools)
+
+ def test_populate_pools_metadata_from_assigned_devices(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp1}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertEqual(uuids.rp1, pci_stats.pools[0]['rp_uuid'])
+
+ def test_populate_pools_metadata_from_assigned_devices_device_without_rp(
+ self
+ ):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ self.assertNotIn('rp_uuid', pci_stats.pools[0])
+
+ def test_populate_pools_metadata_from_assigned_devices_multiple_rp(self):
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "0000:81:00.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ dev_filter = whitelist.Whitelist(device_spec)
+ pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(),
+ dev_filter=dev_filter)
+ pci_dev1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_dev2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ pci_stats.add_device(pci_dev1)
+ pci_dev1.extra_info = {'rp_uuid': uuids.rp1}
+ pci_stats.add_device(pci_dev2)
+ pci_dev2.extra_info = {'rp_uuid': uuids.rp2}
+
+ self.assertEqual(1, len(pci_stats.pools))
+
+ self.assertRaises(
+ ValueError,
+ pci_stats.populate_pools_metadata_from_assigned_devices,
+ )
+
+
+class PciDeviceStatsProviderMappingTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ # for simplicity accept any devices
+ device_spec = [
+ jsonutils.dumps(
+ {
+ "address": "*:*:*.*",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group="pci")
+ self.dev_filter = whitelist.Whitelist(device_spec)
+ self.pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(), dev_filter=self.dev_filter
+ )
+ # add devices represented by different RPs in placement
+ # two VFs on the same PF
+ self.vf1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.1",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.vf2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:81:00.2",
+ parent_addr="0000:81:00.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.pci_stats.add_device(self.vf1)
+ self.vf1.extra_info = {'rp_uuid': uuids.pf1}
+ self.pci_stats.add_device(self.vf2)
+ self.vf2.extra_info = {'rp_uuid': uuids.pf1}
+ # two PFs pf2 and pf3 (pf1 is used as the parent of the above VFs)
+ self.pf2 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:82:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PF",
+ )
+ self.pci_stats.add_device(self.pf2)
+ self.pf2.extra_info = {'rp_uuid': uuids.pf2}
+
+ self.pf3 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:83:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PF",
+ )
+ self.pci_stats.add_device(self.pf3)
+ self.pf3.extra_info = {'rp_uuid': uuids.pf3}
+ # a standalone PCI device
+ self.pci1 = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address="0000:84:00.0",
+ parent_addr=None,
+ numa_node=0,
+ dev_type="type-PCI",
+ )
+ self.pci_stats.add_device(self.pci1)
+ self.pci1.extra_info = {'rp_uuid': uuids.pci1}
+
+ # populate the RP -> pool mapping from the devices to their pools
+ self.pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ # we have 1 pool for the two VFs, then each of the remaining devices
+ # has its own pool
+ self.num_pools = 4
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.num_devs = 5
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_support_request_unrestricted(self):
+ reqs = []
+ for dev_type in ["type-VF", "type-PF", "type-PCI"]:
+ req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": dev_type,
+ }
+ ],
+ )
+ reqs.append(req)
+
+ # an empty mapping means unrestricted by any provider
+ # we have devs for all types so each request should fit
+ self.assertTrue(self.pci_stats.support_requests(reqs, {}))
+
+ # the support_requests call is expected not to consume any device
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ # now apply the same request to consume the pools
+ self.pci_stats.apply_requests(reqs, {})
+ # we have consumed 3 devs (a VF, a PF, and a PCI)
+ self.assertEqual(
+ self.num_devs - 3,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # the empty pools are purged. We have one pool for the remaining VF
+ # and one for the remaining PF
+ self.assertEqual(2, len(self.pci_stats.pools))
+
+ def test_support_request_restricted_by_provider_mapping(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # simulate that placement restricted the possible RPs to pf3
+ self.assertTrue(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf3]}
+ )
+ )
+
+ # the support_requests call is expected not to consume any device
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ # now apply the request and see if the right device is consumed
+ self.pci_stats.apply_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf3]}
+ )
+
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # pf3 is not available in the pools any more
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_support_request_restricted_by_provider_mapping_does_not_fit(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate with a PF
+ # that is not in the pools anymore, e.g. filtered out by numa cell.
+ # We expect the request to fail
+ self.assertFalse(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf4]}
+ )
+ )
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.apply_requests,
+ [pf_req],
+ {f"{uuids.req1}-0": [uuids.pf4]},
+ )
+ # and the pools are not changed
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_support_request_neutron_port_based_request_ignore_mapping(self):
+ # by not having the alias_name set this becomes a neutron port based
+ # PCI request
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate with a PF
+ # that is not in the pools anymore, e.g. filtered out by numa cell.
+ # We expect that the placement selection is ignored for neutron port
+ # based requests so this request should fit as we have PFs in the pools
+ self.assertTrue(
+ self.pci_stats.support_requests(
+ [pf_req], {f"{uuids.req1}-0": [uuids.pf4]}
+ )
+ )
+ self.pci_stats.apply_requests(
+ [pf_req],
+ {f"{uuids.req1}-0": [uuids.pf4]},
+ )
+ # and a PF is consumed
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+
+ def test_support_request_req_with_count_2(self):
+ # now ask for two PFs in a single request
+ pf_req = objects.InstancePCIRequest(
+ count=2,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned a candidate RP for each PF req
+ mapping = {
+ f"{uuids.req1}-0": [uuids.pf2],
+ f"{uuids.req1}-1": [uuids.pf3],
+ }
+ # so the request fits
+ self.assertTrue(self.pci_stats.support_requests([pf_req], mapping))
+ self.pci_stats.apply_requests([pf_req], mapping)
+ # and both PFs are consumed
+ self.assertEqual(self.num_pools - 2, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 2,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ self.assertEqual(
+ {uuids.pf1, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_support_requests_multiple_reqs(self):
+ # request both a VF and a PF
+ vf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ }
+ ],
+ )
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.pf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned a candidate RP for each req
+ mapping = {
+ # the VF is represented by the parent PF RP
+ f"{uuids.vf_req}-0": [uuids.pf1],
+ f"{uuids.pf_req}-0": [uuids.pf3],
+ }
+ # so the request fits
+ self.assertTrue(
+ self.pci_stats.support_requests([vf_req, pf_req], mapping)
+ )
+ self.pci_stats.apply_requests([vf_req, pf_req], mapping)
+ # and the proper devices are consumed
+ # Note that the VF pool still has a device so it remains
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 2,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def test_apply_gets_requested_uuids_from_pci_req(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ # Simulate that the scheduler already allocated a candidate
+ # and the mapping is stored in the request.
+ # The allocation restricts us to consuming from PF3 only
+ "rp_uuids": ",".join([uuids.pf3])
+ }
+ ],
+ )
+
+ # call apply with None mapping signalling that the allocation is
+ # already done and the resulting mapping is stored in the request
+ self.pci_stats.apply_requests([pf_req], provider_mapping=None)
+
+ # assert that the right device is consumed
+ self.assertEqual(self.num_pools - 1, len(self.pci_stats.pools))
+ self.assertEqual(
+ self.num_devs - 1,
+ sum(pool["count"] for pool in self.pci_stats.pools),
+ )
+ # pf3 is not available in the pools anymore
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {pool['rp_uuid'] for pool in self.pci_stats.pools},
+ )
+
+ def _create_two_pools_with_two_vfs(self):
+ # create two pools (PFs) with two VFs each
+ self.pci_stats = stats.PciDeviceStats(
+ objects.NUMATopology(), dev_filter=self.dev_filter
+ )
+ for pf_index in [1, 2]:
+ for vf_index in [1, 2]:
+ dev = objects.PciDevice(
+ compute_node_id=1,
+ vendor_id="dead",
+ product_id="beef",
+ address=f"0000:81:0{pf_index}.{vf_index}",
+ parent_addr=f"0000:81:0{pf_index}.0",
+ numa_node=0,
+ dev_type="type-VF",
+ )
+ self.pci_stats.add_device(dev)
+ dev.extra_info = {'rp_uuid': getattr(uuids, f"pf{pf_index}")}
+
+ # populate the RP -> pool mapping from the devices to their pools
+ self.pci_stats.populate_pools_metadata_from_assigned_devices()
+
+ # we have 2 pools and 4 devs in total
+ self.num_pools = 2
+ self.assertEqual(self.num_pools, len(self.pci_stats.pools))
+ self.num_devs = 4
+ self.assertEqual(
+ self.num_devs, sum(pool["count"] for pool in self.pci_stats.pools)
+ )
+
+ def test_apply_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ }
+ ],
+ )
+
+ # Simulate that placement returned an allocation candidate where 1 VF
+ # is consumed from PF1 and two from PF2
+ mapping = {
+ # the VF is represented by the parent PF RP
+ f"{uuids.vf_req}-0": [uuids.pf1],
+ f"{uuids.vf_req}-1": [uuids.pf2],
+ f"{uuids.vf_req}-2": [uuids.pf2],
+ }
+ # This should fit
+ self.assertTrue(
+ self.pci_stats.support_requests([vf_req], mapping)
+ )
+ # and when consumed the consumption from the pools should be in sync
+ # with the placement allocation. So the PF2 pool is expected to
+ # disappear as it is fully consumed and the PF1 pool should have
+ # one free device.
+ self.pci_stats.apply_requests([vf_req], mapping)
+ self.assertEqual(1, len(self.pci_stats.pools))
+ self.assertEqual(uuids.pf1, self.pci_stats.pools[0]['rp_uuid'])
+ self.assertEqual(1, self.pci_stats.pools[0]['count'])
+
+ def test_consume_asymmetric_allocation(self):
+ self._create_two_pools_with_two_vfs()
+ # ask for 3 VFs
+ vf_req = objects.InstancePCIRequest(
+ count=3,
+ alias_name='a-vf',
+ request_id=uuids.vf_req,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-VF",
+ # Simulate that the scheduler already allocated a candidate
+ # and the mapping is stored in the request.
+ # In placement 1 VF is allocated from PF1 and two from PF2
+ "rp_uuids": ",".join([uuids.pf1, uuids.pf2, uuids.pf2])
+ }
+ ],
+ )
+
+ # So when the PCI claim consumes devices based on this request we
+ # expect that nova follows what is allocated in placement.
+ devs = self.pci_stats.consume_requests([vf_req])
+ self.assertEqual(
+ {"0000:81:01.0": 1, "0000:81:02.0": 2},
+ collections.Counter(dev.parent_addr for dev in devs),
+ )
+
+ def test_consume_restricted_by_allocation(self):
+ pf_req = objects.InstancePCIRequest(
+ count=1,
+ alias_name='a-dev',
+ request_id=uuids.req1,
+ spec=[
+ {
+ "vendor_id": "dead",
+ "product_id": "beef",
+ "dev_type": "type-PF",
+ # Simulate that the scheduler already allocated a candidate
+ # and the mapping is stored in the request.
+ # The allocation restricts that we can only consume from
+ # PF3
+ "rp_uuids": ",".join([uuids.pf3])
+ }
+ ],
+ )
+
+ # Call consume. It always expects the allocated mapping to be stored
+ # in the PCI request as it is always called from the compute side.
+ consumed_devs = self.pci_stats.consume_requests([pf_req])
+ # assert that the right device is consumed
+ self.assertEqual([self.pf3], consumed_devs)
+ # pf3 is not available in the pools anymore
+ self.assertEqual(
+ {uuids.pf1, uuids.pf2, uuids.pci1},
+ {
+ pool["rp_uuid"]
+ for pool in self.pci_stats.pools
+ if pool["count"] > 0
+ },
+ )
class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
def setUp(self):
super(PciDeviceVFPFStatsTestCase, self).setUp()
- white_list = ['{"vendor_id":"8086","product_id":"1528"}',
- '{"vendor_id":"8086","product_id":"1515"}']
- self.flags(passthrough_whitelist=white_list, group='pci')
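+ # The two new 15b3 entries feed the remote_managed tests below:
+ # remote_managed=true marks VFs expected to be managed via a SmartNIC
+ # DPU (the test devices created for them carry a VPD
+ # card_serial_number), while remote_managed=false keeps the device as
+ # an ordinary passthrough candidate.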
+ device_spec = [
+ jsonutils.dumps({"vendor_id": "8086", "product_id": "1528"}),
+ jsonutils.dumps({"vendor_id": "8086", "product_id": "1515"}),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "a2d6",
+ "remote_managed": "false",
+ }
+ ),
+ jsonutils.dumps(
+ {
+ "vendor_id": "15b3",
+ "product_id": "101e",
+ "remote_managed": "true",
+ }
+ ),
+ ]
+ self.flags(device_spec=device_spec, group='pci')
self.pci_stats = stats.PciDeviceStats(objects.NUMATopology())
def _create_pci_devices(self, vf_product_id=1515, pf_product_id=1528):
@@ -644,6 +1557,26 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
dev_obj.child_devices = []
self.sriov_pf_devices.append(dev_obj)
+ # PF devices for remote_managed VFs.
+ self.sriov_pf_devices_remote = []
+ for dev in range(2):
+ pci_dev = {
+ 'compute_node_id': 1,
+ 'address': '0001:81:00.%d' % dev,
+ 'vendor_id': '15b3',
+ 'product_id': 'a2d6',
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_PF,
+ 'parent_addr': None,
+ 'numa_node': 0,
+ "capabilities": {"vpd": {
+ "card_serial_number": "MT2113X00000"}},
+ }
+ dev_obj = objects.PciDevice.create(None, pci_dev)
+ dev_obj.child_devices = []
+ self.sriov_pf_devices_remote.append(dev_obj)
+
self.sriov_vf_devices = []
for dev in range(8):
pci_dev = {
@@ -662,6 +1595,25 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
dev_obj.parent_device.child_devices.append(dev_obj)
self.sriov_vf_devices.append(dev_obj)
+ self.sriov_vf_devices_remote = []
+ for dev in range(8):
+ pci_dev = {
+ 'compute_node_id': 1,
+ 'address': '0001:81:10.%d' % dev,
+ 'vendor_id': '15b3',
+ 'product_id': '101e',
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_VF,
+ 'parent_addr': '0001:81:00.%d' % int(dev / 4),
+ 'numa_node': 0,
+ "capabilities": {"vpd": {"card_serial_number": "MT2113X00000"}}
+ }
+ dev_obj = objects.PciDevice.create(None, pci_dev)
+ dev_obj.parent_device = self.sriov_pf_devices_remote[int(dev / 4)]
+ dev_obj.parent_device.child_devices.append(dev_obj)
+ self.sriov_vf_devices_remote.append(dev_obj)
+
self.vdpa_devices = []
for dev in range(8):
pci_dev = {
@@ -683,6 +1635,8 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
list(map(self.pci_stats.add_device, self.sriov_pf_devices))
list(map(self.pci_stats.add_device, self.sriov_vf_devices))
list(map(self.pci_stats.add_device, self.vdpa_devices))
+ list(map(self.pci_stats.add_device, self.sriov_pf_devices_remote))
+ list(map(self.pci_stats.add_device, self.sriov_vf_devices_remote))
def test_consume_VDPA_requests(self):
self._create_pci_devices()
@@ -726,7 +1680,8 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
free_devs = self.pci_stats.get_free_devs()
# Validate that there are no free devices left, as when allocating
# both available PFs, its VFs should not be available.
- self.assertEqual(0, len(free_devs))
+ self.assertEqual(0, len([d for d in free_devs
+ if d.product_id == '1515']))
def test_consume_VF_and_PF_requests(self):
self._create_pci_devices()
@@ -747,10 +1702,85 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
objects.InstancePCIRequest(count=1,
spec=[{'product_id': '1528',
'dev_type': 'type-PF'}])]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
- def test_consume_VF_and_PF_same_prodict_id_failed(self):
+ def test_consume_VF_and_PF_same_product_id_failed(self):
self._create_pci_devices(pf_product_id=1515)
pci_requests = [objects.InstancePCIRequest(count=9,
spec=[{'product_id': '1515'}])]
- self.assertIsNone(self.pci_stats.consume_requests(pci_requests))
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
+
+ def test_consume_PF_not_remote_managed(self):
+ self._create_pci_devices()
+ pci_requests = [objects.InstancePCIRequest(count=2,
+ spec=[{'product_id': '1528',
+ 'dev_type': 'type-PF',
+ PCI_REMOTE_MANAGED_TAG: 'false'}])]
+ devs = self.pci_stats.consume_requests(pci_requests)
+ self.assertEqual(2, len(devs))
+ self.assertEqual(set(['1528']),
+ set([dev.product_id for dev in devs]))
+ free_devs = self.pci_stats.get_free_devs()
+ # Validate that there are no free devices left with the
+ # product ID under test, as when allocating both available
+ # PFs, their VFs should not be available.
+ self.assertEqual(0, len([d for d in free_devs
+ if d.product_id == '1528']))
+
+ def test_consume_VF_requests_remote_managed(self):
+ self._create_pci_devices()
+ pci_requests = [objects.InstancePCIRequest(count=2,
+ spec=[{PCI_REMOTE_MANAGED_TAG: 'true'}])]
+ devs = self.pci_stats.consume_requests(pci_requests)
+ self.assertEqual(2, len(devs))
+ self.assertEqual(set(['101e']),
+ set([dev.product_id for dev in devs]))
+ free_devs = self.pci_stats.get_free_devs()
+ # Validate that the parents of these VFs have been removed
+ # from the pools.
+ for dev in devs:
+ self.assertNotIn(dev.parent_addr,
+ [free_dev.address for free_dev in free_devs])
+
+ def test_consume_VF_requests_remote_managed_filtered(self):
+ self._create_pci_devices()
+ pci_requests = [objects.InstancePCIRequest(count=1,
+ spec=[{'product_id': '101e',
+ PCI_REMOTE_MANAGED_TAG: 'false'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'product_id': '101e'}])]
+ free_devs_before = self.pci_stats.get_free_devs()
+ self.assertRaises(
+ exception.PciDeviceRequestFailed,
+ self.pci_stats.consume_requests,
+ pci_requests,
+ )
+ free_devs_after = self.pci_stats.get_free_devs()
+ self.assertEqual(free_devs_before, free_devs_after)
+
+ def test_consume_VF_requests_remote_managed_mix(self):
+ self._create_pci_devices()
+ pci_requests = [objects.InstancePCIRequest(count=1,
+ spec=[{'product_id': '101e',
+ PCI_REMOTE_MANAGED_TAG: 'true'}]),
+ objects.InstancePCIRequest(count=1,
+ spec=[{'product_id': '1515',
+ PCI_REMOTE_MANAGED_TAG: 'false'}])]
+ devs = self.pci_stats.consume_requests(pci_requests)
+ self.assertEqual(2, len(devs))
+ self.assertEqual(set(['101e', '1515']),
+ set([dev.product_id for dev in devs]))
+ free_devs = self.pci_stats.get_free_devs()
+ # Validate that the parents of these VFs have been removed
+ # from the pools.
+ for dev in devs:
+ self.assertNotIn(dev.parent_addr,
+ [free_dev.address for free_dev in free_devs])
diff --git a/nova/tests/unit/pci/test_utils.py b/nova/tests/unit/pci/test_utils.py
index e444f13729..1a1f9955b9 100644
--- a/nova/tests/unit/pci/test_utils.py
+++ b/nova/tests/unit/pci/test_utils.py
@@ -16,9 +16,9 @@
import glob
import os
+from unittest import mock
import fixtures
-import mock
from nova import exception
from nova.pci import utils
@@ -44,7 +44,7 @@ class PciDeviceMatchTestCase(test.NoDBTestCase):
[{'vendor_id': 'v1', 'device_id': 'd1'},
{'vendor_id': 'v3', 'device_id': 'd3'}]))
- def test_spec_dismatch(self):
+ def test_spec_mismatch(self):
self.assertFalse(utils.pci_device_prop_match(
self.fake_pci_1,
[{'vendor_id': 'v4', 'device_id': 'd4'},
@@ -239,7 +239,7 @@ class GetVfNumByPciAddressTestCase(test.NoDBTestCase):
mock_iglob.return_value = self.paths
mock_readlink.return_value = '../../0000:00:00.1'
vf_num = utils.get_vf_num_by_pci_address(self.pci_address)
- self.assertEqual(vf_num, '3')
+ self.assertEqual(vf_num, 3)
@mock.patch.object(os, 'readlink')
@mock.patch.object(glob, 'iglob')
@@ -251,3 +251,180 @@ class GetVfNumByPciAddressTestCase(test.NoDBTestCase):
utils.get_vf_num_by_pci_address,
self.pci_address
)
+
+
+class GetProductIDByPfPciAddressTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.pci_address = "0000:0a:00.0"
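+ # The tests below mock builtins.open for the sysfs attribute
+ # /sys/bus/pci/devices/<PF address>/sriov_vf_device which, on Linux,
+ # holds the PCI device id that the PF's VFs report.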
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/sriov_vf_device":
+ mock.mock_open(
+ read_data="101e\n"
+ )()
+ }.get(f)
+ )
+ ),
+ )
+ def test_sriov_vf_device_read(self):
+ product_id = utils.get_vf_product_id_by_pf_addr(self.pci_address)
+ self.assertEqual(product_id, "101e")
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/sriov_vf_device":
+ mock.mock_open(
+ read_data=""
+ )()
+ }.get(f)
+ )
+ ),
+ )
+ def test_sriov_vf_device_read_value_error(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_vf_product_id_by_pf_addr,
+ self.pci_address,
+ )
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/sriov_vf_device":
+ mock.mock_open(
+ mock=mock.MagicMock(side_effect=IOError())
+ )()
+ }.get(f)
+ )
+ ),
+ )
+ def test_sriov_vf_device_read_io_error(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_vf_product_id_by_pf_addr,
+ self.pci_address,
+ )
+
+
+class GetPciIdsByPciAddressTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.pci_address = "0000:0a:00.0"
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/vendor":
+ mock.mock_open(
+ read_data="0x15b3\n"
+ )(),
+ "/sys/bus/pci/devices/0000:0a:00.0/product":
+ mock.mock_open(
+ read_data="0x101e\n"
+ )(),
+ }.get(f)
+ )
+ ),
+ )
+ def test_get_pci_ids(self):
+ self.assertEqual(
+ utils.get_pci_ids_by_pci_addr(self.pci_address), ("15b3", "101e")
+ )
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/vendor": mock.mock_open(
+ read_data=""
+ )(),
+ }.get(f)
+ )
+ ),
+ )
+ def test_get_pci_ids_value_error_vendor(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_pci_ids_by_pci_addr,
+ self.pci_address,
+ )
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/vendor":
+ mock.mock_open(
+ read_data="0x15b3\n"
+ )(),
+ "/sys/bus/pci/devices/0000:0a:00.0/product":
+ mock.mock_open(
+ read_data=""
+ )(),
+ }.get(f)
+ )
+ ),
+ )
+ def test_get_pci_ids_value_error_product(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_pci_ids_by_pci_addr,
+ self.pci_address,
+ )
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/vendor": mock.mock_open(
+ mock=mock.MagicMock(side_effect=IOError())
+ )()
+ }.get(f)
+ )
+ ),
+ )
+ def test_get_pci_ids_io_error_vendor(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_pci_ids_by_pci_addr,
+ self.pci_address,
+ )
+
+ @mock.patch(
+ "builtins.open",
+ new=mock.MagicMock(
+ side_effect=(
+ lambda f: {
+ "/sys/bus/pci/devices/0000:0a:00.0/vendor":
+ mock.mock_open(
+ read_data="0x15b3\n"
+ )(),
+ "/sys/bus/pci/devices/0000:0a:00.0/product":
+ mock.mock_open(
+ mock=mock.MagicMock(side_effect=IOError())
+ )(),
+ }.get(f)
+ )
+ ),
+ )
+ def test_get_pci_ids_io_error_product(self):
+ self.assertRaises(
+ ValueError,
+ utils.get_pci_ids_by_pci_addr,
+ self.pci_address,
+ )
diff --git a/nova/tests/unit/policies/base.py b/nova/tests/unit/policies/base.py
index aff01d14f6..7490441d92 100644
--- a/nova/tests/unit/policies/base.py
+++ b/nova/tests/unit/policies/base.py
@@ -24,6 +24,23 @@ from nova.tests import fixtures
LOG = logging.getLogger(__name__)
+def rule_if_system(system_rule, non_system_rule, context):
+ """Helper function to pick a rule based on system-ness of context.
+
+ This can be used (with functools.partial) to choose between two
+ rule names, based on whether or not the context has system
+ scope. Specifically if we will fail the parent of a nested policy
+ check based on scope_types=['project'], this can be used to choose
+ the parent rule name for the error message check in
+ common_policy_check().
+
+ """
+ if context.system_scope:
+ return system_rule
+ else:
+ return non_system_rule
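+# Illustrative usage sketch (the rule names below are hypothetical):
+# bind the two candidate rule names with functools.partial and pass the
+# resulting callable as rule_name; common_policy_check() will call it
+# with the request context to decide which rule to assert on.
+#
+#     rule_name = functools.partial(
+#         rule_if_system,
+#         'os_compute_api:some-system-rule',
+#         'os_compute_api:some-project-rule')
+#     self.common_policy_auth(authorized_contexts, rule_name,
+#                             controller_method, req)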
+
+
class BasePolicyTest(test.TestCase):
# NOTE(gmann): Set this flag to True if you would like to tests the
# new behaviour of policy without deprecated rules.
@@ -36,11 +53,21 @@ class BasePolicyTest(test.TestCase):
# For Example:
# rules_without_deprecation{
# "os_compute_api:os-deferred-delete:restore":
- # "rule:system_admin_or_owner"}
+ # "rule:project_admin_api"}
rules_without_deprecation = {}
def setUp(self):
super(BasePolicyTest, self).setUp()
+ # TODO(gmann): enforce_scope and enforce_new_defaults are enabled
+ # by default in the code, so disable them in the base test class
+ # while we still have deprecated rules and their tests. We have
+ # enforce_scope and no-legacy tests which explicitly enable scope and
+ # new defaults to test them. In the future, once we remove the
+ # deprecated rules, along with refactoring the unit tests, we can
+ # stop overriding the oslo policy flags.
+ self.flags(enforce_scope=False, group="oslo_policy")
+ if not self.without_deprecated_rules:
+ self.flags(enforce_new_defaults=False, group="oslo_policy")
self.useFixture(fixtures.NeutronFixture(self))
self.policy = self.useFixture(fixtures.RealPolicyFixture())
@@ -95,33 +122,135 @@ class BasePolicyTest(test.TestCase):
project_id=self.project_id_other,
roles=['reader'])
- self.all_contexts = [
+ self.all_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.other_project_member_context,
self.project_foo_context, self.other_project_reader_context
- ]
+ ])
+
+ # All the project contexts for easy access.
+ self.all_project_contexts = set([
+ self.legacy_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_member_context,
+ self.other_project_reader_context,
+ ])
+ # All the system contexts for easy access.
+ self.all_system_contexts = set([
+ self.system_admin_context, self.system_foo_context,
+ self.system_member_context, self.system_reader_context,
+ ])
+ # A few common sets of contexts to be used in tests
+ #
+ # With scope disabled and no legacy rules, any admin and
+ # project member has access. No other role in that project
+ # will have access.
+ self.project_member_or_admin_with_no_scope_no_legacy = set([
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ ])
+ # With scope enabled and legacy rules, only the project scoped admin
+ # and any role in that project will have access.
+ self.project_m_r_or_admin_with_scope_and_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context
+ ])
+ # With scope enabled and no legacy rules, only the project scoped admin
+ # and project members have access. No other role in that project
+ # or system scoped token will have access.
+ self.project_member_or_admin_with_scope_no_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context
+ ])
+ # With scope disabled and no legacy rules, any admin,
+ # project member, and project reader has access. No other
+ # role in that project will have access.
+ self.project_reader_or_admin_with_no_scope_no_legacy = set([
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context
+ ])
+ # With scope enabled and no legacy rules, only the project scoped admin,
+ # project members, and project reader have access. No other role
+ # in that project or system scoped token will have access.
+ self.project_reader_or_admin_with_scope_no_legacy = set([
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context
+ ])
if self.without_deprecated_rules:
# To simulate the new world, remove deprecations by overriding
# rules which has the deprecated rules.
self.rules_without_deprecation.update({
- "system_admin_or_owner":
- "rule:system_admin_api or rule:project_member_api",
- "system_or_project_reader":
- "rule:system_reader_api or rule:project_reader_api",
- "system_admin_api":
- "role:admin and system_scope:all",
- "system_reader_api":
- "role:reader and system_scope:all",
+ "context_is_admin":
+ "role:admin",
+ "project_reader_or_admin":
+ "rule:project_reader_api or rule:context_is_admin",
+ "project_admin_api":
+ "role:admin and project_id:%(project_id)s",
"project_member_api":
"role:member and project_id:%(project_id)s",
+ "project_reader_api":
+ "role:reader and project_id:%(project_id)s",
+ "project_member_or_admin":
+ "rule:project_member_api or rule:context_is_admin",
+ "project_reader_or_admin":
+ "rule:project_reader_api or rule:context_is_admin",
})
self.policy.set_rules(self.rules_without_deprecation,
overwrite=False)
+ def reduce_set(self, name, new_set):
+ """Reduce a named set of contexts in a subclass.
+
+ This removes things from a set in a child test class by taking
+ a new set, but asserts that no *new* contexts are added over
+ what is defined in the parent.
+
+ :param name: The name of a set of contexts on self
+ (e.g. 'project' for self.project_contexts)
+ :param new_set: The new set of contexts that should be used in
+ the above set. The new_set is asserted to be a
+ perfect subset of the existing set
+ """
+ current = getattr(self, '%s_contexts' % name)
+
+ errors = ','.join(x.user_id for x in new_set - current)
+ self.assertEqual('', errors,
+ 'Attempt to reduce set would add %s' % errors)
+
+ LOG.info('%s.%s_contexts: removing %s',
+ self.__class__.__name__,
+ name,
+ ','.join(x.user_id for x in current - new_set))
+ setattr(self, '%s_contexts' % name, new_set)
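+ # Illustrative usage (the set name is hypothetical): a child test
+ # class could call
+ #     self.reduce_set('project_action_authorized',
+ #                     self.all_project_contexts)
+ # to narrow the parent's self.project_action_authorized_contexts
+ # without being able to add new contexts to it.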
+
+ def common_policy_auth(self, authorized_contexts,
+ rule_name,
+ func, req, *arg, **kwarg):
+ """Check a policy rule against a set of authorized contexts.
+
+ This is exactly like common_policy_check, except that it
+ assumes any contexts not in the authorized set are in the
+ unauthorized set.
+ """
+ # The unauthorized users are any not in the authorized set.
+ unauth = list(set(self.all_contexts) - set(authorized_contexts))
+ # In case a set was passed in, convert to list for stable ordering.
+ authorized_contexts = list(authorized_contexts)
+ # Log both sets in the order we will test them to aid debugging of
+ # fatal=False responses.
+ LOG.info('Authorized users: %s', list(
+ x.user_id for x in authorized_contexts))
+ LOG.info('Unauthorized users: %s', list(x.user_id for x in unauth))
+ return self.common_policy_check(authorized_contexts, unauth,
+ rule_name, func, req, *arg, **kwarg)
+
def common_policy_check(self, authorized_contexts,
unauthorized_contexts, rule_name,
func, req, *arg, **kwarg):
@@ -146,15 +275,25 @@ class BasePolicyTest(test.TestCase):
def ensure_raises(req, *args, **kwargs):
exc = self.assertRaises(
exception.PolicyNotAuthorized, func, req, *arg, **kwarg)
+ # NOTE(danms): We may need to check a different rule_name
+ # as the enforced policy, based on the context we are
+ # using. Examples are multi-policy APIs for similar
+ # reasons as below. If we are passed a function for
+ # rule_name, call it with the context being used to
+ # determine the rule_name we should verify.
+ if callable(rule_name):
+ actual_rule_name = rule_name(req.environ['nova.context'])
+ else:
+ actual_rule_name = rule_name
# NOTE(gmann): In case of multi-policy APIs, PolicyNotAuthorized
# exception can be raised from either of the policy so checking
# the error message, which includes the rule name, can mismatch.
# Tests verifying the multi policy can pass rule_name as None
# to skip the error message assert.
- if rule_name is not None:
+ if actual_rule_name is not None:
self.assertEqual(
"Policy doesn't allow %s to be performed." %
- rule_name, exc.format_message())
+ actual_rule_name, exc.format_message())
# Verify all the context having allowed scope and roles pass
# the policy check.
for context in authorized_contexts:
diff --git a/nova/tests/unit/policies/test_admin_actions.py b/nova/tests/unit/policies/test_admin_actions.py
index c5522616ff..21157fd832 100644
--- a/nova/tests/unit/policies/test_admin_actions.py
+++ b/nova/tests/unit/policies/test_admin_actions.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -40,40 +41,42 @@ class AdminActionsPolicyTest(base.BasePolicyTest):
uuid = uuids.fake_id
self.instance = fake_instance.fake_instance_obj(
self.project_member_context,
- id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
- task_state=None, launched_at=timeutils.utcnow())
+ id=1, uuid=uuid, project_id=self.project_id,
+ vm_state=vm_states.ACTIVE, task_state=None,
+ launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin is able to change the service
- self.admin_authorized_contexts = [
+ # By default, legacy rules are enabled and scope checks are disabled.
+ # System admin, legacy admin, and project admin are able to perform
+ # server admin actions.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to change the service
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
@mock.patch('nova.objects.Instance.save')
def test_reset_state_policy(self, mock_save):
rule_name = "os_compute_api:os-admin-actions:reset_state"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._reset_state,
- self.req, self.instance.uuid,
- body={'os-resetState': {'state': 'active'}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name, self.controller._reset_state,
+ self.req, self.instance.uuid,
+ body={'os-resetState': {'state': 'active'}})
def test_inject_network_info_policy(self):
rule_name = "os_compute_api:os-admin-actions:inject_network_info"
with mock.patch.object(self.controller.compute_api,
"inject_network_info"):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._inject_network_info,
- self.req, self.instance.uuid, body={})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._inject_network_info,
+ self.req, self.instance.uuid, body={})
+
+
+class AdminActionsNoLegacyNoScopePolicyTest(AdminActionsPolicyTest):
+ """Test Admin Actions APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
class AdminActionsScopeTypePolicyTest(AdminActionsPolicyTest):
@@ -90,27 +93,15 @@ class AdminActionsScopeTypePolicyTest(AdminActionsPolicyTest):
def setUp(self):
super(AdminActionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin will not be able to
+ # perform server admin actions.
+ self.project_action_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class AdminActionsNoLegacyPolicyTest(AdminActionsScopeTypePolicyTest):
+class AdminActionsScopeTypeNoLegacyPolicyTest(AdminActionsScopeTypePolicyTest):
"""Test Admin Actions APIs policies with system scope enabled,
- and no more deprecated rules.
+ and no more deprecated rules, which means scope + new defaults, so
+ only project admin is able to perform admin actions on their server.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(AdminActionsScopeTypePolicyTest, self).setUp()
- # Check that system admin is able to perform the system level actions
- # on server.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to perform the system
- # level actions on server.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
diff --git a/nova/tests/unit/policies/test_admin_password.py b/nova/tests/unit/policies/test_admin_password.py
index b733f83e5d..01cce2950e 100644
--- a/nova/tests/unit/policies/test_admin_password.py
+++ b/nova/tests/unit/policies/test_admin_password.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -47,28 +48,23 @@ class AdminPasswordPolicyTest(base.BasePolicyTest):
user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to change the password
- self.admin_authorized_contexts = [
+ # With legacy rules and no scope checks, all admins, project members,
+ # project readers, and any other project role (because the legacy rule
+ # allows the server owner - same project id, no role check) are able
+ # to change the password for their server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin is not able to change the password
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.set_admin_password')
def test_change_paassword_policy(self, mock_password):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- self.rule_name,
- self.controller.change_password,
- self.req, self.instance.uuid,
- body={'changePassword': {
- 'adminPass': '1234pass'}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ self.rule_name,
+ self.controller.change_password,
+ self.req, self.instance.uuid,
+ body={'changePassword': {
+ 'adminPass': '1234pass'}})
def test_change_password_overridden_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -93,6 +89,22 @@ class AdminPasswordPolicyTest(base.BasePolicyTest):
mock.ANY, '1234pass')
+class AdminPasswordNoLegacyNoScopePolicyTest(AdminPasswordPolicyTest):
+ """Test Admin Password APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(AdminPasswordNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to change the server password.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class AdminPasswordScopeTypePolicyTest(AdminPasswordPolicyTest):
"""Test Admin Password APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -106,31 +118,24 @@ class AdminPasswordScopeTypePolicyTest(AdminPasswordPolicyTest):
def setUp(self):
super(AdminPasswordScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin is not allowed to change the password.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class AdminPasswordNoLegacyPolicyTest(AdminPasswordPolicyTest):
+class AdminPasswordScopeTypeNoLegacyTest(AdminPasswordScopeTypePolicyTest):
"""Test Admin Password APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules, which means scope + new defaults, so
+ only project admin and member are able to change their server password.
"""
+
without_deprecated_rules = True
def setUp(self):
- super(AdminPasswordNoLegacyPolicyTest, self).setUp()
+ super(AdminPasswordScopeTypeNoLegacyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system or projct admin or owner is able to change
- # the password.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to change the
- # password.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ # With scope enabled and no legacy rules, only project admin/member
+ # will be able to change the password for the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_aggregates.py b/nova/tests/unit/policies/test_aggregates.py
index ce3c00f30b..6ac7b6e010 100644
--- a/nova/tests/unit/policies/test_aggregates.py
+++ b/nova/tests/unit/policies/test_aggregates.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import aggregates
@@ -31,39 +32,19 @@ class AggregatesPolicyTest(base.BasePolicyTest):
super(AggregatesPolicyTest, self).setUp()
self.controller = aggregates.AggregateController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to perform Aggregate Operations
- self.admin_authorized_contexts = [
+ # With legacy rules and scope checks disabled by default, system admin,
+ # legacy admin, and project admin will be able to perform Aggregate
+ # Operations.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to perform Aggregate Operations
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that system reader is able to get Aggregate
- self.system_reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-admin is not able to get Aggregate
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate_list')
def test_list_aggregate_policy(self, mock_list):
rule_name = "os_compute_api:os-aggregates:index"
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.compute.api.AggregateAPI.create_aggregate')
def test_create_aggregate_policy(self, mock_create):
@@ -74,66 +55,59 @@ class AggregatesPolicyTest(base.BasePolicyTest):
"hosts": ["host1", "host2"]})
body = {"aggregate": {"name": "test",
"availability_zone": "nova1"}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, body=body)
@mock.patch('nova.compute.api.AggregateAPI.update_aggregate')
def test_update_aggregate_policy(self, mock_update):
rule_name = "os_compute_api:os-aggregates:update"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.update,
- self.req, 1,
- body={"aggregate": {"name": "new_name"}})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ self.req, 1,
+ body={"aggregate": {"name": "new_name"}})
@mock.patch('nova.compute.api.AggregateAPI.delete_aggregate')
def test_delete_aggregate_policy(self, mock_delete):
rule_name = "os_compute_api:os-aggregates:delete"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, 1)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, 1)
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate')
def test_show_aggregate_policy(self, mock_show):
rule_name = "os_compute_api:os-aggregates:show"
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, 1)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, 1)
@mock.patch('nova.compute.api.AggregateAPI.update_aggregate_metadata')
def test_set_metadata_aggregate_policy(self, mock_metadata):
rule_name = "os_compute_api:os-aggregates:set_metadata"
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._set_metadata,
- self.req, 1, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller._set_metadata,
+ self.req, 1, body=body)
@mock.patch('nova.compute.api.AggregateAPI.add_host_to_aggregate')
def test_add_host_aggregate_policy(self, mock_add):
rule_name = "os_compute_api:os-aggregates:add_host"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._add_host,
- self.req, 1,
- body={"add_host": {"host": "host1"}})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller._add_host,
+ self.req, 1,
+ body={"add_host": {"host": "host1"}})
@mock.patch('nova.compute.api.AggregateAPI.remove_host_from_aggregate')
def test_remove_host_aggregate_policy(self, mock_remove):
rule_name = "os_compute_api:os-aggregates:remove_host"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._remove_host,
- self.req, 1,
- body={"remove_host": {"host": "host1"}})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller._remove_host,
+ self.req, 1,
+ body={"remove_host": {"host": "host1"}})
@mock.patch('nova.compute.api.AggregateAPI.get_aggregate')
def test_images_aggregate_policy(self, mock_get):
@@ -144,10 +118,21 @@ class AggregatesPolicyTest(base.BasePolicyTest):
body = {'cache': [{'id': uuids.fake_id}]}
req = fakes.HTTPRequest.blank('', version='2.81')
with mock.patch('nova.conductor.api.ComputeTaskAPI.cache_images'):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.images,
- req, 1, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.images,
+ req, 1, body=body)
+
+
+class AggregatesNoLegacyNoScopePolicyTest(AggregatesPolicyTest):
+ """Test Aggregates APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only. In this case
+ system admin, legacy admin, and project admin will be able to
+ perform Aggregate Operations. Legacy admin will be allowed as policy
+ is just admin if no scope checks.
+
+ """
+
+ without_deprecated_rules = True
class AggregatesScopeTypePolicyTest(AggregatesPolicyTest):
@@ -164,28 +149,16 @@ class AggregatesScopeTypePolicyTest(AggregatesPolicyTest):
super(AggregatesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to perform Aggregate Operations.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to perform
- # Aggregate Operations.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
- # Check that system reader is able to get Aggregate
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-admin is not able to get Aggregate
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
+ # With scope checks enabled, only project-scoped admins are
+ # able to perform Aggregate Operations.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class AggregatesScopeTypeNoLegacyPolicyTest(AggregatesScopeTypePolicyTest):
+ """Test Aggregates APIs policies with no legacy deprecated rules
+ and scope checks enabled, which means scope + new defaults, so
+ only project admin is able to perform Aggregate Operations.
+ """
+
+ without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_assisted_volume_snapshots.py b/nova/tests/unit/policies/test_assisted_volume_snapshots.py
index 3c9836e07b..dce62e5bcc 100644
--- a/nova/tests/unit/policies/test_assisted_volume_snapshots.py
+++ b/nova/tests/unit/policies/test_assisted_volume_snapshots.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
import urllib
@@ -32,18 +33,12 @@ class AssistedVolumeSnapshotPolicyTest(base.BasePolicyTest):
super(AssistedVolumeSnapshotPolicyTest, self).setUp()
self.controller = snapshots.AssistedVolumeSnapshotsController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to take volume snapshot.
- self.admin_authorized_contexts = [
+ # By default, legacy rules are enabled and scope checks are disabled.
+ # System admin, legacy admin, and project admin are able to
+ # take volume snapshots.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to take volume snapshot.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
@mock.patch('nova.compute.api.API.volume_snapshot_create')
def test_assisted_create_policy(self, mock_create):
@@ -52,10 +47,9 @@ class AssistedVolumeSnapshotPolicyTest(base.BasePolicyTest):
'create_info': {'type': 'qcow2',
'new_file': 'new_file',
'snapshot_id': 'snapshot_id'}}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, body=body)
@mock.patch('nova.compute.api.API.volume_snapshot_delete')
def test_assisted_delete_policy(self, mock_delete):
@@ -64,11 +58,20 @@ class AssistedVolumeSnapshotPolicyTest(base.BasePolicyTest):
'delete_info': jsonutils.dumps({'volume_id': '1'}),
}
req = fakes.HTTPRequest.blank('?%s' % urllib.parse.urlencode(params))
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- req, 1)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ req, 1)
+
+
+class AssistedSnapshotNoLegacyNoScopePolicyTest(
+ AssistedVolumeSnapshotPolicyTest):
+ """Test Assisted Snapshot APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
class AssistedSnapshotScopeTypePolicyTest(AssistedVolumeSnapshotPolicyTest):
@@ -85,16 +88,15 @@ class AssistedSnapshotScopeTypePolicyTest(AssistedVolumeSnapshotPolicyTest):
super(AssistedSnapshotScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to take volume snapshot.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to take volume
- # snapshot.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
+ # With scope checks enabled, system admin is not able to
+ # take a volume snapshot.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+
+
+class AssistedSnapshotScopeTypeNoLegacyPolicyTest(
+ AssistedSnapshotScopeTypePolicyTest):
+ """Test os-volume-attachments APIs policies with system scope enabled,
+ and no legacy deprecated rules.
+ """
+ without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_attach_interfaces.py b/nova/tests/unit/policies/test_attach_interfaces.py
index 05f62d5cf0..33c531c9c7 100644
--- a/nova/tests/unit/policies/test_attach_interfaces.py
+++ b/nova/tests/unit/policies/test_attach_interfaces.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -47,40 +48,25 @@ class AttachInterfacesPolicyTest(base.BasePolicyTest):
vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- self.admin_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_foo_context,
- self.project_reader_context, self.project_member_context
- ]
-
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
- self.reader_authorized_contexts = [
+ # With legacy rules and no scope checks, all admins, project members,
+ # project readers, and any other project role (because the legacy rule
+ # allows the server owner, i.e. same project id, with no role check)
+ # are able to attach or detach an interface from a server.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context, self.project_foo_context
- ]
-
- self.reader_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
+ # and they can get their own server's attached interfaces.
+ self.project_reader_authorized_contexts = (
+ self.project_member_authorized_contexts)
@mock.patch('nova.compute.api.API.get')
@mock.patch('nova.network.neutron.API.list_ports')
def test_index_interfaces_policy(self, mock_port, mock_get):
rule_name = "os_compute_api:os-attach-interfaces:list"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req, uuids.fake_id)
@mock.patch('nova.compute.api.API.get')
@mock.patch('nova.network.neutron.API.show_port')
@@ -97,11 +83,10 @@ class AttachInterfacesPolicyTest(base.BasePolicyTest):
"fixed_ips": ["10.0.2.2"],
"device_id": server_id,
}}
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, server_id, port_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, server_id, port_id)
@mock.patch('nova.compute.api.API.get')
@mock.patch('nova.api.openstack.compute.attach_interfaces'
@@ -110,19 +95,43 @@ class AttachInterfacesPolicyTest(base.BasePolicyTest):
def test_attach_interface(self, mock_interface, mock_port, mock_get):
rule_name = "os_compute_api:os-attach-interfaces:create"
body = {'interfaceAttachment': {'net_id': uuids.fake_id}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, uuids.fake_id, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, uuids.fake_id, body=body)
@mock.patch('nova.compute.api.API.get')
@mock.patch('nova.compute.api.API.detach_interface')
def test_delete_interface(self, mock_detach, mock_get):
rule_name = "os_compute_api:os-attach-interfaces:delete"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, uuids.fake_id, uuids.fake_id)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, uuids.fake_id, uuids.fake_id)
+
+
+class AttachInterfacesNoLegacyNoScopePolicyTest(AttachInterfacesPolicyTest):
+ """Test Attach Interfaces APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ ai_policies.POLICY_ROOT % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ ai_policies.POLICY_ROOT % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ ai_policies.POLICY_ROOT % 'create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ ai_policies.POLICY_ROOT % 'delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(AttachInterfacesNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, legacy admin loses power.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class AttachInterfacesScopeTypePolicyTest(AttachInterfacesPolicyTest):
@@ -138,6 +147,11 @@ class AttachInterfacesScopeTypePolicyTest(AttachInterfacesPolicyTest):
def setUp(self):
super(AttachInterfacesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users are no longer allowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
@@ -173,12 +187,12 @@ class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
@mock.patch('nova.network.neutron.API.list_ports')
def test_deprecated_policy_overridden_rule_is_checked(self, mock_port,
mock_get):
- # Test to verify if deprecatd overridden policy is working.
+ # Test to verify if deprecated overridden policy is working.
# check for success as admin role. Deprecated rule
# has been overridden with admin checks in policy.yaml
# If admin role pass it means overridden rule is enforced by
- # olso.policy because new default is system or project reader and the
+ # oslo.policy because new default is system or project reader and the
# old default is admin.
self.controller.index(self.admin_req, uuids.fake_id)
@@ -192,55 +206,27 @@ class AttachInterfacesDeprecatedPolicyTest(base.BasePolicyTest):
exc.format_message())
-class AttachInterfacesNoLegacyPolicyTest(AttachInterfacesPolicyTest):
+class AttachInterfacesScopeTypeNoLegacyPolicyTest(
+ AttachInterfacesScopeTypePolicyTest):
"""Test Attach Interfaces APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
ai_policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'create':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
ai_policies.POLICY_ROOT % 'delete':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(AttachInterfacesNoLegacyPolicyTest, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system or projct admin or owner is able to
- # create or delete interfaces.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to
- # create or delete interfaces.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
- # Check that system reader or projct is able to
- # create or delete interfaces.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context
- ]
-
- # Check that non-system reader nd non-admin/owner is not able to
- # create or delete interfaces.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ super(AttachInterfacesScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy rules and scope enabled, only project admin, member,
+ # and reader will be allowed to operate on server interfaces.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_availability_zone.py b/nova/tests/unit/policies/test_availability_zone.py
index 454692fde8..1852f8444c 100644
--- a/nova/tests/unit/policies/test_availability_zone.py
+++ b/nova/tests/unit/policies/test_availability_zone.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import availability_zone
from nova.tests.unit.api.openstack import fakes
@@ -31,49 +31,38 @@ class AvailabilityZonePolicyTest(base.BasePolicyTest):
self.controller = availability_zone.AvailabilityZoneController()
self.req = fakes.HTTPRequest.blank('')
- # Check that everyone is able to list the AZ
- self.everyone_authorized_contexts = [
+ # With legacy rules enabled and scope checks disabled by default,
+ # system admin, legacy admin, and project admin will be able to get
+ # AZ with host information.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_member_context, self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = []
-
- # Check that system reader is able to list the AZ Detail
- # NOTE(gmann): Until old default rule which is admin_api is
- # deprecated and not removed, project admin and legacy admin
- # will be able to list the AZ. This make sure that existing
- # tokens will keep working even we have changed this policy defaults
- # to reader role.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
self.project_admin_context]
- # Check that non-system-reader are not able to list the AZ.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
+ self.project_authorized_contexts = self.all_contexts
@mock.patch('nova.objects.Instance.save')
def test_availability_zone_list_policy(self, mock_save):
rule_name = "os_compute_api:os-availability-zone:list"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
def test_availability_zone_detail_policy(self):
rule_name = "os_compute_api:os-availability-zone:detail"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.detail,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.detail,
+ self.req)
+
+
+class AvailabilityZoneNoLegacyNoScopePolicyTest(AvailabilityZonePolicyTest):
+ """Test Availability Zones APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only. In this case
+ system admin, legacy admin, and project admin will be able to get
+ AZ with host information. Legacy admin will be allowed as policy
+ is just admin if no scope checks.
+
+ """
+
+ without_deprecated_rules = True
class AvailabilityZoneScopeTypePolicyTest(AvailabilityZonePolicyTest):
@@ -91,15 +80,17 @@ class AvailabilityZoneScopeTypePolicyTest(AvailabilityZonePolicyTest):
super(AvailabilityZoneScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to list the AZ.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-reader is not able to list AZ.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
+ # With scope checks enabled, only project-scoped admins are
+ # able to get AZ with host information.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+ self.project_authorized_contexts = self.all_project_contexts
+
+
+class AZScopeTypeNoLegacyPolicyTest(AvailabilityZoneScopeTypePolicyTest):
+ """Test Availability Zones APIs policies with no legacy deprecated rules
+ and scope checks enabled which means scope + new defaults so
+ only system admin is able to get AZ with host information.
+ """
+
+ without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_baremetal_nodes.py b/nova/tests/unit/policies/test_baremetal_nodes.py
index 77e6def26d..68f02087c4 100644
--- a/nova/tests/unit/policies/test_baremetal_nodes.py
+++ b/nova/tests/unit/policies/test_baremetal_nodes.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import baremetal_nodes
@@ -40,25 +41,17 @@ class BaremetalNodesPolicyTest(base.BasePolicyTest):
self.stub_out('nova.api.openstack.compute.'
'baremetal_nodes._get_ironic_client',
lambda *_: FAKE_IRONIC_CLIENT)
- # Check that system reader is able to get baremetal nodes.
- self.system_reader_authorized_contexts = [
+ # With legacy rules enabled and scope checks disabled by default, system
+ # admin, legacy admin, and project admin will be able to get baremetal nodes.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-reader is not able to get baremetal nodes.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context
- ]
+ self.project_admin_context]
def test_index_nodes_policy(self):
rule_name = "os_compute_api:os-baremetal-nodes:list"
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
@mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
@@ -69,11 +62,22 @@ class BaremetalNodesPolicyTest(base.BasePolicyTest):
mock_get.return_value = node
mock_port.return_value = []
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, uuids.fake_id)
+
+
+class BaremetalNodesNoLegacyNoScopePolicyTest(BaremetalNodesPolicyTest):
+ """Test Baremetal Nodes APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only. In that case
+ system admin, legacy admin, and project admin will be able to get
+ Baremetal nodes Legacy admin will be allowed as policy is just admin if
+ no scope checks.
+
+ """
+
+ without_deprecated_rules = True
class BaremetalNodesScopeTypePolicyTest(BaremetalNodesPolicyTest):
@@ -91,28 +95,21 @@ class BaremetalNodesScopeTypePolicyTest(BaremetalNodesPolicyTest):
super(BaremetalNodesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to get baremetal nodes.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system or non-reader is not able to get
- # baremetal nodes.
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context
- ]
-
-
-class BaremetalNodesNoLegacyPolicyTest(BaremetalNodesScopeTypePolicyTest):
- """Test Baremetal Nodes APIs policies with system scope enabled,
- and no more deprecated rules.
+ # With scope checks enabled, only project-scoped admins are
+ # able to get baremetal nodes.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class BNScopeTypeNoLegacyPolicyTest(BaremetalNodesScopeTypePolicyTest):
+ """Test Baremetal Nodes APIs policies with no legacy deprecated rules
+ and scope checks enabled which means scope + new defaults so
+ only system admin is able to get baremetal nodes.
"""
+
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'list':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
policies.BASE_POLICY_NAME % 'show':
- base_policy.SYSTEM_READER}
+ base_policy.ADMIN}
diff --git a/nova/tests/unit/policies/test_console_auth_tokens.py b/nova/tests/unit/policies/test_console_auth_tokens.py
index 27dbd59540..a658816538 100644
--- a/nova/tests/unit/policies/test_console_auth_tokens.py
+++ b/nova/tests/unit/policies/test_console_auth_tokens.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import console_auth_tokens
from nova.tests.unit.api.openstack import fakes
@@ -31,33 +31,29 @@ class ConsoleAuthTokensPolicyTest(base.BasePolicyTest):
self.controller = console_auth_tokens.ConsoleAuthTokensController()
self.req = fakes.HTTPRequest.blank('', version='2.31')
- # Check that system reader is able to get console connection
- # information.
+ # With legacy rules, any admin can get console connection information.
# NOTE(gmann): Until old default rule which is admin_api is
# deprecated and not removed, project admin and legacy admin
# will be able to get console. This make sure that existing
- # tokens will keep working even we have changed this policy defaults
- # to reader role.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
+ # tokens will keep working.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to get console connection
- # information.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_console_connect_info_token_policy(self, mock_validate):
rule_name = "os_compute_api:os-console-auth-tokens"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, fakes.FAKE_UUID)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, fakes.FAKE_UUID)
+
+
+class ConsoleAuthTokensNoLegacyNoScopeTest(ConsoleAuthTokensPolicyTest):
+ """Test Console Auth Tokens API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class ConsoleAuthTokensScopeTypePolicyTest(ConsoleAuthTokensPolicyTest):
@@ -75,17 +71,14 @@ class ConsoleAuthTokensScopeTypePolicyTest(ConsoleAuthTokensPolicyTest):
super(ConsoleAuthTokensScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to get console connection
- # information.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-reader is not able to get console connection
- # information.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
+ # With scope enabled, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+
+
+class ConsoleAuthTokensScopeTypeNoLegacyPolicyTest(
+ ConsoleAuthTokensScopeTypePolicyTest):
+ """Test Console Auth Tokens APIs policies with system scope enabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_console_output.py b/nova/tests/unit/policies/test_console_output.py
index 3c16f5c1fa..c1bccf1d55 100644
--- a/nova/tests/unit/policies/test_console_output.py
+++ b/nova/tests/unit/policies/test_console_output.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -43,30 +44,37 @@ class ConsoleOutputPolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or owner is able to get the server console.
- self.admin_authorized_contexts = [
+ # With legacy rules, any admin and any role in the
+ # project can get the server console.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin and non-owner is not able to get the server
- # console.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_reader_context, self.project_foo_context]
@mock.patch('nova.compute.api.API.get_console_output')
def test_console_output_policy(self, mock_console):
mock_console.return_value = '\n'.join([str(i) for i in range(2)])
rule_name = "os_compute_api:os-console-output"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.get_console_output,
- self.req, self.instance.uuid,
- body={'os-getConsoleOutput': {}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.get_console_output,
+ self.req, self.instance.uuid,
+ body={'os-getConsoleOutput': {}})
+
+
+class ConsoleOutputNoLegacyNoScopePolicyTest(ConsoleOutputPolicyTest):
+ """Test Server Console Output APIs policies with no legacy deprecated
+ rule and no scope check.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ConsoleOutputNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member is able to
+ # get the server console.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class ConsoleOutputScopeTypePolicyTest(ConsoleOutputPolicyTest):
@@ -83,31 +91,22 @@ class ConsoleOutputScopeTypePolicyTest(ConsoleOutputPolicyTest):
def setUp(self):
super(ConsoleOutputScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin is not allowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ConsoleOutputNoLegacyPolicyTest(ConsoleOutputPolicyTest):
+class ConsoleOutputScopeTypeNoLegacyPolicyTest(
+ ConsoleOutputScopeTypePolicyTest):
"""Test Console Output APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(ConsoleOutputNoLegacyPolicyTest, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
+ super(ConsoleOutputScopeTypeNoLegacyPolicyTest, self).setUp()
- # Check that system or projct admin or owner is able to
- # get the server console.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to
+ # With scope enabled and no legacy rules, only project admin/member can
# get the server console.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_create_backup.py b/nova/tests/unit/policies/test_create_backup.py
index 4985119201..b54ed366df 100644
--- a/nova/tests/unit/policies/test_create_backup.py
+++ b/nova/tests/unit/policies/test_create_backup.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -43,20 +44,14 @@ class CreateBackupPolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or owner is able to create server backup.
- self.admin_authorized_contexts = [
+ # With legacy rules and no scope checks, all admins, project members,
+ # project readers, and any other project role (because the legacy rule
+ # allows the server owner, i.e. same project id, with no role check)
+ # are able to create a server backup.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin and non-owner is not able to create server
- # backup.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_reader_context, self.project_foo_context]
@mock.patch('nova.compute.api.API.backup')
def test_create_backup_policy(self, mock_backup):
@@ -68,11 +63,26 @@ class CreateBackupPolicyTest(base.BasePolicyTest):
'rotation': 1,
},
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._create_backup,
- self.req, self.instance.uuid,
- body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller._create_backup,
+ self.req, self.instance.uuid,
+ body=body)
+
+
+class CreateBackupNoLegacyNoScopePolicyTest(CreateBackupPolicyTest):
+ """Test Create Backup server APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(CreateBackupNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to create the server backup.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class CreateBackupScopeTypePolicyTest(CreateBackupPolicyTest):
@@ -89,31 +99,20 @@ class CreateBackupScopeTypePolicyTest(CreateBackupPolicyTest):
def setUp(self):
super(CreateBackupScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users are not allowed to create the server backup.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class CreateBackupNoLegacyPolicyTest(CreateBackupPolicyTest):
+class CreateBackupScopeTypeNoLegacyPolicyTest(CreateBackupScopeTypePolicyTest):
"""Test Create Backup APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(CreateBackupNoLegacyPolicyTest, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system or projct admin or owner is able to create
- # server backup.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to
- # create server backup.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ super(CreateBackupScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope enabled and no legacy rules, only project admin/member
+ # will be able to create the server backup.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_deferred_delete.py b/nova/tests/unit/policies/test_deferred_delete.py
index ca2253df54..08bb0213f4 100644
--- a/nova/tests/unit/policies/test_deferred_delete.py
+++ b/nova/tests/unit/policies/test_deferred_delete.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -47,37 +48,29 @@ class DeferredDeletePolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or owner is able to force delete or restore server.
- self.admin_authorized_contexts = [
+ # With legacy rules and no scope checks, all admins, project members,
+ # project readers, and any other project role (because the legacy rule
+ # allows the server owner, i.e. same project id, with no role check)
+ # are able to force delete or restore a server.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin and non-owner is not able to force delete or
- # restore server.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_reader_context, self.project_foo_context]
@mock.patch('nova.compute.api.API.restore')
def test_restore_server_policy(self, mock_restore):
rule_name = dd_policies.BASE_POLICY_NAME % 'restore'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._restore,
- self.req, self.instance.uuid,
- body={'restore': {}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller._restore,
+ self.req, self.instance.uuid,
+ body={'restore': {}})
def test_force_delete_server_policy(self):
rule_name = dd_policies.BASE_POLICY_NAME % 'force'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._force_delete,
- self.req, self.instance.uuid,
- body={'forceDelete': {}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller._force_delete,
+ self.req, self.instance.uuid,
+ body={'forceDelete': {}})
def test_force_delete_server_policy_failed_with_other_user(self):
rule_name = dd_policies.BASE_POLICY_NAME % 'force'
@@ -103,6 +96,27 @@ class DeferredDeletePolicyTest(base.BasePolicyTest):
self.req.environ['nova.context'], self.instance)
+class DeferredDeleteNoLegacyNoScopePolicyTest(DeferredDeletePolicyTest):
+ """Test Deferred Delete server APIs policies with no legacy deprecated
+ rule and no scope check.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ dd_policies.BASE_POLICY_NAME % 'restore':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ dd_policies.BASE_POLICY_NAME % 'force':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(DeferredDeleteNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member is able to force
+ # delete or restore server.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class DeferredDeleteScopeTypePolicyTest(DeferredDeletePolicyTest):
"""Test Deferred Delete APIs policies with system scope enabled.
@@ -117,36 +131,27 @@ class DeferredDeleteScopeTypePolicyTest(DeferredDeletePolicyTest):
def setUp(self):
super(DeferredDeleteScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin is not allowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class DeferredDeleteNoLegacyPolicyTest(DeferredDeletePolicyTest):
+class DeferredDeleteScopeTypeNoLegacyPolicyTest(
+ DeferredDeleteScopeTypePolicyTest):
"""Test Deferred Delete APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
dd_policies.BASE_POLICY_NAME % 'restore':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
dd_policies.BASE_POLICY_NAME % 'force':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(DeferredDeleteNoLegacyPolicyTest, self).setUp()
+ super(DeferredDeleteScopeTypeNoLegacyPolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system or projct admin or owner is able to
- # force delete or restore server.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to
- # force delete or restore server.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ # With scope enabled and no legacy rules, only project admin/member is
+ # able to force delete or restore a server.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_evacuate.py b/nova/tests/unit/policies/test_evacuate.py
index 203cc136e9..b9e4c29dba 100644
--- a/nova/tests/unit/policies/test_evacuate.py
+++ b/nova/tests/unit/policies/test_evacuate.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -55,18 +56,12 @@ class EvacuatePolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin is able to evacuate the server
- self.admin_authorized_contexts = [
+ # By default, legacy rules are enabled and scope checks are disabled.
+ # System admin, legacy admin, and project admin are able to evacuate
+ # the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to evacuate the server
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
@mock.patch('nova.compute.api.API.evacuate')
def test_evacuate_policy(self, mock_evacuate):
@@ -75,11 +70,10 @@ class EvacuatePolicyTest(base.BasePolicyTest):
'onSharedStorage': 'False',
'adminPass': 'admin_pass'}
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._evacuate,
- self.req, uuids.fake_id,
- body=body)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name, self.controller._evacuate,
+ self.req, uuids.fake_id,
+ body=body)
def test_evacuate_policy_failed_with_other_user(self):
rule_name = "os_compute_api:os-evacuate"
@@ -109,7 +103,16 @@ class EvacuatePolicyTest(base.BasePolicyTest):
evacuate_mock.assert_called_once_with(
self.user_req.environ['nova.context'],
mock.ANY, 'my-host', False,
- 'MyNewPass', None)
+ 'MyNewPass', None, None)
+
+
+class EvacuateNoLegacyNoScopePolicyTest(EvacuatePolicyTest):
+ """Test Evacuate APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
class EvacuateScopeTypePolicyTest(EvacuatePolicyTest):
@@ -126,28 +129,14 @@ class EvacuateScopeTypePolicyTest(EvacuatePolicyTest):
def setUp(self):
super(EvacuateScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin will not be able to
+ # evacuate the server.
+ self.project_action_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class EvacuateNoLegacyPolicyTest(EvacuateScopeTypePolicyTest):
+class EvacuateScopeTypeNoLegacyPolicyTest(EvacuateScopeTypePolicyTest):
"""Test Evacuate APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system APIs.
+ and no more deprecated rules which means scope + new defaults.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(EvacuateNoLegacyPolicyTest, self).setUp()
-
- # Check that system admin is able to evacuate server.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to evacuate
- # server.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
diff --git a/nova/tests/unit/policies/test_extensions.py b/nova/tests/unit/policies/test_extensions.py
index 35f451087d..d2e3c6adde 100644
--- a/nova/tests/unit/policies/test_extensions.py
+++ b/nova/tests/unit/policies/test_extensions.py
@@ -71,11 +71,20 @@ class ExtensionsScopeTypePolicyTest(ExtensionsPolicyTest):
def setUp(self):
super(ExtensionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+ self.everyone_unauthorized_contexts = [
+ self.system_admin_context, self.system_member_context,
+ self.system_reader_context, self.system_foo_context]
class ExtensionsNoLegacyPolicyTest(ExtensionsScopeTypePolicyTest):
"""Test Extensions APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_flavor_access.py b/nova/tests/unit/policies/test_flavor_access.py
index 46d9368c4b..cfdbbd2470 100644
--- a/nova/tests/unit/policies/test_flavor_access.py
+++ b/nova/tests/unit/policies/test_flavor_access.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import flavor_access
@@ -49,62 +50,61 @@ class FlavorAccessPolicyTest(base.BasePolicyTest):
self.stub_out('nova.objects.flavor._get_projects_from_db',
lambda context, flavorid: [])
- # Check that admin is able to add/remove flavor access
- # to a tenant.
+ # With legacy rules and no scope checks, all admins are able to
+ # add/remove flavor access to a tenant.
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to add/remove flavor access
- # to a tenant.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that everyone is able to list flavor access
- # information which is nothing but bug#1867840.
- self.reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- self.reader_unauthorized_contexts = [
- ]
+ # With legacy rules, anyone can access flavor access info.
+ self.admin_index_authorized_contexts = self.all_contexts
def test_list_flavor_access_policy(self):
rule_name = fa_policy.BASE_POLICY_NAME
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller_index.index,
- self.req, '1')
+ self.common_policy_auth(self.admin_index_authorized_contexts,
+ rule_name, self.controller_index.index,
+ self.req, '1')
@mock.patch('nova.objects.Flavor.add_access')
def test_add_tenant_access_policy(self, mock_add):
rule_name = fa_policy.POLICY_ROOT % "add_tenant_access"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._add_tenant_access,
- self.req, '1',
- body={'addTenantAccess': {'tenant': 't1'}})
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller._add_tenant_access,
+ self.req, '1',
+ body={'addTenantAccess': {'tenant': 't1'}})
@mock.patch('nova.objects.Flavor.remove_access')
def test_remove_tenant_access_policy(self, mock_remove):
rule_name = fa_policy.POLICY_ROOT % "remove_tenant_access"
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._remove_tenant_access,
- self.req, '1',
- body={'removeTenantAccess': {'tenant': 't1'}})
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller._remove_tenant_access,
+ self.req, '1',
+ body={'removeTenantAccess': {'tenant': 't1'}})
+
+
+class FlavorAccessNoLegacyNoScopeTest(FlavorAccessPolicyTest):
+ """Test Flavor Access API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ fa_policy.POLICY_ROOT % "add_tenant_access":
+ base_policy.ADMIN,
+ fa_policy.POLICY_ROOT % "remove_tenant_access":
+ base_policy.ADMIN,
+ fa_policy.BASE_POLICY_NAME:
+ base_policy.ADMIN}
+
+ def setUp(self):
+ super(FlavorAccessNoLegacyNoScopeTest, self).setUp()
+
+ # With no legacy rules, all admins are able to list access info.
+ self.admin_index_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
class FlavorAccessScopeTypePolicyTest(FlavorAccessPolicyTest):
@@ -122,81 +122,32 @@ class FlavorAccessScopeTypePolicyTest(FlavorAccessPolicyTest):
super(FlavorAccessScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to add/remove flavor access
- # to a tenant.
+ # Scope checks remove system users' power.
self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system-admin is not able to add/remove flavor access
- # to a tenant.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that system user is able to list flavor access
- # information.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context]
- # Check that non-system is not able to list flavor access
- # information.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.other_project_member_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_reader_context,
- ]
-
-
-class FlavorAccessNoLegacyPolicyTest(FlavorAccessPolicyTest):
+ self.legacy_admin_context,
+ self.project_admin_context]
+ self.admin_index_authorized_contexts = self.all_project_contexts
+
+
+class FlavorAccessScopeTypeNoLegacyPolicyTest(FlavorAccessScopeTypePolicyTest):
"""Test FlavorAccess APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_redear APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
fa_policy.POLICY_ROOT % "add_tenant_access":
- base_policy.SYSTEM_ADMIN,
+ base_policy.ADMIN,
fa_policy.POLICY_ROOT % "remove_tenant_access":
- base_policy.SYSTEM_ADMIN,
+ base_policy.ADMIN,
fa_policy.BASE_POLICY_NAME:
- base_policy.SYSTEM_READER}
+ base_policy.ADMIN}
def setUp(self):
- super(FlavorAccessNoLegacyPolicyTest, self).setUp()
+ super(FlavorAccessScopeTypeNoLegacyPolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to add/remove flavor access
- # to a tenant.
+ # New defaults make this admin-only.
self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system-admin is not able to add/remove flavor access
- # to a tenant.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that system reader is able to list flavor access
- # information.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context]
- # Check that non-system-reader is not able to list flavor access
- # information.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.other_project_member_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_foo_context,
- self.other_project_reader_context,
- ]
+ self.legacy_admin_context,
+ self.project_admin_context]
+ self.admin_index_authorized_contexts = self.admin_authorized_contexts
diff --git a/nova/tests/unit/policies/test_flavor_extra_specs.py b/nova/tests/unit/policies/test_flavor_extra_specs.py
index 3129cb6213..f3c8cacd57 100644
--- a/nova/tests/unit/policies/test_flavor_extra_specs.py
+++ b/nova/tests/unit/policies/test_flavor_extra_specs.py
@@ -10,22 +10,17 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import flavor_manage
from nova.api.openstack.compute import flavors
from nova.api.openstack.compute import flavors_extraspecs
-from nova.api.openstack.compute import servers
-from nova.compute import vm_states
-from nova import objects
from nova.policies import flavor_extra_specs as policies
from nova.policies import flavor_manage as fm_policies
-from nova.policies import servers as s_policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_flavor
-from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
@@ -42,30 +37,7 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
self.controller = flavors_extraspecs.FlavorExtraSpecsController()
self.flavor_ctrl = flavors.FlavorsController()
self.fm_ctrl = flavor_manage.FlavorManageController()
- self.server_ctrl = servers.ServersController()
self.req = fakes.HTTPRequest.blank('')
- self.server_ctrl._view_builder._add_security_grps = mock.MagicMock()
- self.server_ctrl._view_builder._get_metadata = mock.MagicMock()
- self.server_ctrl._view_builder._get_addresses = mock.MagicMock()
- self.server_ctrl._view_builder._get_host_id = mock.MagicMock()
- self.server_ctrl._view_builder._get_fault = mock.MagicMock()
- self.server_ctrl._view_builder._add_host_status = mock.MagicMock()
-
- self.instance = fake_instance.fake_instance_obj(
- self.project_member_context,
- id=1, uuid=uuids.fake_id, project_id=self.project_id,
- vm_state=vm_states.ACTIVE)
-
- self.mock_get = self.useFixture(
- fixtures.MockPatch('nova.api.openstack.common.get_instance')).mock
- self.mock_get.return_value = self.instance
-
- fakes.stub_out_secgroup_api(
- self, security_groups=[{'name': 'default'}])
- self.mock_get_all = self.useFixture(fixtures.MockPatchObject(
- self.server_ctrl.compute_api, 'get_all')).mock
- self.mock_get_all.return_value = objects.InstanceList(
- objects=[self.instance])
def get_flavor_extra_specs(context, flavor_id):
return fake_flavor.fake_flavor_obj(
@@ -77,99 +49,72 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
self.stub_out('nova.api.openstack.common.get_flavor',
get_flavor_extra_specs)
- # Check that all are able to get flavor extra specs.
- self.all_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.all_unauthorized_contexts = []
- # Check that all system scoped are able to get flavor extra specs.
- self.all_system_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.all_system_unauthorized_contexts = []
-
- # Check that admin is able to create, update and delete flavor
- # extra specs.
- self.admin_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to create, update and
- # delete flavor extra specs.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # In the base/legacy case, all project and system contexts are
+ # authorized in the "anyone" case.
+ self.all_authorized_contexts = (self.all_project_contexts |
+ self.all_system_contexts)
+
+ # In the base/legacy case, all project and system contexts are
+ # authorized in the case of things that distinguish between
+ # scopes, since scope checking is disabled.
+ self.all_project_authorized_contexts = (self.all_project_contexts |
+ self.all_system_contexts)
+
+ # In the base/legacy case, any admin is an admin.
+ self.admin_authorized_contexts = set([self.project_admin_context,
+ self.system_admin_context,
+ self.legacy_admin_context])
@mock.patch('nova.objects.Flavor.save')
def test_create_flavor_extra_specs_policy(self, mock_save):
body = {'extra_specs': {'hw:numa_nodes': '1'}}
rule_name = policies.POLICY_ROOT % 'create'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, '1234',
- body=body)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, '1234',
+ body=body)
@mock.patch('nova.objects.Flavor._flavor_extra_specs_del')
@mock.patch('nova.objects.Flavor.save')
def test_delete_flavor_extra_specs_policy(self, mock_save, mock_delete):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, '1234', 'hw:cpu_policy')
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, '1234', 'hw:cpu_policy')
@mock.patch('nova.objects.Flavor.save')
def test_update_flavor_extra_specs_policy(self, mock_save):
body = {'hw:cpu_policy': 'shared'}
rule_name = policies.POLICY_ROOT % 'update'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, '1234', 'hw:cpu_policy',
- body=body)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, '1234', 'hw:cpu_policy',
+ body=body)
def test_show_flavor_extra_specs_policy(self):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.all_authorized_contexts,
- self.all_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, '1234',
- 'hw:cpu_policy')
+ self.common_policy_auth(self.all_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, '1234',
+ 'hw:cpu_policy')
def test_index_flavor_extra_specs_policy(self):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.all_authorized_contexts,
- self.all_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, '1234')
+ self.common_policy_auth(self.all_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, '1234')
def test_flavor_detail_with_extra_specs_policy(self):
fakes.stub_out_flavor_get_all(self)
rule_name = policies.POLICY_ROOT % 'index'
req = fakes.HTTPRequest.blank('', version='2.61')
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts, self.all_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_authorized_contexts,
rule_name, self.flavor_ctrl.detail, req,
fatal=False)
for resp in authorize_res:
@@ -181,8 +126,8 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
fakes.stub_out_flavor_get_by_flavor_id(self)
rule_name = policies.POLICY_ROOT % 'index'
req = fakes.HTTPRequest.blank('', version='2.61')
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts, self.all_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_authorized_contexts,
rule_name, self.flavor_ctrl.show, req, '1',
fatal=False)
for resp in authorize_res:
@@ -221,9 +166,8 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
"disk": 1,
}
}
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_system_authorized_contexts,
- self.all_system_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_project_authorized_contexts,
rule_name, self.fm_ctrl._create, req, body=body,
fatal=False)
for resp in authorize_res:
@@ -242,9 +186,8 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.61')
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_system_authorized_contexts,
- self.all_system_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_project_authorized_contexts,
rule_name, self.fm_ctrl._update, req, '1',
body={'flavor': {'description': None}},
fatal=False)
@@ -253,88 +196,6 @@ class FlavorExtraSpecsPolicyTest(base.BasePolicyTest):
for resp in unauthorize_res:
self.assertNotIn('extra_specs', resp['flavor'])
- def test_server_detail_with_extra_specs_policy(self):
- rule = s_policies.SERVERS % 'detail'
- # server 'detail' policy is checked before flavor extra specs 'index'
- # policy so we have to allow it for everyone otherwise it will fail
- # first for unauthorized contexts.
- self.policy.set_rules({rule: "@"}, overwrite=False)
- req = fakes.HTTPRequest.blank('', version='2.47')
- rule_name = policies.POLICY_ROOT % 'index'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts, self.all_unauthorized_contexts,
- rule_name, self.server_ctrl.detail, req,
- fatal=False)
- for resp in authorize_res:
- self.assertIn('extra_specs', resp['servers'][0]['flavor'])
- for resp in unauthorize_res:
- self.assertNotIn('extra_specs', resp['servers'][0]['flavor'])
-
- @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
- @mock.patch('nova.compute.api.API.get_instance_host_status')
- def test_server_show_with_extra_specs_policy(self, mock_get, mock_block):
- rule = s_policies.SERVERS % 'show'
- # server 'show' policy is checked before flavor extra specs 'index'
- # policy so we have to allow it for everyone otherwise it will fail
- # first for unauthorized contexts.
- self.policy.set_rules({rule: "@"}, overwrite=False)
- req = fakes.HTTPRequest.blank('', version='2.47')
- rule_name = policies.POLICY_ROOT % 'index'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts,
- self.all_unauthorized_contexts,
- rule_name, self.server_ctrl.show, req, 'fake',
- fatal=False)
- for resp in authorize_res:
- self.assertIn('extra_specs', resp['server']['flavor'])
- for resp in unauthorize_res:
- self.assertNotIn('extra_specs', resp['server']['flavor'])
-
- @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
- @mock.patch('nova.compute.api.API.get_instance_host_status')
- @mock.patch('nova.compute.api.API.rebuild')
- def test_server_rebuild_with_extra_specs_policy(self, mock_rebuild,
- mock_get, mock_bdm):
- rule = s_policies.SERVERS % 'rebuild'
- # server 'rebuild' policy is checked before flavor extra specs 'index'
- # policy so we have to allow it for everyone otherwise it will fail
- # first for unauthorized contexts.
- self.policy.set_rules({rule: "@"}, overwrite=False)
- req = fakes.HTTPRequest.blank('', version='2.47')
- rule_name = policies.POLICY_ROOT % 'index'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts,
- self.all_unauthorized_contexts,
- rule_name, self.server_ctrl._action_rebuild,
- req, self.instance.uuid,
- body={'rebuild': {"imageRef": uuids.fake_id}},
- fatal=False)
- for resp in authorize_res:
- self.assertIn('extra_specs', resp.obj['server']['flavor'])
- for resp in unauthorize_res:
- self.assertNotIn('extra_specs', resp.obj['server']['flavor'])
-
- @mock.patch('nova.compute.api.API.update_instance')
- def test_server_update_with_extra_specs_policy(self, mock_update):
- rule = s_policies.SERVERS % 'update'
- # server 'update' policy is checked before flavor extra specs 'index'
- # policy so we have to allow it for everyone otherwise it will fail
- # first for unauthorized contexts.
- self.policy.set_rules({rule: "@"}, overwrite=False)
- req = fakes.HTTPRequest.blank('', version='2.47')
- rule_name = policies.POLICY_ROOT % 'index'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.all_authorized_contexts,
- self.all_unauthorized_contexts,
- rule_name, self.server_ctrl.update,
- req, self.instance.uuid,
- body={'server': {'name': 'test'}},
- fatal=False)
- for resp in authorize_res:
- self.assertIn('extra_specs', resp['server']['flavor'])
- for resp in unauthorize_res:
- self.assertNotIn('extra_specs', resp['server']['flavor'])
-
class FlavorExtraSpecsScopeTypePolicyTest(FlavorExtraSpecsPolicyTest):
"""Test Flavor Extra Specs APIs policies with system scope enabled.
@@ -350,65 +211,53 @@ class FlavorExtraSpecsScopeTypePolicyTest(FlavorExtraSpecsPolicyTest):
super(FlavorExtraSpecsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that all system scoped are able to get flavor extra specs.
- self.all_system_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context
- ]
- self.all_system_unauthorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that system admin is able to create, update and delete flavor
- # extra specs.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system admin is not able to create, update and
- # delete flavor extra specs.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # Only project users are authorized
+ self.reduce_set('all_project_authorized', self.all_project_contexts)
+ self.reduce_set('all_authorized', self.all_project_contexts)
+
+ # Only admins can do admin things
+ self.admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class FlavorExtraSpecsNoLegacyNoScopeTest(FlavorExtraSpecsPolicyTest):
+ """Test Flavor Extra Specs API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(FlavorExtraSpecsNoLegacyNoScopeTest, self).setUp()
+
+ # Disabling legacy rules means that random roles no longer
+ # have power, but without scope checking there is no
+ # difference between project and system
+ everything_but_foo = (
+ self.all_project_contexts | self.all_system_contexts) - set([
+ self.system_foo_context,
+ self.project_foo_context,
+ ])
+ self.reduce_set('all_project_authorized', everything_but_foo)
+ self.reduce_set('all_authorized', everything_but_foo)
class FlavorExtraSpecsNoLegacyPolicyTest(FlavorExtraSpecsScopeTypePolicyTest):
"""Test Flavor Extra Specs APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
super(FlavorExtraSpecsNoLegacyPolicyTest, self).setUp()
- # Check that system or project reader are able to get flavor
- # extra specs.
- self.all_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.system_member_context,
- self.system_reader_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.all_unauthorized_contexts = [
- self.project_foo_context, self.system_foo_context
- ]
- # Check that all system scoped reader are able to get flavor
- # extra specs.
- self.all_system_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context
- ]
- self.all_system_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # Non-legacy rules do not imply random roles have any
+ # access. Same note as above, regarding other_project_*
+ # contexts. With scope checking enabled, project and system
+ # contexts stay separate.
+ self.reduce_set(
+ 'all_project_authorized',
+ self.all_project_contexts - set([self.project_foo_context]))
+ everything_but_foo_and_system = (
+ self.all_contexts - set([
+ self.project_foo_context,
+ ]) - self.all_system_contexts)
+ self.reduce_set('all_authorized', everything_but_foo_and_system)
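For readers following this refactor: the tests above now pass only the authorized contexts to common_policy_auth() and narrow pre-built context sets with reduce_set(), instead of enumerating a matching *_unauthorized_contexts list for every rule. The real helpers live in the policy test base class and are not shown in this diff; the following is only a minimal sketch of the assumed semantics (the class name, signatures and the PermissionError stand-in are illustrative assumptions, not Nova's BasePolicyTest):

class PolicyAuthHelperSketch:
    def __init__(self, all_contexts):
        # Every request context the test matrix knows about.
        self.all_contexts = set(all_contexts)

    def reduce_set(self, name, new_set):
        # Shrink a previously defined "<name>_contexts" attribute; the
        # subclasses above only ever narrow the parent's set.
        current = getattr(self, name + '_contexts')
        setattr(self, name + '_contexts', current & set(new_set))

    def common_policy_auth(self, authorized_contexts, rule_name, func,
                           *args, fatal=True, **kwargs):
        # rule_name is what a real helper would enforce; unused in this toy.
        # The unauthorized set is no longer spelled out per test; it is
        # derived as the complement of the authorized set.
        unauthorized_contexts = self.all_contexts - set(authorized_contexts)
        authorize_res = [func(ctx, *args, **kwargs)
                         for ctx in authorized_contexts]
        unauthorize_res = []
        for ctx in unauthorized_contexts:
            try:
                result = func(ctx, *args, **kwargs)
            except PermissionError:
                # In the fatal case a real test would assert that policy
                # enforcement rejected the call for this context.
                continue
            if not fatal:
                # With fatal=False the caller inspects the response body
                # instead (e.g. checks that 'extra_specs' is absent).
                unauthorize_res.append(result)
        return authorize_res, unauthorize_res

Under this reading, every *_unauthorized_contexts list deleted above is redundant by construction: it is always all_contexts minus the corresponding authorized set.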
diff --git a/nova/tests/unit/policies/test_flavor_manage.py b/nova/tests/unit/policies/test_flavor_manage.py
index 8a890a85af..0663a689cb 100644
--- a/nova/tests/unit/policies/test_flavor_manage.py
+++ b/nova/tests/unit/policies/test_flavor_manage.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import flavor_manage
@@ -31,18 +32,11 @@ class FlavorManagePolicyTest(base.BasePolicyTest):
super(FlavorManagePolicyTest, self).setUp()
self.controller = flavor_manage.FlavorManageController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to manage the flavors.
+ # With legacy rule and no scope checks, all admins can manage
+ # the flavors.
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to manage the flavors.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
def test_create_flavor_policy(self):
rule_name = fm_policies.POLICY_ROOT % 'create'
@@ -67,29 +61,34 @@ class FlavorManagePolicyTest(base.BasePolicyTest):
"disk": 1,
}
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._create,
- self.req, body=body)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name, self.controller._create,
+ self.req, body=body)
@mock.patch('nova.objects.Flavor.get_by_flavor_id')
@mock.patch('nova.objects.Flavor.save')
def test_update_flavor_policy(self, mock_save, mock_get):
rule_name = fm_policies.POLICY_ROOT % 'update'
req = fakes.HTTPRequest.blank('', version='2.55')
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._update,
- req, uuids.fake_id,
- body={'flavor': {'description': None}})
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name, self.controller._update,
+ req, uuids.fake_id,
+ body={'flavor': {'description': None}})
@mock.patch('nova.objects.Flavor.destroy')
def test_delete_flavor_policy(self, mock_delete):
rule_name = fm_policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._delete,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name, self.controller._delete,
+ self.req, uuids.fake_id)
+
+
+class FlavorManageNoLegacyNoScopeTest(FlavorManagePolicyTest):
+ """Test Flavor Access API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class FlavorManageScopeTypePolicyTest(FlavorManagePolicyTest):
@@ -106,23 +105,16 @@ class FlavorManageScopeTypePolicyTest(FlavorManagePolicyTest):
super(FlavorManageScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to manage the flavors.
+ # With scope enabled, only project admin is able to manage
+ # the flavors.
self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system-admin is not able to manage the flavors.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
-
-class FlavorManageNoLegacyPolicyTest(FlavorManageScopeTypePolicyTest):
+ self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class FlavorManageScopeTypeNoLegacyPolicyTest(
+ FlavorManageScopeTypePolicyTest):
"""Test Flavor Manage APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_floating_ip_pools.py b/nova/tests/unit/policies/test_floating_ip_pools.py
index 08f36134d5..551f482bd4 100644
--- a/nova/tests/unit/policies/test_floating_ip_pools.py
+++ b/nova/tests/unit/policies/test_floating_ip_pools.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import floating_ip_pools
from nova.tests.unit.api.openstack import fakes
@@ -32,15 +32,15 @@ class FloatingIPPoolsPolicyTest(base.BasePolicyTest):
self.req = fakes.HTTPRequest.blank('')
# Check that everyone is able to list FIP pools.
- self.everyone_authorized_contexts = [
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_reader_context,
self.other_project_member_context,
self.system_member_context, self.system_reader_context,
- self.system_foo_context]
- self.everyone_unauthorized_contexts = []
+ self.system_foo_context])
+ self.everyone_unauthorized_contexts = set([])
@mock.patch('nova.network.neutron.API.get_floating_ip_pools')
def test_floating_ip_pools_policy(self, mock_get):
@@ -66,6 +66,10 @@ class FloatingIPPoolsScopeTypePolicyTest(FloatingIPPoolsPolicyTest):
super(FloatingIPPoolsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.reduce_set('everyone_authorized', self.all_project_contexts)
+ self.everyone_unauthorized_contexts = (
+ self.all_contexts - self.everyone_authorized_contexts)
+
class FloatingIPPoolsNoLegacyPolicyTest(FloatingIPPoolsScopeTypePolicyTest):
"""Test Floating IP Pools APIs policies with system scope enabled,
diff --git a/nova/tests/unit/policies/test_floating_ips.py b/nova/tests/unit/policies/test_floating_ips.py
index 55453e7708..26c721e9e9 100644
--- a/nova/tests/unit/policies/test_floating_ips.py
+++ b/nova/tests/unit/policies/test_floating_ips.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -48,12 +49,13 @@ class FloatingIPPolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that everyone is able to perform crud operation on FIP.
+ # With legacy rule and scope checks disabled, everyone is able to
+ # perform CRUD operations on FIPs.
# NOTE: Nova cannot verify the FIP owner during nova policy
# enforcement so will be passing context's project_id as target to
# policy and always pass. If requester is not admin or owner
# of FIP then neutron will be returning the appropriate error.
- self.reader_authorized_contexts = [
+ self.member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
@@ -62,48 +64,45 @@ class FloatingIPPolicyTest(base.BasePolicyTest):
self.system_foo_context,
self.other_project_member_context
]
- self.reader_unauthorized_contexts = []
- self.cd_authorized_contexts = self.reader_authorized_contexts
- self.cd_unauthorized_contexts = self.reader_unauthorized_contexts
- # Check that admin or owner is able to add/delete FIP to server.
- self.admin_or_owner_authorized_contexts = [
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin and non-owner is not able to add/delete
- # FIP to server.
- self.admin_or_owner_unauthorized_contexts = [
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
+ self.other_project_member_context
]
+ # With legacy rule and no scope checks, all admins, project members,
+ # project readers, and any other role in the project (the legacy rule
+ # only checks the server owner's project_id, not the role) are able
+ # to add and delete FIPs on a server.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
@mock.patch('nova.network.neutron.API.get_floating_ip')
def test_show_floating_ip_policy(self, mock_get):
rule_name = "os_compute_api:os-floating-ips:show"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, uuids.fake_id)
@mock.patch('nova.network.neutron.API.get_floating_ips_by_project')
def test_index_floating_ip_policy(self, mock_get):
rule_name = "os_compute_api:os-floating-ips:list"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.network.neutron.API.get_floating_ip_by_address')
@mock.patch('nova.network.neutron.API.allocate_floating_ip')
def test_create_floating_ip_policy(self, mock_create, mock_get):
rule_name = "os_compute_api:os-floating-ips:create"
- self.common_policy_check(self.cd_authorized_contexts,
- self.cd_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.member_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, uuids.fake_id)
@mock.patch('nova.network.neutron.API.get_instance_id_by_floating_address')
@mock.patch('nova.network.neutron.API.get_floating_ip')
@@ -112,10 +111,9 @@ class FloatingIPPolicyTest(base.BasePolicyTest):
def test_delete_floating_ip_policy(self, mock_delete, mock_get,
mock_instance):
rule_name = "os_compute_api:os-floating-ips:delete"
- self.common_policy_check(self.cd_authorized_contexts,
- self.cd_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.member_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, uuids.fake_id)
@mock.patch('nova.objects.Instance.get_network_info')
@mock.patch('nova.network.neutron.API.associate_floating_ip')
@@ -127,11 +125,10 @@ class FloatingIPPolicyTest(base.BasePolicyTest):
mock_net.return_value = network_model.NetworkInfo.hydrate(ninfo)
body = {'addFloatingIp': {
'address': '1.2.3.4'}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.action_controller._add_floating_ip,
- self.req, self.instance.uuid, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.action_controller._add_floating_ip,
+ self.req, self.instance.uuid, body=body)
@mock.patch('nova.network.neutron.API.get_instance_id_by_floating_address')
@mock.patch('nova.network.neutron.API.get_floating_ip_by_address')
@@ -142,11 +139,53 @@ class FloatingIPPolicyTest(base.BasePolicyTest):
mock_instance.return_value = self.instance.uuid
body = {'removeFloatingIp': {
'address': '1.2.3.4'}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.action_controller._remove_floating_ip,
- self.req, self.instance.uuid, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.action_controller._remove_floating_ip,
+ self.req, self.instance.uuid, body=body)
+
+
+class FloatingIPNoLegacyNoScopePolicyTest(FloatingIPPolicyTest):
+ """Test Floating IP APIs policies with system scope disabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ fip_policies.BASE_POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ fip_policies.BASE_POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ fip_policies.BASE_POLICY_NAME % 'create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ fip_policies.BASE_POLICY_NAME % 'delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ fip_policies.BASE_POLICY_NAME % 'add':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ fip_policies.BASE_POLICY_NAME % 'remove':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(FloatingIPNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to add/remove FIP to server.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ # With no legacy rule, other project roles such as foo are not able
+ # to operate on FIPs.
+ self.member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.system_member_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
class FloatingIPScopeTypePolicyTest(FloatingIPPolicyTest):
@@ -163,63 +202,58 @@ class FloatingIPScopeTypePolicyTest(FloatingIPPolicyTest):
def setUp(self):
super(FloatingIPScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users are not allowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
-class FloatingIPNoLegacyPolicyTest(FloatingIPScopeTypePolicyTest):
+class FloatingIPScopeTypeNoLegacyPolicyTest(FloatingIPScopeTypePolicyTest):
"""Test Floating IP APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
fip_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
fip_policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(FloatingIPNoLegacyPolicyTest, self).setUp()
+ super(FloatingIPScopeTypeNoLegacyPolicyTest, self).setUp()
# Check that system admin or owner is able to
# add/delete FIP to server.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- ]
- # Check that non-system and non-admin/owner is not able
- # to add/delete FIP to server.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ # With no legacy rule and scope enabled, system users and other
+ # project roles like foo are not able to operate on FIPs.
+ self.member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context,
+ self.other_project_member_context
]
- self.reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
- self.system_member_context, self.system_reader_context,
- self.other_project_member_context,
self.other_project_reader_context,
- ]
- self.reader_unauthorized_contexts = [
- self.project_foo_context,
- self.system_foo_context
- ]
- self.cd_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.project_admin_context, self.project_member_context,
- self.legacy_admin_context, self.other_project_member_context
- ]
- self.cd_unauthorized_contexts = [
- self.system_reader_context,
- self.project_reader_context, self.project_foo_context,
- self.system_foo_context, self.other_project_reader_context
+ self.other_project_member_context
]
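The floating-IP hunks above (and the instance-actions ones below) also start referencing named convenience sets such as project_member_or_admin_with_no_scope_no_legacy, project_m_r_or_admin_with_scope_and_legacy and project_reader_or_admin_with_scope_no_legacy, whose definitions are not part of this diff. Below is a hedged sketch of how such a set could be derived from the two knobs these test classes vary, scope enforcement and the deprecated legacy rules; the context names and the derivation are assumptions for illustration only:

def project_member_like_set(ctx, enforce_scope, legacy_rules):
    """Assumed derivation of a 'project member or admin' style set."""
    # Project admin and project member are always allowed, as is the
    # legacy admin (an admin with a project-scoped token).
    authorized = {ctx['project_admin'], ctx['project_member'],
                  ctx['legacy_admin']}
    if legacy_rules:
        # The deprecated rule only checks project_id, not the role, so
        # any other role in the project (reader, foo, ...) still passes.
        authorized |= {ctx['project_reader'], ctx['project_foo']}
    if not enforce_scope:
        # Without scope checks a system-scoped admin token also works.
        authorized.add(ctx['system_admin'])
    return authorized

For example, project_member_or_admin_with_no_scope_no_legacy would correspond to project_member_like_set(ctx, enforce_scope=False, legacy_rules=False) under this assumed derivation.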
diff --git a/nova/tests/unit/policies/test_hosts.py b/nova/tests/unit/policies/test_hosts.py
index cdce7d2b1c..e07c907cf8 100644
--- a/nova/tests/unit/policies/test_hosts.py
+++ b/nova/tests/unit/policies/test_hosts.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import hosts
from nova.policies import base as base_policy
@@ -32,37 +32,19 @@ class HostsPolicyTest(base.BasePolicyTest):
self.controller = hosts.HostController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to perform operations on hosts.
- self.system_admin_authorized_contexts = [
- self.system_admin_context, self.legacy_admin_context,
+ # With legacy rule and scope checks disabled by default, system admin,
+ # legacy admin, and project admin will be able to perform host
+ # operations.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to perform operations
- # on hosts.
- self.system_admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context, self.other_project_reader_context
- ]
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
- self.project_admin_context
- ]
- self.system_reader_unauthorized_contexts = [
- self.project_foo_context, self.system_foo_context,
- self.project_member_context, self.project_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
@mock.patch('nova.compute.api.HostAPI.service_get_all')
def test_list_hosts_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'list'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.context.set_target_cell')
@mock.patch('nova.objects.HostMapping.get_by_host')
@@ -71,41 +53,48 @@ class HostsPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.HostAPI.instance_get_all_by_host')
def test_show_host_policy(self, mock_get, mock_node, mock_map, mock_set):
rule_name = policies.POLICY_NAME % 'show'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, 11111)
def test_update_host_policy(self):
rule_name = policies.POLICY_NAME % 'update'
- self.common_policy_check(self.system_admin_authorized_contexts,
- self.system_admin_unauthorized_contexts,
- rule_name, self.controller.update,
- self.req, 11111, body={})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ self.req, 11111, body={})
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_reboot_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'reboot'
- self.common_policy_check(self.system_admin_authorized_contexts,
- self.system_admin_unauthorized_contexts,
- rule_name, self.controller.reboot,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.reboot,
+ self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_shutdown_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'shutdown'
- self.common_policy_check(self.system_admin_authorized_contexts,
- self.system_admin_unauthorized_contexts,
- rule_name, self.controller.shutdown,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.shutdown,
+ self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.host_power_action')
def test_startup_host_policy(self, mock_action):
rule_name = policies.POLICY_NAME % 'start'
- self.common_policy_check(self.system_admin_authorized_contexts,
- self.system_admin_unauthorized_contexts,
- rule_name, self.controller.startup,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.startup,
+ self.req, 11111)
+
+
+class HostsNoLegacyNoScopePolicyTest(HostsPolicyTest):
+ """Test Hosts APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only. In this case
+ system admin, legacy admin, and project admin will be able to perform
+ host operations. Legacy admin is allowed because the policy is just
+ 'admin' when scope checks are disabled.
+
+ """
+
+ without_deprecated_rules = True
class HostsScopeTypePolicyTest(HostsPolicyTest):
@@ -122,72 +111,29 @@ class HostsScopeTypePolicyTest(HostsPolicyTest):
super(HostsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to perform operations on hosts.
- self.system_admin_authorized_contexts = [
- self.system_admin_context]
- # Check that system non-admin is not able to perform operations
- # on hosts.
- self.system_admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context, self.other_project_reader_context
- ]
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context
- ]
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
-
-
-class HostsNoLegacyPolicyTest(HostsScopeTypePolicyTest):
- """Test Hosts APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ # With scope checks enabled, only legacy admin and project admin are
+ # able to perform host operations.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class HostsScopeTypeNoLegacyPolicyTest(HostsScopeTypePolicyTest):
+ """Test Hosts APIs policies with with no legacy deprecated rules
+ and scope checks enabled which means scope + new defaults. So
+ only system admin is able to perform hosts Operations.
"""
+
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
policies.POLICY_NAME % 'update':
- base_policy.SYSTEM_ADMIN,
+ base_policy.ADMIN,
policies.POLICY_NAME % 'reboot':
- base_policy.SYSTEM_ADMIN,
+ base_policy.ADMIN,
policies.POLICY_NAME % 'shutdown':
- base_policy.SYSTEM_ADMIN,
+ base_policy.ADMIN,
policies.POLICY_NAME % 'startup':
- base_policy.SYSTEM_ADMIN}
-
- def setUp(self):
- super(HostsNoLegacyPolicyTest, self).setUp()
-
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context
- ]
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.system_admin_authorized_contexts = [
- self.system_admin_context
- ]
- self.system_admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.project_admin_context, self.project_member_context,
- self.legacy_admin_context, self.other_project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_foo_context, self.other_project_reader_context
- ]
+ base_policy.ADMIN}
diff --git a/nova/tests/unit/policies/test_hypervisors.py b/nova/tests/unit/policies/test_hypervisors.py
index 2b9eefcfd9..dd17ebe2fe 100644
--- a/nova/tests/unit/policies/test_hypervisors.py
+++ b/nova/tests/unit/policies/test_hypervisors.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import hypervisors
from nova.policies import base as base_policy
@@ -36,76 +36,67 @@ class HypervisorsPolicyTest(base.BasePolicyTest):
self.controller.host_api.service_get_by_compute_host = mock.MagicMock()
self.controller.host_api.compute_node_get = mock.MagicMock()
- # Check that system scoped admin, member and reader are able to
- # perform operations on hypervisors.
- # NOTE(gmann): Until old default rule which is admin_api is
- # deprecated and not removed, project admin and legacy admin
- # will be able to get hypervisors. This make sure that existing
- # tokens will keep working even we have changed this policy defaults
- # to reader role.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
+ # With legacy rule and scope checks disabled by default, system admin,
+ # legacy admin, and project admin will be able to perform hypervisor
+ # operations.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-system-reader are not able to perform operations
- # on hypervisors
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
def test_list_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'list'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
def test_list_details_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'list-detail'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.detail,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.detail,
+ self.req)
def test_show_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'show'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.get_host_uptime')
def test_uptime_hypervisors_policy(self, mock_uptime):
rule_name = hv_policies.BASE_POLICY_NAME % 'uptime'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.uptime,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.uptime,
+ self.req, 11111)
def test_search_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'search'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.search,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.search,
+ self.req, 11111)
def test_servers_hypervisors_policy(self):
rule_name = hv_policies.BASE_POLICY_NAME % 'servers'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.servers,
- self.req, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.servers,
+ self.req, 11111)
@mock.patch('nova.compute.api.HostAPI.compute_node_statistics')
def test_statistics_hypervisors_policy(self, mock_statistics):
rule_name = hv_policies.BASE_POLICY_NAME % 'statistics'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.statistics,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.statistics,
+ self.req)
+
+
+class HypervisorsNoLegacyNoScopePolicyTest(HypervisorsPolicyTest):
+ """Test Hypervisors APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only. In this case
+ system admin, legacy admin, and project admin will be able to perform
+ hypervisor operations. Legacy admin is allowed because the policy is
+ just 'admin' when scope checks are disabled.
+ """
+
+ without_deprecated_rules = True
class HypervisorsScopeTypePolicyTest(HypervisorsPolicyTest):
@@ -122,40 +113,33 @@ class HypervisorsScopeTypePolicyTest(HypervisorsPolicyTest):
super(HypervisorsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to perform operations
- # on hypervisors.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-reader is not able to perform operations
- # on hypervisors.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
-
-class HypervisorsNoLegacyPolicyTest(HypervisorsScopeTypePolicyTest):
- """Test Hypervisors APIs policies with system scope enabled,
- and no more deprecated rules.
+ # With scope checks enabled, only legacy admin and project admin are
+ # able to perform hypervisor operations.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class HypervisorsScopeTypeNoLegacyPolicyTest(HypervisorsScopeTypePolicyTest):
+ """Test Hypervisors APIs policies with no legacy deprecated rules
+ and scope checks enabled, which means scope + new defaults, so
+ only admins are able to perform hypervisor operations.
"""
+
without_deprecated_rules = True
+
rules_without_deprecation = {
hv_policies.BASE_POLICY_NAME % 'list':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'list-detail':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'show':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'statistics':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'uptime':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'search':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
hv_policies.BASE_POLICY_NAME % 'servers':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
}
diff --git a/nova/tests/unit/policies/test_instance_actions.py b/nova/tests/unit/policies/test_instance_actions.py
index b3e43b3498..1ca9a66c14 100644
--- a/nova/tests/unit/policies/test_instance_actions.py
+++ b/nova/tests/unit/policies/test_instance_actions.py
@@ -11,8 +11,9 @@
# under the License.
import copy
+from unittest import mock
+
import fixtures
-import mock
from nova.api.openstack import api_version_request
from oslo_policy import policy as oslo_policy
@@ -62,33 +63,17 @@ class InstanceActionsPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that system reader are able to show the instance
- # actions events.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
+ # With legacy rule and no scope checks, any role in the project can
+ # get server actions, and any admin is able to get server actions
+ # with event details.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-system-reader are not able to show the instance
- # actions events.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
-
- self.project_or_system_reader_authorized_contexts = [
+ # and project reader can get their server actions without event details.
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context, self.project_foo_context
- ]
-
- self.project_or_system_reader_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
def _set_policy_rules(self, overwrite=True):
rules = {ia_policies.BASE_POLICY_NAME % 'show': '@'}
@@ -97,9 +82,8 @@ class InstanceActionsPolicyTest(base.BasePolicyTest):
def test_index_instance_action_policy(self):
rule_name = ia_policies.BASE_POLICY_NAME % "list"
- self.common_policy_check(
- self.project_or_system_reader_authorized_contexts,
- self.project_or_system_reader_unauthorized_contexts,
+ self.common_policy_auth(
+ self.project_reader_authorized_contexts,
rule_name, self.controller.index,
self.req, self.instance['uuid'])
@@ -108,9 +92,8 @@ class InstanceActionsPolicyTest(base.BasePolicyTest):
fake_action = self.fake_actions[FAKE_UUID][FAKE_REQUEST_ID]
mock_action_get.return_value = fake_action
rule_name = ia_policies.BASE_POLICY_NAME % "show"
- self.common_policy_check(
- self.project_or_system_reader_authorized_contexts,
- self.project_or_system_reader_unauthorized_contexts,
+ self.common_policy_auth(
+ self.project_reader_authorized_contexts,
rule_name, self.controller.show,
self.req, self.instance['uuid'], fake_action['request_id'])
@@ -131,9 +114,8 @@ class InstanceActionsPolicyTest(base.BasePolicyTest):
self._set_policy_rules(overwrite=False)
rule_name = ia_policies.BASE_POLICY_NAME % "events"
- authorize_res, unauthorize_res = self.common_policy_check(
- self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, self.instance['uuid'],
fake_action['request_id'], fatal=False)
@@ -149,6 +131,28 @@ class InstanceActionsPolicyTest(base.BasePolicyTest):
self.assertNotIn('events', action['instanceAction'])
+class InstanceActionsNoLegacyNoScopePolicyTest(InstanceActionsPolicyTest):
+ """Test os-instance-actions APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ ia_policies.BASE_POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ ia_policies.BASE_POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ ia_policies.BASE_POLICY_NAME % 'events':
+ base_policy.ADMIN,
+ }
+
+ def setUp(self):
+ super(InstanceActionsNoLegacyNoScopePolicyTest, self).setUp()
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
+
+
class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
"""Test os-instance-actions APIs Deprecated policies.
@@ -185,7 +189,7 @@ class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
@mock.patch('nova.api.openstack.common.get_instance')
def test_deprecated_policy_overridden_rule_is_checked(
self, mock_instance_get, mock_actions_get):
- # Test to verify if deprecatd overridden policy is working.
+ # Test to verify if deprecated overridden policy is working.
instance = fake_instance.fake_instance_obj(
self.admin_or_owner_req.environ['nova.context'])
@@ -193,7 +197,7 @@ class InstanceActionsDeprecatedPolicyTest(base.BasePolicyTest):
# Check for success as admin_or_owner role. Deprecated rule
# has been overridden with admin checks in policy.yaml
# If admin role pass it means overridden rule is enforced by
- # olso.policy because new default is system reader and the old
+ # oslo.policy because new default is system reader and the old
# default is admin.
self.controller.index(self.admin_or_owner_req, instance['uuid'])
@@ -221,6 +225,11 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
def setUp(self):
super(InstanceActionsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users are no longer allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
@mock.patch('nova.objects.InstanceActionEventList.get_by_action')
@mock.patch('nova.objects.InstanceAction.get_by_request_id')
@@ -241,9 +250,8 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
self._set_policy_rules(overwrite=False)
rule_name = ia_policies.BASE_POLICY_NAME % "events:details"
- authorize_res, unauthorize_res = self.common_policy_check(
- self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.show,
self.req, self.instance['uuid'],
fake_action['request_id'], fatal=False)
@@ -267,54 +275,25 @@ class InstanceActionsScopeTypePolicyTest(InstanceActionsPolicyTest):
self.assertNotIn('details', event)
-class InstanceActionsNoLegacyPolicyTest(InstanceActionsPolicyTest):
+class InstanceActionsScopeTypeNoLegacyPolicyTest(
+ InstanceActionsScopeTypePolicyTest):
"""Test os-instance-actions APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
ia_policies.BASE_POLICY_NAME % 'list':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
ia_policies.BASE_POLICY_NAME % 'events':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
}
def setUp(self):
- super(InstanceActionsNoLegacyPolicyTest, self).setUp()
- self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system reader are able to get the
- # instance action events.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_reader_context,
- self.system_member_context]
- # Check that non-system-reader are not able to
- # get the instance action events
- self.system_reader_unauthorized_contexts = [
- self.project_admin_context,
- self.system_foo_context, self.legacy_admin_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
-
- # Check that system or projct reader is able to
- # show the instance actions events.
- self.project_or_system_reader_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context,
- ]
-
- # Check that non-system or non-project reader is not able to
- # show the instance actions events.
- self.project_or_system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ super(InstanceActionsScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy rule and scope enabled, only project admin, member,
+ # and reader will be able to get server actions, and only admin can
+ # get them with event details.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_instance_usage_audit_log.py b/nova/tests/unit/policies/test_instance_usage_audit_log.py
index e320beacd2..71b0cdd2aa 100644
--- a/nova/tests/unit/policies/test_instance_usage_audit_log.py
+++ b/nova/tests/unit/policies/test_instance_usage_audit_log.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import instance_usage_audit_log as iual
from nova.policies import base as base_policy
@@ -35,37 +35,37 @@ class InstanceUsageAuditLogPolicyTest(base.BasePolicyTest):
self.controller.host_api.task_log_get_all = mock.MagicMock()
self.controller.host_api.service_get_all = mock.MagicMock()
- # Check that admin is able to get instance usage audit log.
- # NOTE(gmann): Until old default rule which is admin_api is
- # deprecated and not removed, project admin and legacy admin
- # will be able to get instance usage audit log. This make sure
- # that existing tokens will keep working even we have changed
- # this policy defaults to reader role.
- self.reader_authorized_contexts = [
+ # With legacy rule, any admin (admin_api) will be able to get the
+ # instance usage audit log.
+ self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-admin is not able to get instance usage audit log.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
+ self.project_admin_context]
def test_show_policy(self):
rule_name = iual_policies.BASE_POLICY_NAME % 'show'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, '2020-03-25 14:40:00')
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, '2020-03-25 14:40:00')
def test_index_policy(self):
rule_name = iual_policies.BASE_POLICY_NAME % 'list'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
+
+
+class InstanceUsageNoLegacyNoScopeTest(InstanceUsageAuditLogPolicyTest):
+ """Test Instance Usage API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ iual_policies.BASE_POLICY_NAME % 'list':
+ base_policy.ADMIN,
+ iual_policies.BASE_POLICY_NAME % 'show':
+ base_policy.ADMIN,
+ }
class InstanceUsageScopeTypePolicyTest(InstanceUsageAuditLogPolicyTest):
@@ -83,29 +83,21 @@ class InstanceUsageScopeTypePolicyTest(InstanceUsageAuditLogPolicyTest):
super(InstanceUsageScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to get instance usage audit log.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-admin is not able to get instance
- # usage audit log.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
-
-class InstanceUsageNoLegacyPolicyTest(InstanceUsageScopeTypePolicyTest):
+ # With scope checks enabled, system admin is not allowed.
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class InstanceUsageScopeTypeNoLegacyPolicyTest(
+ InstanceUsageScopeTypePolicyTest):
"""Test Instance Usage Audit Log APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
iual_policies.BASE_POLICY_NAME % 'list':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
iual_policies.BASE_POLICY_NAME % 'show':
- base_policy.SYSTEM_READER,
+ base_policy.ADMIN,
}
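The *NoLegacy* variants above rely on two class attributes: without_deprecated_rules disables the deprecated check strings, and rules_without_deprecation re-registers the listed policies with only their new defaults (here base_policy.ADMIN). A minimal sketch of how a base test class could honour those attributes, assuming the policy fixture exposes set_rules(); the real logic lives in nova/tests/unit/policies/base.py and may differ:

    import testtools

    class BasePolicyTestSketch(testtools.TestCase):
        # Illustrative only.
        without_deprecated_rules = False
        rules_without_deprecation = {}

        def setUp(self):
            super().setUp()
            if self.without_deprecated_rules:
                # Override the listed rules with their new defaults so the
                # deprecated check strings no longer grant access.
                self.policy.set_rules(self.rules_without_deprecation,
                                      overwrite=False)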
diff --git a/nova/tests/unit/policies/test_keypairs.py b/nova/tests/unit/policies/test_keypairs.py
index 4faefea2ef..ee39133b7a 100644
--- a/nova/tests/unit/policies/test_keypairs.py
+++ b/nova/tests/unit/policies/test_keypairs.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from nova.policies import keypairs as policies
from nova.api.openstack.compute import keypairs
@@ -34,7 +35,7 @@ class KeypairsPolicyTest(base.BasePolicyTest):
# Check that everyone is able to create, delete and get
# their keypairs.
- self.everyone_authorized_contexts = [
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
@@ -42,88 +43,58 @@ class KeypairsPolicyTest(base.BasePolicyTest):
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = []
+ ])
# Check that admin is able to create, delete and get
# other users keypairs.
- self.admin_authorized_contexts = [
+ self.admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to create, delete and get
- # other users keypairs.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
- # Check that system reader is able to get
- # other users keypairs.
- self.system_reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get
- # other users keypairs.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context])
@mock.patch('nova.compute.api.KeypairAPI.get_key_pairs')
def test_index_keypairs_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req)
@mock.patch('nova.compute.api.KeypairAPI.get_key_pairs')
def test_index_others_keypairs_policy(self, mock_get):
req = fakes.HTTPRequest.blank('?user_id=user2', version='2.10')
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- req)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ req)
@mock.patch('nova.compute.api.KeypairAPI.get_key_pair')
def test_show_keypairs_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, fakes.FAKE_UUID)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, fakes.FAKE_UUID)
@mock.patch('nova.compute.api.KeypairAPI.get_key_pair')
def test_show_others_keypairs_policy(self, mock_get):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('?user_id=user2', version='2.10')
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- req, fakes.FAKE_UUID)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ req, fakes.FAKE_UUID)
@mock.patch('nova.compute.api.KeypairAPI.create_key_pair')
def test_create_keypairs_policy(self, mock_create):
rule_name = policies.POLICY_ROOT % 'create'
mock_create.return_value = (test_keypair.fake_keypair, 'FAKE_KEY')
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req,
- body={'keypair': {'name': 'create_test'}})
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req,
+ body={'keypair': {'name': 'create_test'}})
@mock.patch('nova.compute.api.KeypairAPI.create_key_pair')
def test_create_others_keypairs_policy(self, mock_create):
@@ -132,31 +103,39 @@ class KeypairsPolicyTest(base.BasePolicyTest):
rule_name = policies.POLICY_ROOT % 'create'
mock_create.return_value = (test_keypair.fake_keypair, 'FAKE_KEY')
body = {'keypair': {'name': 'test2', 'user_id': 'user2'}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.create,
- req, body=body)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ req, body=body)
@mock.patch('nova.compute.api.KeypairAPI.delete_key_pair')
def test_delete_keypairs_policy(self, mock_delete):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, fakes.FAKE_UUID)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, fakes.FAKE_UUID)
@mock.patch('nova.compute.api.KeypairAPI.delete_key_pair')
def test_delete_others_keypairs_policy(self, mock_delete):
# Change the user_id in request context.
req = fakes.HTTPRequest.blank('?user_id=user2', version='2.10')
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- req, fakes.FAKE_UUID)
+ self.common_policy_auth(self.admin_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ req, fakes.FAKE_UUID)
+
+
+class KeypairsNoLegacyNoScopeTest(KeypairsPolicyTest):
+ """Test Keypairs API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(KeypairsNoLegacyNoScopeTest, self).setUp()
class KeypairsScopeTypePolicyTest(KeypairsPolicyTest):
@@ -173,6 +152,12 @@ class KeypairsScopeTypePolicyTest(KeypairsPolicyTest):
super(KeypairsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope checking, only project-scoped users are allowed
+ self.reduce_set('everyone_authorized', self.all_project_contexts)
+ self.admin_authorized_contexts = [
+ self.legacy_admin_context,
+ self.project_admin_context]
+
class KeypairsNoLegacyPolicyTest(KeypairsScopeTypePolicyTest):
"""Test Keypairs APIs policies with system scope enabled,
@@ -180,35 +165,3 @@ class KeypairsNoLegacyPolicyTest(KeypairsScopeTypePolicyTest):
access system APIs.
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(KeypairsNoLegacyPolicyTest, self).setUp()
-
- # Check that system admin is able to create, delete and get
- # other users keypairs.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that system non-admin is not able to create, delete and get
- # other users keypairs.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
- # Check that system reader is able to get
- # other users keypairs.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get
- # other users keypairs.
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
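The scope-type setUp above calls self.reduce_set('everyone_authorized', self.all_project_contexts) to drop the system-scoped contexts from an existing authorized set. A plausible sketch of such a helper, assuming the '<name>_contexts' attribute convention; the actual helper in the test base may differ:

    def reduce_set(self, name, allowed):
        # Intersect the named authorized set with the allowed contexts.
        attr = name + '_contexts'
        setattr(self, attr, set(getattr(self, attr)) & set(allowed))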
diff --git a/nova/tests/unit/policies/test_limits.py b/nova/tests/unit/policies/test_limits.py
index cab2b5f679..aba647caec 100644
--- a/nova/tests/unit/policies/test_limits.py
+++ b/nova/tests/unit/policies/test_limits.py
@@ -10,15 +10,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+import functools
+from unittest import mock
from nova.api.openstack.compute import limits
+import nova.conf
from nova.policies import base as base_policy
from nova.policies import limits as limits_policies
from nova import quota
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
+CONF = nova.conf.CONF
+
class LimitsPolicyTest(base.BasePolicyTest):
"""Test Limits APIs policies with all possible context.
@@ -55,48 +59,52 @@ class LimitsPolicyTest(base.BasePolicyTest):
mock_get_project_quotas.start()
# Check that everyone is able to get their limits
- self.everyone_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_member_context, self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = []
-
- # Check that system reader is able to get other projects limit.
- # NOTE(gmann): Until old default rule which is admin_api is
- # deprecated and not removed, project admin and legacy admin
- # will be able to get limit. This make sure that existing
- # tokens will keep working even we have changed this policy defaults
- # to reader role.
- self.reader_authorized_contexts = [
+ self.everyone_authorized_contexts = self.all_contexts
+
+ # With legacy rule, any admin is able to get other projects limit.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-admin is not able to get other projects limit.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
+ self.project_admin_context]
def test_get_limits_policy(self):
rule_name = limits_policies.BASE_POLICY_NAME
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
def test_get_other_limits_policy(self):
+ rule = limits_policies.BASE_POLICY_NAME
+ self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('/?tenant_id=faketenant')
rule_name = limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- req)
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(base.rule_if_system,
+ rule, rule_name)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ check_rule, self.controller.index,
+ req)
+
+
+class LimitsNoLegacyNoScopeTest(LimitsPolicyTest):
+ """Test Flavor Access API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME:
+ base_policy.ADMIN}
+
+ def setUp(self):
+ super(LimitsNoLegacyNoScopeTest, self).setUp()
+
+ # Even with no legacy rule, any admin can get other project
+ # limits.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
class LimitsScopeTypePolicyTest(LimitsPolicyTest):
@@ -114,22 +122,18 @@ class LimitsScopeTypePolicyTest(LimitsPolicyTest):
super(LimitsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to get other projects limit.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able toget other
- # projects limit.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
+ # With scope enabled, system users are no longer allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
+ self.project_foo_context, self.other_project_reader_context
]
-class LimitsNoLegacyPolicyTest(LimitsScopeTypePolicyTest):
+class LimitsScopeTypeNoLegacyPolicyTest(LimitsScopeTypePolicyTest):
"""Test Limits APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -137,4 +141,17 @@ class LimitsNoLegacyPolicyTest(LimitsScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
limits_policies.OTHER_PROJECT_LIMIT_POLICY_NAME:
- base_policy.SYSTEM_READER}
+ base_policy.ADMIN}
+
+ def setUp(self):
+ super(LimitsScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy rules and scope checks enabled, only project-level
+ # admins will get other projects' limits.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.other_project_member_context,
+ self.project_foo_context, self.other_project_reader_context
+ ]
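When enforce_scope is on, test_get_other_limits_policy no longer passes a plain rule name; it wraps the expected rule in functools.partial(base.rule_if_system, rule, rule_name), so the rule actually asserted depends on whether the request context is system scoped. A sketch of that selector, under the assumption that the context exposes a system_scope attribute and with illustrative policy name strings (the real base.rule_if_system and the real names may differ):

    import functools

    def rule_if_system(system_rule, non_system_rule, context):
        # System-scoped tokens are expected to hit the base rule; project
        # tokens hit the other-project rule.
        return (system_rule if getattr(context, 'system_scope', None)
                else non_system_rule)

    rule = 'limits'                                     # illustrative name
    rule_name = 'os_compute_api:limits:other_project'   # illustrative name
    check_rule = functools.partial(rule_if_system, rule, rule_name)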
diff --git a/nova/tests/unit/policies/test_lock_server.py b/nova/tests/unit/policies/test_lock_server.py
index 883c71929e..31de5cff0c 100644
--- a/nova/tests/unit/policies/test_lock_server.py
+++ b/nova/tests/unit/policies/test_lock_server.py
@@ -10,13 +10,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from nova.api.openstack.compute import lock_server
from nova.compute import vm_states
+import nova.conf
from nova import exception
from nova.policies import base as base_policy
from nova.policies import lock_server as ls_policies
@@ -24,6 +27,8 @@ from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
+CONF = nova.conf.CONF
+
class LockServerPolicyTest(base.BasePolicyTest):
"""Test Lock server APIs policies with all possible context.
@@ -48,54 +53,39 @@ class LockServerPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to lock/unlock
- # the server
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project members,
+ # project readers and other project roles (because the legacy rule allows
+ # the server owner - same project id, no role check) are able to lock and
+ # unlock the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to lock/unlock
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that admin is able to unlock the server which is
- # locked by other
- self.admin_authorized_contexts = [
+
+ # By default, legacy rules are enabled and scope checks are disabled.
+ # System admin, legacy admin, and project admin are able to override
+ # unlock, regardless of who locked the server.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to unlock the server
- # which is locked by other
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.lock')
def test_lock_server_policy(self, mock_lock):
rule_name = ls_policies.POLICY_ROOT % 'lock'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._lock,
- self.req, self.instance.uuid,
- body={'lock': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._lock,
+ self.req, self.instance.uuid,
+ body={'lock': {}})
@mock.patch('nova.compute.api.API.unlock')
def test_unlock_server_policy(self, mock_unlock):
rule_name = ls_policies.POLICY_ROOT % 'unlock'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._unlock,
- self.req, self.instance.uuid,
- body={'unlock': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._unlock,
+ self.req, self.instance.uuid,
+ body={'unlock': {}})
@mock.patch('nova.compute.api.API.unlock')
@mock.patch('nova.compute.api.API.is_expected_locked_by')
@@ -104,12 +94,16 @@ class LockServerPolicyTest(base.BasePolicyTest):
rule = ls_policies.POLICY_ROOT % 'unlock'
self.policy.set_rules({rule: "@"}, overwrite=False)
rule_name = ls_policies.POLICY_ROOT % 'unlock:unlock_override'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._unlock,
- self.req, self.instance.uuid,
- body={'unlock': {}})
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(base.rule_if_system,
+ rule, rule_name)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ check_rule,
+ self.controller._unlock,
+ self.req, self.instance.uuid,
+ body={'unlock': {}})
def test_lock_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -125,7 +119,7 @@ class LockServerPolicyTest(base.BasePolicyTest):
exc.format_message())
@mock.patch('nova.compute.api.API.lock')
- def test_lock_sevrer_overridden_policy_pass_with_same_user(
+ def test_lock_server_overridden_policy_pass_with_same_user(
self, mock_lock):
rule_name = ls_policies.POLICY_ROOT % 'lock'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
@@ -134,6 +128,22 @@ class LockServerPolicyTest(base.BasePolicyTest):
body={'lock': {}})
+class LockServerNoLegacyNoScopePolicyTest(LockServerPolicyTest):
+ """Test lock/unlock server APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(LockServerNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to lock/unlock the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class LockServerScopeTypePolicyTest(LockServerPolicyTest):
"""Test Lock Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -147,49 +157,28 @@ class LockServerScopeTypePolicyTest(LockServerPolicyTest):
def setUp(self):
super(LockServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope checks enabled, system admin cannot lock/unlock the server.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class LockServerNoLegacyPolicyTest(LockServerScopeTypePolicyTest):
+class LockServerScopeTypeNoLegacyPolicyTest(LockServerScopeTypePolicyTest):
"""Test Lock Server APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(LockServerNoLegacyPolicyTest, self).setUp()
- # Check that system admin or and server owner is able to lock/unlock
- # the server
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to lock/unlock
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
-
- # Check that system admin is able to unlock the server which is
- # locked by other
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that system non-admin is not able to unlock the server
- # which is locked by other
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
+ super(LockServerScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rule, only project admin/member
+ # will be able to lock/unlock the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
-class LockServerOverridePolicyTest(LockServerNoLegacyPolicyTest):
+class LockServerOverridePolicyTest(LockServerScopeTypeNoLegacyPolicyTest):
"""Test Lock Server APIs policies with system and project scoped
but default to system roles only are allowed for project roles
if override by operators. This test is with system scope enable
@@ -198,21 +187,11 @@ class LockServerOverridePolicyTest(LockServerNoLegacyPolicyTest):
def setUp(self):
super(LockServerOverridePolicyTest, self).setUp()
-
- # Check that system admin or project scoped role as override above
- # is able to unlock the server which is locked by other
- self.admin_authorized_contexts = [
- self.system_admin_context,
+ # We are overriding the 'unlock:unlock_override' policy
+ # to PROJECT_MEMBER, so test it with both admin and
+ # project member as the allowed contexts.
+ self.project_admin_authorized_contexts = [
self.project_admin_context, self.project_member_context]
- # Check that non-system admin or project role is not able to
- # unlock the server which is locked by other
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
def test_unlock_override_server_policy(self):
rule = ls_policies.POLICY_ROOT % 'unlock:unlock_override'
@@ -220,6 +199,6 @@ class LockServerOverridePolicyTest(LockServerNoLegacyPolicyTest):
# make unlock allowed for everyone so that we can check unlock
# override policy.
ls_policies.POLICY_ROOT % 'unlock': "@",
- rule: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}, overwrite=False)
+ rule: base_policy.PROJECT_MEMBER}, overwrite=False)
super(LockServerOverridePolicyTest,
self).test_unlock_override_server_policy()
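Several hunks above first relax a policy with self.policy.set_rules({rule: "@"}, overwrite=False) before exercising the stricter override rule. "@" is the oslo.policy check string that always passes (its counterpart "!" always fails). A small standalone illustration with oslo.policy directly, not taken from the nova tests:

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_default(policy.RuleDefault('demo:allow-all', '@'))
    enforcer.register_default(policy.RuleDefault('demo:deny-all', '!'))

    creds = {'roles': ['member'], 'project_id': 'p1'}
    assert enforcer.enforce('demo:allow-all', {}, creds)
    assert not enforcer.enforce('demo:deny-all', {}, creds)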
diff --git a/nova/tests/unit/policies/test_migrate_server.py b/nova/tests/unit/policies/test_migrate_server.py
index 0082b3d414..0f750770d9 100644
--- a/nova/tests/unit/policies/test_migrate_server.py
+++ b/nova/tests/unit/policies/test_migrate_server.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -47,28 +48,19 @@ class MigrateServerPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin is able to migrate the server.
- self.admin_authorized_contexts = [
+ # With legacy rule, any admin is able to migrate
+ # the server.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context
- ]
- # Check that non-admin is not able to migrate the server
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context]
@mock.patch('nova.compute.api.API.resize')
def test_migrate_server_policy(self, mock_resize):
rule_name = ms_policies.POLICY_ROOT % 'migrate'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._migrate,
- self.req, self.instance.uuid,
- body={'migrate': None})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller._migrate,
+ self.req, self.instance.uuid,
+ body={'migrate': None})
@mock.patch('nova.compute.api.API.live_migrate')
def test_migrate_live_server_policy(self, mock_live_migrate):
@@ -78,11 +70,18 @@ class MigrateServerPolicyTest(base.BasePolicyTest):
'block_migration': "False",
'disk_over_commit': "False"}
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._migrate_live,
- self.req, self.instance.uuid,
- body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller._migrate_live,
+ self.req, self.instance.uuid,
+ body=body)
+
+
+class MigrateServerNoLegacyNoScopeTest(MigrateServerPolicyTest):
+ """Test Server Migrations API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class MigrateServerScopeTypePolicyTest(MigrateServerPolicyTest):
@@ -99,32 +98,21 @@ class MigrateServerScopeTypePolicyTest(MigrateServerPolicyTest):
def setUp(self):
super(MigrateServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class MigrateServerNoLegacyPolicyTest(MigrateServerScopeTypePolicyTest):
+class MigrateServerScopeTypeNoLegacyPolicyTest(
+ MigrateServerScopeTypePolicyTest):
"""Test Migrate Server APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
- def setUp(self):
- super(MigrateServerNoLegacyPolicyTest, self).setUp()
- # Check that system admin is able to migrate the server.
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non system admin is not able to migrate the server
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
-
-class MigrateServerOverridePolicyTest(MigrateServerNoLegacyPolicyTest):
+
+class MigrateServerOverridePolicyTest(
+ MigrateServerScopeTypeNoLegacyPolicyTest):
"""Test Migrate Server APIs policies with system and project scoped
but default to system roles only are allowed for project roles
if override by operators. This test is with system scope enable
@@ -136,23 +124,13 @@ class MigrateServerOverridePolicyTest(MigrateServerNoLegacyPolicyTest):
rule_migrate = ms_policies.POLICY_ROOT % 'migrate'
rule_live_migrate = ms_policies.POLICY_ROOT % 'migrate_live'
# NOTE(gmann): override the rule to project member and verify it
- # work as policy is system and projct scoped.
+ # work as policy is system and project scoped.
self.policy.set_rules({
- rule_migrate: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
- rule_live_migrate: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN},
+ rule_migrate: base_policy.PROJECT_MEMBER,
+ rule_live_migrate: base_policy.PROJECT_MEMBER},
overwrite=False)
- # Check that system admin or project scoped role as override above
+ # Check that the project member role, as overridden above,
# is able to migrate the server
- self.admin_authorized_contexts = [
- self.system_admin_context,
+ self.project_admin_authorized_contexts = [
self.project_admin_context, self.project_member_context]
- # Check that non-system admin or project role is not able to
- # migrate the server
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
diff --git a/nova/tests/unit/policies/test_migrations.py b/nova/tests/unit/policies/test_migrations.py
index a124fa508b..25cd75a125 100644
--- a/nova/tests/unit/policies/test_migrations.py
+++ b/nova/tests/unit/policies/test_migrations.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import migrations
from nova.policies import migrations as migrations_policies
@@ -32,27 +32,25 @@ class MigrationsPolicyTest(base.BasePolicyTest):
self.controller = migrations.MigrationsController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to list migrations.
- self.reader_authorized_contexts = [
+ # With legacy rule, any admin is able to list migrations.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context
- ]
- # Check that non-admin is not able to list migrations.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context]
@mock.patch('nova.compute.api.API.get_migrations')
def test_list_migrations_policy(self, mock_migration):
rule_name = migrations_policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
+
+
+class MigrationsNoLegacyNoScopeTest(MigrationsPolicyTest):
+ """Test Migrations API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class MigrationsScopeTypePolicyTest(MigrationsPolicyTest):
@@ -70,15 +68,14 @@ class MigrationsScopeTypePolicyTest(MigrationsPolicyTest):
super(MigrationsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system reader is able to list migrations.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non system reader is not able to list migrations.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ # With scope enabled, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+
+
+class MigrationsScopeTypeNoLegacyPolicyTest(
+ MigrationsScopeTypePolicyTest):
+ """Test Migrations APIs policies with system scope enabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
diff --git a/nova/tests/unit/policies/test_multinic.py b/nova/tests/unit/policies/test_multinic.py
index 21c14bfc57..852ff25965 100644
--- a/nova/tests/unit/policies/test_multinic.py
+++ b/nova/tests/unit/policies/test_multinic.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -45,40 +46,53 @@ class MultinicPolicyTest(base.BasePolicyTest):
id=1, uuid=uuid, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or owner is able to add/remove fixed ip.
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project members,
+ # project readers and other project roles (because the legacy rule allows
+ # the server owner - same project id, no role check) are able to
+ # add/remove a fixed ip.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin and non-owner is not able to add/remove
- # fixed ip.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_reader_context, self.project_foo_context]
@mock.patch('nova.compute.api.API.add_fixed_ip')
def test_add_fixed_ip_policy(self, mock_add):
rule_name = "os_compute_api:os-multinic:add"
body = dict(addFixedIp=dict(networkId='test_net'))
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name, self.controller._add_fixed_ip,
- self.req, self.instance.uuid,
- body=body)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name, self.controller._add_fixed_ip,
+ self.req, self.instance.uuid,
+ body=body)
@mock.patch('nova.compute.api.API.remove_fixed_ip')
def test_remove_fixed_ip_policy(self, mock_remove):
rule_name = "os_compute_api:os-multinic:remove"
body = dict(removeFixedIp=dict(address='1.2.3.4'))
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name, self.controller._remove_fixed_ip,
- self.req, self.instance.uuid,
- body=body)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name, self.controller._remove_fixed_ip,
+ self.req, self.instance.uuid,
+ body=body)
+
+
+class MultinicNoLegacyNoScopePolicyTest(MultinicPolicyTest):
+ """Test Multinic APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.BASE_POLICY_NAME % 'add':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.BASE_POLICY_NAME % 'remove':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(MultinicNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to add/remove the fixed ip.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class MultinicScopeTypePolicyTest(MultinicPolicyTest):
@@ -95,33 +109,26 @@ class MultinicScopeTypePolicyTest(MultinicPolicyTest):
def setUp(self):
super(MultinicScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope checks enabled, system admin is not allowed to add/remove
+ # the fixed ip.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class MultinicNoLegacyPolicyTest(MultinicScopeTypePolicyTest):
+class MultinicScopeTypeNoLegacyPolicyTest(MultinicScopeTypePolicyTest):
"""Test Multinic APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(MultinicNoLegacyPolicyTest, self).setUp()
- # Check that system admin or owner is able to
- # add/delete Fixed IP to server.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- ]
- # Check that non-system and non-admin/owner is not able
- # to add/delete Fixed IP to server.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(MultinicScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rule, only project admin/member
+ # will be able to add/remove the fixed ip.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_networks.py b/nova/tests/unit/policies/test_networks.py
index 9ca018835c..9c3e0b735a 100644
--- a/nova/tests/unit/policies/test_networks.py
+++ b/nova/tests/unit/policies/test_networks.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import networks
@@ -38,7 +39,7 @@ class NetworksPolicyTest(base.BasePolicyTest):
# enforcement so will be passing context's project_id as target to
# policy and always pass. If requester is not admin or owner
# of networks then neutron will be returning the appropriate error.
- self.reader_authorized_contexts = [
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
@@ -47,23 +48,47 @@ class NetworksPolicyTest(base.BasePolicyTest):
self.system_foo_context,
self.other_project_member_context
]
- self.reader_unauthorized_contexts = []
@mock.patch('nova.network.neutron.API.get_all')
def test_list_networks_policy(self, mock_get):
rule_name = "os_compute_api:os-networks:list"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.network.neutron.API.get')
def test_show_network_policy(self, mock_get):
rule_name = "os_compute_api:os-networks:show"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, uuids.fake_id)
+
+
+class NetworksNoLegacyNoScopePolicyTest(NetworksPolicyTest):
+ """Test Networks APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.POLICY_ROOT % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_ROOT % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN}
+
+ def setUp(self):
+ super(NetworksNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, other project roles like foo will not be able
+ # to get networks.
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
class NetworksScopeTypePolicyTest(NetworksPolicyTest):
@@ -80,30 +105,30 @@ class NetworksScopeTypePolicyTest(NetworksPolicyTest):
def setUp(self):
super(NetworksScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
-class NetworksNoLegacyPolicyTest(NetworksScopeTypePolicyTest):
+class NetworksScopeTypeNoLegacyPolicyTest(NetworksScopeTypePolicyTest):
"""Test Networks APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_ROOT % 'list':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_ROOT % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER}
+ base_policy.PROJECT_READER_OR_ADMIN}
def setUp(self):
- super(NetworksNoLegacyPolicyTest, self).setUp()
- self.reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
+ super(NetworksScopeTypeNoLegacyPolicyTest, self).setUp()
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
- self.system_member_context, self.system_reader_context,
self.other_project_member_context,
self.other_project_reader_context,
]
- self.reader_unauthorized_contexts = [
- self.project_foo_context,
- self.system_foo_context
- ]
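The networks rules above move from PROJECT_READER_OR_SYSTEM_READER to PROJECT_READER_OR_ADMIN. Semantically, the new default is roughly the following oslo.policy check; the exact value is composed from named rules in nova/policies/base.py, so this expansion is an approximation rather than the literal string:

    # Approximate expansion of base_policy.PROJECT_READER_OR_ADMIN.
    PROJECT_READER_OR_ADMIN = (
        'role:admin or (role:reader and project_id:%(project_id)s)'
    )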
diff --git a/nova/tests/unit/policies/test_pause_server.py b/nova/tests/unit/policies/test_pause_server.py
index 73e78bd55d..86a3e616dd 100644
--- a/nova/tests/unit/policies/test_pause_server.py
+++ b/nova/tests/unit/policies/test_pause_server.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -46,41 +47,32 @@ class PauseServerPolicyTest(base.BasePolicyTest):
user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
-
- # Check that admin or and server owner is able to pause/unpause
- # the server
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project members,
+ # project readers and other project roles (because the legacy rule allows
+ # the server owner - same project id, no role check) are able to pause and
+ # unpause the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to pause/unpause
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.pause')
def test_pause_server_policy(self, mock_pause):
rule_name = ps_policies.POLICY_ROOT % 'pause'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._pause,
- self.req, self.instance.uuid,
- body={'pause': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._pause,
+ self.req, self.instance.uuid,
+ body={'pause': {}})
@mock.patch('nova.compute.api.API.unpause')
def test_unpause_server_policy(self, mock_unpause):
rule_name = ps_policies.POLICY_ROOT % 'unpause'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._unpause,
- self.req, self.instance.uuid,
- body={'unpause': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._unpause,
+ self.req, self.instance.uuid,
+ body={'unpause': {}})
def test_pause_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -105,6 +97,22 @@ class PauseServerPolicyTest(base.BasePolicyTest):
body={'pause': {}})
+class PauseServerNoLegacyNoScopePolicyTest(PauseServerPolicyTest):
+ """Test Pause/unpause server APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(PauseServerNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to pause/unpause the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class PauseServerScopeTypePolicyTest(PauseServerPolicyTest):
"""Test Pause Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -118,28 +126,20 @@ class PauseServerScopeTypePolicyTest(PauseServerPolicyTest):
def setUp(self):
super(PauseServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope checks enabled, system admin cannot pause/unpause the server.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class PauseServerNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
+class PauseServerScopeTypeNoLegacyPolicyTest(PauseServerScopeTypePolicyTest):
"""Test Pause Server APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(PauseServerNoLegacyPolicyTest, self).setUp()
- # Check that system admin or server owner is able to pause/unpause
- # the server
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to pause/unpause
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
+ super(PauseServerScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rule, only project admin/member
+ # will be able to pause/unpause the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
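The pause, lock and multinic tests above reuse named convenience sets such as project_member_or_admin_with_no_scope_no_legacy and project_member_or_admin_with_scope_no_legacy instead of spelling out context lists. Based on how those tests behave, the memberships plausibly look like the sketch below; the real definitions live in the policy test base, so these are assumptions:

    # Assumed memberships, inferred from the tests in this series.
    self.project_member_or_admin_with_no_scope_no_legacy = set([
        self.legacy_admin_context, self.system_admin_context,
        self.project_admin_context, self.project_member_context])
    self.project_member_or_admin_with_scope_no_legacy = set([
        self.legacy_admin_context, self.project_admin_context,
        self.project_member_context])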
diff --git a/nova/tests/unit/policies/test_quota_class_sets.py b/nova/tests/unit/policies/test_quota_class_sets.py
index 276c22fac4..09b90d5ebc 100644
--- a/nova/tests/unit/policies/test_quota_class_sets.py
+++ b/nova/tests/unit/policies/test_quota_class_sets.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import quota_classes
from nova.policies import quota_class_sets as policies
@@ -31,30 +31,12 @@ class QuotaClassSetsPolicyTest(base.BasePolicyTest):
self.controller = quota_classes.QuotaClassSetsController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to update quota class
- self.admin_authorized_contexts = [
+ # With the legacy rule and scope checks disabled by default, system admin,
+ # legacy admin, and project admin will be able to get and update the
+ # quota class.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to update quota class
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that system reader is able to get quota class
- self.system_reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get quota class
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.objects.Quotas.update_class')
def test_update_quota_class_sets_policy(self, mock_update):
@@ -64,21 +46,30 @@ class QuotaClassSetsPolicyTest(base.BasePolicyTest):
'ram': 51200, 'floating_ips': -1,
'fixed_ips': -1, 'instances': 10,
'injected_files': 5, 'cores': 20}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, 'test_class',
- body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, 'test_class',
+ body=body)
@mock.patch('nova.quota.QUOTAS.get_class_quotas')
def test_show_quota_class_sets_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, 'test_class')
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, 'test_class')
+
+
+class QuotaClassSetsNoLegacyNoScopePolicyTest(QuotaClassSetsPolicyTest):
+ """Test QuotaClassSets APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only. In this case
+ system admin, legacy admin, and project admin will be able to get
+ update quota class. Legacy admin will be allowed as policy
+ is just admin if no scope checks.
+
+ """
+ without_deprecated_rules = True
class QuotaClassSetsScopeTypePolicyTest(QuotaClassSetsPolicyTest):
@@ -94,38 +85,17 @@ class QuotaClassSetsScopeTypePolicyTest(QuotaClassSetsPolicyTest):
def setUp(self):
super(QuotaClassSetsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to update and get quota class
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system/admin is not able to update and get quota class
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that system reader is able to get quota class
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get quota class
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
-
-class QuotaClassSetsNoLegacyPolicyTest(QuotaClassSetsScopeTypePolicyTest):
- """Test Quota Class Sets APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system APIs.
+
+ # With scope checks enabled, only project admins are able to
+ # update and get quota class.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class QuotaClassScopeTypeNoLegacyPolicyTest(QuotaClassSetsScopeTypePolicyTest):
+ """Test QuotaClassSets APIs policies with no legacy deprecated rules
+ and scope checks enabled which means scope + new defaults so
+ only system admin is able to update and get quota class.
+
"""
without_deprecated_rules = True
-
- def setUp(self):
- super(QuotaClassSetsNoLegacyPolicyTest, self).setUp()
diff --git a/nova/tests/unit/policies/test_quota_sets.py b/nova/tests/unit/policies/test_quota_sets.py
index 0b8d15c384..3ff8cd1c02 100644
--- a/nova/tests/unit/policies/test_quota_sets.py
+++ b/nova/tests/unit/policies/test_quota_sets.py
@@ -10,9 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import quota_sets
+from nova import exception
from nova.policies import quota_sets as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
@@ -33,55 +34,29 @@ class QuotaSetsPolicyTest(base.BasePolicyTest):
self.req = fakes.HTTPRequest.blank('')
self.project_id = self.req.environ['nova.context'].project_id
- # Check that admin is able to update or revert quota
- # to default.
- self.admin_authorized_contexts = [
+ # With the legacy rule, any admin is able to update or revert their quota
+ # to the default, or to get another project's quota.
+ self.project_admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to update or revert
- # quota to default.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that system reader is able to get another project's quota.
- self.system_reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get another
- # project's quota.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that everyone is able to get the default quota or
- # their own quota.
- self.everyone_authorized_contexts = [
+ self.project_admin_context])
+ # With legacy rule, everyone is able to get their own quota.
+ self.project_reader_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.everyone_unauthorized_contexts = []
- # Check that system reader or owner is able to get their own quota.
- self.system_reader_or_owner_authorized_contexts = [
+ self.other_project_reader_context])
+ # Everyone is able to get the default quota
+ self.everyone_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context,
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
self.other_project_member_context,
- self.other_project_reader_context
- ]
+ self.other_project_reader_context])
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
@mock.patch('nova.quota.QUOTAS.get_settable_quotas')
@@ -91,41 +66,57 @@ class QuotaSetsPolicyTest(base.BasePolicyTest):
'instances': 50,
'cores': 50}
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, self.project_id,
- body=body)
+ for cxtx in self.project_admin_authorized_contexts:
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'] = cxtx
+ self.controller.update(req, cxtx.project_id, body=body)
+ for cxtx in (self.all_contexts -
+ set(self.project_admin_authorized_contexts)):
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'] = cxtx
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized, self.controller.update,
+ req, cxtx.project_id, body=body)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
@mock.patch('nova.objects.Quotas.destroy_all_by_project')
def test_delete_quota_sets_policy(self, mock_delete):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, self.project_id)
+ for cxtx in self.project_admin_authorized_contexts:
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'] = cxtx
+ self.controller.delete(req, cxtx.project_id)
+ for cxtx in (self.all_contexts -
+ set(self.project_admin_authorized_contexts)):
+ req = fakes.HTTPRequest.blank('')
+ req.environ['nova.context'] = cxtx
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized, self.controller.delete,
+ req, cxtx.project_id)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
@mock.patch('nova.quota.QUOTAS.get_defaults')
def test_default_quota_sets_policy(self, mock_default):
rule_name = policies.POLICY_ROOT % 'defaults'
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.defaults,
- self.req, self.project_id)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.defaults,
+ self.req, self.project_id)
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
def test_detail_quota_sets_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'detail'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.detail,
- self.req, 'try-other-project')
- # Check if everyone (owner) is able to get their own quota
- for cxtx in self.system_reader_or_owner_authorized_contexts:
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.detail,
+ self.req, 'try-other-project')
+ # Check that the project reader or higher roles are able to get
+ # their own quota.
+ for cxtx in self.project_reader_authorized_contexts:
req = fakes.HTTPRequest.blank('')
req.environ['nova.context'] = cxtx
self.controller.detail(req, cxtx.project_id)
@@ -133,18 +124,44 @@ class QuotaSetsPolicyTest(base.BasePolicyTest):
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
def test_show_quota_sets_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, 'try-other-project')
- # Check if everyone (owner) is able to get their own quota
- for cxtx in self.system_reader_or_owner_authorized_contexts:
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, 'try-other-project')
+ # Check that the project reader or higher roles are able to get
+ # their own quota.
+ for cxtx in self.project_reader_authorized_contexts:
req = fakes.HTTPRequest.blank('')
req.environ['nova.context'] = cxtx
self.controller.show(req, cxtx.project_id)
+class QuotaSetsNoLegacyNoScopePolicyTest(QuotaSetsPolicyTest):
+ """Test QuotaSets APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(QuotaSetsNoLegacyNoScopePolicyTest, self).setUp()
+ # Even with no legacy rule, any admin requesting an update/revert of
+ # quota for their own project will be allowed, and any admin will be
+ # able to get another project's quota.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # With no legacy rule, other projects' users and the foo role will
+ # not be able to get the quota.
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context,
+ self.system_member_context, self.system_reader_context,
+ self.project_member_context,
+ self.project_reader_context]
+
+
class QuotaSetsScopeTypePolicyTest(QuotaSetsPolicyTest):
"""Test Quota Sets APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -159,23 +176,16 @@ class QuotaSetsScopeTypePolicyTest(QuotaSetsPolicyTest):
super(QuotaSetsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to update or revert quota
- # to default.
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system admin is not able to update or revert
- # quota to default.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.project_admin_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # With scope enabled, system users will be disallowed.
+ self.reduce_set('project_admin_authorized', set([
+ self.legacy_admin_context,
+ self.project_admin_context]))
+ self.reduce_set('project_reader_authorized',
+ self.all_project_contexts)
+ self.everyone_authorized_contexts = self.all_project_contexts
-class QuotaSetsNoLegacyPolicyTest(QuotaSetsScopeTypePolicyTest):
+class QuotaSetsScopeTypeNoLegacyPolicyTest(QuotaSetsScopeTypePolicyTest):
"""Test Quota Sets APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -183,28 +193,9 @@ class QuotaSetsNoLegacyPolicyTest(QuotaSetsScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(QuotaSetsNoLegacyPolicyTest, self).setUp()
-
- # Check that system reader is able to get another project's quota.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get anotherproject's
- # quota.
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that everyone is able to get their own quota.
- self.system_reader_or_owner_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.project_member_context,
- self.project_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(QuotaSetsScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope enabled and no legacy, system and
+ # non-reader/member users are disallowed.
+ self.reduce_set('project_reader_authorized',
+ self.all_project_contexts -
+ set([self.project_foo_context]))
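The scope-type variants above call self.reduce_set('project_admin_authorized', ...) instead of re-listing every context. The real helper lives in nova's policy test base class and is not part of this diff; a hypothetical sketch of the idea is that it intersects an existing *_contexts attribute with the contexts that remain valid once scope enforcement is turned on:

    def reduce_set(test_case, name, allowed):
        """Shrink ``<name>_contexts`` to its intersection with ``allowed``."""
        attr = name + '_contexts'
        current = set(getattr(test_case, attr))
        setattr(test_case, attr, current & set(allowed))

Under that reading, reduce_set(self, 'project_admin_authorized', {legacy_admin, project_admin}) drops the system-admin context that the parent class still allows while leaving the rest of the set untouched.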
diff --git a/nova/tests/unit/policies/test_remote_consoles.py b/nova/tests/unit/policies/test_remote_consoles.py
index 825f78a938..a441d1c550 100644
--- a/nova/tests/unit/policies/test_remote_consoles.py
+++ b/nova/tests/unit/policies/test_remote_consoles.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova.policies import remote_consoles as rc_policies
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -48,31 +49,38 @@ class RemoteConsolesPolicyTest(base.BasePolicyTest):
user_id=user_id, vm_state=vm_states.ACTIVE,
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
-
- # Check that admin or and server owner is able to get server
- # remote consoles.
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project members,
+ # project readers, and any other project role (the legacy rule checks
+ # only the project id of the server owner, not the role) are able to
+ # get server remote consoles.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to get server
- # remote consoles.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
def test_create_console_policy(self):
rule_name = rc_policies.BASE_POLICY_NAME
body = {'remote_console': {'protocol': 'vnc', 'type': 'novnc'}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, self.instance.uuid,
- body=body)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, self.instance.uuid,
+ body=body)
+
+
+class RemoteConsolesNoLegacyNoScopePolicyTest(RemoteConsolesPolicyTest):
+ """Test Remote Consoles APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(RemoteConsolesNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to get server remote consoles.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
class RemoteConsolesScopeTypePolicyTest(RemoteConsolesPolicyTest):
@@ -88,9 +96,14 @@ class RemoteConsolesScopeTypePolicyTest(RemoteConsolesPolicyTest):
def setUp(self):
super(RemoteConsolesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow the system admin to get the
+ # server remote console.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class RemoteConsolesNoLegacyPolicyTest(RemoteConsolesScopeTypePolicyTest):
+class RemoteConsolesScopeTypeNoLegacyPolicyTest(
+ RemoteConsolesScopeTypePolicyTest):
"""Test Remote Consoles APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -98,18 +111,8 @@ class RemoteConsolesNoLegacyPolicyTest(RemoteConsolesScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(RemoteConsolesNoLegacyPolicyTest, self).setUp()
- # Check that system admin or and server owner is able to get server
- # remote consoles.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to get server
- # remote consoles.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
+ super(RemoteConsolesScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rule, only project admin/member
+ # will be able to get server remote console.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_rescue.py b/nova/tests/unit/policies/test_rescue.py
index f970425b40..120809877c 100644
--- a/nova/tests/unit/policies/test_rescue.py
+++ b/nova/tests/unit/policies/test_rescue.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova.policies import base as base_policy
from nova.policies import rescue as rs_policies
from oslo_utils.fixture import uuidsentinel as uuids
@@ -48,40 +49,32 @@ class RescueServerPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to rescue/unrescue
- # the sevrer
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project members,
+ # project readers, and any other project role (the legacy rule checks
+ # only the project id of the server owner, not the role) are able to
+ # rescue and unrescue the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to rescue/unrescue
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.rescue')
def test_rescue_server_policy(self, mock_rescue):
rule_name = rs_policies.BASE_POLICY_NAME
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._rescue,
- self.req, self.instance.uuid,
- body={'rescue': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._rescue,
+ self.req, self.instance.uuid,
+ body={'rescue': {}})
@mock.patch('nova.compute.api.API.unrescue')
def test_unrescue_server_policy(self, mock_unrescue):
rule_name = rs_policies.UNRESCUE_POLICY_NAME
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._unrescue,
- self.req, self.instance.uuid,
- body={'unrescue': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._unrescue,
+ self.req, self.instance.uuid,
+ body={'unrescue': {}})
def test_rescue_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -97,7 +90,7 @@ class RescueServerPolicyTest(base.BasePolicyTest):
exc.format_message())
@mock.patch('nova.compute.api.API.rescue')
- def test_rescue_sevrer_overridden_policy_pass_with_same_user(
+ def test_rescue_server_overridden_policy_pass_with_same_user(
self, mock_rescue):
rule_name = rs_policies.BASE_POLICY_NAME
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
@@ -106,6 +99,27 @@ class RescueServerPolicyTest(base.BasePolicyTest):
body={'rescue': {}})
+class RescueServerNoLegacyNoScopePolicyTest(RescueServerPolicyTest):
+ """Test rescue/unrescue server APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ rs_policies.UNRESCUE_POLICY_NAME:
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ rs_policies.BASE_POLICY_NAME:
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(RescueServerNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to rescue/unrescue the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class RescueServerScopeTypePolicyTest(RescueServerPolicyTest):
"""Test Rescue Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -119,9 +133,13 @@ class RescueServerScopeTypePolicyTest(RescueServerPolicyTest):
def setUp(self):
super(RescueServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow the system admin to
+ # rescue/unrescue the server.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class RescueServerNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
+class RescueServerScopeTypeNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
"""Test Rescue Server APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -129,23 +147,13 @@ class RescueServerNoLegacyPolicyTest(RescueServerScopeTypePolicyTest):
without_deprecated_rules = True
rules_without_deprecation = {
rs_policies.UNRESCUE_POLICY_NAME:
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
rs_policies.BASE_POLICY_NAME:
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(RescueServerNoLegacyPolicyTest, self).setUp()
- # Check that system admin or and server owner is able to
- # rescue/unrescue the sevrer
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to rescue/unrescue
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
+ super(RescueServerScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rule, only project admin/member
+ # will be able to rescue/unrescue the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
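The rules_without_deprecation overrides above swap PROJECT_MEMBER_OR_SYSTEM_ADMIN / PROJECT_READER_OR_SYSTEM_READER for PROJECT_MEMBER_OR_ADMIN / PROJECT_READER_OR_ADMIN. The exact check strings live in nova/policies/base.py and are not shown in this diff; as a rough illustration only, project-persona rules of this kind are usually composed from Keystone's default roles along these lines:

    # Illustrative check strings; the real constants may differ in detail.
    ADMIN = 'rule:context_is_admin'
    PROJECT_MEMBER = 'role:member and project_id:%(project_id)s'
    PROJECT_READER = 'role:reader and project_id:%(project_id)s'

    PROJECT_MEMBER_OR_ADMIN = '(' + PROJECT_MEMBER + ') or ' + ADMIN
    PROJECT_READER_OR_ADMIN = '(' + PROJECT_READER + ') or ' + ADMIN

The "OR_ADMIN" half is what keeps the legacy admin and project admin contexts in the authorized sets even after the deprecated rules are dropped.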
diff --git a/nova/tests/unit/policies/test_security_groups.py b/nova/tests/unit/policies/test_security_groups.py
index 5fb35f83a0..a9d2f484ba 100644
--- a/nova/tests/unit/policies/test_security_groups.py
+++ b/nova/tests/unit/policies/test_security_groups.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -48,62 +49,75 @@ class ServerSecurityGroupsPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to operate
+ # With the legacy rule and no scope checks, all admins, project members,
+ # project readers, and any other project role (the legacy rule checks
+ # only the project id of the server owner, not the role) are able to operate
# server security groups.
- self.admin_or_owner_authorized_contexts = [
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to operate
- # server security groups.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
-
- self.reader_authorized_contexts = [
+ # With the legacy rule, any admin or project role is able to get
+ # their server's SGs.
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context, self.project_foo_context
- ]
-
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
]
@mock.patch('nova.network.security_group_api.get_instance_security_groups')
def test_get_security_groups_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'list'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
@mock.patch('nova.network.security_group_api.add_to_instance')
def test_add_security_groups_policy(self, mock_add):
rule_name = policies.POLICY_NAME % 'add'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.action_ctr._addSecurityGroup,
- self.req, self.instance.uuid,
- body={'addSecurityGroup':
- {'name': 'fake'}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.action_ctr._addSecurityGroup,
+ self.req, self.instance.uuid,
+ body={'addSecurityGroup':
+ {'name': 'fake'}})
@mock.patch('nova.network.security_group_api.remove_from_instance')
def test_remove_security_groups_policy(self, mock_remove):
rule_name = policies.POLICY_NAME % 'remove'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.action_ctr._removeSecurityGroup,
- self.req, self.instance.uuid,
- body={'removeSecurityGroup':
- {'name': 'fake'}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.action_ctr._removeSecurityGroup,
+ self.req, self.instance.uuid,
+ body={'removeSecurityGroup':
+ {'name': 'fake'}})
+
+
+class ServerSecurityGroupsNoLegacyNoScopePolicyTest(
+ ServerSecurityGroupsPolicyTest):
+ """Test Server Security Groups server APIs policies with no legacy
+ deprecated rules and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_NAME % 'add':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.POLICY_NAME % 'remove':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(ServerSecurityGroupsNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only the project admin or member will be able
+ # to add/remove a SG to/from the server, and the project reader will
+ # be able to get the server's SGs.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class SecurityGroupsPolicyTest(base.BasePolicyTest):
@@ -120,14 +134,23 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
self.rule_ctr = security_groups.SecurityGroupRulesController()
self.req = fakes.HTTPRequest.blank('')
- # Check that everyone is able to perform crud operation on.
- # security groups.
+ # With legacy rules and scope checks disabled, everyone is able to
+ # perform CRUD operations on security groups.
# NOTE(gmann): Nova cannot verify the security groups owner during
# nova policy enforcement so will be passing context's project_id
# as target to policy and always pass. If requester is not admin
# or owner of security groups then neutron will be returning the
# appropriate error.
- self.reader_authorized_contexts = [
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
@@ -136,29 +159,22 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
self.system_foo_context,
self.other_project_member_context
]
- self.reader_unauthorized_contexts = []
- self.sys_admin_or_owner_authorized_contexts = (
- self.reader_authorized_contexts)
- self.sys_admin_or_owner_unauthorized_contexts = (
- self.reader_unauthorized_contexts)
@mock.patch('nova.network.security_group_api.list')
def test_list_security_groups_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'get'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req)
@mock.patch('nova.network.security_group_api.get')
def test_show_security_groups_policy(self, mock_get):
rule_name = policies.POLICY_NAME % 'show'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, uuids.fake_id)
@mock.patch('nova.network.security_group_api.get')
@mock.patch('nova.network.security_group_api.update_security_group')
@@ -167,11 +183,10 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
body = {'security_group': {
'name': 'test',
'description': 'test-desc'}}
- self.common_policy_check(self.sys_admin_or_owner_authorized_contexts,
- self.sys_admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, uuids.fake_id, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, uuids.fake_id, body=body)
@mock.patch('nova.network.security_group_api.create_security_group')
def test_create_security_groups_policy(self, mock_create):
@@ -179,21 +194,19 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
body = {'security_group': {
'name': 'test',
'description': 'test-desc'}}
- self.common_policy_check(self.sys_admin_or_owner_authorized_contexts,
- self.sys_admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, body=body)
@mock.patch('nova.network.security_group_api.get')
@mock.patch('nova.network.security_group_api.destroy')
def test_delete_security_groups_policy(self, mock_destroy, mock_get):
rule_name = policies.POLICY_NAME % 'delete'
- self.common_policy_check(self.sys_admin_or_owner_authorized_contexts,
- self.sys_admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, uuids.fake_id)
@mock.patch('nova.network.security_group_api.get')
@mock.patch('nova.network.security_group_api.create_security_group_rule')
@@ -202,12 +215,11 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
body = {'security_group_rule': {
'ip_protocol': 'test', 'group_id': uuids.fake_id,
'parent_group_id': uuids.fake_id,
- 'from_port': 22, 'from_port': 22}}
- self.common_policy_check(self.sys_admin_or_owner_authorized_contexts,
- self.sys_admin_or_owner_unauthorized_contexts,
- rule_name,
- self.rule_ctr.create,
- self.req, body=body)
+ 'from_port': 22}}
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.rule_ctr.create,
+ self.req, body=body)
@mock.patch('nova.network.security_group_api.get_rule')
@mock.patch('nova.network.security_group_api.get')
@@ -215,11 +227,52 @@ class SecurityGroupsPolicyTest(base.BasePolicyTest):
def test_delete_security_group_rules_policy(self, mock_remove, mock_get,
mock_rules):
rule_name = policies.POLICY_NAME % 'rule:delete'
- self.common_policy_check(self.sys_admin_or_owner_authorized_contexts,
- self.sys_admin_or_owner_unauthorized_contexts,
- rule_name,
- self.rule_ctr.delete,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.rule_ctr.delete,
+ self.req, uuids.fake_id)
+
+
+class SecurityGroupsNoLegacyNoScopePolicyTest(
+ SecurityGroupsPolicyTest):
+ """Test Security Groups APIs policies with system scope enabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.POLICY_NAME % 'get':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_NAME % 'create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.POLICY_NAME % 'update':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.POLICY_NAME % 'delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.POLICY_NAME % 'rule:create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ policies.POLICY_NAME % 'rule:delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(SecurityGroupsNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, other project roles such as foo will not be
+ # able to operate on SGs.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.system_member_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
class SecurityGroupsScopeTypePolicyTest(SecurityGroupsPolicyTest):
@@ -235,6 +288,20 @@ class SecurityGroupsScopeTypePolicyTest(SecurityGroupsPolicyTest):
def setUp(self):
super(SecurityGroupsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users will not be able to
+ # operate on SG.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
class ServerSecurityGroupsScopeTypePolicyTest(ServerSecurityGroupsPolicyTest):
@@ -251,102 +318,71 @@ class ServerSecurityGroupsScopeTypePolicyTest(ServerSecurityGroupsPolicyTest):
def setUp(self):
super(ServerSecurityGroupsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow system users.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerSecurityGroupsNoLegacyPolicyTest(
+class ServerSecurityGroupsScopeTypeNoLegacyPolicyTest(
ServerSecurityGroupsScopeTypePolicyTest):
"""Test Security Groups APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'list':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'add':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'remove':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(ServerSecurityGroupsNoLegacyPolicyTest, self).setUp()
-
- # Check that system or projct admin or owner is able to operate
- # server security groups.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to operate
- # server security groups.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context]
-
- # Check that system reader or projct is able to get
- # server security groups.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context,
- ]
-
- # Check that non-system reader nd non-admin/owner is not able to get
- # server security groups.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(ServerSecurityGroupsScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rule, only the project
+ # admin/member will be able to add/remove a SG to/from their server,
+ # and the project reader will be able to get the server's SGs.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
class SecurityGroupsNoLegacyPolicyTest(SecurityGroupsScopeTypePolicyTest):
"""Test Security Groups APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.POLICY_NAME % 'get':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.POLICY_NAME % 'create':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'update':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'delete':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:create':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
policies.POLICY_NAME % 'rule:delete':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
super(SecurityGroupsNoLegacyPolicyTest, self).setUp()
-
- self.reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
+ # With no legacy rule and scope checks enabled, system users and
+ # non-standard project roles such as foo will not be able to operate on SGs.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
- self.system_member_context, self.system_reader_context,
- self.other_project_member_context,
self.other_project_reader_context,
- ]
- self.reader_unauthorized_contexts = [
- self.project_foo_context,
- self.system_foo_context
- ]
- self.sys_admin_or_owner_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.project_admin_context, self.project_member_context,
- self.legacy_admin_context, self.other_project_member_context
- ]
- self.sys_admin_or_owner_unauthorized_contexts = [
- self.system_reader_context,
- self.project_reader_context, self.project_foo_context,
- self.system_foo_context, self.other_project_reader_context
+ self.other_project_member_context
]
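The NOTE(gmann) comment above explains why the plain security-group policies pass for every project user: nova has no owner information to hand to the policy check, so it uses the requester's own project_id as the target and leaves real ownership enforcement to neutron. Reduced to a toy check (not oslo.policy itself):

    def project_scoped_check(context_project_id, target):
        # The essence of a 'project_id:%(project_id)s' check string.
        return context_project_id == target['project_id']

    # Nova fills the target from the request context itself, so the check
    # is trivially true; neutron later rejects operations on SGs the
    # project does not actually own.
    assert project_scoped_check('p1', {'project_id': 'p1'})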
diff --git a/nova/tests/unit/policies/test_server_diagnostics.py b/nova/tests/unit/policies/test_server_diagnostics.py
index 04a099a7a3..4a4b192baa 100644
--- a/nova/tests/unit/policies/test_server_diagnostics.py
+++ b/nova/tests/unit/policies/test_server_diagnostics.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -46,26 +47,24 @@ class ServerDiagnosticsPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin is able to get server diagnostics.
- self.admin_authorized_contexts = [
+ # With the legacy rule, any admin is able to get server diagnostics.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context
- ]
- # Check that non-admin is not able to get server diagnostics.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context]
def test_server_diagnostics_policy(self):
rule_name = policies.BASE_POLICY_NAME
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req, self.instance.uuid)
+
+
+class ServerDiagnosticsNoLegacyNoScopeTest(ServerDiagnosticsPolicyTest):
+ """Test Server Diagnostics API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class ServerDiagnosticsScopeTypePolicyTest(ServerDiagnosticsPolicyTest):
@@ -82,33 +81,21 @@ class ServerDiagnosticsScopeTypePolicyTest(ServerDiagnosticsPolicyTest):
def setUp(self):
super(ServerDiagnosticsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class ServerDiagnosticsNoLegacyPolicyTest(
+class ServerDiagnosticsScopeTypeNoLegacyPolicyTest(
ServerDiagnosticsScopeTypePolicyTest):
"""Test Server Diagnostics APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
- def setUp(self):
- super(ServerDiagnosticsNoLegacyPolicyTest, self).setUp()
- # Check that system admin is able to get server diagnostics.
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non system admin is not able to get server diagnostics.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
-
-class ServerDiagnosticsOverridePolicyTest(ServerDiagnosticsNoLegacyPolicyTest):
+
+class ServerDiagnosticsOverridePolicyTest(
+ ServerDiagnosticsScopeTypeNoLegacyPolicyTest):
"""Test Server Diagnostics APIs policies with system and project scoped
but default to system roles only are allowed for project roles
if override by operators. This test is with system scope enable
@@ -119,22 +106,12 @@ class ServerDiagnosticsOverridePolicyTest(ServerDiagnosticsNoLegacyPolicyTest):
super(ServerDiagnosticsOverridePolicyTest, self).setUp()
rule = policies.BASE_POLICY_NAME
# NOTE(gmann): override the rule to project member and verify it
- # work as policy is system and projct scoped.
+ # works now that the policy is project scoped.
self.policy.set_rules({
- rule: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN},
+ rule: base_policy.PROJECT_MEMBER},
overwrite=False)
- # Check that system admin or project scoped role as override above
+ # Check that the project member role, as overridden above,
# is able to get server diagnostics.
- self.admin_authorized_contexts = [
- self.system_admin_context,
+ self.project_admin_authorized_contexts = [
self.project_admin_context, self.project_member_context]
- # Check that non-system admin or project role is not able to
- # get server diagnostics.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
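ServerDiagnosticsOverridePolicyTest above shows the operator-override path: the test replaces the diagnostics rule with base_policy.PROJECT_MEMBER via self.policy.set_rules(..., overwrite=False), so only that one rule changes while every other registered default stays in place. A small sketch of that merge semantic, with the policy name and check string treated as assumptions rather than the real constants:

    def set_rules(existing_rules, new_rules, overwrite=True):
        """Mimic the overwrite=False merge the policy fixture relies on."""
        if overwrite:
            return dict(new_rules)
        merged = dict(existing_rules)
        merged.update(new_rules)
        return merged

    defaults = {'os_compute_api:os-server-diagnostics':
                'rule:context_is_admin'}
    override = {'os_compute_api:os-server-diagnostics':
                'role:member and project_id:%(project_id)s'}
    # overwrite=False keeps all other defaults and swaps in just this rule.
    rules = set_rules(defaults, override, overwrite=False)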
diff --git a/nova/tests/unit/policies/test_server_external_events.py b/nova/tests/unit/policies/test_server_external_events.py
index f8f1bcd663..401b55325f 100644
--- a/nova/tests/unit/policies/test_server_external_events.py
+++ b/nova/tests/unit/policies/test_server_external_events.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_external_events as ev
@@ -33,20 +34,12 @@ class ServerExternalEventsPolicyTest(base.BasePolicyTest):
self.controller = ev.ServerExternalEventsController()
self.req = fakes.HTTPRequest.blank('')
- # Check that admin is able to create the server external events.
- self.admin_authorized_contexts = [
+ # With the legacy rule and no scope checks, any admin can
+ # create the server external events.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context
]
- # Check that non-admin is not able to create the server
- # external events.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.external_instance_event')
@mock.patch('nova.objects.InstanceMappingList.get_by_instance_uuids')
@@ -58,10 +51,18 @@ class ServerExternalEventsPolicyTest(base.BasePolicyTest):
'server_uuid': uuids.fake_id,
'status': 'completed'}]
}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, body=body)
+
+
+class ServerExternalEventsNoLegacyNoScopeTest(
+ ServerExternalEventsPolicyTest):
+ """Test Server External Events API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class ServerExternalEventsScopeTypePolicyTest(ServerExternalEventsPolicyTest):
@@ -79,23 +80,12 @@ class ServerExternalEventsScopeTypePolicyTest(ServerExternalEventsPolicyTest):
super(ServerExternalEventsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that admin is able to create the server external events.
- self.admin_authorized_contexts = [
- self.system_admin_context,
- ]
- # Check that non-admin is not able to create the server
- # external events.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ # With scope checks, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class ServerExternalEventsNoLegacyPolicyTest(
+class ServerExternalEventsScopeTypeNoLegacyPolicyTest(
ServerExternalEventsScopeTypePolicyTest):
"""Test Server External Events APIs policies with system scope enabled,
and no more deprecated rules.
diff --git a/nova/tests/unit/policies/test_server_groups.py b/nova/tests/unit/policies/test_server_groups.py
index 0c8b3de0cd..b0df7ccb89 100644
--- a/nova/tests/unit/policies/test_server_groups.py
+++ b/nova/tests/unit/policies/test_server_groups.py
@@ -9,17 +9,22 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+
+from unittest import mock
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_groups
+import nova.conf
from nova import objects
from nova.policies import server_groups as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
+CONF = nova.conf.CONF
+
class ServerGroupPolicyTest(base.BasePolicyTest):
"""Test Server Groups APIs policies with all possible context.
@@ -45,98 +50,85 @@ class ServerGroupPolicyTest(base.BasePolicyTest):
user_id='u2', policies=[], members=[])]
self.mock_get.return_value = self.sg[0]
- # Check that admin or and owner is able to delete
- # the server group.
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project members,
+ # project readers, and any other project role (the legacy rule checks
+ # only the project id of the SG owner, not the role) are able to
+ # delete and get the SG.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to delete
- # the server group.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
+ self.project_reader_context, self.project_foo_context,
]
- # Check that system reader or owner is able to get
- # the server group. Due to old default everyone
- # is allowed to perform this operation.
- self.system_reader_or_owner_authorized_contexts = [
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.system_member_context,
- self.system_reader_context, self.project_foo_context
- ]
- self.system_reader_or_owner_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
+ self.project_reader_context, self.project_foo_context,
]
- # Check that everyone is able to list
- # theie own server group. Due to old defaults everyone
- # is able to list their server groups.
+ # By default, legacy rules are enabled and scope checks are disabled.
+ # The system admin, legacy admin, and project admin are able to get
+ # all projects' SGs.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+
+ # Listing SGs cannot check the project id, so everyone is allowed.
self.everyone_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = [
- ]
- # Check that project member is able to create server group.
- # Due to old defaults everyone is able to list their server groups.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.system_member_context, self.project_reader_context,
- self.project_foo_context, self.system_reader_context,
+ self.system_member_context, self.system_reader_context,
self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
+ self.other_project_member_context
]
- self.project_member_unauthorized_contexts = []
+
+ # With the legacy rule, anyone can create a SG.
+ self.project_create_authorized_contexts = (
+ self.everyone_authorized_contexts)
@mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
def test_index_server_groups_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req)
+ self.common_policy_auth(self.everyone_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req)
@mock.patch('nova.objects.InstanceGroupList.get_all')
- def test_index_all_project_server_groups_policy(self, mock_get_all):
+ @mock.patch('nova.objects.InstanceGroupList.get_by_project_id')
+ def test_index_all_project_server_groups_policy(self, mock_get,
+ mock_get_all):
mock_get_all.return_value = objects.InstanceGroupList(objects=self.sg)
+ mock_get.return_value = objects.InstanceGroupList(
+ objects=[self.sg[0]])
# 'index' policy is checked before 'index:all_projects' so
# we have to allow it for everyone otherwise it will fail for
# unauthorized contexts here.
rule = policies.POLICY_ROOT % 'index'
self.policy.set_rules({rule: "@"}, overwrite=False)
- admin_req = fakes.HTTPRequest.blank(
- '/os-server-groups?all_projects=True',
- version='2.13', use_admin_context=True)
- # Check admin user get all projects server groups.
- resp = self.controller.index(admin_req)
- projs = [sg['project_id'] for sg in resp['server_groups']]
- self.assertEqual(2, len(projs))
- self.assertIn('proj2', projs)
- # Check non-admin user does not get all projects server groups.
- req = fakes.HTTPRequest.blank('/os-server-groups?all_projects=True',
- version='2.13')
- resp = self.controller.index(req)
- projs = [sg['project_id'] for sg in resp['server_groups']]
- self.assertNotIn('proj2', projs)
+ rule_name = policies.POLICY_ROOT % 'index:all_projects'
+ req = fakes.HTTPRequest.blank('?all_projects', version='2.13')
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(base.rule_if_system,
+ rule, rule_name)
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
+ check_rule, self.controller.index,
+ req, fatal=False)
+ for resp in authorize_res:
+ projs = [sg['project_id'] for sg in resp['server_groups']]
+ self.assertEqual(2, len(projs))
+ self.assertIn('proj2', projs)
+ for resp in unauthorize_res:
+ projs = [sg['project_id'] for sg in resp['server_groups']]
+ self.assertNotIn('proj2', projs)
def test_show_server_groups_policy(self):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(
- self.system_reader_or_owner_authorized_contexts,
- self.system_reader_or_owner_unauthorized_contexts,
+ self.common_policy_auth(
+ self.project_reader_authorized_contexts,
rule_name,
self.controller.show,
self.req, uuids.fake_id)
@@ -146,20 +138,54 @@ class ServerGroupPolicyTest(base.BasePolicyTest):
rule_name = policies.POLICY_ROOT % 'create'
body = {'server_group': {'name': 'fake',
'policies': ['affinity']}}
- self.common_policy_check(self.project_member_authorized_contexts,
- self.project_member_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_create_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, body=body)
@mock.patch('nova.objects.InstanceGroup.destroy')
def test_delete_server_groups_policy(self, mock_destroy):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, uuids.fake_id)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, uuids.fake_id)
+
+
+class ServerGroupNoLegacyNoScopePolicyTest(ServerGroupPolicyTest):
+ """Test Server Groups APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerGroupNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only the project admin or member will be able
+ # to delete the SG, and the reader will also be able to get the SG.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
+
+ # Even with no legacy rule, the legacy admin is allowed to create a SG
+ # using the requesting context's project_id. The same applies to listing SGs.
+ self.project_create_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.system_member_context, self.other_project_member_context]
+
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
class ServerGroupScopeTypePolicyTest(ServerGroupPolicyTest):
@@ -176,27 +202,31 @@ class ServerGroupScopeTypePolicyTest(ServerGroupPolicyTest):
super(ServerGroupScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check if project scoped can create the server group.
- self.project_member_authorized_contexts = [
+ # With scope enabled, system users are disallowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+
+ self.project_create_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
self.project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_member_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context]
+
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+
+ self.everyone_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
self.other_project_reader_context,
+ self.other_project_member_context
]
- # Check if non-project scoped cannot create the server group.
- self.project_member_unauthorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context
- ]
-
- # TODO(gmann): Test this with system scope once we remove
- # the hardcoded admin check
- def test_index_all_project_server_groups_policy(self):
- pass
-class ServerGroupNoLegacyPolicyTest(ServerGroupScopeTypePolicyTest):
+class ServerGroupScopeTypeNoLegacyPolicyTest(ServerGroupScopeTypePolicyTest):
"""Test Server Group APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -204,58 +234,25 @@ class ServerGroupNoLegacyPolicyTest(ServerGroupScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(ServerGroupNoLegacyPolicyTest, self).setUp()
+ super(ServerGroupScopeTypeNoLegacyPolicyTest, self).setUp()
+
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+
+ self.project_create_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context,
+ self.other_project_member_context]
+
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
+
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
- # Check that system admin or and owner is able to delete
- # the server group.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- ]
- # Check that non-system admin/owner is not able to delete
- # the server group.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that system reader or owner is able to get
- # the server group.
- self.system_reader_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.system_member_context,
- self.system_reader_context
- ]
- self.system_reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.other_project_member_context, self.project_foo_context,
- self.other_project_reader_context,
- ]
self.everyone_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.system_member_context, self.system_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = [
- self.project_foo_context,
- self.system_foo_context
- ]
- # Check if project member can create the server group.
- self.project_member_authorized_contexts = [
self.legacy_admin_context, self.project_admin_context,
- self.project_member_context, self.other_project_member_context
- ]
- # Check if non-project member cannot create the server group.
- self.project_member_unauthorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_reader_context,
- self.project_foo_context,
+ self.project_member_context, self.project_reader_context,
self.other_project_reader_context,
+ self.other_project_member_context
]
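
The index:all_projects change above binds two candidate rules with functools.partial so the common helper can resolve, per request context, which rule is actually enforced once scope checking is on. A minimal sketch of that shape, assuming a scope-based selection (the helper name and logic here are illustrative, not nova's real base.rule_if_system):

import functools
from types import SimpleNamespace

def rule_if_system(system_rule, other_rule, context):
    # Assumed selection logic: system-scoped tokens are checked against the
    # first rule, everything else against the second one.
    return system_rule if getattr(context, 'system_scope', None) else other_rule

# Bind both candidates up front; the per-context decision happens later.
check_rule = functools.partial(rule_if_system, 'index', 'index:all_projects')
print(check_rule(SimpleNamespace(system_scope='all')))  # -> 'index'
print(check_rule(SimpleNamespace(system_scope=None)))   # -> 'index:all_projects'
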
diff --git a/nova/tests/unit/policies/test_server_ips.py b/nova/tests/unit/policies/test_server_ips.py
index 29bd2d81c4..b837d2d0e2 100644
--- a/nova/tests/unit/policies/test_server_ips.py
+++ b/nova/tests/unit/policies/test_server_ips.py
@@ -49,37 +49,43 @@ class ServerIpsPolicyTest(base.BasePolicyTest):
self.mock_get_network.return_value = {'net1':
{'ips': '', 'floating_ips': ''}}
- # Check that admin or and server owner is able to get server
- # IP addresses.
- self.reader_or_owner_authorized_contexts = [
+ # With legacy rule, any admin or project role is able to get their
+ # server IP addresses.
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context]
- # Check that non-admin/owner is not able to get the server IP
- # adderesses
- self.reader_or_owner_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
]
def test_index_ips_policy(self):
rule_name = ips_policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.reader_or_owner_authorized_contexts,
- self.reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
def test_show_ips_policy(self):
rule_name = ips_policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.reader_or_owner_authorized_contexts,
- self.reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, self.instance.uuid,
- 'net1')
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, self.instance.uuid,
+ 'net1')
+
+
+class ServerIpsNoLegacyNoScopePolicyTest(ServerIpsPolicyTest):
+ """Test Server Ips APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only.
+
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerIpsNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy, only project admin, member, and reader will be able
+ # to get their server IP addresses.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
@@ -95,28 +101,21 @@ class ServerIpsScopeTypePolicyTest(ServerIpsPolicyTest):
def setUp(self):
super(ServerIpsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users will not be able
+ # to get the server IP addresses.
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerIpsNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
+class ServerIpsScopeTypeNoLegacyPolicyTest(ServerIpsScopeTypePolicyTest):
"""Test Server IPs APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(ServerIpsNoLegacyPolicyTest, self).setUp()
-
- # Check that system reader or owner is able to
- # get the server IP adderesses.
- self.reader_or_owner_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context]
- # Check that non-system and non-owner is not able to
- # get the server IP adderesses.
- self.reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ super(ServerIpsScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy and scope enabled, only admin, member,
+ # and reader will be able to get their server IP addresses.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_metadata.py b/nova/tests/unit/policies/test_server_metadata.py
index 89c6480adc..cf4fb19e7b 100644
--- a/nova/tests/unit/policies/test_server_metadata.py
+++ b/nova/tests/unit/policies/test_server_metadata.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_metadata
@@ -40,92 +41,88 @@ class ServerMetadataPolicyTest(base.BasePolicyTest):
id=1, uuid=uuids.fake_id, project_id=self.project_id)
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to CRUD
- # the server metadata.
- self.admin_or_owner_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to CRUD
- # the server metadata
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that admin or and server owner is able to get
- # the server metadata.
- self.reader_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project members,
+ # project readers, or any other project role (because the legacy rule
+ # allows the server owner, i.e. same project_id, with no role check) are
+ # able to create, update, and delete the server metadata.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.system_member_context, self.system_reader_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to get
- # the server metadata.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # and they can get their own server metadata.
+ self.project_reader_authorized_contexts = (
+ self.project_member_authorized_contexts)
@mock.patch('nova.compute.api.API.get_instance_metadata')
def test_index_server_Metadata_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
@mock.patch('nova.compute.api.API.get_instance_metadata')
def test_show_server_Metadata_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'show'
mock_get.return_value = {'key9': 'value'}
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, self.instance.uuid, 'key9')
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, self.instance.uuid, 'key9')
@mock.patch('nova.compute.api.API.update_instance_metadata')
def test_create_server_Metadata_policy(self, mock_quota):
rule_name = policies.POLICY_ROOT % 'create'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, self.instance.uuid,
- body={"metadata": {"key9": "value9"}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, self.instance.uuid,
+ body={"metadata": {"key9": "value9"}})
@mock.patch('nova.compute.api.API.update_instance_metadata')
def test_update_server_Metadata_policy(self, mock_quota):
rule_name = policies.POLICY_ROOT % 'update'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, self.instance.uuid, 'key9',
- body={"meta": {"key9": "value9"}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, self.instance.uuid, 'key9',
+ body={"meta": {"key9": "value9"}})
@mock.patch('nova.compute.api.API.update_instance_metadata')
def test_update_all_server_Metadata_policy(self, mock_quota):
rule_name = policies.POLICY_ROOT % 'update_all'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update_all,
- self.req, self.instance.uuid,
- body={"metadata": {"key9": "value9"}})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.update_all,
+ self.req, self.instance.uuid,
+ body={"metadata": {"key9": "value9"}})
@mock.patch('nova.compute.api.API.get_instance_metadata')
@mock.patch('nova.compute.api.API.delete_instance_metadata')
def test_delete_server_Metadata_policy(self, mock_delete, mock_get):
rule_name = policies.POLICY_ROOT % 'delete'
mock_get.return_value = {'key9': 'value'}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, self.instance.uuid, 'key9')
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, self.instance.uuid, 'key9')
+
+
+class ServerMetadataNoLegacyNoScopePolicyTest(ServerMetadataPolicyTest):
+ """Test Server Metadata APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerMetadataNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, the legacy admin loses power.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerMetadataScopeTypePolicyTest(ServerMetadataPolicyTest):
@@ -141,9 +138,15 @@ class ServerMetadataScopeTypePolicyTest(ServerMetadataPolicyTest):
def setUp(self):
super(ServerMetadataScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users are no longer allowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerMetadataNoLegacyPolicyTest(ServerMetadataScopeTypePolicyTest):
+class ServerMetadataScopeTypeNoLegacyPolicyTest(
+ ServerMetadataScopeTypePolicyTest):
"""Test Server Metadata APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -151,32 +154,10 @@ class ServerMetadataNoLegacyPolicyTest(ServerMetadataScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(ServerMetadataNoLegacyPolicyTest, self).setUp()
- # Check that system admin or project member is able to create, update
- # and delete the server metadata.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context, self.project_admin_context,
- self.project_member_context]
- # Check that non-system/admin/member is not able to create, update
- # and delete the server metadata.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_reader_context,
- self.system_foo_context, self.system_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that system admin or project member is able to
- # get the server metadata.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
- # Check that non-system/admin/member is not able to
- # get the server metadata.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(ServerMetadataScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy and scope enabled, only project admin, member,
+ # and reader will be allowed to operate on the server metadata.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
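
The recurring conversion in these files is from common_policy_check, which took both an authorized and an unauthorized context list, to common_policy_auth, which takes only the authorized list. A rough sketch of the single-list idea, assuming the unauthorized set is derived as the complement of all contexts (an illustration, not the real helper):

def common_policy_auth(all_contexts, authorized_contexts, is_allowed):
    # is_allowed(ctx) -> bool stands in for the real policy enforcement call.
    unauthorized = set(all_contexts) - set(authorized_contexts)
    authorize_res = [ctx for ctx in authorized_contexts if is_allowed(ctx)]
    unauthorize_res = [ctx for ctx in unauthorized if not is_allowed(ctx)]
    return authorize_res, unauthorize_res

# Only the allowed contexts are listed; everyone else is implicitly expected
# to be denied, so the tests no longer maintain two parallel lists.
all_ctx = {'admin', 'member', 'reader', 'other_project_member'}
allowed = {'admin', 'member'}
print(common_policy_auth(all_ctx, allowed, lambda ctx: ctx in allowed))
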
diff --git a/nova/tests/unit/policies/test_server_migrations.py b/nova/tests/unit/policies/test_server_migrations.py
index b06d9ec167..b17d4ded1d 100644
--- a/nova/tests/unit/policies/test_server_migrations.py
+++ b/nova/tests/unit/policies/test_server_migrations.py
@@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import fixtures
-import mock
+from unittest import mock
+import fixtures
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_migrations
@@ -45,42 +45,18 @@ class ServerMigrationsPolicyTest(base.BasePolicyTest):
vm_state=vm_states.ACTIVE)
self.mock_get.return_value = self.instance
- # Check that admin is able to perform operations
+ # With legacy rule, any admin is able to perform operations
# for server migrations.
- self.admin_authorized_contexts = [
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to perform operations
- # for server migrations.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
- # Check that system-reader are able to perform operations
- # for server migrations.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
- self.project_admin_context]
- # Check that non-system-reader are not able to perform operations
- # for server migrations.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.get_migrations_in_progress_by_instance')
def test_list_server_migrations_policy(self, mock_get):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req, self.instance.uuid)
@mock.patch('nova.api.openstack.compute.server_migrations.output')
@mock.patch('nova.compute.api.API.get_migration_by_id_and_instance')
@@ -90,27 +66,32 @@ class ServerMigrationsPolicyTest(base.BasePolicyTest):
migration_type='live-migration',
status='running',
)
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, self.instance.uuid, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, self.instance.uuid, 11111)
@mock.patch('nova.compute.api.API.live_migrate_abort')
def test_delete_server_migrations_policy(self, mock_delete):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, self.instance.uuid, 11111)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, self.instance.uuid, 11111)
@mock.patch('nova.compute.api.API.live_migrate_force_complete')
def test_force_delete_server_migrations_policy(self, mock_force):
rule_name = policies.POLICY_ROOT % 'force_complete'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller._force_complete,
- self.req, self.instance.uuid, 11111,
- body={"force_complete": None})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller._force_complete,
+ self.req, self.instance.uuid, 11111,
+ body={"force_complete": None})
+
+
+class ServerMigrationsNoLegacyNoScopeTest(ServerMigrationsPolicyTest):
+ """Test Server Migrations API policies with deprecated rules
+ disabled, but scope checking still disabled.
+ """
+
+ without_deprecated_rules = True
class ServerMigrationsScopeTypePolicyTest(ServerMigrationsPolicyTest):
@@ -126,48 +107,21 @@ class ServerMigrationsScopeTypePolicyTest(ServerMigrationsPolicyTest):
def setUp(self):
super(ServerMigrationsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system admin is not allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class ServerMigrationsNoLegacyPolicyTest(ServerMigrationsScopeTypePolicyTest):
+class ServerMigrationsScopeTypeNoLegacyPolicyTest(
+ ServerMigrationsScopeTypePolicyTest):
"""Test Server Migrations APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
- def setUp(self):
- super(ServerMigrationsNoLegacyPolicyTest, self).setUp()
- # Check that admin is able to perform operations
- # for server migrations.
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non-admin is not able to perform operations
- # for server migrations.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that system reader is able to perform operations
- # for server migrations.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system-reader is not able to perform operations
- # for server migrations.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
-class ServerMigrationsOverridePolicyTest(ServerMigrationsNoLegacyPolicyTest):
+class ServerMigrationsOverridePolicyTest(
+ ServerMigrationsScopeTypeNoLegacyPolicyTest):
"""Test Server Migrations APIs policies with system and project scoped
but default to system roles only are allowed for project roles
if override by operators. This test is with system scope enable
@@ -181,38 +135,16 @@ class ServerMigrationsOverridePolicyTest(ServerMigrationsNoLegacyPolicyTest):
rule_force = policies.POLICY_ROOT % 'force_complete'
rule_delete = policies.POLICY_ROOT % 'delete'
# NOTE(gmann): override the rule to project member and verify it
- # work as policy is system and projct scoped.
+ # works as the policy is project scoped.
self.policy.set_rules({
- rule_show: base_policy.PROJECT_READER_OR_SYSTEM_READER,
- rule_list: base_policy.PROJECT_READER_OR_SYSTEM_READER,
- rule_force: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN,
- rule_delete: base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN},
+ rule_show: base_policy.PROJECT_READER,
+ rule_list: base_policy.PROJECT_READER,
+ rule_force: base_policy.PROJECT_READER,
+ rule_delete: base_policy.PROJECT_READER},
overwrite=False)
- # Check that system admin or project scoped role as override above
+ # Check that the project reader, as overridden above,
# is able to migrate the server
- self.admin_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system admin or project role is not able to
- # migrate the server
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
- # Check that system reader is able to perform operations
- # for server migrations.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context]
- # Check that non-system-reader is not able to perform operations
- # for server migrations.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.other_project_member_context, self.project_foo_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_authorized_contexts = [
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context]
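
The override test above sets the migration policies to a project-reader check, which is the same kind of change an operator makes when relaxing a default rule. A standalone oslo.policy sketch of that mechanism, assuming an illustrative policy name and check string (not necessarily nova's generated defaults):

from oslo_config import cfg
from oslo_policy import policy

enforcer = policy.Enforcer(cfg.CONF)
enforcer.set_rules(policy.Rules.from_dict({
    'os_compute_api:servers:migrations:show':
        'role:reader and project_id:%(project_id)s',
}))

creds = {'roles': ['reader'], 'project_id': 'proj1'}
target = {'project_id': 'proj1'}
# True: a project reader acting on its own project passes the overridden rule.
print(enforcer.enforce('os_compute_api:servers:migrations:show', target, creds))
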
diff --git a/nova/tests/unit/policies/test_server_password.py b/nova/tests/unit/policies/test_server_password.py
index 1a28cf9f20..b163c6c562 100644
--- a/nova/tests/unit/policies/test_server_password.py
+++ b/nova/tests/unit/policies/test_server_password.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_password
@@ -41,51 +42,55 @@ class ServerPasswordPolicyTest(base.BasePolicyTest):
id=1, uuid=uuids.fake_id, project_id=self.project_id,
system_metadata={}, expected_attrs=['system_metadata'])
self.mock_get.return_value = self.instance
-
- # Check that admin or and server owner is able to
- # delete the server password.
- self.admin_or_owner_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to delete
- # the server password.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that admin or and server owner is able to get
- # the server password.
- self.reader_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project members,
+ # project readers, or any other project role (because the legacy rule
+ # allows the server owner, i.e. same project_id, with no role check) are
+ # able to delete the server password.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.system_member_context, self.system_reader_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to get
- # the server password.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # and they can get their own server password.
+ self.project_reader_authorized_contexts = (
+ self.project_member_authorized_contexts)
@mock.patch('nova.api.metadata.password.extract_password')
def test_index_server_password_policy(self, mock_pass):
rule_name = policies.BASE_POLICY_NAME % 'show'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
@mock.patch('nova.api.metadata.password.convert_password')
def test_clear_server_password_policy(self, mock_pass):
rule_name = policies.BASE_POLICY_NAME % 'clear'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.clear,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.clear,
+ self.req, self.instance.uuid)
+
+
+class ServerPasswordNoLegacyNoScopePolicyTest(ServerPasswordPolicyTest):
+ """Test Server Password APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.BASE_POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.BASE_POLICY_NAME % 'clear':
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
+
+ def setUp(self):
+ super(ServerPasswordNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, the legacy admin loses power.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerPasswordScopeTypePolicyTest(ServerPasswordPolicyTest):
@@ -101,50 +106,30 @@ class ServerPasswordScopeTypePolicyTest(ServerPasswordPolicyTest):
def setUp(self):
super(ServerPasswordScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users are no longer allowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerPasswordNoLegacyPolicyTest(ServerPasswordScopeTypePolicyTest):
+class ServerPasswordScopeTypeNoLegacyPolicyTest(
+ ServerPasswordScopeTypePolicyTest):
"""Test Server Password APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
rules_without_deprecation = {
policies.BASE_POLICY_NAME % 'show':
- base_policy.PROJECT_READER_OR_SYSTEM_READER,
+ base_policy.PROJECT_READER_OR_ADMIN,
policies.BASE_POLICY_NAME % 'clear':
- base_policy.PROJECT_MEMBER_OR_SYSTEM_ADMIN}
+ base_policy.PROJECT_MEMBER_OR_ADMIN}
def setUp(self):
- super(ServerPasswordNoLegacyPolicyTest, self).setUp()
-
- # Check that system or projct admin or owner is able to clear
- # server password.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system and non-admin/owner is not able to clear
- # server password.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.project_reader_context,
- self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context]
-
- # Check that system reader or projct owner is able to get
- # server password.
- self.reader_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context,
- ]
-
- # Check that non-system reader nd non-admin/owner is not able to get
- # server password.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(ServerPasswordScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy and scope enabled, only project admin, member,
+ # and reader will be allowed to operate on the server password.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_tags.py b/nova/tests/unit/policies/test_server_tags.py
index b7efe86364..412177408c 100644
--- a/nova/tests/unit/policies/test_server_tags.py
+++ b/nova/tests/unit/policies/test_server_tags.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import server_tags
@@ -50,51 +51,32 @@ class ServerTagsPolicyTest(base.BasePolicyTest):
self.stub_out('nova.objects.InstanceMapping.get_by_instance_uuid',
lambda s, c, u: inst_map)
- # Check that admin or and server owner is able to perform
+ # With the legacy rule and no scope checks, all admins, project members,
+ # project readers, or any other project role (because the legacy rule
+ # allows the server owner, i.e. same project_id, with no role check) can perform
# operations on server tags.
- self.admin_or_owner_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context
- ]
- # Check that non-admin/owner is not able to perform operations
- # on server tags
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that reader or and server owner is able to perform operations
- # on server tags.
- self.reader_or_owner_authorized_contexts = [
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.system_member_context, self.system_reader_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-reader/owner is not able to perform operations
- # on server tags.
- self.reader_or_owner_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ self.project_reader_authorized_contexts = (
+ self.project_member_authorized_contexts)
@mock.patch('nova.objects.TagList.get_by_resource_id')
def test_index_server_tags_policy(self, mock_tag):
rule_name = policies.POLICY_ROOT % 'index'
- self.common_policy_check(self.reader_or_owner_authorized_contexts,
- self.reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
@mock.patch('nova.objects.Tag.exists')
def test_show_server_tags_policy(self, mock_exists):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.reader_or_owner_authorized_contexts,
- self.reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, self.instance.uuid, uuids.fake_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, self.instance.uuid, uuids.fake_id)
@mock.patch('nova.notifications.base.send_instance_update_notification')
@mock.patch('nova.db.main.api.instance_tag_get_by_instance_uuid')
@@ -102,33 +84,30 @@ class ServerTagsPolicyTest(base.BasePolicyTest):
def test_update_server_tags_policy(self, mock_create, mock_tag,
mock_notf):
rule_name = policies.POLICY_ROOT % 'update'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, self.instance.uuid, uuids.fake_id,
- body=None)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, self.instance.uuid, uuids.fake_id,
+ body=None)
@mock.patch('nova.notifications.base.send_instance_update_notification')
@mock.patch('nova.db.main.api.instance_tag_set')
def test_update_all_server_tags_policy(self, mock_set, mock_notf):
rule_name = policies.POLICY_ROOT % 'update_all'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update_all,
- self.req, self.instance.uuid,
- body={'tags': ['tag1', 'tag2']})
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.update_all,
+ self.req, self.instance.uuid,
+ body={'tags': ['tag1', 'tag2']})
@mock.patch('nova.notifications.base.send_instance_update_notification')
@mock.patch('nova.objects.TagList.destroy')
def test_delete_all_server_tags_policy(self, mock_destroy, mock_notf):
rule_name = policies.POLICY_ROOT % 'delete_all'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete_all,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.delete_all,
+ self.req, self.instance.uuid)
@mock.patch('nova.notifications.base.send_instance_update_notification')
@mock.patch('nova.db.main.api.instance_tag_get_by_instance_uuid')
@@ -136,11 +115,27 @@ class ServerTagsPolicyTest(base.BasePolicyTest):
def test_delete_server_tags_policy(self, mock_destroy, mock_get,
mock_notf):
rule_name = policies.POLICY_ROOT % 'delete'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, self.instance.uuid, uuids.fake_id)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, self.instance.uuid, uuids.fake_id)
+
+
+class ServerTagsNoLegacyNoScopePolicyTest(ServerTagsPolicyTest):
+ """Test Server Tags APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerTagsNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, the legacy admin loses power.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class ServerTagsScopeTypePolicyTest(ServerTagsPolicyTest):
@@ -156,9 +151,14 @@ class ServerTagsScopeTypePolicyTest(ServerTagsPolicyTest):
def setUp(self):
super(ServerTagsScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope enabled, system users are no longer allowed.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerTagsNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
+class ServerTagsScopeTypeNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
"""Test Server Tags APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -166,32 +166,10 @@ class ServerTagsNoLegacyPolicyTest(ServerTagsScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(ServerTagsNoLegacyPolicyTest, self).setUp()
- # Check that system admin or project member is able to
- # perform operations on server tags.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context, self.project_admin_context,
- self.project_member_context]
- # Check that non-system/admin/member is not able to
- # perform operations on server tags.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_reader_context,
- self.system_foo_context, self.system_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that system reader or owner is able to
- # perform operations on server tags.
- self.reader_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context]
- # Check that non-system/reader/owner is not able to
- # perform operations on server tags.
- self.reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
+ super(ServerTagsScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy and scope enabled, only project admin, member,
+ # and reader will be allowed to operate on server tags.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_server_topology.py b/nova/tests/unit/policies/test_server_topology.py
index 51a3206a97..e2f81dfaad 100644
--- a/nova/tests/unit/policies/test_server_topology.py
+++ b/nova/tests/unit/policies/test_server_topology.py
@@ -51,40 +51,23 @@ class ServerTopologyPolicyTest(base.BasePolicyTest):
# Check that system reader or and server owner is able to get
# the server topology.
- self.system_reader_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, any admin is able to get
+ # the server topology with host info.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context]
- # Check that non-stem reader/owner is not able to get
- # the server topology.
- self.system_reader_or_owner_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that system reader is able to get the server topology
- # host information.
- self.system_reader_authorized_contexts = [
+ self.project_admin_context]
+ # and project reader can get their server topology without host info.
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to get the server topology
- # host information.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context
- ]
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
def test_index_server_topology_policy(self):
rule_name = policies.BASE_POLICY_NAME % 'index'
- self.common_policy_check(
- self.system_reader_or_owner_authorized_contexts,
- self.system_reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req, self.instance.uuid)
def test_index_host_server_topology_policy(self):
rule_name = policies.BASE_POLICY_NAME % 'host:index'
@@ -93,9 +76,8 @@ class ServerTopologyPolicyTest(base.BasePolicyTest):
# fail first for unauthorized contexts.
rule = policies.BASE_POLICY_NAME % 'index'
self.policy.set_rules({rule: "@"}, overwrite=False)
- authorize_res, unauthorize_res = self.common_policy_check(
- self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.index, self.req, self.instance.uuid,
fatal=False)
for resp in authorize_res:
@@ -106,6 +88,20 @@ class ServerTopologyPolicyTest(base.BasePolicyTest):
self.assertNotIn('cpu_pinning', resp['nodes'][0])
+class ServerTopologyNoLegacyNoScopePolicyTest(ServerTopologyPolicyTest):
+ """Test Server Topology APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServerTopologyNoLegacyNoScopePolicyTest, self).setUp()
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
+
+
class ServerTopologyScopeTypePolicyTest(ServerTopologyPolicyTest):
"""Test Server Topology APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -119,24 +115,15 @@ class ServerTopologyScopeTypePolicyTest(ServerTopologyPolicyTest):
def setUp(self):
super(ServerTopologyScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system reader is able to get the server topology
- # host information.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system/reader is not able to get the server topology
- # host information.
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context,
- ]
+ # With scope enabled, system users are no longer allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class ServerTopologyNoLegacyPolicyTest(ServerTopologyScopeTypePolicyTest):
+class ServerTopologyScopeTypeNoLegacyPolicyTest(
+ ServerTopologyScopeTypePolicyTest):
"""Test Server Topology APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
access system APIs.
@@ -144,18 +131,8 @@ class ServerTopologyNoLegacyPolicyTest(ServerTopologyScopeTypePolicyTest):
without_deprecated_rules = True
def setUp(self):
- super(ServerTopologyNoLegacyPolicyTest, self).setUp()
- # Check that system reader/owner is able to get
- # the server topology.
- self.system_reader_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.system_member_context, self.system_reader_context,
- self.project_reader_context]
- # Check that non-system/reader/owner is not able to get
- # the server topology.
- self.system_reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.other_project_member_context, self.project_foo_context,
- self.other_project_reader_context,
- ]
+ super(ServerTopologyScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With no legacy and scope enabled, only project admin, member,
+ # and reader will be able to get the server topology.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
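
The topology tests above exercise a two-level check: the base index rule gates the response itself, while the host:index rule, checked non-fatally, decides whether host-level fields such as cpu_pinning are included. A minimal sketch of that shape, with placeholder names rather than the actual controller code:

def build_topology_view(nodes, can):
    # can(rule) -> bool is a soft (non-fatal) policy check supplied by the caller.
    show_host_info = can('host:index')
    view = {'nodes': []}
    for node in nodes:
        entry = {'vcpu_set': node['vcpu_set']}
        if show_host_info:
            # Host-level details only for contexts that pass the admin-only rule.
            entry['cpu_pinning'] = node['cpu_pinning']
        view['nodes'].append(entry)
    return view

nodes = [{'vcpu_set': [0, 1], 'cpu_pinning': {0: 4, 1: 5}}]
print(build_topology_view(nodes, can=lambda rule: False))  # no cpu_pinning
print(build_topology_view(nodes, can=lambda rule: True))   # includes cpu_pinning
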
diff --git a/nova/tests/unit/policies/test_servers.py b/nova/tests/unit/policies/test_servers.py
index 33aadb948f..eee1e4ba51 100644
--- a/nova/tests/unit/policies/test_servers.py
+++ b/nova/tests/unit/policies/test_servers.py
@@ -10,9 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import functools
+from unittest import mock
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -20,12 +21,14 @@ from nova.api.openstack.compute import migrate_server
from nova.api.openstack.compute import servers
from nova.compute import api as compute
from nova.compute import vm_states
+import nova.conf
from nova import exception
from nova.network import model
from nova.network import neutron
from nova import objects
from nova.objects import fields
from nova.objects.instance_group import InstanceGroup
+from nova.policies import base as base_policy
from nova.policies import extended_server_attributes as ea_policies
from nova.policies import servers as policies
from nova.tests.unit.api.openstack import fakes
@@ -33,6 +36,8 @@ from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit.policies import base
+CONF = nova.conf.CONF
+
class ServersPolicyTest(base.BasePolicyTest):
"""Test Servers APIs policies with all possible context.
@@ -114,137 +119,41 @@ class ServersPolicyTest(base.BasePolicyTest):
'OS-EXT-SRV-ATTR:user_data'
]
- # Check that admin or and owner is able to update, delete
- # or perform server action.
- self.admin_or_owner_authorized_contexts = [
+ # Users that can take action on *our* project resources
+ self.project_action_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to update, delete
- # or perform server action.
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ self.project_reader_context, self.project_foo_context,
+ ])
- # Check that system reader or owner is able to get
- # the server.
- self.system_reader_or_owner_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.system_member_context,
- self.system_reader_context, self.project_foo_context
- ]
- self.system_reader_or_owner_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # Users that can read *our* project resources
+ self.project_reader_authorized_contexts = (
+ self.project_action_authorized_contexts)
- # Check that everyone is able to list their own server.
- self.everyone_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context]
- self.everyone_unauthorized_contexts = [
- ]
- # Check that admin is able to create server with host request
- # and get server extended attributes or host status.
- self.admin_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to create server with host request
- # and get server extended attributes or host status.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that sustem reader is able to list the server
- # for all projects.
- self.system_reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to list the server
- # for all projects.
- self.system_reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that project member is able to create serve
- self.project_member_authorized_contexts = [
+ # Users that _see_ project-scoped resources that they own
+ self.everyone_authorized_contexts = set(self.all_contexts)
+
+ # Users that can _do_ things to project-scoped resources they own
+ self.project_member_authorized_contexts = set(self.all_contexts)
+
+ # Users able to do admin things on project resources
+ self.project_admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.system_member_context, self.system_reader_context,
- self.other_project_member_context, self.system_foo_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_reader_context]
- # Check that non-project member is not able to create server
- self.project_member_unauthorized_contexts = [
- ]
- # Check that project admin is able to create server with requested
- # destination.
- self.project_admin_authorized_contexts = [
+ self.project_admin_context])
+
+ # Admins (for APIs that do not pass the project id as the policy
+ # target, for example create server or list servers in detail) are
+ # able to get all projects' servers, create a server on a specific
+ # host, etc. This is an admin on any project because the policy does
+ # not check the project id, but they will still only be able to
+ # create servers and get servers (unless the all-tenants policy is
+ # allowed) for their own project.
+ self.all_projects_admin_authorized_contexts = set([
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-project admin is not able to create server with
- # requested destination
- self.project_admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that no one is able to resize cross cell.
+ self.project_admin_context])
+
+ # Users able to do cross-cell migrations
self.cross_cell_authorized_contexts = []
- self.cross_cell_unauthorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context]
- # Check that admin is able to access the zero disk flavor
- # and external network policies.
- self.zero_disk_external_net_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to caccess the zero disk flavor
- # and external network policies.
- self.zero_disk_external_net_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that admin is able to get server extended attributes
- # or host status.
- self.server_attr_admin_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context]
- # Check that non-admin is not able to get server extended attributes
- # or host status.
- self.server_attr_admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
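
The single positive list works because the auth helper is expected to treat every other known context as unauthorized; a minimal sketch of that assumption (the real helper lives in nova/tests/unit/policies/base.py and may differ in detail):

    def common_policy_auth(self, authorized_contexts, rule_name, func, req,
                           *args, **kwargs):
        # Assumed behaviour: whatever is not authorized must fail the check.
        unauthorized_contexts = (set(self.all_contexts) -
                                 set(authorized_contexts))
        return self.common_policy_check(authorized_contexts,
                                        unauthorized_contexts,
                                        rule_name, func, req, *args, **kwargs)
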
def test_index_server_policy(self):
@@ -261,9 +170,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.mock_get_all.side_effect = fake_get_all
rule_name = policies.SERVERS % 'index'
- self.common_policy_check(
+ self.common_policy_auth(
self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
rule_name,
self.controller.index,
self.req)
@@ -287,11 +195,16 @@ class ServersPolicyTest(base.BasePolicyTest):
self.mock_get_all.side_effect = fake_get_all
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- req)
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(
+ base.rule_if_system, rule, rule_name)
+
+ self.common_policy_auth(self.all_projects_admin_authorized_contexts,
+ check_rule,
+ self.controller.index,
+ req)
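
With scope enforcement on, the rule that actually gets enforced can differ per context, which is why the test builds check_rule with functools.partial; a hedged sketch of what base.rule_if_system might look like (its real definition is in the policy test base class):

    def rule_if_system(system_rule, other_rule, context):
        # Assumption: system-scoped tokens are checked against the broad
        # rule, everything else against the specific rule name.
        if context.system_scope == 'all':
            return system_rule
        return other_rule

    # common_policy_auth can then call check_rule(context) for each context:
    check_rule = functools.partial(rule_if_system, rule, rule_name)
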
@mock.patch('nova.compute.api.API.get_all')
def test_detail_list_server_policy(self, mock_get):
@@ -309,9 +222,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.mock_get_all.side_effect = fake_get_all
rule_name = policies.SERVERS % 'detail'
- self.common_policy_check(
+ self.common_policy_auth(
self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
rule_name,
self.controller.detail,
self.req)
@@ -335,11 +247,16 @@ class ServersPolicyTest(base.BasePolicyTest):
self.mock_get_all.side_effect = fake_get_all
- self.common_policy_check(self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
- rule_name,
- self.controller.detail,
- req)
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(
+ base.rule_if_system, rule, rule_name)
+
+ self.common_policy_auth(self.all_projects_admin_authorized_contexts,
+ check_rule,
+ self.controller.detail,
+ req)
def test_index_server_allow_all_filters_policy(self):
# 'index' policy is checked before 'allow_all_filters' so
@@ -353,9 +270,9 @@ class ServersPolicyTest(base.BasePolicyTest):
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
- if context in self.system_reader_unauthorized_contexts:
+ if context not in self.all_projects_admin_authorized_contexts:
self.assertNotIn('host', search_opts)
- if context in self.system_reader_authorized_contexts:
+ if context in self.all_projects_admin_authorized_contexts:
self.assertIn('host', search_opts)
return objects.InstanceList(objects=self.servers)
@@ -363,9 +280,8 @@ class ServersPolicyTest(base.BasePolicyTest):
req = fakes.HTTPRequest.blank('/servers?host=1')
rule_name = policies.SERVERS % 'allow_all_filters'
- self.common_policy_check(
- self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
+ self.common_policy_auth(
+ self.all_projects_admin_authorized_contexts,
rule_name,
self.controller.index,
req, fatal=False)
@@ -382,18 +298,17 @@ class ServersPolicyTest(base.BasePolicyTest):
expected_attrs=None, sort_keys=None, sort_dirs=None,
cell_down_support=False, all_tenants=False):
self.assertIsNotNone(search_opts)
- if context in self.system_reader_unauthorized_contexts:
+ if context not in self.all_projects_admin_authorized_contexts:
self.assertNotIn('host', search_opts)
- if context in self.system_reader_authorized_contexts:
+ if context in self.all_projects_admin_authorized_contexts:
self.assertIn('host', search_opts)
return objects.InstanceList(objects=self.servers)
self.mock_get_all.side_effect = fake_get_all
req = fakes.HTTPRequest.blank('/servers?host=1')
rule_name = policies.SERVERS % 'allow_all_filters'
- self.common_policy_check(
- self.system_reader_authorized_contexts,
- self.system_reader_unauthorized_contexts,
+ self.common_policy_auth(
+ self.all_projects_admin_authorized_contexts,
rule_name,
self.controller.detail,
req, fatal=False)
@@ -401,22 +316,117 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
def test_show_server_policy(self, mock_bdm):
rule_name = policies.SERVERS % 'show'
- self.common_policy_check(
- self.system_reader_or_owner_authorized_contexts,
- self.system_reader_or_owner_unauthorized_contexts,
+ # Show includes readers
+ self.common_policy_auth(
+ self.project_reader_authorized_contexts,
rule_name,
self.controller.show,
self.req, self.instance.uuid)
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ def test_server_show_with_extra_specs_policy(self, mock_get, mock_block):
+ rule = policies.SERVERS % 'show'
+ # server 'show' policy is checked before flavor extra specs
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.SERVERS % 'show:flavor-extra-specs'
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_reader_authorized_contexts,
+ rule_name, self.controller.show, req,
+ self.instance.uuid, fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['server']['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['server']['flavor'])
+
+ @mock.patch('nova.compute.api.API.get_all')
+ def test_server_detail_with_extra_specs_policy(self, mock_get):
+
+ def fake_get_all(context, search_opts=None,
+ limit=None, marker=None,
+ expected_attrs=None, sort_keys=None, sort_dirs=None,
+ cell_down_support=False, all_tenants=False):
+ self.assertIsNotNone(search_opts)
+ if 'project_id' in search_opts or 'user_id' in search_opts:
+ return objects.InstanceList(objects=self.servers)
+ else:
+ raise
+
+ self.mock_get_all.side_effect = fake_get_all
+ rule = policies.SERVERS % 'detail'
+ # server 'detail' policy is checked before flavor extra specs
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.SERVERS % 'show:flavor-extra-specs'
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.everyone_authorized_contexts,
+ rule_name, self.controller.detail, req,
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['servers'][0]['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['servers'][0]['flavor'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
+ @mock.patch('nova.compute.api.API.rebuild')
+ def test_server_rebuild_with_extra_specs_policy(self, mock_rebuild,
+ mock_get, mock_bdm):
+ rule = policies.SERVERS % 'rebuild'
+ # server 'rebuild' policy is checked before flavor extra specs
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.SERVERS % 'show:flavor-extra-specs'
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_reader_authorized_contexts,
+ rule_name, self.controller._action_rebuild,
+ req, self.instance.uuid,
+ body={'rebuild': {"imageRef": uuids.fake_id}},
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp.obj['server']['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp.obj['server']['flavor'])
+
+ @mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
+ @mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
+ @mock.patch('nova.compute.api.API.update_instance')
+ def test_server_update_with_extra_specs_policy(self,
+ mock_update, mock_group, mock_bdm):
+ mock_update.return_value = self.instance
+ rule = policies.SERVERS % 'update'
+ # server 'update' policy is checked before flavor extra specs
+ # policy so we have to allow it for everyone otherwise it will fail
+ # first for unauthorized contexts.
+ self.policy.set_rules({rule: "@"}, overwrite=False)
+ req = fakes.HTTPRequest.blank('', version='2.47')
+ rule_name = policies.SERVERS % 'show:flavor-extra-specs'
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_reader_authorized_contexts,
+ rule_name, self.controller.update,
+ req, self.instance.uuid,
+ body={'server': {'name': 'test'}},
+ fatal=False)
+ for resp in authorize_res:
+ self.assertIn('extra_specs', resp['server']['flavor'])
+ for resp in unauthorize_res:
+ self.assertNotIn('extra_specs', resp['server']['flavor'])
+
@mock.patch('nova.compute.api.API.create')
def test_create_server_policy(self, mock_create):
mock_create.return_value = ([self.instance], '')
rule_name = policies.SERVERS % 'create'
- self.common_policy_check(self.project_member_authorized_contexts,
- self.project_member_unauthorized_contexts,
- rule_name,
- self.controller.create,
- self.req, body=self.body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name,
+ self.controller.create,
+ self.req, body=self.body)
@mock.patch('nova.compute.api.API.create')
@mock.patch('nova.compute.api.API.parse_availability_zone')
@@ -431,11 +441,10 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
mock_create.return_value = ([self.instance], '')
mock_az.return_value = ('test', 'host', None)
- self.common_policy_check(self.project_admin_authorized_contexts,
- self.project_admin_unauthorized_contexts,
- self.rule_forced_host,
- self.controller.create,
- self.req, body=self.body)
+ self.common_policy_auth(self.all_projects_admin_authorized_contexts,
+ self.rule_forced_host,
+ self.controller.create,
+ self.req, body=self.body)
@mock.patch('nova.compute.api.API.create')
def test_create_attach_volume_server_policy(self, mock_create):
@@ -453,11 +462,10 @@ class ServersPolicyTest(base.BasePolicyTest):
'block_device_mapping': [{'device_name': 'foo'}],
},
}
- self.common_policy_check(self.project_member_authorized_contexts,
- self.project_member_unauthorized_contexts,
- self.rule_attach_volume,
- self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ self.rule_attach_volume,
+ self.controller.create,
+ self.req, body=body)
@mock.patch('nova.compute.api.API.create')
def test_create_attach_network_server_policy(self, mock_create):
@@ -477,11 +485,10 @@ class ServersPolicyTest(base.BasePolicyTest):
}],
},
}
- self.common_policy_check(self.project_member_authorized_contexts,
- self.project_member_unauthorized_contexts,
- self.rule_attach_network,
- self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ self.rule_attach_network,
+ self.controller.create,
+ self.req, body=body)
@mock.patch('nova.compute.api.API.create')
def test_create_trusted_certs_server_policy(self, mock_create):
@@ -504,20 +511,18 @@ class ServersPolicyTest(base.BasePolicyTest):
},
}
- self.common_policy_check(self.project_member_authorized_contexts,
- self.project_member_unauthorized_contexts,
- self.rule_trusted_certs,
- self.controller.create,
- req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ self.rule_trusted_certs,
+ self.controller.create,
+ req, body=body)
@mock.patch('nova.compute.api.API.delete')
def test_delete_server_policy(self, mock_delete):
rule_name = policies.SERVERS % 'delete'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.delete,
- self.req, self.instance.uuid)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller.delete,
+ self.req, self.instance.uuid)
def test_delete_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -547,11 +552,10 @@ class ServersPolicyTest(base.BasePolicyTest):
rule_name = policies.SERVERS % 'update'
body = {'server': {'name': 'test'}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.update,
- self.req, self.instance.uuid, body=body)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller.update,
+ self.req, self.instance.uuid, body=body)
def test_update_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -582,44 +586,40 @@ class ServersPolicyTest(base.BasePolicyTest):
def test_confirm_resize_server_policy(self, mock_confirm_resize):
rule_name = policies.SERVERS % 'confirm_resize'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_confirm_resize,
- self.req, self.instance.uuid,
- body={'confirmResize': 'null'})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_confirm_resize,
+ self.req, self.instance.uuid,
+ body={'confirmResize': 'null'})
@mock.patch('nova.compute.api.API.revert_resize')
def test_revert_resize_server_policy(self, mock_revert_resize):
rule_name = policies.SERVERS % 'revert_resize'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_revert_resize,
- self.req, self.instance.uuid,
- body={'revertResize': 'null'})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_revert_resize,
+ self.req, self.instance.uuid,
+ body={'revertResize': 'null'})
@mock.patch('nova.compute.api.API.reboot')
def test_reboot_server_policy(self, mock_reboot):
rule_name = policies.SERVERS % 'reboot'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_reboot,
- self.req, self.instance.uuid,
- body={'reboot': {'type': 'soft'}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_reboot,
+ self.req, self.instance.uuid,
+ body={'reboot': {'type': 'soft'}})
@mock.patch('nova.compute.api.API.resize')
def test_resize_server_policy(self, mock_resize):
rule_name = policies.SERVERS % 'resize'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_resize,
- self.req, self.instance.uuid,
- body={'resize': {'flavorRef': 'f1'}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_resize,
+ self.req, self.instance.uuid,
+ body={'resize': {'flavorRef': 'f1'}})
def test_resize_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -650,23 +650,21 @@ class ServersPolicyTest(base.BasePolicyTest):
def test_start_server_policy(self, mock_start):
rule_name = policies.SERVERS % 'start'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._start_server,
- self.req, self.instance.uuid,
- body={'os-start': 'null'})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._start_server,
+ self.req, self.instance.uuid,
+ body={'os-start': 'null'})
@mock.patch('nova.compute.api.API.stop')
def test_stop_server_policy(self, mock_stop):
rule_name = policies.SERVERS % 'stop'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._stop_server,
- self.req, self.instance.uuid,
- body={'os-stop': 'null'})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._stop_server,
+ self.req, self.instance.uuid,
+ body={'os-stop': 'null'})
def test_stop_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -696,12 +694,11 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.API.rebuild')
def test_rebuild_server_policy(self, mock_rebuild):
rule_name = policies.SERVERS % 'rebuild'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_rebuild,
- self.req, self.instance.uuid,
- body={'rebuild': {"imageRef": uuids.fake_id}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_rebuild,
+ self.req, self.instance.uuid,
+ body={'rebuild': {"imageRef": uuids.fake_id}})
def test_rebuild_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -743,11 +740,17 @@ class ServersPolicyTest(base.BasePolicyTest):
'trusted_image_certificates': [uuids.fake_id],
},
}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_rebuild,
- req, self.instance.uuid, body=body)
+
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(
+ base.rule_if_system, rule, rule_name)
+
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ check_rule,
+ self.controller._action_rebuild,
+ req, self.instance.uuid, body=body)
def test_rebuild_trusted_certs_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -796,12 +799,11 @@ class ServersPolicyTest(base.BasePolicyTest):
def test_create_image_server_policy(self, mock_snapshot, mock_image,
mock_bdm):
rule_name = policies.SERVERS % 'create_image'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_create_image,
- self.req, self.instance.uuid,
- body={'createImage': {"name": 'test'}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_create_image,
+ self.req, self.instance.uuid,
+ body={'createImage': {"name": 'test'}})
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.image.glance.API.generate_image_url')
@@ -816,23 +818,26 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
rule_name = policies.SERVERS % 'create_image:allow_volume_backed'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_create_image,
- self.req, self.instance.uuid,
- body={'createImage': {"name": 'test'}})
+ if not CONF.oslo_policy.enforce_scope:
+ check_rule = rule_name
+ else:
+ check_rule = functools.partial(
+ base.rule_if_system, rule, rule_name)
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ check_rule,
+ self.controller._action_create_image,
+ self.req, self.instance.uuid,
+ body={'createImage': {"name": 'test'}})
@mock.patch('nova.compute.api.API.trigger_crash_dump')
def test_trigger_crash_dump_server_policy(self, mock_crash):
rule_name = policies.SERVERS % 'trigger_crash_dump'
req = fakes.HTTPRequest.blank('', version='2.17')
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._action_trigger_crash_dump,
- req, self.instance.uuid,
- body={'trigger_crash_dump': None})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._action_trigger_crash_dump,
+ req, self.instance.uuid,
+ body={'trigger_crash_dump': None})
def test_trigger_crash_dump_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -876,9 +881,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.3')
rule_name = ea_policies.BASE_POLICY_NAME
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_projects_admin_authorized_contexts,
rule_name, self.controller.detail, req,
fatal=False)
for attr in self.extended_attr:
@@ -897,9 +901,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.3')
rule_name = ea_policies.BASE_POLICY_NAME
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.show, req, 'fake',
fatal=False)
for attr in self.extended_attr:
@@ -920,9 +923,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = ea_policies.BASE_POLICY_NAME
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller._action_rebuild,
req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}},
@@ -940,8 +942,11 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
@mock.patch('nova.compute.api.API.update_instance')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
def test_server_update_with_extended_attr_policy(self,
- mock_update, mock_group, mock_bdm):
+ mock_status, mock_update, mock_group, mock_bdm):
+ mock_update.return_value = self.instance
+ mock_status.return_value = fields.HostStatus.UP
rule = policies.SERVERS % 'update'
# server 'update' policy is checked before extended attributes
# policy so we have to allow it for everyone otherwise it will fail
@@ -949,9 +954,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = ea_policies.BASE_POLICY_NAME
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.update,
req, self.instance.uuid,
body={'server': {'name': 'test'}},
@@ -977,9 +981,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_projects_admin_authorized_contexts,
rule_name, self.controller.detail, req,
fatal=False)
for resp in authorize_res:
@@ -998,9 +1001,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.show, req, 'fake',
fatal=False)
for resp in authorize_res:
@@ -1020,9 +1022,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller._action_rebuild,
req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}},
@@ -1035,8 +1036,11 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.objects.BlockDeviceMappingList.bdms_by_instance_uuid')
@mock.patch.object(InstanceGroup, 'get_by_instance_uuid')
@mock.patch('nova.compute.api.API.update_instance')
+ @mock.patch('nova.compute.api.API.get_instance_host_status')
def test_server_update_with_host_status_policy(self,
- mock_update, mock_group, mock_bdm):
+ mock_status, mock_update, mock_group, mock_bdm):
+ mock_update.return_value = self.instance
+ mock_status.return_value = fields.HostStatus.UP
rule = policies.SERVERS % 'update'
# server 'update' policy is checked before host_status
# policy so we have to allow it for everyone otherwise it will fail
@@ -1044,9 +1048,8 @@ class ServersPolicyTest(base.BasePolicyTest):
self.policy.set_rules({rule: "@"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.update,
req, self.instance.uuid,
body={'server': {'name': 'test'}},
@@ -1079,9 +1082,8 @@ class ServersPolicyTest(base.BasePolicyTest):
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.all_projects_admin_authorized_contexts,
rule_name, self.controller.detail, req,
fatal=False)
for resp in authorize_res:
@@ -1107,9 +1109,8 @@ class ServersPolicyTest(base.BasePolicyTest):
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.16')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.show, req, 'fake',
fatal=False)
for resp in authorize_res:
@@ -1136,9 +1137,8 @@ class ServersPolicyTest(base.BasePolicyTest):
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller._action_rebuild,
req, self.instance.uuid,
body={'rebuild': {"imageRef": uuids.fake_id}},
@@ -1156,6 +1156,7 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.API.update_instance')
def test_server_update_with_unknown_host_status_policy(self,
mock_update, mock_group, mock_status, mock_bdm):
+ mock_update.return_value = self.instance
mock_status.return_value = fields.HostStatus.UNKNOWN
rule = policies.SERVERS % 'update'
# server 'update' policy is checked before unknown host_status
@@ -1168,9 +1169,8 @@ class ServersPolicyTest(base.BasePolicyTest):
rule_host_status: "!"}, overwrite=False)
req = fakes.HTTPRequest.blank('', version='2.75')
rule_name = policies.SERVERS % 'show:host_status:unknown-only'
- authorize_res, unauthorize_res = self.common_policy_check(
- self.server_attr_admin_authorized_contexts,
- self.server_attr_admin_unauthorized_contexts,
+ authorize_res, unauthorize_res = self.common_policy_auth(
+ self.project_admin_authorized_contexts,
rule_name, self.controller.update,
req, self.instance.uuid,
body={'server': {'name': 'test'}},
@@ -1194,9 +1194,9 @@ class ServersPolicyTest(base.BasePolicyTest):
def fake_create(context, *args, **kwargs):
for attr in ['requested_host', 'requested_hypervisor_hostname']:
- if context in self.project_admin_authorized_contexts:
+ if context in self.all_projects_admin_authorized_contexts:
self.assertIn(attr, kwargs)
- if context in self.project_admin_unauthorized_contexts:
+ if context not in self.all_projects_admin_authorized_contexts:
self.assertNotIn(attr, kwargs)
return ([self.instance], '')
mock_create.side_effect = fake_create
@@ -1214,11 +1214,10 @@ class ServersPolicyTest(base.BasePolicyTest):
},
}
- self.common_policy_check(self.project_admin_authorized_contexts,
- self.project_admin_unauthorized_contexts,
- self.rule_requested_destination,
- self.controller.create,
- req, body=body)
+ self.common_policy_auth(self.all_projects_admin_authorized_contexts,
+ self.rule_requested_destination,
+ self.controller.create,
+ req, body=body)
@mock.patch(
'nova.servicegroup.api.API.service_is_up',
@@ -1230,10 +1229,9 @@ class ServersPolicyTest(base.BasePolicyTest):
@mock.patch('nova.compute.api.API._allow_resize_to_same_host')
@mock.patch('nova.objects.RequestSpec.get_by_instance_uuid')
@mock.patch('nova.objects.Instance.save')
- @mock.patch('nova.api.openstack.common.get_instance')
@mock.patch('nova.conductor.ComputeTaskAPI.resize_instance')
def test_cross_cell_resize_server_policy(
- self, mock_resize, mock_get, mock_save, mock_rs, mock_allow, m_net
+ self, mock_resize, mock_save, mock_rs, mock_allow, m_net
):
# 'migrate' policy is checked before 'resize:cross_cell' so
@@ -1263,13 +1261,13 @@ class ServersPolicyTest(base.BasePolicyTest):
)
return inst
- mock_get.side_effect = fake_get
+ self.mock_get.side_effect = fake_get
def fake_validate(context, instance,
host_name, allow_cross_cell_resize):
if context in self.cross_cell_authorized_contexts:
self.assertTrue(allow_cross_cell_resize)
- if context in self.cross_cell_unauthorized_contexts:
+ if context not in self.cross_cell_authorized_contexts:
self.assertFalse(allow_cross_cell_resize)
return objects.ComputeNode(host=1, hypervisor_hostname=2)
@@ -1277,23 +1275,24 @@ class ServersPolicyTest(base.BasePolicyTest):
'nova.compute.api.API._validate_host_for_cold_migrate',
fake_validate)
- self.common_policy_check(self.cross_cell_authorized_contexts,
- self.cross_cell_unauthorized_contexts,
- rule_name,
- self.m_controller._migrate,
- req, self.instance.uuid,
- body={'migrate': {'host': 'fake'}},
- fatal=False)
+ self.common_policy_auth(self.cross_cell_authorized_contexts,
+ rule_name,
+ self.m_controller._migrate,
+ req, self.instance.uuid,
+ body={'migrate': {'host': 'fake'}},
+ fatal=False)
def test_network_attach_external_network_policy(self):
# NOTE(gmann): Testing policy 'network:attach_external_network'
# which raise different error then PolicyNotAuthorized
# if not allowed.
neutron_api = neutron.API()
- for context in self.zero_disk_external_net_authorized_contexts:
+ for context in self.all_projects_admin_authorized_contexts:
neutron_api._check_external_network_attach(context,
[{'id': 1, 'router:external': 'ext'}])
- for context in self.zero_disk_external_net_unauthorized_contexts:
+ unauth = (set(self.all_contexts) -
+ set(self.all_projects_admin_authorized_contexts))
+ for context in unauth:
self.assertRaises(exception.ExternalNetworkAttachForbidden,
neutron_api._check_external_network_attach,
context, [{'id': 1, 'router:external': 'ext'}])
@@ -1306,16 +1305,63 @@ class ServersPolicyTest(base.BasePolicyTest):
flavor = objects.Flavor(
vcpus=1, memory_mb=512, root_gb=0, extra_specs={'hw:pmu': "true"})
compute_api = compute.API()
- for context in self.zero_disk_external_net_authorized_contexts:
+ for context in self.all_projects_admin_authorized_contexts:
compute_api._validate_flavor_image_nostatus(context,
image, flavor, None)
- for context in self.zero_disk_external_net_unauthorized_contexts:
+ unauth = (set(self.all_contexts) -
+ set(self.all_projects_admin_authorized_contexts))
+ for context in unauth:
self.assertRaises(
exception.BootFromVolumeRequiredForZeroDiskFlavor,
compute_api._validate_flavor_image_nostatus,
context, image, flavor, None)
+class ServersNoLegacyNoScopeTest(ServersPolicyTest):
+ """Test Servers API policies with deprecated rules disabled, but scope
+ checking still disabled.
+ """
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.SERVERS % 'show:flavor-extra-specs':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ }
+
+ def setUp(self):
+ super(ServersNoLegacyNoScopeTest, self).setUp()
+
+ # Disabling legacy rule support means that we no longer allow
+ # random roles on our project to take action on our
+ # resources. Legacy admin will have access.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+ # The only additional role that can read our resources is our
+ # own project_reader.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
+
+ # Disabling legacy support means random roles lose power to
+ # see everything in their project.
+ self.reduce_set('everyone_authorized',
+ self.all_contexts - set([self.project_foo_context,
+ self.system_foo_context]))
+
+ # Disabling legacy support means readers and random roles lose
+ # power to create things on their own projects. Note that
+ # system_admin and system_member are still here because we are
+ # not rejecting them by scope, even though these operations
+ # with those tokens are likely to fail because they have no
+ # project.
+ self.reduce_set('project_member_authorized',
+ self.all_contexts - set([
+ self.system_reader_context,
+ self.system_foo_context,
+ self.project_reader_context,
+ self.project_foo_context,
+ self.other_project_reader_context]))
+
+
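reduce_set() is how the no-legacy and scope-enabled subclasses shrink the context sets inherited from ServersPolicyTest; an illustrative sketch of the assumed helper (the subset assertion is a guess, not the authoritative definition):

    def reduce_set(self, name, new_set):
        # 'name' is assumed to be the prefix of a *_contexts attribute,
        # e.g. 'everyone_authorized' -> self.everyone_authorized_contexts.
        current = getattr(self, name + '_contexts')
        assert new_set.issubset(current), 'reduced set must not grow'
        setattr(self, name + '_contexts', set(new_set))
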
class ServersScopeTypePolicyTest(ServersPolicyTest):
"""Test Servers APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -1342,143 +1388,77 @@ class ServersScopeTypePolicyTest(ServersPolicyTest):
self.rule_requested_destination = None
self.rule_forced_host = None
- # Check that system admin is able to create server with host request
- # and get server extended attributes or host status.
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non-system/admin is not able to create server with
- # host request and get server extended attributes or host status.
- self.admin_unauthorized_contexts = [
- self.project_admin_context, self.legacy_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that system reader is able to list the server
- # for all projects.
- self.system_reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system reader is not able to list the server
- # for all projects.
- self.system_reader_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
-
- # Check if project member can create the server.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_reader_context
- ]
- # Check if non-project member cannot create the server.
- self.project_member_unauthorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context
- ]
-
- # Check that project admin is able to create server with requested
- # destination.
- self.project_admin_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context]
- # Check that non-project admin is not able to create server with
- # requested destination
- self.project_admin_unauthorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # With scope checking enabled, system admins no longer have
+ # admin-granted project resource access.
+ self.reduce_set('project_action_authorized',
+ set([self.legacy_admin_context,
+ self.project_admin_context,
+ self.project_member_context,
+ self.project_reader_context,
+ self.project_foo_context]))
+
+ # No change from the base behavior here, but we need to
+ # re-build this from project_action_authorized, since we
+ # changed it above.
+ self.project_reader_authorized_contexts = (
+ self.project_action_authorized_contexts)
+
+ # With scope checking enabled, system users no longer have
+ # project access, even to create their own resources.
+ self.reduce_set('project_member_authorized', self.all_project_contexts)
+
+ # With scope checking enabled, system admin is no longer an
+ # admin of project resources.
+ self.reduce_set('project_admin_authorized',
+ set([self.legacy_admin_context,
+ self.project_admin_context]))
+ self.reduce_set('all_projects_admin_authorized',
+ set([self.legacy_admin_context,
+ self.project_admin_context]))
+
+ # With scope checking enabled, system users also lose access to read
+ # project resources.
+ self.reduce_set('everyone_authorized',
+ self.all_contexts - self.all_system_contexts)
class ServersNoLegacyPolicyTest(ServersScopeTypePolicyTest):
"""Test Servers APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system_admin_or_owner APIs.
+ and no more deprecated rules.
"""
without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.SERVERS % 'show:flavor-extra-specs':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ }
def setUp(self):
super(ServersNoLegacyPolicyTest, self).setUp()
- # Check that system admin or owner is able to update, delete
- # or perform server action.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context,
- ]
- # Check that non-system and non-admin/owner is not able to update,
- # delete or perform server action.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context]
-
- # Check that system reader or projct owner is able to get
- # server.
- self.system_reader_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_member_context,
- ]
-
- # Check that non-system reader nd non-admin/owner is not able to get
- # server.
- self.system_reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.project_foo_context,
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- self.everyone_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context,
- self.project_member_context, self.project_reader_context,
- self.system_member_context, self.system_reader_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- self.everyone_unauthorized_contexts = [
- self.project_foo_context,
- self.system_foo_context
- ]
- # Check if project member can create the server.
- self.project_member_authorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.project_member_context,
- self.other_project_member_context
- ]
- # Check if non-project member cannot create the server.
- self.project_member_unauthorized_contexts = [
- self.system_admin_context,
- self.system_member_context, self.project_reader_context,
- self.project_foo_context, self.other_project_reader_context,
- self.system_reader_context, self.system_foo_context
- ]
- # Check that system admin is able to get server extended attributes
- # or host status.
- self.server_attr_admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system admin is not able to get server extended
- # attributes or host status.
- self.server_attr_admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context
- ]
+ # Disabling legacy support means legacy_admin is no longer
+ # powerful on our project. Also, we drop the "any role on the
+ # project means you can do stuff" behavior, so project_reader
+ # and project_foo lose power.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+
+ # Only project_reader has additional read access to our
+ # project resources.
+ self.project_reader_authorized_contexts = (
+ self.project_action_authorized_contexts |
+ set([self.project_reader_context]))
+
+ # Disabling legacy support means random roles lose power to
+ # see everything in their project.
+ self.reduce_set(
+ 'everyone_authorized',
+ self.all_project_contexts - set([self.project_foo_context]))
+
+ # Disabling legacy support means readers and random roles lose
+ # power to create things on their own projects.
+ self.reduce_set('project_member_authorized',
+ self.all_project_contexts - set([
+ self.project_foo_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ ]))
diff --git a/nova/tests/unit/policies/test_services.py b/nova/tests/unit/policies/test_services.py
index cdca5ebc7f..72465eb748 100644
--- a/nova/tests/unit/policies/test_services.py
+++ b/nova/tests/unit/policies/test_services.py
@@ -11,12 +11,9 @@
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import services as services_v21
-from nova import exception
-from nova.policies import base as base_policy
-from nova.tests import fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
@@ -34,64 +31,36 @@ class ServicesPolicyTest(base.BasePolicyTest):
super(ServicesPolicyTest, self).setUp()
self.controller = services_v21.ServiceController()
self.req = fakes.HTTPRequest.blank('/services')
- # Check that admin is able to change the service
- self.admin_authorized_contexts = [
+
+ # With the legacy rule enabled and scope checks disabled (the defaults),
+ # system admin, legacy admin, and project admin will be able to perform
+ # Services Operations.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to change the service
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that system scoped admin, member and reader are able to
- # read the service data.
- # NOTE(gmann): Until old default rule which is admin_api is
- # deprecated and not removed, project admin and legacy admin
- # will be able to read the service data. This make sure that existing
- # tokens will keep working even we have changed this policy defaults
- # to reader role.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.legacy_admin_context,
- self.project_admin_context]
- # Check that non-system-reader are not able to read the service
- # data
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.project_foo_context, self.project_member_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
def test_delete_service_policy(self):
rule_name = "os_compute_api:os-services:delete"
with mock.patch('nova.compute.api.HostAPI.service_get_by_id'):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, 1)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, 1)
def test_index_service_policy(self):
rule_name = "os_compute_api:os-services:list"
with mock.patch('nova.compute.api.HostAPI.service_get_all'):
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
def test_old_update_service_policy(self):
rule_name = "os_compute_api:os-services:update"
body = {'host': 'host1', 'binary': 'nova-compute'}
update = 'nova.compute.api.HostAPI.service_update_by_host_and_binary'
with mock.patch(update):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.update,
- self.req, 'enable', body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ self.req, 'enable', body=body)
def test_update_service_policy(self):
rule_name = "os_compute_api:os-services:update"
@@ -100,11 +69,25 @@ class ServicesPolicyTest(base.BasePolicyTest):
service = self.start_service(
'compute', 'fake-compute-host').service_ref
with mock.patch('nova.compute.api.HostAPI.service_update'):
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.update,
- req, service.uuid,
- body={'status': 'enabled'})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ req, service.uuid,
+ body={'status': 'enabled'})
+
+
+class ServicesNoLegacyNoScopePolicyTest(ServicesPolicyTest):
+ """Test Services APIs policies with no legacy deprecated rules
+ and no scope checks, which means new defaults only. In this case
+ system admin, legacy admin, and project admin will be able to
+ perform Service Operations. Legacy admin is still allowed because,
+ with no scope checks, the policy check is simply admin.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ServicesNoLegacyNoScopePolicyTest, self).setUp()
class ServicesScopeTypePolicyTest(ServicesPolicyTest):
@@ -122,80 +105,16 @@ class ServicesScopeTypePolicyTest(ServicesPolicyTest):
super(ServicesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to change the service
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to change the service
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- # Check that system admin, member and reader are able to read the
- # service data
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system or non-reader are not able to read the service
- # data
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.legacy_admin_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
-
-class ServicesDeprecatedPolicyTest(base.BasePolicyTest):
- """Test os-services APIs Deprecated policies.
-
- This class checks if deprecated policy rules are
- overridden by user on policy.yaml file then they
- still work because oslo.policy add deprecated rules
- in logical OR condition and enforce them for policy
- checks if overridden.
+ # With scope checks enabled, system admin is no longer able to perform
+ # Service Operations; legacy admin and project admin are.
+ self.project_admin_authorized_contexts = [self.legacy_admin_context,
+ self.project_admin_context]
+
+
+class ServicesScopeTypeNoLegacyPolicyTest(ServicesScopeTypePolicyTest):
+ """Test Services APIs policies with no legacy deprecated rules
+ and scope checks enabled, which means scope + new defaults, so
+ only project admins (including the legacy admin) are able to
+ perform Services Operations.
"""
- def setUp(self):
- super(ServicesDeprecatedPolicyTest, self).setUp()
- self.controller = services_v21.ServiceController()
- self.member_req = fakes.HTTPRequest.blank('')
- self.member_req.environ['nova.context'] = self.system_reader_context
- self.reader_req = fakes.HTTPRequest.blank('')
- self.reader_req.environ['nova.context'] = self.project_reader_context
- self.deprecated_policy = "os_compute_api:os-services"
- # Overridde rule with different checks than defaults so that we can
- # verify the rule overridden case.
- override_rules = {self.deprecated_policy: base_policy.SYSTEM_READER}
- # NOTE(gmann): Only override the deprecated rule in policy file so
- # that
- # we can verify if overridden checks are considered by oslo.policy.
- # Oslo.policy will consider the overridden rules if:
- # 1. overridden deprecated rule's checks are different than defaults
- # 2. new rules are not present in policy file
- self.policy = self.useFixture(fixtures.OverridePolicyFixture(
- rules_in_file=override_rules))
-
- def test_deprecated_policy_overridden_rule_is_checked(self):
- # Test to verify if deprecatd overridden policy is working.
-
- # check for success as member role. Deprecated rule
- # has been overridden with member checks in policy.yaml
- # If member role pass it means overridden rule is enforced by
- # olso.policy because new default is system admin and the old
- # default is admin.
- with mock.patch('nova.compute.api.HostAPI.service_get_by_id'):
- self.controller.index(self.member_req)
-
- # check for failure with reader context.
- exc = self.assertRaises(exception.PolicyNotAuthorized,
- self.controller.index, self.reader_req)
- self.assertEqual(
- "Policy doesn't allow os_compute_api:os-services:list to be"
- " performed.",
- exc.format_message())
+ without_deprecated_rules = True
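
The "legacy" defaults that without_deprecated_rules switches off are oslo.policy deprecated rules, which oslo.policy ORs into the new default check; roughly, a nova service policy with such a fallback is declared like the following sketch (values are illustrative, not copied from the tree):

    from oslo_policy import policy

    DEPRECATED_SERVICE_POLICY = policy.DeprecatedRule(
        'os_compute_api:os-services',
        'rule:admin_api',
        deprecated_reason='Granular, per-operation policies replace it.',
        deprecated_since='21.0.0')

    service_list = policy.DocumentedRuleDefault(
        name='os_compute_api:os-services:list',
        check_str='rule:context_is_admin',
        description='List all running Compute services in a region.',
        operations=[{'method': 'GET', 'path': '/os-services'}],
        scope_types=['project'],
        deprecated_rule=DEPRECATED_SERVICE_POLICY)
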
diff --git a/nova/tests/unit/policies/test_shelve.py b/nova/tests/unit/policies/test_shelve.py
index c4cf3dedbb..052f844c3d 100644
--- a/nova/tests/unit/policies/test_shelve.py
+++ b/nova/tests/unit/policies/test_shelve.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import shelve
@@ -43,63 +44,48 @@ class ShelveServerPolicyTest(base.BasePolicyTest):
id=1, uuid=uuids.fake_id, project_id=self.project_id,
user_id=user_id, vm_state=vm_states.ACTIVE)
self.mock_get.return_value = self.instance
-
- # Check that admin or and server owner is able to shelve/unshelve
- # the server
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project members,
+ # project readers, and any other role on the project (because the legacy
+ # rule allows the server owner, i.e. the same project id with no role
+ # check) are able to shelve and unshelve the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to shelve/unshelve
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
- # Check that admin is able to shelve offload the server.
- self.admin_authorized_contexts = [
+
+ # By default, legacy rules are enabled and scope checks are disabled, so
+ # system admin, legacy admin, and project admin are able to shelve
+ # offload the server.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
- # Check that non-admin is not able to shelve offload the server.
- self.admin_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.shelve')
def test_shelve_server_policy(self, mock_shelve):
rule_name = policies.POLICY_ROOT % 'shelve'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._shelve,
- self.req, self.instance.uuid,
- body={'shelve': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._shelve,
+ self.req, self.instance.uuid,
+ body={'shelve': {}})
@mock.patch('nova.compute.api.API.unshelve')
def test_unshelve_server_policy(self, mock_unshelve):
rule_name = policies.POLICY_ROOT % 'unshelve'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._unshelve,
- self.req, self.instance.uuid,
- body={'unshelve': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._unshelve,
+ self.req, self.instance.uuid,
+ body={'unshelve': {}})
@mock.patch('nova.compute.api.API.shelve_offload')
def test_shelve_offload_server_policy(self, mock_offload):
rule_name = policies.POLICY_ROOT % 'shelve_offload'
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name,
- self.controller._shelve_offload,
- self.req, self.instance.uuid,
- body={'shelveOffload': {}})
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller._shelve_offload,
+ self.req, self.instance.uuid,
+ body={'shelveOffload': {}})
def test_shelve_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -115,7 +101,7 @@ class ShelveServerPolicyTest(base.BasePolicyTest):
exc.format_message())
@mock.patch('nova.compute.api.API.shelve')
- def test_shelve_sevrer_overridden_policy_pass_with_same_user(
+ def test_shelve_server_overridden_policy_pass_with_same_user(
self, mock_shelve):
rule_name = policies.POLICY_ROOT % 'shelve'
self.policy.set_rules({rule_name: "user_id:%(user_id)s"})
@@ -124,6 +110,22 @@ class ShelveServerPolicyTest(base.BasePolicyTest):
body={'shelve': {}})
+class ShelveServerNoLegacyNoScopePolicyTest(ShelveServerPolicyTest):
+ """Test shelve/unshelve server APIs policies with no legacy deprecated
+ rules and no scope checks which means new defaults only.
+
+ """
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(ShelveServerNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to shelve/unshelve the server and only project admin can
+ # shelve offload the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class ShelveServerScopeTypePolicyTest(ShelveServerPolicyTest):
"""Test Shelve Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -137,41 +139,23 @@ class ShelveServerScopeTypePolicyTest(ShelveServerPolicyTest):
def setUp(self):
super(ShelveServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow the system admin to
+ # shelve/unshelve the server.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class ShelveServerNoLegacyPolicyTest(ShelveServerScopeTypePolicyTest):
+class ShelveServerScopeTypeNoLegacyPolicyTest(ShelveServerScopeTypePolicyTest):
"""Test Shelve Server APIs policies with system scope enabled,
and no more deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(ShelveServerNoLegacyPolicyTest, self).setUp()
-
- # Check that system admin or and owner is able to shelve/unshelve
- # the server.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to shelve/unshelve
- # the server.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
- # Check that system admin is able to shelve offload the server.
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non system admin is not able to shelve offload the server
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.project_admin_context,
- self.system_member_context, self.system_reader_context,
- self.system_foo_context, self.project_member_context,
- self.project_reader_context, self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ super(ShelveServerScopeTypeNoLegacyPolicyTest, self).setUp()
+ # With scope checks enabled and no legacy rules, only the project
+ # admin/member will be able to shelve/unshelve the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
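The hunks above replace common_policy_check, which required both an authorized and an unauthorized context list, with common_policy_auth, which takes only the authorized list. A minimal, hypothetical sketch of that idea, assuming the unauthorized set is simply everything not explicitly authorized (the names below are illustrative, not nova's actual BasePolicyTest helpers):

    # Hypothetical, simplified sketch of the idea behind common_policy_auth;
    # the real BasePolicyTest helper in nova is more involved.
    def split_contexts(all_contexts, authorized_contexts):
        """Return (authorized, unauthorized) given only the authorized list."""
        authorized = set(authorized_contexts)
        # Everything that is not explicitly authorized is expected to be
        # rejected, so tests no longer maintain a hand-written
        # "unauthorized" list alongside the authorized one.
        unauthorized = set(all_contexts) - authorized
        return authorized, unauthorized

    # Example: with six known contexts and three of them authorized, the
    # remaining three form the implicit unauthorized set.
    contexts = {"legacy_admin", "system_admin", "project_admin",
                "project_member", "project_reader", "other_project_member"}
    auth, unauth = split_contexts(
        contexts, {"legacy_admin", "system_admin", "project_admin"})
    assert unauth == {"project_member", "project_reader",
                      "other_project_member"}

This keeps each test focused on who should pass; the rejection side follows automatically from the full context list.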
diff --git a/nova/tests/unit/policies/test_simple_tenant_usage.py b/nova/tests/unit/policies/test_simple_tenant_usage.py
index 60eecdece8..d6aa7af901 100644
--- a/nova/tests/unit/policies/test_simple_tenant_usage.py
+++ b/nova/tests/unit/policies/test_simple_tenant_usage.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.api.openstack.compute import simple_tenant_usage
from nova.policies import simple_tenant_usage as policies
@@ -32,47 +32,46 @@ class SimpleTenantUsagePolicyTest(base.BasePolicyTest):
self.req = fakes.HTTPRequest.blank('')
self.controller._get_instances_all_cells = mock.MagicMock()
- # Check that reader(legacy admin) or and owner is able to get
- # the tenant usage statistics for a specific tenant.
- self.reader_or_owner_authorized_contexts = [
+ # Currently any admin can list other projects' usage.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context]
+ # and project readers can get their own project's usage statistics.
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
- self.system_member_context, self.system_reader_context]
- # Check that non-reader(legacy non-admin) or owner is not able to get
- # the tenant usage statistics for a specific tenant.
- self.reader_or_owner_unauthorized_contexts = [
- self.system_foo_context, self.other_project_member_context,
- self.other_project_reader_context
- ]
- # Check that reader is able to get the tenant usage statistics.
- self.reader_authorized_contexts = [
- self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-reader is not able to get the tenant usage statistics.
- self.reader_unauthorized_contexts = [
- self.system_foo_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context
]
def test_index_simple_tenant_usage_policy(self):
rule_name = policies.POLICY_ROOT % 'list'
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name,
- self.controller.index,
- self.req)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name,
+ self.controller.index,
+ self.req)
def test_show_simple_tenant_usage_policy(self):
rule_name = policies.POLICY_ROOT % 'show'
- self.common_policy_check(self.reader_or_owner_authorized_contexts,
- self.reader_or_owner_unauthorized_contexts,
- rule_name,
- self.controller.show,
- self.req, self.project_id)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name,
+ self.controller.show,
+ self.req, self.project_id)
+
+
+class SimpleTenantUsageNoLegacyNoScopePolicyTest(SimpleTenantUsagePolicyTest):
+ """Test Simple Tenant Usage APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(SimpleTenantUsageNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rules, other project roles like foo will not be
+ # able to get tenant usage.
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
@@ -88,23 +87,14 @@ class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
def setUp(self):
super(SimpleTenantUsageScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system reader is able to get the tenant usage statistics.
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context]
- # Check that non-system/reader is not able to get the tenant usage
- # statistics.
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context, self.project_reader_context,
- self.other_project_reader_context
- ]
+ # With scope checks enabled, system users are no longer allowed.
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class SimpleTenantUsageNoLegacyPolicyTest(
+class SimpleTenantUsageScopeTypeNoLegacyPolicyTest(
SimpleTenantUsageScopeTypePolicyTest):
"""Test Simple Tenant Usage APIs policies with system scope enabled,
and no more deprecated rules that allow the legacy admin API to
@@ -113,17 +103,6 @@ class SimpleTenantUsageNoLegacyPolicyTest(
without_deprecated_rules = True
def setUp(self):
- super(SimpleTenantUsageNoLegacyPolicyTest, self).setUp()
- # Check that system reader or owner is able to get
- # the tenant usage statistics for a specific tenant.
- self.reader_or_owner_authorized_contexts = [
- self.system_admin_context, self.system_member_context,
- self.system_reader_context, self.project_admin_context,
- self.project_member_context, self.project_reader_context]
- # Check that non-system reader/owner is not able to get
- # the tenant usage statistics for a specific tenant.
- self.reader_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.other_project_member_context,
- self.project_foo_context, self.other_project_reader_context
- ]
+ super(SimpleTenantUsageScopeTypeNoLegacyPolicyTest, self).setUp()
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
diff --git a/nova/tests/unit/policies/test_suspend_server.py b/nova/tests/unit/policies/test_suspend_server.py
index ecf0ebb9ab..7d3cde2799 100644
--- a/nova/tests/unit/policies/test_suspend_server.py
+++ b/nova/tests/unit/policies/test_suspend_server.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import suspend_server
@@ -44,40 +45,32 @@ class SuspendServerPolicyTest(base.BasePolicyTest):
user_id=user_id, vm_state=vm_states.ACTIVE)
self.mock_get.return_value = self.instance
- # Check that admin or and server owner is able to suspend/resume
- # the server
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project
+ # members, project readers and other project roles (because the legacy
+ # rule allows the server owner - same project id, no role check) are
+ # able to suspend/resume the server.
+ self.project_action_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
- # Check that non-admin/owner is not able to suspend/resume
- # the server
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
@mock.patch('nova.compute.api.API.suspend')
def test_suspend_server_policy(self, mock_suspend):
rule_name = policies.POLICY_ROOT % 'suspend'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._suspend,
- self.req, self.instance.uuid,
- body={'suspend': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._suspend,
+ self.req, self.instance.uuid,
+ body={'suspend': {}})
@mock.patch('nova.compute.api.API.resume')
def test_resume_server_policy(self, mock_resume):
rule_name = policies.POLICY_ROOT % 'resume'
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name,
- self.controller._resume,
- self.req, self.instance.uuid,
- body={'resume': {}})
+ self.common_policy_auth(self.project_action_authorized_contexts,
+ rule_name,
+ self.controller._resume,
+ self.req, self.instance.uuid,
+ body={'resume': {}})
def test_suspend_server_policy_failed_with_other_user(self):
# Change the user_id in request context.
@@ -102,6 +95,22 @@ class SuspendServerPolicyTest(base.BasePolicyTest):
body={'suspend': {}})
+class SuspendServerNoLegacyNoScopePolicyTest(SuspendServerPolicyTest):
+ """Test suspend server APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(SuspendServerNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rule, only project admin or member will be
+ # able to suspend/resume the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+
+
class SuspendServerScopeTypePolicyTest(SuspendServerPolicyTest):
"""Test Suspend Server APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
@@ -115,28 +124,22 @@ class SuspendServerScopeTypePolicyTest(SuspendServerPolicyTest):
def setUp(self):
super(SuspendServerScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # Enabling scope checks will not allow the system admin to suspend/resume.
+ self.project_action_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
-class SuspendServerNoLegacyPolicyTest(SuspendServerScopeTypePolicyTest):
- """Test Suspend Server APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to
- access system APIs.
+class SuspendServerScopeTypeNoLegacyTest(SuspendServerScopeTypePolicyTest):
+ """Test suspend/resume server APIs policies with system scope enabled,
+ and no more deprecated rules, which means scope + new defaults, so
+ only the project admin and member are able to suspend/resume the server.
"""
+
without_deprecated_rules = True
def setUp(self):
- super(SuspendServerNoLegacyPolicyTest, self).setUp()
- # Check that system admin or and server owner is able to
- # suspend/resume the server.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context, self.project_member_context]
- # Check that non-system/admin/owner is not able to suspend/resume
- # the server.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.other_project_member_context, self.project_reader_context,
- self.project_foo_context,
- self.other_project_reader_context,
- ]
+ super(SuspendServerScopeTypeNoLegacyTest, self).setUp()
+ # With scope checks enabled and no legacy rules, only the project
+ # admin/member will be able to suspend/resume the server.
+ self.project_action_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
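The same four-way pattern repeats for each API in this change (shelve and suspend above, volume attachments below): a base class exercising the legacy defaults plus three variants toggling scope enforcement and the removal of deprecated rules. A small descriptive mapping, with the shared context-set names taken from the hunks themselves (the base variant keeps an explicit list because the legacy rule also admits project_foo_context):

    # Descriptive summary only; the values are attributes provided by the
    # policy test base class and referenced in the hunks above.
    VARIANT_TO_CONTEXT_SET = {
        "PolicyTest (legacy rules, no scope)":
            "explicit list (includes project_foo_context)",
        "NoLegacyNoScopePolicyTest":
            "project_member_or_admin_with_no_scope_no_legacy",
        "ScopeTypePolicyTest":
            "project_m_r_or_admin_with_scope_and_legacy",
        "ScopeTypeNoLegacyPolicyTest":
            "project_member_or_admin_with_scope_no_legacy",
    }

    for variant, context_set in VARIANT_TO_CONTEXT_SET.items():
        print(f"{variant}: {context_set}")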
diff --git a/nova/tests/unit/policies/test_tenant_networks.py b/nova/tests/unit/policies/test_tenant_networks.py
index 12e8731582..a5bc614902 100644
--- a/nova/tests/unit/policies/test_tenant_networks.py
+++ b/nova/tests/unit/policies/test_tenant_networks.py
@@ -10,10 +10,13 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova.api.openstack.compute import tenant_networks
+from nova.policies import base as base_policy
+from nova.policies import tenant_networks as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
@@ -36,7 +39,7 @@ class TenantNetworksPolicyTest(base.BasePolicyTest):
# enforcement so will be passing context's project_id as target to
# policy and always pass. If requester is not admin or owner
# of networks then neutron will be returning the appropriate error.
- self.everyone_authorized_contexts = [
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
@@ -45,23 +48,46 @@ class TenantNetworksPolicyTest(base.BasePolicyTest):
self.system_foo_context,
self.other_project_member_context
]
- self.everyone_unauthorized_contexts = []
@mock.patch('nova.network.neutron.API.get_all')
def test_list_tenant_networks_policy(self, mock_get):
- rule_name = "os_compute_api:os-tenant-networks"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ rule_name = "os_compute_api:os-tenant-networks:list"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.network.neutron.API.get')
def test_show_tenant_network_policy(self, mock_get):
- rule_name = "os_compute_api:os-tenant-networks"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, uuids.fake_id)
+ rule_name = "os_compute_api:os-tenant-networks:show"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, uuids.fake_id)
+
+
+class TenantNetworksNoLegacyNoScopePolicyTest(TenantNetworksPolicyTest):
+ """Test Tenant Networks APIs policies with no legacy deprecated rules
+ and no scope checks.
+
+ """
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN}
+
+ def setUp(self):
+ super(TenantNetworksNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rules, other project roles like foo will not be
+ # able to get tenant networks.
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
class TenantNetworksScopeTypePolicyTest(TenantNetworksPolicyTest):
@@ -78,3 +104,31 @@ class TenantNetworksScopeTypePolicyTest(TenantNetworksPolicyTest):
def setUp(self):
super(TenantNetworksScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+
+
+class TenantNetworksScopeTypeNoLegacyPolicyTest(
+ TenantNetworksScopeTypePolicyTest):
+ """Test Tenant Networks APIs policies with system scope enabled,
+ and no more deprecated rules.
+ """
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ policies.POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ policies.POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN}
+
+ def setUp(self):
+ super(TenantNetworksScopeTypeNoLegacyPolicyTest, self).setUp()
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.other_project_member_context,
+ self.other_project_reader_context,
+ ]
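The tenant-networks hunks also replace the single os_compute_api:os-tenant-networks rule with granular :list and :show rules whose new default is PROJECT_READER_OR_ADMIN. A rough sketch of how such a granular rule is typically declared with oslo.policy, keeping the old blanket rule as a deprecated alias; the check strings, version and description below are placeholders, not nova's exact definitions:

    # Illustrative only: placeholder metadata, not nova's actual policy code.
    from oslo_policy import policy

    deprecated_blanket_rule = policy.DeprecatedRule(
        'os_compute_api:os-tenant-networks',
        'rule:admin_or_owner',
        deprecated_reason='replaced by granular list/show policies',
        deprecated_since='26.0.0',
    )

    tenant_networks_list = policy.DocumentedRuleDefault(
        name='os_compute_api:os-tenant-networks:list',
        check_str='role:reader and project_id:%(project_id)s',
        description='List project networks',
        operations=[{'method': 'GET', 'path': '/os-tenant-networks'}],
        scope_types=['project'],
        deprecated_rule=deprecated_blanket_rule,
    )

The deprecated_rule link is what lets the legacy blanket rule keep working until operators move to the new defaults, which is exactly the behaviour the NoLegacy test variants switch off.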
diff --git a/nova/tests/unit/policies/test_volumes.py b/nova/tests/unit/policies/test_volumes.py
index 4ee33d0694..896881c03f 100644
--- a/nova/tests/unit/policies/test_volumes.py
+++ b/nova/tests/unit/policies/test_volumes.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -20,6 +21,8 @@ from nova.compute import vm_states
from nova import exception
from nova import objects
from nova.objects import block_device as block_device_obj
+from nova.policies import base as base_policy
+from nova.policies import volumes as v_policies
from nova.policies import volumes_attachments as va_policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_block_device
@@ -92,77 +95,50 @@ class VolumeAttachPolicyTest(base.BasePolicyTest):
task_state=None, launched_at=timeutils.utcnow())
self.mock_get.return_value = self.instance
- # Check that admin or owner is able to list/create/show/delete
- # the attached volume.
- self.admin_or_owner_authorized_contexts = [
+ # With the legacy rule and no scope checks, all admins, project
+ # members, project readers and other project roles (because the
+ # legacy rule allows the resource owner - same project id, no role
+ # check) are able to create/delete/update the volume attachment.
+ self.project_member_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.project_admin_context, self.project_foo_context,
- self.project_reader_context, self.project_member_context
- ]
-
- self.admin_or_owner_unauthorized_contexts = [
- self.system_member_context, self.system_reader_context,
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
- # Check that admin is able to update the attached volume
- self.admin_authorized_contexts = [
- self.legacy_admin_context,
- self.system_admin_context,
- self.project_admin_context
- ]
- # Check that non-admin is not able to update the attached
- # volume
- self.admin_unauthorized_contexts = [
- self.system_member_context,
- self.system_reader_context,
- self.system_foo_context,
- self.project_member_context,
- self.other_project_member_context,
- self.project_foo_context,
- self.project_reader_context,
- self.other_project_reader_context,
- ]
-
- self.reader_authorized_contexts = [
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context]
+
+ # With the legacy rule and no scope checks, all admins, project
+ # members, project readers and other project roles (because the
+ # legacy rule allows the resource owner - same project id, no role
+ # check) are able to get the volume attachment.
+ self.project_reader_authorized_contexts = (
+ self.project_member_authorized_contexts)
+
+ # By default, legacy rules are enabled and scope checks are disabled,
+ # so the system admin, legacy admin, and project admin are able to
+ # update the volume attachment with a different volumeId.
+ self.project_admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
- self.system_reader_context, self.system_member_context,
- self.project_admin_context, self.project_reader_context,
- self.project_member_context, self.project_foo_context
- ]
-
- self.reader_unauthorized_contexts = [
- self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ self.project_admin_context]
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
def test_index_volume_attach_policy(self, mock_get_instance):
rule_name = self.policy_root % "index"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req, FAKE_UUID)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req, FAKE_UUID)
def test_show_volume_attach_policy(self):
rule_name = self.policy_root % "show"
- self.common_policy_check(self.reader_authorized_contexts,
- self.reader_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, FAKE_UUID, FAKE_UUID_A)
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, FAKE_UUID, FAKE_UUID_A)
@mock.patch('nova.compute.api.API.attach_volume')
def test_create_volume_attach_policy(self, mock_attach_volume):
rule_name = self.policy_root % "create"
body = {'volumeAttachment': {'volumeId': FAKE_UUID_B,
'device': '/dev/fake'}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, FAKE_UUID, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, FAKE_UUID, body=body)
@mock.patch.object(block_device_obj.BlockDeviceMapping, 'save')
def test_update_volume_attach_policy(self, mock_bdm_save):
@@ -171,28 +147,25 @@ class VolumeAttachPolicyTest(base.BasePolicyTest):
body = {'volumeAttachment': {
'volumeId': FAKE_UUID_A,
'delete_on_termination': True}}
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name, self.controller.update,
- req, FAKE_UUID,
- FAKE_UUID_A, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.update,
+ req, FAKE_UUID,
+ FAKE_UUID_A, body=body)
@mock.patch('nova.compute.api.API.detach_volume')
def test_delete_volume_attach_policy(self, mock_detach_volume):
rule_name = self.policy_root % "delete"
- self.common_policy_check(self.admin_or_owner_authorized_contexts,
- self.admin_or_owner_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, FAKE_UUID, FAKE_UUID_A)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, FAKE_UUID, FAKE_UUID_A)
@mock.patch('nova.compute.api.API.swap_volume')
def test_swap_volume_attach_policy(self, mock_swap_volume):
rule_name = self.policy_root % "swap"
body = {'volumeAttachment': {'volumeId': FAKE_UUID_B}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.update,
- self.req, FAKE_UUID, FAKE_UUID_A, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ self.req, FAKE_UUID, FAKE_UUID_A, body=body)
@mock.patch.object(block_device_obj.BlockDeviceMapping, 'save')
@mock.patch('nova.compute.api.API.swap_volume')
@@ -225,14 +198,31 @@ class VolumeAttachPolicyTest(base.BasePolicyTest):
req = fakes.HTTPRequest.blank('', version='2.85')
body = {'volumeAttachment': {'volumeId': FAKE_UUID_B,
'delete_on_termination': True}}
- self.common_policy_check(self.admin_authorized_contexts,
- self.admin_unauthorized_contexts,
- rule_name, self.controller.update,
- req, FAKE_UUID, FAKE_UUID_A, body=body)
+ self.common_policy_auth(self.project_admin_authorized_contexts,
+ rule_name, self.controller.update,
+ req, FAKE_UUID, FAKE_UUID_A, body=body)
mock_swap_volume.assert_called()
mock_bdm_save.assert_called()
+class VolumeAttachNoLegacyNoScopePolicyTest(VolumeAttachPolicyTest):
+ """Test volume attachment APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+
+ def setUp(self):
+ super(VolumeAttachNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rules, only the admin, member, or reader will be
+ # able to perform volume attachment operations on their own project.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_no_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_no_scope_no_legacy)
+
+
class VolumeAttachScopeTypePolicyTest(VolumeAttachPolicyTest):
"""Test os-volume-attachments APIs policies with system scope enabled.
@@ -248,77 +238,33 @@ class VolumeAttachScopeTypePolicyTest(VolumeAttachPolicyTest):
super(VolumeAttachScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
- # Check that system admin is able to update the attached volume
- self.admin_authorized_contexts = [
- self.system_admin_context]
- # Check that non-system or non-admin is not able to update
- # the attached volume.
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
+ # Enabling scope checks will not allow the system admin to perform
+ # volume attachment operations.
+ self.project_member_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_m_r_or_admin_with_scope_and_legacy)
+ self.project_admin_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context]
-class VolumeAttachNoLegacyPolicyTest(VolumeAttachPolicyTest):
+
+class VolumeAttachScopeTypeNoLegacyPolicyTest(VolumeAttachScopeTypePolicyTest):
"""Test os-volume-attachments APIs policies with system scope enabled,
- and no more deprecated rules that allow the legacy admin API to access
- system_admin_or_owner APIs.
+ and no legacy deprecated rules.
"""
without_deprecated_rules = True
def setUp(self):
- super(VolumeAttachNoLegacyPolicyTest, self).setUp()
+ super(VolumeAttachScopeTypeNoLegacyPolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
-
- # Check that system or projct admin or owner is able to
- # list/create/show/delete the attached volume.
- self.admin_or_owner_authorized_contexts = [
- self.system_admin_context,
- self.project_admin_context,
- self.project_member_context
- ]
-
- # Check that non-system and non-admin/owner is not able to
- # list/create/show/delete the attached volume.
- self.admin_or_owner_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.project_reader_context,
- self.project_foo_context, self.system_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
-
- # Check that admin is able to update the attached volume
- self.admin_authorized_contexts = [
- self.system_admin_context
- ]
- # Check that non-admin is not able to update the attached
- # volume
- self.admin_unauthorized_contexts = [
- self.legacy_admin_context, self.system_member_context,
- self.system_reader_context, self.system_foo_context,
- self.project_admin_context, self.project_member_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- self.project_foo_context, self.project_reader_context
- ]
-
- self.reader_authorized_contexts = [
- self.system_admin_context, self.system_reader_context,
- self.system_member_context, self.project_admin_context,
- self.project_reader_context, self.project_member_context
- ]
-
- self.reader_unauthorized_contexts = [
- self.legacy_admin_context, self.system_foo_context,
- self.project_foo_context,
- self.other_project_member_context,
- self.other_project_reader_context,
- ]
+ # With scope checks enabled and no legacy rules, system users are
+ # not allowed, and the project admin/member/reader will be able to
+ # perform volume attachment operations on their own project.
+ self.project_member_authorized_contexts = (
+ self.project_member_or_admin_with_scope_no_legacy)
+ self.project_reader_authorized_contexts = (
+ self.project_reader_or_admin_with_scope_no_legacy)
class VolumesPolicyTest(base.BasePolicyTest):
@@ -336,14 +282,23 @@ class VolumesPolicyTest(base.BasePolicyTest):
self.snapshot_ctlr = volumes_v21.SnapshotController()
self.req = fakes.HTTPRequest.blank('')
self.controller._translate_volume_summary_view = mock.MagicMock()
- # Check that everyone is able to perform crud operations
+ # Everyone will be able to perform CRUD operations
# on volume and volume snapshots.
# NOTE: Nova cannot verify the volume/snapshot owner during nova policy
# enforcement so will be passing context's project_id as target to
# policy and always pass. If requester is not admin or owner
# of volume/snapshot then cinder will be returning the appropriate
# error.
- self.everyone_authorized_contexts = [
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context, self.project_foo_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.system_foo_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context,
@@ -352,94 +307,133 @@ class VolumesPolicyTest(base.BasePolicyTest):
self.system_foo_context,
self.other_project_member_context
]
- self.everyone_unauthorized_contexts = []
@mock.patch('nova.volume.cinder.API.get_all')
def test_list_volumes_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.index,
- self.req)
+ rule_name = "os_compute_api:os-volumes:list"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.index,
+ self.req)
@mock.patch('nova.volume.cinder.API.get_all')
def test_list_detail_volumes_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.detail,
- self.req)
+ rule_name = "os_compute_api:os-volumes:detail"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.detail,
+ self.req)
@mock.patch('nova.volume.cinder.API.get')
def test_show_volume_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.show,
- self.req, uuids.fake_id)
+ rule_name = "os_compute_api:os-volumes:show"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.controller.show,
+ self.req, uuids.fake_id)
@mock.patch('nova.api.openstack.compute.volumes.'
'_translate_volume_detail_view')
@mock.patch('nova.volume.cinder.API.create')
def test_create_volumes_policy(self, mock_create, mock_view):
- rule_name = "os_compute_api:os-volumes"
+ rule_name = "os_compute_api:os-volumes:create"
body = {"volume": {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}}
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.create,
+ self.req, body=body)
@mock.patch('nova.volume.cinder.API.delete')
def test_delete_volume_policy(self, mock_delete):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.controller.delete,
- self.req, uuids.fake_id)
+ rule_name = "os_compute_api:os-volumes:delete"
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.controller.delete,
+ self.req, uuids.fake_id)
@mock.patch('nova.volume.cinder.API.get_all_snapshots')
def test_list_snapshots_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.snapshot_ctlr.index,
- self.req)
+ rule_name = "os_compute_api:os-volumes:snapshots:list"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.snapshot_ctlr.index,
+ self.req)
@mock.patch('nova.volume.cinder.API.get_all_snapshots')
def test_list_detail_snapshots_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.snapshot_ctlr.detail,
- self.req)
+ rule_name = "os_compute_api:os-volumes:snapshots:detail"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.snapshot_ctlr.detail,
+ self.req)
@mock.patch('nova.volume.cinder.API.get_snapshot')
def test_show_snapshot_policy(self, mock_get):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.snapshot_ctlr.show,
- self.req, uuids.fake_id)
+ rule_name = "os_compute_api:os-volumes:snapshots:show"
+ self.common_policy_auth(self.project_reader_authorized_contexts,
+ rule_name, self.snapshot_ctlr.show,
+ self.req, uuids.fake_id)
@mock.patch('nova.volume.cinder.API.create_snapshot')
def test_create_snapshot_policy(self, mock_create):
- rule_name = "os_compute_api:os-volumes"
+ rule_name = "os_compute_api:os-volumes:snapshots:create"
body = {"snapshot": {"volume_id": uuids.fake_id}}
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.snapshot_ctlr.create,
- self.req, body=body)
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.snapshot_ctlr.create,
+ self.req, body=body)
@mock.patch('nova.volume.cinder.API.delete_snapshot')
def test_delete_snapshot_policy(self, mock_delete):
- rule_name = "os_compute_api:os-volumes"
- self.common_policy_check(self.everyone_authorized_contexts,
- self.everyone_unauthorized_contexts,
- rule_name, self.snapshot_ctlr.delete,
- self.req, uuids.fake_id)
+ rule_name = "os_compute_api:os-volumes:snapshots:delete"
+ self.common_policy_auth(self.project_member_authorized_contexts,
+ rule_name, self.snapshot_ctlr.delete,
+ self.req, uuids.fake_id)
+
+
+class VolumesNoLegacyNoScopePolicyTest(VolumesPolicyTest):
+ """Test Volume APIs policies with no legacy deprecated rules
+ and no scope checks which means new defaults only.
+
+ """
+
+ without_deprecated_rules = True
+ rules_without_deprecation = {
+ v_policies.POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'detail':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:detail':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ }
+
+ def setUp(self):
+ super(VolumesNoLegacyNoScopePolicyTest, self).setUp()
+ # With no legacy rules, other project roles like foo will not be
+ # able to operate on volumes and snapshots.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.system_member_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.system_admin_context,
+ self.project_admin_context, self.project_member_context,
+ self.project_reader_context,
+ self.other_project_reader_context,
+ self.system_member_context, self.system_reader_context,
+ self.other_project_member_context
+ ]
class VolumesScopeTypePolicyTest(VolumesPolicyTest):
@@ -456,3 +450,65 @@ class VolumesScopeTypePolicyTest(VolumesPolicyTest):
def setUp(self):
super(VolumesScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
+ # With scope checks enabled, system users will not be able to
+ # operate on volumes and snapshots.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.project_foo_context, self.other_project_reader_context,
+ self.other_project_member_context
+ ]
+
+
+class VolumesScopeTypeNoLegacyPolicyTest(VolumesScopeTypePolicyTest):
+ """Test Volume APIs policies with system scope enabled,
+ and no legacy deprecated rules.
+ """
+ without_deprecated_rules = True
+
+ rules_without_deprecation = {
+ v_policies.POLICY_NAME % 'list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'detail':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:list':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:detail':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:delete':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:create':
+ base_policy.PROJECT_MEMBER_OR_ADMIN,
+ v_policies.POLICY_NAME % 'snapshots:show':
+ base_policy.PROJECT_READER_OR_ADMIN,
+ }
+
+ def setUp(self):
+ super(VolumesScopeTypeNoLegacyPolicyTest, self).setUp()
+ self.flags(enforce_scope=True, group="oslo_policy")
+ # With no legacy rules and scope checks enabled, system users and
+ # other project roles like foo will not be able to operate on
+ # volumes and snapshots.
+ self.project_member_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context,
+ self.other_project_member_context
+ ]
+ self.project_reader_authorized_contexts = [
+ self.legacy_admin_context, self.project_admin_context,
+ self.project_member_context, self.project_reader_context,
+ self.other_project_reader_context,
+ self.other_project_member_context
+ ]
diff --git a/nova/tests/unit/privsep/test_fs.py b/nova/tests/unit/privsep/test_fs.py
index 89062acce9..919b6c553d 100644
--- a/nova/tests/unit/privsep/test_fs.py
+++ b/nova/tests/unit/privsep/test_fs.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.privsep.fs
from nova import test
diff --git a/nova/tests/unit/privsep/test_idmapshift.py b/nova/tests/unit/privsep/test_idmapshift.py
index 2b5acbe33c..7c6f7833ff 100644
--- a/nova/tests/unit/privsep/test_idmapshift.py
+++ b/nova/tests/unit/privsep/test_idmapshift.py
@@ -13,9 +13,9 @@
# limitations under the License.
from io import StringIO
+from unittest import mock
import fixtures
-import mock
import nova.privsep.idmapshift
from nova import test
diff --git a/nova/tests/unit/privsep/test_libvirt.py b/nova/tests/unit/privsep/test_libvirt.py
index 32d375bb1c..eebcf6c231 100644
--- a/nova/tests/unit/privsep/test_libvirt.py
+++ b/nova/tests/unit/privsep/test_libvirt.py
@@ -15,8 +15,9 @@
# under the License.
import binascii
+from unittest import mock
+
import ddt
-import mock
import os
import nova.privsep.libvirt
diff --git a/nova/tests/unit/privsep/test_linux_net.py b/nova/tests/unit/privsep/test_linux_net.py
index 5bdac6ca02..6b226359c3 100644
--- a/nova/tests/unit/privsep/test_linux_net.py
+++ b/nova/tests/unit/privsep/test_linux_net.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from oslo_concurrency import processutils
diff --git a/nova/tests/unit/privsep/test_path.py b/nova/tests/unit/privsep/test_path.py
index 1b4955837d..853ee01d09 100644
--- a/nova/tests/unit/privsep/test_path.py
+++ b/nova/tests/unit/privsep/test_path.py
@@ -14,8 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
import os
+from unittest import mock
+
import tempfile
from nova import exception
diff --git a/nova/tests/unit/privsep/test_qemu.py b/nova/tests/unit/privsep/test_qemu.py
index 85c48aa4ae..f3fe5599f2 100644
--- a/nova/tests/unit/privsep/test_qemu.py
+++ b/nova/tests/unit/privsep/test_qemu.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.privsep.qemu
from nova import test
diff --git a/nova/tests/unit/privsep/test_utils.py b/nova/tests/unit/privsep/test_utils.py
index 84d0767c29..887e6dfa8b 100644
--- a/nova/tests/unit/privsep/test_utils.py
+++ b/nova/tests/unit/privsep/test_utils.py
@@ -13,8 +13,8 @@
# under the License.
import errno
-import mock
import os
+from unittest import mock
import nova.privsep.utils
from nova import test
diff --git a/nova/tests/unit/scheduler/client/test_query.py b/nova/tests/unit/scheduler/client/test_query.py
index f8ea4aa337..fe23cf88e3 100644
--- a/nova/tests/unit/scheduler/client/test_query.py
+++ b/nova/tests/unit/scheduler/client/test_query.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index a6097cf164..40ebac9af9 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -9,13 +9,15 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
import copy
+import ddt
import time
+from unittest import mock
from urllib import parse
import fixtures
from keystoneauth1 import exceptions as ks_exc
-import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -41,8 +43,14 @@ class SafeConnectedTestCase(test.NoDBTestCase):
super(SafeConnectedTestCase, self).setUp()
self.context = context.get_admin_context()
- with mock.patch('keystoneauth1.loading.load_auth_from_conf_options'):
- self.client = report.SchedulerReportClient()
+ # need to mock this globally as SchedulerReportClient._create_client
+ # is called again when EndpointNotFound is raised
+ self.useFixture(
+ fixtures.MonkeyPatch(
+ 'keystoneauth1.loading.load_auth_from_conf_options',
+ mock.MagicMock()))
+
+ self.client = report.SchedulerReportClient()
@mock.patch('keystoneauth1.session.Session.request')
def test_missing_endpoint(self, req):
@@ -150,6 +158,60 @@ class SafeConnectedTestCase(test.NoDBTestCase):
self.assertTrue(req.called)
+@ddt.ddt
+class TestSingleton(test.NoDBTestCase):
+ def test_singleton(self):
+ # Make sure we start with a clean slate
+ self.assertIsNone(report.PLACEMENTCLIENT)
+
+ # Make sure the first call creates the singleton, sets it
+ # globally, and returns it
+ client = report.report_client_singleton()
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ # Make sure that a subsequent call returns the same thing
+ # again and that the global is unchanged
+ self.assertEqual(client, report.report_client_singleton())
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ @ddt.data(ks_exc.EndpointNotFound,
+ ks_exc.MissingAuthPlugin,
+ ks_exc.Unauthorized,
+ ks_exc.DiscoveryFailure,
+ ks_exc.ConnectFailure,
+ ks_exc.RequestTimeout,
+ ks_exc.GatewayTimeout,
+ test.TestingException)
+ def test_errors(self, exc):
+ self._test_error(exc)
+
+ @mock.patch.object(report, 'LOG')
+ def _test_error(self, exc, mock_log):
+ with mock.patch.object(report.SchedulerReportClient, '_create_client',
+ side_effect=exc):
+ self.assertRaises(exc, report.report_client_singleton)
+ mock_log.error.assert_called_once()
+
+ def test_error_then_success(self):
+ # Simulate an error
+ self._test_error(ks_exc.ConnectFailure)
+
+ # Ensure we did not set the global client
+ self.assertIsNone(report.PLACEMENTCLIENT)
+
+ # Call again, with no error
+ client = report.report_client_singleton()
+
+ # Make sure we got a client and that it was set as the global
+ # one
+ self.assertIsNotNone(client)
+ self.assertEqual(client, report.PLACEMENTCLIENT)
+
+ # Make sure we keep getting the same one
+ client2 = report.report_client_singleton()
+ self.assertEqual(client, client2)
+
+
class TestConstructor(test.NoDBTestCase):
def setUp(self):
super(TestConstructor, self).setUp()
@@ -4637,3 +4699,31 @@ class TestUsages(SchedulerReportClientTestCase):
expected = {'project': {'cores': 4, 'ram': 0},
'user': {'cores': 4, 'ram': 0}}
self.assertDictEqual(expected, counts)
+
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
+ def test_get_usages_counts_for_limits(self, mock_get):
+ fake_responses = fake_requests.FakeResponse(
+ 200,
+ content=jsonutils.dumps({'usages': {orc.VCPU: 2, orc.PCPU: 2}}))
+ mock_get.return_value = fake_responses
+
+ counts = self.client.get_usages_counts_for_limits(
+ self.context, 'fake-project')
+
+ expected = {orc.VCPU: 2, orc.PCPU: 2}
+ self.assertDictEqual(expected, counts)
+ self.assertEqual(1, mock_get.call_count)
+
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.get')
+ def test_get_usages_counts_for_limits_fails(self, mock_get):
+ fake_failure_response = fake_requests.FakeResponse(500)
+ mock_get.side_effect = [ks_exc.ConnectFailure, fake_failure_response]
+
+ e = self.assertRaises(exception.UsagesRetrievalFailed,
+ self.client.get_usages_counts_for_limits,
+ self.context, 'fake-project')
+
+ expected = "Failed to retrieve usages for project 'fake-project' " \
+ "and user 'N/A'."
+ self.assertEqual(expected, str(e))
+ self.assertEqual(2, mock_get.call_count)
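The new TestSingleton class above exercises a module-level placement client singleton: the first successful call caches the client in report.PLACEMENTCLIENT, failures are logged and leave the global unset so a later call can retry. A minimal sketch of that behaviour, assuming a hypothetical factory argument in place of the real SchedulerReportClient constructor (the actual helper in nova.scheduler.client.report takes no arguments and differs in detail):

    # Hypothetical sketch of the behaviour TestSingleton asserts; not the
    # real nova.scheduler.client.report implementation.
    import logging

    LOG = logging.getLogger(__name__)
    PLACEMENTCLIENT = None

    def report_client_singleton(factory):
        """Create the shared placement client on first use and cache it.

        ``factory`` stands in for the real SchedulerReportClient
        constructor. On failure the global stays unset so a later call can
        retry, which is what test_error_then_success checks.
        """
        global PLACEMENTCLIENT
        if PLACEMENTCLIENT is None:
            try:
                PLACEMENTCLIENT = factory()
            except Exception:
                LOG.error('Failed to create the placement client')
                raise
        return PLACEMENTCLIENT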
diff --git a/nova/tests/unit/scheduler/fakes.py b/nova/tests/unit/scheduler/fakes.py
index 658c82c20e..f5dcf87e4a 100644
--- a/nova/tests/unit/scheduler/fakes.py
+++ b/nova/tests/unit/scheduler/fakes.py
@@ -34,6 +34,7 @@ NUMA_TOPOLOGY = objects.NUMATopology(cells=[
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=16, total=387184, used=0),
@@ -46,6 +47,7 @@ NUMA_TOPOLOGY = objects.NUMATopology(cells=[
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=1548736, used=0),
diff --git a/nova/tests/unit/scheduler/filters/test_affinity_filters.py b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
index 45c4d9834c..778fbd9073 100644
--- a/nova/tests/unit/scheduler/filters/test_affinity_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_affinity_filters.py
@@ -10,7 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
index f17a7168f1..09b8d728b2 100644
--- a/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_image_properties_isolation_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import aggregate_image_properties_isolation as aipi
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
index 3567d85a62..971e1a366c 100644
--- a/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_instance_extra_specs_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import aggregate_instance_extra_specs as agg_specs
diff --git a/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
index 6e6ae9a421..7f2f75a5bd 100644
--- a/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_aggregate_multitenancy_isolation_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import aggregate_multitenancy_isolation as ami
diff --git a/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
index 2c1a43225e..38a75452ba 100644
--- a/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_availability_zone_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import availability_zone_filter
diff --git a/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py b/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
index a90ec4367d..cbb8c31601 100644
--- a/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_compute_capabilities_filters.py
@@ -48,7 +48,7 @@ class TestComputeCapabilitiesFilter(test.NoDBTestCase):
flavor=objects.Flavor(memory_mb=1024, extra_specs=especs))
self.assertFalse(self.filt_cls.host_passes(None, spec_obj))
- def test_compute_filter_fails_without_capabilites(self):
+ def test_compute_filter_fails_without_capabilities(self):
cpu_info = """ { } """
cpu_info = str(cpu_info)
diff --git a/nova/tests/unit/scheduler/filters/test_compute_filters.py b/nova/tests/unit/scheduler/filters/test_compute_filters.py
index d9cee4c410..335b9d07be 100644
--- a/nova/tests/unit/scheduler/filters/test_compute_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_compute_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import compute_filter
diff --git a/nova/tests/unit/scheduler/filters/test_io_ops_filters.py b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
index fd0dc3aca1..3b06aaf069 100644
--- a/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_io_ops_filters.py
@@ -11,7 +11,7 @@
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import io_ops_filter
diff --git a/nova/tests/unit/scheduler/filters/test_num_instances_filters.py b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
index 070cc3a785..b43a9b1dc1 100644
--- a/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_num_instances_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import num_instances_filter
diff --git a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
index 0ebe95d5e4..ba9073e0df 100644
--- a/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_numa_topology_filters.py
@@ -11,6 +11,7 @@
# under the License.
import itertools
+from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
@@ -53,7 +54,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
@@ -132,7 +135,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 21,
- 'ram_allocation_ratio': 1.3})
+ 'ram_allocation_ratio': 1.3,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
limits = host.limits['numa_topology']
self.assertEqual(limits.cpu_allocation_ratio, 21)
@@ -180,7 +185,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
'numa_topology': numa_topology,
'pci_stats': None,
'cpu_allocation_ratio': 1,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}],
+ })
assertion = self.assertTrue if passes else self.assertFalse
# test combinations of image properties and extra specs
@@ -237,7 +244,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}]
+ })
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_mempages(self):
@@ -287,7 +296,9 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
'numa_topology': host_topology,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
- 'ram_allocation_ratio': 1.5})
+ 'ram_allocation_ratio': 1.5,
+ 'allocation_candidates': [{"mappings": {}}],
+ })
def test_numa_topology_filter_pass_networks(self):
host = self._get_fake_host_state_with_networks()
@@ -329,3 +340,79 @@ class TestNUMATopologyFilter(test.NoDBTestCase):
network_metadata=network_metadata)
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ @mock.patch("nova.virt.hardware.numa_fit_instance_to_host")
+ def test_filters_candidates(self, mock_numa_fit):
+ instance_topology = objects.InstanceNUMATopology(
+ cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([1]), pcpuset=set(), memory=512
+ ),
+ ]
+ )
+ spec_obj = self._get_spec_obj(numa_topology=instance_topology)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ {
+ "numa_topology": fakes.NUMA_TOPOLOGY,
+ "pci_stats": None,
+ "cpu_allocation_ratio": 16.0,
+ "ram_allocation_ratio": 1.5,
+ # simulate that placement returned 3 candidates for this host
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_3"]}},
+ ],
+ },
+ )
+ # and that from those candidates only the second matches the numa logic
+ mock_numa_fit.side_effect = [False, True, False]
+
+ # run the filter and expect that the host passes as it has at least
+ # one viable candidate
+ self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+ # also assert that the filter checked all three candidates
+ self.assertEqual(3, len(mock_numa_fit.mock_calls))
+ # and also it reduced the candidates in the host state to the only
+ # matching one
+ self.assertEqual(1, len(host.allocation_candidates))
+ self.assertEqual(
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ host.allocation_candidates[0],
+ )
+
+ @mock.patch("nova.virt.hardware.numa_fit_instance_to_host")
+ def test_filter_fails_if_no_matching_candidate_left(self, mock_numa_fit):
+ instance_topology = objects.InstanceNUMATopology(
+ cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([1]), pcpuset=set(), memory=512
+ ),
+ ]
+ )
+ spec_obj = self._get_spec_obj(numa_topology=instance_topology)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ {
+ "numa_topology": fakes.NUMA_TOPOLOGY,
+ "pci_stats": None,
+ "cpu_allocation_ratio": 16.0,
+ "ram_allocation_ratio": 1.5,
+ # simulate that placement returned 1 candidate for this host
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ ],
+ },
+ )
+ # simulate that the only candidate we have does not match
+ mock_numa_fit.side_effect = [False]
+
+ # run the filter and expect that it fails the host as there is no
+ # viable candidate left
+ self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+ self.assertEqual(1, len(mock_numa_fit.mock_calls))
+ # and also it made the candidates list empty in the host state
+ self.assertEqual(0, len(host.allocation_candidates))
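+
+ # A rough sketch (an assumption, not part of this patch) of the
+ # per-candidate loop in NUMATopologyFilter that the two tests above
+ # exercise: each placement candidate is tried against the NUMA fitting
+ # logic and only the matching ones are kept on the host state.
+ #
+ #   good = []
+ #   for candidate in host_state.allocation_candidates:
+ #       if hardware.numa_fit_instance_to_host(
+ #               host_topology, requested_topology, limits=limits,
+ #               pci_requests=pci_requests, pci_stats=pci_stats,
+ #               provider_mapping=candidate["mappings"]):
+ #           good.append(candidate)
+ #   host_state.allocation_candidates = good
+ #   return bool(good)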
diff --git a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
index c500b4a887..27d80b884e 100644
--- a/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_pci_passthrough_filters.py
@@ -10,7 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
+from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
from nova.pci import stats
@@ -33,11 +35,16 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
requests = objects.InstancePCIRequests(requests=[request])
spec_obj = objects.RequestSpec(pci_requests=requests)
host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ "allocation_candidates": [{"mappings": {}}],
+ },
+ )
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
+ requests.requests, provider_mapping={})
def test_pci_passthrough_fail(self):
pci_stats_mock = mock.MagicMock()
@@ -47,11 +54,16 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
requests = objects.InstancePCIRequests(requests=[request])
spec_obj = objects.RequestSpec(pci_requests=requests)
host = fakes.FakeHostState(
- 'host1', 'node1',
- attribute_dict={'pci_stats': pci_stats_mock})
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ "allocation_candidates": [{"mappings": {}}],
+ },
+ )
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
pci_stats_mock.support_requests.assert_called_once_with(
- requests.requests)
+ requests.requests, provider_mapping={})
def test_pci_passthrough_no_pci_request(self):
spec_obj = objects.RequestSpec(pci_requests=None)
@@ -82,3 +94,92 @@ class TestPCIPassthroughFilter(test.NoDBTestCase):
host = fakes.FakeHostState('host1', 'node1',
attribute_dict={'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ def test_filters_candidates(self):
+ pci_stats_mock = mock.MagicMock()
+ # simulate that only the second allocation candidate fits
+ pci_stats_mock.support_requests.side_effect = [False, True, False]
+ request = objects.InstancePCIRequest(
+ count=1,
+ spec=[{"vendor_id": "8086"}],
+ request_id=uuids.req1,
+ )
+ requests = objects.InstancePCIRequests(requests=[request])
+ spec_obj = objects.RequestSpec(pci_requests=requests)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ # simulate that placement returned 3 possible candidates
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_3"]}},
+ ],
+ },
+ )
+
+ # run the filter and expect that it passes the host as there is at
+ # least one viable candidate
+ self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+
+ # also assert that the filter checked all three candidates
+ pci_stats_mock.support_requests.assert_has_calls(
+ [
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_1"]},
+ ),
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_2"]},
+ ),
+ mock.call(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_3"]},
+ ),
+ ]
+ )
+ # and also it reduced the candidates in the host state to the only
+ # matching one
+ self.assertEqual(1, len(host.allocation_candidates))
+ self.assertEqual(
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_2"]}},
+ host.allocation_candidates[0],
+ )
+
+ def test_filter_fails_if_no_matching_candidate_left(self):
+ pci_stats_mock = mock.MagicMock()
+ # simulate that the only candidate we have does not match
+ pci_stats_mock.support_requests.side_effect = [False]
+ request = objects.InstancePCIRequest(
+ count=1,
+ spec=[{"vendor_id": "8086"}],
+ request_id=uuids.req1,
+ )
+ requests = objects.InstancePCIRequests(requests=[request])
+ spec_obj = objects.RequestSpec(pci_requests=requests)
+ host = fakes.FakeHostState(
+ "host1",
+ "node1",
+ attribute_dict={
+ "pci_stats": pci_stats_mock,
+ # simulate that placement returned 1 candidate for this host
+ "allocation_candidates": [
+ {"mappings": {f"{uuids.req1}-0": ["candidate_rp_1"]}},
+ ],
+ },
+ )
+
+ # run the filter and expect that it fails the host as there is no
+ # viable candidate left
+ self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+
+ # also assert that the filter checked our candidate
+ pci_stats_mock.support_requests.assert_called_once_with(
+ requests.requests,
+ provider_mapping={f"{uuids.req1}-0": ["candidate_rp_1"]},
+ )
+ # and also it made the candidates list empty in the host state
+ self.assertEqual(0, len(host.allocation_candidates))
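+
+ # Assumed shape of the candidate loop these PCI tests exercise (not part
+ # of this patch): each candidate mapping is offered to the PCI stats and
+ # only the supportable ones survive on the host state.
+ #
+ #   good = [
+ #       c for c in host_state.allocation_candidates
+ #       if host_state.pci_stats.support_requests(
+ #           spec_obj.pci_requests.requests,
+ #           provider_mapping=c["mappings"])
+ #   ]
+ #   host_state.allocation_candidates = good
+ #   return bool(good)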
diff --git a/nova/tests/unit/scheduler/filters/test_type_filters.py b/nova/tests/unit/scheduler/filters/test_type_filters.py
index d3f01a5c0e..c2567b5205 100644
--- a/nova/tests/unit/scheduler/filters/test_type_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_type_filters.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler.filters import type_filter
diff --git a/nova/tests/unit/scheduler/test_filters.py b/nova/tests/unit/scheduler/test_filters.py
index cb1c3ec32b..64f4121eb0 100644
--- a/nova/tests/unit/scheduler/test_filters.py
+++ b/nova/tests/unit/scheduler/test_filters.py
@@ -16,8 +16,8 @@ Tests For Scheduler Host Filters.
"""
import inspect
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import filters
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
index 5a1e665be3..1a7daa515f 100644
--- a/nova/tests/unit/scheduler/test_host_manager.py
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -19,8 +19,8 @@ Tests For HostManager
import collections
import contextlib
import datetime
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
@@ -1562,10 +1562,14 @@ class HostStateTestCase(test.NoDBTestCase):
self.assertIsNone(host.updated)
host.consume_from_request(spec_obj)
- numa_fit_mock.assert_called_once_with(fake_host_numa_topology,
- fake_numa_topology,
- limits=None, pci_requests=None,
- pci_stats=None)
+ numa_fit_mock.assert_called_once_with(
+ fake_host_numa_topology,
+ fake_numa_topology,
+ limits=None,
+ pci_requests=None,
+ pci_stats=None,
+ provider_mapping=None,
+ )
numa_usage_mock.assert_called_once_with(fake_host_numa_topology,
fake_numa_topology)
sync_mock.assert_called_once_with(("fakehost", "fakenode"))
diff --git a/nova/tests/unit/scheduler/test_manager.py b/nova/tests/unit/scheduler/test_manager.py
index 70689f6047..e992fe6034 100644
--- a/nova/tests/unit/scheduler/test_manager.py
+++ b/nova/tests/unit/scheduler/test_manager.py
@@ -17,7 +17,9 @@
Tests For Scheduler
"""
-import mock
+from unittest import mock
+
+from keystoneauth1 import exceptions as ks_exc
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -25,6 +27,7 @@ from oslo_utils.fixture import uuidsentinel as uuids
from nova import context
from nova import exception
from nova import objects
+from nova.scheduler import filters
from nova.scheduler import host_manager
from nova.scheduler import manager
from nova.scheduler import utils as scheduler_utils
@@ -395,9 +398,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, aggregates=[])
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
@@ -458,20 +468,29 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=group)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- limits={}, cell_uuid=uuids.cell, instances={}, aggregates=[])
+ instance_group=group,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ limits={},
+ cell_uuid=uuids.cell,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
instance_uuids = None
ctx = mock.Mock()
selected_hosts = self.manager._schedule(ctx, spec_obj,
- instance_uuids, mock.sentinel.alloc_reqs_by_rp_uuid,
- mock.sentinel.provider_summaries)
+ instance_uuids, None, mock.sentinel.provider_summaries)
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
@@ -509,14 +528,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance]
@@ -582,11 +611,16 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
project_id=uuids.project_id,
instance_group=None)
- host_state = mock.Mock(spec=host_manager.HostState,
- host=mock.sentinel.host, uuid=uuids.cn1, cell_uuid=uuids.cell1)
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host=mock.sentinel.host,
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ allocations_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = False
instance_uuids = [uuids.instance]
@@ -603,7 +637,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
mock_get_all_states.assert_called_once_with(
ctx.elevated.return_value, spec_obj,
mock.sentinel.provider_summaries)
- mock_get_hosts.assert_called_once_with(spec_obj, all_host_states, 0)
+ mock_get_hosts.assert_called_once_with(spec_obj, mock.ANY, 0)
mock_claim.assert_called_once_with(ctx.elevated.return_value,
self.manager.placement_client, spec_obj, uuids.instance,
alloc_reqs_by_rp_uuid[uuids.cn1][0],
@@ -634,18 +668,41 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState,
- host="fake_host", nodename="fake_node", uuid=uuids.cn1,
- cell_uuid=uuids.cell1, limits={}, updated='fake')
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ nodename="fake_node",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ limits={},
+ updated="fake",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [
- all_host_states, # first instance: return all the hosts (only one)
- [], # second: act as if no more hosts that meet criteria
- all_host_states, # the final call when creating alternates
- ]
+
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ # first instance: return all the hosts (only one)
+ if c == 0:
+ return hosts
+ # second: act as if no more hosts that meet criteria
+ elif c == 1:
+ return []
+ # the final call when creating alternates
+ elif c == 2:
+ return hosts
+ else:
+ raise StopIteration()
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
instance_uuids = [uuids.instance1, uuids.instance2]
@@ -678,20 +735,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -743,20 +824,44 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
swap=0,
vcpus=1),
project_id=uuids.project_id,
- instance_group=None)
-
- host_state0 = mock.Mock(spec=host_manager.HostState,
- host="fake_host0", nodename="fake_node0", uuid=uuids.cn0,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state1 = mock.Mock(spec=host_manager.HostState,
- host="fake_host1", nodename="fake_node1", uuid=uuids.cn1,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
- host_state2 = mock.Mock(spec=host_manager.HostState,
- host="fake_host2", nodename="fake_node2", uuid=uuids.cn2,
- cell_uuid=uuids.cell, limits={}, aggregates=[])
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state0 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host0",
+ nodename="fake_node0",
+ uuid=uuids.cn0,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host1",
+ nodename="fake_node1",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ host_state2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host2",
+ nodename="fake_node2",
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell,
+ limits={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [host_state0, host_state1, host_state2]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.return_value = all_host_states
+ # simulate that every host passes the filtering
+ mock_get_hosts.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
instance_uuids = [uuids.instance0]
@@ -813,17 +918,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
is_public=True,
name="small_flavor"),
project_id=uuids.project_id,
- instance_group=ig, instance_uuid=uuids.instance0)
+ instance_group=ig,
+ instance_uuid=uuids.instance0,
+ requested_resources=[],
+ )
# Reset the RequestSpec changes so they don't interfere with the
# assertion at the end of the test.
spec_obj.obj_reset_changes(recursive=True)
- hs1 = mock.Mock(spec=host_manager.HostState, host='host1',
- nodename="node1", limits={}, uuid=uuids.cn1,
- cell_uuid=uuids.cell1, instances={}, aggregates=[])
- hs2 = mock.Mock(spec=host_manager.HostState, host='host2',
- nodename="node2", limits={}, uuid=uuids.cn2,
- cell_uuid=uuids.cell2, instances={}, aggregates=[])
+ hs1 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host1",
+ nodename="node1",
+ limits={},
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell1,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
+ hs2 = mock.Mock(
+ spec=host_manager.HostState,
+ host="host2",
+ nodename="node2",
+ limits={},
+ uuid=uuids.cn2,
+ cell_uuid=uuids.cell2,
+ instances={},
+ aggregates=[],
+ allocation_candidates=[],
+ )
all_host_states = [hs1, hs2]
mock_get_all_states.return_value = all_host_states
mock_claim.return_value = True
@@ -837,13 +961,18 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# _get_sorted_hosts() in the two iterations for each instance in
# num_instances
visited_instances = set([])
+ get_sorted_hosts_called_with_host_states = []
def fake_get_sorted_hosts(_spec_obj, host_states, index):
# Keep track of which instances are passed to the filters.
visited_instances.add(_spec_obj.instance_uuid)
if index % 2:
- return [hs1, hs2]
- return [hs2, hs1]
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return s
+ s = list(host_states)
+ get_sorted_hosts_called_with_host_states.append(s)
+ return reversed(s)
mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [
getattr(uuids, 'instance%d' % x) for x in range(num_instances)
@@ -870,10 +999,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
# second time, we pass it the hosts that were returned from
# _get_sorted_hosts() the first time
sorted_host_calls = [
- mock.call(spec_obj, all_host_states, 0),
- mock.call(spec_obj, [hs2, hs1], 1),
+ mock.call(spec_obj, mock.ANY, 0),
+ mock.call(spec_obj, mock.ANY, 1),
]
mock_get_hosts.assert_has_calls(sorted_host_calls)
+ self.assertEqual(
+ all_host_states, get_sorted_hosts_called_with_host_states[0])
+ self.assertEqual(
+ [hs1], get_sorted_hosts_called_with_host_states[1])
# The instance group object should have both host1 and host2 in its
# instance group hosts list and there should not be any "changes" to
@@ -1167,14 +1300,36 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
name="small_flavor"),
project_id=uuids.project_id,
instance_uuid=uuids.instance_id,
- instance_group=None)
-
- host_state = mock.Mock(spec=host_manager.HostState, host="fake_host",
- uuid=uuids.cn1, cell_uuid=uuids.cell, nodename="fake_node",
- limits={}, updated="Not None")
+ instance_group=None,
+ requested_resources=[],
+ )
+
+ host_state = mock.Mock(
+ spec=host_manager.HostState,
+ host="fake_host",
+ uuid=uuids.cn1,
+ cell_uuid=uuids.cell,
+ nodename="fake_node",
+ limits={},
+ updated="Not None",
+ allocation_candidates=[],
+ )
all_host_states = [host_state]
mock_get_all_states.return_value = all_host_states
- mock_get_hosts.side_effect = [all_host_states, []]
+
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return []
+ else:
+ raise StopIteration
+
+ mock_get_hosts.side_effect = fake_get_sorted_hosts
instance_uuids = [uuids.inst1, uuids.inst2]
fake_allocs_by_rp = {uuids.cn1: [{}]}
@@ -1203,7 +1358,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
total_returned = num_alternates + 1
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1211,14 +1366,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
for num in range(num_instances)]
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1269,11 +1424,24 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
+
# There are two instances so _get_sorted_hosts is called once per
# instance and then once again before picking alternates.
- mock_sorted.side_effect = [all_host_states,
- list(reversed(all_host_states)),
- all_host_states]
+ calls = []
+
+ def fake_get_sorted_hosts(spec_obj, hosts, num):
+ c = len(calls)
+ calls.append(1)
+ if c == 0:
+ return list(hosts)
+ elif c == 1:
+ return list(reversed(all_host_states))
+ elif c == 2:
+ return list(hosts)
+ else:
+ raise StopIteration()
+
+ mock_sorted.side_effect = fake_get_sorted_hosts
mock_claim.return_value = True
total_returned = 3
self.flags(max_attempts=total_returned, group="scheduler")
@@ -1281,14 +1449,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1322,7 +1490,7 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
alloc_reqs[hs.uuid] = [{}]
mock_get_all_hosts.return_value = all_host_states
- mock_sorted.return_value = all_host_states
+ mock_sorted.side_effect = lambda spec_obj, hosts, num: list(hosts)
mock_claim.return_value = True
# Set the total returned to more than the number of available hosts
self.flags(max_attempts=max_attempts, group="scheduler")
@@ -1330,14 +1498,14 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
num_instances = len(instance_uuids)
spec_obj = objects.RequestSpec(
- num_instances=num_instances,
- flavor=objects.Flavor(memory_mb=512,
- root_gb=512,
- ephemeral_gb=0,
- swap=0,
- vcpus=1),
- project_id=uuids.project_id,
- instance_group=None)
+ num_instances=num_instances,
+ flavor=objects.Flavor(
+ memory_mb=512, root_gb=512, ephemeral_gb=0, swap=0, vcpus=1
+ ),
+ project_id=uuids.project_id,
+ instance_group=None,
+ requested_resources=[],
+ )
dests = self.manager._schedule(self.context, spec_obj,
instance_uuids, alloc_reqs, None, return_alternates=True)
@@ -1520,3 +1688,541 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
self.manager._discover_hosts_in_cells(mock.sentinel.context)
mock_log_warning.assert_not_called()
mock_log_debug.assert_called_once_with(msg)
+
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
+ @mock.patch.object(manager, 'LOG')
+ @mock.patch('nova.scheduler.host_manager.HostManager')
+ @mock.patch('nova.servicegroup.API')
+ @mock.patch('nova.rpc.get_notifier')
+ def test_init_lazy_placement_client(self, mock_rpc, mock_sg, mock_hm,
+ mock_log, mock_report):
+ # Simulate keytone or placement being offline at startup
+ mock_report.side_effect = ks_exc.RequestTimeout
+ mgr = manager.SchedulerManager()
+ mock_report.assert_called_once_with()
+ self.assertTrue(mock_log.warning.called)
+
+ # Make sure we're raising the actual error to subsequent callers
+ self.assertRaises(ks_exc.RequestTimeout, lambda: mgr.placement_client)
+
+ # Simulate recovery of the keystone or placement service
+ mock_report.reset_mock(side_effect=True)
+ mgr.placement_client
+ mock_report.assert_called_once_with()
+
+ @mock.patch('nova.scheduler.client.report.report_client_singleton')
+ @mock.patch('nova.scheduler.host_manager.HostManager')
+ @mock.patch('nova.servicegroup.API')
+ @mock.patch('nova.rpc.get_notifier')
+ def test_init_lazy_placement_client_failures(self, mock_rpc, mock_sg,
+ mock_hm, mock_report):
+ # Certain keystoneclient exceptions are fatal
+ mock_report.side_effect = ks_exc.Unauthorized
+ self.assertRaises(ks_exc.Unauthorized, manager.SchedulerManager)
+
+ # Anything else is fatal
+ mock_report.side_effect = test.TestingException
+ self.assertRaises(test.TestingException, manager.SchedulerManager)
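+
+ # A minimal sketch of the lazy-initialization pattern the two tests above
+ # assume (names and structure are illustrative, not the exact Nova code):
+ #
+ #   class SchedulerManager(...):
+ #       def __init__(self):
+ #           try:
+ #               self._placement_client = report.report_client_singleton()
+ #           except ks_exc.RequestTimeout:
+ #               # retryable: warn and defer client creation to first use
+ #               LOG.warning(...)
+ #               self._placement_client = None
+ #
+ #       @property
+ #       def placement_client(self):
+ #           if self._placement_client is None:
+ #               self._placement_client = report.report_client_singleton()
+ #           return self._placement_client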
+
+
+class SchedulerManagerAllocationCandidateTestCase(test.NoDBTestCase):
+
+ class ACRecorderFilter(filters.BaseHostFilter):
+ """A filter that records what allocation candidates it saw on each host
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.seen_candidates = []
+
+ def host_passes(self, host_state, filter_properties):
+ # record what candidate the filter saw for each host
+ self.seen_candidates.append(list(host_state.allocation_candidates))
+ return True
+
+ class DropFirstFilter(filters.BaseHostFilter):
+ """A filter that removes one candidate and keeps the rest on each
+ host
+ """
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates.pop(0)
+ return bool(host_state.allocation_candidates)
+
+ @mock.patch.object(
+ host_manager.HostManager, '_init_instance_info', new=mock.Mock())
+ @mock.patch.object(
+ host_manager.HostManager, '_init_aggregates', new=mock.Mock())
+ def setUp(self):
+ super().setUp()
+ self.context = context.RequestContext('fake_user', 'fake_project')
+ self.manager = manager.SchedulerManager()
+ self.manager.host_manager.weighers = []
+ self.request_spec = objects.RequestSpec(
+ ignore_hosts=[],
+ force_hosts=[],
+ force_nodes=[],
+ requested_resources=[],
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_see_allocation_candidates_for_each_host(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+ # have a single filter configured where we can assert that the filter
+ # sees the allocation_candidates of each host
+ filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [filter]
+
+ instance_uuids = [uuids.inst1]
+
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts with different candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ mock.sentinel.host1_a_c_1,
+ mock.sentinel.host1_a_c_2,
+ ]
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ mock.sentinel.host2_a_c_1,
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ mock.sentinel.allocation_request_version,
+ )
+
+ # we expect that our filter saw the allocation candidate list of
+ # each host respectively
+ self.assertEqual(
+ [
+ alloc_reqs_by_rp_uuid[uuids.host1],
+ alloc_reqs_by_rp_uuid[uuids.host2],
+ ],
+ filter.seen_candidates,
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_scheduler_selects_filtered_a_c_from_hosts_state(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ """Assert that if a filter removes an allocation candidate from a host
+ then even if that host is selected the removed allocation candidate
+ is not used by the scheduler.
+ """
+
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+ # we have requested one instance to be scheduled so expect one set
+ # of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(
+ "host1-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch("nova.objects.selection.Selection.from_host_state")
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consecutive_filter_sees_filtered_a_c_list(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ mock_selection_from_host_state,
+ ):
+ # create two filters
+ # 1) DropFirstFilter runs first and drops the first candidate from each
+ # host
+ # 2) ACRecorderFilter runs next and records what candidates it saw
+ recorder_filter = self.ACRecorderFilter()
+ self.manager.host_manager.enabled_filters = [
+ self.DropFirstFilter(),
+ recorder_filter,
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a host with two candidates
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ "host1-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+ # we expect that the second filter saw the host with only one candidate
+ # left, as candidate1 was already filtered out by the first filter
+ self.assertEqual(
+ [["host1-candidate2"]],
+ recorder_filter.seen_candidates
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_filters_removes_all_a_c_host_is_not_selected(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have two hosts
+ # first with a single candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ "host1-candidate1",
+ ]
+ # second with two candidates
+ host2 = host_manager.HostState("host2", "node2", uuids.cell1)
+ host2.uuid = uuids.host2
+ alloc_reqs_by_rp_uuid[uuids.host2] = [
+ "host2-candidate1",
+ "host2-candidate2",
+ ]
+ mock_get_all_host_states.return_value = iter([host1, host2])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+ # we expect that the first host is not selected as the filter
+ # removed every candidate from the host
+ # also we expect that on the second host only candidate2 could have
+ # been selected
+ # we asked for one instance, so we expect one set of selections
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives so a single selection is expected
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ # we expect that candidate2 is used as candidate1 is dropped by
+ # the filter
+ self.assertEqual(uuids.host2, selection.compute_node_uuid)
+ self.assertEqual(
+ "host2-candidate2",
+ jsonutils.loads(selection.allocation_request)
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ new=mock.Mock(return_value=True),
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_consume_selected_host_sees_updated_request_spec(
+ self,
+ mock_get_all_host_states,
+ mock_consume,
+ ):
+ # simulate that nothing is filtered out, by not having any filters
+ self.manager.host_manager.enabled_filters = []
+
+ # set up the request spec with a request group to be updated
+ # by the selected candidate
+ self.request_spec.requested_resources = [
+ objects.RequestGroup(
+ requester_id=uuids.group_req1, provider_uuids=[]
+ )
+ ]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have a single host with a single candidate
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ # simulate that placement fulfilled the above RequestGroup from
+ # a certain child RP of the host.
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child_rp],
+ }
+ }
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ # make asserts on the request_spec passed to consume
+ def assert_request_spec_updated_with_selected_candidate(
+ selected_host, spec_obj, instance_uuid=None
+ ):
+ # we expect that the scheduler updated the request_spec based on
+ # the selected candidate before calling consume
+ self.assertEqual(
+ [uuids.host1_child_rp],
+ spec_obj.requested_resources[0].provider_uuids,
+ )
+
+ mock_consume.side_effect = (
+ assert_request_spec_updated_with_selected_candidate)
+
+ self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ )
+
+ mock_consume.assert_called_once()
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_main_selection_with_claimed_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+ """Assert that the first (a.k.a main) selection returned for an
+ instance always maps to the allocation candidate, that was claimed by
+ the scheduler in placement.
+ """
+ # use the filter that always drops the first candidate on each host
+ self.manager.host_manager.enabled_filters = [self.DropFirstFilter()]
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have one host with 3 candidates each fulfilling a request group
+ # from different child RP
+ host1 = host_manager.HostState("host1", "node1", uuids.cell1)
+ host1.uuid = uuids.host1
+ alloc_reqs_by_rp_uuid[uuids.host1] = [
+ {
+ "mappings": {
+ # This is odd but the un-named request group uses "" as the
+ # name of the group.
+ "": [uuids.host1],
+ uuids.group_req1: [getattr(uuids, f"host1_child{i}")],
+ }
+ } for i in [1, 2, 3]
+ ]
+ mock_get_all_host_states.return_value = iter([host1])
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we did not ask for alternatives
+ self.assertEqual(1, len(selections))
+ selection = selections[0]
+ self.assertEqual(uuids.host1, selection.compute_node_uuid)
+ # we expect that host1_child2 candidate is selected as the
+ # DropFirstFilter will drop host1_child1
+ expected_a_c = {
+ "mappings": {
+ "": [uuids.host1],
+ uuids.group_req1: [uuids.host1_child2],
+ }
+ }
+ self.assertEqual(
+ expected_a_c,
+ jsonutils.loads(selection.allocation_request),
+ )
+ # and we expect that the same candidate was claimed in placement
+ mock_claim.assert_called_once_with(
+ mock.ANY,
+ self.manager.placement_client,
+ self.request_spec,
+ uuids.inst1,
+ expected_a_c,
+ allocation_request_version="fake-alloc-req-version",
+ )
+
+ @mock.patch(
+ "nova.scheduler.manager.SchedulerManager._consume_selected_host",
+ )
+ @mock.patch(
+ "nova.scheduler.utils.claim_resources",
+ return_value=True,
+ )
+ @mock.patch("nova.scheduler.manager.SchedulerManager._get_all_host_states")
+ def test_get_alternate_hosts_returns_alts_with_filtered_a_c(
+ self,
+ mock_get_all_host_states,
+ mock_claim,
+ mock_consume,
+ ):
+ """Assert that alternate generation also works based on filtered
+ candidates.
+ """
+
+ class RPFilter(filters.BaseHostFilter):
+ """A filter that only allows candidates with specific RPs"""
+
+ def __init__(self, allowed_rp_uuids):
+ self.allowed_rp_uuids = allowed_rp_uuids
+
+ def host_passes(self, host_state, filter_properties):
+ host_state.allocation_candidates = [
+ a_c
+ for a_c in host_state.allocation_candidates
+ if a_c["mappings"][uuids.group_req1][0]
+ in self.allowed_rp_uuids
+ ]
+ return True
+
+ instance_uuids = [uuids.inst1]
+ alloc_reqs_by_rp_uuid = {}
+ # have 3 hosts each with 2 allocation candidates fulfilling a request
+ # group from a different child RP
+ hosts = []
+ for i in [1, 2, 3]:
+ host = host_manager.HostState(f"host{i}", f"node{i}", uuids.cell1)
+ host.uuid = getattr(uuids, f"host{i}")
+ alloc_reqs_by_rp_uuid[host.uuid] = [
+ {
+ "mappings": {
+ "": [host.uuid],
+ uuids.group_req1: [
+ getattr(uuids, f"host{i}_child{j}")
+ ],
+ }
+ }
+ for j in [1, 2]
+ ]
+ hosts.append(host)
+ mock_get_all_host_states.return_value = iter(hosts)
+
+ # configure a filter that only "likes" host1_child2 and host3_child2
+ # RPs. This means host2 is totally out and host1 and host3 only have
+ # one viable candidate
+ self.manager.host_manager.enabled_filters = [
+ RPFilter(allowed_rp_uuids=[uuids.host1_child2, uuids.host3_child2])
+ ]
+
+ result = self.manager._schedule(
+ self.context,
+ self.request_spec,
+ instance_uuids,
+ alloc_reqs_by_rp_uuid,
+ mock.sentinel.provider_summaries,
+ 'fake-alloc-req-version',
+ return_alternates=True,
+ )
+ # we scheduled one instance
+ self.assertEqual(1, len(result))
+ selections = result[0]
+ # we expect a main selection and a single alternative
+ # (host1 and host3); on both selections we expect child2 as the
+ # selected candidate
+ self.assertEqual(2, len(selections))
+ main_selection = selections[0]
+ self.assertEqual(uuids.host1, main_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host1_child2],
+ jsonutils.loads(main_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
+
+ alt_selection = selections[1]
+ self.assertEqual(uuids.host3, alt_selection.compute_node_uuid)
+ self.assertEqual(
+ [uuids.host3_child2],
+ jsonutils.loads(alt_selection.allocation_request)["mappings"][
+ uuids.group_req1
+ ],
+ )
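+
+ # A condensed, assumed view of the flow this test class relies on (the
+ # real logic lives in nova/scheduler/manager.py and is not part of this
+ # patch): the manager seeds each host state with its candidates, filters
+ # may trim that list in place, and for the selected host the first
+ # surviving candidate is what gets claimed in placement and recorded in
+ # the Selection, roughly:
+ #
+ #   host_state.allocation_candidates = list(
+ #       alloc_reqs_by_rp_uuid[host_state.uuid])
+ #   ...
+ #   alloc_req = selected_host.allocation_candidates[0]
+ #   scheduler_utils.claim_resources(
+ #       elevated, self.placement_client, spec_obj, instance_uuid,
+ #       alloc_req, allocation_request_version=allocation_request_version)
+ #   selection = objects.Selection.from_host_state(
+ #       selected_host, allocation_request=alloc_req)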
diff --git a/nova/tests/unit/scheduler/test_request_filter.py b/nova/tests/unit/scheduler/test_request_filter.py
index 7be7f8341d..77e538006a 100644
--- a/nova/tests/unit/scheduler/test_request_filter.py
+++ b/nova/tests/unit/scheduler/test_request_filter.py
@@ -10,8 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
import os_traits as ot
+from unittest import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -406,13 +406,15 @@ class TestRequestFilter(test.NoDBTestCase):
self.assertIn('took %.1f seconds', log_lines[1])
@mock.patch.object(request_filter, 'LOG', new=mock.Mock())
- def test_transform_image_metadata(self):
+ def test_transform_image_metadata_x86(self):
self.flags(image_metadata_prefilter=True, group='scheduler')
properties = objects.ImageMetaProps(
hw_disk_bus=objects.fields.DiskBus.SATA,
hw_cdrom_bus=objects.fields.DiskBus.IDE,
hw_video_model=objects.fields.VideoModel.QXL,
- hw_vif_model=network_model.VIF_MODEL_VIRTIO
+ hw_vif_model=network_model.VIF_MODEL_VIRTIO,
+ hw_architecture=objects.fields.Architecture.X86_64,
+ hw_emulation_architecture=objects.fields.Architecture.AARCH64
)
reqspec = objects.RequestSpec(
image=objects.ImageMeta(properties=properties),
@@ -426,6 +428,36 @@ class TestRequestFilter(test.NoDBTestCase):
'COMPUTE_NET_VIF_MODEL_VIRTIO',
'COMPUTE_STORAGE_BUS_IDE',
'COMPUTE_STORAGE_BUS_SATA',
+ 'HW_ARCH_X86_64',
+ 'COMPUTE_ARCH_AARCH64',
+ }
+ self.assertEqual(expected, reqspec.root_required)
+
+ @mock.patch.object(request_filter, 'LOG', new=mock.Mock())
+ def test_transform_image_metadata_aarch64(self):
+ self.flags(image_metadata_prefilter=True, group='scheduler')
+ properties = objects.ImageMetaProps(
+ hw_disk_bus=objects.fields.DiskBus.SATA,
+ hw_cdrom_bus=objects.fields.DiskBus.IDE,
+ hw_video_model=objects.fields.VideoModel.QXL,
+ hw_vif_model=network_model.VIF_MODEL_VIRTIO,
+ hw_architecture=objects.fields.Architecture.AARCH64,
+ hw_emulation_architecture=objects.fields.Architecture.X86_64
+ )
+ reqspec = objects.RequestSpec(
+ image=objects.ImageMeta(properties=properties),
+ flavor=objects.Flavor(extra_specs={}),
+ )
+ self.assertTrue(
+ request_filter.transform_image_metadata(None, reqspec)
+ )
+ expected = {
+ 'COMPUTE_GRAPHICS_MODEL_QXL',
+ 'COMPUTE_NET_VIF_MODEL_VIRTIO',
+ 'COMPUTE_STORAGE_BUS_IDE',
+ 'COMPUTE_STORAGE_BUS_SATA',
+ 'HW_ARCH_AARCH64',
+ 'COMPUTE_ARCH_X86_64',
}
self.assertEqual(expected, reqspec.root_required)
@@ -580,3 +612,90 @@ class TestRequestFilter(test.NoDBTestCase):
mock_get_aggs_network.assert_has_calls([
mock.call(self.context, mock.ANY, mock.ANY, uuids.net1),
mock.call(self.context, mock.ANY, mock.ANY, uuids.net2)])
+
+ def test_ephemeral_encryption_filter_no_encryption(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ # Assert that the filter returns false and doesn't update the reqspec
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_disabled(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps(
+ hw_ephemeral_encryption=False)))
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(
+ self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={
+ 'hw:ephemeral_encryption': 'False'}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertFalse(
+ request_filter.ephemeral_encryption_filter(
+ self.context, reqspec))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_no_format(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(extra_specs={
+ 'hw:ephemeral_encryption': 'True'}),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+ self.assertTrue(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(
+ {ot.COMPUTE_EPHEMERAL_ENCRYPTION}, reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+
+ def test_ephemeral_encryption_filter_encryption_and_format(self):
+ # First ensure that ephemeral_encryption_filter is included
+ self.assertIn(request_filter.ephemeral_encryption_filter,
+ request_filter.ALL_REQUEST_FILTERS)
+
+ reqspec = objects.RequestSpec(
+ flavor=objects.Flavor(
+ extra_specs={
+ 'hw:ephemeral_encryption': 'True',
+ 'hw:ephemeral_encryption_format': 'luks'
+ }),
+ image=objects.ImageMeta(
+ properties=objects.ImageMetaProps()))
+ self.assertEqual(set(), reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
+ self.assertTrue(
+ request_filter.ephemeral_encryption_filter(self.context, reqspec))
+ self.assertEqual(
+ {ot.COMPUTE_EPHEMERAL_ENCRYPTION,
+ ot.COMPUTE_EPHEMERAL_ENCRYPTION_LUKS},
+ reqspec.root_required)
+ self.assertEqual(set(), reqspec.root_forbidden)
diff --git a/nova/tests/unit/scheduler/test_rpcapi.py b/nova/tests/unit/scheduler/test_rpcapi.py
index 3c56946975..51582891aa 100644
--- a/nova/tests/unit/scheduler/test_rpcapi.py
+++ b/nova/tests/unit/scheduler/test_rpcapi.py
@@ -16,7 +16,8 @@
Unit Tests for nova.scheduler.rpcapi
"""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import conf
diff --git a/nova/tests/unit/scheduler/test_utils.py b/nova/tests/unit/scheduler/test_utils.py
index 8aff5b902e..55957f3d55 100644
--- a/nova/tests/unit/scheduler/test_utils.py
+++ b/nova/tests/unit/scheduler/test_utils.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/scheduler/weights/test_weights_affinity.py b/nova/tests/unit/scheduler/weights/test_weights_affinity.py
index 10ec7e698d..3048e9f06c 100644
--- a/nova/tests/unit/scheduler/weights/test_weights_affinity.py
+++ b/nova/tests/unit/scheduler/weights/test_weights_affinity.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import objects
from nova.scheduler import weights
diff --git a/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py b/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py
new file mode 100644
index 0000000000..c6e4abd4cd
--- /dev/null
+++ b/nova/tests/unit/scheduler/weights/test_weights_hypervisor_version.py
@@ -0,0 +1,97 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler hypervisor version weights.
+"""
+
+from nova.scheduler import weights
+from nova.scheduler.weights import hypervisor_version
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class HypervisorVersionWeigherTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super().setUp()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weighers = [hypervisor_version.HypervisorVersionWeigher()]
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(self.weighers,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ host_values = [
+ ('host1', 'node1', {'hypervisor_version': 1}),
+ ('host2', 'node2', {'hypervisor_version': 200}),
+ ('host3', 'node3', {'hypervisor_version': 100}),
+ ('host4', 'node4', {'hypervisor_version': 1000}),
+ ]
+ return [fakes.FakeHostState(host, node, values)
+ for host, node, values in host_values]
+
+ def test_multiplier_default(self):
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_multiplier_default_full_ordering(self):
+ hostinfo_list = self._get_all_hosts()
+ weighed_hosts = self.weight_handler.get_weighed_objects(
+ self.weighers, hostinfo_list, {}
+ )
+ expected_hosts = [fakes.FakeHostState(host, node, values)
+ for host, node, values in [
+ ('host4', 'node4', {'hypervisor_version': 1000}),
+ ('host2', 'node2', {'hypervisor_version': 200}),
+ ('host3', 'node3', {'hypervisor_version': 100}),
+ ('host1', 'node1', {'hypervisor_version': 1}),
+ ]]
+ for actual, expected in zip(
+ weighed_hosts,
+ expected_hosts
+ ):
+ self.assertEqual(actual.obj.host, expected.host)
+
+ def test_multiplier_none(self):
+ multi = 0.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(multi, weighed_host.weight)
+
+ def test_multiplier_positive(self):
+ multi = 2.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0 * multi, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_multiplier_negative(self):
+ multi = -1.0
+ self.flags(
+ hypervisor_version_weight_multiplier=multi,
+ group='filter_scheduler'
+ )
+ hostinfo_list = self._get_all_hosts()
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual('host1', weighed_host.obj.host)
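+
+ # Illustrative sketch (an assumption, not the new file itself) of the
+ # weigher these tests target: the raw hypervisor_version is used as the
+ # weight, the handler normalizes it across hosts, and the configured
+ # multiplier scales or inverts the ordering.
+ #
+ #   class HypervisorVersionWeigher(weights.BaseHostWeigher):
+ #       def weight_multiplier(self, host_state):
+ #           return utils.get_weight_multiplier(
+ #               host_state, 'hypervisor_version_weight_multiplier',
+ #               CONF.filter_scheduler.hypervisor_version_weight_multiplier)
+ #
+ #       def _weigh_object(self, host_state, weight_properties):
+ #           return host_state.hypervisor_version or 0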
diff --git a/nova/tests/unit/scheduler/weights/test_weights_metrics.py b/nova/tests/unit/scheduler/weights/test_weights_metrics.py
index 21667813e3..d507000b12 100644
--- a/nova/tests/unit/scheduler/weights/test_weights_metrics.py
+++ b/nova/tests/unit/scheduler/weights/test_weights_metrics.py
@@ -114,7 +114,7 @@ class MetricsWeigherTestCase(test.NoDBTestCase):
setting = [idle + '=-2', idle + '=1']
self._do_test(setting, 1.0, 'host1')
- def test_single_resourcenegtive_ratio(self):
+ def test_single_resourcenegative_ratio(self):
# host1: idle=512
# host2: idle=1024
# host3: idle=3072
diff --git a/nova/tests/unit/scheduler/weights/test_weights_pci.py b/nova/tests/unit/scheduler/weights/test_weights_pci.py
index d257c67c1f..3bdc94f357 100644
--- a/nova/tests/unit/scheduler/weights/test_weights_pci.py
+++ b/nova/tests/unit/scheduler/weights/test_weights_pci.py
@@ -127,7 +127,7 @@ class PCIWeigherTestCase(test.NoDBTestCase):
"""Test weigher with a PCI device instance and huge hosts.
Ensure that the weigher gracefully degrades when the number of PCI
- devices on the host exceeeds MAX_DEVS.
+ devices on the host exceeds MAX_DEVS.
"""
hosts = [
('host1', 'node1', [500]), # 500 devs
diff --git a/nova/tests/unit/servicegroup/test_api.py b/nova/tests/unit/servicegroup/test_api.py
index b451285e4e..4ded10360a 100644
--- a/nova/tests/unit/servicegroup/test_api.py
+++ b/nova/tests/unit/servicegroup/test_api.py
@@ -15,7 +15,7 @@
"""
Test the base class for the servicegroup API
"""
-import mock
+from unittest import mock
from nova import servicegroup
from nova import test
diff --git a/nova/tests/unit/servicegroup/test_db_servicegroup.py b/nova/tests/unit/servicegroup/test_db_servicegroup.py
index 9e04451ec7..9f718e17b7 100644
--- a/nova/tests/unit/servicegroup/test_db_servicegroup.py
+++ b/nova/tests/unit/servicegroup/test_db_servicegroup.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
diff --git a/nova/tests/unit/servicegroup/test_mc_servicegroup.py b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
index 3b8399dfe3..e3896bb375 100644
--- a/nova/tests/unit/servicegroup/test_mc_servicegroup.py
+++ b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
@@ -16,7 +16,7 @@
# under the License.
import iso8601
-import mock
+from unittest import mock
from nova import servicegroup
from nova import test
diff --git a/nova/tests/unit/storage/test_rbd.py b/nova/tests/unit/storage/test_rbd.py
index 396f22c643..f89c2dee89 100644
--- a/nova/tests/unit/storage/test_rbd.py
+++ b/nova/tests/unit/storage/test_rbd.py
@@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
from eventlet import tpool
-import mock
from oslo_concurrency import processutils
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -524,7 +524,7 @@ class RbdTestCase(test.NoDBTestCase):
self.driver.destroy_volume(vol)
# Make sure both params have the expected values
- retryctx = mock_loopingcall.call_args.args[3]
+ retryctx = mock_loopingcall.call_args[0][3]
self.assertEqual(retryctx, {'retries': 6})
loopingcall.start.assert_called_with(interval=10)
diff --git a/nova/tests/unit/test_availability_zones.py b/nova/tests/unit/test_availability_zones.py
index 438e8dba24..f2e02e39c7 100644
--- a/nova/tests/unit/test_availability_zones.py
+++ b/nova/tests/unit/test_availability_zones.py
@@ -17,7 +17,8 @@
Tests for availability zones
"""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from nova import availability_zones as az
diff --git a/nova/tests/unit/test_block_device.py b/nova/tests/unit/test_block_device.py
index f5a4fc5694..40020a203f 100644
--- a/nova/tests/unit/test_block_device.py
+++ b/nova/tests/unit/test_block_device.py
@@ -17,7 +17,8 @@
Tests for Block Device utility functions.
"""
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
diff --git a/nova/tests/unit/test_cache.py b/nova/tests/unit/test_cache.py
index b7059796f1..3f656a49b0 100644
--- a/nova/tests/unit/test_cache.py
+++ b/nova/tests/unit/test_cache.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import cache_utils
from nova import test
diff --git a/nova/tests/unit/test_cinder.py b/nova/tests/unit/test_cinder.py
index 00e79711ec..e758343549 100644
--- a/nova/tests/unit/test_cinder.py
+++ b/nova/tests/unit/test_cinder.py
@@ -13,9 +13,9 @@
# under the License.
import collections
+from unittest import mock
from cinderclient.v3 import client as cinder_client_v3
-import mock
from requests_mock.contrib import fixture
import nova.conf
diff --git a/nova/tests/unit/test_conf.py b/nova/tests/unit/test_conf.py
index 95a7c45114..4496922e26 100644
--- a/nova/tests/unit/test_conf.py
+++ b/nova/tests/unit/test_conf.py
@@ -14,8 +14,8 @@
import os
import tempfile
+from unittest import mock
-import mock
from oslo_config import cfg
import nova.conf.compute
diff --git a/nova/tests/unit/test_configdrive2.py b/nova/tests/unit/test_configdrive2.py
index 4c0ae0acb4..d04310639b 100644
--- a/nova/tests/unit/test_configdrive2.py
+++ b/nova/tests/unit/test_configdrive2.py
@@ -16,8 +16,8 @@
import os
import tempfile
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils import fileutils
diff --git a/nova/tests/unit/test_context.py b/nova/tests/unit/test_context.py
index cc3d7c7eea..53c8825046 100644
--- a/nova/tests/unit/test_context.py
+++ b/nova/tests/unit/test_context.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
from oslo_utils.fixture import uuidsentinel as uuids
@@ -196,7 +197,6 @@ class ContextTestCase(test.NoDBTestCase):
'roles': [],
'service_catalog': [],
'show_deleted': False,
- 'tenant': 222,
'timestamp': '2015-03-02T22:31:56.641629',
'user': 111,
'user_domain': None,
diff --git a/nova/tests/unit/test_crypto.py b/nova/tests/unit/test_crypto.py
index 30152b2b01..5cf92af448 100644
--- a/nova/tests/unit/test_crypto.py
+++ b/nova/tests/unit/test_crypto.py
@@ -18,11 +18,11 @@ Tests for Crypto module.
import io
import os
+from unittest import mock
from castellan.common import exception as castellan_exception
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import serialization
-import mock
from oslo_concurrency import processutils
from oslo_utils.fixture import uuidsentinel as uuids
import paramiko
diff --git a/nova/tests/unit/test_exception_wrapper.py b/nova/tests/unit/test_exception_wrapper.py
index 56eadf6952..71da124fd9 100644
--- a/nova/tests/unit/test_exception_wrapper.py
+++ b/nova/tests/unit/test_exception_wrapper.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context as nova_context
from nova import exception_wrapper
diff --git a/nova/tests/unit/test_filesystem.py b/nova/tests/unit/test_filesystem.py
new file mode 100644
index 0000000000..85f16157ee
--- /dev/null
+++ b/nova/tests/unit/test_filesystem.py
@@ -0,0 +1,52 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+
+from nova import exception
+from nova import filesystem
+from nova import test
+
+
+class TestFSCommon(test.NoDBTestCase):
+
+ def test_read_sys(self):
+ open_mock = mock.mock_open(read_data='bar')
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertEqual('bar', filesystem.read_sys('foo'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_read_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.read_sys, 'foo')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='r')
+
+ def test_write_sys(self):
+ open_mock = mock.mock_open()
+ with mock.patch('builtins.open', open_mock) as m_open:
+ self.assertIsNone(filesystem.write_sys('foo', 'bar'))
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
+ open_mock().write.assert_called_once_with('bar')
+
+ def test_write_sys_error(self):
+ with mock.patch('builtins.open',
+ side_effect=OSError('fake_error')) as m_open:
+ self.assertRaises(exception.FileNotFound,
+ filesystem.write_sys, 'foo', 'bar')
+ expected_path = os.path.join(filesystem.SYS, 'foo')
+ m_open.assert_called_once_with(expected_path, mode='w')
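
For context, a minimal standalone sketch of the behaviour these new tests pin down for the nova.filesystem helpers: read_sys and write_sys join the requested relative path onto a SYS root, open it in the corresponding mode, and translate OSError into a FileNotFound-style exception. The SYS value and the exception class below are assumptions used for illustration, not the Nova module itself.

    import os

    SYS = '/sys'

    class FileNotFound(Exception):
        """Stand-in for nova.exception.FileNotFound."""

    def read_sys(path):
        # Return the contents of a file below SYS, e.g. read_sys('kernel/ostype').
        full_path = os.path.join(SYS, path)
        try:
            with open(full_path, mode='r') as f:
                return f.read()
        except OSError as exc:
            raise FileNotFound(f'{full_path}: {exc}') from exc

    def write_sys(path, data):
        # Write data to a file below SYS and return None, as the tests expect.
        full_path = os.path.join(SYS, path)
        try:
            with open(full_path, mode='w') as f:
                f.write(data)
        except OSError as exc:
            raise FileNotFound(f'{full_path}: {exc}') from exc
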
diff --git a/nova/tests/unit/test_fixtures.py b/nova/tests/unit/test_fixtures.py
index 22b278771a..8a5db79855 100644
--- a/nova/tests/unit/test_fixtures.py
+++ b/nova/tests/unit/test_fixtures.py
@@ -17,10 +17,10 @@
import copy
import datetime
import io
+from unittest import mock
import fixtures as fx
import futurist
-import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
@@ -51,6 +51,20 @@ CONF = cfg.CONF
class TestLogging(testtools.TestCase):
def test_default_logging(self):
+ # This test validates that, in the default logging mode, we have two
+ # logging handlers:
+ # 1 x to display default messages (info, error, warnings...)
+ # 1 x to redirect debug messages to null so they are not displayed.
+
+ # However, if OS_DEBUG=True is set in the shell session the test runs
+ # in, the test fails, because in debug mode there should be only one
+ # handler that displays all messages.
+
+ # Here we explicitly set OS_DEBUG=0.
+ # This ensures we get two handlers regardless of the OS_DEBUG value
+ # set in the user's shell.
+ self.useFixture(fx.EnvironmentVariable('OS_DEBUG', '0'))
+
stdlog = self.useFixture(fixtures.StandardLogging())
root = logging.getLogger()
# there should be a null handler as well at DEBUG
diff --git a/nova/tests/unit/test_hacking.py b/nova/tests/unit/test_hacking.py
index 03b7692217..41cbada99f 100644
--- a/nova/tests/unit/test_hacking.py
+++ b/nova/tests/unit/test_hacking.py
@@ -13,8 +13,8 @@
# under the License.
import textwrap
+from unittest import mock
-import mock
import pycodestyle
from nova.hacking import checks
@@ -1020,3 +1020,47 @@ class HackingTestCase(test.NoDBTestCase):
nova_utils.ReaderWriterLock()
"""
self._assert_has_no_errors(code, checks.check_lockutils_rwlocks)
+
+ def test_check_six(self):
+ code = """
+ import six
+ from six import moves
+ from six.moves import range
+ import six.moves.urllib.parse as urlparse
+ """
+ errors = [(x + 1, 0, 'N370') for x in range(4)]
+ self._assert_has_errors(code, checks.check_six, expected_errors=errors)
+
+ def test_import_stock_mock(self):
+ self._assert_has_errors(
+ "import mock",
+ checks.import_stock_mock, expected_errors=[(1, 0, 'N371')])
+ self._assert_has_errors(
+ "from mock import patch",
+ checks.import_stock_mock, expected_errors=[(1, 0, 'N371')])
+ code = """
+ from unittest import mock
+ import unittest.mock
+ """
+ self._assert_has_no_errors(code, checks.import_stock_mock)
+
+ def test_check_set_daemon(self):
+ code = """
+ self.setDaemon(True)
+ worker.setDaemon(True)
+ self._event_thread.setDaemon(True)
+ mythread.setDaemon(False)
+ self.thread.setDaemon(1)
+ """
+ errors = [(x + 1, 0, 'N372') for x in range(5)]
+ self._assert_has_errors(
+ code, checks.check_set_daemon, expected_errors=errors)
+
+ code = """
+ self.setDaemon = True
+ worker.setDaemonFlag(True)
+ self._event_thread.resetDaemon(True)
+ self.set.Daemon(True)
+ self.thread.setdaemon(True)
+ """
+ self._assert_has_no_errors(code, checks.check_set_daemon)
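
For context, a rough standalone sketch of how a hacking check of this shape is typically written: a module-level regex plus a generator that yields an (offset, message) pair for each offending logical line. Only the behaviour asserted above is taken from the tests (bare 'mock' imports flagged as N371, unittest.mock left alone); the regex and message wording are assumptions.

    import re

    stock_mock_re = re.compile(r'^\s*(import mock(\s|$|\.)|from mock import )')

    def import_stock_mock(logical_line):
        # Flag imports of the third-party mock package instead of unittest.mock.
        if stock_mock_re.match(logical_line):
            yield (0, "N371: use 'from unittest import mock' instead of the "
                      "standalone mock package")

    if __name__ == '__main__':
        for line in ('import mock', 'from mock import patch',
                     'from unittest import mock', 'import unittest.mock'):
            print(line, '->', list(import_stock_mock(line)))
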
diff --git a/nova/tests/unit/test_identity.py b/nova/tests/unit/test_identity.py
index 099a9182d7..2bb5e7f9c0 100644
--- a/nova/tests/unit/test_identity.py
+++ b/nova/tests/unit/test_identity.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from keystoneauth1.adapter import Adapter
from keystoneauth1 import exceptions as kse
@@ -29,7 +29,7 @@ class IdentityValidationTest(test.NoDBTestCase):
There are times when Nova stores keystone project_id and user_id
in our database as strings. Until the Pike release none of this
- data was validated, so it was very easy for adminstrators to think
+ data was validated, so it was very easy for administrators to think
they were adjusting quota for a project (by name) when instead
they were just inserting keys in a database that would not get used.
diff --git a/nova/tests/unit/test_json_ref.py b/nova/tests/unit/test_json_ref.py
index 5a139055f5..e7cbbc9133 100644
--- a/nova/tests/unit/test_json_ref.py
+++ b/nova/tests/unit/test_json_ref.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import copy
-import mock
+from unittest import mock
from nova import test
from nova.tests import json_ref
diff --git a/nova/tests/unit/test_metadata.py b/nova/tests/unit/test_metadata.py
index 630cb54418..d013aeb651 100644
--- a/nova/tests/unit/test_metadata.py
+++ b/nova/tests/unit/test_metadata.py
@@ -22,10 +22,10 @@ import hmac
import os
import pickle
import re
+from unittest import mock
from keystoneauth1 import exceptions as ks_exceptions
from keystoneauth1 import session
-import mock
from oslo_config import cfg
from oslo_serialization import base64
from oslo_serialization import jsonutils
@@ -1200,7 +1200,7 @@ class MetadataHandlerTestCase(test.TestCase):
def _fake_x_get_metadata(self, self_app, instance_id, remote_address):
if remote_address is None:
- raise Exception('Expected X-Forwared-For header')
+ raise Exception('Expected X-Forwarded-For header')
if encodeutils.to_utf8(instance_id) == self.expected_instance_id:
return self.mdinst
@@ -1458,20 +1458,17 @@ class MetadataHandlerTestCase(test.TestCase):
for c in range(ord('a'), ord('z'))]
mock_client.list_subnets.return_value = {
'subnets': subnet_list}
+ mock_client.list_ports.side_effect = fake_list_ports
- with mock.patch.object(
- mock_client, 'list_ports',
- side_effect=fake_list_ports) as mock_list_ports:
-
- response = fake_request(
- self, self.mdinst,
- relpath="/2009-04-04/user-data",
- address="192.192.192.2",
- fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
- headers={'X-Forwarded-For': '192.192.192.2',
- 'X-Metadata-Provider': proxy_lb_id})
-
- self.assertEqual(3, mock_list_ports.call_count)
+ response = fake_request(
+ self, self.mdinst,
+ relpath="/2009-04-04/user-data",
+ address="192.192.192.2",
+ fake_get_metadata_by_instance_id=self._fake_x_get_metadata,
+ headers={'X-Forwarded-For': '192.192.192.2',
+ 'X-Metadata-Provider': proxy_lb_id})
+
+ self.assertEqual(3, mock_client.list_ports.call_count)
self.assertEqual(200, response.status_int)
diff --git a/nova/tests/unit/test_notifications.py b/nova/tests/unit/test_notifications.py
index 344f62e758..062eeb7f4f 100644
--- a/nova/tests/unit/test_notifications.py
+++ b/nova/tests/unit/test_notifications.py
@@ -17,8 +17,8 @@
import copy
import datetime
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_context import fixture as o_fixture
from oslo_utils.fixture import uuidsentinel as uuids
@@ -112,7 +112,7 @@ class NotificationsTestCase(test.TestCase):
# test config disable of just the task state notifications
self.flags(notify_on_state_change="vm_state", group='notifications')
- # we should not get a notification on task stgate chagne now
+ # we should not get a notification on task state change now
old = copy.copy(self.instance)
self.instance.task_state = task_states.SPAWNING
diff --git a/nova/tests/unit/test_notifier.py b/nova/tests/unit/test_notifier.py
index 95366cdf28..fc01b1cf83 100644
--- a/nova/tests/unit/test_notifier.py
+++ b/nova/tests/unit/test_notifier.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import rpc
from nova import test
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
index f6f5c3e64b..752b872381 100644
--- a/nova/tests/unit/test_policy.py
+++ b/nova/tests/unit/test_policy.py
@@ -16,8 +16,8 @@
"""Test of Policy Engine For Nova."""
import os.path
+from unittest import mock
-import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
import requests_mock
@@ -303,10 +303,10 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
def setUp(self):
super(RealRolePolicyTestCase, self).setUp()
self.policy = self.useFixture(nova_fixtures.RealPolicyFixture())
- self.non_admin_context = context.RequestContext('fake', 'fake',
- roles=['member'])
- self.admin_context = context.RequestContext('fake', 'fake', True,
- roles=['member'])
+ self.non_admin_context = context.RequestContext(
+ 'fake', 'fake', roles=['member', 'reader'])
+ self.admin_context = context.RequestContext(
+ 'fake', 'fake', True, roles=['admin', 'member', 'reader'])
self.target = {}
self.fake_policy = jsonutils.loads(fake_policy.policy_data)
@@ -358,6 +358,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-services:update",
"os_compute_api:os-services:delete",
"os_compute_api:os-shelve:shelve_offload",
+"os_compute_api:os-shelve:unshelve_to_host",
"os_compute_api:os-availability-zone:detail",
"os_compute_api:os-assisted-volume-snapshots:create",
"os_compute_api:os-assisted-volume-snapshots:delete",
@@ -366,6 +367,27 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-server-external-events:create",
"os_compute_api:os-volumes-attachments:swap",
"os_compute_api:servers:create:zero_disk_flavor",
+"os_compute_api:os-baremetal-nodes:list",
+"os_compute_api:os-baremetal-nodes:show",
+"os_compute_api:servers:migrations:index",
+"os_compute_api:servers:migrations:show",
+"os_compute_api:os-simple-tenant-usage:list",
+"os_compute_api:os-migrations:index",
+"os_compute_api:os-services:list",
+"os_compute_api:os-instance-actions:events:details",
+"os_compute_api:os-instance-usage-audit-log:list",
+"os_compute_api:os-instance-usage-audit-log:show",
+"os_compute_api:os-hosts:list",
+"os_compute_api:os-hosts:show",
+"os_compute_api:os-hypervisors:list",
+"os_compute_api:os-hypervisors:list-detail",
+"os_compute_api:os-hypervisors:show",
+"os_compute_api:os-hypervisors:statistics",
+"os_compute_api:os-hypervisors:uptime",
+"os_compute_api:os-hypervisors:search",
+"os_compute_api:os-hypervisors:servers",
+"os_compute_api:limits:other_project",
+"os_compute_api:os-flavor-access",
)
self.admin_or_owner_rules = (
@@ -409,6 +431,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:servers:resize",
"os_compute_api:servers:revert_resize",
"os_compute_api:servers:show",
+"os_compute_api:servers:show:flavor-extra-specs",
"os_compute_api:servers:update",
"os_compute_api:servers:create_image:allow_volume_backed",
"os_compute_api:os-admin-password",
@@ -418,7 +441,6 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-remote-consoles",
"os_compute_api:os-deferred-delete:restore",
"os_compute_api:os-deferred-delete:force",
-"os_compute_api:os-flavor-access",
"os_compute_api:os-flavor-extra-specs:index",
"os_compute_api:os-flavor-extra-specs:show",
"os_compute_api:os-floating-ips:add",
@@ -454,44 +476,6 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-volumes-attachments:create",
"os_compute_api:os-volumes-attachments:delete",
"os_compute_api:os-volumes-attachments:update",
-)
-
- self.allow_all_rules = (
-"os_compute_api:os-quota-sets:defaults",
-"os_compute_api:os-availability-zone:list",
-"os_compute_api:limits",
-"os_compute_api:extensions",
-"os_compute_api:os-floating-ip-pools",
-)
-
- self.system_reader_rules = (
-"os_compute_api:os-tenant-networks:list",
-"os_compute_api:os-tenant-networks:show",
-"os_compute_api:os-baremetal-nodes:list",
-"os_compute_api:os-baremetal-nodes:show",
-"os_compute_api:servers:migrations:index",
-"os_compute_api:servers:migrations:show",
-"os_compute_api:os-simple-tenant-usage:list",
-"os_compute_api:os-migrations:index",
-"os_compute_api:os-services:list",
-"os_compute_api:os-instance-actions:events:details",
-"os_compute_api:os-instance-usage-audit-log:list",
-"os_compute_api:os-instance-usage-audit-log:show",
-"os_compute_api:os-hosts:list",
-"os_compute_api:os-hosts:show",
-"os_compute_api:os-hypervisors:list",
-"os_compute_api:os-hypervisors:list-detail",
-"os_compute_api:os-hypervisors:show",
-"os_compute_api:os-hypervisors:statistics",
-"os_compute_api:os-hypervisors:uptime",
-"os_compute_api:os-hypervisors:search",
-"os_compute_api:os-hypervisors:servers",
-"os_compute_api:limits:other_project",
-"os_compute_api:os-networks:list",
-"os_compute_api:os-networks:show",
-)
-
- self.system_reader_or_owner_rules = (
"os_compute_api:os-simple-tenant-usage:show",
"os_compute_api:os-security-groups:get",
"os_compute_api:os-security-groups:show",
@@ -513,6 +497,18 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-volumes:snapshots:show",
"os_compute_api:os-volumes:snapshots:list",
"os_compute_api:os-volumes:snapshots:detail",
+"os_compute_api:os-networks:list",
+"os_compute_api:os-networks:show",
+"os_compute_api:os-tenant-networks:list",
+"os_compute_api:os-tenant-networks:show",
+)
+
+ self.allow_all_rules = (
+"os_compute_api:os-quota-sets:defaults",
+"os_compute_api:os-availability-zone:list",
+"os_compute_api:limits",
+"os_compute_api:extensions",
+"os_compute_api:os-floating-ip-pools",
)
self.allow_nobody_rules = (
@@ -557,13 +553,11 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
# admin_only, non_admin, admin_or_user, empty_rule
special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin',
'os_compute_api:os-quota-class-sets:show',
- 'system_admin_api', 'system_reader_api',
'project_admin_api', 'project_member_api',
- 'project_reader_api', 'system_admin_or_owner',
- 'system_or_project_reader')
+ 'project_reader_api', 'project_member_or_admin',
+ 'project_reader_or_admin')
result = set(rules.keys()) - set(self.admin_only_rules +
self.admin_or_owner_rules +
- self.allow_all_rules + self.system_reader_rules +
- self.system_reader_or_owner_rules +
+ self.allow_all_rules +
self.allow_nobody_rules + special_rules)
self.assertEqual(set([]), result)
diff --git a/nova/tests/unit/test_quota.py b/nova/tests/unit/test_quota.py
index 312449b13a..7979d83e91 100644
--- a/nova/tests/unit/test_quota.py
+++ b/nova/tests/unit/test_quota.py
@@ -14,15 +14,20 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_db.sqlalchemy import enginefacade
+from oslo_limit import fixture as limit_fixture
+from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import api as compute
import nova.conf
from nova import context
from nova.db.main import models
from nova import exception
+from nova.limit import local as local_limit
+from nova.limit import placement as placement_limit
from nova import objects
from nova import quota
from nova import test
@@ -57,6 +62,7 @@ class QuotaIntegrationTestCase(test.TestCase):
super(QuotaIntegrationTestCase, self).setUp()
self.flags(instances=2,
cores=4,
+ ram=16384,
group='quota')
self.user_id = 'admin'
@@ -97,7 +103,7 @@ class QuotaIntegrationTestCase(test.TestCase):
# _instances_cores_ram_count().
inst_map = objects.InstanceMapping(
self.context, instance_uuid=inst.uuid, project_id=inst.project_id,
- cell_mapping=cell1)
+ user_id=inst.user_id, cell_mapping=cell1)
inst_map.create()
return inst
@@ -109,15 +115,15 @@ class QuotaIntegrationTestCase(test.TestCase):
self.compute_api.create(
self.context, min_count=1, max_count=1,
flavor=self.flavor, image_href=image_uuid)
- except exception.QuotaError as e:
+ except exception.OverQuota as e:
expected_kwargs = {'code': 413,
- 'req': '1, 1',
- 'used': '8, 2',
- 'allowed': '4, 2',
- 'overs': 'cores, instances'}
+ 'req': '1, 1, 2048',
+ 'used': '8, 2, 16384',
+ 'allowed': '4, 2, 16384',
+ 'overs': 'cores, instances, ram'}
self.assertEqual(expected_kwargs, e.kwargs)
else:
- self.fail('Expected QuotaError exception')
+ self.fail('Expected OverQuota exception')
def test_too_many_cores(self):
self._create_instance()
@@ -126,7 +132,7 @@ class QuotaIntegrationTestCase(test.TestCase):
self.compute_api.create(
self.context, min_count=1, max_count=1, flavor=self.flavor,
image_href=image_uuid)
- except exception.QuotaError as e:
+ except exception.OverQuota as e:
expected_kwargs = {'code': 413,
'req': '1',
'used': '4',
@@ -134,7 +140,7 @@ class QuotaIntegrationTestCase(test.TestCase):
'overs': 'cores'}
self.assertEqual(expected_kwargs, e.kwargs)
else:
- self.fail('Expected QuotaError exception')
+ self.fail('Expected OverQuota exception')
def test_many_cores_with_unlimited_quota(self):
# Setting cores quota to unlimited:
@@ -150,7 +156,7 @@ class QuotaIntegrationTestCase(test.TestCase):
metadata['key%s' % i] = 'value%s' % i
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
self.assertRaises(
- exception.QuotaError, self.compute_api.create,
+ exception.OverQuota, self.compute_api.create,
self.context, min_count=1, max_count=1, flavor=self.flavor,
image_href=image_uuid, metadata=metadata)
@@ -170,41 +176,127 @@ class QuotaIntegrationTestCase(test.TestCase):
files = []
for i in range(CONF.quota.injected_files):
files.append(('/my/path%d' % i, 'config = test\n'))
- self._create_with_injected_files(files) # no QuotaError
+ self._create_with_injected_files(files) # no OverQuota
def test_too_many_injected_files(self):
files = []
for i in range(CONF.quota.injected_files + 1):
files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
- self.assertRaises(exception.QuotaError,
+ self.assertRaises(exception.OverQuota,
self._create_with_injected_files, files)
def test_max_injected_file_content_bytes(self):
max = CONF.quota.injected_file_content_bytes
content = ''.join(['a' for i in range(max)])
files = [('/test/path', content)]
- self._create_with_injected_files(files) # no QuotaError
+ self._create_with_injected_files(files) # no OverQuota
def test_too_many_injected_file_content_bytes(self):
max = CONF.quota.injected_file_content_bytes
content = ''.join(['a' for i in range(max + 1)])
files = [('/test/path', content)]
- self.assertRaises(exception.QuotaError,
+ self.assertRaises(exception.OverQuota,
self._create_with_injected_files, files)
def test_max_injected_file_path_bytes(self):
max = CONF.quota.injected_file_path_length
path = ''.join(['a' for i in range(max)])
files = [(path, 'config = quotatest')]
- self._create_with_injected_files(files) # no QuotaError
+ self._create_with_injected_files(files) # no OverQuota
def test_too_many_injected_file_path_bytes(self):
max = CONF.quota.injected_file_path_length
path = ''.join(['a' for i in range(max + 1)])
files = [(path, 'config = quotatest')]
- self.assertRaises(exception.QuotaError,
+ self.assertRaises(exception.OverQuota,
self._create_with_injected_files, files)
+ def _test_with_server_group_members(self):
+ # use a known image uuid to avoid ImageNotFound errors
+ image_uuid = nova_fixtures.GlanceFixture.image4['id']
+
+ instance_group = objects.InstanceGroup(self.context,
+ policy="anti-affinity")
+ instance_group.name = "foo"
+ instance_group.project_id = self.context.project_id
+ instance_group.user_id = self.context.user_id
+ instance_group.uuid = uuids.instance_group
+ instance_group.create()
+
+ self.addCleanup(instance_group.destroy)
+
+ self.compute_api.create(
+ self.context, flavor=self.flavor,
+ image_href=image_uuid,
+ scheduler_hints={'group': uuids.instance_group},
+ check_server_group_quota=True)
+
+ exc = self.assertRaises(exception.OverQuota, self.compute_api.create,
+ self.context,
+ flavor=self.flavor,
+ image_href=image_uuid,
+ scheduler_hints={
+ 'group': uuids.instance_group},
+ check_server_group_quota=True)
+ return exc
+
+ def test_with_server_group_members(self):
+ self.flags(server_group_members=1, group="quota")
+ exc = self._test_with_server_group_members()
+ self.assertEqual("Quota exceeded, too many servers in group", str(exc))
+
+
+class UnifiedLimitsIntegrationTestCase(QuotaIntegrationTestCase):
+ """Test that API and DB resources enforce properly with unified limits.
+
+ Note: coverage for instances, cores, ram, and disk is located under
+ nova/tests/functional/. We don't attempt to test it here as the
+ PlacementFixture is needed to provide resource usages and it is only
+ available in the functional tests environment.
+
+ Note that any test that will succeed in creating a server also needs to be
+ able to use the PlacementFixture as cores, ram, and disk quota are enforced
+ while booting a server. These tests are also located under
+ nova/tests/functional/.
+ """
+
+ def setUp(self):
+ super(UnifiedLimitsIntegrationTestCase, self).setUp()
+ self.flags(driver="nova.quota.UnifiedLimitsDriver", group="quota")
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 10,
+ local_limit.SERVER_GROUP_MEMBERS: 10,
+ 'servers': 10,
+ 'class:VCPU': 20,
+ 'class:MEMORY_MB': 50 * 1024,
+ 'class:DISK_GB': 100}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+
+ def test_too_many_instances(self):
+ pass
+
+ def test_too_many_cores(self):
+ pass
+
+ def test_no_injected_files(self):
+ pass
+
+ def test_max_injected_files(self):
+ pass
+
+ def test_max_injected_file_content_bytes(self):
+ pass
+
+ def test_max_injected_file_path_bytes(self):
+ pass
+
+ def test_with_server_group_members(self):
+ pass
+
@enginefacade.transaction_context_provider
class FakeContext(context.RequestContext):
@@ -340,6 +432,19 @@ class QuotaEngineTestCase(test.TestCase):
quota_obj = quota.QuotaEngine(quota_driver=FakeDriver)
self.assertEqual(quota_obj._driver, FakeDriver)
+ def test_init_with_flag_set(self):
+ quota_obj = quota.QuotaEngine()
+ self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver)
+
+ self.flags(group="quota", driver="nova.quota.NoopQuotaDriver")
+ self.assertIsInstance(quota_obj._driver, quota.NoopQuotaDriver)
+
+ self.flags(group="quota", driver="nova.quota.UnifiedLimitsDriver")
+ self.assertIsInstance(quota_obj._driver, quota.UnifiedLimitsDriver)
+
+ self.flags(group="quota", driver="nova.quota.DbQuotaDriver")
+ self.assertIsInstance(quota_obj._driver, quota.DbQuotaDriver)
+
def _get_quota_engine(self, driver, resources=None):
resources = resources or [
quota.AbsoluteResource('test_resource4'),
@@ -1871,6 +1976,133 @@ class NoopQuotaDriverTestCase(test.TestCase):
self.assertEqual(self.expected_settable_quotas, result)
+class UnifiedLimitsDriverTestCase(NoopQuotaDriverTestCase):
+ def setUp(self):
+ super(UnifiedLimitsDriverTestCase, self).setUp()
+ self.driver = quota.UnifiedLimitsDriver()
+ # Set these so all limits get a different value, while still testing
+ # as much as possible with the default config.
+ reglimits = {local_limit.SERVER_METADATA_ITEMS: 128,
+ local_limit.INJECTED_FILES: 5,
+ local_limit.INJECTED_FILES_CONTENT: 10 * 1024,
+ local_limit.INJECTED_FILES_PATH: 255,
+ local_limit.KEY_PAIRS: 100,
+ local_limit.SERVER_GROUPS: 12,
+ local_limit.SERVER_GROUP_MEMBERS: 10}
+ self.useFixture(limit_fixture.LimitFixture(reglimits, {}))
+
+ self.expected_without_dict = {
+ 'cores': 2,
+ 'fixed_ips': -1,
+ 'floating_ips': -1,
+ 'injected_file_content_bytes': 10240,
+ 'injected_file_path_bytes': 255,
+ 'injected_files': 5,
+ 'instances': 1,
+ 'key_pairs': 100,
+ 'metadata_items': 128,
+ 'ram': 0,
+ 'security_group_rules': -1,
+ 'security_groups': -1,
+ 'server_group_members': 10,
+ 'server_groups': 12,
+ }
+ self.expected_without_usages = {
+ 'cores': {'limit': 2},
+ 'fixed_ips': {'limit': -1},
+ 'floating_ips': {'limit': -1},
+ 'injected_file_content_bytes': {'limit': 10240},
+ 'injected_file_path_bytes': {'limit': 255},
+ 'injected_files': {'limit': 5},
+ 'instances': {'limit': 1},
+ 'key_pairs': {'limit': 100},
+ 'metadata_items': {'limit': 128},
+ 'ram': {'limit': 3},
+ 'security_group_rules': {'limit': -1},
+ 'security_groups': {'limit': -1},
+ 'server_group_members': {'limit': 10},
+ 'server_groups': {'limit': 12}
+ }
+ self.expected_with_usages = {
+ 'cores': {'in_use': 5, 'limit': 2},
+ 'fixed_ips': {'in_use': 0, 'limit': -1},
+ 'floating_ips': {'in_use': 0, 'limit': -1},
+ 'injected_file_content_bytes': {'in_use': 0, 'limit': 10240},
+ 'injected_file_path_bytes': {'in_use': 0, 'limit': 255},
+ 'injected_files': {'in_use': 0, 'limit': 5},
+ 'instances': {'in_use': 4, 'limit': 1},
+ 'key_pairs': {'in_use': 0, 'limit': 100},
+ 'metadata_items': {'in_use': 0, 'limit': 128},
+ 'ram': {'in_use': 6, 'limit': 3},
+ 'security_group_rules': {'in_use': 0, 'limit': -1},
+ 'security_groups': {'in_use': 0, 'limit': -1},
+ 'server_group_members': {'in_use': 0, 'limit': 10},
+ 'server_groups': {'in_use': 9, 'limit': 12}
+ }
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ def test_get_defaults(self, mock_default):
+ # zero for ram simulates no registered limit for ram
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 0}
+ result = self.driver.get_defaults(None, quota.QUOTAS._resources)
+ self.assertEqual(self.expected_without_dict, result)
+ mock_default.assert_called_once_with()
+
+ @mock.patch.object(placement_limit, "get_legacy_default_limits")
+ def test_get_class_quotas(self, mock_default):
+ mock_default.return_value = {"instances": 1, "cores": 2, "ram": 0}
+ result = self.driver.get_class_quotas(
+ None, quota.QUOTAS._resources, 'test_class')
+ self.assertEqual(self.expected_without_dict, result)
+ mock_default.assert_called_once_with()
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_get_project_quotas(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ result = self.driver.get_project_quotas(
+ None, quota.QUOTAS._resources, 'test_project')
+ self.assertEqual(self.expected_with_usages, result)
+ mock_count.assert_called_once_with(None, "test_project")
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_get_project_quotas_no_usages(self, mock_count, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ result = self.driver.get_project_quotas(
+ None, quota.QUOTAS._resources, 'test_project', usages=False)
+ self.assertEqual(self.expected_without_usages, result)
+ # ensure usages not fetched when not required
+ self.assertEqual(0, mock_count.call_count)
+ mock_proj.assert_called_once_with("test_project")
+
+ @mock.patch.object(placement_limit, "get_legacy_counts")
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_get_user_quotas(self, mock_count, mock_proj, mock_kcount):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ mock_kcount.return_value = {"instances": 4, "cores": 5, "ram": 6}
+ mock_count.return_value = {'project': {'server_groups': 9}}
+ result = self.driver.get_user_quotas(
+ None, quota.QUOTAS._resources, 'test_project', 'fake_user')
+ self.assertEqual(self.expected_with_usages, result)
+ mock_count.assert_called_once_with(None, "test_project")
+
+ @mock.patch.object(placement_limit, "get_legacy_project_limits")
+ @mock.patch.object(objects.InstanceGroupList, "get_counts")
+ def test_get_user_quotas_no_usages(self, mock_count, mock_proj):
+ mock_proj.return_value = {"instances": 1, "cores": 2, "ram": 3}
+ result = self.driver.get_user_quotas(
+ None, quota.QUOTAS._resources, 'test_project', 'fake_user',
+ usages=False)
+ self.assertEqual(self.expected_without_usages, result)
+ # ensure usages not fetched when not required
+ self.assertEqual(0, mock_count.call_count)
+
+
@ddt.ddt
class QuotaCountTestCase(test.NoDBTestCase):
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
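
For reference, a rough sketch of the quota-view shape the expectations above encode: the unified limits driver reads per-resource limits (from the limit fixture here, from Keystone in a real deployment) and, only when usages are requested, merges in counts so each entry becomes {'limit': ..., 'in_use': ...}. The helper below illustrates that shape and is not the Nova driver itself.

    def build_quota_view(limits, usages=None):
        # Without usages, every resource maps to {'limit': ...}; with usages,
        # each entry also carries the counted 'in_use' value.
        if usages is None:
            return {name: {'limit': limit} for name, limit in limits.items()}
        return {name: {'limit': limit, 'in_use': usages.get(name, 0)}
                for name, limit in limits.items()}

    if __name__ == '__main__':
        limits = {'instances': 1, 'cores': 2, 'ram': 3, 'server_groups': 12}
        usages = {'instances': 4, 'cores': 5, 'ram': 6, 'server_groups': 9}
        print(build_quota_view(limits))
        print(build_quota_view(limits, usages))
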
diff --git a/nova/tests/unit/test_rpc.py b/nova/tests/unit/test_rpc.py
index eece75af96..40a914b5f7 100644
--- a/nova/tests/unit/test_rpc.py
+++ b/nova/tests/unit/test_rpc.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
import oslo_messaging as messaging
from oslo_messaging.rpc import dispatcher
from oslo_serialization import jsonutils
@@ -213,20 +214,20 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', None)
@mock.patch.object(rpc, 'RequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client(self, mock_client, mock_ser, mock_TRANSPORT):
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client(self, mock_get, mock_ser, mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -252,21 +253,21 @@ class TestRPC(test.NoDBTestCase):
@mock.patch.object(rpc, 'TRANSPORT')
@mock.patch.object(rpc, 'profiler', mock.Mock())
@mock.patch.object(rpc, 'ProfilerRequestContextSerializer')
- @mock.patch.object(messaging, 'RPCClient')
- def test_get_client_profiler_enabled(self, mock_client, mock_ser,
+ @mock.patch.object(messaging, 'get_rpc_client')
+ def test_get_client_profiler_enabled(self, mock_get, mock_ser,
mock_TRANSPORT):
tgt = mock.Mock()
ser = mock.Mock()
- mock_client.return_value = 'client'
+ mock_get.return_value = 'client'
mock_ser.return_value = ser
client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
mock_ser.assert_called_once_with('foo')
- mock_client.assert_called_once_with(mock_TRANSPORT,
- tgt, version_cap='1.0',
- call_monitor_timeout=None,
- serializer=ser)
+ mock_get.assert_called_once_with(mock_TRANSPORT,
+ tgt, version_cap='1.0',
+ call_monitor_timeout=None,
+ serializer=ser)
self.assertEqual('client', client)
@mock.patch.object(rpc, 'TRANSPORT')
@@ -431,11 +432,11 @@ class TestProfilerRequestContextSerializer(test.NoDBTestCase):
class TestClientRouter(test.NoDBTestCase):
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = mock.sentinel.transport
@@ -443,7 +444,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
# verify a client was created by ClientRouter
- mock_rpcclient.assert_called_once_with(
+ mock_get.assert_called_once_with(
mock.sentinel.transport, default_client.target,
version_cap=default_client.version_cap,
call_monitor_timeout=default_client.call_monitor_timeout,
@@ -451,11 +452,11 @@ class TestClientRouter(test.NoDBTestCase):
# verify cell client was returned
self.assertEqual(cell_client, client)
- @mock.patch('oslo_messaging.RPCClient')
- def test_by_instance_untargeted(self, mock_rpcclient):
+ @mock.patch('oslo_messaging.get_rpc_client')
+ def test_by_instance_untargeted(self, mock_get):
default_client = mock.Mock()
cell_client = mock.Mock()
- mock_rpcclient.return_value = cell_client
+ mock_get.return_value = cell_client
ctxt = mock.Mock()
ctxt.mq_connection = None
@@ -463,7 +464,7 @@ class TestClientRouter(test.NoDBTestCase):
client = router.client(ctxt)
self.assertEqual(router.default_client, client)
- self.assertFalse(mock_rpcclient.called)
+ self.assertFalse(mock_get.called)
class TestIsNotificationsEnabledDecorator(test.NoDBTestCase):
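
For context, a hedged sketch of the call shape the updated assertions expect: the client helper now goes through oslo.messaging's get_rpc_client() factory rather than instantiating messaging.RPCClient directly, passing the transport, target, version cap, call monitor timeout and a wrapped serializer. TRANSPORT and RequestContextSerializer below mirror the mocks in the tests and are simplified stand-ins, not the actual nova.rpc module.

    import oslo_messaging as messaging

    TRANSPORT = None  # assumed to be set up elsewhere by the rpc init code

    class RequestContextSerializer(messaging.NoOpSerializer):
        """Simplified stand-in for the context-aware serializer wrapper."""

        def __init__(self, base):
            self._base = base

    def get_client(target, version_cap=None, serializer=None,
                   call_monitor_timeout=None):
        serializer = RequestContextSerializer(serializer)
        # The tests assert exactly this call; the factory replaces the old
        # messaging.RPCClient(...) constructor with the same arguments.
        return messaging.get_rpc_client(
            TRANSPORT, target,
            version_cap=version_cap,
            call_monitor_timeout=call_monitor_timeout,
            serializer=serializer)
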
diff --git a/nova/tests/unit/test_service.py b/nova/tests/unit/test_service.py
index b5721696db..acc1aeca7f 100644
--- a/nova/tests/unit/test_service.py
+++ b/nova/tests/unit/test_service.py
@@ -18,7 +18,8 @@
Unit Tests for remote procedure calls using queue
"""
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_service import service as _service
@@ -127,7 +128,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
# init_host is called before any service record is created
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host, self.binary)
mock_create.assert_called_once_with()
@@ -157,7 +158,7 @@ class ServiceTestCase(test.NoDBTestCase):
service_obj = mock.Mock()
service_obj.binary = 'fake-binary'
service_obj.host = 'fake-host'
- service_obj.version = -42
+ service_obj.version = 42
mock_get_by_host_and_binary.return_value = service_obj
serv = service.Service(self.host, self.binary, self.topic,
@@ -185,7 +186,7 @@ class ServiceTestCase(test.NoDBTestCase):
mock_create.side_effect = ex
serv.manager = mock_manager
self.assertRaises(test.TestingException, serv.start)
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(None)
mock_get_by_host_and_binary.assert_has_calls([
mock.call(mock.ANY, self.host, self.binary),
mock.call(mock.ANY, self.host, self.binary)])
@@ -215,7 +216,7 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.service_name = self.topic
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_once_with()
+ serv.manager.init_host.assert_called_once_with(None)
mock_get_by_host_and_binary.assert_called_once_with(mock.ANY,
self.host,
self.binary)
@@ -240,7 +241,8 @@ class ServiceTestCase(test.NoDBTestCase):
serv.manager.additional_endpoints = []
serv.start()
- serv.manager.init_host.assert_called_with()
+ serv.manager.init_host.assert_called_with(
+ mock_svc_get_by_host_and_binary.return_value)
serv.stop()
serv.manager.cleanup_host.assert_called_with()
diff --git a/nova/tests/unit/test_service_auth.py b/nova/tests/unit/test_service_auth.py
index db2a2e2899..5f07515188 100644
--- a/nova/tests/unit/test_service_auth.py
+++ b/nova/tests/unit/test_service_auth.py
@@ -10,9 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import service_token
-import mock
from nova import context
from nova import service_auth
diff --git a/nova/tests/unit/test_test.py b/nova/tests/unit/test_test.py
index 8381792de6..1042153b10 100644
--- a/nova/tests/unit/test_test.py
+++ b/nova/tests/unit/test_test.py
@@ -18,9 +18,9 @@
import os.path
import tempfile
+from unittest import mock
import uuid
-import mock
from oslo_log import log as logging
import oslo_messaging as messaging
@@ -361,21 +361,6 @@ class PatchExistsTestCase(test.NoDBTestCase):
self.assertTrue(os.path.exists(os.path.dirname(__file__)))
self.assertFalse(os.path.exists('non-existent/file'))
- @test.patch_exists('fake_file1', True)
- @test.patch_exists('fake_file2', True)
- @test.patch_exists(__file__, False)
- def test_patch_exists_multiple_decorators(self):
- """Test that @patch_exists can be used multiple times on the
- same method.
- """
- self.assertTrue(os.path.exists('fake_file1'))
- self.assertTrue(os.path.exists('fake_file2'))
- self.assertFalse(os.path.exists(__file__))
-
- # Check non-patched parameters
- self.assertTrue(os.path.exists(os.path.dirname(__file__)))
- self.assertFalse(os.path.exists('non-existent/file'))
-
class PatchOpenTestCase(test.NoDBTestCase):
fake_contents = "These file contents don't really exist"
diff --git a/nova/tests/unit/test_utils.py b/nova/tests/unit/test_utils.py
index bd69ccbb65..ca4e09b087 100644
--- a/nova/tests/unit/test_utils.py
+++ b/nova/tests/unit/test_utils.py
@@ -16,13 +16,13 @@ import datetime
import os
import os.path
import tempfile
+from unittest import mock
import eventlet
import fixtures
from keystoneauth1 import adapter as ks_adapter
from keystoneauth1.identity import base as ks_identity
from keystoneauth1 import session as ks_session
-import mock
import netaddr
from openstack import exceptions as sdk_exc
from oslo_config import cfg
diff --git a/nova/tests/unit/test_weights.py b/nova/tests/unit/test_weights.py
index 5758e9aa2f..ad0a203ff4 100644
--- a/nova/tests/unit/test_weights.py
+++ b/nova/tests/unit/test_weights.py
@@ -16,7 +16,7 @@
Tests For weights.
"""
-import mock
+from unittest import mock
from nova.scheduler import weights as scheduler_weights
from nova.scheduler.weights import ram
diff --git a/nova/tests/unit/test_wsgi.py b/nova/tests/unit/test_wsgi.py
index e46318cd17..45a0406b5c 100644
--- a/nova/tests/unit/test_wsgi.py
+++ b/nova/tests/unit/test_wsgi.py
@@ -19,10 +19,10 @@
import os.path
import socket
import tempfile
+from unittest import mock
import eventlet
import eventlet.wsgi
-import mock
from oslo_config import cfg
import requests
import testtools
diff --git a/nova/tests/unit/utils.py b/nova/tests/unit/utils.py
index 6311475522..51edc45686 100644
--- a/nova/tests/unit/utils.py
+++ b/nova/tests/unit/utils.py
@@ -17,8 +17,7 @@ import errno
import platform
import socket
import sys
-
-import mock
+from unittest import mock
from nova.compute import flavors
import nova.conf
diff --git a/nova/tests/unit/virt/disk/mount/test_api.py b/nova/tests/unit/virt/disk/mount/test_api.py
index d2d040dd84..7d8a741914 100644
--- a/nova/tests/unit/virt/disk/mount/test_api.py
+++ b/nova/tests/unit/virt/disk/mount/test_api.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_service import fixture as service_fixture
from nova import test
@@ -202,7 +203,7 @@ class MountTestCase(test.NoDBTestCase):
device)
self.assertIsInstance(inst, block.BlockMount)
- def test_instance_for_device_block_partiton(self,):
+ def test_instance_for_device_block_partiton(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1
diff --git a/nova/tests/unit/virt/disk/mount/test_loop.py b/nova/tests/unit/virt/disk/mount/test_loop.py
index 3c0c18fa60..312b88db35 100644
--- a/nova/tests/unit/virt/disk/mount/test_loop.py
+++ b/nova/tests/unit/virt/disk/mount/test_loop.py
@@ -14,8 +14,9 @@
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova import test
from nova.virt.disk.mount import loop
diff --git a/nova/tests/unit/virt/disk/mount/test_nbd.py b/nova/tests/unit/virt/disk/mount/test_nbd.py
index 0024b2f6d6..cc0e04337e 100644
--- a/nova/tests/unit/virt/disk/mount/test_nbd.py
+++ b/nova/tests/unit/virt/disk/mount/test_nbd.py
@@ -14,10 +14,10 @@
# under the License.
-import mock
import os
import tempfile
import time
+from unittest import mock
import eventlet
import fixtures
diff --git a/nova/tests/unit/virt/disk/test_api.py b/nova/tests/unit/virt/disk/test_api.py
index 5b90fd186e..135558e145 100644
--- a/nova/tests/unit/virt/disk/test_api.py
+++ b/nova/tests/unit/virt/disk/test_api.py
@@ -14,8 +14,8 @@
# under the License.
import tempfile
+from unittest import mock
-import mock
from oslo_concurrency import processutils
from oslo_utils import units
@@ -40,6 +40,7 @@ class FakeMount(object):
class APITestCase(test.NoDBTestCase):
+ @mock.patch('nova.virt.disk.vfs.guestfs.VFSGuestFS', new=mock.Mock())
def test_can_resize_need_fs_type_specified(self):
imgfile = tempfile.NamedTemporaryFile()
self.addCleanup(imgfile.close)
diff --git a/nova/tests/unit/virt/disk/vfs/test_guestfs.py b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
index b1c619c955..9dc937202a 100644
--- a/nova/tests/unit/virt/disk/vfs/test_guestfs.py
+++ b/nova/tests/unit/virt/disk/vfs/test_guestfs.py
@@ -13,9 +13,9 @@
# under the License.
import collections
+from unittest import mock
import fixtures
-import mock
from nova import exception
from nova import test
diff --git a/nova/tests/unit/virt/hyperv/__init__.py b/nova/tests/unit/virt/hyperv/__init__.py
index e69de29bb2..2190f0570f 100644
--- a/nova/tests/unit/virt/hyperv/__init__.py
+++ b/nova/tests/unit/virt/hyperv/__init__.py
@@ -0,0 +1,20 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+try:
+ import os_win # noqa: F401
+except ImportError:
+ raise unittest.SkipTest(
+ "The 'os-win' dependency is not installed."
+ )
diff --git a/nova/tests/unit/virt/hyperv/test_base.py b/nova/tests/unit/virt/hyperv/test_base.py
index e895fc600e..1dd7db367b 100644
--- a/nova/tests/unit/virt/hyperv/test_base.py
+++ b/nova/tests/unit/virt/hyperv/test_base.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from os_win import utilsfactory
from nova import test
diff --git a/nova/tests/unit/virt/hyperv/test_block_device_manager.py b/nova/tests/unit/virt/hyperv/test_block_device_manager.py
index ded2ffa0d4..0d914a55a5 100644
--- a/nova/tests/unit/virt/hyperv/test_block_device_manager.py
+++ b/nova/tests/unit/virt/hyperv/test_block_device_manager.py
@@ -12,8 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_win import constants as os_win_const
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_driver.py b/nova/tests/unit/virt/hyperv/test_driver.py
index 07f251390e..c9ccc6e8f1 100644
--- a/nova/tests/unit/virt/hyperv/test_driver.py
+++ b/nova/tests/unit/virt/hyperv/test_driver.py
@@ -19,8 +19,8 @@ Unit tests for the Hyper-V Driver.
import platform
import sys
+from unittest import mock
-import mock
from os_win import exceptions as os_win_exc
from nova import exception
diff --git a/nova/tests/unit/virt/hyperv/test_eventhandler.py b/nova/tests/unit/virt/hyperv/test_eventhandler.py
index 658a49c5c1..9825bc9141 100644
--- a/nova/tests/unit/virt/hyperv/test_eventhandler.py
+++ b/nova/tests/unit/virt/hyperv/test_eventhandler.py
@@ -13,10 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_win import constants
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
+from unittest import mock
from nova.tests.unit.virt.hyperv import test_base
from nova import utils
diff --git a/nova/tests/unit/virt/hyperv/test_hostops.py b/nova/tests/unit/virt/hyperv/test_hostops.py
index ebe2979f8a..04434dd37e 100644
--- a/nova/tests/unit/virt/hyperv/test_hostops.py
+++ b/nova/tests/unit/virt/hyperv/test_hostops.py
@@ -14,8 +14,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
import os_resource_classes as orc
from os_win import constants as os_win_const
from oslo_config import cfg
diff --git a/nova/tests/unit/virt/hyperv/test_imagecache.py b/nova/tests/unit/virt/hyperv/test_imagecache.py
index 4c0c1318ae..827d52133d 100644
--- a/nova/tests/unit/virt/hyperv/test_imagecache.py
+++ b/nova/tests/unit/virt/hyperv/test_imagecache.py
@@ -14,10 +14,10 @@
# under the License.
import os
+from unittest import mock
import ddt
import fixtures
-import mock
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
diff --git a/nova/tests/unit/virt/hyperv/test_livemigrationops.py b/nova/tests/unit/virt/hyperv/test_livemigrationops.py
index 8a3df843b9..79cb4318c5 100644
--- a/nova/tests/unit/virt/hyperv/test_livemigrationops.py
+++ b/nova/tests/unit/virt/hyperv/test_livemigrationops.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_win import exceptions as os_win_exc
+from unittest import mock
+
from oslo_config import cfg
from nova import exception
diff --git a/nova/tests/unit/virt/hyperv/test_migrationops.py b/nova/tests/unit/virt/hyperv/test_migrationops.py
index 86844b11cf..d0b7ff32fd 100644
--- a/nova/tests/unit/virt/hyperv/test_migrationops.py
+++ b/nova/tests/unit/virt/hyperv/test_migrationops.py
@@ -13,8 +13,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from os_win import exceptions as os_win_exc
from oslo_utils import units
diff --git a/nova/tests/unit/virt/hyperv/test_pathutils.py b/nova/tests/unit/virt/hyperv/test_pathutils.py
index 573fe557a5..7bd9e91e3f 100644
--- a/nova/tests/unit/virt/hyperv/test_pathutils.py
+++ b/nova/tests/unit/virt/hyperv/test_pathutils.py
@@ -14,8 +14,7 @@
import os
import time
-
-import mock
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py b/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py
index ffc1e4cd0c..5e6bf9a3c3 100644
--- a/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py
+++ b/nova/tests/unit/virt/hyperv/test_rdpconsoleops.py
@@ -17,7 +17,7 @@
Unit tests for the Hyper-V RDPConsoleOps.
"""
-import mock
+from unittest import mock
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import rdpconsoleops
diff --git a/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py b/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py
index 4240b8eb95..e9461408c4 100644
--- a/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py
+++ b/nova/tests/unit/virt/hyperv/test_serialconsolehandler.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_serialconsoleops.py b/nova/tests/unit/virt/hyperv/test_serialconsoleops.py
index 1e8a9c7557..4a4b7c8e4f 100644
--- a/nova/tests/unit/virt/hyperv/test_serialconsoleops.py
+++ b/nova/tests/unit/virt/hyperv/test_serialconsoleops.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_serialproxy.py b/nova/tests/unit/virt/hyperv/test_serialproxy.py
index 4d1cf80f80..b7e08a67dd 100644
--- a/nova/tests/unit/virt/hyperv/test_serialproxy.py
+++ b/nova/tests/unit/virt/hyperv/test_serialproxy.py
@@ -14,8 +14,8 @@
# under the License.
import socket
+from unittest import mock
-import mock
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
diff --git a/nova/tests/unit/virt/hyperv/test_snapshotops.py b/nova/tests/unit/virt/hyperv/test_snapshotops.py
index 60f5876296..1bb2f8dd4b 100644
--- a/nova/tests/unit/virt/hyperv/test_snapshotops.py
+++ b/nova/tests/unit/virt/hyperv/test_snapshotops.py
@@ -14,8 +14,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from nova.compute import task_states
from nova.tests.unit import fake_instance
diff --git a/nova/tests/unit/virt/hyperv/test_vif.py b/nova/tests/unit/virt/hyperv/test_vif.py
index c1f5951b79..d4c8d7af58 100644
--- a/nova/tests/unit/virt/hyperv/test_vif.py
+++ b/nova/tests/unit/virt/hyperv/test_vif.py
@@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.conf
from nova import exception
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
index dd4dc52d5b..07e1774f9a 100644
--- a/nova/tests/unit/virt/hyperv/test_vmops.py
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -13,10 +13,10 @@
# under the License.
import os
+from unittest import mock
import ddt
from eventlet import timeout as etimeout
-import mock
from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from oslo_concurrency import processutils
@@ -1374,12 +1374,10 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
def test_get_vm_state(self):
summary_info = {'EnabledState': os_win_const.HYPERV_VM_STATE_DISABLED}
- with mock.patch.object(self._vmops._vmutils,
- 'get_vm_summary_info') as mock_get_summary_info:
- mock_get_summary_info.return_value = summary_info
+ self._vmops._vmutils.get_vm_summary_info.return_value = summary_info
- response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
- self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED)
+ response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
+ self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED)
@mock.patch.object(vmops.VMOps, '_get_vm_state')
def test_wait_for_power_off_true(self, mock_get_state):
@@ -1418,12 +1416,11 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
def test_list_instance_uuids(self):
fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
- with mock.patch.object(self._vmops._vmutils,
- 'list_instance_notes') as mock_list_notes:
- mock_list_notes.return_value = [('fake_name', [fake_uuid])]
+ self._vmops._vmutils.list_instance_notes.return_value = (
+ [('fake_name', [fake_uuid])])
- response = self._vmops.list_instance_uuids()
- mock_list_notes.assert_called_once_with()
+ response = self._vmops.list_instance_uuids()
+ self._vmops._vmutils.list_instance_notes.assert_called_once_with()
self.assertEqual(response, [fake_uuid])
@@ -1830,7 +1827,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
self.assertEqual(fake_local_disks, ret_val)
def test_get_scoped_flavor_extra_specs(self):
- # The flavor extra spect dict contains only string values.
+ # The flavor extra specs dict contains only string values.
fake_total_bytes_sec = '8'
mock_instance = fake_instance.fake_instance_obj(self.context)
diff --git a/nova/tests/unit/virt/hyperv/test_volumeops.py b/nova/tests/unit/virt/hyperv/test_volumeops.py
index da7262085d..66d2c2527f 100644
--- a/nova/tests/unit/virt/hyperv/test_volumeops.py
+++ b/nova/tests/unit/virt/hyperv/test_volumeops.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from os_brick.initiator import connector
from oslo_config import cfg
from oslo_utils import units
diff --git a/nova/tests/unit/virt/ironic/test_client_wrapper.py b/nova/tests/unit/virt/ironic/test_client_wrapper.py
index 9c2ffe3dca..512f1438d6 100644
--- a/nova/tests/unit/virt/ironic/test_client_wrapper.py
+++ b/nova/tests/unit/virt/ironic/test_client_wrapper.py
@@ -13,11 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from ironicclient import client as ironic_client
from ironicclient import exc as ironic_exception
from keystoneauth1 import discover as ksa_disc
import keystoneauth1.session
-import mock
from oslo_config import cfg
import nova.conf
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 7b377b21c2..52aa37ac13 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -15,9 +15,10 @@
"""Tests for the ironic driver."""
+from unittest import mock
+
import fixtures
from ironicclient import exc as ironic_exception
-import mock
from openstack import exceptions as sdk_exc
from oslo_config import cfg
from oslo_service import loopingcall
@@ -934,6 +935,48 @@ class IronicDriverTestCase(test.NoDBTestCase):
expected = {
'CUSTOM_IRON_NFV': {
'total': 1,
+ 'reserved': 1,
+ 'min_unit': 1,
+ 'max_unit': 1,
+ 'step_size': 1,
+ 'allocation_ratio': 1.0,
+ },
+ }
+ mock_nfc.assert_called_once_with(mock.sentinel.nodename)
+ mock_nr.assert_called_once_with(mock_nfc.return_value)
+ mock_res_used.assert_called_once_with(mock_nfc.return_value)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
+ result = self.ptree.data(mock.sentinel.nodename).inventory
+ self.assertEqual(expected, result)
+
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_used', return_value=True)
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_node_resources_unavailable', return_value=False)
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_resource')
+ @mock.patch.object(ironic_driver.IronicDriver, '_node_from_cache')
+ def test_update_provider_tree_with_rc_occupied_workaround(self,
+ mock_nfc, mock_nr, mock_res_unavail, mock_res_used):
+ """Ensure that when a node is used, we report the inventory matching
+ the consumed resources.
+ """
+ self.flags(skip_reserve_in_use_ironic_nodes=True,
+ group="workarounds")
+ mock_nr.return_value = {
+ 'vcpus': 24,
+ 'vcpus_used': 24,
+ 'memory_mb': 1024,
+ 'memory_mb_used': 1024,
+ 'local_gb': 100,
+ 'local_gb_used': 100,
+ 'resource_class': 'iron-nfv',
+ }
+
+ self.driver.update_provider_tree(self.ptree, mock.sentinel.nodename)
+
+ expected = {
+ 'CUSTOM_IRON_NFV': {
+ 'total': 1,
'reserved': 0,
'min_unit': 1,
'max_unit': 1,
@@ -944,7 +987,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).inventory
self.assertEqual(expected, result)
@@ -1015,7 +1058,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(), result)
@@ -1047,7 +1090,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_nfc.assert_called_once_with(mock.sentinel.nodename)
mock_nr.assert_called_once_with(mock_nfc.return_value)
mock_res_used.assert_called_once_with(mock_nfc.return_value)
- self.assertFalse(mock_res_unavail.called)
+ mock_res_unavail.assert_called_once_with(mock_nfc.return_value)
result = self.ptree.data(mock.sentinel.nodename).traits
self.assertEqual(set(traits), result)
@@ -2499,7 +2542,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_prepare_for_spawn(self, mock_call):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
@@ -2531,7 +2577,10 @@ class IronicDriverTestCase(test.NoDBTestCase):
instance)
def test_prepare_for_spawn_conflict(self):
- node = ironic_utils.get_test_node(driver='fake')
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.AVAILABLE,
+ power_state=ironic_states.POWER_OFF)
self.mock_conn.get_node.return_value = node
self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
@@ -2539,6 +2588,18 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.prepare_for_spawn,
instance)
+ def test_prepare_for_spawn_not_available(self):
+ node = ironic_utils.get_test_node(
+ driver='fake', instance_uuid=None,
+ provision_state=ironic_states.CLEANWAIT,
+ power_state=ironic_states.POWER_OFF)
+ self.mock_conn.get_node.return_value = node
+ self.mock_conn.update_node.side_effect = sdk_exc.ConflictException
+ instance = fake_instance.fake_instance_obj(self.ctx, node=node.id)
+ self.assertRaises(exception.ComputeResourcesUnavailable,
+ self.driver.prepare_for_spawn,
+ instance)
+
@mock.patch.object(ironic_driver.IronicDriver, '_cleanup_deploy')
def test_failed_spawn_cleanup(self, mock_cleanup):
node = ironic_utils.get_test_node(driver='fake')
@@ -2597,9 +2658,6 @@ class IronicDriverSyncTestCase(IronicDriverTestCase):
# that the thread completes.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
- self.mock_conn = self.useFixture(
- fixtures.MockPatchObject(self.driver, '_ironic_connection')).mock
-
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
def test_rescue(self, mock_sps, mock_looping):
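
The two update_provider_tree cases above pin down the new reservation rule for in-use Ironic nodes: by default a consumed node now has its single unit of the custom resource class marked as reserved, while enabling the new [workarounds]skip_reserve_in_use_ironic_nodes flag keeps the legacy behaviour of leaving it unreserved. A toy sketch of that rule, distilled only from the expected inventories asserted here (the helper name is illustrative, not the driver's actual code):

    def reserved_units(node_in_use, skip_reserve_in_use_ironic_nodes=False):
        # How many of the node's single CUSTOM_* inventory unit to reserve.
        if node_in_use and not skip_reserve_in_use_ironic_nodes:
            return 1   # default: keep the scheduler away from a consumed node
        return 0       # node free, or workaround enabled (legacy behaviour)

    assert reserved_units(node_in_use=True) == 1
    assert reserved_units(node_in_use=True,
                          skip_reserve_in_use_ironic_nodes=True) == 0
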
diff --git a/nova/db/main/legacy_migrations/__init__.py b/nova/tests/unit/virt/libvirt/cpu/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/db/main/legacy_migrations/__init__.py
+++ b/nova/tests/unit/virt/libvirt/cpu/__init__.py
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_api.py b/nova/tests/unit/virt/libvirt/cpu/test_api.py
new file mode 100644
index 0000000000..b5bcb762f3
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_api.py
@@ -0,0 +1,194 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import objects
+from nova import test
+from nova.virt.libvirt.cpu import api
+from nova.virt.libvirt.cpu import core
+
+
+class TestAPI(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestAPI, self).setUp()
+ self.core_1 = api.Core(1)
+
+ # Create a fake instance with two pinned CPUs but only one is on the
+ # dedicated set
+ numa_topology = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(cpu_pinning_raw={'0': '0', '2': '2'}),
+ ])
+ self.fake_inst = objects.Instance(numa_topology=numa_topology)
+
+ @mock.patch.object(core, 'get_online')
+ def test_online(self, mock_get_online):
+ mock_get_online.return_value = True
+ self.assertTrue(self.core_1.online)
+ mock_get_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_online')
+ def test_set_online(self, mock_set_online):
+ self.core_1.online = True
+ mock_set_online.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_offline')
+ def test_set_offline(self, mock_set_offline):
+ self.core_1.online = False
+ mock_set_offline.assert_called_once_with(self.core_1.ident)
+
+ def test_hash(self):
+ self.assertEqual(hash(self.core_1.ident), hash(self.core_1))
+
+ @mock.patch.object(core, 'get_governor')
+ def test_governor(self, mock_get_governor):
+ mock_get_governor.return_value = 'fake_governor'
+ self.assertEqual('fake_governor', self.core_1.governor)
+ mock_get_governor.assert_called_once_with(self.core_1.ident)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_low(self, mock_set_governor):
+ self.flags(cpu_power_governor_low='fake_low_gov', group='libvirt')
+ self.core_1.set_low_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_low_gov')
+
+ @mock.patch.object(core, 'set_governor')
+ def test_set_governor_high(self, mock_set_governor):
+ self.flags(cpu_power_governor_high='fake_high_gov', group='libvirt')
+ self.core_1.set_high_governor()
+ mock_set_governor.assert_called_once_with(self.core_1.ident,
+ 'fake_high_gov')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_online(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, core(i).online calls set_online(i)
+ mock_online.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_up_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_up(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+ # As a reminder, core(i).set_high_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'performance')
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped(self, mock_online):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_up(self.fake_inst)
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_online')
+ def test_power_up_skipped_if_standard_instance(self, mock_online):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_up(objects.Instance(numa_topology=None))
+ mock_online.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+        # As a reminder, core(i).online = False calls set_offline(i)
+ mock_offline.assert_called_once_with(0)
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down(self.fake_inst)
+ # only core #0 can be set as core #2 is not on the dedicated set
+        # As a reminder, core(i).set_low_governor calls set_governor(i)
+ mock_set_governor.assert_called_once_with(0, 'powersave')
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down(self.fake_inst)
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_skipped_if_standard_instance(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ api.power_down(objects.Instance(numa_topology=None))
+ mock_offline.assert_not_called()
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_offline(self, mock_offline):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+ # All dedicated CPUs are turned offline
+ mock_offline.assert_has_calls([mock.call(0), mock.call(1)])
+
+ @mock.patch.object(core, 'set_governor')
+ def test_power_down_all_dedicated_cpus_governor(self, mock_set_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+
+ api.power_down_all_dedicated_cpus()
+ # All dedicated CPUs are turned offline
+ mock_set_governor.assert_has_calls([mock.call(0, 'powersave'),
+ mock.call(1, 'powersave')])
+
+ @mock.patch.object(core, 'set_offline')
+ def test_power_down_all_dedicated_cpus_skipped(self, mock_offline):
+ self.flags(cpu_power_management=False, group='libvirt')
+ api.power_down_all_dedicated_cpus()
+ mock_offline.assert_not_called()
+
+ def test_power_down_all_dedicated_cpus_wrong_config(self):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set=None, group='compute')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.power_down_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_governor(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='governor', group='libvirt')
+ mock_get_governor.return_value = 'performance'
+ mock_get_online.side_effect = (True, False)
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core, 'get_online')
+ def test_validate_all_dedicated_cpus_for_cpu_state(self, mock_get_online,
+ mock_get_governor):
+ self.flags(cpu_power_management=True, group='libvirt')
+ self.flags(cpu_dedicated_set='0-1', group='compute')
+ self.flags(cpu_power_management_strategy='cpu_state', group='libvirt')
+ mock_get_online.return_value = True
+ mock_get_governor.side_effect = ('powersave', 'performance')
+ self.assertRaises(exception.InvalidConfiguration,
+ api.validate_all_dedicated_cpus)
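
Taken together, the TestAPI cases above fix the public surface of the new nova.virt.libvirt.cpu.api module: a Core wrapper around a host CPU index whose online property and governor setters delegate to the privileged core helpers, plus power_up()/power_down() functions that only touch CPUs which are both pinned by the instance and listed in [compute]cpu_dedicated_set. A minimal sketch of that surface as the mocks and assertions describe it (attribute names such as cell.cpu_pinning and the exact control flow are assumptions, not copied from the real module):

    # Illustrative sketch only; the real code lives in
    # nova/virt/libvirt/cpu/api.py and may differ in detail.
    import nova.conf
    from nova.virt import hardware
    from nova.virt.libvirt.cpu import core

    CONF = nova.conf.CONF


    class Core:
        """A dedicated host CPU, identified by its index."""

        def __init__(self, i):
            self.ident = i

        def __hash__(self):
            return hash(self.ident)

        @property
        def online(self):
            return core.get_online(self.ident)

        @online.setter
        def online(self, state):
            # test_set_online/test_set_offline: assigning the property calls
            # the matching privileged helper.
            if state:
                core.set_online(self.ident)
            else:
                core.set_offline(self.ident)

        def set_high_governor(self):
            core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_high)

        def set_low_governor(self):
            core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_low)


    def power_up(instance):
        # Skipped when the feature is off or the instance has no pinned CPUs
        # (test_power_up_skipped*).
        if not CONF.libvirt.cpu_power_management or not instance.numa_topology:
            return
        dedicated = hardware.parse_cpu_spec(CONF.compute.cpu_dedicated_set)
        for cell in instance.numa_topology.cells:
            # cpu_pinning maps guest CPUs to host CPUs; the name is assumed.
            for pcpu in cell.cpu_pinning.values():
                if pcpu not in dedicated:
                    continue  # e.g. core #2 in the tests above
                if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
                    Core(pcpu).online = True
                else:
                    Core(pcpu).set_high_governor()
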
diff --git a/nova/tests/unit/virt/libvirt/cpu/test_core.py b/nova/tests/unit/virt/libvirt/cpu/test_core.py
new file mode 100644
index 0000000000..a3cba00d3b
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/cpu/test_core.py
@@ -0,0 +1,122 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures
+from nova.virt.libvirt.cpu import core
+
+
+class TestCore(test.NoDBTestCase):
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores(self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = '1-2'
+ mock_parse_cpu_spec.return_value = set([1, 2])
+ self.assertEqual(set([1, 2]), core.get_available_cores())
+ mock_read_sys.assert_called_once_with(core.AVAILABLE_PATH)
+ mock_parse_cpu_spec.assert_called_once_with('1-2')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ @mock.patch.object(core.hardware, 'parse_cpu_spec')
+ def test_get_available_cores_none(
+ self, mock_parse_cpu_spec, mock_read_sys):
+ mock_read_sys.return_value = ''
+ self.assertEqual(set(), core.get_available_cores())
+ mock_parse_cpu_spec.assert_not_called()
+
+ @mock.patch.object(core, 'get_available_cores')
+ def test_exists(self, mock_get_available_cores):
+ mock_get_available_cores.return_value = set([1])
+ self.assertTrue(core.exists(1))
+ mock_get_available_cores.assert_called_once_with()
+ self.assertFalse(core.exists(2))
+
+ @mock.patch.object(
+ core, 'CPU_PATH_TEMPLATE',
+ new_callable=mock.PropertyMock(return_value='/sys/blah%(core)s'))
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path(self, mock_exists, mock_cpu_path):
+ mock_exists.return_value = True
+ self.assertEqual('/sys/blah1', core.gen_cpu_path(1))
+ mock_exists.assert_called_once_with(1)
+
+ @mock.patch.object(core, 'exists')
+ def test_gen_cpu_path_raises(self, mock_exists):
+ mock_exists.return_value = False
+ self.assertRaises(ValueError, core.gen_cpu_path, 1)
+ self.assertIn('Unable to access CPU: 1', self.stdlog.logger.output)
+
+
+class TestCoreHelpers(test.NoDBTestCase):
+
+ def setUp(self):
+ super(TestCoreHelpers, self).setUp()
+ self.useFixture(fixtures.PrivsepFixture())
+ _p1 = mock.patch.object(core, 'exists', return_value=True)
+ self.mock_exists = _p1.start()
+ self.addCleanup(_p1.stop)
+
+ _p2 = mock.patch.object(core, 'gen_cpu_path',
+ side_effect=lambda x: '/fakesys/blah%s' % x)
+ self.mock_gen_cpu_path = _p2.start()
+ self.addCleanup(_p2.stop)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online(self, mock_read_sys):
+ mock_read_sys.return_value = '1'
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_online_not_exists(self, mock_read_sys):
+ mock_read_sys.side_effect = exception.FileNotFound(file_path='foo')
+ self.assertTrue(core.get_online(1))
+ mock_read_sys.assert_called_once_with('/fakesys/blah1/online')
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_online(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = True
+ self.assertTrue(core.set_online(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='1')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'write_sys')
+ @mock.patch.object(core, 'get_online')
+ def test_set_offline(self, mock_get_online, mock_write_sys):
+ mock_get_online.return_value = False
+ self.assertTrue(core.set_offline(1))
+ mock_write_sys.assert_called_once_with('/fakesys/blah1/online',
+ data='0')
+ mock_get_online.assert_called_once_with(1)
+
+ @mock.patch.object(core.filesystem, 'read_sys')
+ def test_get_governor(self, mock_read_sys):
+ mock_read_sys.return_value = 'fake_gov'
+ self.assertEqual('fake_gov', core.get_governor(1))
+ mock_read_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor')
+
+ @mock.patch.object(core, 'get_governor')
+ @mock.patch.object(core.filesystem, 'write_sys')
+ def test_set_governor(self, mock_write_sys, mock_get_governor):
+ mock_get_governor.return_value = 'fake_gov'
+ self.assertEqual('fake_gov',
+ core.set_governor(1, 'fake_gov'))
+ mock_write_sys.assert_called_once_with(
+ '/fakesys/blah1/cpufreq/scaling_governor', data='fake_gov')
+ mock_get_governor.assert_called_once_with(1)
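
TestCore and TestCoreHelpers above likewise spell out the contract of the lower-level nova.virt.libvirt.cpu.core helpers: each host CPU has a per-core sysfs directory, its hotplug state is toggled through an online file, the frequency governor through cpufreq/scaling_governor, and all reads and writes go through the filesystem read_sys()/write_sys() privsep helpers. A rough sketch of those helpers as the tests exercise them; the concrete sysfs paths and the nova.filesystem import are assumptions based on the standard Linux layout rather than text taken from this patch:

    # Illustrative sketch of nova/virt/libvirt/cpu/core.py; details may differ.
    from nova import exception
    from nova import filesystem   # read_sys()/write_sys() privsep wrappers
    from nova.virt import hardware

    AVAILABLE_PATH = '/sys/devices/system/cpu/present'           # assumed
    CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'    # assumed


    def get_available_cores():
        cores = filesystem.read_sys(AVAILABLE_PATH)
        return hardware.parse_cpu_spec(cores) if cores else set()


    def exists(core):
        return core in get_available_cores()


    def gen_cpu_path(core):
        if not exists(core):
            raise ValueError('Unable to access CPU: %s' % core)
        return CPU_PATH_TEMPLATE % {'core': core}


    def get_online(core):
        try:
            online = filesystem.read_sys(gen_cpu_path(core) + '/online')
        except exception.FileNotFound:
            # CPUs without an 'online' knob (e.g. CPU0) are always online.
            return True
        return online.strip() == '1'


    def set_online(core):
        filesystem.write_sys(gen_cpu_path(core) + '/online', data='1')
        return get_online(core)


    def set_offline(core):
        filesystem.write_sys(gen_cpu_path(core) + '/online', data='0')
        return not get_online(core)


    def get_governor(core):
        return filesystem.read_sys(
            gen_cpu_path(core) + '/cpufreq/scaling_governor').strip()


    def set_governor(core, governor):
        filesystem.write_sys(
            gen_cpu_path(core) + '/cpufreq/scaling_governor', data=governor)
        return get_governor(core)
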
diff --git a/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py b/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py
index cd45bac54a..28c93e4855 100644
--- a/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py
+++ b/nova/tests/unit/virt/libvirt/storage/test_dmcrypt.py
@@ -14,7 +14,8 @@
# under the License.
import binascii
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from nova import test
diff --git a/nova/tests/unit/virt/libvirt/storage/test_lvm.py b/nova/tests/unit/virt/libvirt/storage/test_lvm.py
index fbec2dcae9..04d9ffdcbf 100644
--- a/nova/tests/unit/virt/libvirt/storage/test_lvm.py
+++ b/nova/tests/unit/virt/libvirt/storage/test_lvm.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import units
diff --git a/nova/tests/unit/virt/libvirt/test_blockinfo.py b/nova/tests/unit/virt/libvirt/test_blockinfo.py
index 7707f745e3..5a0dbb40ce 100644
--- a/nova/tests/unit/virt/libvirt/test_blockinfo.py
+++ b/nova/tests/unit/virt/libvirt/test_blockinfo.py
@@ -14,9 +14,9 @@
# under the License.
import copy
+from unittest import mock
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import block_device
@@ -74,6 +74,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
def _test_block_device_info(self, with_eph=True, with_swap=True,
with_bdms=True):
swap = {'device_name': '/dev/vdb', 'swap_size': 1}
+ image = [{'device_type': 'disk', 'boot_index': 0}]
ephemerals = [{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdc1', 'size': 10},
{'disk_bus': 'ide', 'guest_format': None,
@@ -84,6 +85,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'device_path': 'fake_device'}]
return {'root_device_name': '/dev/vda',
'swap': swap if with_swap else {},
+ 'image': image,
'ephemerals': ephemerals if with_eph else [],
'block_device_mapping':
block_device_mapping if with_bdms else []}
@@ -178,11 +180,16 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
with mock.patch.object(instance_ref, 'get_flavor',
return_value=instance_ref.flavor) as get_flavor:
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
# Since there was no block_device_info passed to get_disk_mapping we
# expect to get the swap info from the flavor in the instance.
get_flavor.assert_called_once_with()
@@ -202,7 +209,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
- 'root_device_name': '/dev/sda'
+ 'root_device_name': '/dev/sda',
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
@@ -490,9 +498,12 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("lxc", instance_ref,
- "lxc", "lxc",
- image_meta)
+ block_device_info = {
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "lxc", instance_ref, "lxc", "lxc", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'lxc', 'dev': None,
'type': 'disk', 'boot_index': '1'},
@@ -527,9 +538,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref.flavor.swap = 5
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
@@ -549,6 +565,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref.ephemeral_gb = 0
block_dev_info = {'swap': None, 'root_device_name': u'/dev/vda',
+ 'image': [],
'ephemerals': [],
'block_device_mapping': [{'boot_index': None,
'mount_device': u'/dev/vdb',
@@ -591,8 +608,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
mapping = blockinfo.get_disk_mapping(
- "kvm", instance_ref, "virtio", "ide", image_meta)
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
# Pick the first drive letter on the bus that is available
# as the config drive. Delete the last device hardcode as
@@ -647,8 +670,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
mapping = blockinfo.get_disk_mapping(
- "kvm", instance_ref, "virtio", "ide", image_meta)
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {
@@ -697,9 +726,14 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
- mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
- "virtio", "ide",
- image_meta)
+ block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ]
+ }
+ mapping = blockinfo.get_disk_mapping(
+ "kvm", instance_ref, "virtio", "ide", image_meta,
+ block_device_info=block_device_info)
expect = {
'disk': {'bus': 'virtio', 'dev': 'vda',
@@ -718,6 +752,9 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [
+ {'device_type': 'disk', 'boot_index': 0},
+ ],
'ephemerals': [
{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdb', 'size': 10},
@@ -754,6 +791,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
block_device_info = {
'swap': {'device_name': '/dev/vdb',
'swap_size': 10},
+ 'image': [{'device_type': 'disk',
+ 'boot_index': 0}],
}
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
"virtio", "ide",
@@ -775,6 +814,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vda",
@@ -803,6 +843,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = {}
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': None,
'mount_device': None,
@@ -858,6 +899,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
block_device_info = {
+ 'image': [],
'block_device_mapping': [
{'connection_info': "fake",
'mount_device': "/dev/vda",
@@ -899,6 +941,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'root_device_name': '/dev/vdf',
'swap': {'device_name': '/dev/vdy',
'swap_size': 10},
+ 'image': [{'device_type': 'disk', 'boot_index': 0}],
'ephemerals': [
{'device_type': 'disk', 'guest_format': 'ext4',
'device_name': '/dev/vdb', 'size': 10},
@@ -940,6 +983,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'swap': {'device_name': '/dev/vdb',
'device_type': 'really_lame_type',
'swap_size': 10},
+ 'image': [{'device_name': '/dev/vda',
+ 'device_type': 'disk'}],
'ephemerals': [{'disk_bus': 'no_such_bus',
'device_type': 'yeah_right',
'device_name': '/dev/vdc', 'size': 10}],
@@ -951,6 +996,8 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
}
expected_swap = {'device_name': '/dev/vdb', 'disk_bus': 'virtio',
'device_type': 'disk', 'swap_size': 10}
+ expected_image = {'device_name': '/dev/vda', 'device_type': 'disk',
+ 'disk_bus': 'virtio'}
expected_ephemeral = {'disk_bus': 'virtio',
'device_type': 'disk',
'device_name': '/dev/vdc', 'size': 10}
@@ -970,6 +1017,7 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
self.assertFalse(get_flavor_mock.called)
self.assertEqual(expected_swap, block_device_info['swap'])
+ self.assertEqual(expected_image, block_device_info['image'][0])
self.assertEqual(expected_ephemeral,
block_device_info['ephemerals'][0])
self.assertEqual(expected_bdm,
@@ -1124,7 +1172,10 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'device_type': 'lame_type',
'delete_on_termination': True},
{'disk_bus': 'sata', 'guest_format': None,
- 'device_name': '/dev/sda', 'size': 3}]
+ 'device_name': '/dev/sda', 'size': 3},
+ {'encrypted': True, 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': '{"json": "options"}'}]
expected = [{'dev': 'vds', 'type': 'disk', 'bus': 'usb'},
{'dev': 'vdb', 'type': 'disk',
'bus': 'virtio', 'format': 'ext4'},
@@ -1133,7 +1184,11 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
'bus': 'scsi', 'boot_index': '1'},
{'dev': 'vdo', 'type': 'disk',
'bus': 'scsi', 'boot_index': '2'},
- {'dev': 'sda', 'type': 'disk', 'bus': 'sata'}]
+ {'dev': 'sda', 'type': 'disk', 'bus': 'sata'},
+ {'dev': 'vda', 'type': 'disk', 'bus': 'virtio',
+ 'encrypted': True, 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'luks',
+ 'encryption_options': {'json': 'options'}}]
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
for bdm, expected in zip(bdms, expected):
@@ -1441,6 +1496,15 @@ class DefaultDeviceNamesTestCase(test.NoDBTestCase):
'destination_type': 'volume',
'boot_index': -1}))]
+ self.image = [
+ objects.BlockDeviceMapping(self.context,
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {'id': 6, 'instance_uuid': uuids.instance,
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'device_type': 'disk',
+ 'boot_index': 0}))]
+
def tearDown(self):
super(DefaultDeviceNamesTestCase, self).tearDown()
for patcher in self.patchers:
@@ -1450,7 +1514,7 @@ class DefaultDeviceNamesTestCase(test.NoDBTestCase):
'nova.virt.libvirt.utils.get_arch',
return_value=obj_fields.Architecture.X86_64)
def _test_default_device_names(self, eph, swap, bdm, mock_get_arch):
- bdms = eph + swap + bdm
+ bdms = self.image + eph + swap + bdm
bdi = driver.get_block_device_info(self.instance, bdms)
blockinfo.default_device_names(self.virt_type,
self.context,
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
index 2d690e5dfc..3d0b5ae685 100644
--- a/nova/tests/unit/virt/libvirt/test_config.py
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -16,6 +16,7 @@ from lxml import etree
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
+from nova import exception
from nova.objects import fields as obj_fields
from nova import test
from nova.tests.fixtures import libvirt_data as fake_libvirt_data
@@ -70,6 +71,23 @@ class LibvirtConfigTest(LibvirtConfigBaseTest):
obj = config.LibvirtConfigObject(root_name="demo")
obj.parse_str(inxml)
+ def test_parse_on_off_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertTrue(obj.parse_on_off_str('on'))
+ self.assertFalse(obj.parse_on_off_str('off'))
+ self.assertFalse(obj.parse_on_off_str(None))
+ self.assertRaises(exception.InvalidInput, obj.parse_on_off_str, 'foo')
+
+ def test_get_yes_no_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertEqual('yes', obj.get_yes_no_str(True))
+ self.assertEqual('no', obj.get_yes_no_str(False))
+
+ def test_get_on_off_str(self):
+ obj = config.LibvirtConfigObject(root_name="demo")
+ self.assertEqual('on', obj.get_on_off_str(True))
+ self.assertEqual('off', obj.get_on_off_str(False))
+
class LibvirtConfigCapsTest(LibvirtConfigBaseTest):
@@ -1519,7 +1537,7 @@ class LibvirtConfigGuestInputTest(LibvirtConfigBaseTest):
class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
- def test_config_graphics(self):
+ def test_config_graphics_vnc(self):
obj = config.LibvirtConfigGuestGraphics()
obj.type = "vnc"
obj.autoport = True
@@ -1531,11 +1549,38 @@ class LibvirtConfigGuestGraphicsTest(LibvirtConfigBaseTest):
<graphics type="vnc" autoport="yes" keymap="en_US" listen="127.0.0.1"/>
""")
+ def test_config_graphics_spice(self):
+ obj = config.LibvirtConfigGuestGraphics()
+ obj.type = "spice"
+ obj.autoport = False
+ obj.keymap = "en_US"
+ obj.listen = "127.0.0.1"
+
+ obj.image_compression = "auto_glz"
+ obj.jpeg_compression = "auto"
+ obj.zlib_compression = "always"
+ obj.playback_compression = True
+ obj.streaming_mode = "filter"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, """
+ <graphics type="spice" autoport="no" keymap="en_US" listen="127.0.0.1">
+ <image compression="auto_glz"/>
+ <jpeg compression="auto"/>
+ <zlib compression="always"/>
+ <playback compression="on"/>
+ <streaming mode="filter"/>
+ </graphics>
+ """)
+
class LibvirtConfigGuestHostdev(LibvirtConfigBaseTest):
def test_config_pci_guest_host_dev(self):
- obj = config.LibvirtConfigGuestHostdev(mode='subsystem', type='pci')
+ obj = config.LibvirtConfigGuestHostdev()
+ obj.mode = 'subsystem'
+ obj.type = 'pci'
+
xml = obj.to_xml()
expected = """
<hostdev mode="subsystem" type="pci" managed="yes"/>
@@ -1570,7 +1615,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
</hostdev>
"""
- def test_config_guest_hosdev_pci(self):
+ def test_config_guest_hostdev_pci(self):
hostdev = config.LibvirtConfigGuestHostdevPCI()
hostdev.domain = "1234"
hostdev.bus = "11"
@@ -1579,7 +1624,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
xml = hostdev.to_xml()
self.assertXmlEqual(self.expected, xml)
- def test_parse_guest_hosdev_pci(self):
+ def test_parse_guest_hostdev_pci(self):
xmldoc = self.expected
obj = config.LibvirtConfigGuestHostdevPCI()
obj.parse_str(xmldoc)
@@ -1591,7 +1636,7 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigBaseTest):
self.assertEqual(obj.slot, '0x22')
self.assertEqual(obj.function, '0x3')
- def test_parse_guest_hosdev_usb(self):
+ def test_parse_guest_hostdev_usb(self):
xmldoc = """<hostdev mode='subsystem' type='usb'>
<source startupPolicy='optional'>
<vendor id='0x1234'/>
@@ -2318,6 +2363,15 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
obj.vapic = True
obj.spinlocks = True
obj.vendorid_spoof = True
+ obj.vpindex = True
+ obj.runtime = True
+ obj.synic = True
+ obj.reset = True
+ obj.frequencies = True
+ obj.reenlightenment = True
+ obj.tlbflush = True
+ obj.ipi = True
+ obj.evmcs = True
xml = obj.to_xml()
self.assertXmlEqual(xml, """
@@ -2326,6 +2380,15 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
<vapic state="on"/>
<spinlocks state="on" retries="4095"/>
<vendor_id state="on" value="1234567890ab"/>
+ <vpindex state='on'/>
+ <runtime state='on'/>
+ <synic state='on'/>
+ <reset state='on'/>
+ <frequencies state='on'/>
+ <reenlightenment state='on'/>
+ <tlbflush state='on'/>
+ <ipi state='on'/>
+ <evmcs state='on'/>
</hyperv>""")
def test_feature_pmu(self):
@@ -2344,6 +2407,13 @@ class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
xml = obj.to_xml()
self.assertXmlEqual(xml, "<pmu state='off'/>")
+ def test_feature_ioapic(self):
+ obj = config.LibvirtConfigGuestFeatureIOAPIC()
+ obj.driver = "libvirt"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(xml, "<ioapic driver='libvirt'/>")
+
class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
@@ -3135,6 +3205,32 @@ class LibvirtConfigNodeDeviceTest(LibvirtConfigBaseTest):
config.LibvirtConfigNodeDeviceMdevInformation)
self.assertEqual("nvidia-11", obj.mdev_information.type)
self.assertEqual(12, obj.mdev_information.iommu_group)
+ self.assertIsNone(obj.mdev_information.uuid)
+
+ def test_config_mdev_device_uuid(self):
+ xmlin = """
+ <device>
+ <name>mdev_b2107403_110c_45b0_af87_32cc91597b8a_0000_41_00_0</name>
+ <path>/sys/devices/pci0000:40/0000:40:03.1/0000:41:00.0/b2107403-110c-45b0-af87-32cc91597b8a</path>
+ <parent>pci_0000_41_00_0</parent>
+ <driver>
+ <name>vfio_mdev</name>
+ </driver>
+ <capability type='mdev'>
+ <type id='nvidia-442'/>
+ <uuid>b2107403-110c-45b0-af87-32cc91597b8a</uuid>
+ <iommuGroup number='57'/>
+ </capability>
+ </device>"""
+
+ obj = config.LibvirtConfigNodeDevice()
+ obj.parse_str(xmlin)
+ self.assertIsInstance(obj.mdev_information,
+ config.LibvirtConfigNodeDeviceMdevInformation)
+ self.assertEqual("nvidia-442", obj.mdev_information.type)
+ self.assertEqual(57, obj.mdev_information.iommu_group)
+ self.assertEqual("b2107403-110c-45b0-af87-32cc91597b8a",
+ obj.mdev_information.uuid)
def test_config_vdpa_device(self):
xmlin = """
@@ -3273,6 +3369,86 @@ class LibvirtConfigNodeDevicePciCapTest(LibvirtConfigBaseTest):
'name': 'GRID M60-0B',
'type': 'nvidia-11'}], obj.mdev_capability[0].mdev_types)
+ def test_config_device_pci_vpd(self):
+ xmlin = """
+ <capability type='pci'>
+ <class>0x020000</class>
+ <domain>0</domain>
+ <bus>130</bus>
+ <slot>0</slot>
+ <function>1</function>
+ <product id='0xa2d6'>MT42822 BlueField-2</product>
+ <vendor id='0x15b3'>Mellanox Technologies</vendor>
+ <capability type='virt_functions' maxCount='16'/>
+ <capability type='vpd'>
+ <name>BlueField-2 DPU 25GbE</name>
+ <fields access='readonly'>
+ <change_level>B1</change_level>
+ <manufacture_id>foobar</manufacture_id>
+ <part_number>MBF2H332A-AEEOT</part_number>
+ <serial_number>MT2113X00000</serial_number>
+ <vendor_field index='0'>PCIeGen4 x8</vendor_field>
+ <vendor_field index='2'>MBF2H332A-AEEOT</vendor_field>
+ <vendor_field index='3'>3c53d07eec484d8aab34dabd24fe575aa</vendor_field>
+ <vendor_field index='A'>MLX:MN=MLNX:CSKU=V2:UUID=V3:PCI=V0:MODL=BF2H332A</vendor_field>
+ </fields>
+ <fields access='readwrite'>
+ <asset_tag>fooasset</asset_tag>
+ <vendor_field index='0'>vendorfield0</vendor_field>
+ <vendor_field index='2'>vendorfield2</vendor_field>
+ <vendor_field index='A'>vendorfieldA</vendor_field>
+ <system_field index='B'>systemfieldB</system_field>
+ <system_field index='0'>systemfield0</system_field>
+ </fields>
+ </capability>
+ <iommuGroup number='66'>
+ <address domain='0x0000' bus='0x82' slot='0x00' function='0x1'/>
+ </iommuGroup>
+ <numa node='1'/>
+ <pci-express>
+ <link validity='cap' port='0' speed='16' width='8'/>
+ <link validity='sta' speed='8' width='8'/>
+ </pci-express>
+ </capability>""" # noqa: E501
+ obj = config.LibvirtConfigNodeDevicePciCap()
+ obj.parse_str(xmlin)
+
+ # Asserting common PCI attribute parsing.
+ self.assertEqual(0, obj.domain)
+ self.assertEqual(130, obj.bus)
+ self.assertEqual(0, obj.slot)
+ self.assertEqual(1, obj.function)
+ # Asserting vpd capability parsing.
+ self.assertEqual("MT42822 BlueField-2", obj.product)
+ self.assertEqual(0xA2D6, obj.product_id)
+ self.assertEqual("Mellanox Technologies", obj.vendor)
+ self.assertEqual(0x15B3, obj.vendor_id)
+ self.assertEqual(obj.numa_node, 1)
+ self.assertIsInstance(obj.vpd_capability,
+ config.LibvirtConfigNodeDeviceVpdCap)
+ self.assertEqual(obj.vpd_capability.card_name, 'BlueField-2 DPU 25GbE')
+
+ self.assertEqual(obj.vpd_capability.change_level, 'B1')
+ self.assertEqual(obj.vpd_capability.manufacture_id, 'foobar')
+ self.assertEqual(obj.vpd_capability.part_number, 'MBF2H332A-AEEOT')
+ self.assertEqual(obj.vpd_capability.card_serial_number, 'MT2113X00000')
+ self.assertEqual(obj.vpd_capability.asset_tag, 'fooasset')
+ self.assertEqual(obj.vpd_capability.ro_vendor_fields, {
+ '0': 'PCIeGen4 x8',
+ '2': 'MBF2H332A-AEEOT',
+ '3': '3c53d07eec484d8aab34dabd24fe575aa',
+ 'A': 'MLX:MN=MLNX:CSKU=V2:UUID=V3:PCI=V0:MODL=BF2H332A',
+ })
+ self.assertEqual(obj.vpd_capability.rw_vendor_fields, {
+ '0': 'vendorfield0',
+ '2': 'vendorfield2',
+ 'A': 'vendorfieldA',
+ })
+ self.assertEqual(obj.vpd_capability.rw_system_fields, {
+ '0': 'systemfield0',
+ 'B': 'systemfieldB',
+ })
+
class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigBaseTest):
@@ -3869,8 +4045,10 @@ class LibvirtConfigSecretTest(LibvirtConfigBaseTest):
class LibvirtConfigGuestVPMEMTest(LibvirtConfigBaseTest):
def test_config_vpmem(self):
- obj = config.LibvirtConfigGuestVPMEM(
- devpath='/dev/dax0.0', size_kb=4096 * units.Ki, align_kb=2048)
+ obj = config.LibvirtConfigGuestVPMEM()
+ obj.source_path = '/dev/dax0.0'
+ obj.target_size = 4096 * units.Ki
+ obj.align_size = 2048
xml = obj.to_xml()
self.assertXmlEqual(xml, """
@@ -3890,6 +4068,28 @@ class LibvirtConfigGuestVPMEMTest(LibvirtConfigBaseTest):
</memory>""")
+class LibvirtConfigGuestIOMMUTest(LibvirtConfigBaseTest):
+
+ def test_config_iommu(self):
+ obj = config.LibvirtConfigGuestIOMMU()
+ obj.model = "intel"
+ obj.interrupt_remapping = True
+ obj.caching_mode = True
+ obj.aw_bits = 48
+ obj.eim = True
+ obj.iotlb = True
+
+ xml = obj.to_xml()
+ self.assertXmlEqual(
+ xml,
+ """
+<iommu model='intel'>
+ <driver intremap='on' caching_mode='on' aw_bits='48' eim='on' iotlb='on'/>
+</iommu>
+ """,
+ )
+
+
class LibvirtConfigDomainCapsVideoModelsTests(LibvirtConfigBaseTest):
def test_parse_video_model(self):
@@ -4006,7 +4206,8 @@ class LibvirtConfigDomainCapsDevicesTests(LibvirtConfigBaseTest):
obj.parse_str(xml)
# we only use the video and disk devices today.
device_types = [config.LibvirtConfigDomainCapsDiskBuses,
- config.LibvirtConfigDomainCapsVideoModels]
+ config.LibvirtConfigDomainCapsVideoModels,
+ ]
# so we assert there are only two device types parsed
self.assertEqual(2, len(obj.devices))
# we then assert that the parsed devices are of the correct type
diff --git a/nova/tests/unit/virt/libvirt/test_designer.py b/nova/tests/unit/virt/libvirt/test_designer.py
index a6ad7f9ccc..cb435286e9 100644
--- a/nova/tests/unit/virt/libvirt/test_designer.py
+++ b/nova/tests/unit/virt/libvirt/test_designer.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.pci import utils as pci_utils
from nova import test
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 50cb5536ef..66dbf795d8 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -31,6 +31,7 @@ import testtools
import threading
import time
import unittest
+from unittest import mock
from castellan import key_manager
import ddt
@@ -38,7 +39,6 @@ import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
-import mock
from os_brick import encryptors
from os_brick import exception as brick_exception
from os_brick.initiator import connector
@@ -76,7 +76,6 @@ from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
from nova.objects import virtual_interface as obj_vif
-from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.fs
import nova.privsep.libvirt
@@ -740,16 +739,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'resolve_driver_format',
imagebackend.Image._get_driver_format)
- self.useFixture(nova_fixtures.LibvirtFixture())
+ self.libvirt = self.useFixture(nova_fixtures.LibvirtFixture())
+ self.cgroups = self.useFixture(nova_fixtures.CGroupsFixture())
# ensure tests perform the same on all host architectures; this is
# already done by the fakelibvirt fixture but we want to change the
# architecture in some tests
- _p = mock.patch('os.uname')
- self.mock_uname = _p.start()
+ self.mock_uname = self.libvirt.mock_uname
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.X86_64)
- self.addCleanup(_p.stop)
self.test_instance = _create_test_instance()
network_info = objects.InstanceInfoCache(
@@ -820,6 +818,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Driver capabilities for 'supports_socket_pci_numa_affinity' "
"is invalid",
)
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption'],
+ "Driver capabilities for 'supports_ephemeral_encryption' "
+ "is invalid",
+ )
+ self.assertFalse(
+ drvr.capabilities['supports_ephemeral_encryption_luks'],
+ "Driver capabilities for 'supports_ephemeral_encryption_luks' "
+            "is invalid",
+ )
def test_driver_capabilities_qcow2_with_rbd(self):
self.flags(images_type='rbd', group='libvirt')
@@ -870,9 +878,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"'swtpm_enabled=True'"
)
- @mock.patch.object(
- libvirt_driver.LibvirtDriver, '_register_instance_machine_type',
- new=mock.Mock())
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(
host.Host, 'supports_secure_boot', new_callable=mock.PropertyMock)
def test_driver_capabilities_secure_boot(self, mock_supports):
@@ -885,6 +893,23 @@ class LibvirtConnTestCase(test.NoDBTestCase,
)
mock_supports.assert_called_once_with()
+ @mock.patch.object(
+ libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
+ @mock.patch.object(
+ host.Host, 'supports_remote_managed_ports',
+ new_callable=mock.PropertyMock)
+ def test_driver_capabilities_remote_managed_ports(self, mock_supports):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr.init_host("dummyhost")
+ self.assertTrue(
+ drvr.capabilities['supports_remote_managed_ports'],
+ "Driver capabilities for 'supports_remote_managed_ports' "
+ "is invalid when host should support this feature"
+ )
+ mock_supports.assert_called_once_with()
+
def test_driver_raises_on_non_linux_platform(self):
with utils.temporary_mutation(sys, platform='darwin'):
self.assertRaises(
@@ -946,9 +971,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_storage_bus_traits')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_video_model_traits')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_vif_model_traits')
+ @mock.patch.object(host.Host, "has_min_version")
def test_static_traits(
- self, mock_vif_traits, mock_video_traits, mock_storage_traits,
- mock_cpu_traits,
+ self, mock_version, mock_vif_traits, mock_video_traits,
+ mock_storage_traits, mock_cpu_traits,
):
"""Ensure driver capabilities are correctly retrieved and cached."""
@@ -959,14 +985,21 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_video_traits.return_value = {'COMPUTE_GRAPHICS_MODEL_VGA': True}
mock_vif_traits.return_value = {'COMPUTE_NET_VIF_MODEL_VIRTIO': True}
+ # for support COMPUTE_VIOMMU_MODEL_VIRTIO
+ mock_version.return_value = True
+
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
expected = {
- 'HW_CPU_HYPERTHREADING': True,
- 'COMPUTE_STORAGE_BUS_VIRTIO': True,
'COMPUTE_GRAPHICS_MODEL_VGA': True,
'COMPUTE_NET_VIF_MODEL_VIRTIO': True,
'COMPUTE_SECURITY_TPM_1_2': False,
'COMPUTE_SECURITY_TPM_2_0': False,
+ 'COMPUTE_STORAGE_BUS_VIRTIO': True,
+ 'COMPUTE_VIOMMU_MODEL_AUTO': True,
+ 'COMPUTE_VIOMMU_MODEL_INTEL': True,
+ 'COMPUTE_VIOMMU_MODEL_SMMUV3': True,
+ 'COMPUTE_VIOMMU_MODEL_VIRTIO': True,
+ 'HW_CPU_HYPERTHREADING': True
}
static_traits = drvr.static_traits
@@ -1012,6 +1045,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'COMPUTE_NET_VIF_MODEL_VIRTIO': True,
'COMPUTE_SECURITY_TPM_1_2': False,
'COMPUTE_SECURITY_TPM_2_0': False,
+ 'COMPUTE_VIOMMU_MODEL_AUTO': True,
+ 'COMPUTE_VIOMMU_MODEL_INTEL': True,
+ 'COMPUTE_VIOMMU_MODEL_SMMUV3': True,
+ 'COMPUTE_VIOMMU_MODEL_VIRTIO': False
}
static_traits = drvr.static_traits
@@ -1025,7 +1062,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
any_order=True)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(host.Host, "has_min_version")
def test_min_version_start_ok(self, mock_version):
mock_version.return_value = True
@@ -1041,7 +1079,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"dummyhost")
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
@@ -1071,7 +1110,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(version_arg_found)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_QEMU_VERSION) - 1)
@@ -1101,7 +1141,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(version_arg_found)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
@@ -1131,7 +1172,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertFalse(version_arg_found)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.NEXT_MIN_QEMU_VERSION))
@@ -1161,7 +1203,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertFalse(version_arg_found)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_min_version_ppc_ok(self):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.PPC64)
@@ -1169,7 +1212,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr.init_host("dummyhost")
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_min_version_s390_ok(self):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.S390X)
@@ -1177,7 +1221,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr.init_host("dummyhost")
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_file_backed_memory_support_called(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(drvr,
@@ -1232,7 +1277,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__prepare_cpu_flag(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -1262,7 +1308,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertXmlEqual(expected_xml, cpu.to_xml())
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_cpu_compatibility_start_ok(self):
self.flags(cpu_mode="custom",
cpu_models=["Penryn"],
@@ -1284,7 +1331,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_model(self, mocked_compare):
mocked_compare.side_effect = (2, 0)
self.flags(cpu_mode="custom",
@@ -1295,7 +1343,24 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr.init_host, "dummyhost")
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
+ @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ def test__check_cpu_compatibility_skip_compare_at_init(
+ self, mocked_compare
+ ):
+ self.flags(group='workarounds', skip_cpu_compare_at_startup=True)
+ self.flags(cpu_mode="custom",
+ cpu_models=["Icelake-Server-noTSX"],
+ cpu_model_extra_flags = ["-mpx"],
+ group="libvirt")
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr.init_host("dummyhost")
+ mocked_compare.assert_not_called()
+
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_cpu_compatibility_with_flag(self):
self.flags(cpu_mode="custom",
cpu_models=["Penryn"],
@@ -1304,9 +1369,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_advance_flag(self, mocked_compare):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["qemu64"],
cpu_model_extra_flags = ["avx", "avx2"],
@@ -1315,11 +1381,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_wrong_flag(self, mocked_compare):
# here, and in the surrounding similar tests, the non-zero error
# code in the compareCPU() side effect indicates error
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(cpu_mode="custom",
cpu_models=["Broadwell-noTSX"],
cpu_model_extra_flags = ["a v x"],
@@ -1328,11 +1395,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.InvalidCPUInfo,
drvr.init_host, "dummyhost")
- @mock.patch('nova.virt.libvirt.host.libvirt.Connection.compareCPU')
+ @mock.patch(
+ 'nova.virt.libvirt.host.libvirt.Connection.compareHypervisorCPU')
def test__check_cpu_compatibility_enabled_and_disabled_flags(
self, mocked_compare
):
- mocked_compare.side_effect = (2, 0)
+ mocked_compare.side_effect = (-1, 0)
self.flags(
cpu_mode="custom",
cpu_models=["Cascadelake-Server"],
@@ -1354,7 +1422,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.Invalid, drvr.init_host, "dummyhost")
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_cpu_compatibility_aarch64_qemu_custom_start_OK(self):
"""Test getting CPU traits when using a virt_type that doesn't support
the feature, only kvm and qemu supports reporting CPU traits.
@@ -1372,6 +1441,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test__check_vtpm_support_non_qemu(self):
"""Test checking for vTPM support when we're not using QEMU or KVM."""
self.flags(swtpm_enabled=True, virt_type='lxc', group='libvirt')
@@ -1459,7 +1531,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_getgrnam.assert_called_with('admins')
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch('shutil.which')
@mock.patch('pwd.getpwnam')
@mock.patch('grp.getgrnam')
@@ -1780,6 +1853,22 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_guest.set_user_password.assert_called_once_with("root", "123")
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest')
+ def test_qemu_announce_self(self, mock_get_guest):
+ # Enable the workaround, configure to call announce_self 3 times
+ self.flags(enable_qemu_monitor_announce_self=True, group='workarounds')
+
+ mock_guest = mock.Mock(spec=libvirt_guest.Guest)
+ mock_get_guest.return_value = mock_guest
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr._qemu_monitor_announce_self(mock_guest)
+
+ # Ensure that 3 calls are made, matching the default of 3 for the
+ # enable_qemu_monitor_announce_self_retries option
+ mock_guest.announce_self.assert_any_call()
+ self.assertEqual(3, mock_guest.announce_self.call_count)
+
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
'has_min_version', return_value=True)
@@ -2227,6 +2316,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance_ref.info_cache = objects.InstanceInfoCache(
network_info=network_info)
+ pci_utils.get_mac_by_pci_address.side_effect = None
+ pci_utils.get_mac_by_pci_address.return_value = 'da:d1:f2:91:95:c1'
with test.nested(
mock.patch('nova.objects.VirtualInterfaceList'
'.get_by_instance_uuid', return_value=vifs),
@@ -2236,8 +2327,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value=guest),
mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc',
return_value=xml),
- mock.patch.object(pci_utils, 'get_mac_by_pci_address',
- return_value='da:d1:f2:91:95:c1')):
+ ):
metadata_obj = drvr._build_device_metadata(self.context,
instance_ref)
metadata = metadata_obj.devices
@@ -2434,7 +2524,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(storage_ip, result['ip'])
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_lifecycle_event_registration(self):
calls = []
@@ -2533,6 +2624,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(time, "time")
def test_get_guest_config(self, time_mock):
+ """Generate a "standard" guest with minimal configuration.
+
+ This uses i440fx by default since that's our default machine type and
+ x86 is our default architecture (in our test env, anyway).
+ """
time_mock.return_value = 1234567.89
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -2541,177 +2637,251 @@ class LibvirtConnTestCase(test.NoDBTestCase,
test_instance["display_name"] = "purple tomatoes"
test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
test_instance['system_metadata']['owner_user_name'] = 'cupcake'
-
- ctxt = context.RequestContext(project_id=123,
- project_name="aubergine",
- user_id=456,
- user_name="pie")
-
- flavor = objects.Flavor(name='m1.small',
- memory_mb=6,
- vcpus=28,
- root_gb=496,
- ephemeral_gb=8128,
- swap=33550336,
- extra_specs={})
+ ctxt = context.RequestContext(
+ project_id=123,
+ project_name="aubergine",
+ user_id=456,
+ user_name="pie",
+ )
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs={},
+ )
instance_ref = objects.Instance(**test_instance)
instance_ref.flavor = flavor
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
-
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info,
- context=ctxt)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta, disk_info,
+ context=ctxt,
+ )
self.assertEqual(cfg.uuid, instance_ref["uuid"])
- self.assertEqual(3, len(cfg.features))
- self.assertIsInstance(cfg.features[0],
- vconfig.LibvirtConfigGuestFeatureACPI)
- self.assertIsInstance(cfg.features[1],
- vconfig.LibvirtConfigGuestFeatureAPIC)
- self.assertIsInstance(
- cfg.features[2], vconfig.LibvirtConfigGuestFeatureVMCoreInfo)
self.assertEqual(cfg.memory, 6 * units.Ki)
self.assertEqual(cfg.vcpus, 28)
self.assertEqual(cfg.os_type, fields.VMMode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
self.assertIsNone(cfg.os_root)
+
+ self.assertEqual(3, len(cfg.features))
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestFeatureACPI,
+ vconfig.LibvirtConfigGuestFeatureAPIC,
+ vconfig.LibvirtConfigGuestFeatureVMCoreInfo,
+ ]):
+ self.assertIsInstance(cfg.features[idx], device_type)
+
self.assertEqual(len(cfg.devices), 11)
- self.assertIsInstance(cfg.devices[0],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[1],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[2],
- vconfig.LibvirtConfigGuestDisk)
- self.assertIsInstance(cfg.devices[3],
- vconfig.LibvirtConfigGuestInterface)
- self.assertIsInstance(cfg.devices[4],
- vconfig.LibvirtConfigGuestSerial)
- self.assertIsInstance(cfg.devices[5],
- vconfig.LibvirtConfigGuestGraphics)
- self.assertIsInstance(cfg.devices[6],
- vconfig.LibvirtConfigGuestVideo)
- self.assertIsInstance(cfg.devices[7],
- vconfig.LibvirtConfigGuestInput)
- self.assertIsInstance(cfg.devices[8],
- vconfig.LibvirtConfigGuestRng)
- self.assertIsInstance(cfg.devices[9],
- vconfig.LibvirtConfigGuestUSBHostController)
- self.assertIsInstance(cfg.devices[10],
- vconfig.LibvirtConfigMemoryBalloon)
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestInterface,
+ vconfig.LibvirtConfigGuestSerial,
+ vconfig.LibvirtConfigGuestGraphics,
+ vconfig.LibvirtConfigGuestVideo,
+ vconfig.LibvirtConfigGuestInput,
+ vconfig.LibvirtConfigGuestRng,
+ vconfig.LibvirtConfigGuestUSBHostController,
+ vconfig.LibvirtConfigMemoryBalloon,
+ ]):
+ self.assertIsInstance(cfg.devices[idx], device_type)
+
self.assertEqual(len(cfg.metadata), 1)
- self.assertIsInstance(cfg.metadata[0],
- vconfig.LibvirtConfigGuestMetaNovaInstance)
- self.assertEqual(version.version_string_with_package(),
- cfg.metadata[0].package)
- self.assertEqual("purple tomatoes",
- cfg.metadata[0].name)
- self.assertEqual(1234567.89,
- cfg.metadata[0].creationTime)
- self.assertEqual("image",
- cfg.metadata[0].roottype)
- self.assertEqual(str(instance_ref["image_ref"]),
- cfg.metadata[0].rootid)
-
- self.assertIsInstance(cfg.metadata[0].owner,
- vconfig.LibvirtConfigGuestMetaNovaOwner)
- self.assertEqual("838a72b0-0d54-4827-8fd6-fb1227633ceb",
- cfg.metadata[0].owner.userid)
- self.assertEqual("cupcake",
- cfg.metadata[0].owner.username)
- self.assertEqual("fake",
- cfg.metadata[0].owner.projectid)
- self.assertEqual("sweetshop",
- cfg.metadata[0].owner.projectname)
+ self.assertIsInstance(
+ cfg.metadata[0], vconfig.LibvirtConfigGuestMetaNovaInstance)
+ self.assertEqual(
+ version.version_string_with_package(), cfg.metadata[0].package)
+ self.assertEqual("purple tomatoes", cfg.metadata[0].name)
+ self.assertEqual(1234567.89, cfg.metadata[0].creationTime)
+ self.assertEqual("image", cfg.metadata[0].roottype)
+ self.assertEqual(
+ str(instance_ref["image_ref"]), cfg.metadata[0].rootid)
- self.assertIsInstance(cfg.metadata[0].flavor,
- vconfig.LibvirtConfigGuestMetaNovaFlavor)
- self.assertEqual("m1.small",
- cfg.metadata[0].flavor.name)
- self.assertEqual(6,
- cfg.metadata[0].flavor.memory)
- self.assertEqual(28,
- cfg.metadata[0].flavor.vcpus)
- self.assertEqual(496,
- cfg.metadata[0].flavor.disk)
- self.assertEqual(8128,
- cfg.metadata[0].flavor.ephemeral)
- self.assertEqual(33550336,
- cfg.metadata[0].flavor.swap)
+ self.assertIsInstance(
+ cfg.metadata[0].owner, vconfig.LibvirtConfigGuestMetaNovaOwner)
+ self.assertEqual(
+ "838a72b0-0d54-4827-8fd6-fb1227633ceb",
+ cfg.metadata[0].owner.userid)
+ self.assertEqual("cupcake", cfg.metadata[0].owner.username)
+ self.assertEqual("fake", cfg.metadata[0].owner.projectid)
+ self.assertEqual("sweetshop", cfg.metadata[0].owner.projectname)
+ self.assertIsInstance(
+ cfg.metadata[0].flavor, vconfig.LibvirtConfigGuestMetaNovaFlavor)
+ self.assertEqual("m1.small", cfg.metadata[0].flavor.name)
+ self.assertEqual(6, cfg.metadata[0].flavor.memory)
+ self.assertEqual(28, cfg.metadata[0].flavor.vcpus)
+ self.assertEqual(496, cfg.metadata[0].flavor.disk)
+ self.assertEqual(8128, cfg.metadata[0].flavor.ephemeral)
+ self.assertEqual(33550336, cfg.metadata[0].flavor.swap)
- def test_get_guest_config_q35(self):
- self.flags(virt_type="kvm",
- group='libvirt')
+ num_ports = 0
+ for device in cfg.devices:
+ try:
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
+ num_ports += 1
+ except AttributeError:
+ pass
- TEST_AMOUNT_OF_PCIE_SLOTS = 8
- CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
- group='libvirt')
+ # i440fx is not a PCIe machine, so there should be no PCIe ports
+ self.assertEqual(0, num_ports)
+
+ @mock.patch.object(time, "time")
+ def test_get_guest_config_no_pcie_ports(self, time_mock):
+ """Generate a "standard" guest with minimal configuration.
+
+ This uses i440fx by default since that's our default machine type and
+ x86 is our default architecture (in our test env, anyway).
+ """
+ time_mock.return_value = 1234567.89
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- instance_ref = objects.Instance(**self.test_instance)
- image_meta = objects.ImageMeta.from_dict({
- "disk_format": "raw",
- "properties": {"hw_machine_type":
- "pc-q35-test"}})
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
+ test_instance = copy.deepcopy(self.test_instance)
+ test_instance["display_name"] = "purple tomatoes"
+ test_instance['system_metadata']['owner_project_name'] = 'sweetshop'
+ test_instance['system_metadata']['owner_user_name'] = 'cupcake'
+ ctxt = context.RequestContext(
+ project_id=123,
+ project_name="aubergine",
+ user_id=456,
+ user_name="pie",
+ )
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs={},
+ )
+ instance_ref = objects.Instance(**test_instance)
+ instance_ref.flavor = flavor
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta, disk_info,
+ context=ctxt,
+ )
num_ports = 0
for device in cfg.devices:
try:
- if (device.root_name == 'controller' and
- device.model == 'pcie-root-port'):
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
num_ports += 1
except AttributeError:
pass
- self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
+ # i440fx is not a PCIe machine, so there should be no PCIe ports
+ self.assertEqual(0, num_ports)
- def test_get_guest_config_pcie_i440fx(self):
- self.flags(virt_type="kvm",
- group='libvirt')
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_q35(self):
+ """Generate a "q35" guest with minimal configuration.
+
+ This configures an explicit machine type (q35) but defaults to x86
+ since this is our default architecture (in our test env, anyway).
+ """
+ self.flags(virt_type="kvm", group='libvirt')
TEST_AMOUNT_OF_PCIE_SLOTS = 8
- CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
- group='libvirt')
+ CONF.set_override(
+ "num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
+ group='libvirt',
+ )
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict({
"disk_format": "raw",
- "properties": {"hw_machine_type":
- "pc-i440fx-test"}})
+ "properties": {"hw_machine_type": "q35"},
+ })
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta,
+ )
- cfg = drvr._get_guest_config(instance_ref,
- _fake_network_info(self),
- image_meta, disk_info)
+ cfg = drvr._get_guest_config(
+ instance_ref,
+ _fake_network_info(self),
+ image_meta,
+ disk_info,
+ )
+
+ self.assertEqual(3, len(cfg.features))
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestFeatureACPI,
+ vconfig.LibvirtConfigGuestFeatureAPIC,
+ vconfig.LibvirtConfigGuestFeatureVMCoreInfo,
+ ]):
+ self.assertIsInstance(cfg.features[idx], device_type)
+
+ self.assertEqual(len(cfg.devices), 19)
+ for idx, device_type in enumerate([
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestDisk,
+ vconfig.LibvirtConfigGuestInterface,
+ vconfig.LibvirtConfigGuestSerial,
+ vconfig.LibvirtConfigGuestGraphics,
+ vconfig.LibvirtConfigGuestVideo,
+ vconfig.LibvirtConfigGuestInput,
+ vconfig.LibvirtConfigGuestRng,
+ vconfig.LibvirtConfigGuestPCIeRootController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestPCIeRootPortController,
+ vconfig.LibvirtConfigGuestUSBHostController,
+ vconfig.LibvirtConfigMemoryBalloon,
+ ]):
+ self.assertIsInstance(cfg.devices[idx], device_type)
num_ports = 0
for device in cfg.devices:
try:
- if (device.root_name == 'controller' and
- device.model == 'pcie-root-port'):
+ if (
+ device.root_name == 'controller' and
+ device.model == 'pcie-root-port'
+ ):
num_ports += 1
except AttributeError:
pass
- # i440fx is not pcie machine so there should be no pcie ports
- self.assertEqual(0, num_ports)
+ self.assertEqual(TEST_AMOUNT_OF_PCIE_SLOTS, num_ports)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.virt.libvirt.utils.get_default_machine_type',
new=mock.Mock(return_value='config-machine_type'))
def test_get_guest_config_records_machine_type_in_instance(self):
@@ -2897,7 +3067,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# NOTE(artom) This is a
# (cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)
- # tuple. See _get_guest_numa_config() docstring for full documenation.
+ # tuple. See _get_guest_numa_config() docstring for full documentation.
# _get_live_migrate_numa_info() only cares about guest_cpu_tune for CPU
# pinning and emulator thread pinning, and guest_numa_tune for cell
# pinning; so only include those 2 in the tuple.
@@ -2923,9 +3093,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'fake-instance-numa-topology',
'fake-flavor', 'fake-image-meta').obj_to_primitive())
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_fits(self, is_able):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_fits(self):
self.flags(cpu_shared_set=None, cpu_dedicated_set=None,
group='compute')
instance_ref = objects.Instance(**self.test_instance)
@@ -2957,14 +3126,13 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertIsNone(cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.cpu.numa)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.privsep.utils.supports_direct_io',
new=mock.Mock(return_value=True))
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_no_fit(self, is_able):
+ def test_get_guest_config_numa_host_instance_no_fit(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
@@ -2994,7 +3162,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta, disk_info)
self.assertFalse(choice_mock.called)
self.assertIsNone(cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.cpu.numa)
def _test_get_guest_memory_backing_config(
@@ -3093,6 +3261,41 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(membacking.locked)
self.assertFalse(membacking.sharedpages)
+ def test_get_guest_memory_backing_config_locked_flavor(self):
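+ # Note: hw:locked_memory is assumed to be valid only together with an
+ # explicit hw:mem_page_size, so both extra specs are set here.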
+ extra_specs = {
+ "hw:locked_memory": "True",
+ "hw:mem_page_size": 1000,
+ }
+ flavor = objects.Flavor(
+ name='m1.small', memory_mb=6, vcpus=28, root_gb=496,
+ ephemeral_gb=8128, swap=33550336, extra_specs=extra_specs)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ membacking = drvr._get_guest_memory_backing_config(
+ None, None, flavor, image_meta)
+ self.assertTrue(membacking.locked)
+
+ def test_get_guest_memory_backing_config_locked_image_meta(self):
+ extra_specs = {}
+ flavor = objects.Flavor(
+ name='m1.small',
+ memory_mb=6,
+ vcpus=28,
+ root_gb=496,
+ ephemeral_gb=8128,
+ swap=33550336,
+ extra_specs=extra_specs)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {
+ "hw_locked_memory": "True",
+ "hw_mem_page_size": 1000,
+ }})
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ membacking = drvr._get_guest_memory_backing_config(
+ None, None, flavor, image_meta)
+ self.assertTrue(membacking.locked)
+
def test_get_guest_memory_backing_config_realtime_invalid_share(self):
"""Test behavior when there is no pool of shared CPUS on which to place
the emulator threads, isolating them from the instance CPU processes.
@@ -3196,7 +3399,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(
"Memory encryption requested by hw:mem_encryption extra spec in "
"m1.fake flavor but image fake_image doesn't have "
- "'hw_firmware_type' property set to 'uefi'", str(exc))
+ "'hw_firmware_type' property set to 'uefi' or volume-backed "
+ "instance was requested", str(exc))
def test_sev_enabled_host_extra_spec_no_machine_type(self):
exc = self.assertRaises(exception.InvalidMachineType,
@@ -3355,10 +3559,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self._test_get_guest_memory_backing_config,
host_topology, inst_topology, numa_tune)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_pci_no_numa_info(
- self, is_able):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_pci_no_numa_info(self):
self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
group='compute')
@@ -3385,10 +3587,15 @@ class LibvirtConnTestCase(test.NoDBTestCase,
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={},
numa_node=None)
pci_device = objects.PciDevice(**pci_device_info)
+ instance_ref.pci_devices = objects.PciDeviceList(objects=[pci_device])
+ pci_req = objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name='pci-alias-1')
+ instance_ref.pci_requests = objects.InstancePCIRequests(
+ requests=[pci_req])
with test.nested(
mock.patch.object(host.Host, 'has_min_version',
@@ -3396,20 +3603,18 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
- return_value=set([3])),
- mock.patch.object(pci_manager, "get_instance_pci_devs",
- return_value=[pci_device])):
+ return_value=set([3]))
+ ):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(set([3]), cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.cpu.numa)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
@mock.patch('nova.privsep.utils.supports_direct_io',
new=mock.Mock(return_value=True))
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able):
+ def test_get_guest_config_numa_host_instance_2pci_no_fit(self):
self.flags(cpu_shared_set='3', cpu_dedicated_set=None,
group='compute')
instance_ref = objects.Instance(**self.test_instance)
@@ -3435,28 +3640,36 @@ class LibvirtConnTestCase(test.NoDBTestCase,
status=fields.PciDeviceStatus.AVAILABLE,
address='0000:00:00.1',
instance_uuid=None,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={},
numa_node=1)
pci_device = objects.PciDevice(**pci_device_info)
pci_device_info.update(numa_node=0, address='0000:00:00.2')
pci_device2 = objects.PciDevice(**pci_device_info)
+ instance_ref.pci_devices = objects.PciDeviceList(
+ objects=[pci_device, pci_device2]
+ )
+ instance_ref.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name="pci-alias-1"
+ )
+ ]
+ )
with test.nested(
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set([3])),
mock.patch.object(random, 'choice'),
- mock.patch.object(pci_manager, "get_instance_pci_devs",
- return_value=[pci_device, pci_device2]),
mock.patch.object(conn, '_has_numa_support',
return_value=False)
- ) as (_, _, choice_mock, pci_mock, _):
+ ) as (_, _, choice_mock, _):
cfg = conn._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertFalse(choice_mock.called)
self.assertEqual(set([3]), cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.cpu.numa)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@@ -3517,10 +3730,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
exception.NUMATopologyUnsupported,
None)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(
- self, is_able):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self):
self.flags(cpu_shared_set='2-3', cpu_dedicated_set=None,
group='compute')
@@ -3555,12 +3766,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# NOTE(ndipanov): we make sure that pin_set was taken into account
# when choosing viable cells
self.assertEqual(set([2, 3]), cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.cpu.numa)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_non_numa_host_instance_topo(self, is_able):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_non_numa_host_instance_topo(self):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
@@ -3597,7 +3807,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertIsNone(cfg.cpuset)
- self.assertEqual(0, len(cfg.cputune.vcpupin))
+ self.assertIsNone(cfg.cputune)
self.assertIsNone(cfg.numatune)
self.assertIsNotNone(cfg.cpu.numa)
for instance_cell, numa_cfg_cell in zip(
@@ -3607,9 +3817,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_numa_host_instance_topo(self, is_able):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_topo(self):
self.flags(cpu_shared_set='0-5', cpu_dedicated_set=None,
group='compute')
@@ -3690,6 +3899,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_instance_topo_reordered(self):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
@@ -3765,6 +3975,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
@@ -3843,6 +4054,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_instance_cpu_mixed(self):
"""Test to create mixed instance libvirt configuration which has a
default emulator thread policy and verify the NUMA topology related
@@ -3960,7 +4172,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(i, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
- def test_get_guest_config_numa_host_instance_cpu_mixed_isolated_emu(self):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_cpu_mixed_isolated_emu(
+ self):
"""Test to create mixed instance libvirt configuration which has an
ISOLATED emulator thread policy and verify the NUMA topology related
settings.
@@ -4047,6 +4261,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(i, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_instance_cpu_mixed_realtime(self):
"""Test of creating mixed instance libvirt configuration. which is
created through 'hw:cpu_realtime_mask' and 'hw:cpu_realtime' extra
@@ -4172,6 +4387,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler)
self.assertEqual(set([0, 1, 4, 5]), cfg.cputune.vcpusched[0].vcpus)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_mempages_shared(self):
self.flags(cpu_shared_set='2-5', cpu_dedicated_set=None,
group='compute')
@@ -4245,7 +4461,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(0, len(cfg.cputune.vcpusched))
self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
- def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(
+ self):
self.flags(cpu_shared_set=None, cpu_dedicated_set='4-7',
group='compute')
@@ -4340,6 +4558,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# which are 6, 7
self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_numa_host_instance_isolated_emulthreads(self):
self.flags(cpu_shared_set=None, cpu_dedicated_set='4-8',
group='compute')
@@ -4446,8 +4665,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertRaises(exception.Invalid, drvr._get_guest_config,
instance_ref, [], image_meta, disk_info)
- def test_get_guest_config_numa_host_instance_shared_emulator_threads(
- self):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_numa_host_instance_shared_emulator_threads(self):
self.flags(cpu_shared_set='0,1', cpu_dedicated_set='2-7',
group='compute')
instance_topology = objects.InstanceNUMATopology(
@@ -4653,6 +4872,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(drvr._wants_hugepages(host_topology,
instance_topology))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_clock(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -4696,6 +4916,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
else:
self.assertEqual(2, len(cfg.clock.timers))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_clock_hpet_false(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -4741,6 +4962,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
else:
self.assertEqual(2, len(cfg.clock.timers))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_clock_hpet_true(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -4787,6 +5009,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
else:
self.assertEqual(2, len(cfg.clock.timers))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_clock_hpet_invalid(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -5066,6 +5289,44 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual('/usr/share/OVMF/OVMF_CODE.fd', cfg.os_loader)
self.assertEqual('/usr/share/OVMF/OVMF_VARS.fd', cfg.os_nvram_template)
+ def test_get_guest_config_with_secure_boot_and_smm_required(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ # secure boot requires UEFI, so mark the host as supporting it
+ drvr._host._supports_uefi = True
+ # SMM is only used with secure boot, which the host must support
+ drvr._host._supports_secure_boot = True
+
+ # NOTE(imranh2): The current way of gathering firmwares is inflexible.
+ # nova/tests/fixtures/libvirt.py FakeLoaders has requires-smm defined,
+ # so do the following to make sure we pick that up programmatically.
+ # In the future we should test firmwares that both do and don't
+ # require SMM, but the current way firmware is selected doesn't make
+ # that possible.
+ loader, nvram_template, requires_smm = drvr._host.get_loader(
+ 'x86_64', 'q35', True)
+
+ image_meta = objects.ImageMeta.from_dict({
+ 'disk_format': 'raw',
+ # secure boot requires UEFI
+ 'properties': {
+ 'hw_firmware_type': 'uefi',
+ 'hw_machine_type': 'q35',
+ 'os_secure_boot': 'required',
+ },
+ })
+ instance_ref = objects.Instance(**self.test_instance)
+
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance_ref, image_meta)
+
+ cfg = drvr._get_guest_config(
+ instance_ref, [], image_meta, disk_info)
+ # if the firmware requires SMM, make sure the feature is present
+ if requires_smm:
+ self.assertTrue(any(isinstance(feature,
+ vconfig.LibvirtConfigGuestFeatureSMM)
+ for feature in cfg.features))
+
@ddt.data(True, False)
def test_get_guest_config_with_secure_boot_required(
self, host_has_support,
@@ -5146,7 +5407,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.AARCH64)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- self.assertTrue(drvr._check_uefi_support(None))
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ self.assertTrue(drvr._check_uefi_support(image_meta))
def test_get_guest_config_with_block_device(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -5241,6 +5503,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[2].target_dev, 'vdd')
mock_save.assert_called_with()
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_with_configdrive(self):
# It's necessary to check if the architecture is power, because
# power doesn't have support to ide, and so libvirt translate
@@ -5559,6 +5822,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'vnc')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(enabled=True, group='vnc')
@@ -5589,6 +5857,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[3].type, 'vnc')
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
def test_get_guest_config_with_spice_and_tablet(self):
@@ -5625,8 +5898,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].type, 'spice')
self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
self.assertIsNone(cfg.devices[3].keymap)
+ self.assertIsNone(cfg.devices[3].image_compression)
+ self.assertIsNone(cfg.devices[3].jpeg_compression)
+ self.assertIsNone(cfg.devices[3].zlib_compression)
+ self.assertIsNone(cfg.devices[3].playback_compression)
+ self.assertIsNone(cfg.devices[3].streaming_mode)
self.assertEqual(cfg.devices[5].type, 'tablet')
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_with_spice_and_agent(self):
self.flags(enabled=False, group='vnc')
self.flags(virt_type='kvm', group='libvirt')
@@ -5683,8 +5962,57 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[3].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[3].type, 'spicevmc')
self.assertEqual(cfg.devices[4].type, "spice")
+ self.assertIsNone(cfg.devices[4].image_compression)
+ self.assertIsNone(cfg.devices[4].jpeg_compression)
+ self.assertIsNone(cfg.devices[4].zlib_compression)
+ self.assertIsNone(cfg.devices[4].playback_compression)
+ self.assertIsNone(cfg.devices[4].streaming_mode)
self.assertEqual(cfg.devices[5].type, video_type)
+ def test_get_guest_config_with_spice_compression(self):
+ self.flags(enabled=False, group='vnc')
+ self.flags(virt_type='kvm', group='libvirt')
+ self.flags(enabled=True,
+ agent_enabled=False,
+ image_compression='auto_lz',
+ jpeg_compression='never',
+ zlib_compression='always',
+ playback_compression=False,
+ streaming_mode='all',
+ server_listen='10.0.0.1',
+ group='spice')
+ self.flags(pointer_model='usbtablet')
+
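+ # Each [spice] compression option set above should be copied through
+ # unchanged onto the generated graphics device; the assertions below
+ # verify that mapping.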
+ cfg = self._get_guest_config_with_graphics()
+
+ self.assertEqual(len(cfg.devices), 9)
+ self.assertIsInstance(cfg.devices[0],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[1],
+ vconfig.LibvirtConfigGuestDisk)
+ self.assertIsInstance(cfg.devices[2],
+ vconfig.LibvirtConfigGuestSerial)
+ self.assertIsInstance(cfg.devices[3],
+ vconfig.LibvirtConfigGuestGraphics)
+ self.assertIsInstance(cfg.devices[4],
+ vconfig.LibvirtConfigGuestVideo)
+ self.assertIsInstance(cfg.devices[5],
+ vconfig.LibvirtConfigGuestInput)
+ self.assertIsInstance(cfg.devices[6],
+ vconfig.LibvirtConfigGuestRng)
+ self.assertIsInstance(cfg.devices[7],
+ vconfig.LibvirtConfigGuestUSBHostController)
+ self.assertIsInstance(cfg.devices[8],
+ vconfig.LibvirtConfigMemoryBalloon)
+
+ self.assertEqual(cfg.devices[3].type, 'spice')
+ self.assertEqual(cfg.devices[3].listen, '10.0.0.1')
+ self.assertEqual(cfg.devices[3].image_compression, 'auto_lz')
+ self.assertEqual(cfg.devices[3].jpeg_compression, 'never')
+ self.assertEqual(cfg.devices[3].zlib_compression, 'always')
+ self.assertFalse(cfg.devices[3].playback_compression)
+ self.assertEqual(cfg.devices[3].streaming_mode, 'all')
+
@mock.patch.object(host.Host, 'get_guest')
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_get_serial_ports_from_guest')
@@ -5697,6 +6025,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance = objects.Instance(**self.test_instance)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
expected = {
fields.Architecture.X86_64: vconfig.LibvirtConfigGuestSerial,
@@ -5709,7 +6038,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
guest = vconfig.LibvirtConfigGuest()
drvr._create_consoles(
- guest_cfg=guest, instance=instance, flavor={}, image_meta={})
+ guest_cfg=guest,
+ instance=instance,
+ flavor={},
+ image_meta=image_meta)
self.assertEqual(1, len(guest.devices))
console_device = guest.devices[0]
self.assertIsInstance(console_device, device_type)
@@ -5921,9 +6253,13 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(enabled=serial_enabled, group='serial_console')
guest_cfg = vconfig.LibvirtConfigGuest()
instance = objects.Instance(**self.test_instance)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
drvr._create_consoles(
- guest_cfg, instance=instance, flavor=None, image_meta=None)
+ guest_cfg,
+ instance=instance,
+ flavor=None,
+ image_meta=image_meta)
self.assertEqual(1, len(guest_cfg.devices))
device = guest_cfg.devices[0]
@@ -6094,6 +6430,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
i = drvr._get_scsi_controller_next_unit(guest)
self.assertEqual(expect_num, i)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_with_type_kvm_on_s390(self):
self.flags(enabled=False, group='vnc')
self.flags(virt_type='kvm', group='libvirt')
@@ -6893,14 +7230,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(cfg.devices[5].rate_bytes, 1024)
self.assertEqual(cfg.devices[5].rate_period, 2)
- @mock.patch('nova.virt.libvirt.driver.os.path.exists')
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
- def test_get_guest_config_with_rng_backend(self, mock_path):
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
+ def test_get_guest_config_with_rng_backend(self):
self.flags(virt_type='kvm',
rng_dev_path='/dev/hw_rng',
group='libvirt')
self.flags(pointer_model='ps2mouse')
- mock_path.return_value = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
@@ -6957,29 +7292,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
[],
image_meta, disk_info)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_guest_cpu_shares_with_multi_vcpu(self, is_able):
- self.flags(virt_type='kvm', group='libvirt')
-
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-
- instance_ref = objects.Instance(**self.test_instance)
- instance_ref.flavor.vcpus = 4
- image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
-
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance_ref,
- image_meta)
-
- cfg = drvr._get_guest_config(instance_ref, [],
- image_meta, disk_info)
-
- self.assertEqual(4096, cfg.cputune.shares)
-
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_with_cpu_quota(self, is_able):
+ def test_get_guest_config_with_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7315,9 +7628,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.flags(images_type='rbd', group='libvirt')
self._test_get_guest_config_disk_cachemodes('rbd')
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=True)
- def test_get_guest_config_with_bogus_cpu_quota(self, is_able):
+ def test_get_guest_config_with_bogus_cpu_quota(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -7335,9 +7646,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._get_guest_config,
instance_ref, [], image_meta, disk_info)
- @mock.patch.object(
- host.Host, "is_cpu_control_policy_capable", return_value=False)
- def test_get_update_guest_cputune(self, is_able):
+ def test_get_update_guest_cputune(self):
+ # No CPU controller on the host
+ self.cgroups.version = 0
+
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
@@ -7389,12 +7701,19 @@ class LibvirtConnTestCase(test.NoDBTestCase,
address='0000:00:00.1',
compute_id=compute_ref.id,
instance_uuid=instance.uuid,
- request_id=None,
+ request_id=uuids.pci_req1,
extra_info={})
pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList()
pci_list.objects.append(pci_device)
instance.pci_devices = pci_list
+ instance.pci_requests = objects.InstancePCIRequests(
+ requests=[
+ objects.InstancePCIRequest(
+ request_id=uuids.pci_req1, alias_name="pci-alias"
+ )
+ ]
+ )
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
@@ -7510,11 +7829,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
- @mock.patch('os.path.exists', return_value=True)
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
- def test_get_guest_config_aarch64(
- self, mock_path_exists, mock_numa, mock_storage,
- ):
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
+ def test_get_guest_config_aarch64(self, mock_numa, mock_storage):
TEST_AMOUNT_OF_PCIE_SLOTS = 8
CONF.set_override("num_pcie_ports", TEST_AMOUNT_OF_PCIE_SLOTS,
group='libvirt')
@@ -7534,7 +7850,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self),
image_meta, disk_info)
- self.assertTrue(mock_path_exists.called)
self.assertEqual(cfg.os_mach_type, "virt")
num_ports = 0
@@ -7551,10 +7866,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_get_guest_storage_config")
@mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support")
- @mock.patch('os.path.exists', return_value=True)
- @test.patch_exists(SEV_KERNEL_PARAM_FILE, False)
+ @test.patch_exists(SEV_KERNEL_PARAM_FILE, result=False, other=True)
def test_get_guest_config_aarch64_with_graphics(
- self, mock_path_exists, mock_numa, mock_storage,
+ self, mock_numa, mock_storage,
):
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.4.0-0-generic', '', fields.Architecture.AARCH64)
@@ -7564,7 +7878,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg = self._get_guest_config_with_graphics()
- self.assertTrue(mock_path_exists.called)
self.assertEqual(cfg.os_mach_type, "virt")
usbhost_exists = False
@@ -7593,16 +7906,20 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return drvr._get_guest_config(
instance_ref, _fake_network_info(self), image_meta, disk_info)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_machine_type_through_image_meta(self):
cfg = self._get_guest_config_machine_type_through_image_meta(
"fake_machine_type")
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
- def test_get_guest_config_machine_type_through_image_meta_sev(self):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_machine_type_through_image_meta_sev(
+ self):
fake_q35 = "fake-q35-2.11"
cfg = self._get_guest_config_machine_type_through_image_meta(fake_q35)
self.assertEqual(cfg.os_mach_type, fake_q35)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_machine_type_from_config(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(hw_machine_type=['x86_64=fake_machine_type'],
@@ -7731,11 +8048,14 @@ class LibvirtConnTestCase(test.NoDBTestCase,
cfg.devices[device_index], vconfig.LibvirtConfigGuestVideo)
self.assertEqual(cfg.devices[device_index].type, 'vga')
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
self.flags(enabled=True, group='vnc')
self._test_get_guest_config_ppc64(4)
- def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ def test_get_guest_config_ppc64_through_image_meta_spice_enabled(
+ self):
self.flags(enabled=True,
agent_enabled=True,
group='spice')
@@ -7823,6 +8143,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta, disk_info)
self.assertIsNone(conf.cpu)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_cpu_config_automatic(self):
expected = {
fields.Architecture.X86_64: "host-model",
@@ -7903,6 +8224,33 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU)
self.assertEqual(conf.cpu.mode, 'custom')
+ def test_get_x86_64_hw_emulated_architecture_aarch64(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ 'properties': {
+ 'hw_architecture': 'x86_64',
+ 'hw_emulation_architecture': 'aarch64',
+ 'hw_machine_type': 'virt',
+ 'hw_firmware_type': 'uefi',
+ }})
+
+ self.assertEqual(drvr._check_emulation_arch(image_meta),
+ 'aarch64')
+
+ def test_get_x86_64_hw_emulated_architecture_ppc64(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ 'properties': {
+ 'hw_architecture': 'x86_64',
+ 'hw_emulation_architecture': 'ppc64le',
+ 'hw_machine_type': 'pseries',
+ }})
+
+ self.assertEqual(drvr._check_emulation_arch(image_meta),
+ 'ppc64le')
+
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_get_guest_cpu_config_custom_with_extra_flags(self,
mock_warn):
@@ -8284,6 +8632,206 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(conf.cpu.cores, 2)
self.assertEqual(conf.cpu.threads, 1)
+ def test_get_guest_iommu_not_enabled(self):
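+ # Without a hw:viommu_model extra spec or hw_viommu_model image
+ # property, no iommu device should appear in the generated config.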
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ test_instance = _create_test_instance()
+ instance_ref = objects.Instance(**test_instance)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ for device in cfg.devices:
+ self.assertNotEqual('iommu', device.root_name)
+
+ def test_get_guest_iommu_config_model(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'intel',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('intel', device.model)
+ self.assertFalse(hasattr(device, "aw_bits"))
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=True)
+ def test_get_guest_iommu_config_model_auto(self, has_min_version):
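+ # With hw:viommu_model=auto the driver picks a concrete model:
+ # 'virtio' when the hypervisor is new enough (has_min_version is
+ # mocked to True here), otherwise 'intel' on x86 or 'smmuv3' on
+ # AArch64, as the neighbouring tests show.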
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('virtio', device.model)
+ self.assertEqual(48, device.aw_bits)
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=False)
+ def test_get_guest_iommu_config_model_auto_intel(self, has_min_version):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "q35"},
+ })
+
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('intel', device.model)
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertTrue(device.eim)
+ self.assertTrue(device.iotlb)
+
+ self.assertEqual(1, count)
+ self.assertEqual('q35', cfg.os_mach_type)
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=False)
+ def test_get_guest_iommu_config_model_auto_aarch64(self, has_min_version):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_viommu_model": 'auto',
+ "hw_architecture": fields.Architecture.AARCH64,
+ "hw_machine_type": "virt"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ count = 0
+ for device in cfg.devices:
+ if device.root_name == 'iommu':
+ count += 1
+ self.assertIsInstance(device,
+ vconfig.LibvirtConfigGuestIOMMU)
+ self.assertEqual('smmuv3', device.model)
+ self.assertFalse(hasattr(device, "aw_bits"))
+ self.assertTrue(device.interrupt_remapping)
+ self.assertTrue(device.caching_mode)
+ self.assertFalse(device.eim)
+ self.assertTrue(device.iotlb)
+ self.assertEqual(1, count)
+
+ def test_get_guest_iommu_config_not_support_machine_type(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_machine_type": "pc-i440fx-2.11"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ self.assertRaises(
+ exception.InvalidVIOMMUMachineType, drvr._get_guest_config,
+ instance_ref, [], image_meta, disk_info
+ )
+
+ def test_get_guest_iommu_config_not_support_architecture(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_architecture": fields.Architecture.PPC64LE,
+ "hw_machine_type": "pc-i440fx-2.11"},
+ })
+ extra_specs = {
+ "hw:viommu_model": 'auto',
+ }
+ test_instance = _create_test_instance()
+ test_instance["flavor"]["extra_specs"] = extra_specs
+ instance_ref = objects.Instance(**test_instance)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+
+ self.assertRaises(
+ exception.InvalidVIOMMUArchitecture, drvr._get_guest_config,
+ instance_ref, [], image_meta, disk_info
+ )
+
def test_get_guest_memory_balloon_config_by_default(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
@@ -8373,6 +8921,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
break
self.assertTrue(no_exist)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_get_guest_usb_controller(self):
self.flags(enabled=True, group='vnc')
@@ -8525,6 +9074,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
None,
(("disk", "virtio", "vda"),))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_xml_disk_bus_ide(self):
# It's necessary to check if the architecture is power, because
# power doesn't have support to ide, and so libvirt translate
@@ -8560,6 +9110,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
block_device_info,
(expected,))
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_xml_disk_bus_ide_and_virtio(self):
expected = {
fields.Architecture.X86_64: ("cdrom", "ide", "hda"),
@@ -8683,6 +9234,34 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([0, 1, 2, 3]))
+ def test_get_pcpu_available_for_power_mgmt(self, get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
+ option is set and power management is enabled.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='2-3', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ pcpus = drvr._get_pcpu_available()
+ self.assertEqual(set([2, 3]), pcpus)
+
+ @mock.patch('nova.virt.libvirt.host.Host.get_available_cpus',
+ return_value=set([4, 5]))
+ def test_get_pcpu_available__cpu_dedicated_set_invalid_for_pm(self,
+ get_available_cpus):
+ """Test what happens when the '[compute] cpu_dedicated_set' config
+ option is set but is invalid when power management is enabled.
+ """
+ self.flags(vcpu_pin_set=None)
+ self.flags(cpu_dedicated_set='4-6', cpu_shared_set=None,
+ group='compute')
+ self.flags(cpu_power_management=True, group='libvirt')
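+ # CPU 6 from cpu_dedicated_set is not in the host's available set
+ # (4, 5), so _get_pcpu_available is expected to raise.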
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertRaises(exception.Invalid, drvr._get_pcpu_available)
+
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus',
return_value=set([0, 1, 2, 3]))
def test_get_vcpu_available(self, get_online_cpus):
@@ -8783,6 +9362,26 @@ class LibvirtConnTestCase(test.NoDBTestCase,
image_meta))
mock_fsthaw.assert_called_once_with()
+ def test_set_quiesced_agent_connection_fails(self):
+ # This is required to mock the guest host
+ self.create_fake_libvirt_mock(lookupByUUIDString=self.fake_lookup)
+
+ with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
+ error = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "QEMU guest agent is not connected",
+ error_code=fakelibvirt.VIR_ERR_AGENT_UNRESPONSIVE)
+
+ mock_fsfreeze.side_effect = error
+ mock_fsfreeze.error_code = error.get_error_code()
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
+ instance = objects.Instance(**self.test_instance)
+ image_meta = objects.ImageMeta.from_dict(
+ {"properties": {"hw_qemu_guest_agent": "yes", }})
+ self.assertRaises(exception.InstanceQuiesceFailed,
+ drvr._set_quiesced, self.context, instance, image_meta, True)
+
def test_create_snapshot_metadata(self):
base = objects.ImageMeta.from_dict(
{'disk_format': 'raw'})
@@ -9925,6 +10524,61 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# is called with the LUKSv1 payload offset taken into account.
block_device.resize.assert_called_once_with(new_size_minus_offset)
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
+ @mock.patch('os_brick.encryptors.get_encryption_metadata')
+ def test_extend_volume_os_brick_block(self, mock_get_encryption_metadata,
+ mock_get_encryptor):
+ """Test extend volume that uses an os-brick encryptor."""
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ connection_info = {
+ 'serial': uuids.volume_id,
+ 'driver_volume_type': 'fake',
+ 'data': {
+ 'device_path': mock.sentinel.device_path,
+ 'access_mode': 'rw'
+ }
+ }
+
+ block_device = mock.Mock(spec=libvirt_guest.BlockDevice,
+ _disk=mock.sentinel.disk)
+ guest = mock.Mock(spec=libvirt_guest.Guest)
+ guest.get_block_device.return_value = block_device
+ guest.get_power_state.return_value = power_state.RUNNING
+
+ # The requested_size is provided to extend_volume in bytes.
+ new_size = 20 * units.Gi
+ # Decrypted volume size reported by os-brick will be smaller
+ new_size_minus_offset = new_size - (16384 * units.Ki)
+
+ mock_brick_extend = mock_get_encryptor.return_value.extend_volume
+ mock_brick_extend.return_value = new_size_minus_offset
+
+ drvr._host.get_guest = mock.Mock(return_value=guest)
+ drvr._extend_volume = mock.Mock(return_value=new_size)
+
+ encryption = {'provider': 'luks2', 'control_location': 'front-end'}
+ mock_get_encryption_metadata.return_value = encryption
+
+ # Extend the volume to new_size
+ drvr.extend_volume(self.context, connection_info, instance, new_size)
+
+ # Assert that the expected calls are made prior to the device resize.
+ drvr._host.get_guest.assert_called_once_with(instance)
+ guest.get_power_state.assert_called_once_with(drvr._host)
+ guest.get_block_device(mock.sentinel.device_path)
+
+ # Assert calls to the os-brick encryptor extend
+ mock_get_encryptor.assert_called_once_with(connection_info, encryption)
+ mock_brick_extend.assert_called_once_with(self.context, **encryption)
+
+ mock_get_encryption_metadata.assert_called_once_with(
+ self.context, drvr._volume_api, uuids.volume_id, connection_info)
+
+ # Assert that the Libvirt call to resize the device within the instance
+ # is made with the size reported by os-brick
+ block_device.resize.assert_called_once_with(new_size_minus_offset)
+
@mock.patch('os_brick.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_encryptor')
def test_use_encryptor_connection_info_incomplete(self,
@@ -10606,11 +11260,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(drvr._uri(), testuri)
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -10645,11 +11299,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value.obj_to_primitive()['nova_object.data'])
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_with_over_commit(
self, mock_cpu, mock_test_file,
):
@@ -10685,11 +11339,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value.obj_to_primitive()['nova_object.data'])
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_all_pass_no_block_migration(
self, mock_cpu, mock_test_file,
):
@@ -10722,12 +11376,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value.obj_to_primitive()['nova_object.data'])
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_fills_listen_addrs(
self, mock_cpu, mock_test_file,
):
@@ -10754,12 +11408,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
str(result.serial_listen_addr))
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
return_value='fake')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU',
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU',
return_value=1)
def test_check_can_live_migrate_dest_ensure_serial_adds_not_set(
self, mock_cpu, mock_test_file,
@@ -10775,7 +11429,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertIsNone(result.serial_listen_addr)
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
@@ -10808,7 +11462,26 @@ class LibvirtConnTestCase(test.NoDBTestCase,
result.obj_to_primitive()['nova_object.data'])
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
+ new=mock.Mock(return_value=False))
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_create_shared_storage_test_file',
+ return_value='fake')
+ @mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
+ def test_check_can_live_migrate_guest_cpu_none_model_skip_compare(
+ self, mock_cpu, mock_test_file):
+ self.flags(group='workarounds', skip_cpu_compare_on_dest=True)
+ instance_ref = objects.Instance(**self.test_instance)
+ instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
+ instance_ref.vcpu_model.model = None
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
+ drvr.check_can_live_migrate_destination(
+ self.context, instance_ref, compute_info, compute_info)
+ mock_cpu.assert_not_called()
+
+ @mock.patch(
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
@@ -10827,7 +11500,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(result.dst_supports_numa_live_migration)
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file',
@@ -10844,11 +11517,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertNotIn('dst_supports_numa_live_migration', result)
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_no_instance_cpu_info(
self, mock_cpu, mock_test_file,
):
@@ -10885,11 +11558,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value.obj_to_primitive()['nova_object.data'])
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=False))
@mock.patch.object(libvirt_driver.LibvirtDriver,
'_create_shared_storage_test_file')
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_file_backed(
self, mock_cpu, mock_test_file,
):
@@ -10915,7 +11588,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(return_value.dst_wants_file_backed_memory)
- @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
+ @mock.patch.object(fakelibvirt.Connection, 'compareHypervisorCPU')
def test_check_can_live_migrate_dest_incompatible_cpu_raises(
self, mock_cpu):
instance_ref = objects.Instance(**self.test_instance)
@@ -10930,7 +11603,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
compute_info, compute_info, False)
@mock.patch(
- 'nova.network.neutron.API.supports_port_binding_extension',
+ 'nova.network.neutron.API.has_port_binding_extension',
new=mock.Mock(return_value=True))
@mock.patch.object(
libvirt_driver.LibvirtDriver,
@@ -10951,7 +11624,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
for vif in result.vifs:
self.assertTrue(vif.supports_os_vif_delegation)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
instance = objects.Instance(**self.test_instance)
@@ -10961,7 +11634,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_handles_not_supported_error_gracefully(self,
mock_vconfig,
@@ -10998,7 +11671,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_AARCH64_CPU_COMPARE))
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
def test_compare_cpu_host_aarch64(self,
mock_compare,
mock_get_libversion,
@@ -11021,7 +11694,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_compare.assert_called_once_with(caps.host.cpu.to_xml())
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt.LibvirtDriver,
'_vcpu_model_to_cpu_config')
def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
@@ -11040,7 +11713,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
ret = conn._compare_cpu(None, None, instance)
self.assertIsNone(ret)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
mock_compare):
@@ -11052,7 +11725,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
jsonutils.dumps(_fake_cpu_info),
instance)
- @mock.patch.object(host.Host, 'compare_cpu')
+ @mock.patch.object(host.Host, 'compare_hypervisor_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
mock_compare):
@@ -11247,7 +11920,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'_check_shared_storage_test_file')
def _test_check_can_live_migrate_source_block_migration_none(
self, block_migrate, is_shared_instance_path, is_share_block,
- mock_check, mock_shared_block, mock_enough, mock_verson):
+ mock_check, mock_shared_block, mock_enough, mock_version):
mock_check.return_value = is_shared_instance_path
mock_shared_block.return_value = is_share_block
@@ -11283,13 +11956,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
- '_assert_dest_node_has_enough_disk')
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_is_shared_block_storage')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_check_shared_storage_test_file')
def test_check_can_live_migration_source_disk_over_commit_none(self,
- mock_check, mock_shared_block, mock_enough, mock_disk_check):
+ mock_check, mock_shared_block, mock_disk_check):
mock_check.return_value = False
mock_shared_block.return_value = False
@@ -11507,7 +12178,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_migrateToURI3,
mock_min_version):
self.compute = manager.ComputeManager()
- instance_ref = self.test_instance
+ instance_ref = objects.Instance(**self.test_instance)
target_connection = '127.0.0.2'
xml_tmpl = ("<domain type='kvm'>"
@@ -12187,7 +12858,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get,
mock_min_version):
self.compute = manager.ComputeManager()
- instance_ref = self.test_instance
+ instance_ref = objects.Instance(**self.test_instance)
target_connection = '127.0.0.2'
xml_tmpl = ("<domain type='kvm'>"
@@ -12477,7 +13148,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_min_version):
# Prepare data
self.compute = manager.ComputeManager()
- instance_ref = self.test_instance
+ instance_ref = objects.Instance(**self.test_instance)
target_connection = '127.0.0.2'
disk_paths = ['vda', 'vdb']
@@ -13485,6 +14156,85 @@ class LibvirtConnTestCase(test.NoDBTestCase,
def test_live_migration_main_monitoring_failed(self):
self._test_live_migration_main(mon_side_effect=Exception)
+ @mock.patch.object(host.Host, "get_connection", new=mock.Mock())
+ @mock.patch.object(utils, "spawn", new=mock.Mock())
+ @mock.patch.object(host.Host, "get_guest")
+ @mock.patch.object(
+ libvirt_driver.LibvirtDriver, "_live_migration_copy_disk_paths")
+ def _test_live_migration_monitor_job_stats_exception(
+ self, exc, mock_copy_disk_paths, mock_get_guest, expect_success=True
+ ):
+ # Verify behavior when various exceptions are raised inside of
+ # Guest.get_job_info() during live migration monitoring.
+ mock_domain = mock.Mock(fakelibvirt.virDomain)
+ guest = libvirt_guest.Guest(mock_domain)
+ mock_get_guest.return_value = guest
+
+ # First, raise the exception from jobStats(), then return "completed"
+ # to make sure we exit the monitoring loop.
+ guest._domain.jobStats.side_effect = [
+ exc,
+ {'type': fakelibvirt.VIR_DOMAIN_JOB_COMPLETED},
+ ]
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ post_method = mock.Mock()
+ migrate_data = mock.Mock()
+ disks_to_copy = (['/some/path/one', '/test/path/two'],
+ ['vda', 'vdb'])
+ mock_copy_disk_paths.return_value = disks_to_copy
+
+ func = drvr._live_migration
+ args = (self.context, instance, mock.sentinel.dest, post_method,
+ mock.sentinel.recover_method, mock.sentinel.block_migration,
+ migrate_data)
+
+ if expect_success:
+ func(*args)
+ post_method.assert_called_once_with(
+ self.context, instance, mock.sentinel.dest,
+ mock.sentinel.block_migration, migrate_data
+ )
+ else:
+ actual_exc = self.assertRaises(
+ fakelibvirt.libvirtError, func, *args)
+ self.assertEqual(exc, actual_exc)
+
+ def test_live_migration_monitor_job_stats_no_domain(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'no domain',
+ error_code=fakelibvirt.VIR_ERR_NO_DOMAIN
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_op_invalid(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'operation invalid',
+ error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_no_ram_info_set(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError, 'internal error',
+ error_message='migration was active, but no RAM info was set',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=True)
+
+ def test_live_migration_monitor_job_stats_internal_error(self):
+ exp = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ 'some other internal error',
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR
+ )
+ self._test_live_migration_monitor_job_stats_exception(
+ exp, expect_success=False)
+
@mock.patch('os.path.exists', return_value=False)
@mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(libvirt_driver.LibvirtDriver,
@@ -13504,7 +14254,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_fetch.assert_called_once_with(self.context, instance,
fallback_from_host=None)
mock_create.assert_called_once_with(
- disk_info['type'], mock.ANY, disk_info['virt_disk_size'])
+ '/fake/instance/dir/foo',
+ disk_info['type'],
+ disk_info['virt_disk_size'],
+ )
mock_exists.assert_called_once_with('/fake/instance/dir/foo')
def test_create_images_and_backing_qcow2(self):
@@ -13536,7 +14289,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.context, instance,
"/fake/instance/dir", disk_info)
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.privsep.path.utime')
def test_create_images_and_backing_images_not_exist_fallback(
self, mock_utime, mock_create_cow_image):
@@ -13616,7 +14369,72 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_utime.assert_called()
mock_create_cow_image.assert_called_once_with(
- backfile_path, '/fake/instance/dir/disk_path', virt_disk_size)
+ '/fake/instance/dir/disk_path',
+ 'qcow2',
+ virt_disk_size,
+ backing_file=backfile_path,
+ )
+
+ @mock.patch('nova.virt.libvirt.imagebackend.Image.exists',
+ new=mock.Mock(return_value=True))
+ def test_create_images_backing_images_and_fallback_not_exist(self):
+ self.flags(images_type='raw', group='libvirt')
+ conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ base_dir = os.path.join(CONF.instances_path,
+ CONF.image_cache.subdirectory_name)
+ self.test_instance.update({
+ 'user_id': 'fake-user',
+ 'os_type': None,
+ 'kernel_id': uuids.kernel_id,
+ 'ramdisk_id': uuids.ramdisk_id,
+ 'project_id': 'fake-project'
+ })
+ instance = objects.Instance(**self.test_instance)
+
+ backing_file = imagecache.get_cache_fname(instance.image_ref)
+ backfile_path = os.path.join(base_dir, backing_file)
+ disk_size = 10747904
+ virt_disk_size = 25165824
+ disk_info = [{
+ 'backing_file': backing_file,
+ 'disk_size': disk_size,
+ 'path': 'disk_path',
+ 'type': 'raw',
+ 'virt_disk_size': virt_disk_size
+ }]
+
+ with test.nested(
+ mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'),
+ mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
+ side_effect=exception.ImageNotFound(
+ image_id=uuids.fake_id)),
+ mock.patch.object(imagebackend.Flat, 'resize_image'),
+ ) as (copy_image_mock, fetch_image_mock, resize_image_mock):
+ conn._create_images_and_backing(
+ self.context, instance, "/fake/instance/dir", disk_info,
+ fallback_from_host="fake_host")
+ kernel_path = os.path.join(CONF.instances_path,
+ self.test_instance['uuid'], 'kernel')
+ ramdisk_path = os.path.join(CONF.instances_path,
+ self.test_instance['uuid'], 'ramdisk')
+ copy_image_mock.assert_has_calls([
+ mock.call(dest=kernel_path, src=kernel_path,
+ host='fake_host', receive=True),
+ mock.call(dest=ramdisk_path, src=ramdisk_path,
+ host='fake_host', receive=True)
+ ])
+ fetch_image_mock.assert_has_calls([
+ mock.call(context=self.context,
+ target=backfile_path,
+ image_id=self.test_instance['image_ref'],
+ trusted_certs=None),
+ mock.call(self.context, kernel_path, instance.kernel_id,
+ None),
+ mock.call(self.context, ramdisk_path, instance.ramdisk_id,
+ None)
+ ])
+ resize_image_mock.assert_called_once_with(virt_disk_size)
@mock.patch('nova.virt.libvirt.utils.create_image',
new=mock.NonCallableMock())
@@ -13648,7 +14466,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertFalse(mock_fetch_image.called)
@mock.patch('nova.privsep.path.utime')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
def test_create_images_and_backing_ephemeral_gets_created(
self, mock_create_cow_image, mock_utime):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -13701,14 +14519,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# TODO(efried): Should these be disk_info[path]??
mock_create_cow_image.assert_has_calls([
mock.call(
- root_backing,
CONF.instances_path + '/disk',
- disk_info_byname['disk']['virt_disk_size']
+ 'qcow2',
+ disk_info_byname['disk']['virt_disk_size'],
+ backing_file=root_backing,
),
mock.call(
- ephemeral_backing,
CONF.instances_path + '/disk.local',
- disk_info_byname['disk.local']['virt_disk_size']
+ 'qcow2',
+ disk_info_byname['disk.local']['virt_disk_size'],
+ backing_file=ephemeral_backing,
),
])
@@ -14523,7 +15343,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('os.path.getsize')
def test_get_instance_disk_info_no_bdinfo_passed(self, mock_get_size,
mock_stat):
- # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method
+ # NOTE(ndipanov): _get_disk_overcommitted_size_total calls this method
# without access to Nova's block device information. We want to make
# sure that we guess volumes mostly correctly in that case as well
instance = objects.Instance(**self.test_instance)
@@ -14566,6 +15386,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_stat.assert_called_once_with(path)
mock_get_size.assert_called_once_with(path)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
def test_spawn_with_network_info(self, power_on=True):
def fake_getLibVersion():
return fakelibvirt.FAKE_LIBVIRT_VERSION
@@ -14700,6 +15524,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_build_device_metadata.assert_called_once_with(self.context,
instance)
+ @mock.patch.object(host.Host, "_check_machine_type", new=mock.Mock())
def test_spawn_power_on_false(self):
self.test_spawn_with_network_info(power_on=False)
@@ -14726,6 +15551,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
accel_info=accel_info, power_on=False)
return instance
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
def test_spawn_accels_no_accel_info(self, mock_get_guest_xml):
# accel_info should be passed to get_guest_xml even if it is []
@@ -14736,6 +15564,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
block_device_info=None, mdevs=mock.ANY,
accel_info=[])
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
def test_spawn_accels_with_accel_info(self, mock_get_guest_xml):
# accel_info should be passed to get_guest_xml if it is not []
@@ -14746,6 +15577,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
block_device_info=None, mdevs=mock.ANY,
accel_info=accel_info)
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
# Methods called directly by spawn()
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
@mock.patch.object(libvirt_driver.LibvirtDriver,
@@ -14793,6 +15627,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
config_disk.import_file.assert_called_once_with(instance, mock.ANY,
'disk.config')
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
def test_spawn_without_image_meta(self):
instance_ref = self.test_instance
instance_ref['image_ref'] = uuids.image_ref
@@ -14817,6 +15654,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(['disk', 'disk.local'],
sorted(backend.created_disks.keys()))
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
def _test_spawn_disks(self, image_ref, block_device_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -14877,6 +15717,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# We should have created the root and ephemeral disks
self.assertEqual(['disk', 'disk.local'], disks_created)
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
def test_spawn_lxc_from_volume(self):
self.flags(virt_type="lxc",
group='libvirt')
@@ -14967,6 +15810,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
inst_obj.system_metadata.get(
'rootfs_device_name'))
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
def test_spawn_with_pci_devices(self):
class FakeLibvirtPciDevice(object):
def dettach(self):
@@ -15011,6 +15857,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
return_value=mock_connection):
drvr.spawn(self.context, instance, image_meta, [], None, {})
+ @mock.patch.object(libvirt_driver.LibvirtDriver,
+ '_register_undefined_instance_details',
+ new=mock.Mock())
@mock.patch('nova.crypto.ensure_vtpm_secret')
@mock.patch.object(hardware, 'get_vtpm_constraint')
@mock.patch(
@@ -15333,8 +16182,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
filename=filename, size=100 * units.Gi, ephemeral_size=mock.ANY,
specified_fs=None)
- @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
- def test_create_image_resize_snap_backend(self, mock_cache):
+ def test_create_image_resize_snap_backend(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
instance.task_state = task_states.RESIZE_FINISH
@@ -15362,7 +16210,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('nova.privsep.path.utime')
@mock.patch('nova.virt.libvirt.utils.fetch_image')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
def test_create_ephemeral_specified_fs_not_valid(
self, mock_create_cow_image, mock_fetch_image, mock_utime):
CONF.set_override('default_ephemeral_format', 'ext4')
@@ -15378,10 +16226,9 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance = objects.Instance(**instance_ref)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
- disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
- instance,
- image_meta)
- disk_info['mapping'].pop('disk.local')
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta,
+ block_device_info=block_device_info)
with test.nested(
mock.patch('oslo_concurrency.processutils.execute'),
@@ -15793,9 +16640,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertEqual(ip, CONF.my_ip)
@mock.patch.object(libvirt_driver.LOG, 'warning')
- @mock.patch('nova.compute.utils.get_machine_ips')
- def test_check_my_ip(self, mock_ips, mock_log):
- mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
+ def test_check_my_ip(self, mock_log):
+
+ self.libvirt.mock_get_machine_ips.return_value = [
+ '8.8.8.8', '75.75.75.75']
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr._check_my_ip()
mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
@@ -15805,7 +16653,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
'my_ip': mock.ANY})
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_init_host_checks_ip(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(drvr, '_check_my_ip') as mock_check:
@@ -15816,6 +16665,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
@@ -15823,8 +16673,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(objects.Service, "get_by_compute_host",
return_value=service_mock)):
@@ -15839,6 +16687,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = False
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
side_effect=fakelibvirt.make_libvirtError(
@@ -15846,8 +16695,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"Failed to connect to host",
error_code=
fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
@@ -15861,16 +16708,16 @@ class LibvirtConnTestCase(test.NoDBTestCase,
self.assertTrue(service_mock.disabled)
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
def test_service_resume_after_broken_connection(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
service_mock = mock.MagicMock()
service_mock.disabled.return_value = True
+ drvr._host._init_events.return_value = None
with test.nested(
mock.patch.object(drvr._host, "_connect",
return_value=mock.MagicMock()),
- mock.patch.object(drvr._host, "_init_events",
- return_value=None),
mock.patch.object(host.Host, "has_min_version",
return_value=True),
mock.patch.object(drvr, "_do_quality_warnings",
@@ -16029,7 +16876,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get.return_value = fake_guest
self.stub_out('oslo_service.loopingcall.FixedIntervalLoopingCall',
lambda *a, **k: FakeLoopingCall())
- self.stub_out('nova.pci.manager.get_instance_pci_devs', lambda *a: [])
drvr.reboot(None, instance, [], 'SOFT')
@@ -16041,14 +16887,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get.assert_has_calls([mock.call(instance)] * 2, any_order=True)
self.assertEqual(2, mock_get.call_count)
- @mock.patch.object(pci_manager, 'get_instance_pci_devs')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, '_get_domain')
def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
- mock_sleep, mock_loopingcall,
- mock_get_instance_pci_devs):
+ mock_sleep, mock_loopingcall):
class FakeLoopingCall(object):
def start(self, *a, **k):
return self
@@ -16076,7 +16920,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_domain.return_value = mock_domain
mock_hard_reboot.side_effect = fake_hard_reboot
mock_loopingcall.return_value = FakeLoopingCall()
- mock_get_instance_pci_devs.return_value = []
drvr.reboot(None, instance, [], 'SOFT')
self.assertTrue(self.reboot_hard_reboot_called)
@@ -16274,7 +17117,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch('oslo_utils.fileutils.ensure_tree')
@mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
- @mock.patch('nova.pci.manager.get_instance_pci_devs')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_guest_with_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver.'
@@ -16291,7 +17133,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_get_guest_config, mock_get_instance_path,
mock_get_instance_disk_info, mock_create_images_and_backing,
mock_create_domand_and_network,
- mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
+ mock_looping_call, mock_ensure_tree):
"""For a hard reboot, we shouldn't need an additional call to glance
to get the image metadata.
@@ -16337,10 +17179,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
@mock.patch.object(conn, '_detach_mediated_devices')
@mock.patch.object(conn, '_detach_direct_passthrough_ports')
@mock.patch.object(conn, '_detach_pci_devices')
- @mock.patch.object(pci_manager, 'get_instance_pci_devs',
- return_value='pci devs')
@mock.patch.object(conn._host, 'get_guest', return_value=guest)
- def suspend(mock_get_guest, mock_get_instance_pci_devs,
+ def suspend(mock_get_guest,
mock_detach_pci_devices,
mock_detach_direct_passthrough_ports,
mock_detach_mediated_devices,
@@ -16483,15 +17323,17 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.system_metadata)
self.assertTrue(mock_attachDevice.called)
- @mock.patch.object(host.Host,
- 'has_min_version', return_value=True)
- def _test_detach_direct_passthrough_ports(self,
- mock_has_min_version, vif_type):
+ @mock.patch.object(
+ host.Host, 'has_min_version', new=mock.Mock(return_value=True)
+ )
+ def _test_detach_direct_passthrough_ports(
+ self, vif_type, detach_device=True,
+ vnic_type=network_model.VNIC_TYPE_DIRECT):
instance = objects.Instance(**self.test_instance)
expeted_pci_slot = "0000:00:00.0"
network_info = _fake_network_info(self)
- network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ network_info[0]['vnic_type'] = vnic_type
# some more adjustments for the fake network_info so that
# the correct get_config function will be executed (vif's
# get_config_hw_veb - which is according to the real SRIOV vif)
@@ -16504,32 +17346,55 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
# fill the pci_devices of the instance so that
- # pci_manager.get_instance_pci_devs will not return an empty list
+ # instance.get_pci_devices will not return an empty list
# which will eventually fail the assertion for detachDeviceFlags
expected_pci_device_obj = (
- objects.PciDevice(address=expeted_pci_slot, request_id=None))
+ objects.PciDevice(
+ address=expeted_pci_slot, request_id=None, compute_node_id=42
+ )
+ )
instance.pci_devices = objects.PciDeviceList()
instance.pci_devices.objects = [expected_pci_device_obj]
- domain = FakeVirtDomain()
+ domain = FakeVirtDomain(id=24601, name='Jean Valjean')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
guest = libvirt_guest.Guest(domain)
- with mock.patch.object(drvr, '_detach_pci_devices') as mock_detach_pci:
+ with mock.patch.object(
+ drvr, '_detach_pci_devices'
+ ) as mock_detach_pci, mock.patch.object(
+ drvr, 'detach_interface'
+ ) as mock_detach_interface:
drvr._detach_direct_passthrough_ports(
self.context, instance, guest)
- mock_detach_pci.assert_called_once_with(
- guest, [expected_pci_device_obj])
+ if detach_device:
+ mock_detach_pci.assert_called_once_with(
+ guest, [expected_pci_device_obj])
+ else:
+ mock_detach_interface.assert_called_once()
- def test_detach_direct_passthrough_ports_interface_interface_hostdev(self):
+ def test_detach_direct_passthrough_ports_ovs_hw_offload(self):
# Note: test detach_direct_passthrough_ports method for vif with config
# LibvirtConfigGuestInterface
- self._test_detach_direct_passthrough_ports(vif_type="hw_veb")
+ self._test_detach_direct_passthrough_ports("ovs", detach_device=False)
- def test_detach_direct_passthrough_ports_interface_pci_hostdev(self):
+ def test_detach_direct_passthrough_ports_sriov_nic_agent(self):
+ # Note: test detach_direct_passthrough_ports method for vif with config
+ # LibvirtConfigGuestInterface
+ self._test_detach_direct_passthrough_ports(
+ "hw_veb", detach_device=False
+ )
+
+ def test_detach_direct_physical_passthrough_ports_sriov_nic_agent(self):
+ self._test_detach_direct_passthrough_ports(
+ "hostdev_physical",
+ vnic_type=network_model.VNIC_TYPE_DIRECT_PHYSICAL
+ )
+
+ def test_detach_direct_passthrough_ports_infiniband(self):
# Note: test detach_direct_passthrough_ports method for vif with config
# LibvirtConfigGuestHostdevPCI
- self._test_detach_direct_passthrough_ports(vif_type="ib_hostdev")
+ self._test_detach_direct_passthrough_ports("ib_hostdev")
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
@mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
@@ -16539,9 +17404,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
network_info = _fake_network_info(self, 2)
+ direct_physical = network_model.VNIC_TYPE_DIRECT_PHYSICAL
for network_info_inst in network_info:
- network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT
- network_info_inst['type'] = "hw_veb"
+ network_info_inst['vnic_type'] = direct_physical
+ network_info_inst['type'] = "hostdev_physical"
network_info_inst['details'] = dict(vlan="2145")
network_info_inst['address'] = "fa:16:3e:96:2a:48"
@@ -16551,7 +17417,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
# fill the pci_devices of the instance so that
- # pci_manager.get_instance_pci_devs will not return an empty list
+ # instance.get_pci_devices will not return an empty list
# which will eventually fail the assertion for detachDeviceFlags
instance.pci_devices = objects.PciDeviceList()
instance.pci_devices.objects = [
@@ -16606,8 +17472,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(drvr, '_create_guest_with_network',
return_value=guest),
mock.patch.object(drvr, '_attach_pci_devices'),
- mock.patch.object(pci_manager, 'get_instance_pci_devs',
- return_value='fake_pci_devs'),
+ mock.patch('nova.objects.Instance.get_pci_devices',
+ return_value='fake_pci_devs'),
mock.patch.object(utils, 'get_image_from_system_metadata'),
mock.patch.object(guest, 'sync_guest_time'),
mock.patch.object(drvr, '_wait_for_running',
@@ -17358,12 +18224,11 @@ class LibvirtConnTestCase(test.NoDBTestCase,
got = drvr._get_cpu_info()
self.assertEqual(want, got)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='ens1')
@mock.patch.object(host.Host, 'list_pci_devices',
return_value=['pci_0000_04_00_3', 'pci_0000_04_10_7',
'pci_0000_04_11_7'])
- def test_get_pci_passthrough_devices(self, mock_list, mock_get_ifname):
+ def test_get_pci_passthrough_devices(self, mock_list):
+ pci_utils.get_ifname_by_pci_address.return_value = 'ens1'
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -17397,7 +18262,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
"vendor_id": '8086',
"dev_type": fields.PciDeviceType.SRIOV_PF,
"phys_function": None,
- "numa_node": None},
+ "numa_node": None,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
+ },
{
"dev_id": "pci_0000_04_10_7",
"domain": 0,
@@ -17433,7 +18301,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
# The first call for every VF is to determine parent_ifname and
# the second call to determine the MAC address.
- mock_get_ifname.assert_has_calls([
+ pci_utils.get_ifname_by_pci_address.assert_has_calls([
mock.call('0000:04:10.7', pf_interface=True),
mock.call('0000:04:11.7', pf_interface=True),
])
@@ -19198,8 +20066,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
instance = objects.Instance(**self.test_instance)
instance.vm_state = vm_states.BUILDING
- vifs = [{'id': uuids.vif_1, 'active': False},
- {'id': uuids.vif_2, 'active': False}]
+ vifs = [
+ network_model.VIF(id=uuids.vif_1, active=False),
+ network_model.VIF(id=uuids.vif_2, active=False)
+ ]
@mock.patch.object(drvr, 'plug_vifs')
@mock.patch.object(drvr, '_create_guest')
@@ -19396,6 +20266,23 @@ class LibvirtConnTestCase(test.NoDBTestCase,
events = drvr._get_neutron_events(network_info)
self.assertEqual([('network-vif-plugged', '1')], events)
+ def test_get_neutron_events_remote_managed(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ network_info = [
+ network_model.VIF(
+ id=uuids.vif_1,
+ vnic_type=network_model.VNIC_TYPE_REMOTE_MANAGED),
+ network_model.VIF(
+ id=uuids.vif_2,
+ vnic_type=network_model.VNIC_TYPE_REMOTE_MANAGED,
+ active=True),
+ ]
+ events = drvr._get_neutron_events(network_info)
+ # For VNIC_TYPE_REMOTE_MANAGED, only bind-time events are currently sent.
+ # Until this changes, they need to be filtered out to avoid waiting
+ # for them unnecessarily.
+ self.assertEqual([], events)
+
def test_unplug_vifs_ignores_errors(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
with mock.patch.object(drvr, 'vif_driver') as vif_driver:
@@ -19827,11 +20714,12 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock.patch.object(drvr._conn, 'defineXML', create=True),
mock.patch('nova.virt.libvirt.utils.get_disk_size'),
mock.patch('nova.virt.libvirt.utils.get_disk_backing_file'),
- mock.patch('nova.virt.libvirt.utils.create_cow_image'),
+ mock.patch('nova.virt.libvirt.utils.create_image'),
mock.patch('nova.virt.libvirt.utils.extract_snapshot'),
- mock.patch.object(drvr, '_set_quiesced')
+ mock.patch.object(drvr, '_set_quiesced'),
+ mock.patch.object(drvr, '_can_quiesce')
) as (mock_define, mock_size, mock_backing, mock_create_cow,
- mock_snapshot, mock_quiesce):
+ mock_snapshot, mock_quiesce, mock_can_quiesce):
xmldoc = "<domain/>"
srcfile = "/first/path"
@@ -19846,7 +20734,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
guest = libvirt_guest.Guest(mock_dom)
if not can_quiesce:
- mock_quiesce.side_effect = (
+ mock_can_quiesce.side_effect = (
exception.InstanceQuiesceNotSupported(
instance_id=self.test_instance['id'], reason='test'))
@@ -19869,7 +20757,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_size.assert_called_once_with(srcfile, format="qcow2")
mock_backing.assert_called_once_with(srcfile, basename=False,
format="qcow2")
- mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
+ mock_create_cow.assert_called_once_with(
+ dltfile, 'qcow2', 1004009, backing_file=bckfile)
mock_chown.assert_called_once_with(dltfile, uid=os.getuid())
mock_snapshot.assert_called_once_with(dltfile, "qcow2",
dstfile, "qcow2")
@@ -19877,6 +20766,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
mock_define.assert_called_once_with(xmldoc)
mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
mock.ANY, True)
+
if can_quiesce:
mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
mock.ANY, False)
@@ -19980,7 +20870,8 @@ class LibvirtConnTestCase(test.NoDBTestCase,
driver.init_host, 'wibble')
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
libvirt_driver.MIN_VIRTUOZZO_VERSION))
@@ -20101,7 +20992,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 0,
'source_type': 'volume', 'destination_type': 'volume',
- 'device_name': '/dev/sda'}))
+ 'device_name': '/dev/sda', 'boot_index': 0}))
info = {'block_device_mapping': driver_block_device.convert_volumes(
[bdm])}
info['block_device_mapping'][0]['connection_info'] = conn_info
@@ -20211,8 +21102,10 @@ class LibvirtConnTestCase(test.NoDBTestCase,
drvr._get_disk_config_image_type())
self.assertEqual(2, drvr.image_backend.by_name.call_count)
- call1 = mock.call(instance, 'disk.config', 'rbd')
- call2 = mock.call(instance, 'disk.config', 'flat')
+ call1 = mock.call(instance, 'disk.config', 'rbd',
+ disk_info_mapping=disk_mapping['disk.config'])
+ call2 = mock.call(instance, 'disk.config', 'flat',
+ disk_info_mapping=disk_mapping['disk.config'])
drvr.image_backend.by_name.assert_has_calls([call1, call2])
self.assertEqual(mock.sentinel.diskconfig, diskconfig)
@@ -20255,7 +21148,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
guest = mock.Mock()
with test.nested(
- mock.patch.object(pci_manager, 'get_instance_pci_devs'),
mock.patch.object(drvr, '_attach_pci_devices'),
mock.patch.object(drvr, '_attach_direct_passthrough_ports'),
):
@@ -20691,7 +21583,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
'min_unit': 1,
'max_unit': self.vcpus,
'step_size': 1,
- 'allocation_ratio': 16.0,
+ 'allocation_ratio': 4.0,
'reserved': 0,
},
orc.PCPU: {
@@ -20707,7 +21599,7 @@ class TestUpdateProviderTree(test.NoDBTestCase):
'min_unit': 1,
'max_unit': self.memory_mb,
'step_size': 1,
- 'allocation_ratio': 1.5,
+ 'allocation_ratio': 1.0,
'reserved': 512,
},
orc.DISK_GB: {
@@ -21435,6 +22327,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.flags(sysinfo_serial="none", group="libvirt")
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.useFixture(nova_fixtures.LibvirtFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
os_vif.initialize()
self.drvr = libvirt_driver.LibvirtDriver(
@@ -21530,6 +22423,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
context.get_admin_context(), ins_ref, '10.0.0.2',
flavor_obj, None)
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
+ '_cleanup_failed_instance_base')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unplug_vifs')
@mock.patch('nova.virt.libvirt.utils.save_and_migrate_vtpm_dir')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
@@ -21546,7 +22441,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self, ctxt, flavor_obj, mock_execute, mock_exists, mock_rename,
mock_is_shared, mock_get_host_ip, mock_destroy,
mock_get_disk_info, mock_vtpm, mock_unplug_vifs,
- block_device_info=None, params_for_instance=None):
+ mock_cleanup, block_device_info=None, params_for_instance=None):
"""Test for nova.virt.libvirt.driver.LivirtConnection
.migrate_disk_and_power_off.
"""
@@ -21561,6 +22456,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
ctxt, instance, '10.0.0.2', flavor_obj, None,
block_device_info=block_device_info)
+ mock_cleanup.assert_called_once()
+ mock_cleanup.reset_mock()
self.assertEqual(out, disk_info_text)
mock_vtpm.assert_called_with(
instance.uuid, mock.ANY, mock.ANY, '10.0.0.2', mock.ANY, mock.ANY)
@@ -21571,6 +22468,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
ctxt, instance, '10.0.0.1', flavor_obj, None,
block_device_info=block_device_info)
+ mock_cleanup.assert_called_once()
self.assertEqual(out, disk_info_text)
mock_vtpm.assert_called_with(
instance.uuid, mock.ANY, mock.ANY, '10.0.0.1', mock.ANY, mock.ANY)
@@ -21816,11 +22714,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.drvr.migrate_disk_and_power_off,
'ctx', instance, '10.0.0.1', flavor_obj, None)
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
- '._get_instance_disk_info')
@mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
- def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
- mock_get_disk_info):
+ def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get):
mappings = [
{
'device_name': '/dev/sdb4',
@@ -21867,7 +22762,6 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# Old flavor, eph is 20, real disk is 3, target is 2, fail
flavor = {'root_gb': 10, 'ephemeral_gb': 2}
flavor_obj = objects.Flavor(**flavor)
- mock_get_disk_info.return_value = fake_disk_info_json(instance)
self.assertRaises(
exception.InstanceFaultRollback,
@@ -22468,8 +23362,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertFalse(drvr.image_backend.remove_snap.called)
@mock.patch.object(shutil, 'rmtree')
- def test_cleanup_failed_migration(self, mock_rmtree):
- self.drvr._cleanup_failed_migration('/fake/inst')
+ def test_cleanup_failed_instance_base(self, mock_rmtree):
+ self.drvr._cleanup_failed_instance_base('/fake/inst')
mock_rmtree.assert_called_once_with('/fake/inst')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_cleanup_resize')
@@ -22828,6 +23722,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
}
instance = self._create_instance(params=inst_params)
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta)
disk_images = {'image_id': instance.image_ref}
instance_dir = libvirt_utils.get_instance_path(instance)
disk_path = os.path.join(instance_dir, 'disk')
@@ -22847,7 +23744,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
]
drvr._create_and_inject_local_root(
- self.context, instance, False, '', disk_images, None, None)
+ self.context, instance, disk_info['mapping'], False, '',
+ disk_images, None, None)
mock_fetch_calls = [
mock.call(test.MatchType(nova.virt.libvirt.imagebackend.Qcow2),
@@ -22930,9 +23828,13 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
# config_drive is True by default, configdrive.required_by()
# returns True
instance_ref = self._create_instance()
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance_ref, image_meta)
disk_images = {'image_id': None}
- drvr._create_and_inject_local_root(self.context, instance_ref, False,
+ drvr._create_and_inject_local_root(self.context, instance_ref,
+ disk_info['mapping'], False,
'', disk_images, get_injection_info(),
None)
self.assertFalse(mock_inject.called)
@@ -22952,6 +23854,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_image.get.return_value = {'locations': [], 'disk_format': 'raw'}
instance = self._create_instance()
+ image_meta = objects.ImageMeta.from_dict({})
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, image_meta)
disk_images = {'image_id': 'foo'}
self.flags(images_type='rbd', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -22962,6 +23867,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_fetch.reset_mock()
drvr._create_and_inject_local_root(self.context,
instance,
+ disk_info['mapping'],
False,
'',
disk_images,
@@ -23855,7 +24761,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test__detach_with_retry_timeout_retry_succeeds(
self, state, mock_event_wait
):
- """Test that that a live detach times out while waiting for the libvirt
+ """Test that a live detach times out while waiting for the libvirt
event but then the retry succeeds.
"""
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -23910,7 +24816,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test__detach_with_retry_timeout_retry_unplug_in_progress(
self, mock_event_wait
):
- """Test that that a live detach times out while waiting for the libvirt
+ """Test that a live detach times out while waiting for the libvirt
event but then the retry gets a unplug already in progress error from
libvirt, which it ignores, then the detach finishes and the event is
received.
@@ -23990,10 +24896,10 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
def test__detach_with_retry_timeout_run_out_of_retries(
self, state, mock_event_wait
):
- """Test that that a live detach times out while waiting for the libvirt
+ """Test that a live detach times out while waiting for the libvirt
event at every attempt so the driver runs out of retry attempts.
"""
- # decreased the number to simplyfy the test
+ # decreased the number to simplify the test
self.flags(group='libvirt', device_detach_attempts=2)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -24321,7 +25227,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertEqual('raw', disk.image_type)
# Assert that the root rescue disk was created as the default type
- self.assertIsNone(disks['disk.rescue'].image_type)
+ self.assertEqual('default', disks['disk.rescue'].image_type)
# We expect the generated domain to contain disk.rescue and
# disk, in that order
@@ -24393,7 +25299,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
self.assertEqual('raw', disk.image_type)
# Assert that the root rescue disk was created as the default type
- self.assertIsNone(disks['disk.rescue'].image_type)
+ self.assertEqual('default', disks['disk.rescue'].image_type)
# We expect the generated domain to contain disk.rescue, disk, and
# disk.config.rescue in that order
@@ -24631,7 +25537,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
'id': 1,
'source_type': 'volume',
'destination_type': 'volume',
- 'device_name': '/dev/vda'}))
+ 'device_name': '/dev/vda',
+ 'boot_index': 0}))
bdms = driver_block_device.convert_volumes([bdm])
block_device_info = {'root_device_name': '/dev/vda',
'ephemerals': [],
@@ -25317,9 +26224,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
}
self._test_get_gpu_inventories(drvr, expected, ['nvidia-11'])
- @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
- '._get_mdev_capable_devices')
- def test_get_gpu_inventories_with_two_types(self, get_mdev_capable_devs):
+ def test_get_gpu_inventories_with_two_types(self):
self.flags(enabled_mdev_types=['nvidia-11', 'nvidia-12'],
group='devices')
# we need to call the below again to ensure the updated
@@ -25952,7 +26857,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
drvr._get_existing_mdevs_not_assigned(parent=None))
@mock.patch.object(libvirt_driver.LibvirtDriver,
- '_register_instance_machine_type', new=mock.Mock())
+ '_register_all_undefined_instance_details',
+ new=mock.Mock())
@mock.patch('nova.compute.utils.get_machine_ips',
new=mock.Mock(return_value=[]))
@mock.patch.object(nova.privsep.libvirt, 'create_mdev')
@@ -26668,12 +27574,13 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
mock_execute.assert_called_once_with('qemu-img', 'rebase',
'-b', '', 'disk')
+ @mock.patch('nova.objects.instance.Instance.save')
@mock.patch('nova.objects.instance.InstanceList.get_by_host')
@mock.patch('nova.virt.libvirt.host.Host.get_hostname',
new=mock.Mock(return_value=mock.sentinel.hostname))
@mock.patch('nova.context.get_admin_context', new=mock.Mock())
def test_register_machine_type_already_registered_image_metadata(
- self, mock_get_by_host
+ self, mock_get_by_host, mock_instance_save,
):
instance = self._create_instance(
params={
@@ -26683,7 +27590,14 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
}
)
mock_get_by_host.return_value = [instance]
- self.drvr._register_instance_machine_type()
+
+ # We only care about hw_machine_type for this test
+ with mock.patch(
+ 'nova.virt.libvirt.driver.REGISTER_IMAGE_PROPERTY_DEFAULTS',
+ ['hw_machine_type']
+ ):
+ self.drvr._register_all_undefined_instance_details()
+
# Assert that we don't overwrite the existing type
self.assertEqual(
'existing_type',
@@ -26693,6 +27607,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
'existing_type',
instance.system_metadata.get('image_hw_machine_type')
)
+ mock_instance_save.assert_not_called()
@mock.patch('nova.objects.instance.Instance.save')
@mock.patch('nova.objects.instance.InstanceList.get_by_host')
@@ -26705,7 +27620,14 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
):
instance = self._create_instance()
mock_get_by_host.return_value = [instance]
- self.drvr._register_instance_machine_type()
+
+ # We only care about hw_machine_type for this test
+ with mock.patch(
+ 'nova.virt.libvirt.driver.REGISTER_IMAGE_PROPERTY_DEFAULTS',
+ ['hw_machine_type']
+ ):
+ self.drvr._register_all_undefined_instance_details()
+
mock_instance_save.assert_called_once()
self.assertEqual(
'conf_type',
@@ -26716,6 +27638,172 @@ class LibvirtDriverTestCase(test.NoDBTestCase, TraitsComparisonMixin):
instance.system_metadata.get('image_hw_machine_type')
)
+ @mock.patch('nova.virt.libvirt.driver.LOG.exception')
+ @mock.patch('nova.objects.instance.InstanceList.get_by_host')
+ @mock.patch('nova.virt.libvirt.host.Host.get_hostname', new=mock.Mock())
+ def test_register_all_undefined_details_unknown_failure(
+ self, mock_get_by_host, mock_log_exc
+ ):
+ instance = self._create_instance()
+ mock_get_by_host.return_value = [instance]
+
+ # Assert that we swallow anything raised below us
+ with mock.patch.object(
+ self.drvr,
+ '_register_undefined_instance_details',
+ side_effect=test.TestingException()
+ ):
+ self.drvr._register_all_undefined_instance_details()
+
+ # Assert that we logged the failure
+ self.assertEqual(1, mock_log_exc.call_count)
+ self.assertIn('Ignoring unknown failure while attempting '
+ 'to save the defaults for unregistered image properties',
+ mock_log_exc.call_args.args[0])
+
+ @mock.patch('nova.virt.libvirt.driver.LOG.exception')
+ @mock.patch('nova.objects.instance.Instance.save')
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest', new=mock.Mock())
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList.'
+ 'get_by_instance_uuid')
+ @mock.patch('nova.objects.instance.InstanceList.get_by_host')
+ @mock.patch('nova.virt.libvirt.host.Host.get_hostname', new=mock.Mock())
+ def test_register_all_undefined_details_unknown_failure_finding_default(
+ self, mock_get_by_host, mock_get_bdms, mock_save, mock_log_exc
+ ):
+ instance = self._create_instance()
+ mock_get_by_host.return_value = [instance]
+ mock_get_bdms.return_value = []
+
+ # Assert that we swallow anything raised below us
+ with mock.patch.object(
+ self.drvr,
+ '_find_default_for_image_property',
+ side_effect=test.TestingException()
+ ):
+ self.drvr._register_all_undefined_instance_details()
+
+ # Assert that we logged the failures (once for each unregistered
+ # image property)
+ self.assertEqual(len(libvirt_driver.REGISTER_IMAGE_PROPERTY_DEFAULTS),
+ mock_log_exc.call_count)
+ self.assertIn('Ignoring unknown failure while attempting '
+ 'to find the default of',
+ mock_log_exc.call_args.args[0])
+
+ # Assert that we updated the instance
+ mock_save.assert_called_once_with()
+
+ @mock.patch('nova.objects.instance.Instance.save',
+ new=mock.NonCallableMock())
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest',
+ new=mock.NonCallableMock())
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList.'
+ 'get_by_instance_uuid', new=mock.NonCallableMock())
+ def test_register_undefined_instance_details_nothing_to_register(self):
+ instance = self._create_instance()
+
+ # Set a value for all REGISTER_IMAGE_PROPERTY_DEFAULTS
+ for p in libvirt_driver.REGISTER_IMAGE_PROPERTY_DEFAULTS:
+ instance.system_metadata[f"image_{p}"] = 'foo'
+
+ # We should not have pulled bdms or updated the instance
+ self.drvr._register_undefined_instance_details(self.context, instance)
+
+ @mock.patch('nova.objects.instance.Instance.save')
+ @mock.patch('nova.virt.libvirt.host.Host.get_guest')
+ @mock.patch('nova.objects.block_device.BlockDeviceMappingList.'
+ 'get_by_instance_uuid')
+ def test_register_undefined_instance_details_disk_info_and_guest_config(
+ self, mock_get_bdms, mock_get_guest, mock_save
+ ):
+ instance = self._create_instance()
+ mock_get_bdms.return_value = []
+
+ # Test all props unregistered
+ with mock.patch.object(
+ self.drvr,
+ '_find_default_for_image_property'
+ ) as mock_find:
+ self.drvr._register_undefined_instance_details(self.context,
+ instance)
+ # We should have pulled bdms
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ # We should have pulled disk_info
+ self.assertIsNotNone(mock_find.call_args.args[2])
+ # We should have pulled guest config
+ mock_get_guest.return_value.get_config.assert_called_once_with()
+ self.assertIsNotNone(mock_find.call_args.args[3])
+
+ # Set one of ['hw_disk_bus', 'hw_cdrom_bus']
+ # Set one of ['hw_pointer_model', 'hw_input_bus']
+ mock_get_bdms.reset_mock()
+ mock_get_guest.reset_mock()
+ instance.system_metadata['image_hw_disk_bus'] = 'scsi'
+ instance.system_metadata['image_hw_pointer_model'] = None
+ with mock.patch.object(
+ self.drvr,
+ '_find_default_for_image_property'
+ ) as mock_find:
+ self.drvr._register_undefined_instance_details(self.context,
+ instance)
+ # We should have pulled bdms
+ mock_get_bdms.assert_called_once_with(self.context, instance.uuid)
+ # We should have pulled disk_info
+ self.assertIsNotNone(mock_find.call_args.args[2])
+ # We should have pulled guest config
+ mock_get_guest.return_value.get_config.assert_called_once_with()
+ self.assertIsNotNone(mock_find.call_args.args[3])
+
+ # Set the other, now we have both ['hw_disk_bus', 'hw_cdrom_bus']
+ # Set the other, now we have both ['hw_pointer_model', 'hw_input_bus']
+ mock_get_bdms.reset_mock()
+ mock_get_guest.reset_mock()
+ instance.system_metadata['image_hw_cdrom_bus'] = 'scsi'
+ instance.system_metadata['image_hw_input_bus'] = None
+ with mock.patch.object(
+ self.drvr,
+ '_find_default_for_image_property'
+ ) as mock_find:
+ self.drvr._register_undefined_instance_details(self.context,
+ instance)
+ # We should not have pulled bdms at all
+ mock_get_bdms.assert_not_called()
+ # And disk_info should not have been pulled
+ self.assertIsNone(mock_find.call_args.args[2])
+ # We should not have pulled guest config
+ mock_get_guest.return_value.assert_not_called()
+ self.assertIsNone(mock_find.call_args.args[3])
+
+ def test_set_features_windows(self):
+ self.flags(virt_type='kvm', group='libvirt')
+ guest = vconfig.LibvirtConfigGuest()
+ self.drvr._set_features(
+ guest, 'windows',
+ objects.ImageMeta(
+ properties=objects.ImageMetaProps()
+ ),
+ objects.Flavor(extra_specs={})
+ )
+ features = guest.features
+ hv = None
+ for feature in features:
+ if feature.root_name == 'hyperv':
+ hv = feature
+ self.assertTrue(hv.relaxed)
+ self.assertTrue(hv.vapic)
+ self.assertTrue(hv.spinlocks)
+ self.assertEqual(8191, hv.spinlock_retries)
+ self.assertTrue(hv.vpindex)
+ self.assertTrue(hv.runtime)
+ self.assertTrue(hv.synic)
+ self.assertTrue(hv.reset)
+ self.assertTrue(hv.frequencies)
+ self.assertTrue(hv.reenlightenment)
+ self.assertTrue(hv.tlbflush)
+ self.assertTrue(hv.ipi)
+ self.assertTrue(hv.evmcs)
+
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
"""Test for LibvirtDriver.get_all_volume_usage."""
@@ -27786,7 +28874,7 @@ class _BaseSnapshotTests(test.NoDBTestCase):
@mock.patch.object(host.Host, '_get_domain')
@mock.patch('nova.virt.libvirt.utils.get_disk_size',
new=mock.Mock(return_value=0))
- @mock.patch('nova.virt.libvirt.utils.create_cow_image',
+ @mock.patch('nova.virt.libvirt.utils.create_image',
new=mock.Mock())
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file',
new=mock.Mock(return_value=None))
@@ -28112,13 +29200,11 @@ class LVMSnapshotTests(_BaseSnapshotTests):
new=mock.Mock(return_value=None))
@mock.patch('nova.virt.libvirt.utils.get_disk_type_from_path',
new=mock.Mock(return_value='lvm'))
- @mock.patch('nova.virt.libvirt.utils.file_open',
- side_effect=[io.BytesIO(b''), io.BytesIO(b'')])
@mock.patch.object(libvirt_driver.imagebackend.images,
'convert_image')
@mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
def _test_lvm_snapshot(self, disk_format, mock_volume_info,
- mock_convert_image, mock_file_open):
+ mock_convert_image):
self.flags(images_type='lvm',
images_volume_group='nova-vg', group='libvirt')
diff --git a/nova/tests/unit/virt/libvirt/test_guest.py b/nova/tests/unit/virt/libvirt/test_guest.py
index 70d438d816..5b181b8f06 100644
--- a/nova/tests/unit/virt/libvirt/test_guest.py
+++ b/nova/tests/unit/virt/libvirt/test_guest.py
@@ -14,7 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_service import fixture as service_fixture
from oslo_utils import encodeutils
@@ -403,9 +404,21 @@ class GuestTestCase(test.NoDBTestCase):
self.assertIsNotNone(
self.guest.get_interface_by_cfg(
cfg, from_persistent_config=True))
+ cfg = vconfig.LibvirtConfigGuestInterface()
+ # NOTE(sean-k-mooney): a default constructed object is not valid
+ # to pass to get_interface_by_cfg, so we just modify the xml to
+ # make it not match
+ cfg.parse_str("""
+ <interface type="wont_match">
+ <mac address="fa:16:3e:f9:af:ae"/>
+ <model type="virtio"/>
+ <driver name="qemu"/>
+ <source bridge="qbr84008d03-11"/>
+ <target dev="tap84008d03-11"/>
+ </interface>""")
self.assertIsNone(
self.guest.get_interface_by_cfg(
- vconfig.LibvirtConfigGuestInterface(),
+ cfg,
from_persistent_config=True))
self.domain.XMLDesc.assert_has_calls(
[
@@ -1040,3 +1053,25 @@ class JobInfoTestCase(test.NoDBTestCase):
mock_stats.assert_called_once_with()
mock_info.assert_called_once_with()
+
+ @mock.patch.object(fakelibvirt.virDomain, "jobInfo")
+ @mock.patch.object(fakelibvirt.virDomain, "jobStats")
+ def test_job_stats_no_ram(self, mock_stats, mock_info):
+ mock_stats.side_effect = fakelibvirt.make_libvirtError(
+ fakelibvirt.libvirtError,
+ "internal error: migration was active, but no RAM info was set",
+ error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR,
+ error_message="migration was active, but no RAM info was set")
+
+ info = self.guest.get_job_info()
+
+ self.assertIsInstance(info, libvirt_guest.JobInfo)
+ self.assertEqual(fakelibvirt.VIR_DOMAIN_JOB_NONE, info.type)
+ self.assertEqual(0, info.time_elapsed)
+ self.assertEqual(0, info.time_remaining)
+ self.assertEqual(0, info.memory_total)
+ self.assertEqual(0, info.memory_processed)
+ self.assertEqual(0, info.memory_remaining)
+
+ mock_stats.assert_called_once_with()
+ self.assertFalse(mock_info.called)
diff --git a/nova/tests/unit/virt/libvirt/test_host.py b/nova/tests/unit/virt/libvirt/test_host.py
index 192909d721..a76dc83105 100644
--- a/nova/tests/unit/virt/libvirt/test_host.py
+++ b/nova/tests/unit/virt/libvirt/test_host.py
@@ -15,11 +15,12 @@
# under the License.
import os
+from unittest import mock
+import ddt
import eventlet
from eventlet import greenthread
from eventlet import tpool
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import uuidutils
@@ -71,11 +72,10 @@ class HostTestCase(test.NoDBTestCase):
self.useFixture(nova_fixtures.LibvirtFixture())
self.host = host.Host("qemu:///system")
- @mock.patch("nova.virt.libvirt.host.Host._init_events")
- def test_repeat_initialization(self, mock_init_events):
+ def test_repeat_initialization(self):
for i in range(3):
self.host.initialize()
- mock_init_events.assert_called_once_with()
+ self.host._init_events.assert_called_once_with()
@mock.patch.object(fakelibvirt.virConnect, "registerCloseCallback")
def test_close_callback(self, mock_close):
@@ -1052,6 +1052,12 @@ Active: 8381604 kB
'iowait': 6121490000000},
stats)
+ @mock.patch.object(fakelibvirt.virConnect, "getCPUMap")
+ def test_get_available_cpus(self, mock_map):
+ mock_map.return_value = (4, [True, True, False, False], None)
+ result = self.host.get_available_cpus()
+ self.assertEqual(result, {0, 1, 2, 3})
+
@mock.patch.object(fakelibvirt.virConnect, "defineXML")
def test_write_instance_config(self, mock_defineXML):
fake_dom_xml = """
@@ -1095,6 +1101,48 @@ Active: 8381604 kB
guest = self.host.write_instance_config(fake_dom_xml)
self.assertIsInstance(guest, libvirt_guest.Guest)
+ def test_check_machine_type_invalid(self):
+ fake_dom_xml = u"""
+ <capabilities>
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name="alpha">
+ <emulator>/usr/bin/qemu-system-alpha</emulator>
+ <machine maxCpus="4">q35</machine>
+ <machine maxCpus="1">integratorcp</machine>
+ <machine maxCpus="1">versatileab</machine>
+ <domain type="qemu"/>
+ </arch>
+ </guest>
+ </capabilities>
+ """
+
+ self.assertRaises(
+ exception.InvalidMachineType,
+ self.host._check_machine_type, fake_dom_xml, 'Q35'
+ )
+
+ def test_check_machine_type_valid(self):
+ fake_dom_xml = u"""
+ <capabilities>
+ <guest>
+ <os_type>hvm</os_type>
+ <arch name="alpha">
+ <emulator>/usr/bin/qemu-system-alpha</emulator>
+ <machine maxCpus="4">q35</machine>
+ <machine maxCpus="1">integratorcp</machine>
+ <machine maxCpus="1">versatileab</machine>
+ <domain type="qemu"/>
+ </arch>
+ </guest>
+ </capabilities>
+ """
+
+ self.assertIsNone(
+ self.host._check_machine_type(fake_dom_xml, 'q35'),
+ "None msg"
+ )
+
@mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
def test_device_lookup_by_name(self, mock_nodeDeviceLookupByName):
self.host.device_lookup_by_name("foo")
@@ -1113,13 +1161,14 @@ Active: 8381604 kB
expect_vf = ["rx", "tx", "sg", "tso", "gso", "gro", "rxvlan", "txvlan"]
self.assertEqual(expect_vf, actualvf)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
- def test_get_pcidev_info_non_nic(self, mock_get_ifname):
+ def test_get_pcidev_info_non_nic(self):
+ pci_utils.get_mac_by_pci_address.side_effect = (
+ exception.PciDeviceNotFoundById('0000:04:00.3'))
dev_name = "pci_0000_04_11_7"
pci_dev = fakelibvirt.NodeDevice(
self.host._get_connection(),
xml=fake_libvirt_data._fake_NodeDevXml[dev_name])
- actual_vf = self.host._get_pcidev_info(dev_name, pci_dev, [], [])
+ actual_vf = self.host._get_pcidev_info(dev_name, pci_dev, [], [], [])
expect_vf = {
"dev_id": dev_name, "address": "0000:04:11.7",
"product_id": '1520', "numa_node": 0,
@@ -1128,14 +1177,15 @@ Active: 8381604 kB
'parent_addr': '0000:04:00.3',
}
self.assertEqual(expect_vf, actual_vf)
- mock_get_ifname.assert_not_called()
+ pci_utils.get_ifname_by_pci_address.assert_not_called()
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='ens1')
- def test_get_pcidev_info(self, mock_get_ifname):
+ def test_get_pcidev_info(self):
+ pci_utils.get_ifname_by_pci_address.return_value = 'ens1'
devs = {
"pci_0000_04_00_3", "pci_0000_04_10_7", "pci_0000_04_11_7",
- "pci_0000_04_00_1", "pci_0000_03_00_0", "pci_0000_03_00_1"
+ "pci_0000_04_00_1", "pci_0000_03_00_0", "pci_0000_03_00_1",
+ "pci_0000_82_00_0", "pci_0000_82_00_3", "pci_0001_82_00_3",
+ "pci_0002_82_00_3",
}
node_devs = {}
for dev_name in devs:
@@ -1150,11 +1200,13 @@ Active: 8381604 kB
xml=fake_libvirt_data._fake_NodeDevXml[child]))
net_devs = [
dev for dev in node_devs.values() if dev.name() not in devs]
+ pci_devs = [
+ dev for dev in node_devs.values() if dev.name() in devs]
name = "pci_0000_04_00_3"
- actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
- expect_vf = {
+ actual_pf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], [])
+ expect_pf = {
"dev_id": "pci_0000_04_00_3",
"address": "0000:04:00.3",
"product_id": '1521',
@@ -1162,12 +1214,14 @@ Active: 8381604 kB
"vendor_id": '8086',
"label": 'label_8086_1521',
"dev_type": obj_fields.PciDeviceType.SRIOV_PF,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
}
- self.assertEqual(expect_vf, actual_vf)
+ self.assertEqual(expect_pf, actual_pf)
name = "pci_0000_04_10_7"
actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
+ name, node_devs[name], net_devs, [], [])
expect_vf = {
"dev_id": "pci_0000_04_10_7",
"address": "0000:04:10.7",
@@ -1180,13 +1234,15 @@ Active: 8381604 kB
"parent_ifname": "ens1",
"capabilities": {
"network": ["rx", "tx", "sg", "tso", "gso", "gro",
- "rxvlan", "txvlan"]},
+ "rxvlan", "txvlan"],
+ "sriov": {"pf_mac_address": "52:54:00:1e:59:c6",
+ "vf_num": 1}},
}
self.assertEqual(expect_vf, actual_vf)
name = "pci_0000_04_11_7"
actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
+ name, node_devs[name], net_devs, [], [])
expect_vf = {
"dev_id": "pci_0000_04_11_7",
"address": "0000:04:11.7",
@@ -1198,14 +1254,16 @@ Active: 8381604 kB
"parent_addr": '0000:04:00.3',
"capabilities": {
"network": ["rx", "tx", "sg", "tso", "gso", "gro",
- "rxvlan", "txvlan"]},
+ "rxvlan", "txvlan"],
+ "sriov": {"pf_mac_address": "52:54:00:1e:59:c6",
+ "vf_num": 1}},
"parent_ifname": "ens1",
}
self.assertEqual(expect_vf, actual_vf)
name = "pci_0000_04_00_1"
actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
+ name, node_devs[name], net_devs, [], [])
expect_vf = {
"dev_id": "pci_0000_04_00_1",
"address": "0000:04:00.1",
@@ -1218,9 +1276,9 @@ Active: 8381604 kB
self.assertEqual(expect_vf, actual_vf)
name = "pci_0000_03_00_0"
- actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
- expect_vf = {
+ actual_pf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], [])
+ expect_pf = {
"dev_id": "pci_0000_03_00_0",
"address": "0000:03:00.0",
"product_id": '1013',
@@ -1228,13 +1286,15 @@ Active: 8381604 kB
"vendor_id": '15b3',
"label": 'label_15b3_1013',
"dev_type": obj_fields.PciDeviceType.SRIOV_PF,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
}
- self.assertEqual(expect_vf, actual_vf)
+ self.assertEqual(expect_pf, actual_pf)
name = "pci_0000_03_00_1"
- actual_vf = self.host._get_pcidev_info(
- name, node_devs[name], net_devs, [])
- expect_vf = {
+ actual_pf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], [])
+ expect_pf = {
"dev_id": "pci_0000_03_00_1",
"address": "0000:03:00.1",
"product_id": '1013',
@@ -1242,7 +1302,97 @@ Active: 8381604 kB
"vendor_id": '15b3',
"label": 'label_15b3_1013',
"dev_type": obj_fields.PciDeviceType.SRIOV_PF,
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
}
+ self.assertEqual(expect_pf, actual_pf)
+
+ # Parent PF with a VPD cap.
+ name = "pci_0000_82_00_0"
+ actual_pf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], pci_devs)
+ expect_pf = {
+ "dev_id": "pci_0000_82_00_0",
+ "address": "0000:82:00.0",
+ "product_id": "a2d6",
+ "numa_node": 1,
+ "vendor_id": "15b3",
+ "label": "label_15b3_a2d6",
+ "dev_type": obj_fields.PciDeviceType.SRIOV_PF,
+ "capabilities": {
+ # Should be obtained from the parent PF in this case.
+ "vpd": {"card_serial_number": "MT2113X00000"}},
+ # value defined in the LibvirtFixture
+ "mac_address": "52:54:00:1e:59:c6",
+ }
+ self.assertEqual(expect_pf, actual_pf)
+
+ # A VF without a VPD cap with a parent PF that has a VPD cap.
+ name = "pci_0000_82_00_3"
+ actual_vf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], pci_devs)
+ expect_vf = {
+ "dev_id": "pci_0000_82_00_3",
+ "address": "0000:82:00.3",
+ "parent_addr": "0000:82:00.0",
+ "product_id": "101e",
+ "numa_node": 1,
+ "vendor_id": "15b3",
+ "label": "label_15b3_101e",
+ "dev_type": obj_fields.PciDeviceType.SRIOV_VF,
+ "parent_ifname": "ens1",
+ "capabilities": {
+ "network": ["rx", "tx", "sg", "tso", "gso", "gro", "rxvlan",
+ "txvlan", "rxhash"],
+ "sriov": {"pf_mac_address": "52:54:00:1e:59:c6",
+ "vf_num": 1},
+ # Should be obtained from the parent PF in this case.
+ "vpd": {"card_serial_number": "MT2113X00000"}},
+ }
+ self.assertEqual(expect_vf, actual_vf)
+
+ # A VF with a VPD cap without a test parent dev (used to check the
+ # VPD code path when a VF's own VPD capability is used).
+ name = "pci_0001_82_00_3"
+ actual_vf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], pci_devs)
+ expect_vf = {
+ "dev_id": "pci_0001_82_00_3",
+ "address": "0001:82:00.3",
+ "parent_addr": "0001:82:00.0",
+ "product_id": "101e",
+ "numa_node": 1,
+ "vendor_id": "15b3",
+ "label": "label_15b3_101e",
+ "dev_type": obj_fields.PciDeviceType.SRIOV_VF,
+ "capabilities": {
+ # Should be obtained from the VF's own VPD capability in this case.
+ "vpd": {"card_serial_number": "MT2113XBEEF0"}},
+ }
+
+ # A VF without a VPD cap and without a test parent dev
+ # (used to check the code path where a VF VPD capability is
+ # checked but is not present and a parent PF info is not available).
+ name = "pci_0002_82_00_3"
+ actual_vf = self.host._get_pcidev_info(
+ name, node_devs[name], net_devs, [], pci_devs)
+ expect_vf = {
+ "dev_id": "pci_0002_82_00_3",
+ "address": "0002:82:00.3",
+ "parent_addr": "0002:82:00.0",
+ "product_id": "101e",
+ "numa_node": 1,
+ "vendor_id": "15b3",
+ "label": "label_15b3_101e",
+ "dev_type": obj_fields.PciDeviceType.SRIOV_VF,
+ 'parent_ifname': 'ens1',
+ "capabilities": {
+ "network": ["rx", "tx", "sg", "tso", "gso", "gro",
+ "rxvlan", "txvlan", "rxhash"],
+ "sriov": {"pf_mac_address": "52:54:00:1e:59:c6",
+ "vf_num": 1}},
+ }
+
self.assertEqual(expect_vf, actual_vf)
def test_list_pci_devices(self):
@@ -1469,25 +1619,59 @@ Active: 8381604 kB
self.host.compare_cpu("cpuxml")
mock_compareCPU.assert_called_once_with("cpuxml", 0)
- def test_is_cpu_control_policy_capable_ok(self):
+ def test_is_cpu_control_policy_capable_via_neither(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=0))
+ self.assertFalse(self.host.is_cpu_control_policy_capable())
+
+ def test_is_cpu_control_policy_capable_via_cgroupsv1(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=1))
+ self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+ def test_is_cpu_control_policy_capable_via_cgroupsv2(self):
+ self.useFixture(nova_fixtures.CGroupsFixture(version=2))
+ self.assertTrue(self.host.is_cpu_control_policy_capable())
+
+ def test_has_cgroupsv1_cpu_controller_ok(self):
m = mock.mock_open(
- read_data="""cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
- with mock.patch('builtins.open', m, create=True):
- self.assertTrue(self.host.is_cpu_control_policy_capable())
+ read_data=(
+ "cg /cgroup/cpu,cpuacct cg opt1,cpu,opt3 0 0"
+ "cg /cgroup/memory cg opt1,opt2 0 0"
+ )
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertTrue(self.host._has_cgroupsv1_cpu_controller())
+
+ def test_has_cgroupsv1_cpu_controller_ko(self):
+ m = mock.mock_open(
+ read_data=(
+ "cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0"
+ "cg /cgroup/memory cg opt1,opt2 0 0"
+ )
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
+
+ @mock.patch("builtins.open", side_effect=IOError)
+ def test_has_cgroupsv1_cpu_controller_ioerror(self, _):
+ self.assertFalse(self.host._has_cgroupsv1_cpu_controller())
+
+ def test_has_cgroupsv2_cpu_controller_ok(self):
+ m = mock.mock_open(
+ read_data="cpuset cpu io memory hugetlb pids rdma misc"
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertTrue(self.host._has_cgroupsv2_cpu_controller())
- def test_is_cpu_control_policy_capable_ko(self):
+ def test_has_cgroupsv2_cpu_controller_ko(self):
m = mock.mock_open(
- read_data="""cg /cgroup/cpu,cpuacct cg opt1,opt2,opt3 0 0
-cg /cgroup/memory cg opt1,opt2 0 0
-""")
- with mock.patch('builtins.open', m, create=True):
- self.assertFalse(self.host.is_cpu_control_policy_capable())
+ read_data="memory pids"
+ )
+ with mock.patch("builtins.open", m, create=True):
+ self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
- @mock.patch('builtins.open', side_effect=IOError)
- def test_is_cpu_control_policy_capable_ioerror(self, mock_open):
- self.assertFalse(self.host.is_cpu_control_policy_capable())
+ @mock.patch("builtins.open", side_effect=IOError)
+ def test_has_cgroupsv2_cpu_controller_ioerror(self, _):
+ self.assertFalse(self.host._has_cgroupsv2_cpu_controller())
def test_get_canonical_machine_type(self):
# this test relies on configuration from the FakeLibvirtFixture
@@ -1737,6 +1921,16 @@ cg /cgroup/memory cg opt1,opt2 0 0
"""
self.assertTrue(self.host.supports_secure_boot)
+ @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
+ def test_supports_remote_managed_ports__true(self, mock_libversion):
+ mock_libversion.return_value = 7009000
+ self.assertTrue(self.host.supports_remote_managed_ports)
+
+ @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
+ def test_supports_remote_managed_ports__false(self, mock_libversion):
+ mock_libversion.return_value = 7008000
+ self.assertFalse(self.host.supports_remote_managed_ports)
+
@mock.patch.object(host.Host, 'loaders', new_callable=mock.PropertyMock)
@mock.patch.object(host.Host, 'get_canonical_machine_type')
def test_get_loader(self, mock_get_mtype, mock_loaders):
@@ -1811,6 +2005,14 @@ cg /cgroup/memory cg opt1,opt2 0 0
loader = self.host.get_loader('x86_64', 'q35', has_secure_boot=True)
self.assertIsNotNone(loader)
+ # check that SMM bool is false as we don't need it
+ self.assertFalse(loader[2])
+
+ # check that we get SMM bool correctly (True) when required
+ loaders[0]['features'].append('requires-smm')
+ loader = self.host.get_loader('x86_64', 'q35', has_secure_boot=True)
+ self.assertTrue(loader[2])
+
# while it should fail here since we don't want it now
self.assertRaises(
exception.UEFINotSupported,
@@ -1831,6 +2033,7 @@ class TestLibvirtSEV(test.NoDBTestCase):
self.host = host.Host("qemu:///system")
+@ddt.ddt
class TestLibvirtSEVUnsupported(TestLibvirtSEV):
@mock.patch.object(os.path, 'exists', return_value=False)
def test_kernel_parameter_missing(self, fake_exists):
@@ -1838,19 +2041,26 @@ class TestLibvirtSEVUnsupported(TestLibvirtSEV):
fake_exists.assert_called_once_with(
'/sys/module/kvm_amd/parameters/sev')
+ @ddt.data(
+ ('0\n', False),
+ ('N\n', False),
+ ('1\n', True),
+ ('Y\n', True),
+ )
+ @ddt.unpack
@mock.patch.object(os.path, 'exists', return_value=True)
- @mock.patch('builtins.open', mock.mock_open(read_data="0\n"))
- def test_kernel_parameter_zero(self, fake_exists):
- self.assertFalse(self.host._kernel_supports_amd_sev())
- fake_exists.assert_called_once_with(
- '/sys/module/kvm_amd/parameters/sev')
-
- @mock.patch.object(os.path, 'exists', return_value=True)
- @mock.patch('builtins.open', mock.mock_open(read_data="1\n"))
- def test_kernel_parameter_one(self, fake_exists):
- self.assertTrue(self.host._kernel_supports_amd_sev())
- fake_exists.assert_called_once_with(
- '/sys/module/kvm_amd/parameters/sev')
+ def test_kernel_parameter(
+ self, sev_param_value, expected_support, mock_exists
+ ):
+ with mock.patch(
+ 'builtins.open', mock.mock_open(read_data=sev_param_value)
+ ):
+ self.assertIs(
+ expected_support,
+ self.host._kernel_supports_amd_sev()
+ )
+ mock_exists.assert_called_once_with(
+ '/sys/module/kvm_amd/parameters/sev')
@mock.patch.object(os.path, 'exists', return_value=True)
@mock.patch('builtins.open', mock.mock_open(read_data="1\n"))
diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py
index decb27f982..0dc1009c92 100644
--- a/nova/tests/unit/virt/libvirt/test_imagebackend.py
+++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py
@@ -19,11 +19,11 @@ import inspect
import os
import shutil
import tempfile
+from unittest import mock
from castellan import key_manager
import ddt
import fixtures
-import mock
from oslo_concurrency import lockutils
from oslo_config import fixture as config_fixture
from oslo_service import loopingcall
@@ -163,7 +163,13 @@ class _ImageTestCase(object):
self.assertEqual(fs.source_file, image.path)
def test_libvirt_info(self):
- image = self.image_class(self.INSTANCE, self.NAME)
+ disk_info = {
+ 'bus': 'virtio',
+ 'dev': '/dev/vda',
+ 'type': 'cdrom',
+ }
+ image = self.image_class(
+ self.INSTANCE, self.NAME, disk_info_mapping=disk_info)
extra_specs = {
'quota:disk_read_bytes_sec': 10 * units.Mi,
'quota:disk_read_iops_sec': 1 * units.Ki,
@@ -172,15 +178,9 @@ class _ImageTestCase(object):
'quota:disk_total_bytes_sec': 30 * units.Mi,
'quota:disk_total_iops_sec': 3 * units.Ki,
}
- disk_info = {
- 'bus': 'virtio',
- 'dev': '/dev/vda',
- 'type': 'cdrom',
- }
disk = image.libvirt_info(
- disk_info, cache_mode="none", extra_specs=extra_specs,
- boot_order="1")
+ cache_mode="none", extra_specs=extra_specs, boot_order="1")
self.assertIsInstance(disk, vconfig.LibvirtConfigGuestDisk)
self.assertEqual("/dev/vda", disk.target_dev)
@@ -205,16 +205,18 @@ class _ImageTestCase(object):
get_disk_size.assert_called_once_with(image.path)
def _test_libvirt_info_scsi_with_unit(self, disk_unit):
- # The address should be set if bus is scsi and unit is set.
- # Otherwise, it should not be set at all.
- image = self.image_class(self.INSTANCE, self.NAME)
disk_info = {
'bus': 'scsi',
'dev': '/dev/sda',
'type': 'disk',
}
+ # The address should be set if bus is scsi and unit is set.
+ # Otherwise, it should not be set at all.
+ image = self.image_class(
+ self.INSTANCE, self.NAME, disk_info_mapping=disk_info)
+
disk = image.libvirt_info(
- disk_info, cache_mode='none', extra_specs={}, disk_unit=disk_unit)
+ cache_mode='none', extra_specs={}, disk_unit=disk_unit)
if disk_unit:
self.assertEqual(0, disk.device_addr.controller)
self.assertEqual(disk_unit, disk.device_addr.unit)
@@ -523,7 +525,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_exists.assert_has_calls(exist_calls)
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Image, 'verify_base_size')
@mock.patch('nova.privsep.path.utime')
@@ -544,14 +546,14 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
mock_create.assert_called_once_with(
- self.TEMPLATE_PATH, self.PATH, self.SIZE)
+ self.PATH, 'qcow2', self.SIZE, backing_file=self.TEMPLATE_PATH)
fn.assert_called_once_with(target=self.TEMPLATE_PATH)
mock_exist.assert_has_calls(exist_calls)
self.assertTrue(mock_sync.called)
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@mock.patch.object(imagebackend.Qcow2, 'get_disk_size')
@@ -576,7 +578,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
self.assertFalse(mock_extend.called)
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
@@ -615,7 +617,7 @@ class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
mock_utime.assert_called()
@mock.patch.object(imagebackend.utils, 'synchronized')
- @mock.patch('nova.virt.libvirt.utils.create_cow_image')
+ @mock.patch('nova.virt.libvirt.utils.create_image')
@mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
@mock.patch.object(imagebackend.disk, 'extend')
@mock.patch.object(os.path, 'exists', side_effect=[])
diff --git a/nova/tests/unit/virt/libvirt/test_imagecache.py b/nova/tests/unit/virt/libvirt/test_imagecache.py
index f6e592231d..a005a6cf20 100644
--- a/nova/tests/unit/virt/libvirt/test_imagecache.py
+++ b/nova/tests/unit/virt/libvirt/test_imagecache.py
@@ -18,8 +18,8 @@ import contextlib
import io
import os
import time
+from unittest import mock
-import mock
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_log import formatters
diff --git a/nova/tests/unit/virt/libvirt/test_machine_type_utils.py b/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
index 42043ac495..08c54d02d3 100644
--- a/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_machine_type_utils.py
@@ -10,8 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils.fixture import uuidsentinel
from oslo_utils import uuidutils
diff --git a/nova/tests/unit/virt/libvirt/test_migration.py b/nova/tests/unit/virt/libvirt/test_migration.py
index f4e64fbe53..155c259986 100644
--- a/nova/tests/unit/virt/libvirt/test_migration.py
+++ b/nova/tests/unit/virt/libvirt/test_migration.py
@@ -15,9 +15,9 @@
from collections import deque
import copy
import textwrap
+from unittest import mock
from lxml import etree
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
@@ -28,6 +28,7 @@ from nova import objects
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.fixtures import libvirt as fakelibvirt
+from nova.tests.unit.virt.libvirt import test_driver
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
@@ -80,16 +81,51 @@ class UtilityMigrationTestCase(test.NoDBTestCase):
get_volume_config = mock.MagicMock()
mock_guest.get_xml_desc.return_value = '<domain></domain>'
- migration.get_updated_guest_xml(
- mock.sentinel.instance, mock_guest, data, get_volume_config)
+ instance = objects.Instance(**test_driver._create_test_instance())
+ migration.get_updated_guest_xml(instance, mock_guest, data,
+ get_volume_config)
mock_graphics.assert_called_once_with(mock.ANY, data)
mock_serial.assert_called_once_with(mock.ANY, data)
mock_volume.assert_called_once_with(
- mock.ANY, data, mock.sentinel.instance, get_volume_config)
+ mock.ANY, data, instance, get_volume_config)
mock_perf_events_xml.assert_called_once_with(mock.ANY, data)
mock_memory_backing.assert_called_once_with(mock.ANY, data)
self.assertEqual(1, mock_tostring.called)
+ def test_update_quota_xml(self):
+ old_xml = """<domain>
+ <name>fake-instance</name>
+ <cputune>
+ <shares>42</shares>
+ <period>1337</period>
+ </cputune>
+ </domain>"""
+ instance = objects.Instance(**test_driver._create_test_instance())
+ new_xml = migration._update_quota_xml(instance,
+ etree.fromstring(old_xml))
+ new_xml = etree.tostring(new_xml, encoding='unicode')
+ self.assertXmlEqual(
+ """<domain>
+ <name>fake-instance</name>
+ <cputune>
+ <period>1337</period>
+ </cputune>
+ </domain>""", new_xml)
+
+ def test_update_quota_xml_empty_cputune(self):
+ old_xml = """<domain>
+ <name>fake-instance</name>
+ <cputune>
+ <shares>42</shares>
+ </cputune>
+ </domain>"""
+ instance = objects.Instance(**test_driver._create_test_instance())
+ new_xml = migration._update_quota_xml(instance,
+ etree.fromstring(old_xml))
+ new_xml = etree.tostring(new_xml, encoding='unicode')
+ self.assertXmlEqual('<domain><name>fake-instance</name></domain>',
+ new_xml)
+
def test_update_device_resources_xml_vpmem(self):
# original xml for vpmems, /dev/dax0.1 and /dev/dax0.2 here
# are vpmem device path on source host
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
index 4e73c662c5..c648108f56 100644
--- a/nova/tests/unit/virt/libvirt/test_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -18,9 +18,9 @@ import grp
import os
import pwd
import tempfile
+from unittest import mock
import ddt
-import mock
import os_traits
from oslo_config import cfg
from oslo_utils import fileutils
@@ -103,33 +103,98 @@ class LibvirtUtilsTestCase(test.NoDBTestCase):
def test_valid_hostname_bad(self):
self.assertFalse(libvirt_utils.is_valid_hostname("foo/?com=/bin/sh"))
- @mock.patch('oslo_concurrency.processutils.execute')
- def test_create_image(self, mock_execute):
- libvirt_utils.create_image('raw', '/some/path', '10G')
- libvirt_utils.create_image('qcow2', '/some/stuff', '1234567891234')
- expected_args = [(('qemu-img', 'create', '-f', 'raw',
- '/some/path', '10G'),),
- (('qemu-img', 'create', '-f', 'qcow2',
- '/some/stuff', '1234567891234'),)]
- self.assertEqual(expected_args, mock_execute.call_args_list)
-
- @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('tempfile.NamedTemporaryFile')
@mock.patch('oslo_concurrency.processutils.execute')
@mock.patch('nova.virt.images.qemu_img_info')
- def test_create_cow_image(self, mock_info, mock_execute, mock_exists):
- mock_execute.return_value = ('stdout', None)
+ def _test_create_image(
+ self, path, disk_format, disk_size, mock_info, mock_execute,
+ mock_ntf, backing_file=None, encryption=None
+ ):
mock_info.return_value = mock.Mock(
file_format=mock.sentinel.backing_fmt,
- cluster_size=mock.sentinel.cluster_size)
- libvirt_utils.create_cow_image(mock.sentinel.backing_path,
- mock.sentinel.new_path)
- mock_info.assert_called_once_with(mock.sentinel.backing_path)
- mock_execute.assert_has_calls([mock.call(
- 'qemu-img', 'create', '-f', 'qcow2', '-o',
- 'backing_file=%s,backing_fmt=%s,cluster_size=%s' % (
- mock.sentinel.backing_path, mock.sentinel.backing_fmt,
- mock.sentinel.cluster_size),
- mock.sentinel.new_path)])
+ cluster_size=mock.sentinel.cluster_size,
+ )
+ fh = mock_ntf.return_value.__enter__.return_value
+
+ libvirt_utils.create_image(
+ path, disk_format, disk_size, backing_file=backing_file,
+ encryption=encryption,
+ )
+
+ cow_opts = []
+
+ if backing_file is None:
+ mock_info.assert_not_called()
+ else:
+ mock_info.assert_called_once_with(backing_file)
+ cow_opts = [
+ '-o',
+ f'backing_file={mock.sentinel.backing_file},'
+ f'backing_fmt={mock.sentinel.backing_fmt},'
+ f'cluster_size={mock.sentinel.cluster_size}',
+ ]
+
+ encryption_opts = []
+
+ if encryption:
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={fh.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
+ expected_args = (
+ 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f',
+ disk_format, *cow_opts, *encryption_opts, path,
+ )
+ if disk_size is not None:
+ expected_args += (disk_size,)
+
+ self.assertEqual([(expected_args,)], mock_execute.call_args_list)
+
+ def test_create_image_raw(self):
+ self._test_create_image('/some/path', 'raw', '10G')
+
+ def test_create_image_qcow2(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ )
+
+ def test_create_image_backing_file(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ backing_file=mock.sentinel.backing_file,
+ )
+
+ def test_create_image_size_none(self):
+ self._test_create_image(
+ '/some/stuff', 'qcow2', None,
+ backing_file=mock.sentinel.backing_file,
+ )
+
+ def test_create_image_encryption(self):
+ encryption = {
+ 'secret': 'a_secret',
+ 'format': 'luks',
+ }
+ self._test_create_image(
+ '/some/stuff', 'qcow2', '1234567891234',
+ encryption=encryption,
+ )
@ddt.unpack
@ddt.data({'fs_type': 'some_fs_type',
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
index 43504efeb5..6d87ed727c 100644
--- a/nova/tests/unit/virt/libvirt/test_vif.py
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -12,9 +12,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
from lxml import etree
-import mock
import os_vif
from os_vif import exception as osv_exception
from os_vif import objects as osv_objects
@@ -517,18 +518,17 @@ class LibvirtVifTestCase(test.NoDBTestCase):
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
- self.useFixture(nova_fixtures.LibvirtFixture(stub_os_vif=False))
+ self.libvirt = self.useFixture(
+ nova_fixtures.LibvirtFixture(stub_os_vif=False))
# os_vif.initialize is typically done in nova-compute startup
os_vif.initialize()
self.setup_os_vif_objects()
# multiqueue configuration is host OS specific
- _a = mock.patch('os.uname')
- self.mock_uname = _a.start()
+ self.mock_uname = self.libvirt.mock_uname
self.mock_uname.return_value = fakelibvirt.os_uname(
'Linux', '', '5.10.13-200-generic', '', 'x86_64')
- self.addCleanup(_a.stop)
def _get_node(self, xml):
doc = etree.fromstring(xml)
@@ -899,7 +899,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
@mock.patch.object(vif.designer, 'set_vif_guest_frontend_config',
wraps=vif.designer.set_vif_guest_frontend_config)
- def _test_model_sriov(self, vinc_type, mock_set):
+ def _test_model_sriov(self, vnic_type, mock_set):
"""Direct attach vNICs shouldn't retrieve info from image_meta."""
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
@@ -911,7 +911,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio'}})
conf = d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
- None, 'kvm', vinc_type)
+ None, 'kvm', vnic_type)
mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef',
None, None, None, None)
self.assertIsNone(conf.vhost_queues)
@@ -983,14 +983,9 @@ class LibvirtVifTestCase(test.NoDBTestCase):
self.vif_bridge,
self.vif_bridge['network']['bridge'])
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
- @mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1)
- @mock.patch('nova.privsep.linux_net.set_device_macaddr')
- @mock.patch('nova.privsep.linux_net.set_device_macaddr_and_vlan')
- def _test_hw_veb_op(self, op, vlan, mock_set_macaddr_and_vlan,
- mock_set_macaddr, mock_get_vf_num,
- mock_get_ifname):
- mock_get_ifname.side_effect = ['eth1', 'eth13']
+ def _test_hw_veb_op(self, op, vlan):
+ self.libvirt.mock_get_vf_num_by_pci_address.return_value = 1
+ pci_utils.get_ifname_by_pci_address.side_effect = ['eth1', 'eth13']
vlan_id = int(vlan)
port_state = 'up' if vlan_id > 0 else 'down'
mac = ('00:00:00:00:00:00' if op.__name__ == 'unplug'
@@ -1005,10 +1000,13 @@ class LibvirtVifTestCase(test.NoDBTestCase):
'set_macaddr': [mock.call('eth13', mac, port_state=port_state)]
}
op(self.instance, self.vif_hw_veb_macvtap)
- mock_get_ifname.assert_has_calls(calls['get_ifname'])
- mock_get_vf_num.assert_has_calls(calls['get_vf_num'])
- mock_set_macaddr.assert_has_calls(calls['set_macaddr'])
- mock_set_macaddr_and_vlan.assert_called_once_with(
+ pci_utils.get_ifname_by_pci_address.assert_has_calls(
+ calls['get_ifname'])
+ self.libvirt.mock_get_vf_num_by_pci_address.assert_has_calls(
+ calls['get_vf_num'])
+ self.libvirt.mock_set_device_macaddr.assert_has_calls(
+ calls['set_macaddr'])
+ self.libvirt.mock_set_device_macaddr_and_vlan.assert_called_once_with(
'eth1', 1, mock.ANY, vlan_id)
def test_plug_hw_veb(self):
@@ -1218,9 +1216,8 @@ class LibvirtVifTestCase(test.NoDBTestCase):
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_hostdev_physical)
- @mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
- return_value='eth1')
- def test_hw_veb_driver_macvtap(self, mock_get_ifname):
+ def test_hw_veb_driver_macvtap(self):
+ pci_utils.get_ifname_by_pci_address.return_value = 'eth1'
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb_macvtap)
node = self._get_node(xml)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
index 89a59f2f1a..06065322f6 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
@@ -11,8 +11,8 @@
# under the License.
import platform
+from unittest import mock
-import mock
from os_brick.initiator import connector
from nova.objects import fields as obj_fields
diff --git a/nova/tests/unit/virt/libvirt/volume/test_fs.py b/nova/tests/unit/virt/libvirt/volume/test_fs.py
index eaa6568999..5619dff589 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_fs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_fs.py
@@ -13,8 +13,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from nova import test
from nova import utils
diff --git a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
index f8a64abea5..bd516b1dd6 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
@@ -10,9 +10,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_brick import exception as os_brick_exception
from os_brick.initiator import connector
+from unittest import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import iscsi
diff --git a/nova/tests/unit/virt/libvirt/volume/test_lightos.py b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
new file mode 100644
index 0000000000..8a85d73059
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/volume/test_lightos.py
@@ -0,0 +1,79 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from unittest import mock
+
+from nova.tests.unit.virt.libvirt.volume import test_volume
+from nova.virt.libvirt.volume import lightos
+
+from os_brick import initiator
+
+
+class LibvirtLightVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
+
+ @mock.patch('nova.utils.get_root_helper')
+ @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory')
+ def test_libvirt_lightos_driver(self, mock_factory, mock_helper):
+ mock_helper.return_value = 'sudo'
+ lightos.LibvirtLightOSVolumeDriver(self.fake_host)
+ mock_factory.assert_called_once_with(
+ initiator.LIGHTOS, root_helper='sudo',
+ device_scan_attempts=5)
+
+ @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
+ new=mock.Mock())
+ def test_libvirt_lightos_driver_connect(self):
+ lightos_driver = lightos.LibvirtLightOSVolumeDriver(
+ self.fake_host)
+ config = {'server_ip': '127.0.0.1', 'server_port': 9898}
+ disk_info = {
+ 'id': '1234567',
+ 'name': 'aLightVolume',
+ 'conf': config}
+ connection_info = {'data': disk_info}
+ lightos_driver.connector.connect_volume.return_value = (
+ {'path': '/dev/dms1234567'})
+
+ lightos_driver.connect_volume(connection_info, None)
+
+ lightos_driver.connector.connect_volume.assert_called_once_with(
+ connection_info['data'])
+ self.assertEqual(
+ '/dev/dms1234567',
+ connection_info['data']['device_path'])
+
+ @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
+ new=mock.Mock(return_value=mock.Mock()))
+ def test_libvirt_lightos_driver_disconnect(self):
+ lightos_driver = lightos.LibvirtLightOSVolumeDriver(self.connr)
+ disk_info = {
+ 'path': '/dev/dms1234567', 'name': 'aLightosVolume',
+ 'type': 'raw', 'dev': 'vda1', 'bus': 'pci0',
+ 'device_path': '/dev/dms123456'}
+ connection_info = {'data': disk_info}
+ lightos_driver.disconnect_volume(connection_info, None)
+ lightos_driver.connector.disconnect_volume.assert_called_once_with(
+ disk_info, None)
+
+ @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
+ new=mock.Mock(return_value=mock.Mock()))
+ def test_libvirt_lightos_driver_get_config(self):
+ lightos_driver = lightos.LibvirtLightOSVolumeDriver(self.fake_host)
+ device_path = '/dev/fake-dev'
+ connection_info = {'data': {'device_path': device_path}}
+
+ conf = lightos_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(device_path, tree.find('./source').get('dev'))
+ self.assertEqual('raw', tree.find('./driver').get('type'))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_mount.py b/nova/tests/unit/virt/libvirt/volume/test_mount.py
index b618e090ba..8ecb117f05 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_mount.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_mount.py
@@ -15,10 +15,10 @@
import os.path
import threading
import time
+from unittest import mock
import eventlet
import fixtures
-import mock
from oslo_concurrency import processutils
from oslo_utils.fixture import uuidsentinel as uuids
diff --git a/nova/tests/unit/virt/libvirt/volume/test_net.py b/nova/tests/unit/virt/libvirt/volume/test_net.py
index a694351629..8d8167b3d7 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_net.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_net.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
import nova.conf
from nova.tests.unit.virt.libvirt.volume import test_volume
diff --git a/nova/tests/unit/virt/libvirt/volume/test_nfs.py b/nova/tests/unit/virt/libvirt/volume/test_nfs.py
index 16c41f5387..a98efaac1c 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_nfs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_nfs.py
@@ -11,8 +11,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova.tests.unit.virt.libvirt.volume import test_mount
diff --git a/nova/tests/unit/virt/libvirt/volume/test_nvme.py b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
index 0d1f23d7a2..3f593841fa 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_nvme.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_nvme.py
@@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import nvme
@@ -29,7 +29,21 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
nvme.LibvirtNVMEVolumeDriver(self.fake_host)
mock_factory.assert_called_once_with(
- initiator.NVME, 'sudo',
+ initiator.NVME, 'sudo', use_multipath=False,
+ device_scan_attempts=3)
+
+ @mock.patch('os.path.exists', return_value=True)
+ @mock.patch('nova.utils.get_root_helper')
+ @mock.patch('os_brick.initiator.connector.InitiatorConnector.factory')
+ def test_libvirt_nvme_driver_multipath(self, mock_factory, mock_helper,
+ exists):
+ self.flags(num_nvme_discover_tries=3, volume_use_multipath=True,
+ group='libvirt')
+ mock_helper.return_value = 'sudo'
+
+ nvme.LibvirtNVMEVolumeDriver(self.fake_host)
+ mock_factory.assert_called_once_with(
+ initiator.NVME, 'sudo', use_multipath=True,
device_scan_attempts=3)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
@@ -42,14 +56,15 @@ class LibvirtNVMEVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
'name': 'aNVMEVolume',
'conf': config}
connection_info = {'data': disk_info}
- with mock.patch.object(nvme_driver.connector,
- 'connect_volume',
- return_value={'path': '/dev/dms1234567'}):
- nvme_driver.connect_volume(connection_info, None)
- nvme_driver.connector.connect_volume.assert_called_once_with(
- connection_info['data'])
- self.assertEqual('/dev/dms1234567',
- connection_info['data']['device_path'])
+ nvme_driver.connector.connect_volume.return_value = (
+ {'path': '/dev/dms1234567'})
+
+ nvme_driver.connect_volume(connection_info, None)
+
+ nvme_driver.connector.connect_volume.assert_called_once_with(
+ connection_info['data'])
+ self.assertEqual(
+ '/dev/dms1234567', connection_info['data']['device_path'])
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory',
new=mock.Mock(return_value=mock.Mock()))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_quobyte.py b/nova/tests/unit/virt/libvirt/volume/test_quobyte.py
index 8a0c647fc8..bb3c86083c 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_quobyte.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_quobyte.py
@@ -16,9 +16,9 @@
import os
import traceback
+from unittest import mock
import ddt
-import mock
from oslo_concurrency import processutils
from oslo_utils import fileutils
import psutil
diff --git a/nova/tests/unit/virt/libvirt/volume/test_remotefs.py b/nova/tests/unit/virt/libvirt/volume/test_remotefs.py
index 62060bcf1e..67c126c2b1 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_remotefs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_remotefs.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_concurrency import processutils
from nova import test
diff --git a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
index 6d9247cd2d..f0fcba1deb 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_scaleio.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import scaleio
diff --git a/nova/tests/unit/virt/libvirt/volume/test_smbfs.py b/nova/tests/unit/virt/libvirt/volume/test_smbfs.py
index 2c3ea574a9..0fba137740 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_smbfs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_smbfs.py
@@ -11,8 +11,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova import utils
diff --git a/nova/tests/unit/virt/libvirt/volume/test_storpool.py b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
index e14954f148..678d4f8eb4 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_storpool.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_storpool.py
@@ -13,8 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_brick import initiator
+from unittest import mock
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import storpool as vol_sp
diff --git a/nova/tests/unit/virt/libvirt/volume/test_volume.py b/nova/tests/unit/virt/libvirt/volume/test_volume.py
index ac7bcf247d..9a3710a51d 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_volume.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_volume.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import ddt
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import exception
diff --git a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
index 883cebb55a..168efee944 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_vzstorage.py
@@ -11,8 +11,8 @@
# under the License.
import os
+from unittest import mock
-import mock
from os_brick.initiator import connector
from nova import exception
diff --git a/nova/tests/unit/virt/powervm/__init__.py b/nova/tests/unit/virt/powervm/__init__.py
deleted file mode 100644
index dedb6af7db..0000000000
--- a/nova/tests/unit/virt/powervm/__init__.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2014, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-from oslo_utils.fixture import uuidsentinel
-
-from nova.compute import power_state
-from nova.compute import vm_states
-from nova import objects
-
-
-TEST_FLAVOR = objects.flavor.Flavor(
- memory_mb=2048,
- swap=0,
- vcpu_weight=None,
- root_gb=10, id=2,
- name=u'm1.small',
- ephemeral_gb=0,
- rxtx_factor=1.0,
- flavorid=uuidsentinel.flav_id,
- vcpus=1)
-
-TEST_INSTANCE = objects.Instance(
- id=1,
- uuid=uuidsentinel.inst_id,
- display_name='Fake Instance',
- root_gb=10,
- ephemeral_gb=0,
- instance_type_id=TEST_FLAVOR.id,
- system_metadata={'image_os_distro': 'rhel'},
- host='host1',
- flavor=TEST_FLAVOR,
- task_state=None,
- vm_state=vm_states.STOPPED,
- power_state=power_state.SHUTDOWN,
-)
-
-IMAGE1 = {
- 'id': uuidsentinel.img_id,
- 'name': 'image1',
- 'size': 300,
- 'container_format': 'bare',
- 'disk_format': 'raw',
- 'checksum': 'b518a8ba2b152b5607aceb5703fac072',
-}
-TEST_IMAGE1 = objects.image_meta.ImageMeta.from_dict(IMAGE1)
diff --git a/nova/tests/unit/virt/powervm/disk/__init__.py b/nova/tests/unit/virt/powervm/disk/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/tests/unit/virt/powervm/disk/__init__.py
+++ /dev/null
diff --git a/nova/tests/unit/virt/powervm/disk/fake_adapter.py b/nova/tests/unit/virt/powervm/disk/fake_adapter.py
deleted file mode 100644
index c0b4962e54..0000000000
--- a/nova/tests/unit/virt/powervm/disk/fake_adapter.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.virt.powervm.disk import driver as disk_dvr
-
-
-class FakeDiskAdapter(disk_dvr.DiskAdapter):
- """A fake subclass of DiskAdapter.
-
- This is done so that the abstract methods/properties can be stubbed and the
- class can be instantiated for testing.
- """
-
- def _vios_uuids(self):
- pass
-
- def _disk_match_func(self, disk_type, instance):
- pass
-
- def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
- pass
-
- def capacity(self):
- pass
-
- def capacity_used(self):
- pass
-
- def detach_disk(self, instance):
- pass
-
- def delete_disks(self, storage_elems):
- pass
-
- def create_disk_from_image(self, context, instance, image_meta):
- pass
-
- def attach_disk(self, instance, disk_info, stg_ftsk):
- pass
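
The fake_adapter.py module removed above captured a small but useful testing idiom: subclass an abstract base class with no-op overrides so the class can be instantiated and its concrete helper methods exercised directly. A generic, self-contained sketch of the idea (the class bodies below are illustrative and unrelated to the removed PowerVM code):

    import abc
    import unittest


    class DiskAdapter(abc.ABC):

        @abc.abstractmethod
        def capacity(self):
            """Total capacity in GB."""

        def capacity_free(self, used):
            # Concrete helper on the ABC; this is what the tests exercise.
            return self.capacity() - used


    class FakeDiskAdapter(DiskAdapter):
        # Trivial override purely so the ABC can be instantiated in tests.
        def capacity(self):
            return 100


    class TestDiskAdapter(unittest.TestCase):
        def test_capacity_free(self):
            self.assertEqual(60, FakeDiskAdapter().capacity_free(40))
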
diff --git a/nova/tests/unit/virt/powervm/disk/test_driver.py b/nova/tests/unit/virt/powervm/disk/test_driver.py
deleted file mode 100644
index c27825801f..0000000000
--- a/nova/tests/unit/virt/powervm/disk/test_driver.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from pypowervm import const as pvm_const
-
-from nova import test
-from nova.tests.unit.virt.powervm.disk import fake_adapter
-
-
-class TestDiskAdapter(test.NoDBTestCase):
- """Unit Tests for the generic storage driver."""
-
- def setUp(self):
- super(TestDiskAdapter, self).setUp()
-
- # Return the mgmt uuid
- self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.mgmt.mgmt_uuid')).mock
- self.mgmt_uuid.return_value = 'mp_uuid'
-
- # The values (adapter and host uuid) are not used in the base.
- # Default them to None. We use the fake adapter here because we can't
- # instantiate DiskAdapter which is an abstract base class.
- self.st_adpt = fake_adapter.FakeDiskAdapter(None, None)
-
- @mock.patch("pypowervm.util.sanitize_file_name_for_api")
- def test_get_disk_name(self, mock_san):
- inst = mock.Mock()
- inst.configure_mock(name='a_name_that_is_longer_than_eight',
- uuid='01234567-abcd-abcd-abcd-123412341234')
-
- # Long
- self.assertEqual(mock_san.return_value,
- self.st_adpt._get_disk_name('type', inst))
- mock_san.assert_called_with(inst.name, prefix='type_',
- max_len=pvm_const.MaxLen.FILENAME_DEFAULT)
-
- mock_san.reset_mock()
-
- # Short
- self.assertEqual(mock_san.return_value,
- self.st_adpt._get_disk_name('type', inst, short=True))
- mock_san.assert_called_with('a_name_t_0123', prefix='t_',
- max_len=pvm_const.MaxLen.VDISK_NAME)
diff --git a/nova/tests/unit/virt/powervm/disk/test_localdisk.py b/nova/tests/unit/virt/powervm/disk/test_localdisk.py
deleted file mode 100644
index 25b8395bb2..0000000000
--- a/nova/tests/unit/virt/powervm/disk/test_localdisk.py
+++ /dev/null
@@ -1,312 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-
-from nova import exception
-from nova import test
-from oslo_utils.fixture import uuidsentinel as uuids
-from pypowervm import const as pvm_const
-from pypowervm.tasks import storage as tsk_stg
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova.virt.powervm.disk import driver as disk_dvr
-from nova.virt.powervm.disk import localdisk
-
-
-class TestLocalDisk(test.NoDBTestCase):
- """Unit Tests for the LocalDisk storage driver."""
-
- def setUp(self):
- super(TestLocalDisk, self).setUp()
- self.adpt = mock.Mock()
-
- # The mock VIOS needs to have scsi_mappings as a list. Internals are
- # set by individual test cases as needed.
- smaps = [mock.Mock()]
- self.vio_wrap = mock.create_autospec(
- pvm_vios.VIOS, instance=True, scsi_mappings=smaps,
- uuid='vios-uuid')
-
- # Return the mgmt uuid.
- self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.mgmt.mgmt_uuid', autospec=True)).mock
- self.mgmt_uuid.return_value = 'mgmt_uuid'
-
- self.pvm_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.get_pvm_uuid')).mock
- self.pvm_uuid.return_value = 'pvm_uuid'
-
- # Set up for the mocks for the disk adapter.
- self.mock_find_vg = self.useFixture(fixtures.MockPatch(
- 'pypowervm.tasks.storage.find_vg', autospec=True)).mock
- self.vg_uuid = uuids.vg_uuid
- self.vg = mock.Mock(spec=pvm_stg.VG, uuid=self.vg_uuid)
- self.mock_find_vg.return_value = (self.vio_wrap, self.vg)
-
- self.flags(volume_group_name='fakevg', group='powervm')
-
- # Mock the feed tasks.
- self.mock_afs = self.useFixture(fixtures.MockPatch(
- 'pypowervm.utils.transaction.FeedTask.add_functor_subtask',
- autospec=True)).mock
- self.mock_wtsk = mock.create_autospec(
- pvm_tx.WrapperTask, instance=True)
- self.mock_wtsk.configure_mock(wrapper=self.vio_wrap)
- self.mock_ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- self.mock_ftsk.configure_mock(
- wrapper_tasks={'vios-uuid': self.mock_wtsk})
-
- # Create the adapter.
- self.ld_adpt = localdisk.LocalStorage(self.adpt, 'host_uuid')
-
- def test_init(self):
- # Localdisk adapter already initialized in setUp()
- # From super __init__()
- self.assertEqual(self.adpt, self.ld_adpt._adapter)
- self.assertEqual('host_uuid', self.ld_adpt._host_uuid)
- self.assertEqual('mgmt_uuid', self.ld_adpt.mp_uuid)
-
- # From LocalStorage __init__()
- self.assertEqual('fakevg', self.ld_adpt.vg_name)
- self.mock_find_vg.assert_called_once_with(self.adpt, 'fakevg')
- self.assertEqual('vios-uuid', self.ld_adpt._vios_uuid)
- self.assertEqual(self.vg_uuid, self.ld_adpt.vg_uuid)
- self.assertFalse(self.ld_adpt.capabilities['shared_storage'])
- self.assertFalse(self.ld_adpt.capabilities['has_imagecache'])
- self.assertFalse(self.ld_adpt.capabilities['snapshot'])
-
- # Assert snapshot capability is true if hosting I/O on mgmt partition.
- self.mgmt_uuid.return_value = 'vios-uuid'
- self.ld_adpt = localdisk.LocalStorage(self.adpt, 'host_uuid')
- self.assertTrue(self.ld_adpt.capabilities['snapshot'])
-
- # Assert volume_group_name is required.
- self.flags(volume_group_name=None, group='powervm')
- self.assertRaises(exception.OptRequiredIfOtherOptValue,
- localdisk.LocalStorage, self.adpt, 'host_uuid')
-
- def test_vios_uuids(self):
- self.assertEqual(['vios-uuid'], self.ld_adpt._vios_uuids)
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_disk_name')
- def test_disk_match_func(self, mock_disk_name, mock_gen_match):
- mock_disk_name.return_value = 'disk_name'
- func = self.ld_adpt._disk_match_func('disk_type', 'instance')
- mock_disk_name.assert_called_once_with(
- 'disk_type', 'instance', short=True)
- mock_gen_match.assert_called_once_with(
- pvm_stg.VDisk, names=['disk_name'])
- self.assertEqual(mock_gen_match.return_value, func)
-
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._get_vg_wrap')
- def test_capacity(self, mock_vg):
- """Tests the capacity methods."""
- mock_vg.return_value = mock.Mock(
- capacity='5120', available_size='2048')
- self.assertEqual(5120.0, self.ld_adpt.capacity)
- self.assertEqual(3072.0, self.ld_adpt.capacity_used)
-
- @mock.patch('pypowervm.tasks.storage.rm_vg_storage', autospec=True)
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._get_vg_wrap')
- def test_delete_disks(self, mock_vg, mock_rm_vg):
- self.ld_adpt.delete_disks('storage_elems')
- mock_vg.assert_called_once_with()
- mock_rm_vg.assert_called_once_with(
- mock_vg.return_value, vdisks='storage_elems')
-
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- def test_detach_disk(self, mock_match_fn, mock_rm_maps, mock_vios):
- mock_match_fn.return_value = 'match_func'
- mock_vios.return_value = self.vio_wrap
- mock_map1 = mock.Mock(backing_storage='back_stor1')
- mock_map2 = mock.Mock(backing_storage='back_stor2')
- mock_rm_maps.return_value = [mock_map1, mock_map2]
-
- back_stores = self.ld_adpt.detach_disk('instance')
-
- self.assertEqual(['back_stor1', 'back_stor2'], back_stores)
- mock_match_fn.assert_called_once_with(pvm_stg.VDisk)
- mock_vios.assert_called_once_with(
- self.ld_adpt._adapter, uuid='vios-uuid',
- xag=[pvm_const.XAG.VIO_SMAP])
- mock_rm_maps.assert_called_with(self.vio_wrap, 'pvm_uuid',
- match_func=mock_match_fn.return_value)
- mock_vios.return_value.update.assert_called_once()
-
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_vdisk_mapping',
- autospec=True)
- def test_disconnect_disk_from_mgmt(self, mock_rm_vdisk_map):
- self.ld_adpt.disconnect_disk_from_mgmt('vios-uuid', 'disk_name')
- mock_rm_vdisk_map.assert_called_with(
- self.ld_adpt._adapter, 'vios-uuid', 'mgmt_uuid',
- disk_names=['disk_name'])
-
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage._upload_image')
- def test_create_disk_from_image(self, mock_upload_image):
- mock_image_meta = mock.Mock()
- mock_image_meta.size = 30
- mock_upload_image.return_value = 'mock_img'
-
- self.ld_adpt.create_disk_from_image(
- 'context', 'instance', mock_image_meta)
-
- mock_upload_image.assert_called_once_with(
- 'context', 'instance', mock_image_meta)
-
- @mock.patch('nova.image.glance.API.download')
- @mock.patch('nova.virt.powervm.disk.driver.IterableToFileAdapter')
- @mock.patch('pypowervm.tasks.storage.upload_new_vdisk')
- @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_disk_name')
- def test_upload_image(self, mock_name, mock_upload, mock_iter, mock_dl):
- mock_meta = mock.Mock(id='1', size=1073741824, disk_format='raw')
- mock_upload.return_value = ['mock_img']
-
- mock_img = self.ld_adpt._upload_image('context', 'inst', mock_meta)
-
- self.assertEqual('mock_img', mock_img)
- mock_name.assert_called_once_with(
- disk_dvr.DiskType.BOOT, 'inst', short=True)
- mock_dl.assert_called_once_with('context', '1')
- mock_iter.assert_called_once_with(mock_dl.return_value)
- mock_upload.assert_called_once_with(
- self.adpt, 'vios-uuid', self.vg_uuid, mock_iter.return_value,
- mock_name.return_value, 1073741824, d_size=1073741824,
- upload_type=tsk_stg.UploadType.IO_STREAM, file_format='raw')
-
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
- autospec=True)
- def test_attach_disk(self, mock_bldmap, mock_addmap):
- def test_afs(add_func):
- # Verify the internal add_func
- self.assertEqual(mock_addmap.return_value, add_func(self.vio_wrap))
- mock_bldmap.assert_called_once_with(
- self.ld_adpt._host_uuid, self.vio_wrap, 'pvm_uuid',
- 'disk_info')
- mock_addmap.assert_called_once_with(
- self.vio_wrap, mock_bldmap.return_value)
-
- self.mock_wtsk.add_functor_subtask.side_effect = test_afs
- self.ld_adpt.attach_disk('instance', 'disk_info', self.mock_ftsk)
- self.pvm_uuid.assert_called_once_with('instance')
- self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)
-
- @mock.patch('pypowervm.wrappers.storage.VG.get')
- def test_get_vg_wrap(self, mock_vg):
- vg_wrap = self.ld_adpt._get_vg_wrap()
- self.assertEqual(mock_vg.return_value, vg_wrap)
- mock_vg.assert_called_once_with(
- self.adpt, uuid=self.vg_uuid, parent_type=pvm_vios.VIOS,
- parent_uuid='vios-uuid')
-
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('nova.virt.powervm.disk.localdisk.LocalStorage.'
- '_disk_match_func')
- def test_get_bootdisk_path(self, mock_match_fn, mock_findmaps,
- mock_vios):
- mock_vios.return_value = self.vio_wrap
-
- # No maps found
- mock_findmaps.return_value = None
- devname = self.ld_adpt.get_bootdisk_path('inst', 'vios_uuid')
- self.pvm_uuid.assert_called_once_with('inst')
- mock_match_fn.assert_called_once_with(disk_dvr.DiskType.BOOT, 'inst')
- mock_vios.assert_called_once_with(
- self.adpt, uuid='vios_uuid', xag=[pvm_const.XAG.VIO_SMAP])
- mock_findmaps.assert_called_once_with(
- self.vio_wrap.scsi_mappings,
- client_lpar_id='pvm_uuid',
- match_func=mock_match_fn.return_value)
- self.assertIsNone(devname)
-
- # Good map
- mock_lu = mock.Mock()
- mock_lu.server_adapter.backing_dev_name = 'devname'
- mock_findmaps.return_value = [mock_lu]
- devname = self.ld_adpt.get_bootdisk_path('inst', 'vios_uuid')
- self.assertEqual('devname', devname)
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps')
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.wrappers.storage.VG.get', new=mock.Mock())
- def test_get_bootdisk_iter(self, mock_vios, mock_find_maps, mock_lw):
- inst, lpar_wrap, vios1 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
-
- # Good path
- mock_vios.return_value = vios1
- for vdisk, vios in self.ld_adpt._get_bootdisk_iter(inst):
- self.assertEqual(vios1.scsi_mappings[0].backing_storage, vdisk)
- self.assertEqual(vios1.uuid, vios.uuid)
- mock_vios.assert_called_once_with(
- self.adpt, uuid='vios-uuid', xag=[pvm_const.XAG.VIO_SMAP])
-
- # Not found, no storage of that name.
- mock_vios.reset_mock()
- mock_find_maps.return_value = []
- for vdisk, vios in self.ld_adpt._get_bootdisk_iter(inst):
- self.fail('Should not have found any storage elements.')
- mock_vios.assert_called_once_with(
- self.adpt, uuid='vios-uuid', xag=[pvm_const.XAG.VIO_SMAP])
-
- @mock.patch('nova.virt.powervm.disk.driver.DiskAdapter._get_bootdisk_iter',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping', autospec=True)
- def test_connect_instance_disk_to_mgmt(self, mock_add, mock_lw, mock_iter):
- inst, lpar_wrap, vios1 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
-
- # Good path
- mock_iter.return_value = [(vios1.scsi_mappings[0].backing_storage,
- vios1)]
- vdisk, vios = self.ld_adpt.connect_instance_disk_to_mgmt(inst)
- self.assertEqual(vios1.scsi_mappings[0].backing_storage, vdisk)
- self.assertIs(vios1, vios)
- self.assertEqual(1, mock_add.call_count)
- mock_add.assert_called_with('host_uuid', vios, 'mgmt_uuid', vdisk)
-
- # add_vscsi_mapping raises. Show-stopper since only one VIOS.
- mock_add.reset_mock()
- mock_add.side_effect = Exception
- self.assertRaises(exception.InstanceDiskMappingFailed,
- self.ld_adpt.connect_instance_disk_to_mgmt, inst)
- self.assertEqual(1, mock_add.call_count)
-
- # Not found
- mock_add.reset_mock()
- mock_iter.return_value = []
- self.assertRaises(exception.InstanceDiskMappingFailed,
- self.ld_adpt.connect_instance_disk_to_mgmt, inst)
- self.assertFalse(mock_add.called)
-
- def _bld_mocks_for_instance_disk(self):
- inst = mock.Mock()
- inst.name = 'Name Of Instance'
- inst.uuid = uuids.inst_uuid
- lpar_wrap = mock.Mock()
- lpar_wrap.id = 2
- vios1 = self.vio_wrap
- back_stor_name = 'b_Name_Of__' + inst.uuid[:4]
- vios1.scsi_mappings[0].backing_storage.name = back_stor_name
- return inst, lpar_wrap, vios1
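
The setUp() of the removed TestLocalDisk above relies on the fixtures.MockPatch idiom used throughout these driver tests: each patch registered via useFixture() is torn down automatically when the test finishes, and autospec=True keeps the mock's call signature aligned with the real target. A stripped-down, self-contained sketch of that idiom (the patched helper is hypothetical):

    import fixtures
    import testtools


    def lookup_host(name):
        # Stand-in for a real helper; tests patch it away.
        raise RuntimeError('not expected to run in tests')


    class TestWithMockPatch(testtools.TestCase):
        def setUp(self):
            super().setUp()
            # The fixture registers its own cleanup, so the patch lives for
            # exactly one test case.
            self.mock_lookup = self.useFixture(fixtures.MockPatch(
                __name__ + '.lookup_host', autospec=True)).mock
            self.mock_lookup.return_value = 'host1'

        def test_lookup(self):
            self.assertEqual('host1', lookup_host('node-a'))
            self.mock_lookup.assert_called_once_with('node-a')
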
diff --git a/nova/tests/unit/virt/powervm/disk/test_ssp.py b/nova/tests/unit/virt/powervm/disk/test_ssp.py
deleted file mode 100644
index 86705dc29b..0000000000
--- a/nova/tests/unit/virt/powervm/disk/test_ssp.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from oslo_utils import uuidutils
-from pypowervm import const as pvm_const
-from pypowervm import exceptions as pvm_exc
-from pypowervm.tasks import storage as tsk_stg
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import cluster as pvm_clust
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import exception
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt.powervm.disk import ssp as ssp_dvr
-from nova.virt.powervm import vm
-
-FAKE_INST_UUID = uuidutils.generate_uuid(dashed=True)
-FAKE_INST_UUID_PVM = vm.get_pvm_uuid(mock.Mock(uuid=FAKE_INST_UUID))
-
-
-class TestSSPDiskAdapter(test.NoDBTestCase):
- """Unit Tests for the LocalDisk storage driver."""
-
- def setUp(self):
- super(TestSSPDiskAdapter, self).setUp()
-
- self.inst = powervm.TEST_INSTANCE
-
- self.apt = mock.Mock()
- self.host_uuid = 'host_uuid'
-
- self.ssp_wrap = mock.create_autospec(pvm_stg.SSP, instance=True)
-
- # SSP.refresh() returns itself
- self.ssp_wrap.refresh.return_value = self.ssp_wrap
- self.node1 = mock.create_autospec(pvm_clust.Node, instance=True)
- self.node2 = mock.create_autospec(pvm_clust.Node, instance=True)
- self.clust_wrap = mock.create_autospec(
- pvm_clust.Cluster, instance=True)
- self.clust_wrap.nodes = [self.node1, self.node2]
- self.clust_wrap.refresh.return_value = self.clust_wrap
- self.tier_wrap = mock.create_autospec(pvm_stg.Tier, instance=True)
- # Tier.refresh() returns itself
- self.tier_wrap.refresh.return_value = self.tier_wrap
- self.vio_wrap = mock.create_autospec(pvm_vios.VIOS, instance=True)
-
- # For _cluster
- self.mock_clust = self.useFixture(fixtures.MockPatch(
- 'pypowervm.wrappers.cluster.Cluster', autospec=True)).mock
- self.mock_clust.get.return_value = [self.clust_wrap]
-
- # For _ssp
- self.mock_ssp_gbhref = self.useFixture(fixtures.MockPatch(
- 'pypowervm.wrappers.storage.SSP.get_by_href')).mock
- self.mock_ssp_gbhref.return_value = self.ssp_wrap
-
- # For _tier
- self.mock_get_tier = self.useFixture(fixtures.MockPatch(
- 'pypowervm.tasks.storage.default_tier_for_ssp',
- autospec=True)).mock
- self.mock_get_tier.return_value = self.tier_wrap
-
- # A FeedTask
- self.mock_wtsk = mock.create_autospec(
- pvm_tx.WrapperTask, instance=True)
- self.mock_wtsk.configure_mock(wrapper=self.vio_wrap)
- self.mock_ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- self.mock_afs = self.mock_ftsk.add_functor_subtask
- self.mock_ftsk.configure_mock(
- wrapper_tasks={self.vio_wrap.uuid: self.mock_wtsk})
-
- self.pvm_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.get_pvm_uuid')).mock
-
- # Return the mgmt uuid
- self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.mgmt.mgmt_uuid')).mock
- self.mgmt_uuid.return_value = 'mp_uuid'
-
- # The SSP disk adapter
- self.ssp_drv = ssp_dvr.SSPDiskAdapter(self.apt, self.host_uuid)
-
- def test_init(self):
- self.assertEqual(self.apt, self.ssp_drv._adapter)
- self.assertEqual(self.host_uuid, self.ssp_drv._host_uuid)
- self.mock_clust.get.assert_called_once_with(self.apt)
- self.assertEqual(self.mock_clust.get.return_value,
- [self.ssp_drv._clust])
- self.mock_ssp_gbhref.assert_called_once_with(
- self.apt, self.clust_wrap.ssp_uri)
- self.assertEqual(self.mock_ssp_gbhref.return_value, self.ssp_drv._ssp)
- self.mock_get_tier.assert_called_once_with(self.ssp_wrap)
- self.assertEqual(self.mock_get_tier.return_value, self.ssp_drv._tier)
-
- def test_init_error(self):
- # Do these in reverse order to verify we trap all of 'em
- for raiser in (self.mock_get_tier, self.mock_ssp_gbhref,
- self.mock_clust.get):
- raiser.side_effect = pvm_exc.TimeoutError("timed out")
- self.assertRaises(exception.NotFound,
- ssp_dvr.SSPDiskAdapter, self.apt, self.host_uuid)
- raiser.side_effect = ValueError
- self.assertRaises(ValueError,
- ssp_dvr.SSPDiskAdapter, self.apt, self.host_uuid)
-
- def test_capabilities(self):
- self.assertTrue(self.ssp_drv.capabilities.get('shared_storage'))
- self.assertFalse(self.ssp_drv.capabilities.get('has_imagecache'))
- self.assertTrue(self.ssp_drv.capabilities.get('snapshot'))
-
- @mock.patch('pypowervm.util.get_req_path_uuid', autospec=True)
- def test_vios_uuids(self, mock_rpu):
- mock_rpu.return_value = self.host_uuid
- vios_uuids = self.ssp_drv._vios_uuids
- self.assertEqual({self.node1.vios_uuid, self.node2.vios_uuid},
- set(vios_uuids))
- mock_rpu.assert_has_calls(
- [mock.call(node.vios_uri, preserve_case=True, root=True)
- for node in [self.node1, self.node2]])
-
- mock_rpu.reset_mock()
-
- # Test VIOSes on other nodes, which won't have uuid or url
- node1 = mock.Mock(vios_uuid=None, vios_uri='uri1')
- node2 = mock.Mock(vios_uuid='2', vios_uri=None)
- # This mock is good and should be returned
- node3 = mock.Mock(vios_uuid='3', vios_uri='uri3')
- self.clust_wrap.nodes = [node1, node2, node3]
- self.assertEqual(['3'], self.ssp_drv._vios_uuids)
- # get_req_path_uuid was only called on the good one
- mock_rpu.assert_called_once_with('uri3', preserve_case=True, root=True)
-
- def test_capacity(self):
- self.tier_wrap.capacity = 10
- self.assertAlmostEqual(10.0, self.ssp_drv.capacity)
- self.tier_wrap.refresh.assert_called_once_with()
-
- def test_capacity_used(self):
- self.ssp_wrap.capacity = 4.56
- self.ssp_wrap.free_space = 1.23
- self.assertAlmostEqual((4.56 - 1.23), self.ssp_drv.capacity_used)
- self.ssp_wrap.refresh.assert_called_once_with()
-
- @mock.patch('pypowervm.tasks.cluster_ssp.get_or_upload_image_lu',
- autospec=True)
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
- new_callable=mock.PropertyMock)
- @mock.patch('pypowervm.util.sanitize_file_name_for_api', autospec=True)
- @mock.patch('pypowervm.tasks.storage.crt_lu', autospec=True)
- @mock.patch('nova.image.glance.API.download')
- @mock.patch('nova.virt.powervm.disk.driver.IterableToFileAdapter',
- autospec=True)
- def test_create_disk_from_image(self, mock_it2f, mock_dl, mock_crt_lu,
- mock_san, mock_vuuid, mock_goru):
- img = powervm.TEST_IMAGE1
-
- mock_crt_lu.return_value = self.ssp_drv._ssp, 'boot_lu'
- mock_san.return_value = 'disk_name'
- mock_vuuid.return_value = ['vuuid']
-
- self.assertEqual('boot_lu', self.ssp_drv.create_disk_from_image(
- 'context', self.inst, img))
- mock_dl.assert_called_once_with('context', img.id)
- mock_san.assert_has_calls([
- mock.call(img.name, prefix='image_', suffix='_' + img.checksum),
- mock.call(self.inst.name, prefix='boot_')])
- mock_it2f.assert_called_once_with(mock_dl.return_value)
- mock_goru.assert_called_once_with(
- self.ssp_drv._tier, 'disk_name', 'vuuid',
- mock_it2f.return_value, img.size,
- upload_type=tsk_stg.UploadType.IO_STREAM)
- mock_crt_lu.assert_called_once_with(
- self.mock_get_tier.return_value, mock_san.return_value,
- self.inst.flavor.root_gb, typ=pvm_stg.LUType.DISK,
- clone=mock_goru.return_value)
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
- new_callable=mock.PropertyMock)
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
- autospec=True)
- @mock.patch('pypowervm.wrappers.storage.LU', autospec=True)
- def test_connect_disk(self, mock_lu, mock_bldmap, mock_addmap,
- mock_vio_uuids):
- disk_info = mock.Mock()
- disk_info.configure_mock(name='dname', udid='dudid')
- mock_vio_uuids.return_value = [self.vio_wrap.uuid]
-
- def test_afs(add_func):
- # Verify the internal add_func
- self.assertEqual(mock_addmap.return_value, add_func(self.vio_wrap))
- mock_bldmap.assert_called_once_with(
- self.host_uuid, self.vio_wrap, self.pvm_uuid.return_value,
- mock_lu.bld_ref.return_value)
- mock_addmap.assert_called_once_with(
- self.vio_wrap, mock_bldmap.return_value)
- self.mock_wtsk.add_functor_subtask.side_effect = test_afs
-
- self.ssp_drv.attach_disk(self.inst, disk_info, self.mock_ftsk)
- mock_lu.bld_ref.assert_called_once_with(self.apt, 'dname', 'dudid')
- self.pvm_uuid.assert_called_once_with(self.inst)
- self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)
-
- @mock.patch('pypowervm.tasks.storage.rm_tier_storage', autospec=True)
- def test_delete_disks(self, mock_rm_tstor):
- self.ssp_drv.delete_disks(['disk1', 'disk2'])
- mock_rm_tstor.assert_called_once_with(['disk1', 'disk2'],
- tier=self.ssp_drv._tier)
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._vios_uuids',
- new_callable=mock.PropertyMock)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
- autospec=True)
- def test_disconnect_disk(self, mock_bld_ftsk, mock_gmf, mock_rmmaps,
- mock_findmaps, mock_vio_uuids):
- mock_vio_uuids.return_value = [self.vio_wrap.uuid]
- mock_bld_ftsk.return_value = self.mock_ftsk
- lu1, lu2 = [mock.create_autospec(pvm_stg.LU, instance=True)] * 2
- # Two mappings have the same LU, to verify set behavior
- mock_findmaps.return_value = [
- mock.Mock(spec=pvm_vios.VSCSIMapping, backing_storage=lu)
- for lu in (lu1, lu2, lu1)]
-
- def test_afs(rm_func):
- # verify the internal rm_func
- self.assertEqual(mock_rmmaps.return_value, rm_func(self.vio_wrap))
- mock_rmmaps.assert_called_once_with(
- self.vio_wrap, self.pvm_uuid.return_value,
- match_func=mock_gmf.return_value)
- self.mock_wtsk.add_functor_subtask.side_effect = test_afs
-
- self.assertEqual(
- {lu1, lu2}, set(self.ssp_drv.detach_disk(self.inst)))
- mock_bld_ftsk.assert_called_once_with(
- self.apt, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])
- self.pvm_uuid.assert_called_once_with(self.inst)
- mock_gmf.assert_called_once_with(pvm_stg.LU)
- self.assertEqual(1, self.mock_wtsk.add_functor_subtask.call_count)
- mock_findmaps.assert_called_once_with(
- self.vio_wrap.scsi_mappings,
- client_lpar_id=self.pvm_uuid.return_value,
- match_func=mock_gmf.return_value)
- self.mock_ftsk.execute.assert_called_once_with()
-
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._disk_match_func')
- def test_get_bootdisk_path(self, mock_match_fn, mock_findmaps,
- mock_vios):
- mock_vios.return_value = self.vio_wrap
-
- # No maps found
- mock_findmaps.return_value = None
- devname = self.ssp_drv.get_bootdisk_path('inst', 'vios_uuid')
- mock_vios.assert_called_once_with(
- self.apt, uuid='vios_uuid', xag=[pvm_const.XAG.VIO_SMAP])
- mock_findmaps.assert_called_once_with(
- self.vio_wrap.scsi_mappings,
- client_lpar_id=self.pvm_uuid.return_value,
- match_func=mock_match_fn.return_value)
- self.assertIsNone(devname)
-
- # Good map
- mock_lu = mock.Mock()
- mock_lu.server_adapter.backing_dev_name = 'devname'
- mock_findmaps.return_value = [mock_lu]
- devname = self.ssp_drv.get_bootdisk_path('inst', 'vios_uuid')
- self.assertEqual('devname', devname)
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter.'
- '_vios_uuids', new_callable=mock.PropertyMock)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- @mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping', autospec=True)
- def test_connect_instance_disk_to_mgmt(self, mock_add, mock_vio_get,
- mock_lw, mock_vio_uuids):
- inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
- mock_vio_uuids.return_value = [1, 2]
-
- # Test with two VIOSes, both of which contain the mapping
- mock_vio_get.side_effect = [vio1, vio2]
- lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
- self.assertEqual('lu_udid', lu.udid)
- # Should hit on the first VIOS
- self.assertIs(vio1, vios)
- mock_add.assert_called_once_with(self.host_uuid, vio1, 'mp_uuid', lu)
-
- # Now the first VIOS doesn't have the mapping, but the second does
- mock_add.reset_mock()
- mock_vio_get.side_effect = [vio3, vio2]
- lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
- self.assertEqual('lu_udid', lu.udid)
- # Should hit on the second VIOS
- self.assertIs(vio2, vios)
- self.assertEqual(1, mock_add.call_count)
- mock_add.assert_called_once_with(self.host_uuid, vio2, 'mp_uuid', lu)
-
- # No hits
- mock_add.reset_mock()
- mock_vio_get.side_effect = [vio3, vio3]
- self.assertRaises(exception.InstanceDiskMappingFailed,
- self.ssp_drv.connect_instance_disk_to_mgmt, inst)
- self.assertEqual(0, mock_add.call_count)
-
- # First add_vscsi_mapping call raises
- mock_vio_get.side_effect = [vio1, vio2]
- mock_add.side_effect = [Exception("mapping failed"), None]
- # Should hit on the second VIOS
- self.assertIs(vio2, vios)
-
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_lu_mapping', autospec=True)
- def test_disconnect_disk_from_mgmt(self, mock_rm_lu_map):
- self.ssp_drv.disconnect_disk_from_mgmt('vios_uuid', 'disk_name')
- mock_rm_lu_map.assert_called_with(self.apt, 'vios_uuid',
- 'mp_uuid', disk_names=['disk_name'])
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._get_disk_name')
- def test_disk_match_func(self, mock_disk_name, mock_gen_match):
- mock_disk_name.return_value = 'disk_name'
- self.ssp_drv._disk_match_func('disk_type', 'instance')
- mock_disk_name.assert_called_once_with('disk_type', 'instance')
- mock_gen_match.assert_called_with(pvm_stg.LU, names=['disk_name'])
-
- @mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter.'
- '_vios_uuids', new_callable=mock.PropertyMock)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
- def test_get_bootdisk_iter(self, mock_vio_get, mock_lw, mock_vio_uuids):
- inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk()
- mock_lw.return_value = lpar_wrap
- mock_vio_uuids.return_value = [1, 2]
-
- # Test with two VIOSes, both of which contain the mapping. Force the
- # method to get the lpar_wrap.
- mock_vio_get.side_effect = [vio1, vio2]
- idi = self.ssp_drv._get_bootdisk_iter(inst)
- lu, vios = next(idi)
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios1', vios.name)
- mock_vio_get.assert_called_once_with(self.apt, uuid=1,
- xag=[pvm_const.XAG.VIO_SMAP])
- lu, vios = next(idi)
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios2', vios.name)
- mock_vio_get.assert_called_with(self.apt, uuid=2,
- xag=[pvm_const.XAG.VIO_SMAP])
- self.assertRaises(StopIteration, next, idi)
- self.assertEqual(2, mock_vio_get.call_count)
- mock_lw.assert_called_once_with(self.apt, inst)
-
- # Same, but prove that breaking out of the loop early avoids the second
- # get call. Supply lpar_wrap from here on, and prove no calls to
- # get_instance_wrapper
- mock_vio_get.reset_mock()
- mock_lw.reset_mock()
- mock_vio_get.side_effect = [vio1, vio2]
- for lu, vios in self.ssp_drv._get_bootdisk_iter(inst):
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios1', vios.name)
- break
- mock_vio_get.assert_called_once_with(self.apt, uuid=1,
- xag=[pvm_const.XAG.VIO_SMAP])
-
- # Now the first VIOS doesn't have the mapping, but the second does
- mock_vio_get.reset_mock()
- mock_vio_get.side_effect = [vio3, vio2]
- idi = self.ssp_drv._get_bootdisk_iter(inst)
- lu, vios = next(idi)
- self.assertEqual('lu_udid', lu.udid)
- self.assertEqual('vios2', vios.name)
- mock_vio_get.assert_has_calls(
- [mock.call(self.apt, uuid=uuid, xag=[pvm_const.XAG.VIO_SMAP])
- for uuid in (1, 2)])
- self.assertRaises(StopIteration, next, idi)
- self.assertEqual(2, mock_vio_get.call_count)
-
- # No hits
- mock_vio_get.reset_mock()
- mock_vio_get.side_effect = [vio3, vio3]
- self.assertEqual([], list(self.ssp_drv._get_bootdisk_iter(inst)))
- self.assertEqual(2, mock_vio_get.call_count)
-
- def _bld_mocks_for_instance_disk(self):
- inst = mock.Mock()
- inst.name = 'my-instance-name'
- lpar_wrap = mock.Mock()
- lpar_wrap.id = 4
- lu_wrap = mock.Mock(spec=pvm_stg.LU)
- lu_wrap.configure_mock(name='boot_my_instance_name', udid='lu_udid')
- smap = mock.Mock(backing_storage=lu_wrap,
- server_adapter=mock.Mock(lpar_id=4))
- # Build mock VIOS Wrappers as the returns from VIOS.wrap.
- # vios1 and vios2 will both have the mapping for client ID 4 and LU
- # named boot_my_instance_name.
- smaps = [mock.Mock(), mock.Mock(), mock.Mock(), smap]
- vios1 = mock.Mock(spec=pvm_vios.VIOS)
- vios1.configure_mock(name='vios1', uuid='uuid1', scsi_mappings=smaps)
- vios2 = mock.Mock(spec=pvm_vios.VIOS)
- vios2.configure_mock(name='vios2', uuid='uuid2', scsi_mappings=smaps)
- # vios3 will not have the mapping
- vios3 = mock.Mock(spec=pvm_vios.VIOS)
- vios3.configure_mock(name='vios3', uuid='uuid3',
- scsi_mappings=[mock.Mock(), mock.Mock()])
- return inst, lpar_wrap, vios1, vios2, vios3
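
TestSSPDiskAdapter above builds most of its collaborators with mock.create_autospec(..., instance=True), which produces mocks that expose only the real class's attributes and reject calls that do not match the real signatures. A small, self-contained sketch of what that buys (the VolumeGroup class is a stand-in, not a pypowervm wrapper):

    import unittest
    from unittest import mock


    class VolumeGroup:
        def resize(self, new_size_gb):
            return new_size_gb


    class TestAutospec(unittest.TestCase):
        def test_autospec_enforces_real_api(self):
            vg = mock.create_autospec(VolumeGroup, instance=True)
            vg.resize(10)
            vg.resize.assert_called_once_with(10)
            # Calls that do not match the real method signature fail at once.
            self.assertRaises(TypeError, vg.resize, 10, 20, 30)
            # instance=True: the mock acts like an instance, so it is not callable.
            self.assertRaises(TypeError, vg)
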
diff --git a/nova/tests/unit/virt/powervm/tasks/__init__.py b/nova/tests/unit/virt/powervm/tasks/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/__init__.py
+++ /dev/null
diff --git a/nova/tests/unit/virt/powervm/tasks/test_image.py b/nova/tests/unit/virt/powervm/tasks/test_image.py
deleted file mode 100644
index b9e3560a16..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_image.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import test
-
-from nova.virt.powervm.tasks import image as tsk_img
-
-
-class TestImage(test.TestCase):
- def test_update_task_state(self):
- def func(task_state, expected_state='delirious'):
- self.assertEqual('task_state', task_state)
- self.assertEqual('delirious', expected_state)
- tf = tsk_img.UpdateTaskState(func, 'task_state')
- self.assertEqual('update_task_state_task_state', tf.name)
- tf.execute()
-
- def func2(task_state, expected_state=None):
- self.assertEqual('task_state', task_state)
- self.assertEqual('expected_state', expected_state)
- tf = tsk_img.UpdateTaskState(func2, 'task_state',
- expected_state='expected_state')
- tf.execute()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tsk_img.UpdateTaskState(func, 'task_state')
- tf.assert_called_once_with(
- name='update_task_state_task_state')
-
- @mock.patch('nova.virt.powervm.image.stream_blockdev_to_glance',
- autospec=True)
- @mock.patch('nova.virt.powervm.image.generate_snapshot_metadata',
- autospec=True)
- def test_stream_to_glance(self, mock_metadata, mock_stream):
- mock_metadata.return_value = 'metadata'
- mock_inst = mock.Mock()
- mock_inst.name = 'instance_name'
- tf = tsk_img.StreamToGlance('context', 'image_api', 'image_id',
- mock_inst)
- self.assertEqual('stream_to_glance', tf.name)
- tf.execute('disk_path')
- mock_metadata.assert_called_with('context', 'image_api', 'image_id',
- mock_inst)
- mock_stream.assert_called_with('context', 'image_api', 'image_id',
- 'metadata', 'disk_path')
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tsk_img.StreamToGlance(
- 'context', 'image_api', 'image_id', mock_inst)
- tf.assert_called_once_with(
- name='stream_to_glance', requires='disk_path')
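
The removed TestImage above (and the network and storage task tests that follow) repeats one more idiom: patch taskflow.task.Task.__init__ and assert on the name/provides/requires values a Task subclass forwards to it, which pins down how the task will be wired into a flow. A minimal sketch of the idiom, assuming taskflow is installed and using a hypothetical task class:

    from unittest import mock

    from taskflow import task


    class StreamUp(task.Task):
        # Hypothetical task, used only to show the constructor-argument check.
        def __init__(self, image_id):
            self.image_id = image_id
            super().__init__(name='stream_up', requires='disk_path')

        def execute(self, disk_path):
            return (self.image_id, disk_path)


    def test_task_constructor_args():
        with mock.patch('taskflow.task.Task.__init__') as tf_init:
            StreamUp('img-1')
        tf_init.assert_called_once_with(name='stream_up', requires='disk_path')
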
diff --git a/nova/tests/unit/virt/powervm/tasks/test_network.py b/nova/tests/unit/virt/powervm/tasks/test_network.py
deleted file mode 100644
index 9d6951eceb..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_network.py
+++ /dev/null
@@ -1,323 +0,0 @@
-# Copyright 2015, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-import eventlet
-import mock
-from pypowervm.wrappers import network as pvm_net
-
-from nova import exception
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt.powervm.tasks import network as tf_net
-
-
-def cna(mac):
- """Builds a mock Client Network Adapter for unit tests."""
- return mock.MagicMock(mac=mac, vswitch_uri='fake_href')
-
-
-class TestNetwork(test.NoDBTestCase):
- def setUp(self):
- super(TestNetwork, self).setUp()
- self.flags(host='host1')
- self.apt = mock.Mock()
-
- self.mock_lpar_wrap = mock.MagicMock()
- self.mock_lpar_wrap.can_modify_io.return_value = True, None
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- @mock.patch('nova.virt.powervm.vif.unplug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_unplug_vifs(self, mock_vm_get, mock_unplug, mock_get_wrap):
- """Tests that a delete of the vif can be done."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA responses.
- cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11'), cna('AABBCCDDEE22')]
- mock_vm_get.return_value = cnas
-
- # Mock up the network info. This also validates that they will be
- # sanitized to upper case.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff'}, {'address': 'aa:bb:cc:dd:ee:22'},
- {'address': 'aa:bb:cc:dd:ee:33'}
- ]
-
- # Mock out the instance wrapper
- mock_get_wrap.return_value = self.mock_lpar_wrap
-
- # Mock out the vif driver
- def validate_unplug(adapter, instance, vif, cna_w_list=None):
- self.assertEqual(adapter, self.apt)
- self.assertEqual(instance, inst)
- self.assertIn(vif, net_info)
- self.assertEqual(cna_w_list, cnas)
-
- mock_unplug.side_effect = validate_unplug
-
- # Run method
- p_vifs = tf_net.UnplugVifs(self.apt, inst, net_info)
- p_vifs.execute()
-
- # Make sure the unplug was invoked, so that we know that the validation
- # code was called
- self.assertEqual(3, mock_unplug.call_count)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_net.UnplugVifs(self.apt, inst, net_info)
- tf.assert_called_once_with(name='unplug_vifs')
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_unplug_vifs_invalid_state(self, mock_get_wrap):
- """Tests that the delete raises an exception if bad VM state."""
- inst = powervm.TEST_INSTANCE
-
- # Mock out the instance wrapper
- mock_get_wrap.return_value = self.mock_lpar_wrap
-
- # Mock that the state is incorrect
- self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
-
- # Run method
- p_vifs = tf_net.UnplugVifs(self.apt, inst, mock.Mock())
- self.assertRaises(exception.VirtualInterfaceUnplugException,
- p_vifs.execute)
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_rmc(self, mock_cna_get, mock_plug):
- """Tests that a crt vif can be done with secure RMC."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. One should already exist, the other
- # should not.
- pre_cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
- mock_cna_get.return_value = copy.deepcopy(pre_cnas)
-
- # Mock up the network info. This also validates that they will be
- # sanitized to upper case.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'},
- ]
-
- # First run the CNA update, then the CNA create.
- mock_new_cna = mock.Mock(spec=pvm_net.CNA)
- mock_plug.side_effect = ['upd_cna', mock_new_cna]
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
-
- all_cnas = p_vifs.execute(self.mock_lpar_wrap)
-
- # new vif should be created twice.
- mock_plug.assert_any_call(self.apt, inst, net_info[0], new_vif=False)
- mock_plug.assert_any_call(self.apt, inst, net_info[1], new_vif=True)
-
- # The Task provides the list of original CNAs plus only CNAs that were
- # created.
- self.assertEqual(pre_cnas + [mock_new_cna], all_cnas)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- tf.assert_called_once_with(
- name='plug_vifs', provides='vm_cnas', requires=['lpar_wrap'])
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_rmc_no_create(self, mock_vm_get, mock_plug):
- """Verifies if no creates are needed, none are done."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. Both should already exist.
- mock_vm_get.return_value = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
-
- # Mock up the network info. This also validates that they will be
- # sanitized to upper case. This also validates that we don't call
- # get_vnics if no nets have vnic_type 'direct'.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:11', 'vnic_type': 'normal'}
- ]
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- p_vifs.execute(self.mock_lpar_wrap)
-
- # The create should have been called with new_vif as False.
- mock_plug.assert_any_call(self.apt, inst, net_info[0], new_vif=False)
- mock_plug.assert_any_call(self.apt, inst, net_info[1], new_vif=False)
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_invalid_state(self, mock_vm_get, mock_plug):
- """Tests that a crt_vif fails when the LPAR state is bad."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. Only doing one for simplicity
- mock_vm_get.return_value = []
- net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]
-
- # Mock that the state is incorrect
- self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- self.assertRaises(exception.VirtualInterfaceCreateException,
- p_vifs.execute, self.mock_lpar_wrap)
-
- # The create should not have been invoked
- self.assertEqual(0, mock_plug.call_count)
-
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_timeout(self, mock_vm_get, mock_plug):
- """Tests that crt vif failure via loss of neutron callback."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the CNA response. Only doing one for simplicity
- mock_vm_get.return_value = [cna('AABBCCDDEE11')]
-
- # Mock up the network info.
- net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]
-
- # Ensure that an exception is raised by a timeout.
- mock_plug.side_effect = eventlet.timeout.Timeout()
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- self.assertRaises(exception.VirtualInterfaceCreateException,
- p_vifs.execute, self.mock_lpar_wrap)
-
- # The create should have only been called once.
- self.assertEqual(1, mock_plug.call_count)
-
- @mock.patch('nova.virt.powervm.vif.unplug')
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_vifs_revert(self, mock_vm_get, mock_plug, mock_unplug):
- """Tests that the revert flow works properly."""
- inst = powervm.TEST_INSTANCE
-
- # Fake CNA list. The one pre-existing VIF should *not* get reverted.
- cna_list = [cna('AABBCCDDEEFF'), cna('FFEEDDCCBBAA')]
- mock_vm_get.return_value = cna_list
-
- # Mock up the network info. Three roll backs.
- net_info = [
- {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'},
- {'address': 'aa:bb:cc:dd:ee:33', 'vnic_type': 'normal'}
- ]
-
- # Make sure we test raising an exception
- mock_unplug.side_effect = [exception.NovaException(), None]
-
- # Run method
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- p_vifs.execute(self.mock_lpar_wrap)
- p_vifs.revert(self.mock_lpar_wrap, mock.Mock(), mock.Mock())
-
- # The unplug should be called twice. The exception shouldn't stop the
- # second call.
- self.assertEqual(2, mock_unplug.call_count)
-
- # Make sure each call is invoked correctly. The first plug was not a
- # new vif, so it should not be reverted.
- c2 = mock.call(self.apt, inst, net_info[1], cna_w_list=cna_list)
- c3 = mock.call(self.apt, inst, net_info[2], cna_w_list=cna_list)
- mock_unplug.assert_has_calls([c2, c3])
-
- @mock.patch('pypowervm.tasks.cna.crt_cna')
- @mock.patch('pypowervm.wrappers.network.VSwitch.search')
- @mock.patch('nova.virt.powervm.vif.plug')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_plug_mgmt_vif(self, mock_vm_get, mock_plug, mock_vs_search,
- mock_crt_cna):
- """Tests that a mgmt vif can be created."""
- inst = powervm.TEST_INSTANCE
-
- # Mock up the rmc vswitch
- vswitch_w = mock.MagicMock()
- vswitch_w.href = 'fake_mgmt_uri'
- mock_vs_search.return_value = [vswitch_w]
-
- # Run method such that it triggers a fresh CNA search
- p_vifs = tf_net.PlugMgmtVif(self.apt, inst)
- p_vifs.execute(None)
-
- # With the default get_cnas mock (which returns a Mock()), we think we
- # found an existing management CNA.
- mock_crt_cna.assert_not_called()
- mock_vm_get.assert_called_once_with(
- self.apt, inst, vswitch_uri='fake_mgmt_uri')
-
- # Now mock get_cnas to return no hits
- mock_vm_get.reset_mock()
- mock_vm_get.return_value = []
- p_vifs.execute(None)
-
- # Get was called; and since it didn't have the mgmt CNA, so was plug.
- self.assertEqual(1, mock_crt_cna.call_count)
- mock_vm_get.assert_called_once_with(
- self.apt, inst, vswitch_uri='fake_mgmt_uri')
-
- # Now pass CNAs, but not the mgmt vif, "from PlugVifs"
- cnas = [mock.Mock(vswitch_uri='uri1'), mock.Mock(vswitch_uri='uri2')]
- mock_crt_cna.reset_mock()
- mock_vm_get.reset_mock()
- p_vifs.execute(cnas)
-
- # Get wasn't called, since the CNAs were passed "from PlugVifs"; but
- # since the mgmt vif wasn't included, plug was called.
- mock_vm_get.assert_not_called()
- mock_crt_cna.assert_called()
-
- # Finally, pass CNAs including the mgmt.
- cnas.append(mock.Mock(vswitch_uri='fake_mgmt_uri'))
- mock_crt_cna.reset_mock()
- p_vifs.execute(cnas)
-
- # Neither get nor plug was called.
- mock_vm_get.assert_not_called()
- mock_crt_cna.assert_not_called()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_net.PlugMgmtVif(self.apt, inst)
- tf.assert_called_once_with(
- name='plug_mgmt_vif', provides='mgmt_cna', requires=['vm_cnas'])
-
- def test_get_vif_events(self):
- # Set up common mocks.
- inst = powervm.TEST_INSTANCE
- net_info = [mock.MagicMock(), mock.MagicMock()]
- net_info[0]['id'] = 'a'
- net_info[0].get.return_value = False
- net_info[1]['id'] = 'b'
- net_info[1].get.return_value = True
-
- # Set up the runner.
- p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
- p_vifs.crt_network_infos = net_info
- resp = p_vifs._get_vif_events()
-
- # Only one should be returned since only one was active.
- self.assertEqual(1, len(resp))
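
One last idiom worth noting from the removed network tests: assigning a validation function to a mock's side_effect (as test_unplug_vifs does with validate_unplug) runs assertions at the moment of each call, so a bad argument is reported while the failing call is still on the stack. A self-contained sketch of the same idea with hypothetical code under test:

    from unittest import mock


    def send_all(sender, messages):
        for msg in messages:
            sender.send(msg)


    def test_send_all_checks_every_call():
        sender = mock.Mock()

        def validate_send(msg):
            # Runs inside each sender.send() call, not after the fact.
            assert msg in ('a', 'b')

        sender.send.side_effect = validate_send
        send_all(sender, ['a', 'b'])
        assert sender.send.call_count == 2
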
diff --git a/nova/tests/unit/virt/powervm/tasks/test_storage.py b/nova/tests/unit/virt/powervm/tasks/test_storage.py
deleted file mode 100644
index 39fe9dec72..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_storage.py
+++ /dev/null
@@ -1,354 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from pypowervm import exceptions as pvm_exc
-
-from nova import exception
-from nova import test
-from nova.virt.powervm.tasks import storage as tf_stg
-
-
-class TestStorage(test.NoDBTestCase):
-
- def setUp(self):
- super(TestStorage, self).setUp()
-
- self.adapter = mock.Mock()
- self.disk_dvr = mock.MagicMock()
- self.mock_cfg_drv = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.media.ConfigDrivePowerVM')).mock
- self.mock_mb = self.mock_cfg_drv.return_value
- self.instance = mock.MagicMock()
- self.context = 'context'
-
- def test_create_and_connect_cfg_drive(self):
- # With a specified FeedTask
- task = tf_stg.CreateAndConnectCfgDrive(
- self.adapter, self.instance, 'injected_files',
- 'network_info', 'stg_ftsk', admin_pass='admin_pass')
- task.execute('mgmt_cna')
- self.mock_cfg_drv.assert_called_once_with(self.adapter)
- self.mock_mb.create_cfg_drv_vopt.assert_called_once_with(
- self.instance, 'injected_files', 'network_info', 'stg_ftsk',
- admin_pass='admin_pass', mgmt_cna='mgmt_cna')
-
- # Normal revert
- task.revert('mgmt_cna', 'result', 'flow_failures')
- self.mock_mb.dlt_vopt.assert_called_once_with(self.instance,
- 'stg_ftsk')
-
- self.mock_mb.reset_mock()
-
- # Revert when dlt_vopt fails
- self.mock_mb.dlt_vopt.side_effect = pvm_exc.Error('fake-exc')
- task.revert('mgmt_cna', 'result', 'flow_failures')
- self.mock_mb.dlt_vopt.assert_called_once()
-
- self.mock_mb.reset_mock()
-
- # Revert when media builder not created
- task.mb = None
- task.revert('mgmt_cna', 'result', 'flow_failures')
- self.mock_mb.assert_not_called()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.CreateAndConnectCfgDrive(
- self.adapter, self.instance, 'injected_files',
- 'network_info', 'stg_ftsk', admin_pass='admin_pass')
- tf.assert_called_once_with(name='cfg_drive', requires=['mgmt_cna'])
-
- def test_delete_vopt(self):
- # Test with no FeedTask
- task = tf_stg.DeleteVOpt(self.adapter, self.instance)
- task.execute()
- self.mock_cfg_drv.assert_called_once_with(self.adapter)
- self.mock_mb.dlt_vopt.assert_called_once_with(
- self.instance, stg_ftsk=None)
-
- self.mock_cfg_drv.reset_mock()
- self.mock_mb.reset_mock()
-
- # With a specified FeedTask
- task = tf_stg.DeleteVOpt(self.adapter, self.instance, stg_ftsk='ftsk')
- task.execute()
- self.mock_cfg_drv.assert_called_once_with(self.adapter)
- self.mock_mb.dlt_vopt.assert_called_once_with(
- self.instance, stg_ftsk='ftsk')
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DeleteVOpt(self.adapter, self.instance)
- tf.assert_called_once_with(name='vopt_delete')
-
- def test_delete_disk(self):
- stor_adpt_mappings = mock.Mock()
-
- task = tf_stg.DeleteDisk(self.disk_dvr)
- task.execute(stor_adpt_mappings)
- self.disk_dvr.delete_disks.assert_called_once_with(stor_adpt_mappings)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DeleteDisk(self.disk_dvr)
- tf.assert_called_once_with(
- name='delete_disk', requires=['stor_adpt_mappings'])
-
- def test_detach_disk(self):
- task = tf_stg.DetachDisk(self.disk_dvr, self.instance)
- task.execute()
- self.disk_dvr.detach_disk.assert_called_once_with(self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DetachDisk(self.disk_dvr, self.instance)
- tf.assert_called_once_with(
- name='detach_disk', provides='stor_adpt_mappings')
-
- def test_attach_disk(self):
- stg_ftsk = mock.Mock()
- disk_dev_info = mock.Mock()
-
- task = tf_stg.AttachDisk(self.disk_dvr, self.instance, stg_ftsk)
- task.execute(disk_dev_info)
- self.disk_dvr.attach_disk.assert_called_once_with(
- self.instance, disk_dev_info, stg_ftsk)
-
- task.revert(disk_dev_info, 'result', 'flow failures')
- self.disk_dvr.detach_disk.assert_called_once_with(self.instance)
-
- self.disk_dvr.detach_disk.reset_mock()
-
- # Revert failures are not raised
- self.disk_dvr.detach_disk.side_effect = pvm_exc.TimeoutError(
- "timed out")
- task.revert(disk_dev_info, 'result', 'flow failures')
- self.disk_dvr.detach_disk.assert_called_once_with(self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.AttachDisk(self.disk_dvr, self.instance, stg_ftsk)
- tf.assert_called_once_with(
- name='attach_disk', requires=['disk_dev_info'])
-
- def test_create_disk_for_img(self):
- image_meta = mock.Mock()
-
- task = tf_stg.CreateDiskForImg(
- self.disk_dvr, self.context, self.instance, image_meta)
- task.execute()
- self.disk_dvr.create_disk_from_image.assert_called_once_with(
- self.context, self.instance, image_meta)
-
- task.revert('result', 'flow failures')
- self.disk_dvr.delete_disks.assert_called_once_with(['result'])
-
- self.disk_dvr.delete_disks.reset_mock()
-
- # Delete not called if no result
- task.revert(None, None)
- self.disk_dvr.delete_disks.assert_not_called()
-
- # Delete exception doesn't raise
- self.disk_dvr.delete_disks.side_effect = pvm_exc.TimeoutError(
- "timed out")
- task.revert('result', 'flow failures')
- self.disk_dvr.delete_disks.assert_called_once_with(['result'])
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.CreateDiskForImg(
- self.disk_dvr, self.context, self.instance, image_meta)
- tf.assert_called_once_with(
- name='create_disk_from_img', provides='disk_dev_info')
-
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('nova.virt.powervm.mgmt.discover_vscsi_disk', autospec=True)
- @mock.patch('nova.virt.powervm.mgmt.remove_block_dev', autospec=True)
- def test_instance_disk_to_mgmt(self, mock_rm, mock_discover, mock_find):
- mock_discover.return_value = '/dev/disk'
- mock_instance = mock.Mock()
- mock_instance.name = 'instance_name'
- mock_stg = mock.Mock()
- mock_stg.name = 'stg_name'
- mock_vwrap = mock.Mock()
- mock_vwrap.name = 'vios_name'
- mock_vwrap.uuid = 'vios_uuid'
- mock_vwrap.scsi_mappings = ['mapping1']
-
- disk_dvr = mock.MagicMock()
- disk_dvr.mp_uuid = 'mp_uuid'
- disk_dvr.connect_instance_disk_to_mgmt.return_value = (mock_stg,
- mock_vwrap)
-
- def reset_mocks():
- mock_find.reset_mock()
- mock_discover.reset_mock()
- mock_rm.reset_mock()
- disk_dvr.reset_mock()
-
- # Good path - find_maps returns one result
- mock_find.return_value = ['one_mapping']
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertEqual('instance_disk_to_mgmt', tf.name)
- self.assertEqual((mock_stg, mock_vwrap, '/dev/disk'), tf.execute())
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
- stg_elem=mock_stg)
- mock_discover.assert_called_with('one_mapping')
- tf.revert('result', 'failures')
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- mock_rm.assert_called_with('/dev/disk')
-
- # Good path - find_maps returns >1 result
- reset_mocks()
- mock_find.return_value = ['first_mapping', 'second_mapping']
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertEqual((mock_stg, mock_vwrap, '/dev/disk'), tf.execute())
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
- stg_elem=mock_stg)
- mock_discover.assert_called_with('first_mapping')
- tf.revert('result', 'failures')
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- mock_rm.assert_called_with('/dev/disk')
-
- # Management Partition is VIOS and NovaLink hosted storage
- reset_mocks()
- disk_dvr._vios_uuids = ['mp_uuid']
- dev_name = '/dev/vg/fake_name'
- disk_dvr.get_bootdisk_path.return_value = dev_name
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertEqual((None, None, dev_name), tf.execute())
-
- # Management Partition is VIOS and not NovaLink hosted storage
- reset_mocks()
- disk_dvr._vios_uuids = ['mp_uuid']
- disk_dvr.get_bootdisk_path.return_value = None
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- tf.execute()
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
-
- # Bad path - find_maps returns no results
- reset_mocks()
- mock_find.return_value = []
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertRaises(exception.NewMgmtMappingNotFoundException,
- tf.execute)
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- # find_maps was still called
- mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
- stg_elem=mock_stg)
- # discover_vscsi_disk didn't get called
- self.assertEqual(0, mock_discover.call_count)
- tf.revert('result', 'failures')
- # disconnect_disk_from_mgmt got called
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- # ...but remove_block_dev did not.
- self.assertEqual(0, mock_rm.call_count)
-
- # Bad path - connect raises
- reset_mocks()
- disk_dvr.connect_instance_disk_to_mgmt.side_effect = (
- exception.InstanceDiskMappingFailed(instance_name='inst_name'))
- tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- self.assertRaises(exception.InstanceDiskMappingFailed, tf.execute)
- disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
- mock_instance)
- self.assertEqual(0, mock_find.call_count)
- self.assertEqual(0, mock_discover.call_count)
- # revert shouldn't call disconnect or remove
- tf.revert('result', 'failures')
- self.assertEqual(0, disk_dvr.disconnect_disk_from_mgmt.call_count)
- self.assertEqual(0, mock_rm.call_count)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
- tf.assert_called_once_with(
- name='instance_disk_to_mgmt',
- provides=['stg_elem', 'vios_wrap', 'disk_path'])
-
- @mock.patch('nova.virt.powervm.mgmt.remove_block_dev', autospec=True)
- def test_remove_instance_disk_from_mgmt(self, mock_rm):
- disk_dvr = mock.MagicMock()
- mock_instance = mock.Mock()
- mock_instance.name = 'instance_name'
- mock_stg = mock.Mock()
- mock_stg.name = 'stg_name'
- mock_vwrap = mock.Mock()
- mock_vwrap.name = 'vios_name'
- mock_vwrap.uuid = 'vios_uuid'
-
- tf = tf_stg.RemoveInstanceDiskFromMgmt(disk_dvr, mock_instance)
- self.assertEqual('remove_inst_disk_from_mgmt', tf.name)
-
- # Boot disk not mapped to mgmt partition
- tf.execute(None, mock_vwrap, '/dev/disk')
- self.assertEqual(disk_dvr.disconnect_disk_from_mgmt.call_count, 0)
- self.assertEqual(mock_rm.call_count, 0)
-
- # Boot disk mapped to mgmt partition
- tf.execute(mock_stg, mock_vwrap, '/dev/disk')
- disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
- 'stg_name')
- mock_rm.assert_called_with('/dev/disk')
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.RemoveInstanceDiskFromMgmt(disk_dvr, mock_instance)
- tf.assert_called_once_with(
- name='remove_inst_disk_from_mgmt',
- requires=['stg_elem', 'vios_wrap', 'disk_path'])
-
- def test_attach_volume(self):
- vol_dvr = mock.Mock(connection_info={'data': {'volume_id': '1'}})
-
- task = tf_stg.AttachVolume(vol_dvr)
- task.execute()
- vol_dvr.attach_volume.assert_called_once_with()
-
- task.revert('result', 'flow failures')
- vol_dvr.reset_stg_ftsk.assert_called_once_with()
- vol_dvr.detach_volume.assert_called_once_with()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.AttachVolume(vol_dvr)
- tf.assert_called_once_with(name='attach_vol_1')
-
- def test_detach_volume(self):
- vol_dvr = mock.Mock(connection_info={'data': {'volume_id': '1'}})
-
- task = tf_stg.DetachVolume(vol_dvr)
- task.execute()
- vol_dvr.detach_volume.assert_called_once_with()
-
- task.revert('result', 'flow failures')
- vol_dvr.reset_stg_ftsk.assert_called_once_with()
- vol_dvr.detach_volume.assert_called_once_with()
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_stg.DetachVolume(vol_dvr)
- tf.assert_called_once_with(name='detach_vol_1')
diff --git a/nova/tests/unit/virt/powervm/tasks/test_vm.py b/nova/tests/unit/virt/powervm/tasks/test_vm.py
deleted file mode 100644
index fc68646acf..0000000000
--- a/nova/tests/unit/virt/powervm/tasks/test_vm.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from taskflow import engines as tf_eng
-from taskflow.patterns import linear_flow as tf_lf
-from taskflow import task as tf_tsk
-
-from nova import exception
-from nova import test
-from nova.virt.powervm.tasks import vm as tf_vm
-
-
-class TestVMTasks(test.NoDBTestCase):
- def setUp(self):
- super(TestVMTasks, self).setUp()
- self.apt = mock.Mock()
- self.instance = mock.Mock()
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
- def test_get(self, mock_get_wrap):
- get = tf_vm.Get(self.apt, self.instance)
- get.execute()
- mock_get_wrap.assert_called_once_with(self.apt, self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.Get(self.apt, self.instance)
- tf.assert_called_once_with(name='get_vm', provides='lpar_wrap')
-
- @mock.patch('pypowervm.tasks.storage.add_lpar_storage_scrub_tasks',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.create_lpar')
- def test_create(self, mock_vm_crt, mock_stg):
- lpar_entry = mock.Mock()
-
- # Test create with normal (non-recreate) ftsk
- crt = tf_vm.Create(self.apt, 'host_wrapper', self.instance, 'ftsk')
- mock_vm_crt.return_value = lpar_entry
- crt.execute()
-
- mock_vm_crt.assert_called_once_with(self.apt, 'host_wrapper',
- self.instance)
-
- mock_stg.assert_called_once_with(
- [lpar_entry.id], 'ftsk', lpars_exist=True)
- mock_stg.assert_called_once_with([mock_vm_crt.return_value.id], 'ftsk',
- lpars_exist=True)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.Create(self.apt, 'host_wrapper', self.instance, 'ftsk')
- tf.assert_called_once_with(name='crt_vm', provides='lpar_wrap')
-
- @mock.patch('nova.virt.powervm.vm.power_on')
- def test_power_on(self, mock_pwron):
- pwron = tf_vm.PowerOn(self.apt, self.instance)
- pwron.execute()
- mock_pwron.assert_called_once_with(self.apt, self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.PowerOn(self.apt, self.instance)
- tf.assert_called_once_with(name='pwr_vm')
-
- @mock.patch('nova.virt.powervm.vm.power_on')
- @mock.patch('nova.virt.powervm.vm.power_off')
- def test_power_on_revert(self, mock_pwroff, mock_pwron):
- flow = tf_lf.Flow('revert_power_on')
- pwron = tf_vm.PowerOn(self.apt, self.instance)
- flow.add(pwron)
-
- # Dummy Task that fails, triggering flow revert
- def failure(*a, **k):
- raise ValueError()
- flow.add(tf_tsk.FunctorTask(failure))
-
- # When PowerOn.execute doesn't fail, revert calls power_off
- self.assertRaises(ValueError, tf_eng.run, flow)
- mock_pwron.assert_called_once_with(self.apt, self.instance)
- mock_pwroff.assert_called_once_with(self.apt, self.instance,
- force_immediate=True)
-
- mock_pwron.reset_mock()
- mock_pwroff.reset_mock()
-
- # When PowerOn.execute fails, revert doesn't call power_off
- mock_pwron.side_effect = exception.NovaException()
- self.assertRaises(exception.NovaException, tf_eng.run, flow)
- mock_pwron.assert_called_once_with(self.apt, self.instance)
- mock_pwroff.assert_not_called()
-
- @mock.patch('nova.virt.powervm.vm.power_off')
- def test_power_off(self, mock_pwroff):
- # Default force_immediate
- pwroff = tf_vm.PowerOff(self.apt, self.instance)
- pwroff.execute()
- mock_pwroff.assert_called_once_with(self.apt, self.instance,
- force_immediate=False)
-
- mock_pwroff.reset_mock()
-
- # Explicit force_immediate
- pwroff = tf_vm.PowerOff(self.apt, self.instance, force_immediate=True)
- pwroff.execute()
- mock_pwroff.assert_called_once_with(self.apt, self.instance,
- force_immediate=True)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.PowerOff(self.apt, self.instance)
- tf.assert_called_once_with(name='pwr_off_vm')
-
- @mock.patch('nova.virt.powervm.vm.delete_lpar')
- def test_delete(self, mock_dlt):
- delete = tf_vm.Delete(self.apt, self.instance)
- delete.execute()
- mock_dlt.assert_called_once_with(self.apt, self.instance)
-
- # Validate args on taskflow.task.Task instantiation
- with mock.patch('taskflow.task.Task.__init__') as tf:
- tf_vm.Delete(self.apt, self.instance)
- tf.assert_called_once_with(name='dlt_vm')
diff --git a/nova/tests/unit/virt/powervm/test_driver.py b/nova/tests/unit/virt/powervm/test_driver.py
deleted file mode 100644
index 025d823d15..0000000000
--- a/nova/tests/unit/virt/powervm/test_driver.py
+++ /dev/null
@@ -1,649 +0,0 @@
-# Copyright 2016, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import fixtures
-import mock
-from oslo_serialization import jsonutils
-from oslo_utils.fixture import uuidsentinel as uuids
-from pypowervm import const as pvm_const
-from pypowervm import exceptions as pvm_exc
-from pypowervm.helpers import log_helper as pvm_hlp_log
-from pypowervm.helpers import vios_busy as pvm_hlp_vbusy
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import block_device as nova_block_device
-from nova.compute import provider_tree
-from nova import conf as cfg
-from nova import exception
-from nova.objects import block_device as bdmobj
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt import block_device as nova_virt_bdm
-from nova.virt import driver as nova_driver
-from nova.virt.driver import ComputeDriver
-from nova.virt import hardware
-from nova.virt.powervm.disk import ssp
-from nova.virt.powervm import driver
-
-CONF = cfg.CONF
-
-
-class TestPowerVMDriver(test.NoDBTestCase):
-
- def setUp(self):
- super(TestPowerVMDriver, self).setUp()
- self.drv = driver.PowerVMDriver('virtapi')
- self.adp = self.useFixture(fixtures.MockPatch(
- 'pypowervm.adapter.Adapter', autospec=True)).mock
- self.drv.adapter = self.adp
- self.sess = self.useFixture(fixtures.MockPatch(
- 'pypowervm.adapter.Session', autospec=True)).mock
-
- self.pwron = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.power_on')).mock
- self.pwroff = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.power_off')).mock
-
- # Create an instance to test with
- self.inst = powervm.TEST_INSTANCE
-
- def test_driver_capabilities(self):
- """Test the driver capabilities."""
- # check that the driver reports all capabilities
- self.assertEqual(set(ComputeDriver.capabilities),
- set(self.drv.capabilities))
- # check the values for each capability
- self.assertFalse(self.drv.capabilities['has_imagecache'])
- self.assertFalse(self.drv.capabilities['supports_evacuate'])
- self.assertFalse(
- self.drv.capabilities['supports_migrate_to_same_host'])
- self.assertTrue(self.drv.capabilities['supports_attach_interface'])
- self.assertFalse(self.drv.capabilities['supports_device_tagging'])
- self.assertFalse(
- self.drv.capabilities['supports_tagged_attach_interface'])
- self.assertFalse(
- self.drv.capabilities['supports_tagged_attach_volume'])
- self.assertTrue(self.drv.capabilities['supports_extend_volume'])
- self.assertFalse(self.drv.capabilities['supports_multiattach'])
-
- @mock.patch('nova.image.glance.API')
- @mock.patch('pypowervm.tasks.storage.ComprehensiveScrub', autospec=True)
- @mock.patch('oslo_utils.importutils.import_object_ns', autospec=True)
- @mock.patch('pypowervm.wrappers.managed_system.System', autospec=True)
- @mock.patch('pypowervm.tasks.partition.validate_vios_ready', autospec=True)
- def test_init_host(self, mock_vvr, mock_sys, mock_import, mock_scrub,
- mock_img):
- mock_hostw = mock.Mock(uuid='uuid')
- mock_sys.get.return_value = [mock_hostw]
- self.drv.init_host('host')
- self.sess.assert_called_once_with(conn_tries=60)
- self.adp.assert_called_once_with(
- self.sess.return_value, helpers=[
- pvm_hlp_log.log_helper, pvm_hlp_vbusy.vios_busy_retry_helper])
- mock_vvr.assert_called_once_with(self.drv.adapter)
- mock_sys.get.assert_called_once_with(self.drv.adapter)
- self.assertEqual(mock_hostw, self.drv.host_wrapper)
- mock_scrub.assert_called_once_with(self.drv.adapter)
- mock_scrub.return_value.execute.assert_called_once_with()
- mock_import.assert_called_once_with(
- 'nova.virt.powervm.disk', 'localdisk.LocalStorage',
- self.drv.adapter, 'uuid')
- self.assertEqual(mock_import.return_value, self.drv.disk_dvr)
- mock_img.assert_called_once_with()
- self.assertEqual(mock_img.return_value, self.drv.image_api)
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('nova.virt.powervm.vm.get_vm_qp')
- @mock.patch('nova.virt.powervm.vm._translate_vm_state')
- def test_get_info(self, mock_tx_state, mock_qp, mock_uuid):
- mock_tx_state.return_value = 'fake-state'
- self.assertEqual(hardware.InstanceInfo('fake-state'),
- self.drv.get_info('inst'))
- mock_uuid.assert_called_once_with('inst')
- mock_qp.assert_called_once_with(
- self.drv.adapter, mock_uuid.return_value, 'PartitionState')
- mock_tx_state.assert_called_once_with(mock_qp.return_value)
-
- @mock.patch('nova.virt.powervm.vm.get_lpar_names')
- def test_list_instances(self, mock_names):
- mock_names.return_value = ['one', 'two', 'three']
- self.assertEqual(['one', 'two', 'three'], self.drv.list_instances())
- mock_names.assert_called_once_with(self.adp)
-
- def test_get_available_nodes(self):
- self.flags(host='hostname')
- self.assertEqual(['hostname'], self.drv.get_available_nodes('node'))
-
- @mock.patch('pypowervm.wrappers.managed_system.System', autospec=True)
- @mock.patch('nova.virt.powervm.host.build_host_resource_from_ms')
- def test_get_available_resource(self, mock_bhrfm, mock_sys):
- mock_sys.get.return_value = ['sys']
- mock_bhrfm.return_value = {'foo': 'bar'}
- self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
- instance=True)
- self.assertEqual(
- {'foo': 'bar', 'local_gb': self.drv.disk_dvr.capacity,
- 'local_gb_used': self.drv.disk_dvr.capacity_used},
- self.drv.get_available_resource('node'))
- mock_sys.get.assert_called_once_with(self.adp)
- mock_bhrfm.assert_called_once_with('sys')
- self.assertEqual('sys', self.drv.host_wrapper)
-
- @contextlib.contextmanager
- def _update_provider_tree(self, allocations=None):
- """Host resource dict gets converted properly to provider tree inv."""
-
- with mock.patch('nova.virt.powervm.host.'
- 'build_host_resource_from_ms') as mock_bhrfm:
- mock_bhrfm.return_value = {
- 'vcpus': 8,
- 'memory_mb': 2048,
- }
- self.drv.host_wrapper = 'host_wrapper'
- # Validate that this gets converted to int with floor
- self.drv.disk_dvr = mock.Mock(capacity=2091.8)
- exp_inv = {
- 'VCPU': {
- 'total': 8,
- 'max_unit': 8,
- 'allocation_ratio': 16.0,
- 'reserved': 0,
- },
- 'MEMORY_MB': {
- 'total': 2048,
- 'max_unit': 2048,
- 'allocation_ratio': 1.5,
- 'reserved': 512,
- },
- 'DISK_GB': {
- 'total': 2091,
- 'max_unit': 2091,
- 'allocation_ratio': 1.0,
- 'reserved': 0,
- },
- }
- ptree = provider_tree.ProviderTree()
- ptree.new_root('compute_host', uuids.cn)
- # Let the caller muck with these
- yield ptree, exp_inv
- self.drv.update_provider_tree(ptree, 'compute_host',
- allocations=allocations)
- self.assertEqual(exp_inv, ptree.data('compute_host').inventory)
- mock_bhrfm.assert_called_once_with('host_wrapper')
-
- def test_update_provider_tree(self):
- # Basic: no inventory already on the provider, no extra providers, no
- # aggregates or traits.
- with self._update_provider_tree():
- pass
-
- def test_update_provider_tree_ignore_allocations(self):
- with self._update_provider_tree(allocations="This is ignored"):
- pass
-
- def test_update_provider_tree_conf_overrides(self):
- # Non-default CONF values for allocation ratios and reserved.
- self.flags(cpu_allocation_ratio=12.3,
- reserved_host_cpus=4,
- ram_allocation_ratio=4.5,
- reserved_host_memory_mb=32,
- disk_allocation_ratio=6.7,
- # This gets int(ceil)'d
- reserved_host_disk_mb=5432.1)
- with self._update_provider_tree() as (_, exp_inv):
- exp_inv['VCPU']['allocation_ratio'] = 12.3
- exp_inv['VCPU']['reserved'] = 4
- exp_inv['MEMORY_MB']['allocation_ratio'] = 4.5
- exp_inv['MEMORY_MB']['reserved'] = 32
- exp_inv['DISK_GB']['allocation_ratio'] = 6.7
- exp_inv['DISK_GB']['reserved'] = 6
-
- def test_update_provider_tree_complex_ptree(self):
- # Overrides inventory already on the provider; leaves other providers
- # and aggregates/traits alone.
- with self._update_provider_tree() as (ptree, exp_inv):
- ptree.update_inventory('compute_host', {
- # these should get blown away
- 'VCPU': {
- 'total': 16,
- 'max_unit': 2,
- 'allocation_ratio': 1.0,
- 'reserved': 10,
- },
- 'CUSTOM_BOGUS': {
- 'total': 1234,
- }
- })
- ptree.update_aggregates('compute_host',
- [uuids.ss_agg, uuids.other_agg])
- ptree.update_traits('compute_host', ['CUSTOM_FOO', 'CUSTOM_BAR'])
- ptree.new_root('ssp', uuids.ssp)
- ptree.update_inventory('ssp', {'sentinel': 'inventory',
- 'for': 'ssp'})
- ptree.update_aggregates('ssp', [uuids.ss_agg])
- ptree.new_child('sriov', 'compute_host', uuid=uuids.sriov)
- # Since CONF.cpu_allocation_ratio is not set and this is not
- # the initial upt call (so CONF.initial_cpu_allocation_ratio would
- # be used), the existing allocation ratio value from the tree is
- # used.
- exp_inv['VCPU']['allocation_ratio'] = 1.0
-
- # Make sure the compute's agg and traits were left alone
- cndata = ptree.data('compute_host')
- self.assertEqual(set([uuids.ss_agg, uuids.other_agg]),
- cndata.aggregates)
- self.assertEqual(set(['CUSTOM_FOO', 'CUSTOM_BAR']), cndata.traits)
- # And the other providers were left alone
- self.assertEqual(set([uuids.cn, uuids.ssp, uuids.sriov]),
- set(ptree.get_provider_uuids()))
- # ...including the ssp's aggregates
- self.assertEqual(set([uuids.ss_agg]), ptree.data('ssp').aggregates)
-
- @mock.patch('nova.virt.powervm.tasks.storage.AttachVolume.execute')
- @mock.patch('nova.virt.powervm.tasks.network.PlugMgmtVif.execute')
- @mock.patch('nova.virt.powervm.tasks.network.PlugVifs.execute')
- @mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM')
- @mock.patch('nova.virt.configdrive.required_by')
- @mock.patch('nova.virt.powervm.vm.create_lpar')
- @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
- autospec=True)
- @mock.patch('pypowervm.tasks.storage.add_lpar_storage_scrub_tasks',
- autospec=True)
- def test_spawn_ops(self, mock_scrub, mock_bldftsk, mock_crt_lpar,
- mock_cdrb, mock_cfg_drv, mock_plug_vifs,
- mock_plug_mgmt_vif, mock_attach_vol):
- """Validates the 'typical' spawn flow of the spawn of an instance. """
- mock_cdrb.return_value = True
- self.drv.host_wrapper = mock.Mock()
- self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
- instance=True)
- mock_ftsk = pvm_tx.FeedTask('fake', [mock.Mock(spec=pvm_vios.VIOS)])
- mock_bldftsk.return_value = mock_ftsk
- block_device_info = self._fake_bdms()
- self.drv.spawn('context', self.inst, 'img_meta', 'files', 'password',
- 'allocs', network_info='netinfo',
- block_device_info=block_device_info)
- mock_crt_lpar.assert_called_once_with(
- self.adp, self.drv.host_wrapper, self.inst)
- mock_bldftsk.assert_called_once_with(
- self.adp, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
- self.assertTrue(mock_plug_vifs.called)
- self.assertTrue(mock_plug_mgmt_vif.called)
- mock_scrub.assert_called_once_with(
- [mock_crt_lpar.return_value.id], mock_ftsk, lpars_exist=True)
- self.drv.disk_dvr.create_disk_from_image.assert_called_once_with(
- 'context', self.inst, 'img_meta')
- self.drv.disk_dvr.attach_disk.assert_called_once_with(
- self.inst, self.drv.disk_dvr.create_disk_from_image.return_value,
- mock_ftsk)
- self.assertEqual(2, mock_attach_vol.call_count)
- mock_cfg_drv.assert_called_once_with(self.adp)
- mock_cfg_drv.return_value.create_cfg_drv_vopt.assert_called_once_with(
- self.inst, 'files', 'netinfo', mock_ftsk, admin_pass='password',
- mgmt_cna=mock.ANY)
- self.pwron.assert_called_once_with(self.adp, self.inst)
-
- mock_cfg_drv.reset_mock()
- mock_attach_vol.reset_mock()
-
- # No config drive, no bdms
- mock_cdrb.return_value = False
- self.drv.spawn('context', self.inst, 'img_meta', 'files', 'password',
- 'allocs')
- mock_cfg_drv.assert_not_called()
- mock_attach_vol.assert_not_called()
-
- @mock.patch('nova.virt.powervm.tasks.storage.DetachVolume.execute')
- @mock.patch('nova.virt.powervm.tasks.network.UnplugVifs.execute')
- @mock.patch('nova.virt.powervm.vm.delete_lpar')
- @mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM')
- @mock.patch('nova.virt.configdrive.required_by')
- @mock.patch('pypowervm.tasks.partition.build_active_vio_feed_task',
- autospec=True)
- def test_destroy(self, mock_bldftsk, mock_cdrb, mock_cfgdrv,
- mock_dlt_lpar, mock_unplug, mock_detach_vol):
- """Validates PowerVM destroy."""
- self.drv.host_wrapper = mock.Mock()
- self.drv.disk_dvr = mock.create_autospec(ssp.SSPDiskAdapter,
- instance=True)
-
- mock_ftsk = pvm_tx.FeedTask('fake', [mock.Mock(spec=pvm_vios.VIOS)])
- mock_bldftsk.return_value = mock_ftsk
- block_device_info = self._fake_bdms()
-
- # Good path, with config drive, destroy disks
- mock_cdrb.return_value = True
- self.drv.destroy('context', self.inst, [],
- block_device_info=block_device_info)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=True)
- mock_bldftsk.assert_called_once_with(
- self.adp, xag=[pvm_const.XAG.VIO_SMAP])
- mock_unplug.assert_called_once()
- mock_cdrb.assert_called_once_with(self.inst)
- mock_cfgdrv.assert_called_once_with(self.adp)
- mock_cfgdrv.return_value.dlt_vopt.assert_called_once_with(
- self.inst, stg_ftsk=mock_bldftsk.return_value)
- self.assertEqual(2, mock_detach_vol.call_count)
- self.drv.disk_dvr.detach_disk.assert_called_once_with(
- self.inst)
- self.drv.disk_dvr.delete_disks.assert_called_once_with(
- self.drv.disk_dvr.detach_disk.return_value)
- mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)
-
- self.pwroff.reset_mock()
- mock_bldftsk.reset_mock()
- mock_unplug.reset_mock()
- mock_cdrb.reset_mock()
- mock_cfgdrv.reset_mock()
- self.drv.disk_dvr.detach_disk.reset_mock()
- self.drv.disk_dvr.delete_disks.reset_mock()
- mock_detach_vol.reset_mock()
- mock_dlt_lpar.reset_mock()
-
- # No config drive, preserve disks, no block device info
- mock_cdrb.return_value = False
- self.drv.destroy('context', self.inst, [], block_device_info={},
- destroy_disks=False)
- mock_cfgdrv.return_value.dlt_vopt.assert_not_called()
- mock_detach_vol.assert_not_called()
- self.drv.disk_dvr.delete_disks.assert_not_called()
-
- # Non-forced power_off, since preserving disks
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=False)
- mock_bldftsk.assert_called_once_with(
- self.adp, xag=[pvm_const.XAG.VIO_SMAP])
- mock_unplug.assert_called_once()
- mock_cdrb.assert_called_once_with(self.inst)
- mock_cfgdrv.assert_not_called()
- mock_cfgdrv.return_value.dlt_vopt.assert_not_called()
- self.drv.disk_dvr.detach_disk.assert_called_once_with(
- self.inst)
- self.drv.disk_dvr.delete_disks.assert_not_called()
- mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)
-
- self.pwroff.reset_mock()
- mock_bldftsk.reset_mock()
- mock_unplug.reset_mock()
- mock_cdrb.reset_mock()
- mock_cfgdrv.reset_mock()
- self.drv.disk_dvr.detach_disk.reset_mock()
- self.drv.disk_dvr.delete_disks.reset_mock()
- mock_dlt_lpar.reset_mock()
-
- # InstanceNotFound exception, non-forced
- self.pwroff.side_effect = exception.InstanceNotFound(
- instance_id='something')
- self.drv.destroy('context', self.inst, [], block_device_info={},
- destroy_disks=False)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=False)
- self.drv.disk_dvr.detach_disk.assert_not_called()
- mock_unplug.assert_not_called()
- self.drv.disk_dvr.delete_disks.assert_not_called()
- mock_dlt_lpar.assert_not_called()
-
- self.pwroff.reset_mock()
- self.pwroff.side_effect = None
- mock_unplug.reset_mock()
-
- # Convertible (PowerVM) exception
- mock_dlt_lpar.side_effect = pvm_exc.TimeoutError("Timed out")
- self.assertRaises(exception.InstanceTerminationFailure,
- self.drv.destroy, 'context', self.inst, [],
- block_device_info={})
-
- # Everything got called
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=True)
- mock_unplug.assert_called_once()
- self.drv.disk_dvr.detach_disk.assert_called_once_with(self.inst)
- self.drv.disk_dvr.delete_disks.assert_called_once_with(
- self.drv.disk_dvr.detach_disk.return_value)
- mock_dlt_lpar.assert_called_once_with(self.adp, self.inst)
-
- # Other random exception raises directly
- mock_dlt_lpar.side_effect = ValueError()
- self.assertRaises(ValueError,
- self.drv.destroy, 'context', self.inst, [],
- block_device_info={})
-
- @mock.patch('nova.virt.powervm.tasks.image.UpdateTaskState.'
- 'execute', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.storage.InstanceDiskToMgmt.'
- 'execute', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.image.StreamToGlance.execute')
- @mock.patch('nova.virt.powervm.tasks.storage.RemoveInstanceDiskFromMgmt.'
- 'execute')
- def test_snapshot(self, mock_rm, mock_stream, mock_conn, mock_update):
- self.drv.disk_dvr = mock.Mock()
- self.drv.image_api = mock.Mock()
- mock_conn.return_value = 'stg_elem', 'vios_wrap', 'disk_path'
- self.drv.snapshot('context', self.inst, 'image_id',
- 'update_task_state')
- self.assertEqual(2, mock_update.call_count)
- self.assertEqual(1, mock_conn.call_count)
- mock_stream.assert_called_once_with(disk_path='disk_path')
- mock_rm.assert_called_once_with(
- stg_elem='stg_elem', vios_wrap='vios_wrap', disk_path='disk_path')
-
- self.drv.disk_dvr.capabilities = {'snapshot': False}
- self.assertRaises(exception.NotSupportedWithOption, self.drv.snapshot,
- 'context', self.inst, 'image_id', 'update_task_state')
-
- def test_power_on(self):
- self.drv.power_on('context', self.inst, 'network_info')
- self.pwron.assert_called_once_with(self.adp, self.inst)
-
- def test_power_off(self):
- self.drv.power_off(self.inst)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=True, timeout=None)
-
- def test_power_off_timeout(self):
- # Long timeout (retry interval means nothing on powervm)
- self.drv.power_off(self.inst, timeout=500, retry_interval=10)
- self.pwroff.assert_called_once_with(
- self.adp, self.inst, force_immediate=False, timeout=500)
-
- @mock.patch('nova.virt.powervm.vm.reboot')
- def test_reboot_soft(self, mock_reboot):
- inst = mock.Mock()
- self.drv.reboot('context', inst, 'network_info', 'SOFT')
- mock_reboot.assert_called_once_with(self.adp, inst, False)
-
- @mock.patch('nova.virt.powervm.vm.reboot')
- def test_reboot_hard(self, mock_reboot):
- inst = mock.Mock()
- self.drv.reboot('context', inst, 'network_info', 'HARD')
- mock_reboot.assert_called_once_with(self.adp, inst, True)
-
- @mock.patch('nova.virt.powervm.driver.PowerVMDriver.plug_vifs')
- def test_attach_interface(self, mock_plug_vifs):
- self.drv.attach_interface('context', 'inst', 'image_meta', 'vif')
- mock_plug_vifs.assert_called_once_with('inst', ['vif'])
-
- @mock.patch('nova.virt.powervm.driver.PowerVMDriver.unplug_vifs')
- def test_detach_interface(self, mock_unplug_vifs):
- self.drv.detach_interface('context', 'inst', 'vif')
- mock_unplug_vifs.assert_called_once_with('inst', ['vif'])
-
- @mock.patch('nova.virt.powervm.tasks.vm.Get', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.base.run', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.network.PlugVifs', autospec=True)
- @mock.patch('taskflow.patterns.linear_flow.Flow', autospec=True)
- def test_plug_vifs(self, mock_tf, mock_plug_vifs, mock_tf_run, mock_get):
- # Successful plug
- mock_inst = mock.Mock()
- self.drv.plug_vifs(mock_inst, 'net_info')
- mock_get.assert_called_once_with(self.adp, mock_inst)
- mock_plug_vifs.assert_called_once_with(
- self.drv.virtapi, self.adp, mock_inst, 'net_info')
- add_calls = [mock.call(mock_get.return_value),
- mock.call(mock_plug_vifs.return_value)]
- mock_tf.return_value.add.assert_has_calls(add_calls)
- mock_tf_run.assert_called_once_with(
- mock_tf.return_value, instance=mock_inst)
-
- # InstanceNotFound and generic exception both raise
- mock_tf_run.side_effect = exception.InstanceNotFound('id')
- exc = self.assertRaises(exception.VirtualInterfacePlugException,
- self.drv.plug_vifs, mock_inst, 'net_info')
- self.assertIn('instance', str(exc))
- mock_tf_run.side_effect = Exception
- exc = self.assertRaises(exception.VirtualInterfacePlugException,
- self.drv.plug_vifs, mock_inst, 'net_info')
- self.assertIn('unexpected', str(exc))
-
- @mock.patch('nova.virt.powervm.tasks.base.run', autospec=True)
- @mock.patch('nova.virt.powervm.tasks.network.UnplugVifs', autospec=True)
- @mock.patch('taskflow.patterns.linear_flow.Flow', autospec=True)
- def test_unplug_vifs(self, mock_tf, mock_unplug_vifs, mock_tf_run):
- # Successful unplug
- mock_inst = mock.Mock()
- self.drv.unplug_vifs(mock_inst, 'net_info')
- mock_unplug_vifs.assert_called_once_with(self.adp, mock_inst,
- 'net_info')
- mock_tf.return_value.add.assert_called_once_with(
- mock_unplug_vifs.return_value)
- mock_tf_run.assert_called_once_with(mock_tf.return_value,
- instance=mock_inst)
-
- # InstanceNotFound should pass
- mock_tf_run.side_effect = exception.InstanceNotFound(instance_id='1')
- self.drv.unplug_vifs(mock_inst, 'net_info')
-
- # Raise InterfaceDetachFailed otherwise
- mock_tf_run.side_effect = Exception
- self.assertRaises(exception.InterfaceDetachFailed,
- self.drv.unplug_vifs, mock_inst, 'net_info')
-
- @mock.patch('pypowervm.tasks.vterm.open_remotable_vnc_vterm',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid',
- new=mock.Mock(return_value='uuid'))
- def test_get_vnc_console(self, mock_vterm):
- # Success
- mock_vterm.return_value = '10'
- resp = self.drv.get_vnc_console(mock.ANY, self.inst)
- self.assertEqual('127.0.0.1', resp.host)
- self.assertEqual('10', resp.port)
- self.assertEqual('uuid', resp.internal_access_path)
- mock_vterm.assert_called_once_with(
- mock.ANY, 'uuid', mock.ANY, vnc_path='uuid')
-
- # VNC failure - exception is raised directly
- mock_vterm.side_effect = pvm_exc.VNCBasedTerminalFailedToOpen(err='xx')
- self.assertRaises(pvm_exc.VNCBasedTerminalFailedToOpen,
- self.drv.get_vnc_console, mock.ANY, self.inst)
-
- # 404
- mock_vterm.side_effect = pvm_exc.HttpError(mock.Mock(status=404))
- self.assertRaises(exception.InstanceNotFound, self.drv.get_vnc_console,
- mock.ANY, self.inst)
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
- def test_attach_volume(self, mock_vscsi_adpt):
- """Validates the basic PowerVM attach volume."""
- # BDMs
- mock_bdm = self._fake_bdms()['block_device_mapping'][0]
-
- with mock.patch.object(self.inst, 'save') as mock_save:
- # Invoke the method.
- self.drv.attach_volume('context', mock_bdm.get('connection_info'),
- self.inst, mock.sentinel.stg_ftsk)
-
- # Verify the connect volume was invoked
- mock_vscsi_adpt.return_value.attach_volume.assert_called_once_with()
- mock_save.assert_called_once_with()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
- def test_detach_volume(self, mock_vscsi_adpt):
- """Validates the basic PowerVM detach volume."""
- # BDMs
- mock_bdm = self._fake_bdms()['block_device_mapping'][0]
-
- # Invoke the method, good path test.
- self.drv.detach_volume('context', mock_bdm.get('connection_info'),
- self.inst, mock.sentinel.stg_ftsk)
- # Verify the disconnect volume was invoked
- mock_vscsi_adpt.return_value.detach_volume.assert_called_once_with()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter')
- def test_extend_volume(self, mock_vscsi_adpt):
- mock_bdm = self._fake_bdms()['block_device_mapping'][0]
- self.drv.extend_volume(
- 'context', mock_bdm.get('connection_info'), self.inst, 0)
- mock_vscsi_adpt.return_value.extend_volume.assert_called_once_with()
-
- def test_vol_drv_iter(self):
- block_device_info = self._fake_bdms()
- bdms = nova_driver.block_device_info_get_mapping(block_device_info)
- vol_adpt = mock.Mock()
-
- def _get_results(bdms):
- # Patch so we get the same mock back each time.
- with mock.patch('nova.virt.powervm.volume.fcvscsi.'
- 'FCVscsiVolumeAdapter', return_value=vol_adpt):
- return [
- (bdm, vol_drv) for bdm, vol_drv in self.drv._vol_drv_iter(
- 'context', self.inst, bdms)]
-
- results = _get_results(bdms)
- self.assertEqual(
- 'fake_vol1',
- results[0][0]['connection_info']['data']['volume_id'])
- self.assertEqual(vol_adpt, results[0][1])
- self.assertEqual(
- 'fake_vol2',
- results[1][0]['connection_info']['data']['volume_id'])
- self.assertEqual(vol_adpt, results[1][1])
-
- # Test with empty bdms
- self.assertEqual([], _get_results([]))
-
- @staticmethod
- def _fake_bdms():
- def _fake_bdm(volume_id, target_lun):
- connection_info = {'driver_volume_type': 'fibre_channel',
- 'data': {'volume_id': volume_id,
- 'target_lun': target_lun,
- 'initiator_target_map':
- {'21000024F5': ['50050768']}}}
- mapping_dict = {'source_type': 'volume', 'volume_id': volume_id,
- 'destination_type': 'volume',
- 'connection_info':
- jsonutils.dumps(connection_info),
- }
- bdm_dict = nova_block_device.BlockDeviceDict(mapping_dict)
- bdm_obj = bdmobj.BlockDeviceMapping(**bdm_dict)
-
- return nova_virt_bdm.DriverVolumeBlockDevice(bdm_obj)
-
- bdm_list = [_fake_bdm('fake_vol1', 0), _fake_bdm('fake_vol2', 1)]
- block_device_info = {'block_device_mapping': bdm_list}
-
- return block_device_info
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.wwpns', autospec=True)
- def test_get_volume_connector(self, mock_wwpns):
- vol_connector = self.drv.get_volume_connector(mock.Mock())
- self.assertEqual(mock_wwpns.return_value, vol_connector['wwpns'])
- self.assertFalse(vol_connector['multipath'])
- self.assertEqual(vol_connector['host'], CONF.host)
- self.assertIsNone(vol_connector['initiator'])
diff --git a/nova/tests/unit/virt/powervm/test_host.py b/nova/tests/unit/virt/powervm/test_host.py
deleted file mode 100644
index 625e1f9c70..0000000000
--- a/nova/tests/unit/virt/powervm/test_host.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2016 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import mock
-from pypowervm.wrappers import managed_system as pvm_ms
-
-from nova import test
-from nova.virt.powervm import host as pvm_host
-
-
-class TestPowerVMHost(test.NoDBTestCase):
- def test_host_resources(self):
- # Create objects to test with
- ms_wrapper = mock.create_autospec(pvm_ms.System, spec_set=True)
- asio = mock.create_autospec(pvm_ms.ASIOConfig, spec_set=True)
- ms_wrapper.configure_mock(
- proc_units_configurable=500,
- proc_units_avail=500,
- memory_configurable=5242880,
- memory_free=5242752,
- memory_region_size='big',
- asio_config=asio)
- self.flags(host='the_hostname')
-
- # Run the actual test
- stats = pvm_host.build_host_resource_from_ms(ms_wrapper)
- self.assertIsNotNone(stats)
-
- # Check for the presence of fields
- fields = (('vcpus', 500), ('vcpus_used', 0),
- ('memory_mb', 5242880), ('memory_mb_used', 128),
- 'hypervisor_type', 'hypervisor_version',
- ('hypervisor_hostname', 'the_hostname'), 'cpu_info',
- 'supported_instances', 'stats')
- for fld in fields:
- if isinstance(fld, tuple):
- value = stats.get(fld[0], None)
- self.assertEqual(value, fld[1])
- else:
- value = stats.get(fld, None)
- self.assertIsNotNone(value)
- # Check for individual stats
- hstats = (('proc_units', '500.00'), ('proc_units_used', '0.00'))
- for stat in hstats:
- if isinstance(stat, tuple):
- value = stats['stats'].get(stat[0], None)
- self.assertEqual(value, stat[1])
- else:
- value = stats['stats'].get(stat, None)
- self.assertIsNotNone(value)
diff --git a/nova/tests/unit/virt/powervm/test_image.py b/nova/tests/unit/virt/powervm/test_image.py
deleted file mode 100644
index 2db33e6a0f..0000000000
--- a/nova/tests/unit/virt/powervm/test_image.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from nova import test
-from nova.virt.powervm import image
-
-
-class TestImage(test.TestCase):
-
- @mock.patch('nova.utils.temporary_chown', autospec=True)
- @mock.patch('nova.image.glance.API', autospec=True)
- def test_stream_blockdev_to_glance(self, mock_api, mock_chown):
- mock_open = mock.mock_open()
- with mock.patch('builtins.open', new=mock_open):
- image.stream_blockdev_to_glance('context', mock_api, 'image_id',
- 'metadata', '/dev/disk')
- mock_chown.assert_called_with('/dev/disk')
- mock_open.assert_called_with('/dev/disk', 'rb')
- mock_api.update.assert_called_with('context', 'image_id', 'metadata',
- mock_open.return_value)
-
- @mock.patch('nova.image.glance.API', autospec=True)
- def test_generate_snapshot_metadata(self, mock_api):
- mock_api.get.return_value = {'name': 'image_name'}
- mock_instance = mock.Mock()
- mock_instance.project_id = 'project_id'
- ret = image.generate_snapshot_metadata('context', mock_api, 'image_id',
- mock_instance)
- mock_api.get.assert_called_with('context', 'image_id')
- self.assertEqual({
- 'name': 'image_name',
- 'status': 'active',
- 'disk_format': 'raw',
- 'container_format': 'bare',
- 'properties': {
- 'image_location': 'snapshot',
- 'image_state': 'available',
- 'owner_id': 'project_id',
- }
- }, ret)
diff --git a/nova/tests/unit/virt/powervm/test_media.py b/nova/tests/unit/virt/powervm/test_media.py
deleted file mode 100644
index f98769e0de..0000000000
--- a/nova/tests/unit/virt/powervm/test_media.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# Copyright 2015, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from oslo_utils.fixture import uuidsentinel
-from pypowervm import const as pvm_const
-from pypowervm.tasks import scsi_mapper as tsk_map
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import network as pvm_net
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import test
-from nova.virt.powervm import media as m
-
-
-class TestConfigDrivePowerVM(test.NoDBTestCase):
- """Unit Tests for the ConfigDrivePowerVM class."""
-
- def setUp(self):
- super(TestConfigDrivePowerVM, self).setUp()
-
- self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
-
- self.validate_vopt = self.useFixture(fixtures.MockPatch(
- 'pypowervm.tasks.vopt.validate_vopt_repo_exists',
- autospec=True)).mock
- self.validate_vopt.return_value = 'vios_uuid', 'vg_uuid'
-
- @mock.patch('nova.api.metadata.base.InstanceMetadata')
- @mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
- def test_crt_cfg_dr_iso(self, mock_mkdrv, mock_meta):
- """Validates that the image creation method works."""
- cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
- self.assertTrue(self.validate_vopt.called)
- mock_instance = mock.MagicMock()
- mock_instance.uuid = uuidsentinel.inst_id
- mock_files = mock.MagicMock()
- mock_net = mock.MagicMock()
- iso_path = '/tmp/cfgdrv.iso'
- cfg_dr_builder._create_cfg_dr_iso(mock_instance, mock_files, mock_net,
- iso_path)
- self.assertEqual(mock_mkdrv.call_count, 1)
-
- # Test retry iso create
- mock_mkdrv.reset_mock()
- mock_mkdrv.side_effect = [OSError, mock_mkdrv]
- cfg_dr_builder._create_cfg_dr_iso(mock_instance, mock_files, mock_net,
- iso_path)
- self.assertEqual(mock_mkdrv.call_count, 2)
-
- @mock.patch('tempfile.NamedTemporaryFile')
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map')
- @mock.patch('os.path.getsize')
- @mock.patch('pypowervm.tasks.storage.upload_vopt')
- @mock.patch('nova.virt.powervm.media.ConfigDrivePowerVM.'
- '_create_cfg_dr_iso')
- def test_create_cfg_drv_vopt(self, mock_ccdi, mock_upl, mock_getsize,
- mock_addmap, mock_bldmap, mock_vm_id,
- mock_ntf):
- cfg_dr = m.ConfigDrivePowerVM(self.apt)
- mock_instance = mock.MagicMock()
- mock_instance.uuid = uuidsentinel.inst_id
- mock_upl.return_value = 'vopt', 'f_uuid'
- fh = mock_ntf.return_value.__enter__.return_value
- fh.name = 'iso_path'
- wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
- ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- ftsk.configure_mock(wrapper_tasks={'vios_uuid': wtsk})
-
- def test_afs(add_func):
- # Validate the internal add_func
- vio = mock.create_autospec(pvm_vios.VIOS)
- self.assertEqual(mock_addmap.return_value, add_func(vio))
- mock_vm_id.assert_called_once_with(mock_instance)
- mock_bldmap.assert_called_once_with(
- None, vio, mock_vm_id.return_value, 'vopt')
- mock_addmap.assert_called_once_with(vio, mock_bldmap.return_value)
- wtsk.add_functor_subtask.side_effect = test_afs
-
- # calculate expected file name
- expected_file_name = 'cfg_' + mock_instance.uuid.replace('-', '')
- allowed_len = pvm_const.MaxLen.VOPT_NAME - 4 # '.iso' is 4 chars
- expected_file_name = expected_file_name[:allowed_len] + '.iso'
-
- cfg_dr.create_cfg_drv_vopt(
- mock_instance, 'files', 'netinfo', ftsk, admin_pass='pass')
-
- mock_ntf.assert_called_once_with(mode='rb')
- mock_ccdi.assert_called_once_with(mock_instance, 'files', 'netinfo',
- 'iso_path', admin_pass='pass')
- mock_getsize.assert_called_once_with('iso_path')
- mock_upl.assert_called_once_with(self.apt, 'vios_uuid', fh,
- expected_file_name,
- mock_getsize.return_value)
- wtsk.add_functor_subtask.assert_called_once()
-
- def test_sanitize_network_info(self):
- network_info = [{'type': 'lbr'}, {'type': 'pvm_sea'},
- {'type': 'ovs'}]
-
- cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
-
- resp = cfg_dr_builder._sanitize_network_info(network_info)
- expected_ret = [{'type': 'vif'}, {'type': 'vif'},
- {'type': 'ovs'}]
- self.assertEqual(resp, expected_ret)
-
- @mock.patch('pypowervm.wrappers.storage.VG', autospec=True)
- @mock.patch('pypowervm.tasks.storage.rm_vg_storage', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS', autospec=True)
- @mock.patch('taskflow.task.FunctorTask', autospec=True)
- def test_dlt_vopt(self, mock_functask, mock_vios, mock_find_maps, mock_gmf,
- mock_uuid, mock_rmstg, mock_vg):
- cfg_dr = m.ConfigDrivePowerVM(self.apt)
- wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
- ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- ftsk.configure_mock(wrapper_tasks={'vios_uuid': wtsk})
-
- # Test with no media to remove
- mock_find_maps.return_value = []
- cfg_dr.dlt_vopt('inst', ftsk)
- mock_uuid.assert_called_once_with('inst')
- mock_gmf.assert_called_once_with(pvm_stg.VOptMedia)
- wtsk.add_functor_subtask.assert_called_once_with(
- tsk_map.remove_maps, mock_uuid.return_value,
- match_func=mock_gmf.return_value)
- ftsk.get_wrapper.assert_called_once_with('vios_uuid')
- mock_find_maps.assert_called_once_with(
- ftsk.get_wrapper.return_value.scsi_mappings,
- client_lpar_id=mock_uuid.return_value,
- match_func=mock_gmf.return_value)
- mock_functask.assert_not_called()
-
- # Test with media to remove
- mock_find_maps.return_value = [mock.Mock(backing_storage=media)
- for media in ['m1', 'm2']]
-
- def test_functor_task(rm_vopt):
- # Validate internal rm_vopt function
- rm_vopt()
- mock_vg.get.assert_called_once_with(
- self.apt, uuid='vg_uuid', parent_type=pvm_vios.VIOS,
- parent_uuid='vios_uuid')
- mock_rmstg.assert_called_once_with(
- mock_vg.get.return_value, vopts=['m1', 'm2'])
- return 'functor_task'
- mock_functask.side_effect = test_functor_task
-
- cfg_dr.dlt_vopt('inst', ftsk)
- mock_functask.assert_called_once()
- ftsk.add_post_execute.assert_called_once_with('functor_task')
-
- def test_mgmt_cna_to_vif(self):
- mock_cna = mock.Mock(spec=pvm_net.CNA, mac="FAD4433ED120")
-
- # Run
- cfg_dr_builder = m.ConfigDrivePowerVM(self.apt)
- vif = cfg_dr_builder._mgmt_cna_to_vif(mock_cna)
-
- # Validate
- self.assertEqual(vif.get('address'), "fa:d4:43:3e:d1:20")
- self.assertEqual(vif.get('id'), 'mgmt_vif')
- self.assertIsNotNone(vif.get('network'))
- self.assertEqual(1, len(vif.get('network').get('subnets')))
- subnet = vif.get('network').get('subnets')[0]
- self.assertEqual(6, subnet.get('version'))
- self.assertEqual('fe80::/64', subnet.get('cidr'))
- ip = subnet.get('ips')[0]
- self.assertEqual('fe80::f8d4:43ff:fe3e:d120', ip.get('address'))
-
- def test_mac_to_link_local(self):
- mac = 'fa:d4:43:3e:d1:20'
- self.assertEqual('fe80::f8d4:43ff:fe3e:d120',
- m.ConfigDrivePowerVM._mac_to_link_local(mac))
-
- mac = '00:00:00:00:00:00'
- self.assertEqual('fe80::0200:00ff:fe00:0000',
- m.ConfigDrivePowerVM._mac_to_link_local(mac))
-
- mac = 'ff:ff:ff:ff:ff:ff'
- self.assertEqual('fe80::fdff:ffff:feff:ffff',
- m.ConfigDrivePowerVM._mac_to_link_local(mac))
diff --git a/nova/tests/unit/virt/powervm/test_mgmt.py b/nova/tests/unit/virt/powervm/test_mgmt.py
deleted file mode 100644
index 5c0098ceeb..0000000000
--- a/nova/tests/unit/virt/powervm/test_mgmt.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-import retrying
-
-from nova import exception
-from nova import test
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.tests.test_utils import pvmhttp
-
-from nova.virt.powervm import mgmt
-
-LPAR_HTTPRESP_FILE = "lpar.txt"
-
-
-class TestMgmt(test.TestCase):
- def setUp(self):
- super(TestMgmt, self).setUp()
- self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
-
- lpar_http = pvmhttp.load_pvm_resp(LPAR_HTTPRESP_FILE, adapter=self.apt)
- self.assertIsNotNone(
- lpar_http, "Could not load %s " % LPAR_HTTPRESP_FILE)
-
- self.resp = lpar_http.response
-
- @mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
- def test_mgmt_uuid(self, mock_get_partition):
- mock_get_partition.return_value = mock.Mock(uuid='mock_mgmt')
- adpt = mock.Mock()
-
- # First run should call the partition only once
- self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
- mock_get_partition.assert_called_once_with(adpt)
-
- # But a subsequent call should effectively no-op
- mock_get_partition.reset_mock()
- self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
- self.assertEqual(mock_get_partition.call_count, 0)
-
- @mock.patch('glob.glob', autospec=True)
- @mock.patch('nova.privsep.path.writefile', autospec=True)
- @mock.patch('os.path.realpath', autospec=True)
- def test_discover_vscsi_disk(self, mock_realpath, mock_writefile,
- mock_glob):
- scanpath = '/sys/bus/vio/devices/30000005/host*/scsi_host/host*/scan'
- udid = ('275b5d5f88fa5611e48be9000098be9400'
- '13fb2aa55a2d7b8d150cb1b7b6bc04d6')
- devlink = ('/dev/disk/by-id/scsi-SIBM_3303_NVDISK' + udid)
- mapping = mock.Mock()
- mapping.client_adapter.lpar_slot_num = 5
- mapping.backing_storage.udid = udid
- # Realistically, first glob would return e.g. .../host0/.../host0/...
- # but it doesn't matter for test purposes.
- mock_glob.side_effect = [[scanpath], [devlink]]
- mgmt.discover_vscsi_disk(mapping)
- mock_glob.assert_has_calls(
- [mock.call(scanpath), mock.call('/dev/disk/by-id/*' + udid[-32:])])
- mock_writefile.assert_called_once_with(scanpath, 'a', '- - -')
- mock_realpath.assert_called_with(devlink)
-
- @mock.patch('retrying.retry', autospec=True)
- @mock.patch('glob.glob', autospec=True)
- @mock.patch('nova.privsep.path.writefile', autospec=True)
- def test_discover_vscsi_disk_not_one_result(self, mock_writefile,
- mock_glob, mock_retry):
- """Zero or more than one disk is found by discover_vscsi_disk."""
- def validate_retry(kwargs):
- self.assertIn('retry_on_result', kwargs)
- self.assertEqual(250, kwargs['wait_fixed'])
- self.assertEqual(300000, kwargs['stop_max_delay'])
-
- def raiser(unused):
- raise retrying.RetryError(mock.Mock(attempt_number=123))
-
- def retry_passthrough(**kwargs):
- validate_retry(kwargs)
-
- def wrapped(_poll_for_dev):
- return _poll_for_dev
- return wrapped
-
- def retry_timeout(**kwargs):
- validate_retry(kwargs)
-
- def wrapped(_poll_for_dev):
- return raiser
- return wrapped
-
- udid = ('275b5d5f88fa5611e48be9000098be9400'
- '13fb2aa55a2d7b8d150cb1b7b6bc04d6')
- mapping = mock.Mock()
- mapping.client_adapter.lpar_slot_num = 5
- mapping.backing_storage.udid = udid
- # No disks found
- mock_retry.side_effect = retry_timeout
- mock_glob.side_effect = lambda path: []
- self.assertRaises(exception.NoDiskDiscoveryException,
- mgmt.discover_vscsi_disk, mapping)
- # Multiple disks found
- mock_retry.side_effect = retry_passthrough
- mock_glob.side_effect = [['path'], ['/dev/sde', '/dev/sdf']]
- self.assertRaises(exception.UniqueDiskDiscoveryException,
- mgmt.discover_vscsi_disk, mapping)
-
- @mock.patch('time.sleep', autospec=True)
- @mock.patch('os.path.realpath', autospec=True)
- @mock.patch('os.stat', autospec=True)
- @mock.patch('nova.privsep.path.writefile', autospec=True)
- def test_remove_block_dev(self, mock_writefile, mock_stat, mock_realpath,
- mock_sleep):
- link = '/dev/link/foo'
- realpath = '/dev/sde'
- delpath = '/sys/block/sde/device/delete'
- mock_realpath.return_value = realpath
-
- # Good path
- mock_stat.side_effect = (None, None, OSError())
- mgmt.remove_block_dev(link)
- mock_realpath.assert_called_with(link)
- mock_stat.assert_has_calls([mock.call(realpath), mock.call(delpath),
- mock.call(realpath)])
- mock_writefile.assert_called_once_with(delpath, 'a', '1')
- self.assertEqual(0, mock_sleep.call_count)
-
- # Device param not found
- mock_writefile.reset_mock()
- mock_stat.reset_mock()
- mock_stat.side_effect = (OSError(), None, None)
- self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
- link)
- # stat was called once; writefile was not called
- self.assertEqual(1, mock_stat.call_count)
- self.assertEqual(0, mock_writefile.call_count)
-
- # Delete special file not found
- mock_writefile.reset_mock()
- mock_stat.reset_mock()
- mock_stat.side_effect = (None, OSError(), None)
- self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
- link)
- # stat was called twice; writefile was not called
- self.assertEqual(2, mock_stat.call_count)
- self.assertEqual(0, mock_writefile.call_count)
-
- @mock.patch('retrying.retry')
- @mock.patch('os.path.realpath')
- @mock.patch('os.stat')
- @mock.patch('nova.privsep.path.writefile')
- def test_remove_block_dev_timeout(self, mock_dacw, mock_stat,
- mock_realpath, mock_retry):
-
- def validate_retry(kwargs):
- self.assertIn('retry_on_result', kwargs)
- self.assertEqual(250, kwargs['wait_fixed'])
- self.assertEqual(10000, kwargs['stop_max_delay'])
-
- def raiser(unused):
- raise retrying.RetryError(mock.Mock(attempt_number=123))
-
- def retry_timeout(**kwargs):
- validate_retry(kwargs)
-
- def wrapped(_poll_for_del):
- return raiser
- return wrapped
-
- # Deletion was attempted, but device is still there
- link = '/dev/link/foo'
- delpath = '/sys/block/sde/device/delete'
- realpath = '/dev/sde'
- mock_realpath.return_value = realpath
- mock_stat.side_effect = lambda path: 1
- mock_retry.side_effect = retry_timeout
-
- self.assertRaises(
- exception.DeviceDeletionException, mgmt.remove_block_dev, link)
- mock_realpath.assert_called_once_with(link)
- mock_dacw.assert_called_with(delpath, 'a', '1')
diff --git a/nova/tests/unit/virt/powervm/test_vif.py b/nova/tests/unit/virt/powervm/test_vif.py
deleted file mode 100644
index 985c48abe5..0000000000
--- a/nova/tests/unit/virt/powervm/test_vif.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# Copyright 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from pypowervm import exceptions as pvm_ex
-from pypowervm.wrappers import network as pvm_net
-
-from nova import exception
-from nova.network import model
-from nova import test
-from nova.virt.powervm import vif
-
-
-def cna(mac):
- """Builds a mock Client Network Adapter for unit tests."""
- return mock.Mock(spec=pvm_net.CNA, mac=mac, vswitch_uri='fake_href')
-
-
-class TestVifFunctions(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVifFunctions, self).setUp()
-
- self.adpt = mock.Mock()
-
- @mock.patch('nova.virt.powervm.vif.PvmOvsVifDriver')
- def test_build_vif_driver(self, mock_driver):
- # Valid vif type
- driver = vif._build_vif_driver(self.adpt, 'instance', {'type': 'ovs'})
- self.assertEqual(mock_driver.return_value, driver)
-
- mock_driver.reset_mock()
-
- # Fail if no vif type
- self.assertRaises(exception.VirtualInterfacePlugException,
- vif._build_vif_driver, self.adpt, 'instance',
- {'type': None})
- mock_driver.assert_not_called()
-
- # Fail if invalid vif type
- self.assertRaises(exception.VirtualInterfacePlugException,
- vif._build_vif_driver, self.adpt, 'instance',
- {'type': 'bad_type'})
- mock_driver.assert_not_called()
-
- @mock.patch('oslo_serialization.jsonutils.dumps')
- @mock.patch('pypowervm.wrappers.event.Event')
- def test_push_vif_event(self, mock_event, mock_dumps):
- mock_vif = mock.Mock(mac='MAC', href='HREF')
- vif._push_vif_event(self.adpt, 'action', mock_vif, mock.Mock(),
- 'pvm_sea')
- mock_dumps.assert_called_once_with(
- {'provider': 'NOVA_PVM_VIF', 'action': 'action', 'mac': 'MAC',
- 'type': 'pvm_sea'})
- mock_event.bld.assert_called_once_with(self.adpt, 'HREF',
- mock_dumps.return_value)
- mock_event.bld.return_value.create.assert_called_once_with()
-
- mock_dumps.reset_mock()
- mock_event.bld.reset_mock()
- mock_event.bld.return_value.create.reset_mock()
-
- # Exception reraises
- mock_event.bld.return_value.create.side_effect = IndexError
- self.assertRaises(IndexError, vif._push_vif_event, self.adpt, 'action',
- mock_vif, mock.Mock(), 'pvm_sea')
- mock_dumps.assert_called_once_with(
- {'provider': 'NOVA_PVM_VIF', 'action': 'action', 'mac': 'MAC',
- 'type': 'pvm_sea'})
- mock_event.bld.assert_called_once_with(self.adpt, 'HREF',
- mock_dumps.return_value)
- mock_event.bld.return_value.create.assert_called_once_with()
-
- @mock.patch('nova.virt.powervm.vif._push_vif_event')
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_plug(self, mock_bld_drv, mock_event):
- """Test the top-level plug method."""
- mock_vif = {'address': 'MAC', 'type': 'pvm_sea'}
-
- # 1) With new_vif=True (default)
- vnet = vif.plug(self.adpt, 'instance', mock_vif)
-
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.plug.assert_called_once_with(mock_vif,
- new_vif=True)
- self.assertEqual(mock_bld_drv.return_value.plug.return_value, vnet)
- mock_event.assert_called_once_with(self.adpt, 'plug', vnet, mock.ANY,
- 'pvm_sea')
-
- # Clean up
- mock_bld_drv.reset_mock()
- mock_bld_drv.return_value.plug.reset_mock()
- mock_event.reset_mock()
-
- # 2) Plug returns None (which it should IRL whenever new_vif=False).
- mock_bld_drv.return_value.plug.return_value = None
- vnet = vif.plug(self.adpt, 'instance', mock_vif, new_vif=False)
-
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.plug.assert_called_once_with(mock_vif,
- new_vif=False)
- self.assertIsNone(vnet)
- mock_event.assert_not_called()
-
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_plug_raises(self, mock_vif_drv):
- """HttpError is converted to VirtualInterfacePlugException."""
- vif_drv = mock.Mock(plug=mock.Mock(side_effect=pvm_ex.HttpError(
- resp=mock.Mock())))
- mock_vif_drv.return_value = vif_drv
- mock_vif = {'address': 'vifaddr'}
- self.assertRaises(exception.VirtualInterfacePlugException,
- vif.plug, 'adap', 'inst', mock_vif,
- new_vif='new_vif')
- mock_vif_drv.assert_called_once_with('adap', 'inst', mock_vif)
- vif_drv.plug.assert_called_once_with(mock_vif, new_vif='new_vif')
-
- @mock.patch('nova.virt.powervm.vif._push_vif_event')
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_unplug(self, mock_bld_drv, mock_event):
- """Test the top-level unplug method."""
- mock_vif = {'address': 'MAC', 'type': 'pvm_sea'}
-
- # 1) With default cna_w_list
- mock_bld_drv.return_value.unplug.return_value = 'vnet_w'
- vif.unplug(self.adpt, 'instance', mock_vif)
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.unplug.assert_called_once_with(
- mock_vif, cna_w_list=None)
- mock_event.assert_called_once_with(self.adpt, 'unplug', 'vnet_w',
- mock.ANY, 'pvm_sea')
- # Clean up
- mock_bld_drv.reset_mock()
- mock_bld_drv.return_value.unplug.reset_mock()
- mock_event.reset_mock()
-
- # 2) With specified cna_w_list
- mock_bld_drv.return_value.unplug.return_value = None
- vif.unplug(self.adpt, 'instance', mock_vif, cna_w_list='cnalist')
- mock_bld_drv.assert_called_once_with(self.adpt, 'instance', mock_vif)
- mock_bld_drv.return_value.unplug.assert_called_once_with(
- mock_vif, cna_w_list='cnalist')
- mock_event.assert_not_called()
-
- @mock.patch('nova.virt.powervm.vif._build_vif_driver')
- def test_unplug_raises(self, mock_vif_drv):
- """HttpError is converted to VirtualInterfacePlugException."""
- vif_drv = mock.Mock(unplug=mock.Mock(side_effect=pvm_ex.HttpError(
- resp=mock.Mock())))
- mock_vif_drv.return_value = vif_drv
- mock_vif = {'address': 'vifaddr'}
- self.assertRaises(exception.VirtualInterfaceUnplugException,
- vif.unplug, 'adap', 'inst', mock_vif,
- cna_w_list='cna_w_list')
- mock_vif_drv.assert_called_once_with('adap', 'inst', mock_vif)
- vif_drv.unplug.assert_called_once_with(
- mock_vif, cna_w_list='cna_w_list')
-
-
-class TestVifOvsDriver(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVifOvsDriver, self).setUp()
-
- self.adpt = mock.Mock()
- self.inst = mock.MagicMock(uuid='inst_uuid')
- self.drv = vif.PvmOvsVifDriver(self.adpt, self.inst)
-
- @mock.patch('pypowervm.tasks.cna.crt_p2p_cna', autospec=True)
- @mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- def test_plug(self, mock_pvm_uuid, mock_mgmt_lpar, mock_p2p_cna,):
- # Mock the data
- mock_pvm_uuid.return_value = 'lpar_uuid'
- mock_mgmt_lpar.return_value = mock.Mock(uuid='mgmt_uuid')
- # mock_trunk_dev_name.return_value = 'device'
-
- cna_w, trunk_wraps = mock.MagicMock(), [mock.MagicMock()]
- mock_p2p_cna.return_value = cna_w, trunk_wraps
-
- # Run the plug
- network_model = model.Model({'bridge': 'br0', 'meta': {'mtu': 1450}})
- mock_vif = model.VIF(address='aa:bb:cc:dd:ee:ff', id='vif_id',
- network=network_model, devname='device')
- self.drv.plug(mock_vif)
-
- # Validate the calls
- ovs_ext_ids = ('iface-id=vif_id,iface-status=active,'
- 'attached-mac=aa:bb:cc:dd:ee:ff,vm-uuid=inst_uuid')
- mock_p2p_cna.assert_called_once_with(
- self.adpt, None, 'lpar_uuid', ['mgmt_uuid'],
- 'NovaLinkVEABridge', configured_mtu=1450, crt_vswitch=True,
- mac_addr='aa:bb:cc:dd:ee:ff', dev_name='device', ovs_bridge='br0',
- ovs_ext_ids=ovs_ext_ids)
-
- @mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- @mock.patch('pypowervm.tasks.cna.find_trunks', autospec=True)
- def test_plug_existing_vif(self, mock_find_trunks, mock_get_cnas,
- mock_pvm_uuid, mock_mgmt_lpar):
- # Mock the data
- t1, t2 = mock.MagicMock(), mock.MagicMock()
- mock_find_trunks.return_value = [t1, t2]
-
- mock_cna = mock.Mock(mac='aa:bb:cc:dd:ee:ff')
- mock_get_cnas.return_value = [mock_cna]
-
- mock_pvm_uuid.return_value = 'lpar_uuid'
-
- mock_mgmt_lpar.return_value = mock.Mock(uuid='mgmt_uuid')
-
- self.inst = mock.MagicMock(uuid='c2e7ff9f-b9b6-46fa-8716-93bbb795b8b4')
- self.drv = vif.PvmOvsVifDriver(self.adpt, self.inst)
-
- # Run the plug
- network_model = model.Model({'bridge': 'br0', 'meta': {'mtu': 1500}})
- mock_vif = model.VIF(address='aa:bb:cc:dd:ee:ff', id='vif_id',
- network=network_model, devname='devname')
- resp = self.drv.plug(mock_vif, new_vif=False)
-
- self.assertIsNone(resp)
-
-        # Validate trunk.update was invoked for all trunks of the vif's CNA
- self.assertTrue(t1.update.called)
- self.assertTrue(t2.update.called)
-
- @mock.patch('pypowervm.tasks.cna.find_trunks')
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_unplug(self, mock_get_cnas, mock_find_trunks):
- # Set up the mocks
- mock_cna = mock.Mock(mac='aa:bb:cc:dd:ee:ff')
- mock_get_cnas.return_value = [mock_cna]
-
- t1, t2 = mock.MagicMock(), mock.MagicMock()
- mock_find_trunks.return_value = [t1, t2]
-
- # Call the unplug
- mock_vif = {'address': 'aa:bb:cc:dd:ee:ff',
- 'network': {'bridge': 'br-int'}}
- self.drv.unplug(mock_vif)
-
- # The trunks and the cna should have been deleted
- self.assertTrue(t1.delete.called)
- self.assertTrue(t2.delete.called)
- self.assertTrue(mock_cna.delete.called)
-
-
-class TestVifSeaDriver(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVifSeaDriver, self).setUp()
-
- self.adpt = mock.Mock()
- self.inst = mock.Mock()
- self.drv = vif.PvmSeaVifDriver(self.adpt, self.inst)
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.tasks.cna.crt_cna')
- def test_plug_from_neutron(self, mock_crt_cna, mock_pvm_uuid):
- """Tests that a VIF can be created. Mocks Neutron net"""
-
- # Set up the mocks. Look like Neutron
- fake_vif = {'details': {'vlan': 5}, 'network': {'meta': {}},
- 'address': 'aabbccddeeff'}
-
- def validate_crt(adpt, host_uuid, lpar_uuid, vlan, mac_addr=None):
- self.assertIsNone(host_uuid)
- self.assertEqual(5, vlan)
- self.assertEqual('aabbccddeeff', mac_addr)
- return pvm_net.CNA.bld(self.adpt, 5, 'host_uuid',
- mac_addr=mac_addr)
- mock_crt_cna.side_effect = validate_crt
-
- # Invoke
- resp = self.drv.plug(fake_vif)
-
- # Validate (along with validate method above)
- self.assertEqual(1, mock_crt_cna.call_count)
- self.assertIsNotNone(resp)
- self.assertIsInstance(resp, pvm_net.CNA)
-
- def test_plug_existing_vif(self):
- """Tests that a VIF need not be created."""
-
- # Set up the mocks
- fake_vif = {'network': {'meta': {'vlan': 5}},
- 'address': 'aabbccddeeff'}
-
- # Invoke
- resp = self.drv.plug(fake_vif, new_vif=False)
-
- self.assertIsNone(resp)
-
- @mock.patch('nova.virt.powervm.vm.get_cnas')
- def test_unplug_vifs(self, mock_vm_get):
- """Tests that a delete of the vif can be done."""
- # Mock up the CNA response. Two should already exist, the other
- # should not.
- cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11'), cna('AABBCCDDEE22')]
- mock_vm_get.return_value = cnas
-
- # Run method. The AABBCCDDEE11 won't be unplugged (wasn't invoked
-        # below) and the last unplug will also just no-op because it's not on
- # the VM.
- self.drv.unplug({'address': 'aa:bb:cc:dd:ee:ff'})
- self.drv.unplug({'address': 'aa:bb:cc:dd:ee:22'})
- self.drv.unplug({'address': 'aa:bb:cc:dd:ee:33'})
-
- # The delete should have only been called once for each applicable vif.
- # The second CNA didn't have a matching mac so it should be skipped.
- self.assertEqual(1, cnas[0].delete.call_count)
- self.assertEqual(0, cnas[1].delete.call_count)
- self.assertEqual(1, cnas[2].delete.call_count)
diff --git a/nova/tests/unit/virt/powervm/test_vm.py b/nova/tests/unit/virt/powervm/test_vm.py
deleted file mode 100644
index ab0f9c35e8..0000000000
--- a/nova/tests/unit/virt/powervm/test_vm.py
+++ /dev/null
@@ -1,563 +0,0 @@
-# Copyright 2014, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from pypowervm import exceptions as pvm_exc
-from pypowervm.helpers import log_helper as pvm_log
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.utils import lpar_builder as lpar_bld
-from pypowervm.utils import uuid as pvm_uuid
-from pypowervm.wrappers import base_partition as pvm_bp
-from pypowervm.wrappers import logical_partition as pvm_lpar
-
-from nova.compute import power_state
-from nova import exception
-from nova import test
-from nova.tests.unit.virt import powervm
-from nova.virt.powervm import vm
-
-
-class TestVMBuilder(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVMBuilder, self).setUp()
-
- self.adpt = mock.MagicMock()
- self.host_w = mock.MagicMock()
- self.lpar_b = vm.VMBuilder(self.host_w, self.adpt)
-
- self.san_lpar_name = self.useFixture(fixtures.MockPatch(
- 'pypowervm.util.sanitize_partition_name_for_api',
- autospec=True)).mock
-
- self.inst = powervm.TEST_INSTANCE
-
- @mock.patch('pypowervm.utils.lpar_builder.DefaultStandardize',
- autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.utils.lpar_builder.LPARBuilder', autospec=True)
- def test_vm_builder(self, mock_lpar_bldr, mock_uuid2pvm, mock_def_stdz):
- inst = mock.Mock()
- inst.configure_mock(
- name='lpar_name', uuid='lpar_uuid',
- flavor=mock.Mock(memory_mb='mem', vcpus='vcpus', extra_specs={}))
- vmb = vm.VMBuilder('host', 'adap')
- mock_def_stdz.assert_called_once_with('host', proc_units_factor=0.1)
- self.assertEqual(mock_lpar_bldr.return_value,
- vmb.lpar_builder(inst))
- self.san_lpar_name.assert_called_once_with('lpar_name')
- mock_uuid2pvm.assert_called_once_with(inst)
- mock_lpar_bldr.assert_called_once_with(
- 'adap', {'name': self.san_lpar_name.return_value,
- 'uuid': mock_uuid2pvm.return_value,
- 'memory': 'mem',
- 'vcpu': 'vcpus',
- 'srr_capability': True}, mock_def_stdz.return_value)
-
- # Assert non-default proc_units_factor.
- mock_def_stdz.reset_mock()
- self.flags(proc_units_factor=0.2, group='powervm')
- vmb = vm.VMBuilder('host', 'adap')
- mock_def_stdz.assert_called_once_with('host', proc_units_factor=0.2)
-
- def test_format_flavor(self):
- """Perform tests against _format_flavor."""
- # convert instance uuid to pypowervm uuid
- # LP 1561128, simplified remote restart is enabled by default
- lpar_attrs = {'memory': 2048,
- 'name': self.san_lpar_name.return_value,
- 'uuid': pvm_uuid.convert_uuid_to_pvm(
- self.inst.uuid).upper(),
- 'vcpu': 1, 'srr_capability': True}
-
- # Test dedicated procs
- self.inst.flavor.extra_specs = {'powervm:dedicated_proc': 'true'}
- test_attrs = dict(lpar_attrs, dedicated_proc='true')
-
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test dedicated procs, min/max vcpu and sharing mode
- self.inst.flavor.extra_specs = {'powervm:dedicated_proc': 'true',
- 'powervm:dedicated_sharing_mode':
- 'share_idle_procs_active',
- 'powervm:min_vcpu': '1',
- 'powervm:max_vcpu': '3'}
- test_attrs = dict(lpar_attrs,
- dedicated_proc='true',
- sharing_mode='sre idle procs active',
- min_vcpu='1', max_vcpu='3')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test shared proc sharing mode
- self.inst.flavor.extra_specs = {'powervm:uncapped': 'true'}
- test_attrs = dict(lpar_attrs, sharing_mode='uncapped')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test availability priority
- self.inst.flavor.extra_specs = {'powervm:availability_priority': '150'}
- test_attrs = dict(lpar_attrs, avail_priority='150')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test processor compatibility
- self.inst.flavor.extra_specs = {
- 'powervm:processor_compatibility': 'POWER8'}
- test_attrs = dict(lpar_attrs, processor_compatibility='POWER8')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test min, max proc units
- self.inst.flavor.extra_specs = {'powervm:min_proc_units': '0.5',
- 'powervm:max_proc_units': '2.0'}
- test_attrs = dict(lpar_attrs, min_proc_units='0.5',
- max_proc_units='2.0')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test min, max mem
- self.inst.flavor.extra_specs = {'powervm:min_mem': '1024',
- 'powervm:max_mem': '4096'}
- test_attrs = dict(lpar_attrs, min_mem='1024', max_mem='4096')
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
- self.san_lpar_name.assert_called_with(self.inst.name)
- self.san_lpar_name.reset_mock()
-
- # Test remote restart set to false
- self.inst.flavor.extra_specs = {'powervm:srr_capability': 'false'}
- test_attrs = dict(lpar_attrs, srr_capability=False)
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
-
- # Unhandled powervm: key is ignored
- self.inst.flavor.extra_specs = {'powervm:srr_capability': 'false',
- 'powervm:something_new': 'foo'}
- test_attrs = dict(lpar_attrs, srr_capability=False)
- self.assertEqual(self.lpar_b._format_flavor(self.inst), test_attrs)
-
- # If we recognize a key, but don't handle it, we raise
- with mock.patch.object(self.lpar_b, '_is_pvm_valid_key',
- return_value=True):
- self.inst.flavor.extra_specs = {'powervm:srr_capability': 'false',
- 'powervm:something_new': 'foo'}
- self.assertRaises(KeyError, self.lpar_b._format_flavor, self.inst)
-
- @mock.patch('pypowervm.wrappers.shared_proc_pool.SharedProcPool.search')
- def test_spp_pool_id(self, mock_search):
- # The default pool is always zero. Validate the path.
- self.assertEqual(0, self.lpar_b._spp_pool_id('DefaultPool'))
- self.assertEqual(0, self.lpar_b._spp_pool_id(None))
-
- # Further invocations require calls to the adapter. Build a minimal
- # mocked SPP wrapper
- spp = mock.MagicMock()
- spp.id = 1
-
- # Three invocations. First has too many elems. Second has none.
- # Third is just right. :-)
- mock_search.side_effect = [[spp, spp], [], [spp]]
-
- self.assertRaises(exception.ValidationError, self.lpar_b._spp_pool_id,
- 'fake_name')
- self.assertRaises(exception.ValidationError, self.lpar_b._spp_pool_id,
- 'fake_name')
-
- self.assertEqual(1, self.lpar_b._spp_pool_id('fake_name'))
-
-
-class TestVM(test.NoDBTestCase):
- def setUp(self):
- super(TestVM, self).setUp()
-
- self.apt = self.useFixture(pvm_fx.AdapterFx(
- traits=pvm_fx.LocalPVMTraits)).adpt
- self.apt.helpers = [pvm_log.log_helper]
-
- self.san_lpar_name = self.useFixture(fixtures.MockPatch(
- 'pypowervm.util.sanitize_partition_name_for_api')).mock
- self.san_lpar_name.side_effect = lambda name: name
- mock_entries = [mock.Mock(), mock.Mock()]
- self.resp = mock.MagicMock()
- self.resp.feed = mock.MagicMock(entries=mock_entries)
-
- self.get_pvm_uuid = self.useFixture(fixtures.MockPatch(
- 'nova.virt.powervm.vm.get_pvm_uuid')).mock
-
- self.inst = powervm.TEST_INSTANCE
-
- def test_translate_vm_state(self):
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('running'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('migrating running'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('starting'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('open firmware'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('shutting down'))
- self.assertEqual(power_state.RUNNING,
- vm._translate_vm_state('suspending'))
-
- self.assertEqual(power_state.SHUTDOWN,
- vm._translate_vm_state('migrating not active'))
- self.assertEqual(power_state.SHUTDOWN,
- vm._translate_vm_state('not activated'))
-
- self.assertEqual(power_state.NOSTATE,
- vm._translate_vm_state('unknown'))
- self.assertEqual(power_state.NOSTATE,
- vm._translate_vm_state('hardware discovery'))
- self.assertEqual(power_state.NOSTATE,
- vm._translate_vm_state('not available'))
-
- self.assertEqual(power_state.SUSPENDED,
- vm._translate_vm_state('resuming'))
- self.assertEqual(power_state.SUSPENDED,
- vm._translate_vm_state('suspended'))
-
- self.assertEqual(power_state.CRASHED,
- vm._translate_vm_state('error'))
-
- @mock.patch('pypowervm.wrappers.logical_partition.LPAR', autospec=True)
- def test_get_lpar_names(self, mock_lpar):
- inst1 = mock.Mock()
- inst1.configure_mock(name='inst1')
- inst2 = mock.Mock()
- inst2.configure_mock(name='inst2')
- mock_lpar.search.return_value = [inst1, inst2]
- self.assertEqual({'inst1', 'inst2'}, set(vm.get_lpar_names('adap')))
- mock_lpar.search.assert_called_once_with(
- 'adap', is_mgmt_partition=False)
-
- @mock.patch('pypowervm.tasks.vterm.close_vterm', autospec=True)
- def test_dlt_lpar(self, mock_vterm):
- """Performs a delete LPAR test."""
- vm.delete_lpar(self.apt, 'inst')
- self.get_pvm_uuid.assert_called_once_with('inst')
- self.apt.delete.assert_called_once_with(
- pvm_lpar.LPAR.schema_type, root_id=self.get_pvm_uuid.return_value)
- self.assertEqual(1, mock_vterm.call_count)
-
- # Test Failure Path
- # build a mock response body with the expected HSCL msg
- resp = mock.Mock()
- resp.body = 'error msg: HSCL151B more text'
- self.apt.delete.side_effect = pvm_exc.Error(
- 'Mock Error Message', response=resp)
-
- # Reset counters
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- self.assertRaises(pvm_exc.Error, vm.delete_lpar, self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(1, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test HttpError 404
- resp.status = 404
- self.apt.delete.side_effect = pvm_exc.HttpError(resp=resp)
- vm.delete_lpar(self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(1, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test Other HttpError
- resp.status = 111
- self.apt.delete.side_effect = pvm_exc.HttpError(resp=resp)
- self.assertRaises(pvm_exc.HttpError, vm.delete_lpar, self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(1, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test HttpError 404 closing vterm
- resp.status = 404
- mock_vterm.side_effect = pvm_exc.HttpError(resp=resp)
- vm.delete_lpar(self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(0, self.apt.delete.call_count)
-
- self.apt.reset_mock()
- mock_vterm.reset_mock()
-
- # Test Other HttpError closing vterm
- resp.status = 111
- mock_vterm.side_effect = pvm_exc.HttpError(resp=resp)
- self.assertRaises(pvm_exc.HttpError, vm.delete_lpar, self.apt, 'inst')
- self.assertEqual(1, mock_vterm.call_count)
- self.assertEqual(0, self.apt.delete.call_count)
-
- @mock.patch('nova.virt.powervm.vm.VMBuilder', autospec=True)
- @mock.patch('pypowervm.utils.validation.LPARWrapperValidator',
- autospec=True)
- def test_crt_lpar(self, mock_vld, mock_vmbldr):
- self.inst.flavor.extra_specs = {'powervm:dedicated_proc': 'true'}
- mock_bldr = mock.Mock(spec=lpar_bld.LPARBuilder)
- mock_vmbldr.return_value.lpar_builder.return_value = mock_bldr
- mock_pend_lpar = mock.create_autospec(pvm_lpar.LPAR, instance=True)
- mock_bldr.build.return_value = mock_pend_lpar
-
- vm.create_lpar(self.apt, 'host', self.inst)
- mock_vmbldr.assert_called_once_with('host', self.apt)
- mock_vmbldr.return_value.lpar_builder.assert_called_once_with(
- self.inst)
- mock_bldr.build.assert_called_once_with()
- mock_vld.assert_called_once_with(mock_pend_lpar, 'host')
- mock_vld.return_value.validate_all.assert_called_once_with()
- mock_pend_lpar.create.assert_called_once_with(parent='host')
-
- # Test to verify the LPAR Creation with invalid name specification
- mock_vmbldr.side_effect = lpar_bld.LPARBuilderException("Invalid Name")
- self.assertRaises(exception.BuildAbortException,
- vm.create_lpar, self.apt, 'host', self.inst)
-
- # HttpError
- mock_vmbldr.side_effect = pvm_exc.HttpError(mock.Mock())
- self.assertRaises(exception.PowerVMAPIFailed,
- vm.create_lpar, self.apt, 'host', self.inst)
-
- @mock.patch('pypowervm.wrappers.logical_partition.LPAR', autospec=True)
- def test_get_instance_wrapper(self, mock_lpar):
- resp = mock.Mock(status=404)
- mock_lpar.get.side_effect = pvm_exc.Error('message', response=resp)
- # vm.get_instance_wrapper(self.apt, instance, 'lpar_uuid')
- self.assertRaises(exception.InstanceNotFound, vm.get_instance_wrapper,
- self.apt, self.inst)
-
- @mock.patch('pypowervm.tasks.power.power_on', autospec=True)
- @mock.patch('oslo_concurrency.lockutils.lock', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_on(self, mock_wrap, mock_lock, mock_power_on):
- entry = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
- mock_wrap.return_value = entry
-
- vm.power_on(None, self.inst)
- mock_power_on.assert_called_once_with(entry, None)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
-
- mock_power_on.reset_mock()
- mock_lock.reset_mock()
-
- stop_states = [
- pvm_bp.LPARState.RUNNING, pvm_bp.LPARState.STARTING,
- pvm_bp.LPARState.OPEN_FIRMWARE, pvm_bp.LPARState.SHUTTING_DOWN,
- pvm_bp.LPARState.ERROR, pvm_bp.LPARState.RESUMING,
- pvm_bp.LPARState.SUSPENDING]
-
- for stop_state in stop_states:
- entry.state = stop_state
- vm.power_on(None, self.inst)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
- mock_lock.reset_mock()
- self.assertEqual(0, mock_power_on.call_count)
-
- @mock.patch('pypowervm.tasks.power.power_on', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_on_negative(self, mock_wrp, mock_power_on):
- mock_wrp.return_value = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
-
- # Convertible (PowerVM) exception
- mock_power_on.side_effect = pvm_exc.VMPowerOnFailure(
- reason='Something bad', lpar_nm='TheLPAR')
- self.assertRaises(exception.InstancePowerOnFailure,
- vm.power_on, None, self.inst)
-
- # Non-pvm error raises directly
- mock_power_on.side_effect = ValueError()
- self.assertRaises(ValueError, vm.power_on, None, self.inst)
-
- @mock.patch('pypowervm.tasks.power.PowerOp', autospec=True)
- @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True)
- @mock.patch('oslo_concurrency.lockutils.lock', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_off(self, mock_wrap, mock_lock, mock_power_off, mock_pop):
- entry = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
- mock_wrap.return_value = entry
-
- vm.power_off(None, self.inst)
- self.assertEqual(0, mock_power_off.call_count)
- self.assertEqual(0, mock_pop.stop.call_count)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
-
- stop_states = [
- pvm_bp.LPARState.RUNNING, pvm_bp.LPARState.STARTING,
- pvm_bp.LPARState.OPEN_FIRMWARE, pvm_bp.LPARState.SHUTTING_DOWN,
- pvm_bp.LPARState.ERROR, pvm_bp.LPARState.RESUMING,
- pvm_bp.LPARState.SUSPENDING]
- for stop_state in stop_states:
- entry.state = stop_state
- mock_power_off.reset_mock()
- mock_pop.stop.reset_mock()
- mock_lock.reset_mock()
- vm.power_off(None, self.inst)
- mock_power_off.assert_called_once_with(entry)
- self.assertEqual(0, mock_pop.stop.call_count)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
- mock_power_off.reset_mock()
- mock_lock.reset_mock()
- vm.power_off(None, self.inst, force_immediate=True, timeout=5)
- self.assertEqual(0, mock_power_off.call_count)
- mock_pop.stop.assert_called_once_with(
- entry, opts=mock.ANY, timeout=5)
- self.assertEqual('PowerOff(immediate=true, operation=shutdown)',
- str(mock_pop.stop.call_args[1]['opts']))
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
-
- @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_power_off_negative(self, mock_wrap, mock_power_off):
- """Negative tests."""
- mock_wrap.return_value = mock.Mock(state=pvm_bp.LPARState.RUNNING)
-
- # Raise the expected pypowervm exception
- mock_power_off.side_effect = pvm_exc.VMPowerOffFailure(
- reason='Something bad.', lpar_nm='TheLPAR')
- # We should get a valid Nova exception that the compute manager expects
- self.assertRaises(exception.InstancePowerOffFailure,
- vm.power_off, None, self.inst)
-
- # Non-pvm error raises directly
- mock_power_off.side_effect = ValueError()
- self.assertRaises(ValueError, vm.power_off, None, self.inst)
-
- @mock.patch('pypowervm.tasks.power.power_on', autospec=True)
- @mock.patch('pypowervm.tasks.power.power_off_progressive', autospec=True)
- @mock.patch('pypowervm.tasks.power.PowerOp', autospec=True)
- @mock.patch('oslo_concurrency.lockutils.lock', autospec=True)
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- def test_reboot(self, mock_wrap, mock_lock, mock_pop, mock_pwroff,
- mock_pwron):
- entry = mock.Mock(state=pvm_bp.LPARState.NOT_ACTIVATED)
- mock_wrap.return_value = entry
-
- # No power_off
- vm.reboot('adap', self.inst, False)
- mock_lock.assert_called_once_with('power_%s' % self.inst.uuid)
- mock_wrap.assert_called_once_with('adap', self.inst)
- mock_pwron.assert_called_once_with(entry, None)
- self.assertEqual(0, mock_pwroff.call_count)
- self.assertEqual(0, mock_pop.stop.call_count)
-
- mock_pwron.reset_mock()
-
- # power_off (no power_on) hard
- entry.state = pvm_bp.LPARState.RUNNING
- vm.reboot('adap', self.inst, True)
- self.assertEqual(0, mock_pwron.call_count)
- self.assertEqual(0, mock_pwroff.call_count)
- mock_pop.stop.assert_called_once_with(entry, opts=mock.ANY)
- self.assertEqual(
- 'PowerOff(immediate=true, operation=shutdown, restart=true)',
- str(mock_pop.stop.call_args[1]['opts']))
-
- mock_pop.reset_mock()
-
- # power_off (no power_on) soft
- entry.state = pvm_bp.LPARState.RUNNING
- vm.reboot('adap', self.inst, False)
- self.assertEqual(0, mock_pwron.call_count)
- mock_pwroff.assert_called_once_with(entry, restart=True)
- self.assertEqual(0, mock_pop.stop.call_count)
-
- mock_pwroff.reset_mock()
-
- # PowerVM error is converted
- mock_pop.stop.side_effect = pvm_exc.TimeoutError("Timed out")
- self.assertRaises(exception.InstanceRebootFailure,
- vm.reboot, 'adap', self.inst, True)
-
- # Non-PowerVM error is raised directly
- mock_pwroff.side_effect = ValueError
- self.assertRaises(ValueError, vm.reboot, 'adap', self.inst, False)
-
- @mock.patch('oslo_serialization.jsonutils.loads')
- def test_get_vm_qp(self, mock_loads):
- self.apt.helpers = ['helper1', pvm_log.log_helper, 'helper3']
-
- # Defaults
- self.assertEqual(mock_loads.return_value,
- vm.get_vm_qp(self.apt, 'lpar_uuid'))
- self.apt.read.assert_called_once_with(
- 'LogicalPartition', root_id='lpar_uuid', suffix_type='quick',
- suffix_parm=None)
- mock_loads.assert_called_once_with(self.apt.read.return_value.body)
-
- self.apt.read.reset_mock()
- mock_loads.reset_mock()
-
- # Specific qprop, no logging errors
- self.assertEqual(mock_loads.return_value,
- vm.get_vm_qp(self.apt, 'lpar_uuid', qprop='Prop',
- log_errors=False))
- self.apt.read.assert_called_once_with(
- 'LogicalPartition', root_id='lpar_uuid', suffix_type='quick',
- suffix_parm='Prop', helpers=['helper1', 'helper3'])
-
- resp = mock.MagicMock()
- resp.status = 404
- self.apt.read.side_effect = pvm_exc.HttpError(resp)
- self.assertRaises(exception.InstanceNotFound, vm.get_vm_qp, self.apt,
- 'lpar_uuid', log_errors=False)
-
- self.apt.read.side_effect = pvm_exc.Error("message", response=None)
- self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt,
- 'lpar_uuid', log_errors=False)
-
- resp.status = 500
- self.apt.read.side_effect = pvm_exc.Error("message", response=resp)
- self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt,
- 'lpar_uuid', log_errors=False)
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- @mock.patch('pypowervm.wrappers.network.CNA.search')
- @mock.patch('pypowervm.wrappers.network.CNA.get')
- def test_get_cnas(self, mock_get, mock_search, mock_uuid):
- # No kwargs: get
- self.assertEqual(mock_get.return_value, vm.get_cnas(self.apt, 'inst'))
- mock_uuid.assert_called_once_with('inst')
- mock_get.assert_called_once_with(self.apt, parent_type=pvm_lpar.LPAR,
- parent_uuid=mock_uuid.return_value)
- mock_search.assert_not_called()
- # With kwargs: search
- mock_get.reset_mock()
- mock_uuid.reset_mock()
- self.assertEqual(mock_search.return_value, vm.get_cnas(
- self.apt, 'inst', one=2, three=4))
- mock_uuid.assert_called_once_with('inst')
- mock_search.assert_called_once_with(
- self.apt, parent_type=pvm_lpar.LPAR,
- parent_uuid=mock_uuid.return_value, one=2, three=4)
- mock_get.assert_not_called()
-
- def test_norm_mac(self):
- EXPECTED = "12:34:56:78:90:ab"
- self.assertEqual(EXPECTED, vm.norm_mac("12:34:56:78:90:ab"))
- self.assertEqual(EXPECTED, vm.norm_mac("1234567890ab"))
- self.assertEqual(EXPECTED, vm.norm_mac("12:34:56:78:90:AB"))
- self.assertEqual(EXPECTED, vm.norm_mac("1234567890AB"))
diff --git a/nova/tests/unit/virt/powervm/volume/__init__.py b/nova/tests/unit/virt/powervm/volume/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/tests/unit/virt/powervm/volume/__init__.py
+++ /dev/null
diff --git a/nova/tests/unit/virt/powervm/volume/test_fcvscsi.py b/nova/tests/unit/virt/powervm/volume/test_fcvscsi.py
deleted file mode 100644
index 2db5b1a663..0000000000
--- a/nova/tests/unit/virt/powervm/volume/test_fcvscsi.py
+++ /dev/null
@@ -1,456 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from pypowervm import const as pvm_const
-from pypowervm.tasks import hdisk
-from pypowervm.tests import test_fixtures as pvm_fx
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import storage as pvm_stor
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import conf as cfg
-from nova import exception as exc
-from nova import test
-from nova.virt.powervm.volume import fcvscsi
-
-CONF = cfg.CONF
-
-I_WWPN_1 = '21000024FF649104'
-I_WWPN_2 = '21000024FF649105'
-
-
-class TestVSCSIAdapter(test.NoDBTestCase):
-
- def setUp(self):
- super(TestVSCSIAdapter, self).setUp()
-
- self.adpt = self.useFixture(pvm_fx.AdapterFx()).adpt
- self.wtsk = mock.create_autospec(pvm_tx.WrapperTask, instance=True)
- self.ftsk = mock.create_autospec(pvm_tx.FeedTask, instance=True)
- self.ftsk.configure_mock(wrapper_tasks={'vios_uuid': self.wtsk})
-
- @mock.patch('nova.virt.powervm.vm.get_pvm_uuid')
- def init_vol_adpt(mock_pvm_uuid):
- con_info = {
- 'serial': 'id',
- 'data': {
- 'initiator_target_map': {
- I_WWPN_1: ['t1'],
- I_WWPN_2: ['t2', 't3']
- },
- 'target_lun': '1',
- 'volume_id': 'a_volume_identifier',
- },
- }
- mock_inst = mock.MagicMock()
- mock_pvm_uuid.return_value = '1234'
-
- return fcvscsi.FCVscsiVolumeAdapter(
- self.adpt, mock_inst, con_info, stg_ftsk=self.ftsk)
- self.vol_drv = init_vol_adpt()
-
- @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
- @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS', autospec=True)
- def test_reset_stg_ftsk(self, mock_vios, mock_ftsk):
- self.vol_drv.reset_stg_ftsk('stg_ftsk')
- self.assertEqual('stg_ftsk', self.vol_drv.stg_ftsk)
-
- mock_vios.getter.return_value = 'getter'
- mock_ftsk.return_value = 'local_feed_task'
- self.vol_drv.reset_stg_ftsk()
- self.assertEqual('local_feed_task', self.vol_drv.stg_ftsk)
- mock_vios.getter.assert_called_once_with(
- self.adpt, xag=[pvm_const.XAG.VIO_SMAP])
- mock_ftsk.assert_called_once_with('local_feed_task', 'getter')
-
- @mock.patch('pypowervm.tasks.partition.get_physical_wwpns', autospec=True)
- def test_wwpns(self, mock_vio_wwpns):
- mock_vio_wwpns.return_value = ['aa', 'bb']
- wwpns = fcvscsi.wwpns(self.adpt)
- self.assertListEqual(['aa', 'bb'], wwpns)
- mock_vio_wwpns.assert_called_once_with(self.adpt, force_refresh=False)
-
- def test_set_udid(self):
- # Mock connection info
- self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY] = None
-
- # Set the UDID
- self.vol_drv._set_udid('udid')
-
- # Verify
- self.assertEqual('udid',
- self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY])
-
- def test_get_udid(self):
- # Set the value to retrieve
- self.vol_drv.connection_info['data'][fcvscsi.UDID_KEY] = 'udid'
- retrieved_udid = self.vol_drv._get_udid()
- # Check key found
- self.assertEqual('udid', retrieved_udid)
-
- # Check key not found
- self.vol_drv.connection_info['data'].pop(fcvscsi.UDID_KEY)
- retrieved_udid = self.vol_drv._get_udid()
- # Check key not found
- self.assertIsNone(retrieved_udid)
-
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
- def test_attach_volume(self, mock_feed_task, mock_get_wrap):
- mock_lpar_wrap = mock.MagicMock()
- mock_lpar_wrap.can_modify_io.return_value = True, None
- mock_get_wrap.return_value = mock_lpar_wrap
- mock_attach_ftsk = mock_feed_task.return_value
-
- # Pass if all vioses modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': True}}}
- mock_attach_ftsk.execute.return_value = mock_ret
- self.vol_drv.attach_volume()
- mock_feed_task.assert_called_once()
- mock_attach_ftsk.add_functor_subtask.assert_called_once_with(
- self.vol_drv._attach_volume_to_vio, provides='vio_modified',
- flag_update=False)
- mock_attach_ftsk.execute.assert_called_once()
- self.ftsk.execute.assert_called_once()
-
- mock_feed_task.reset_mock()
- mock_attach_ftsk.reset_mock()
- self.ftsk.reset_mock()
-
- # Pass if 1 vios modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': False}}}
- mock_attach_ftsk.execute.return_value = mock_ret
- self.vol_drv.attach_volume()
- mock_feed_task.assert_called_once()
- mock_attach_ftsk.add_functor_subtask.assert_called_once_with(
- self.vol_drv._attach_volume_to_vio, provides='vio_modified',
- flag_update=False)
- mock_attach_ftsk.execute.assert_called_once()
- self.ftsk.execute.assert_called_once()
-
- # Raise if no vios modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': False},
- 'vios2': {'vio_modified': False}}}
- mock_attach_ftsk.execute.return_value = mock_ret
- self.assertRaises(exc.VolumeAttachFailed, self.vol_drv.attach_volume)
-
- # Raise if vm in invalid state
- mock_lpar_wrap.can_modify_io.return_value = False, None
- self.assertRaises(exc.VolumeAttachFailed, self.vol_drv.attach_volume)
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_set_udid')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_add_append_mapping')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_discover_volume_on_vios')
- @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
- def test_attach_volume_to_vio(self, mock_good_disc, mock_disc_vol,
- mock_add_map, mock_set_udid):
- # Setup mocks
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid'
- mock_disc_vol.return_value = 'status', 'devname', 'udid'
-
- # Bad discovery
- mock_good_disc.return_value = False
- ret = self.vol_drv._attach_volume_to_vio(mock_vios)
- self.assertFalse(ret)
- mock_disc_vol.assert_called_once_with(mock_vios)
- mock_good_disc.assert_called_once_with('status', 'devname')
-
- # Good discovery
- mock_good_disc.return_value = True
- ret = self.vol_drv._attach_volume_to_vio(mock_vios)
- self.assertTrue(ret)
- mock_add_map.assert_called_once_with(
- 'uuid', 'devname', tag='a_volume_identifier')
- mock_set_udid.assert_called_once_with('udid')
-
- def test_extend_volume(self):
- # Ensure the method is implemented
- self.vol_drv.extend_volume()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.LOG')
- @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
- @mock.patch('pypowervm.tasks.hdisk.discover_hdisk', autospec=True)
- @mock.patch('pypowervm.tasks.hdisk.build_itls', autospec=True)
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_get_hdisk_itls')
- def test_discover_volume_on_vios(self, mock_get_itls, mock_build_itls,
- mock_disc_hdisk, mock_good_disc,
- mock_log):
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid'
- mock_get_itls.return_value = 'v_wwpns', 't_wwpns', 'lun'
- mock_build_itls.return_value = 'itls'
- mock_disc_hdisk.return_value = 'status', 'devname', 'udid'
-
- # Good discovery
- mock_good_disc.return_value = True
- status, devname, udid = self.vol_drv._discover_volume_on_vios(
- mock_vios)
- self.assertEqual(mock_disc_hdisk.return_value[0], status)
- self.assertEqual(mock_disc_hdisk.return_value[1], devname)
- self.assertEqual(mock_disc_hdisk.return_value[2], udid)
- mock_get_itls.assert_called_once_with(mock_vios)
- mock_build_itls.assert_called_once_with('v_wwpns', 't_wwpns', 'lun')
- mock_disc_hdisk.assert_called_once_with(self.adpt, 'uuid', 'itls')
- mock_good_disc.assert_called_once_with('status', 'devname')
- mock_log.info.assert_called_once()
- mock_log.warning.assert_not_called()
-
- mock_log.reset_mock()
-
- # Bad discovery, not device in use status
- mock_good_disc.return_value = False
- self.vol_drv._discover_volume_on_vios(mock_vios)
- mock_log.warning.assert_not_called()
- mock_log.info.assert_not_called()
-
- # Bad discovery, device in use status
- mock_disc_hdisk.return_value = (hdisk.LUAStatus.DEVICE_IN_USE, 'dev',
- 'udid')
- self.vol_drv._discover_volume_on_vios(mock_vios)
- mock_log.warning.assert_called_once()
-
- def test_get_hdisk_itls(self):
- """Validates the _get_hdisk_itls method."""
-
- mock_vios = mock.MagicMock()
- mock_vios.get_active_pfc_wwpns.return_value = [I_WWPN_1]
-
- i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
- self.assertListEqual([I_WWPN_1], i_wwpn)
- self.assertListEqual(['t1'], t_wwpns)
- self.assertEqual('1', lun)
-
- mock_vios.get_active_pfc_wwpns.return_value = [I_WWPN_2]
- i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
- self.assertListEqual([I_WWPN_2], i_wwpn)
- self.assertListEqual(['t2', 't3'], t_wwpns)
-
- mock_vios.get_active_pfc_wwpns.return_value = ['12345']
- i_wwpn, t_wwpns, lun = self.vol_drv._get_hdisk_itls(mock_vios)
- self.assertListEqual([], i_wwpn)
-
- @mock.patch('pypowervm.wrappers.storage.PV', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping',
- autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.add_map', autospec=True)
- def test_add_append_mapping(self, mock_add_map, mock_bld_map, mock_pv):
- def test_afs(add_func):
- mock_vios = mock.create_autospec(pvm_vios.VIOS)
- self.assertEqual(mock_add_map.return_value, add_func(mock_vios))
- mock_pv.bld.assert_called_once_with(self.adpt, 'devname', tag=None)
- mock_bld_map.assert_called_once_with(
- None, mock_vios, self.vol_drv.vm_uuid,
- mock_pv.bld.return_value)
- mock_add_map.assert_called_once_with(
- mock_vios, mock_bld_map.return_value)
-
- self.wtsk.add_functor_subtask.side_effect = test_afs
- self.vol_drv._add_append_mapping('vios_uuid', 'devname')
- self.wtsk.add_functor_subtask.assert_called_once()
-
- @mock.patch('nova.virt.powervm.volume.fcvscsi.LOG.warning')
- @mock.patch('nova.virt.powervm.vm.get_instance_wrapper')
- @mock.patch('pypowervm.utils.transaction.FeedTask', autospec=True)
- def test_detach_volume(self, mock_feed_task, mock_get_wrap, mock_log):
- mock_lpar_wrap = mock.MagicMock()
- mock_lpar_wrap.can_modify_io.return_value = True, None
- mock_get_wrap.return_value = mock_lpar_wrap
- mock_detach_ftsk = mock_feed_task.return_value
-
- # Multiple vioses modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': True}}}
- mock_detach_ftsk.execute.return_value = mock_ret
- self.vol_drv.detach_volume()
- mock_feed_task.assert_called_once()
- mock_detach_ftsk.add_functor_subtask.assert_called_once_with(
- self.vol_drv._detach_vol_for_vio, provides='vio_modified',
- flag_update=False)
- mock_detach_ftsk.execute.assert_called_once_with()
- self.ftsk.execute.assert_called_once_with()
- mock_log.assert_not_called()
-
- # 1 vios modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': True},
- 'vios2': {'vio_modified': False}}}
- mock_detach_ftsk.execute.return_value = mock_ret
- self.vol_drv.detach_volume()
- mock_log.assert_not_called()
-
-        # No vioses modified
- mock_ret = {'wrapper_task_rets': {'vios1': {'vio_modified': False},
- 'vios2': {'vio_modified': False}}}
- mock_detach_ftsk.execute.return_value = mock_ret
- self.vol_drv.detach_volume()
- mock_log.assert_called_once()
-
- # Raise if exception during execute
- mock_detach_ftsk.execute.side_effect = Exception()
- self.assertRaises(exc.VolumeDetachFailed, self.vol_drv.detach_volume)
-
- # Raise if vm in invalid state
- mock_lpar_wrap.can_modify_io.return_value = False, None
- self.assertRaises(exc.VolumeDetachFailed, self.vol_drv.detach_volume)
-
- @mock.patch('pypowervm.tasks.hdisk.good_discovery', autospec=True)
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_discover_volume_on_vios')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_add_remove_mapping')
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_add_remove_hdisk')
- @mock.patch('nova.virt.powervm.vm.get_vm_qp')
- def test_detach_vol_for_vio(self, mock_get_qp, mock_rm_hdisk, mock_rm_map,
- mock_disc_vol, mock_good_disc):
- # Good detach, bdm data is found
- self.vol_drv._set_udid('udid')
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'vios_uuid'
- mock_vios.hdisk_from_uuid.return_value = 'devname'
- mock_get_qp.return_value = 'part_id'
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertTrue(ret)
- mock_vios.hdisk_from_uuid.assert_called_once_with('udid')
- mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
- mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')
-
- mock_vios.reset_mock()
- mock_rm_map.reset_mock()
- mock_rm_hdisk.reset_mock()
-
- # Good detach, no udid
- self.vol_drv._set_udid(None)
- mock_disc_vol.return_value = 'status', 'devname', 'udid'
- mock_good_disc.return_value = True
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertTrue(ret)
- mock_vios.hdisk_from_uuid.assert_not_called()
- mock_disc_vol.assert_called_once_with(mock_vios)
- mock_good_disc.assert_called_once_with('status', 'devname')
- mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
- mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')
-
- mock_vios.reset_mock()
- mock_disc_vol.reset_mock()
- mock_good_disc.reset_mock()
- mock_rm_map.reset_mock()
- mock_rm_hdisk.reset_mock()
-
- # Good detach, no device name
- self.vol_drv._set_udid('udid')
- mock_vios.hdisk_from_uuid.return_value = None
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertTrue(ret)
- mock_vios.hdisk_from_uuid.assert_called_once_with('udid')
- mock_disc_vol.assert_called_once_with(mock_vios)
- mock_good_disc.assert_called_once_with('status', 'devname')
- mock_rm_map.assert_called_once_with('part_id', 'vios_uuid', 'devname')
- mock_rm_hdisk.assert_called_once_with(mock_vios, 'devname')
-
- mock_rm_map.reset_mock()
- mock_rm_hdisk.reset_mock()
-
- # Bad detach, invalid state
- mock_good_disc.return_value = False
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertFalse(ret)
- mock_rm_map.assert_not_called()
- mock_rm_hdisk.assert_not_called()
-
- # Bad detach, exception discovering volume on vios
- mock_disc_vol.side_effect = Exception()
- ret = self.vol_drv._detach_vol_for_vio(mock_vios)
- self.assertFalse(ret)
- mock_rm_map.assert_not_called()
- mock_rm_hdisk.assert_not_called()
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.remove_maps', autospec=True)
- def test_add_remove_mapping(self, mock_rm_maps, mock_gen_match):
- def test_afs(rm_func):
- mock_vios = mock.create_autospec(pvm_vios.VIOS)
- self.assertEqual(mock_rm_maps.return_value, rm_func(mock_vios))
- mock_gen_match.assert_called_once_with(
- pvm_stor.PV, names=['devname'])
- mock_rm_maps.assert_called_once_with(
- mock_vios, 'vm_uuid', mock_gen_match.return_value)
-
- self.wtsk.add_functor_subtask.side_effect = test_afs
- self.vol_drv._add_remove_mapping('vm_uuid', 'vios_uuid', 'devname')
- self.wtsk.add_functor_subtask.assert_called_once()
-
- @mock.patch('pypowervm.tasks.hdisk.remove_hdisk', autospec=True)
- @mock.patch('taskflow.task.FunctorTask', autospec=True)
- @mock.patch('nova.virt.powervm.volume.fcvscsi.FCVscsiVolumeAdapter.'
- '_check_host_mappings')
- def test_add_remove_hdisk(self, mock_check_maps, mock_functask,
- mock_rm_hdisk):
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid'
- mock_check_maps.return_value = True
- self.vol_drv._add_remove_hdisk(mock_vios, 'devname')
- mock_functask.assert_not_called()
- self.ftsk.add_post_execute.assert_not_called()
- mock_check_maps.assert_called_once_with(mock_vios, 'devname')
- self.assertEqual(0, mock_rm_hdisk.call_count)
-
- def test_functor_task(rm_hdisk, name=None):
- rm_hdisk()
- return 'functor_task'
-
- mock_check_maps.return_value = False
- mock_functask.side_effect = test_functor_task
- self.vol_drv._add_remove_hdisk(mock_vios, 'devname')
- mock_functask.assert_called_once()
- self.ftsk.add_post_execute.assert_called_once_with('functor_task')
- mock_rm_hdisk.assert_called_once_with(self.adpt, CONF.host,
- 'devname', 'uuid')
-
- @mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
- @mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
- def test_check_host_mappings(self, mock_find_maps, mock_gen_match):
- mock_vios = mock.MagicMock()
- mock_vios.uuid = 'uuid2'
- mock_v1 = mock.MagicMock(scsi_mappings='scsi_maps_1', uuid='uuid1')
- mock_v2 = mock.MagicMock(scsi_mappings='scsi_maps_2', uuid='uuid2')
- mock_feed = [mock_v1, mock_v2]
- self.ftsk.feed = mock_feed
-
- # Multiple mappings found
- mock_find_maps.return_value = ['map1', 'map2']
- ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
- self.assertTrue(ret)
- mock_gen_match.assert_called_once_with(pvm_stor.PV, names=['devname'])
- mock_find_maps.assert_called_once_with('scsi_maps_2', None,
- mock_gen_match.return_value)
-
- # One mapping found
- mock_find_maps.return_value = ['map1']
- ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
- self.assertFalse(ret)
-
- # No mappings found
- mock_find_maps.return_value = []
- ret = self.vol_drv._check_host_mappings(mock_vios, 'devname')
- self.assertFalse(ret)
diff --git a/nova/tests/unit/virt/test_block_device.py b/nova/tests/unit/virt/test_block_device.py
index aff6c5ef19..703f15967c 100644
--- a/nova/tests/unit/virt/test_block_device.py
+++ b/nova/tests/unit/virt/test_block_device.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
from os_brick import encryptors
+from unittest import mock
+
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
@@ -35,6 +36,9 @@ ATTACHMENT_ID = uuids.attachment_id
class TestDriverBlockDevice(test.NoDBTestCase):
+ # os-brick>=5.1 now uses external file system locks instead of internal
+ # locks so we need to set up locking
+ REQUIRES_LOCKING = True
# This is used to signal if we're dealing with a new style volume
# attachment (Cinder v3.44 flow).
attachment_id = None
@@ -45,7 +49,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'volume': driver_block_device.DriverVolumeBlockDevice,
'volsnapshot': driver_block_device.DriverVolSnapshotBlockDevice,
'volimage': driver_block_device.DriverVolImageBlockDevice,
- 'volblank': driver_block_device.DriverVolBlankBlockDevice
+ 'volblank': driver_block_device.DriverVolBlankBlockDevice,
+ 'image': driver_block_device.DriverImageBlockDevice,
}
swap_bdm_dict = block_device.BlockDeviceDict(
@@ -74,14 +79,22 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
- 'boot_index': -1})
+ 'boot_index': -1,
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
- 'disk_bus': 'scsi'}
+ 'disk_bus': 'scsi',
+ 'encrypted': False,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None}
volume_bdm_dict = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': uuids.instance,
@@ -206,6 +219,35 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'boot_index': -1,
'volume_type': None}
+ image_bdm_dict = block_device.BlockDeviceDict(
+ {'id': 7, 'instance_uuid': uuids.instance,
+ 'device_name': '/dev/vda',
+ 'source_type': 'image',
+ 'destination_type': 'local',
+ 'disk_bus': 'virtio',
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'boot_index': 0,
+ 'image_id': 'fake-image-id-1',
+ 'volume_size': 5,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'plain',
+ 'encryption_options': None})
+
+ image_driver_bdm = {
+ 'device_name': '/dev/vda',
+ 'device_type': 'disk',
+ 'guest_format': 'ext4',
+ 'disk_bus': 'virtio',
+ 'boot_index': 0,
+ 'image_id': 'fake-image-id-1',
+ 'size': 5,
+ 'encrypted': True,
+ 'encryption_secret_uuid': uuids.secret,
+ 'encryption_format': 'plain',
+ 'encryption_options': None}
+
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = mock.MagicMock(autospec=cinder.API)
@@ -215,6 +257,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
# create bdm objects for testing
self.swap_bdm = fake_block_device.fake_bdm_object(
self.context, self.swap_bdm_dict)
+ self.image_bdm = fake_block_device.fake_bdm_object(
+ self.context, self.image_bdm_dict)
self.ephemeral_bdm = fake_block_device.fake_bdm_object(
self.context, self.ephemeral_bdm_dict)
self.volume_bdm = fake_block_device.fake_bdm_object(
@@ -333,6 +377,10 @@ class TestDriverBlockDevice(test.NoDBTestCase):
if field == 'attachment_id':
# Must set UUID values on UUID fields.
fake_value = ATTACHMENT_ID
+ elif isinstance(test_bdm._bdm_obj.fields[fld],
+ fields.UUIDField):
+ # Generically handle other UUID fields.
+ fake_value = uuids.fake_value
else:
fake_value = 'fake_changed_value'
test_bdm[field] = fake_value
@@ -373,6 +421,20 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
+ def test_driver_image_block_device(self):
+ self._test_driver_device("image")
+
+ def test_driver_image_default_size(self):
+ self._test_driver_default_size('image')
+
+ def test_driver_image_block_device_destination_not_local(self):
+ self._test_driver_device('image')
+ bdm = self.image_bdm_dict.copy()
+ bdm['destination_type'] = 'volume'
+ self.assertRaises(driver_block_device._InvalidType,
+ self.driver_classes['image'],
+ fake_block_device.fake_bdm_object(self.context, bdm))
+
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
@@ -402,7 +464,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.assertEqual(test_bdm.volume_size, 3)
self.assertEqual('fake-snapshot-id-1', test_bdm.get('snapshot_id'))
- def test_driver_image_block_device(self):
+ def test_driver_volume_image_block_device(self):
self._test_driver_device('volimage')
test_bdm = self.driver_classes['volimage'](
@@ -412,7 +474,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
self.assertEqual(test_bdm.volume_size, 1)
self.assertEqual('fake-image-id-1', test_bdm.get('image_id'))
- def test_driver_image_block_device_destination_local(self):
+ def test_driver_volume_image_block_device_destination_local(self):
self._test_driver_device('volimage')
bdm = self.volimage_bdm_dict.copy()
bdm['destination_type'] = 'local'
@@ -433,24 +495,23 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def _test_call_wait_func(self, delete_on_termination, delete_fail=False):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm['delete_on_termination'] = delete_on_termination
- with mock.patch.object(self.volume_api, 'delete') as vol_delete:
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
-
- if delete_on_termination and delete_fail:
- vol_delete.side_effect = Exception()
-
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm._call_wait_func,
- context=self.context,
- wait_func=wait_func,
- volume_api=self.volume_api,
- volume_id='fake-id')
- self.assertEqual(delete_on_termination, vol_delete.called)
+ if delete_on_termination and delete_fail:
+ self.volume_api.delete.side_effect = Exception()
+
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm._call_wait_func,
+ context=self.context,
+ wait_func=wait_func,
+ volume_api=self.volume_api,
+ volume_id='fake-id')
+ self.assertEqual(delete_on_termination, self.volume_api.delete.called)
def test_call_wait_delete_volume(self):
self._test_call_wait_func(True)
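Note on the pattern applied in this and the following hunks: the nested mock.patch.object(self.volume_api, ...) context managers are dropped in favour of configuring the MagicMock(autospec=cinder.API) created once in setUp() and asserting on it directly. A minimal sketch of that style, using illustrative names rather than the real cinder interface:

from unittest import mock

class FakeVolumeAPI:
    def delete(self, context, volume_id):
        pass

# Built once (e.g. in setUp) and then configured/asserted on per test,
# instead of being re-patched inside every test method.
volume_api = mock.create_autospec(FakeVolumeAPI, instance=True)
volume_api.delete('ctxt', 'fake-id')
volume_api.delete.assert_called_once_with('ctxt', 'fake-id')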
@@ -483,25 +544,24 @@ class TestDriverBlockDevice(test.NoDBTestCase):
volume['shared_targets'] = True
volume['service_uuid'] = uuids.service_uuid
+ if delete_attachment_raises:
+ self.volume_api.attachment_delete.side_effect = (
+ delete_attachment_raises)
+
+ self.virt_driver.get_volume_connector.return_value = connector
+
with test.nested(
mock.patch.object(driver_bdm, '_get_volume', return_value=volume),
- mock.patch.object(self.virt_driver, 'get_volume_connector',
- return_value=connector),
mock.patch('os_brick.initiator.utils.guard_connection'),
- mock.patch.object(self.volume_api, 'attachment_delete'),
- ) as (mock_get_volume, mock_get_connector, mock_guard,
- vapi_attach_del):
-
- if delete_attachment_raises:
- vapi_attach_del.side_effect = delete_attachment_raises
+ ) as (mock_get_volume, mock_guard):
driver_bdm.detach(elevated_context, instance,
self.volume_api, self.virt_driver,
attachment_id=attachment_id)
mock_guard.assert_called_once_with(volume)
- vapi_attach_del.assert_called_once_with(elevated_context,
- attachment_id)
+ self.volume_api.attachment_delete.assert_called_once_with(
+ elevated_context, attachment_id)
def test_volume_delete_attachment_with_shared_targets(self):
self.test_volume_delete_attachment(include_shared_targets=True)
@@ -952,31 +1012,28 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
- with test.nested(
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_get_snap, vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_get_snap.assert_called_once_with(
- self.context, 'fake-snapshot-id-1')
- vol_create.assert_called_once_with(
- self.context, 3, '', '', availability_zone=None,
- snapshot=snapshot, volume_type=None)
- vol_delete.assert_called_once_with(self.context, volume['id'])
+ self.volume_api.get_snapshot.return_value = snapshot
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
+
+ self.volume_api.get_snapshot.assert_called_once_with(
+ self.context, 'fake-snapshot-id-1')
+ self.volume_api.create.assert_called_once_with(
+ self.context, 3, '', '', availability_zone=None,
+ snapshot=snapshot, volume_type=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['volsnapshot'](
@@ -984,19 +1041,17 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot'),
- mock.patch.object(self.volume_api, 'create'),
- ) as (mock_attach, mock_get_snapshot, mock_create):
+ with mock.patch.object(
+ self.driver_classes['volume'], 'attach'
+ ) as mock_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
            # Make sure these are not called
- mock_get_snapshot.assert_not_called()
- mock_create.assert_not_called()
+ self.volume_api.get_snapshot.assert_not_called()
+ self.volume_api.create.assert_not_called()
def test_snapshot_attach_no_volume_and_no_volume_type(self):
bdm = self.driver_classes['volsnapshot'](self.volsnapshot_bdm)
@@ -1006,15 +1061,10 @@ class TestDriverBlockDevice(test.NoDBTestCase):
original_volume = {'id': uuids.original_volume_id,
'volume_type_id': 'original_volume_type'}
new_volume = {'id': uuids.new_volume_id}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(self.volume_api, 'get',
- return_value=original_volume),
- mock.patch.object(self.volume_api, 'create',
- return_value=new_volume),
- ) as (mock_attach, mock_get_snapshot, mock_get, mock_create):
+ self.volume_api.get_snapshot.return_value = snapshot
+ self.volume_api.get.return_value = original_volume
+ self.volume_api.create.return_value = new_volume
+ with mock.patch.object(self.driver_classes["volume"], "attach"):
bdm.volume_id = None
bdm.volume_type = None
bdm.attach(self.context, instance, self.volume_api,
@@ -1022,10 +1072,11 @@ class TestDriverBlockDevice(test.NoDBTestCase):
# Assert that the original volume type is fetched, stored within
# the bdm and then used to create the new snapshot based volume.
- mock_get.assert_called_once_with(self.context,
- uuids.original_volume_id)
+ self.volume_api.get.assert_called_once_with(
+ self.context, uuids.original_volume_id)
self.assertEqual('original_volume_type', bdm.volume_type)
- mock_create.assert_called_once_with(self.context, bdm.volume_size,
+ self.volume_api.create.assert_called_once_with(
+ self.context, bdm.volume_size,
'', '', volume_type='original_volume_type', snapshot=snapshot,
availability_zone=None)
@@ -1097,27 +1148,25 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
- with test.nested(
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_create.assert_called_once_with(
- self.context, 1, '', '', image_id=image['id'],
- availability_zone=None, volume_type=None)
- vol_delete.assert_called_once_with(self.context, volume['id'])
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
+
+ self.volume_api.create.assert_called_once_with(
+ self.context, 1, '', '', image_id=image['id'],
+ availability_zone=None, volume_type=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_image_attach_volume(self):
test_bdm = self.driver_classes['volimage'](
@@ -1125,19 +1174,17 @@ class TestDriverBlockDevice(test.NoDBTestCase):
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
- with test.nested(
- mock.patch.object(self.driver_classes['volume'], 'attach'),
- mock.patch.object(self.volume_api, 'get_snapshot'),
- mock.patch.object(self.volume_api, 'create'),
- ) as (mock_attch, mock_get_snapshot, mock_create):
+ with mock.patch.object(
+ self.driver_classes['volume'], 'attach'
+ ) as mock_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
- mock_attch.assert_called_once_with(
+ mock_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
            # Make sure these are not called
- mock_get_snapshot.assert_not_called()
- mock_create.assert_not_called()
+ self.volume_api.get_snapshot.assert_not_called()
+ self.volume_api.create.assert_not_called()
def test_blank_attach_fail_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
@@ -1149,30 +1196,26 @@ class TestDriverBlockDevice(test.NoDBTestCase):
**{'uuid': uuids.uuid})
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
+ self.volume_api.create.return_value = volume
+ wait_func = mock.MagicMock()
+ mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
+ seconds=1,
+ attempts=1,
+ volume_status='error')
+ wait_func.side_effect = mock_exception
+ self.assertRaises(exception.VolumeNotCreated,
+ test_bdm.attach, context=self.context,
+ instance=instance,
+ volume_api=self.volume_api,
+ virt_driver=self.virt_driver,
+ wait_func=wait_func)
- with test.nested(
- mock.patch.object(self.volume_api, 'create', return_value=volume),
- mock.patch.object(self.volume_api, 'delete'),
- ) as (vol_create, vol_delete):
- wait_func = mock.MagicMock()
- mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
- seconds=1,
- attempts=1,
- volume_status='error')
- wait_func.side_effect = mock_exception
- self.assertRaises(exception.VolumeNotCreated,
- test_bdm.attach, context=self.context,
- instance=instance,
- volume_api=self.volume_api,
- virt_driver=self.virt_driver,
- wait_func=wait_func)
-
- vol_create.assert_called_once_with(
- self.context, test_bdm.volume_size,
- '%s-blank-vol' % uuids.uuid,
- '', volume_type=None, availability_zone=None)
- vol_delete.assert_called_once_with(
- self.context, volume['id'])
+ self.volume_api.create.assert_called_once_with(
+ self.context, test_bdm.volume_size,
+ '%s-blank-vol' % uuids.uuid,
+ '', volume_type=None, availability_zone=None)
+ self.volume_api.delete.assert_called_once_with(
+ self.context, volume['id'])
def test_blank_attach_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
@@ -1278,12 +1321,8 @@ class TestDriverBlockDevice(test.NoDBTestCase):
def test_is_implemented(self):
for bdm in (self.volimage_bdm, self.volume_bdm, self.swap_bdm,
- self.ephemeral_bdm, self.volsnapshot_bdm):
+ self.ephemeral_bdm, self.volsnapshot_bdm, self.image_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
- local_image = self.volimage_bdm_dict.copy()
- local_image['destination_type'] = 'local'
- self.assertFalse(driver_block_device.is_implemented(
- fake_block_device.fake_bdm_object(self.context, local_image)))
def test_is_block_device_mapping(self):
test_swap = self.driver_classes['swap'](self.swap_bdm)
@@ -1481,13 +1520,9 @@ class TestDriverBlockDevice(test.NoDBTestCase):
'display_name': 'fake-snapshot-vol'}
self.stub_volume_create(volume)
- with test.nested(
- mock.patch.object(self.volume_api, 'get_snapshot',
- return_value=snapshot),
- mock.patch.object(volume_class, 'attach')
- ) as (
- vol_get_snap, vol_attach
- ):
+ self.volume_api.get_snapshot.return_value = snapshot
+
+ with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index 6f552255d4..ab51a3e26c 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -14,11 +14,12 @@
import collections
import copy
-import ddt
+from unittest import mock
-import mock
+import ddt
import testtools
+import nova.conf
from nova import exception
from nova import objects
from nova.objects import fields
@@ -28,6 +29,8 @@ from nova.tests.unit import fake_pci_device_pools as fake_pci
from nova.tests.unit.image.fake import fake_image_obj
from nova.virt import hardware as hw
+CONF = nova.conf.CONF
+
class InstanceInfoTests(test.NoDBTestCase):
@@ -2020,6 +2023,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=0),
@@ -2033,6 +2037,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=1,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=64),
@@ -2046,6 +2051,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=2,
cpu_usage=0,
memory_usage=0,
+ socket=2,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=16)],
@@ -2127,6 +2133,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=160,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=32768, used=32),
@@ -2167,6 +2174,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=1024,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2178,6 +2186,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2189,6 +2198,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2246,7 +2256,7 @@ class NUMATopologyTest(test.NoDBTestCase):
self.assertEqual(hostusage.cells[2].cpu_usage, 1)
self.assertEqual(hostusage.cells[2].memory_usage, 256)
- def test_host_usage_culmulative_with_free(self):
+ def test_host_usage_cumulative_with_free(self):
hosttopo = objects.NUMATopology(cells=[
objects.NUMACell(
id=0,
@@ -2255,6 +2265,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=1024,
cpu_usage=2,
memory_usage=512,
+ socket=0,
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
siblings=[set([0]), set([1]), set([2]), set([3])],
@@ -2266,6 +2277,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=1,
memory_usage=512,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2277,6 +2289,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=256,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=512, used=0)],
@@ -2327,6 +2340,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=2048, total=512, used=128,
@@ -2339,6 +2353,7 @@ class NUMATopologyTest(test.NoDBTestCase):
memory=512,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=1048576, total=5, used=2,
@@ -2603,6 +2618,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=2,
memory_usage=2048,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -2613,6 +2629,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=2,
memory_usage=2048,
+ socket=0,
pinned_cpus=set(),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -2635,45 +2652,45 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
def test_get_fitting_success_no_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1)
+ self.host, self.instance1, {})
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance3)
+ self.host, self.instance3, {})
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
def test_get_fitting_success_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance3, self.limits)
+ self.host, self.instance3, {}, self.limits)
self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance.cells[0].id)
def test_get_fitting_fails_no_limits(self):
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance2, self.limits)
+ self.host, self.instance2, {}, self.limits)
self.assertIsNone(fitted_instance)
- def test_get_fitting_culmulative_fails_limits(self):
+ def test_get_fitting_cumulative_fails_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1, self.limits)
+ self.host, self.instance1, {}, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance2, self.limits)
+ self.host, self.instance2, {}, self.limits)
self.assertIsNone(fitted_instance2)
- def test_get_fitting_culmulative_success_limits(self):
+ def test_get_fitting_cumulative_success_limits(self):
fitted_instance1 = hw.numa_fit_instance_to_host(
- self.host, self.instance1, self.limits)
+ self.host, self.instance1, {}, self.limits)
self.assertIsInstance(fitted_instance1, objects.InstanceNUMATopology)
self.assertEqual(1, fitted_instance1.cells[0].id)
self.host = hw.numa_usage_from_instance_numa(
self.host, fitted_instance1)
fitted_instance2 = hw.numa_fit_instance_to_host(
- self.host, self.instance3, self.limits)
+ self.host, self.instance3, {}, self.limits)
self.assertIsInstance(fitted_instance2, objects.InstanceNUMATopology)
self.assertEqual(2, fitted_instance2.cells[0].id)
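Every call to hw.numa_fit_instance_to_host() in this file now passes an empty dict as a new third positional argument. The stand-in below only illustrates the updated call shape; the parameter name (presumably the request-group to resource-provider mapping added for PCI-in-placement scheduling) is an assumption, not something stated in this diff.

def numa_fit_instance_to_host(host_topology, instance_topology,
                              provider_mapping, limits=None,
                              pci_requests=None, pci_stats=None):
    # These tests pass {} to mean "no provider mapping supplied".
    return instance_topology

fitted = numa_fit_instance_to_host('host-topo', 'instance-topo', {})
print(fitted)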
@@ -2688,7 +2705,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
network_metadata=network_metadata)
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance1, limits=limits)
+ self.host, self.instance1, {}, limits=limits)
self.assertIsInstance(fitted_instance, objects.InstanceNUMATopology)
mock_supports.assert_called_once_with(
@@ -2705,7 +2722,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
network_metadata=network_metadata)
fitted_instance = hw.numa_fit_instance_to_host(
- self.host, self.instance1, limits=limits)
+ self.host, self.instance1, {}, limits=limits)
self.assertIsNone(fitted_instance)
mock_supports.assert_has_calls([
@@ -2722,6 +2739,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
'support_requests', return_value= True):
fitted_instance1 = hw.numa_fit_instance_to_host(self.host,
self.instance1,
+ {},
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsInstance(fitted_instance1,
@@ -2737,6 +2755,7 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
fitted_instance1 = hw.numa_fit_instance_to_host(
self.host,
self.instance1,
+ {},
pci_requests=pci_reqs,
pci_stats=pci_stats)
self.assertIsNone(fitted_instance1)
@@ -2753,9 +2772,9 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
# the PCI device is found on host cell 1
pci_stats = _create_pci_stats(1)
- # ...threfore an instance without a PCI device should get host cell 2
+ # ...therefore an instance without a PCI device should get host cell 2
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1, pci_stats=pci_stats)
+ self.host, self.instance1, {}, pci_stats=pci_stats)
self.assertIsInstance(instance_topology, objects.InstanceNUMATopology)
# TODO(sfinucan): We should be comparing this against the HOST cell
self.assertEqual(2, instance_topology.cells[0].id)
@@ -2763,9 +2782,9 @@ class VirtNUMAHostTopologyTestCase(test.NoDBTestCase):
# the PCI device is now found on host cell 2
pci_stats = _create_pci_stats(2)
- # ...threfore an instance without a PCI device should get host cell 1
+ # ...therefore an instance without a PCI device should get host cell 1
instance_topology = hw.numa_fit_instance_to_host(
- self.host, self.instance1, pci_stats=pci_stats)
+ self.host, self.instance1, {}, pci_stats=pci_stats)
self.assertIsInstance(instance_topology, objects.InstanceNUMATopology)
self.assertEqual(1, instance_topology.cells[0].id)
@@ -2811,6 +2830,54 @@ class NumberOfSerialPortsTest(test.NoDBTestCase):
flavor, image_meta)
+class VirtLockMemoryTestCase(test.NoDBTestCase):
+ def _test_get_locked_memory_constraint(self, spec=None, props=None):
+ flavor = objects.Flavor(vcpus=16, memory_mb=2048,
+ extra_specs=spec or {})
+ image_meta = objects.ImageMeta.from_dict({"properties": props or {}})
+ return hw.get_locked_memory_constraint(flavor, image_meta)
+
+ def test_get_locked_memory_constraint_image(self):
+ self.assertTrue(
+ self._test_get_locked_memory_constraint(
+ spec={"hw:mem_page_size": "small"},
+ props={"hw_locked_memory": "True"}))
+
+ def test_get_locked_memory_conflict(self):
+ ex = self.assertRaises(
+ exception.FlavorImageLockedMemoryConflict,
+ self._test_get_locked_memory_constraint,
+ spec={
+ "hw:locked_memory": "False",
+ "hw:mem_page_size": "small"
+ },
+ props={"hw_locked_memory": "True"}
+ )
+ ex_msg = ("locked_memory value in image (True) and flavor (False) "
+ "conflict. A consistent value is expected if both "
+ "specified.")
+ self.assertEqual(ex_msg, str(ex))
+
+ def test_get_locked_memory_constraint_forbidden(self):
+ self.assertRaises(
+ exception.LockMemoryForbidden,
+ self._test_get_locked_memory_constraint,
+ {"hw:locked_memory": "True"})
+
+ self.assertRaises(
+ exception.LockMemoryForbidden,
+ self._test_get_locked_memory_constraint,
+ {},
+ {"hw_locked_memory": "True"})
+
+ def test_get_locked_memory_constraint_image_false(self):
+ # False value of locked_memory will not raise LockMemoryForbidden
+ self.assertFalse(
+ self._test_get_locked_memory_constraint(
+ spec=None,
+ props={"hw_locked_memory": "False"}))
+
+
class VirtMemoryPagesTestCase(test.NoDBTestCase):
def test_cell_instance_pagesize(self):
cell = objects.InstanceNUMACell(
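The new VirtLockMemoryTestCase above encodes three rules for hw.get_locked_memory_constraint(): the flavor extra spec hw:locked_memory and the image property hw_locked_memory must agree when both are set, locking is only permitted when a memory page size is also requested, and a false value simply returns False. A simplified, hedged restatement of that logic (the exception types and the page-size lookup are richer in the real helper):

def locked_memory_constraint(extra_specs, image_props):
    flavor_val = extra_specs.get("hw:locked_memory")
    image_val = image_props.get("hw_locked_memory")
    if (flavor_val is not None and image_val is not None
            and flavor_val.lower() != image_val.lower()):
        raise ValueError("locked_memory value in image and flavor conflict")
    locked = str(flavor_val or image_val or "false").lower() == "true"
    if locked and "hw:mem_page_size" not in extra_specs:
        raise ValueError("locked memory requires a mem_page_size setting")
    return locked

print(locked_memory_constraint(
    {"hw:mem_page_size": "small"}, {"hw_locked_memory": "True"}))   # True
print(locked_memory_constraint({}, {"hw_locked_memory": "False"}))  # False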
@@ -3833,11 +3900,18 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
@@ -3864,11 +3938,18 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(1,))
@@ -3895,11 +3976,18 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([2]), set([3])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ )
+ ]
+ )
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fit(self):
@@ -3924,13 +4012,25 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([4]), set([5]), set([6]), set([7])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 1))
@@ -3967,13 +4067,25 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
for cell in inst_topo.cells:
self.assertInstanceCellPinned(cell, cell_ids=(0, 2))
@@ -4000,13 +4112,25 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
siblings=[set([4]), set([5]), set([6]), set([7])])
])
inst_topo = objects.InstanceNUMATopology(
- cells=[objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([0, 1]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED),
- objects.InstanceNUMACell(
- cpuset=set(), pcpuset=set([2, 3]), memory=2048,
- cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0, 1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([2, 3]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED,
+ ),
+ ]
+ )
+
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_host_numa_fit_instance_to_host_fail_topology(self):
@@ -4040,7 +4164,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
objects.InstanceNUMACell(
cpuset=set(), pcpuset=set([4, 5]), memory=1024,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_cpu_pinning_usage_from_instances(self):
@@ -4052,6 +4176,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4081,6 +4206,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 3]),
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)],
@@ -4110,6 +4236,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4138,6 +4265,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0, 2]), set([1, 3])],
mempages=[objects.NUMAPagesTopology(
@@ -4164,6 +4292,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[set([0, 2]), set([1, 3])],
mempages=[objects.NUMAPagesTopology(
@@ -4190,6 +4319,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4216,6 +4346,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[set([0]), set([1]), set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4245,6 +4376,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set([2]),
siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
mempages=[objects.NUMAPagesTopology(
@@ -4275,6 +4407,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([2, 6, 7]),
siblings=[set([0, 4]), set([1, 5]), set([2, 6]), set([3, 7])],
mempages=[objects.NUMAPagesTopology(
@@ -4307,6 +4440,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
cpu_usage=2,
memory_usage=0,
pinned_cpus=set(),
+ socket=0,
siblings=[{cpu} for cpu in range(8)],
mempages=[objects.NUMAPagesTopology(
size_kb=4, total=524288, used=0)]
@@ -4340,6 +4474,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 3]),
siblings=[{cpu} for cpu in range(8)],
mempages=[objects.NUMAPagesTopology(
@@ -4382,6 +4517,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0, 5]), set([1, 6]), set([2, 7]), set([3, 8]),
set([4, 9])],
@@ -4421,6 +4557,7 @@ class CPUPinningTestCase(test.NoDBTestCase, _CPUPinningTestCaseBase):
memory=4096,
cpu_usage=2,
memory_usage=0,
+ socket=0,
pinned_cpus=set([0, 1, 2, 5, 6, 7]),
siblings=[set([0, 5]), set([1, 6]), set([2, 7]), set([3, 8]),
set([4, 9])],
@@ -4656,6 +4793,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([0]), set([1])],
mempages=[objects.NUMAPagesTopology(
@@ -4667,6 +4805,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
memory=2048,
cpu_usage=0,
memory_usage=0,
+ socket=0,
pinned_cpus=set(),
siblings=[set([2]), set([3])],
mempages=[objects.NUMAPagesTopology(
@@ -4680,7 +4819,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertIsNone(inst_topo.cells[0].cpuset_reserved)
@@ -4694,7 +4833,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertIsNone(inst_topo.cells[0].cpuset_reserved)
@@ -4708,7 +4847,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -4722,7 +4861,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0, 1, 2, 4]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertIsNone(inst_topo)
def test_multi_nodes_isolate(self):
@@ -4739,7 +4878,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
self.assertEqual({1: 2}, inst_topo.cells[1].cpu_pinning)
@@ -4759,7 +4898,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([2]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
# The guest NUMA node 0 is requesting 2pCPUs + 1 additional
# pCPU for emulator threads, the host can't handle the
# request.
@@ -4779,7 +4918,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([1, 2]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
self.assertEqual({1: 2, 2: 3}, inst_topo.cells[1].cpu_pinning)
@@ -4854,7 +4993,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 0, 1: 2}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([4]), inst_topo.cells[0].cpuset_reserved)
@@ -4884,7 +5023,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 4}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -4913,7 +5052,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
if policy:
inst_topo.emulator_threads_policy = policy
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
return inst_topo
def test_mixed_instance_not_define(self):
@@ -4970,7 +5109,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpuset=set(), pcpuset=set([0, 1]), memory=2048,
cpu_policy=fields.CPUAllocationPolicy.DEDICATED)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 3}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -4999,7 +5138,7 @@ class EmulatorThreadsTestCase(test.NoDBTestCase):
cpu_thread_policy=fields.CPUThreadAllocationPolicy.ISOLATE
)])
- inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo)
+ inst_topo = hw.numa_fit_instance_to_host(host_topo, inst_topo, {})
self.assertEqual({0: 2, 1: 4}, inst_topo.cells[0].cpu_pinning)
self.assertEqual(set([1]), inst_topo.cells[0].cpuset_reserved)
@@ -5254,7 +5393,7 @@ class MemEncryptionRequestedWithoutUEFITestCase(
expected_error = (
"Memory encryption requested by %(requesters)s but image "
"%(image_name)s doesn't have 'hw_firmware_type' property "
- "set to 'uefi'"
+ "set to 'uefi' or volume-backed instance was requested"
)
def _test_encrypted_memory_support_no_uefi(self, enc_extra_spec,
@@ -5381,6 +5520,25 @@ class MemEncryptionRequiredTestCase(test.NoDBTestCase):
(self.flavor_name, self.image_id)
)
+ def test_encrypted_memory_support_flavor_for_volume(self):
+ extra_specs = {'hw:mem_encryption': True}
+
+ flavor = objects.Flavor(name=self.flavor_name,
+ extra_specs=extra_specs)
+        # The following image_meta is typical for a root Cinder volume
+ image_meta = objects.ImageMeta.from_dict({
+ 'min_disk': 0,
+ 'min_ram': 0,
+ 'properties': {},
+ 'size': 0,
+ 'status': 'active'})
+        # Confirm that exception.FlavorImageConflict is raised when a
+        # flavor with the hw:mem_encryption extra spec is used to create
+        # a volume-backed instance
+ self.assertRaises(exception.FlavorImageConflict,
+ hw.get_mem_encryption_constraint, flavor,
+ image_meta)
+
class PCINUMAAffinityPolicyTest(test.NoDBTestCase):
@@ -5452,6 +5610,56 @@ class PCINUMAAffinityPolicyTest(test.NoDBTestCase):
image_meta.properties.hw_pci_numa_affinity_policy = "fake"
+class PMUEnabledTest(test.NoDBTestCase):
+
+ def test_pmu_image_and_flavor_conflict(self):
+        """Tests that get_pmu_constraint() raises FlavorImageConflict
+        when the image property and the flavor extra spec disagree.
+ """
+ flavor = objects.Flavor(
+ name='foo', vcpus=1, memory_mb=512, root_gb=1,
+ extra_specs={'hw:pmu': "true"})
+ image_meta = objects.ImageMeta.from_dict({
+ 'name': 'bar', 'properties': {'hw_pmu': False},
+ })
+ self.assertRaises(
+ exception.FlavorImageConflict,
+ hw.get_pmu_constraint,
+ flavor, image_meta)
+
+ def test_pmu_image_and_flavor_same_value(self):
+ # assert that if both the image and flavor are set to the same value
+        # no exception is raised and the shared value is returned.
+ flavor = objects.Flavor(
+ vcpus=1, memory_mb=512, root_gb=1, extra_specs={'hw:pmu': "true"})
+ image_meta = objects.ImageMeta.from_dict({
+ 'properties': {'hw_pmu': True},
+ })
+ self.assertTrue(hw.get_pmu_constraint(flavor, image_meta))
+
+ def test_pmu_image_only(self):
+ # assert that if only the image metadata is set then it is valid
+ flavor = objects.Flavor(
+ vcpus=1, memory_mb=512, root_gb=1, extra_specs={})
+
+ # ensure string to bool conversion works for image metadata
+ # property by using "yes".
+ image_meta = objects.ImageMeta.from_dict({
+ 'properties': {'hw_pmu': 'yes'},
+ })
+ self.assertTrue(hw.get_pmu_constraint(flavor, image_meta))
+
+ def test_pmu_flavor_only(self):
+ # assert that if only the flavor extra_spec is set then it is valid
+ # and test the string to bool conversion of "on" works.
+ flavor = objects.Flavor(
+ vcpus=1, memory_mb=512, root_gb=1, extra_specs={'hw:pmu': "on"})
+
+ image_meta = objects.ImageMeta.from_dict({'properties': {}})
+ self.assertTrue(hw.get_pmu_constraint(flavor, image_meta))
+
+
@ddt.ddt
class VIFMultiqueueEnabledTest(test.NoDBTestCase):
@@ -5614,3 +5822,251 @@ class RescuePropertyTestCase(test.NoDBTestCase):
meta = objects.ImageMeta.from_dict({'disk_format': 'raw'})
meta.properties = objects.ImageMetaProps.from_dict(props)
self.assertEqual(expected, hw.check_hw_rescue_props(meta))
+
+
+class HostCellsSortingTestCase(test.NoDBTestCase):
+    # NOTE (IPO) It is possible to test all sorting cases with one defined
+    # host NUMA topology.
+    # We have 4 NUMA cells with the following properties:
+    # NUMA cell 0: has the most CPU usage
+    # NUMA cell 1: has the most PCI devices available
+    # NUMA cell 2: has the most free pCPUs
+    # NUMA cell 3: has the most available memory
+    # So it is enough to check the order of NUMA cells in the resulting
+    # instance topology to verify each particular sorting case.
+
+ def setUp(self):
+ super(HostCellsSortingTestCase, self).setUp()
+
+ def _create_pci_stats(node, count):
+ test_dict = copy.copy(fake_pci.fake_pool_dict)
+ test_dict['numa_node'] = node
+ test_dict['vendor_id'] = '8086'
+ test_dict['product_id'] = 'fake-prod0'
+ test_dict['count'] = count
+ return stats.PciDeviceStats(
+ objects.NUMATopology(),
+ [objects.PciDevicePool.from_dict(test_dict)])
+
+ self.pci_stats = _create_pci_stats(1, 2)
+
+ self.host = objects.NUMATopology(cells=[
+ objects.NUMACell(
+ id=0,
+ cpuset=set([1, 2, 3, 4]),
+ pcpuset=set([1, 2, 3, 4]),
+ memory=4096,
+ cpu_usage=3,
+ memory_usage=2048,
+ pinned_cpus=set([1, 2]),
+ mempages=[objects.NUMAPagesTopology(
+ size_kb=4, total=524288, used=0)],
+ siblings=[set([1]), set([2]), set([3]), set([4])]),
+ objects.NUMACell(
+ id=1,
+ cpuset=set([5, 6, 7, 8]),
+ pcpuset=set([5, 6, 7, 8]),
+ memory=4096,
+ cpu_usage=2,
+ memory_usage=2048,
+ pinned_cpus=set([5, 6]),
+ mempages=[objects.NUMAPagesTopology(
+ size_kb=4, total=524288, used=0)],
+ siblings=[set([5]), set([6]), set([7]), set([8])]),
+ objects.NUMACell(
+ id=2,
+ cpuset=set([9, 10, 11, 12]),
+ pcpuset=set([9, 10, 11, 12]),
+ memory=4096,
+ cpu_usage=2,
+ memory_usage=2048,
+ pinned_cpus=set(),
+ mempages=[objects.NUMAPagesTopology(
+ size_kb=4, total=524288, used=0)],
+ siblings=[set([9]), set([10]), set([11]), set([12])]),
+ objects.NUMACell(
+ id=3,
+ cpuset=set([13, 14, 15, 16]),
+ pcpuset=set([13, 14, 15, 16]),
+ memory=4096,
+ cpu_usage=2,
+ memory_usage=1024,
+ pinned_cpus=set([13, 14]),
+ mempages=[objects.NUMAPagesTopology(
+ size_kb=4, total=524288, used=0)],
+ siblings=[set([13]), set([14]), set([15]), set([16])])
+ ])
+
+ self.instance0 = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([0]), pcpuset=set(), memory=2048),
+ objects.InstanceNUMACell(
+ id=1, cpuset=set([1]), pcpuset=set(), memory=2048),
+ objects.InstanceNUMACell(
+ id=2, cpuset=set([2]), pcpuset=set(), memory=2048),
+ objects.InstanceNUMACell(
+ id=3, cpuset=set([3]), pcpuset=set(), memory=2048)
+ ])
+
+ self.instance1 = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(
+ id=0, cpuset=set([0]), pcpuset=set(), memory=2048),
+ objects.InstanceNUMACell(
+ id=1, cpuset=set([1]), pcpuset=set(), memory=2048),
+ objects.InstanceNUMACell(
+ id=2, cpuset=set([2]), pcpuset=set(), memory=2048),
+ ])
+
+ self.instance2 = objects.InstanceNUMATopology(cells=[
+ objects.InstanceNUMACell(
+ id=0,
+ cpuset=set(),
+ pcpuset=set([0]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED
+ ),
+ objects.InstanceNUMACell(
+ id=1,
+ cpuset=set(),
+ pcpuset=set([1]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED
+ ),
+ objects.InstanceNUMACell(
+ id=2,
+ cpuset=set(),
+ pcpuset=set([2]),
+ memory=2048,
+ cpu_policy=fields.CPUAllocationPolicy.DEDICATED
+ )])
+
+ def assertInstanceNUMAcellOrder(self, list_to_check, instance_topo):
+ for cell, id in zip(instance_topo.cells, list_to_check):
+ self.assertEqual(cell.id, id)
+
+ def test_sort_host_numa_cell_num_equal_instance_cell_num(self):
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance0, {})
+ self.assertInstanceNUMAcellOrder([0, 1, 2, 3], instance_topology)
+
+ def test_sort_no_pci_stats_no_shared_cpu_policy(self):
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group = 'compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2, {})
+ self.assertInstanceNUMAcellOrder([0, 1, 3], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group = 'compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2, {})
+ self.assertInstanceNUMAcellOrder([2, 3, 0], instance_topology)
+
+ def test_sort_no_pci_stats_shared_cpu_policy(self):
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group = 'compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1, {})
+ self.assertInstanceNUMAcellOrder([0, 1, 2], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group = 'compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1, {})
+ self.assertInstanceNUMAcellOrder([3, 1, 2], instance_topology)
+
+ def test_sort_pci_stats_pci_req_no_shared_cpu_policy(self):
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group = 'compute')
+ pci_request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '8086', 'product_id': 'fake-prod0'}])
+ pci_reqs = [pci_request]
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2,
+ {},
+ pci_requests = pci_reqs,
+ pci_stats = self.pci_stats)
+ self.assertInstanceNUMAcellOrder([1, 0, 3], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group = 'compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2,
+ {},
+ pci_requests = pci_reqs,
+ pci_stats = self.pci_stats)
+ self.assertInstanceNUMAcellOrder([1, 2, 3], instance_topology)
+
+ def test_sort_pci_stats_pci_req_shared_cpu_policy(self):
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group = 'compute')
+ pci_request = objects.InstancePCIRequest(count=1,
+ spec=[{'vendor_id': '8086', 'product_id': 'fake-prod0'}])
+ pci_reqs = [pci_request]
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1,
+ {},
+ pci_requests = pci_reqs,
+ pci_stats = self.pci_stats)
+ self.assertInstanceNUMAcellOrder([1, 0, 2], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group = 'compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1,
+ {},
+ pci_requests = pci_reqs,
+ pci_stats = self.pci_stats)
+ self.assertInstanceNUMAcellOrder([1, 3, 2], instance_topology)
+
+ def test_sort_pci_stats_no_pci_req_no_shared_cpu_policy(self):
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group = 'compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2,
+ {},
+ pci_stats = self.pci_stats)
+ self.assertInstanceNUMAcellOrder([0, 3, 2], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group = 'compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance2,
+ {},
+ pci_stats = self.pci_stats)
+ self.assertInstanceNUMAcellOrder([2, 3, 0], instance_topology)
+
+ def test_sort_pci_stats_no_pci_req_shared_cpu_policy(self):
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ True,
+ group = 'compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1,
+ {},
+ pci_stats = self.pci_stats)
+ self.assertInstanceNUMAcellOrder([0, 2, 3], instance_topology)
+ CONF.set_override(
+ 'packing_host_numa_cells_allocation_strategy',
+ False,
+ group = 'compute')
+ instance_topology = hw.numa_fit_instance_to_host(
+ self.host, self.instance1,
+ {},
+ pci_stats = self.pci_stats)
+ self.assertInstanceNUMAcellOrder([3, 2, 0], instance_topology)
diff --git a/nova/tests/unit/virt/test_imagecache.py b/nova/tests/unit/virt/test_imagecache.py
index ca4389fa14..b97e520074 100644
--- a/nova/tests/unit/virt/test_imagecache.py
+++ b/nova/tests/unit/virt/test_imagecache.py
@@ -12,7 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel as uuids
from nova import block_device
@@ -64,7 +65,7 @@ ephemeral_bdm = [block_device.BlockDeviceDict(
class ImageCacheManagerTests(test.NoDBTestCase):
- def test_configurationi_defaults(self):
+ def test_configuration_defaults(self):
self.assertEqual(2400, CONF.image_cache.manager_interval)
self.assertEqual('_base', CONF.image_cache.subdirectory_name)
self.assertTrue(CONF.image_cache.remove_unused_base_images)
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
index 085b169db3..62a61c1e8b 100644
--- a/nova/tests/unit/virt/test_images.py
+++ b/nova/tests/unit/virt/test_images.py
@@ -13,9 +13,11 @@
# under the License.
import os
+from unittest import mock
-import mock
from oslo_concurrency import processutils
+from oslo_serialization import jsonutils
+from oslo_utils import imageutils
from nova.compute import utils as compute_utils
from nova import exception
@@ -135,3 +137,47 @@ class QemuTestCase(test.NoDBTestCase):
'-O', 'out_format', '-f', 'in_format', 'source', 'dest')
mock_disk_op_sema.__enter__.assert_called_once()
self.assertTupleEqual(expected, mock_execute.call_args[0])
+
+ def test_convert_image_vmdk_allowed_list_checking(self):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+
+ # If the format is not in the allowed list, we should get an error
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With the format in the allowed list, no error
+ self.flags(vmdk_allowed_types=['streamOptimized', 'monolithicFlat',
+ 'monolithicSparse'],
+ group='compute')
+ images.check_vmdk_image('foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ # With an empty list, allow nothing
+ self.flags(vmdk_allowed_types=[], group='compute')
+ self.assertRaises(exception.ImageUnacceptable,
+ images.check_vmdk_image, 'foo',
+ imageutils.QemuImgInfo(jsonutils.dumps(info),
+ format='json'))
+
+ @mock.patch.object(images, 'fetch')
+ @mock.patch('nova.privsep.qemu.unprivileged_qemu_img_info')
+ def test_fetch_checks_vmdk_rules(self, mock_info, mock_fetch):
+ info = {'format': 'vmdk',
+ 'format-specific': {
+ 'type': 'vmdk',
+ 'data': {
+ 'create-type': 'monolithicFlat',
+ }}}
+ mock_info.return_value = jsonutils.dumps(info)
+ with mock.patch('os.path.exists', return_value=True):
+ e = self.assertRaises(exception.ImageUnacceptable,
+ images.fetch_to_raw, None, 'foo', 'anypath')
+ self.assertIn('Invalid VMDK create-type specified', str(e))
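The new checks exercise images.check_vmdk_image(), which compares the VMDK subformat (the create-type reported by qemu-img info) against the [compute] vmdk_allowed_types option and rejects anything not on the list, including everything when the list is empty. A simplified sketch of that allow-list check, assuming only the create-type field matters:

import json

def check_vmdk_create_type(qemu_img_info_json, allowed_types):
    info = json.loads(qemu_img_info_json)
    create_type = info.get('format-specific', {}).get('data', {}).get('create-type')
    if create_type not in allowed_types:
        raise ValueError('Invalid VMDK create-type specified: %s' % create_type)

info = json.dumps({'format': 'vmdk',
                   'format-specific': {'type': 'vmdk',
                                       'data': {'create-type': 'monolithicFlat'}}})
check_vmdk_create_type(info, ['streamOptimized', 'monolithicFlat', 'monolithicSparse'])
# check_vmdk_create_type(info, [])  # an empty allow-list rejects every VMDK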
diff --git a/nova/tests/unit/virt/test_netutils.py b/nova/tests/unit/virt/test_netutils.py
index de3f451351..fa0e16df19 100644
--- a/nova/tests/unit/virt/test_netutils.py
+++ b/nova/tests/unit/virt/test_netutils.py
@@ -17,6 +17,17 @@ from nova.virt import netutils
class TestNetUtilsTestCase(test.NoDBTestCase):
+
+ def _get_fake_instance_nw_info(self, num_networks, dhcp_server, mtu):
+ network_info = fake_network.fake_get_instance_nw_info(self,
+ num_networks)
+ for vif in network_info:
+ for subnet in vif['network']['subnets']:
+ subnet['meta']['dhcp_server'] = dhcp_server
+ vif['network']['meta']['mtu'] = mtu
+
+ return network_info
+
def test_get_cached_vifs_with_vlan_no_nw_info(self):
# Make sure that an empty dictionary will be returned when
# nw_info is None
@@ -39,3 +50,15 @@ class TestNetUtilsTestCase(test.NoDBTestCase):
expected = {'fa:16:3e:d1:28:e4': '2145'}
self.assertEqual(expected,
netutils.get_cached_vifs_with_vlan(network_info))
+
+ def test__get_link_mtu(self):
+ network_info_dhcp = self._get_fake_instance_nw_info(
+ 1, '192.168.0.100', 9000)
+ network_info_no_dhcp = self._get_fake_instance_nw_info(
+ 1, None, 9000)
+
+ for vif in network_info_dhcp:
+ self.assertIsNone(netutils._get_link_mtu(vif))
+
+ for vif in network_info_no_dhcp:
+ self.assertEqual(9000, netutils._get_link_mtu(vif))
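The new test__get_link_mtu cases imply the following behaviour for netutils._get_link_mtu(): when any subnet on the VIF advertises a DHCP server, the MTU is left for DHCP to deliver (None is returned); otherwise the network's configured MTU is used. A hedged re-implementation for illustration only:

def get_link_mtu(vif):
    for subnet in vif['network']['subnets']:
        if subnet['meta'].get('dhcp_server'):
            return None
    return vif['network']['meta'].get('mtu')

vif = {'network': {'meta': {'mtu': 9000},
                   'subnets': [{'meta': {'dhcp_server': None}}]}}
print(get_link_mtu(vif))  # 9000, since no subnet offers DHCP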
diff --git a/nova/tests/unit/virt/test_node.py b/nova/tests/unit/virt/test_node.py
new file mode 100644
index 0000000000..668b762520
--- /dev/null
+++ b/nova/tests/unit/virt/test_node.py
@@ -0,0 +1,142 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+from unittest import mock
+import uuid
+
+import fixtures
+from oslo_config import cfg
+from oslo_utils.fixture import uuidsentinel as uuids
+import testtools
+
+from nova import exception
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.virt import node
+
+CONF = cfg.CONF
+
+
+# NOTE(danms): We do not inherit from test.TestCase because we need
+# our node methods not stubbed out in order to exercise them.
+class TestNodeIdentity(testtools.TestCase):
+ def flags(self, **kw):
+ """Override flag variables for a test."""
+ group = kw.pop('group', None)
+ for k, v in kw.items():
+ CONF.set_override(k, v, group)
+
+ def setUp(self):
+ super().setUp()
+ self.useFixture(nova_fixtures.ConfFixture(CONF))
+ self.tempdir = self.useFixture(fixtures.TempDir()).path
+ self.identity_file = os.path.join(self.tempdir, node.COMPUTE_ID_FILE)
+ self.fake_config_files = ['%s/etc/nova.conf' % self.tempdir,
+ '%s/etc/nova/nova.conf' % self.tempdir,
+ '%s/opt/etc/nova/nova.conf' % self.tempdir]
+ for fn in self.fake_config_files:
+ os.makedirs(os.path.dirname(fn))
+ self.flags(state_path=self.tempdir,
+ config_file=self.fake_config_files)
+ node.LOCAL_NODE_UUID = None
+
+ def test_generate_local_node_uuid(self):
+ node_uuid = uuids.node
+ node.write_local_node_uuid(node_uuid)
+
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'anything')
+ self.assertIn(
+ 'Identity file %s appeared unexpectedly' % self.identity_file,
+ str(e))
+
+ def test_generate_local_node_uuid_unexpected_open_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_open.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_generate_local_node_uuid_unexpected_write_fail(self):
+ with mock.patch('builtins.open') as mock_open:
+ mock_write = mock_open.return_value.__enter__.return_value.write
+ mock_write.side_effect = IndexError()
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.write_local_node_uuid, 'foo')
+ self.assertIn('Unable to write uuid to %s' % (
+ self.identity_file), str(e))
+
+ def test_get_local_node_uuid_simple_exists(self):
+ node_uuid = uuids.node
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_exists_whitespace(self):
+ node_uuid = uuids.node
+ # Make sure we strip whitespace from the file contents
+ with test.patch_open('%s/etc/nova/compute_id' % self.tempdir,
+ ' %s \n' % node_uuid):
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_simple_generate(self):
+ self.assertIsNone(node.LOCAL_NODE_UUID)
+ node_uuid1 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid1, node.LOCAL_NODE_UUID)
+ node_uuid2 = node.get_local_node_uuid()
+ self.assertEqual(node_uuid2, node.LOCAL_NODE_UUID)
+
+ # Make sure we got the same thing each time, and that it's a
+ # valid uuid. Since we provided no uuid, it must have been
+ # generated the first time and read/returned the second.
+ self.assertEqual(node_uuid1, node_uuid2)
+ uuid.UUID(node_uuid1)
+
+ # Try to read it directly to make sure the file was really
+ # created and with the right value.
+ self.assertEqual(node_uuid1, node.read_local_node_uuid())
+
+ def test_get_local_node_uuid_two(self):
+ node_uuid = uuids.node
+
+ # Write the uuid to two of our locations
+ for cf in (self.fake_config_files[0], self.fake_config_files[1]):
+ open(os.path.join(os.path.dirname(cf),
+ node.COMPUTE_ID_FILE), 'w').write(node_uuid)
+
+ # Make sure we got the expected uuid and that no exceptions
+ # were raised about the files disagreeing
+ self.assertEqual(node_uuid, node.get_local_node_uuid())
+
+ def test_get_local_node_uuid_two_mismatch(self):
+ node_uuids = [uuids.node1, uuids.node2]
+
+ # Write a different uuid to each file
+ for id, fn in zip(node_uuids, self.fake_config_files):
+ open(os.path.join(
+ os.path.dirname(fn),
+ node.COMPUTE_ID_FILE), 'w').write(id)
+
+ # Make sure we get an error that identifies the mismatching
+ # file with its uuid, as well as what we expected to find
+ e = self.assertRaises(exception.InvalidNodeConfiguration,
+ node.get_local_node_uuid)
+ expected = ('UUID %s in %s does not match %s' % (
+ node_uuids[1],
+ os.path.join(os.path.dirname(self.fake_config_files[1]),
+ 'compute_id'),
+ node_uuids[0]))
+ self.assertIn(expected, str(e))
diff --git a/nova/tests/unit/virt/test_osinfo.py b/nova/tests/unit/virt/test_osinfo.py
index af3698b541..5d927deab1 100644
--- a/nova/tests/unit/virt/test_osinfo.py
+++ b/nova/tests/unit/virt/test_osinfo.py
@@ -12,8 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from nova import exception
from nova import objects
diff --git a/nova/tests/unit/virt/test_virt.py b/nova/tests/unit/virt/test_virt.py
index 11f306c761..2d108c6f2d 100644
--- a/nova/tests/unit/virt/test_virt.py
+++ b/nova/tests/unit/virt/test_virt.py
@@ -14,8 +14,8 @@
# under the License.
import io
+from unittest import mock
-import mock
import os_traits
from nova import test
@@ -102,6 +102,33 @@ class TestVirtDriver(test.NoDBTestCase):
self.assertFalse(traits[os_traits.COMPUTE_IMAGE_TYPE_RAW])
self.assertFalse(traits[os_traits.COMPUTE_IMAGE_TYPE_VHD])
+ def test_block_device_info_get_encrypted_disks(self):
+ block_device_info = {
+ 'swap': {'device_name': '/dev/sdb', 'swap_size': 1},
+ 'image': [
+ {'device_name': '/dev/vda', 'encrypted': True},
+ ],
+ 'ephemerals': [
+ {'device_name': '/dev/vdb', 'encrypted': True},
+ {'device_name': '/dev/vdc', 'encrypted': False},
+ ],
+ }
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ expected = [
+ {'device_name': '/dev/vda', 'encrypted': True},
+ {'device_name': '/dev/vdb', 'encrypted': True},
+ ]
+ self.assertEqual(expected, disks)
+ # Try removing 'image'
+ block_device_info.pop('image')
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ expected = [{'device_name': '/dev/vdb', 'encrypted': True}]
+ self.assertEqual(expected, disks)
+ # Remove 'ephemerals'
+ block_device_info.pop('ephemerals')
+ disks = driver.block_device_info_get_encrypted_disks(block_device_info)
+ self.assertEqual([], disks)
+
class FakeMount(object):
def __init__(self, image, mount_dir, partition=None, device=None):
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index 8dcad485bc..ed9f1e3822 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -15,9 +15,9 @@
from collections import deque
import sys
import traceback
+from unittest import mock
import fixtures
-import mock
import netaddr
import os_resource_classes as orc
import os_vif
@@ -168,7 +168,7 @@ class VirtDriverLoaderTestCase(_FakeDriverBackendTestCase, test.TestCase):
self.fail("Couldn't load driver %s - %s" % (cls, e))
self.assertEqual(cm.driver.__class__.__name__, driver,
- "Could't load driver %s" % cls)
+ "Couldn't load driver %s" % cls)
@mock.patch.object(sys, 'exit', side_effect=test.TestingException())
def test_fail_to_load_new_drivers(self, mock_exit):
@@ -746,13 +746,13 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.flags(cpu_allocation_ratio=16.1)
self.flags(ram_allocation_ratio=1.6)
self.flags(disk_allocation_ratio=1.1)
- expeced_ratios = {
+ expected_ratios = {
orc.VCPU: CONF.cpu_allocation_ratio,
orc.MEMORY_MB: CONF.ram_allocation_ratio,
orc.DISK_GB: CONF.disk_allocation_ratio
}
# If conf is set, return conf
- self.assertEqual(expeced_ratios,
+ self.assertEqual(expected_ratios,
self.connection._get_allocation_ratios(inv))
self.flags(cpu_allocation_ratio=None)
@@ -761,25 +761,25 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.flags(initial_cpu_allocation_ratio=15.9)
self.flags(initial_ram_allocation_ratio=1.4)
self.flags(initial_disk_allocation_ratio=0.9)
- expeced_ratios = {
+ expected_ratios = {
orc.VCPU: CONF.initial_cpu_allocation_ratio,
orc.MEMORY_MB: CONF.initial_ram_allocation_ratio,
orc.DISK_GB: CONF.initial_disk_allocation_ratio
}
# if conf is unset and inv doesn't exist, return init conf
- self.assertEqual(expeced_ratios,
+ self.assertEqual(expected_ratios,
self.connection._get_allocation_ratios(inv))
inv = {orc.VCPU: {'allocation_ratio': 3.0},
orc.MEMORY_MB: {'allocation_ratio': 3.1},
orc.DISK_GB: {'allocation_ratio': 3.2}}
- expeced_ratios = {
+ expected_ratios = {
orc.VCPU: inv[orc.VCPU]['allocation_ratio'],
orc.MEMORY_MB: inv[orc.MEMORY_MB]['allocation_ratio'],
orc.DISK_GB: inv[orc.DISK_GB]['allocation_ratio']
}
# if conf is unset and inv exists, return inv
- self.assertEqual(expeced_ratios,
+ self.assertEqual(expected_ratios,
self.connection._get_allocation_ratios(inv))
@@ -832,6 +832,7 @@ class LibvirtConnTestCase(_VirtDriverTestCase, test.TestCase):
# This is needed for the live migration tests which spawn off the
# operation for monitoring.
self.useFixture(nova_fixtures.SpawnIsSynchronousFixture())
+ self.useFixture(nova_fixtures.CGroupsFixture())
# When destroying an instance, os-vif will try to execute some commands
# which hang tests so let's just stub out the unplug call to os-vif
# since we don't care about it.
diff --git a/nova/tests/unit/virt/vmwareapi/__init__.py b/nova/tests/unit/virt/vmwareapi/__init__.py
index e69de29bb2..206b60cb8f 100644
--- a/nova/tests/unit/virt/vmwareapi/__init__.py
+++ b/nova/tests/unit/virt/vmwareapi/__init__.py
@@ -0,0 +1,20 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+try:
+ import oslo_vmware # noqa: F401
+except ImportError:
+ raise unittest.SkipTest(
+ "The 'oslo.vmware' dependency is not installed."
+ )
diff --git a/nova/tests/unit/virt/vmwareapi/fake.py b/nova/tests/unit/virt/vmwareapi/fake.py
index b98a287613..2c09afb8ec 100644
--- a/nova/tests/unit/virt/vmwareapi/fake.py
+++ b/nova/tests/unit/virt/vmwareapi/fake.py
@@ -23,11 +23,11 @@ import collections
import sys
from oslo_log import log as logging
-from oslo_serialization import jsonutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
+from oslo_vmware import vim_util
from nova import exception
from nova.virt.vmwareapi import constants
@@ -76,23 +76,34 @@ def cleanup():
_db_content[c] = {}
-def _create_object(table, table_obj):
+def _create_object(table_obj):
"""Create an object in the db."""
- _db_content.setdefault(table, {})
- _db_content[table][table_obj.obj] = table_obj
+ _db_content.setdefault(table_obj.obj._type, {})
+ update_object(table_obj)
-def _get_object(obj_ref):
+def get_object(obj_ref):
"""Get object for the give reference."""
- return _db_content[obj_ref.type][obj_ref]
+ return _db_content[obj_ref.type][obj_ref.value]
-def _get_objects(obj_type):
+def get_objects(obj_type):
"""Get objects of the type."""
- lst_objs = FakeRetrieveResult()
- for key in _db_content[obj_type]:
- lst_objs.add_object(_db_content[obj_type][key])
- return lst_objs
+ return _db_content[obj_type].values()
+
+
+def get_first_object(obj_type):
+ """Get the first object of an object type"""
+ return next(iter(_db_content[obj_type].values()))
+
+
+def get_first_object_ref(obj_type):
+ """Get the first reference of an object type"""
+ return get_first_object(obj_type).obj
+
+
+def _no_objects_of_type(obj_type):
+ return not _db_content.get(obj_type)
def _convert_to_array_of_mor(mors):
@@ -135,21 +146,19 @@ class FakeRetrieveResult(object):
if token is not None:
self.token = token
- def add_object(self, object):
- self.objects.append(object)
+ def add_object(self, obj):
+ self.objects.append(obj)
-def _get_object_refs(obj_type):
- """Get object References of the type."""
- lst_objs = []
- for key in _db_content[obj_type]:
- lst_objs.append(key)
- return lst_objs
+def get_object_refs(obj_type):
+ """Get iterator over object References of the type."""
+ for obj in _db_content[obj_type].values():
+ yield obj.obj
-def _update_object(table, table_obj):
+def update_object(table_obj):
"""Update objects of the type."""
- _db_content[table][table_obj.obj] = table_obj
+ _db_content[table_obj.obj._type][table_obj.obj.value] = table_obj
class Prop(object):
@@ -177,6 +186,14 @@ class ManagedObjectReference(object):
self.type = name
self._type = name
+ def __repr__(self):
+ return f'{self._type}:{self.value}'
+
+ def __eq__(self, other):
+ return (other is not None and
+ vim_util.get_moref_value(other) == self.value and
+ vim_util.get_moref_type(other) == self.type)
+
class ObjectContent(object):
"""ObjectContent array holds dynamic properties."""
@@ -262,8 +279,11 @@ class ManagedObject(object):
return prefix + "-" + str(self.__class__._counter)
def __repr__(self):
- return jsonutils.dumps({elem.name: elem.val
- for elem in self.propSet})
+ # We can't just dump the managed-object, because it may be circular
+ return "{}:{}({})".format(self.obj._type, self.obj.value,
+ ", ".join(
+ "{}={}".format(p.name, p.val if p.name == "name" else "<>")
+ for p in self.propSet))
class DataObject(object):
@@ -593,8 +613,7 @@ class ResourcePool(ManagedObject):
class DatastoreHostMount(DataObject):
def __init__(self, value='host-100'):
super(DatastoreHostMount, self).__init__()
- host_ref = (_db_content["HostSystem"]
- [list(_db_content["HostSystem"].keys())[0]].obj)
+ host_ref = get_first_object_ref("HostSystem")
host_system = DataObject()
host_system.ManagedObjectReference = [host_ref]
host_system.value = value
@@ -621,9 +640,15 @@ class ClusterComputeResource(ManagedObject):
summary.effectiveCpu = 10000
self.set("summary", summary)
+ vm_list = DataObject()
+ vm_list.ManagedObjectReference = []
+ self.set("vm", vm_list)
+
def _add_root_resource_pool(self, r_pool):
if r_pool:
self.set("resourcePool", r_pool)
+ pool = get_object(r_pool)
+ self.set("vm", pool.get("vm"))
def _add_host(self, host_sys):
if host_sys:
@@ -659,7 +684,7 @@ class ClusterComputeResource(ManagedObject):
# Compute the aggregate stats
summary.numHosts = len(hosts.ManagedObjectReference)
for host_ref in hosts.ManagedObjectReference:
- host_sys = _get_object(host_ref)
+ host_sys = get_object(host_ref)
connected = host_sys.get("connected")
host_summary = host_sys.get("summary")
summary.numCpuCores += host_summary.hardware.numCpuCores
@@ -717,14 +742,17 @@ class HostSystem(ManagedObject):
maintenance_mode=False):
super(HostSystem, self).__init__("host")
self.set("name", name)
- if _db_content.get("HostNetworkSystem", None) is None:
+ if _no_objects_of_type("HostNetworkSystem"):
create_host_network_system()
- if not _get_object_refs('HostStorageSystem'):
+
+ if _no_objects_of_type("HostStorageSystem"):
create_host_storage_system()
- host_net_key = list(_db_content["HostNetworkSystem"].keys())[0]
- host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
- self.set("configManager.networkSystem", host_net_sys)
- host_storage_sys_key = _get_object_refs('HostStorageSystem')[0]
+
+ host_net_obj = get_first_object("HostNetworkSystem")
+ host_net_ref = host_net_obj.obj
+ self.set("configManager.networkSystem", host_net_ref)
+
+ host_storage_sys_key = get_first_object_ref('HostStorageSystem')
self.set("configManager.storageSystem", host_storage_sys_key)
if not ds_ref:
@@ -779,10 +807,9 @@ class HostSystem(ManagedObject):
self.set("config.network.pnic", net_info_pnic)
self.set("connected", connected)
- if _db_content.get("Network", None) is None:
+ if _no_objects_of_type("Network"):
create_network()
- net_ref = _db_content["Network"][
- list(_db_content["Network"].keys())[0]].obj
+ net_ref = get_first_object_ref("Network")
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
@@ -792,9 +819,9 @@ class HostSystem(ManagedObject):
vswitch_do.name = "vSwitch0"
vswitch_do.portgroup = ["PortGroup-vmnet0"]
- net_swicth = DataObject()
- net_swicth.HostVirtualSwitch = [vswitch_do]
- self.set("config.network.vswitch", net_swicth)
+ net_switch = DataObject()
+ net_switch.HostVirtualSwitch = [vswitch_do]
+ self.set("config.network.vswitch", net_switch)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-vmnet0"
@@ -821,7 +848,7 @@ class HostSystem(ManagedObject):
self.set("config.storageDevice.hostBusAdapter", host_bus_adapter_array)
# Set the same on the storage system managed object
- host_storage_sys = _get_object(host_storage_sys_key)
+ host_storage_sys = get_object(host_storage_sys_key)
host_storage_sys.set('storageDeviceInfo.hostBusAdapter',
host_bus_adapter_array)
@@ -882,17 +909,15 @@ class Datacenter(ManagedObject):
def __init__(self, name="ha-datacenter", ds_ref=None):
super(Datacenter, self).__init__("dc")
self.set("name", name)
- if _db_content.get("Folder", None) is None:
+ if _no_objects_of_type("Folder"):
create_folder()
- folder_ref = _db_content["Folder"][
- list(_db_content["Folder"].keys())[0]].obj
+ folder_ref = get_first_object_ref("Folder")
folder_do = DataObject()
folder_do.ManagedObjectReference = [folder_ref]
self.set("vmFolder", folder_ref)
- if _db_content.get("Network", None) is None:
+ if _no_objects_of_type("Network"):
create_network()
- net_ref = _db_content["Network"][
- list(_db_content["Network"].keys())[0]].obj
+ net_ref = get_first_object_ref("Network")
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
@@ -927,54 +952,56 @@ class Task(ManagedObject):
def create_host_network_system():
host_net_system = HostNetworkSystem()
- _create_object("HostNetworkSystem", host_net_system)
+ _create_object(host_net_system)
def create_host_storage_system():
host_storage_system = HostStorageSystem()
- _create_object("HostStorageSystem", host_storage_system)
+ _create_object(host_storage_system)
def create_host(ds_ref=None):
host_system = HostSystem(ds_ref=ds_ref)
- _create_object('HostSystem', host_system)
+ _create_object(host_system)
def create_datacenter(name, ds_ref=None):
data_center = Datacenter(name, ds_ref)
- _create_object('Datacenter', data_center)
+ _create_object(data_center)
def create_datastore(name, capacity, free):
data_store = Datastore(name, capacity, free)
- _create_object('Datastore', data_store)
+ _create_object(data_store)
return data_store.obj
def create_res_pool():
res_pool = ResourcePool()
- _create_object('ResourcePool', res_pool)
+ _create_object(res_pool)
return res_pool.obj
def create_folder():
folder = Folder()
- _create_object('Folder', folder)
+ _create_object(folder)
return folder.obj
def create_network():
network = Network()
- _create_object('Network', network)
+ _create_object(network)
def create_cluster(name, ds_ref):
cluster = ClusterComputeResource(name=name)
- cluster._add_host(_get_object_refs("HostSystem")[0])
- cluster._add_host(_get_object_refs("HostSystem")[1])
+ for i, host in enumerate(get_object_refs("HostSystem")):
+ cluster._add_host(host)
+ if i >= 1:
+ break
cluster._add_datastore(ds_ref)
cluster._add_root_resource_pool(create_res_pool())
- _create_object('ClusterComputeResource', cluster)
+ _create_object(cluster)
return cluster
@@ -993,16 +1020,15 @@ def create_vm(uuid=None, name=None,
devices = []
if vmPathName is None:
- vm_path = ds_obj.DatastorePath(
- list(_db_content['Datastore'].values())[0])
+ vm_path = ds_obj.DatastorePath(get_first_object("Datastore"))
else:
vm_path = ds_obj.DatastorePath.parse(vmPathName)
if res_pool_ref is None:
- res_pool_ref = list(_db_content['ResourcePool'].keys())[0]
+ res_pool_ref = get_first_object_ref("ResourcePool")
if host_ref is None:
- host_ref = list(_db_content["HostSystem"].keys())[0]
+ host_ref = get_first_object_ref("HostSystem")
# Fill in the default path to the vmx file if we were only given a
# datastore. Note that if you create a VM with vmPathName '[foo]', when you
@@ -1011,9 +1037,9 @@ def create_vm(uuid=None, name=None,
if vm_path.rel_path == '':
vm_path = vm_path.join(name, name + '.vmx')
- for key, value in _db_content["Datastore"].items():
+ for value in get_objects("Datastore"):
if value.get('summary.name') == vm_path.datastore:
- ds = key
+ ds = value.obj
break
else:
ds = create_datastore(vm_path.datastore, 1024, 500)
@@ -1030,9 +1056,9 @@ def create_vm(uuid=None, name=None,
"instanceUuid": uuid,
"version": version}
vm = VirtualMachine(**vm_dict)
- _create_object("VirtualMachine", vm)
+ _create_object(vm)
- res_pool = _get_object(res_pool_ref)
+ res_pool = get_object(res_pool_ref)
res_pool.vm.ManagedObjectReference.append(vm.obj)
return vm.obj
@@ -1040,7 +1066,7 @@ def create_vm(uuid=None, name=None,
def create_task(task_name, state="running", result=None, error_fault=None):
task = Task(task_name, state, result, error_fault)
- _create_object("Task", task)
+ _create_object(task)
return task
@@ -1103,12 +1129,14 @@ def fake_fetch_image(context, instance, host, port, dc_name, ds_name,
def _get_vm_mdo(vm_ref):
"""Gets the Virtual Machine with the ref from the db."""
- if _db_content.get("VirtualMachine", None) is None:
+ vms = _db_content.get("VirtualMachine")
+ if not vms:
raise exception.NotFound("There is no VM registered")
- if vm_ref not in _db_content.get("VirtualMachine"):
+ try:
+ return vms[vm_ref.value]
+ except KeyError:
raise exception.NotFound("Virtual Machine with ref %s is not "
- "there" % vm_ref)
- return _db_content.get("VirtualMachine")[vm_ref]
+ "there" % vm_ref.value)
def _merge_extraconfig(existing, changes):
@@ -1354,11 +1382,10 @@ class FakeVim(object):
def _find_all_by_uuid(self, *args, **kwargs):
uuid = kwargs.get('uuid')
vm_refs = []
- for vm_ref in _db_content.get("VirtualMachine"):
- vm = _get_object(vm_ref)
+ for vm in get_objects("VirtualMachine"):
vm_uuid = vm.get("summary.config.instanceUuid")
if vm_uuid == uuid:
- vm_refs.append(vm_ref)
+ vm_refs.append(vm.obj)
return vm_refs
def _delete_snapshot(self, method, *args, **kwargs):
@@ -1412,7 +1439,7 @@ class FakeVim(object):
vm_dict["extra_config"] = extraConfigs
virtual_machine = VirtualMachine(**vm_dict)
- _create_object("VirtualMachine", virtual_machine)
+ _create_object(virtual_machine)
task_mdo = create_task(method, "success")
return task_mdo.obj
@@ -1420,7 +1447,7 @@ class FakeVim(object):
"""Unregisters a VM from the Host System."""
vm_ref = args[0]
_get_vm_mdo(vm_ref)
- del _db_content["VirtualMachine"][vm_ref]
+ del _db_content["VirtualMachine"][vm_ref.value]
task_mdo = create_task(method, "success")
return task_mdo.obj
@@ -1491,13 +1518,7 @@ class FakeVim(object):
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
"""Sets power state for the VM."""
- if _db_content.get("VirtualMachine", None) is None:
- raise exception.NotFound("No Virtual Machine has been "
- "registered yet")
- if vm_ref not in _db_content.get("VirtualMachine"):
- raise exception.NotFound("Virtual Machine with ref %s is not "
- "there" % vm_ref)
- vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
+ vm_mdo = _get_vm_mdo(vm_ref)
vm_mdo.set("runtime.powerState", pwr_state)
task_mdo = create_task(method, "success")
return task_mdo.obj
@@ -1526,7 +1547,7 @@ class FakeVim(object):
# This means that we are retrieving props for all managed
# data objects of the specified 'type' in the entire
# inventory. This gets invoked by vim_util.get_objects.
- mdo_refs = _db_content[spec_type]
+ mdo_refs = list(get_object_refs(spec_type))
elif obj_ref.type != spec_type:
# This means that we are retrieving props for the managed
# data objects in the parent object's 'path' property.
@@ -1536,7 +1557,7 @@ class FakeVim(object):
# path = 'datastore'
# the above will retrieve all datastores in the given
# cluster.
- parent_mdo = _db_content[obj_ref.type][obj_ref]
+ parent_mdo = get_object(obj_ref)
path = obj.selectSet[0].path
mdo_refs = parent_mdo.get(path).ManagedObjectReference
else:
@@ -1545,12 +1566,13 @@ class FakeVim(object):
# vim_util.get_properties_for_a_collection_of_objects.
mdo_refs = [obj_ref]
+ mdo_list = _db_content[spec_type]
for mdo_ref in mdo_refs:
- mdo = _db_content[spec_type][mdo_ref]
- prop_list = []
- for prop_name in properties:
- prop = Prop(prop_name, mdo.get(prop_name))
- prop_list.append(prop)
+ mdo = mdo_list[mdo_ref.value]
+ prop_list = [
+ Prop(prop_name, mdo.get(prop_name))
+ for prop_name in properties
+ ]
obj_content = ObjectContent(mdo.obj, prop_list)
lst_ret_objs.add_object(obj_content)
except Exception:
@@ -1560,14 +1582,13 @@ class FakeVim(object):
def _add_port_group(self, method, *args, **kwargs):
"""Adds a port group to the host system."""
- _host_sk = list(_db_content["HostSystem"].keys())[0]
- host_mdo = _db_content["HostSystem"][_host_sk]
+ host_mdo = get_first_object("HostSystem")
host_mdo._add_port_group(kwargs.get("portgrp"))
def _add_iscsi_send_tgt(self, method, *args, **kwargs):
"""Adds a iscsi send target to the hba."""
send_targets = kwargs.get('targets')
- host_storage_sys = _get_objects('HostStorageSystem').objects[0]
+ host_storage_sys = get_first_object('HostStorageSystem')
iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
'.hostBusAdapter')
iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
diff --git a/nova/tests/unit/virt/vmwareapi/stubs.py b/nova/tests/unit/virt/vmwareapi/stubs.py
index d0caaae43e..a0406bdac5 100644
--- a/nova/tests/unit/virt/vmwareapi/stubs.py
+++ b/nova/tests/unit/virt/vmwareapi/stubs.py
@@ -36,7 +36,7 @@ def fake_vim_prop(arg):
return fake.get_fake_vim_object(arg)
-def fake_is_vim_object(arg, module):
+def fake_is_vim_object(module):
"""Stubs out the VMwareAPISession's is_vim_object method."""
return isinstance(module, fake.FakeVim)
@@ -74,9 +74,10 @@ def set_stubs(test):
fake.fake_upload_image)
test.stub_out('nova.virt.vmwareapi.images.fetch_image',
fake.fake_fetch_image)
- test.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession.vim',
+ test.stub_out('nova.virt.vmwareapi.session.VMwareAPISession.vim',
fake_vim_prop)
- test.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession._is_vim_object',
+ test.stub_out('nova.virt.vmwareapi.session.VMwareAPISession.'
+ '_is_vim_object',
fake_is_vim_object)
test.stub_out('nova.network.neutron.API.update_instance_vnic_index',
lambda *args, **kwargs: None)
diff --git a/nova/tests/unit/virt/vmwareapi/test_configdrive.py b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
index de07444ddb..7e8b1c1b63 100644
--- a/nova/tests/unit/virt/vmwareapi/test_configdrive.py
+++ b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
@@ -13,8 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
import fixtures
-import mock
from oslo_utils.fixture import uuidsentinel
from nova import context
diff --git a/nova/tests/unit/virt/vmwareapi/test_driver_api.py b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
index 5889fb8239..ac473c8c09 100644
--- a/nova/tests/unit/virt/vmwareapi/test_driver_api.py
+++ b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
@@ -21,9 +21,9 @@ Test suite for VMwareAPI.
import collections
import datetime
+from unittest import mock
from eventlet import greenthread
-import mock
import os_resource_classes as orc
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
@@ -61,6 +61,7 @@ from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@@ -109,20 +110,11 @@ DEFAULT_FLAVOR_OBJS = [
]
-def _fake_create_session(inst):
- session = vmwareapi_fake.DataObject()
- session.key = 'fake_key'
- session.userName = 'fake_username'
- session._pbm_wsdl_loc = None
- session._pbm = None
- inst._session = session
-
-
class VMwareDriverStartupTestCase(test.NoDBTestCase):
def _start_driver_with_flags(self, expected_exception_type, startup_flags):
self.flags(**startup_flags)
with mock.patch(
- 'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'):
+ 'nova.virt.vmwareapi.session.VMwareAPISession.__init__'):
e = self.assertRaises(Exception, driver.VMwareVCDriver, None) # noqa
self.assertIs(type(e), expected_exception_type)
@@ -154,36 +146,6 @@ class VMwareDriverStartupTestCase(test.NoDBTestCase):
group='vmware'))
-class VMwareSessionTestCase(test.NoDBTestCase):
-
- @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
- return_value=False)
- def test_call_method(self, mock_is_vim):
- with test.nested(
- mock.patch.object(driver.VMwareAPISession, '_create_session',
- _fake_create_session),
- mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
- ) as (fake_create, fake_invoke):
- session = driver.VMwareAPISession()
- session._vim = mock.Mock()
- module = mock.Mock()
- session._call_method(module, 'fira')
- fake_invoke.assert_called_once_with(module, 'fira', session._vim)
-
- @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
- return_value=True)
- def test_call_method_vim(self, mock_is_vim):
- with test.nested(
- mock.patch.object(driver.VMwareAPISession, '_create_session',
- _fake_create_session),
- mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
- ) as (fake_create, fake_invoke):
- session = driver.VMwareAPISession()
- module = mock.Mock()
- session._call_method(module, 'fira')
- fake_invoke.assert_called_once_with(module, 'fira')
-
-
class VMwareAPIVMTestCase(test.NoDBTestCase,
test_diagnostics.DiagnosticsComparisonMixin):
"""Unit tests for Vmware API connection calls."""
@@ -337,7 +299,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
_fake_check_session)
with mock.patch.object(greenthread, 'sleep'):
- self.conn = driver.VMwareAPISession()
+ self.conn = session.VMwareAPISession()
self.assertEqual(2, self.attempts)
def _get_flavor_by_name(self, type):
@@ -411,8 +373,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
def _get_vm_record(self):
# Get record for VM
- vms = vmwareapi_fake._get_objects("VirtualMachine")
- for vm in vms.objects:
+ vms = vmwareapi_fake.get_objects("VirtualMachine")
+ for vm in vms:
if vm.get('name') == vm_util._get_vm_name(self._display_name,
self.uuid):
return vm
@@ -1307,7 +1269,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1):
self._create_vm()
- fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj
+ fake_vm = vmwareapi_fake.get_first_object_ref("VirtualMachine")
snapshot_ref = vmwareapi_fake.ManagedObjectReference(
value="Snapshot-123",
name="VirtualMachineSnapshot")
@@ -1801,8 +1763,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
get_vm_ref.assert_called_once_with(self.conn._session,
self.instance)
- get_volume_ref.assert_called_once_with(
- connection_info['data']['volume'])
+ get_volume_ref.assert_called_once_with(connection_info['data'])
self.assertTrue(get_vmdk_info.called)
attach_disk_to_vm.assert_called_once_with(mock.sentinel.vm_ref,
self.instance, adapter_type, disk_type, vmdk_path='fake-path')
@@ -1878,8 +1839,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
def test_iscsi_rescan_hba(self):
fake_target_portal = 'fake_target_host:port'
- host_storage_sys = vmwareapi_fake._get_objects(
- "HostStorageSystem").objects[0]
+ host_storage_sys = vmwareapi_fake.get_first_object(
+ "HostStorageSystem")
iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
'.hostBusAdapter')
iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
@@ -1899,7 +1860,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
def test_iscsi_get_target(self):
data = {'target_portal': 'fake_target_host:port',
'target_iqn': 'fake_target_iqn'}
- host = vmwareapi_fake._get_objects('HostSystem').objects[0]
+ host = vmwareapi_fake.get_first_object('HostSystem')
host._add_iscsi_target(data)
vops = volumeops.VMwareVolumeOps(self.conn._session)
result = vops._iscsi_get_target(data)
@@ -2162,7 +2123,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
'min_unit': 1,
'max_unit': 16,
'step_size': 1,
- 'allocation_ratio': 16.0,
+ 'allocation_ratio': 4.0,
},
orc.MEMORY_MB: {
'total': 2048,
@@ -2170,7 +2131,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase,
'min_unit': 1,
'max_unit': 1024,
'step_size': 1,
- 'allocation_ratio': 1.5,
+ 'allocation_ratio': 1.0,
},
orc.DISK_GB: {
'total': 95,
diff --git a/nova/tests/unit/virt/vmwareapi/test_ds_util.py b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
index 3b909642fb..1716027afb 100644
--- a/nova/tests/unit/virt/vmwareapi/test_ds_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
@@ -14,8 +14,8 @@
from contextlib import contextmanager
import re
+from unittest import mock
-import mock
from oslo_utils import units
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
diff --git a/nova/tests/unit/virt/vmwareapi/test_imagecache.py b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
index 485b1ea4cd..1116804d2f 100644
--- a/nova/tests/unit/virt/vmwareapi/test_imagecache.py
+++ b/nova/tests/unit/virt/vmwareapi/test_imagecache.py
@@ -13,8 +13,8 @@
# under the License.
import datetime
+from unittest import mock
-import mock
from oslo_config import cfg
from oslo_utils import fixture as utils_fixture
from oslo_utils.fixture import uuidsentinel
diff --git a/nova/tests/unit/virt/vmwareapi/test_images.py b/nova/tests/unit/virt/vmwareapi/test_images.py
index 7cfec00c97..20abc063a0 100644
--- a/nova/tests/unit/virt/vmwareapi/test_images.py
+++ b/nova/tests/unit/virt/vmwareapi/test_images.py
@@ -17,8 +17,8 @@ Test suite for images.
import os
import tarfile
+from unittest import mock
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
from oslo_vmware import rw_handles
@@ -117,13 +117,11 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock.patch.object(images.IMAGE_API, 'download'),
mock.patch.object(images, 'image_transfer'),
mock.patch.object(images, '_build_shadow_vm_config_spec'),
- mock.patch.object(session, '_call_method'),
mock.patch.object(vm_util, 'get_vmdk_info')
) as (mock_image_api_get,
mock_image_api_download,
mock_image_transfer,
mock_build_shadow_vm_config_spec,
- mock_call_method,
mock_get_vmdk_info):
image_data = {'id': 'fake-id',
'disk_format': 'vmdk',
@@ -172,7 +170,7 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock_write_handle)
mock_get_vmdk_info.assert_called_once_with(
session, mock.sentinel.vm_ref, 'fake-vm')
- mock_call_method.assert_called_once_with(
+ session._call_method.assert_called_once_with(
session.vim, "UnregisterVM", mock.sentinel.vm_ref)
@mock.patch('oslo_vmware.rw_handles.ImageReadHandle')
@@ -188,13 +186,11 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock.patch.object(images.IMAGE_API, 'download'),
mock.patch.object(images, 'image_transfer'),
mock.patch.object(images, '_build_shadow_vm_config_spec'),
- mock.patch.object(session, '_call_method'),
mock.patch.object(vm_util, 'get_vmdk_info')
) as (mock_image_api_get,
mock_image_api_download,
mock_image_transfer,
mock_build_shadow_vm_config_spec,
- mock_call_method,
mock_get_vmdk_info):
image_data = {'id': 'fake-id',
'disk_format': 'vmdk',
@@ -220,7 +216,7 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock_image_transfer.assert_called_once_with(mock_read_handle,
mock_write_handle)
- mock_call_method.assert_called_once_with(
+ session._call_method.assert_called_once_with(
session.vim, "UnregisterVM", mock.sentinel.vm_ref)
mock_get_vmdk_info.assert_called_once_with(
session, mock.sentinel.vm_ref, 'fake-vm')
diff --git a/nova/tests/unit/virt/vmwareapi/test_network_util.py b/nova/tests/unit/virt/vmwareapi/test_network_util.py
index 10f2583946..b3b5bb15ea 100644
--- a/nova/tests/unit/virt/vmwareapi/test_network_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_network_util.py
@@ -15,15 +15,15 @@
# under the License.
import collections
+from unittest import mock
-import mock
from oslo_vmware import vim_util
from nova import test
from nova.tests.unit.virt.vmwareapi import fake
from nova.tests.unit.virt.vmwareapi import stubs
-from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import network_util
+from nova.virt.vmwareapi import session
ResultSet = collections.namedtuple('ResultSet', ['objects'])
@@ -36,12 +36,12 @@ class GetNetworkWithTheNameTestCase(test.NoDBTestCase):
def setUp(self):
super(GetNetworkWithTheNameTestCase, self).setUp()
fake.reset()
- self.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession.vim',
+ self.stub_out('nova.virt.vmwareapi.session.VMwareAPISession.vim',
stubs.fake_vim_prop)
- self.stub_out('nova.virt.vmwareapi.driver.'
+ self.stub_out('nova.virt.vmwareapi.session.'
'VMwareAPISession.is_vim_object',
stubs.fake_is_vim_object)
- self._session = driver.VMwareAPISession()
+ self._session = session.VMwareAPISession()
def _build_cluster_networks(self, networks):
"""Returns a set of results for a cluster network lookup.
diff --git a/nova/tests/unit/virt/vmwareapi/test_session.py b/nova/tests/unit/virt/vmwareapi/test_session.py
new file mode 100644
index 0000000000..6088e1f5b2
--- /dev/null
+++ b/nova/tests/unit/virt/vmwareapi/test_session.py
@@ -0,0 +1,208 @@
+# Copyright (c) 2022 SAP SE
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 VMware, Inc.
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+"""
+Test suite for VMwareAPI Session
+"""
+
+from unittest import mock
+
+from oslo_vmware import exceptions as vexec
+
+from nova import test
+from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
+from nova.virt.vmwareapi import session
+
+
+def _fake_create_session(inst):
+ _session = vmwareapi_fake.DataObject()
+ _session.key = 'fake_key'
+ _session.userName = 'fake_username'
+ _session._pbm_wsdl_loc = None
+ _session._pbm = None
+ inst._session = _session
+
+
+def _fake_fetch_moref_impl(inst, _):
+ inst.moref = vmwareapi_fake.ManagedObjectReference(
+ value=mock.sentinel.moref2)
+
+
+class FakeStableMoRefProxy(session.StableMoRefProxy):
+ def __init__(self, ref=None):
+ super(FakeStableMoRefProxy, self).__init__(
+ ref or vmwareapi_fake.ManagedObjectReference(
+ value=mock.sentinel.moref))
+
+ def fetch_moref(self, session):
+ pass
+
+ def __repr__(self):
+ return "FakeStableMoRefProxy({!r})".format(self.moref)
+
+
+class StableMoRefProxyTestCase(test.NoDBTestCase):
+ def test_proxy(self):
+ ref = FakeStableMoRefProxy()
+ self.assertEqual(mock.sentinel.moref, ref.value)
+ self.assertEqual("ManagedObject", ref._type)
+
+ def test_proxy_classes(self):
+ # Necessary for suds serialisation
+ ref = FakeStableMoRefProxy()
+ self.assertEqual("ManagedObjectReference", ref.__class__.__name__)
+
+
+class VMwareSessionTestCase(test.NoDBTestCase):
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=False)
+ def test_call_method(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession,
+ '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession,
+ 'invoke_api'),
+ ) as (fake_create, fake_invoke):
+ _session = session.VMwareAPISession()
+ _session._vim = mock.Mock()
+ module = mock.Mock()
+ _session._call_method(module, 'fira')
+ fake_invoke.assert_called_once_with(module, 'fira', _session._vim)
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_vim(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession,
+ '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession,
+ 'invoke_api'),
+ ) as (fake_create, fake_invoke):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ _session._call_method(module, 'fira')
+ fake_invoke.assert_called_once_with(module, 'fira')
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_no_recovery(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession, 'invoke_api'),
+ mock.patch.object(FakeStableMoRefProxy, 'fetch_moref'),
+ ) as (fake_create, fake_invoke, fake_fetch_moref):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ ref = FakeStableMoRefProxy()
+
+ _session._call_method(
+ module, mock.sentinel.method_arg, ref, ref=ref)
+
+ fake_invoke.assert_called_once_with(
+ module, mock.sentinel.method_arg, ref, ref=ref)
+ fake_fetch_moref.assert_not_called()
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_recovery_arg_failed(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession, 'invoke_api'),
+ mock.patch.object(FakeStableMoRefProxy, 'fetch_moref'),
+ ) as (fake_create, fake_invoke, fake_fetch_moref):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ ref = FakeStableMoRefProxy()
+ fake_invoke.side_effect = [vexec.ManagedObjectNotFoundException]
+
+ self.assertRaises(vexec.ManagedObjectNotFoundException,
+ _session._call_method, module, mock.sentinel.method_arg, ref)
+
+ fake_invoke.assert_called_once_with(
+ module, mock.sentinel.method_arg, ref)
+ fake_fetch_moref.assert_not_called()
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_recovery_kwarg_failed(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession, 'invoke_api'),
+ mock.patch.object(FakeStableMoRefProxy, 'fetch_moref'),
+ ) as (fake_create, fake_invoke, fake_fetch_moref):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ ref = FakeStableMoRefProxy()
+ fake_invoke.side_effect = [vexec.ManagedObjectNotFoundException]
+
+ self.assertRaises(vexec.ManagedObjectNotFoundException,
+ _session._call_method, module,
+ mock.sentinel.method_arg, ref=ref)
+
+ fake_invoke.assert_called_once_with(
+ module, mock.sentinel.method_arg, ref=ref)
+ fake_fetch_moref.assert_not_called()
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_recovery_arg_success(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession, 'invoke_api'),
+ mock.patch.object(FakeStableMoRefProxy,
+ 'fetch_moref', _fake_fetch_moref_impl),
+ ) as (fake_create, fake_invoke, fake_fetch_moref):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ ref = FakeStableMoRefProxy()
+
+ fake_invoke.side_effect = [vexec.ManagedObjectNotFoundException(
+ details=dict(obj=mock.sentinel.moref),
+ ), None]
+ _session._call_method(module, mock.sentinel.method_arg, ref)
+ fake_invoke.assert_called_with(
+ module, mock.sentinel.method_arg, ref)
+
+ @mock.patch.object(session.VMwareAPISession, '_is_vim_object',
+ return_value=True)
+ def test_call_method_recovery_kwarg_success(self, mock_is_vim):
+ with test.nested(
+ mock.patch.object(session.VMwareAPISession, '_create_session',
+ _fake_create_session),
+ mock.patch.object(session.VMwareAPISession, 'invoke_api'),
+ mock.patch.object(FakeStableMoRefProxy,
+ 'fetch_moref', _fake_fetch_moref_impl),
+ ) as (fake_create, fake_invoke, fake_fetch_moref):
+ _session = session.VMwareAPISession()
+ module = mock.Mock()
+ ref = FakeStableMoRefProxy()
+
+ fake_invoke.side_effect = [vexec.ManagedObjectNotFoundException(
+ details=dict(obj=mock.sentinel.moref),
+ ), None]
+ _session._call_method(module, mock.sentinel.method_arg, ref=ref)
+ fake_invoke.assert_called_with(
+ module, mock.sentinel.method_arg, ref=ref)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vif.py b/nova/tests/unit/virt/vmwareapi/test_vif.py
index b0fb9df47c..02d516fac7 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vif.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vif.py
@@ -13,7 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
from oslo_vmware import vim_util
from nova import exception
diff --git a/nova/tests/unit/virt/vmwareapi/test_vim_util.py b/nova/tests/unit/virt/vmwareapi/test_vim_util.py
index ebfa2010ee..b3057a99ac 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vim_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vim_util.py
@@ -28,11 +28,11 @@ class VMwareVIMUtilTestCase(test.NoDBTestCase):
def test_get_inner_objects(self):
property = ['summary.name']
# Get the fake datastores directly from the cluster
- cluster_refs = fake._get_object_refs('ClusterComputeResource')
- cluster = fake._get_object(cluster_refs[0])
+ cluster = fake.get_first_object('ClusterComputeResource')
+ cluster_ref = cluster.obj
expected_ds = cluster.datastore.ManagedObjectReference
# Get the fake datastores using inner objects utility method
result = vim_util.get_inner_objects(
- self.vim, cluster_refs[0], 'datastore', 'Datastore', property)
+ self.vim, cluster_ref, 'datastore', 'Datastore', property)
datastores = [oc.obj for oc in result.objects]
self.assertEqual(expected_ds, datastores)
diff --git a/nova/tests/unit/virt/vmwareapi/test_vm_util.py b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
index ea30895a4d..82fa07a882 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vm_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
@@ -15,14 +15,15 @@
# under the License.
import collections
+from unittest import mock
-import mock
from oslo_service import fixture as oslo_svc_fixture
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
+from oslo_vmware import vim_util as vutil
from nova import exception
from nova.network import model as network_model
@@ -31,7 +32,7 @@ from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
-from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import session as vmware_session
from nova.virt.vmwareapi import vm_util
@@ -375,7 +376,7 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
ide_controller = fake.VirtualIDEController()
devices.append(scsi_controller)
devices.append(ide_controller)
- fake._update_object("VirtualMachine", vm)
+ fake.update_object(vm)
# return the scsi type, not ide
self.assertEqual(constants.DEFAULT_ADAPTER_TYPE,
vm_util.get_scsi_adapter_type(devices))
@@ -387,7 +388,7 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
ide_controller = fake.VirtualIDEController()
devices.append(scsi_controller)
devices.append(ide_controller)
- fake._update_object("VirtualMachine", vm)
+ fake.update_object(vm)
# the controller is not suitable since the device under this controller
# has exceeded SCSI_MAX_CONNECT_NUMBER
for i in range(0, constants.SCSI_MAX_CONNECT_NUMBER):
@@ -1036,7 +1037,7 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
found[0] = True
mock_log_warn.side_effect = fake_log_warn
- session = driver.VMwareAPISession()
+ session = vmware_session.VMwareAPISession()
config_spec = vm_util.get_vm_create_spec(
session.vim.client.factory,
@@ -1987,23 +1988,85 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
mock_get_name.assert_called_once_with(self._instance.display_name,
self._instance.uuid)
-
-@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
+ def test_create_fcd_id_obj(self):
+ fcd_id_obj = mock.Mock()
+ client_factory = mock.Mock()
+ client_factory.create.return_value = fcd_id_obj
+ fcd_id = mock.sentinel.fcd_id
+ ret = vm_util._create_fcd_id_obj(client_factory, fcd_id)
+
+ self.assertEqual(fcd_id_obj, ret)
+ self.assertEqual(fcd_id, ret.id)
+ client_factory.create.assert_called_once_with('ns0:ID')
+
+ @mock.patch.object(vm_util, '_create_fcd_id_obj')
+ @mock.patch.object(vutil, 'get_moref')
+ def test_attach_fcd(self, get_moref, create_fcd_id_obj):
+ disk_id = mock.sentinel.disk_id
+ create_fcd_id_obj.return_value = disk_id
+
+ ds_ref = mock.sentinel.ds_ref
+ get_moref.return_value = ds_ref
+
+ task = mock.sentinel.task
+ session = mock.Mock()
+ session._call_method.return_value = task
+
+ vm_ref = mock.sentinel.vm_ref
+ fcd_id = mock.sentinel.fcd_id
+ ds_ref_val = mock.sentinel.ds_ref_val
+ controller_key = mock.sentinel.controller_key
+ unit_number = mock.sentinel.unit_number
+ vm_util.attach_fcd(
+ session, vm_ref, fcd_id, ds_ref_val, controller_key, unit_number)
+
+ create_fcd_id_obj.assert_called_once_with(
+ session.vim.client.factory, fcd_id)
+ get_moref.assert_called_once_with(ds_ref_val, 'Datastore')
+ session._call_method.assert_called_once_with(
+ session.vim, "AttachDisk_Task", vm_ref, diskId=disk_id,
+ datastore=ds_ref, controllerKey=controller_key,
+ unitNumber=unit_number)
+ session._wait_for_task.assert_called_once_with(task)
+
+ @mock.patch.object(vm_util, '_create_fcd_id_obj')
+ def test_detach_fcd(self, create_fcd_id_obj):
+ disk_id = mock.sentinel.disk_id
+ create_fcd_id_obj.return_value = disk_id
+
+ task = mock.sentinel.task
+ session = mock.Mock()
+ session._call_method.return_value = task
+
+ vm_ref = mock.sentinel.vm_ref
+ fcd_id = mock.sentinel.fcd_id
+ vm_util.detach_fcd(session, vm_ref, fcd_id)
+
+ create_fcd_id_obj.assert_called_once_with(
+ session.vim.client.factory, fcd_id)
+ session._call_method.assert_called_once_with(
+ session.vim, "DetachDisk_Task", vm_ref, diskId=disk_id)
+ session._wait_for_task.assert_called_once_with(task)
+
+
+@mock.patch.object(vmware_session.VMwareAPISession, 'vim',
+ stubs.fake_vim_prop)
class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
# N.B. Mocking on the class only mocks test_*(), but we need
- # VMwareAPISession.vim to be mocked in both setUp and tests. Not mocking in
- # setUp causes object initialisation to fail. Not mocking in tests results
- # in vim calls not using FakeVim.
- @mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
+ # session.VMwareAPISession.vim to be mocked in both setUp and tests.
+ # Not mocking in setUp causes object initialisation to fail. Not
+ # mocking in tests results in vim calls not using FakeVim.
+ @mock.patch.object(vmware_session.VMwareAPISession, 'vim',
+ stubs.fake_vim_prop)
def setUp(self):
super(VMwareVMUtilGetHostRefTestCase, self).setUp()
fake.reset()
vm_util.vm_refs_cache_reset()
- self.session = driver.VMwareAPISession()
+ self.session = vmware_session.VMwareAPISession()
# Create a fake VirtualMachine running on a known host
- self.host_ref = list(fake._db_content['HostSystem'].keys())[0]
+ self.host_ref = fake.get_first_object_ref("HostSystem")
self.vm_ref = fake.create_vm(host_ref=self.host_ref)
@mock.patch.object(vm_util, 'get_vm_ref')
@@ -2019,7 +2082,7 @@ class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
def test_get_host_name_for_vm(self, mock_get_vm_ref):
mock_get_vm_ref.return_value = self.vm_ref
- host = fake._get_object(self.host_ref)
+ host = fake.get_object(self.host_ref)
ret = vm_util.get_host_name_for_vm(self.session, 'fake-instance')
diff --git a/nova/tests/unit/virt/vmwareapi/test_vmops.py b/nova/tests/unit/virt/vmwareapi/test_vmops.py
index f84c113758..19990b8b32 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vmops.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vmops.py
@@ -14,8 +14,8 @@
# under the License.
import time
+from unittest import mock
-import mock
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import units
@@ -37,9 +37,9 @@ from nova.tests.unit.virt.vmwareapi import stubs
from nova import version
from nova.virt import hardware
from nova.virt.vmwareapi import constants
-from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
@@ -65,18 +65,20 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
self.flags(my_ip='',
flat_injected=True)
self._context = context.RequestContext('fake_user', 'fake_project')
- self._session = driver.VMwareAPISession()
+ self._session = session.VMwareAPISession()
self._virtapi = mock.Mock()
self._image_id = uuids.image
- fake_ds_ref = vmwareapi_fake.ManagedObjectReference(value='fake-ds')
+ fake_ds_ref = vmwareapi_fake.ManagedObjectReference(
+ name='Datastore', value='fake-ds')
self._ds = ds_obj.Datastore(
ref=fake_ds_ref, name='fake_ds',
capacity=10 * units.Gi,
freespace=10 * units.Gi)
self._dc_info = ds_util.DcInfo(
ref='fake_dc_ref', name='fake_dc',
- vmFolder='fake_vm_folder')
+ vmFolder=vmwareapi_fake.ManagedObjectReference(
+ name='Folder', value='fake_vm_folder'))
cluster = vmwareapi_fake.create_cluster('fake_cluster', fake_ds_ref)
self._uuid = uuids.foo
fake_info_cache = {
@@ -166,7 +168,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.micro\n"
- "flavor:memory_mb:6\n"
+ "flavor:memory_mb:8\n"
"flavor:vcpus:28\n"
"flavor:ephemeral_gb:8128\n"
"flavor:root_gb:496\n"
@@ -297,7 +299,8 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
mock_save.assert_called_once_with()
self.assertEqual(50, self._instance.progress)
- @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
+ @mock.patch.object(vm_util, 'get_vm_ref',
+ return_value=vmwareapi_fake.ManagedObjectReference())
def test_get_info(self, mock_get_vm_ref):
result = {
'summary.config.numCpu': 4,
@@ -577,7 +580,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
vmware_tools_status="toolsOk",
succeeds=False)
- def test_clean_shutdown_no_vwaretools(self):
+ def test_clean_shutdown_no_vmwaretools(self):
self._test_clean_shutdown(timeout=10,
retry_interval=3,
returns_on=1,
@@ -1138,6 +1141,14 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
mock_attach_cdrom_to_vm.assert_called_once_with(
vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
+ def test_prepare_for_spawn_invalid_ram(self):
+ instance = self._instance.obj_clone()
+ flavor = objects.Flavor(vcpus=1, memory_mb=6, ephemeral_gb=1,
+ swap=1024, extra_specs={})
+ instance.flavor = flavor
+ self.assertRaises(exception.InstanceUnacceptable,
+ self._vmops.prepare_for_spawn, instance)
+
@mock.patch('nova.image.glance.API.get')
@mock.patch.object(vmops.LOG, 'debug')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
@@ -2051,7 +2062,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
extra_specs,
self._metadata)
- vm = vmwareapi_fake._get_object(vm_ref)
+ vm = vmwareapi_fake.get_object(vm_ref)
# Test basic VM parameters
self.assertEqual(self._instance.uuid, vm.name)
@@ -2074,7 +2085,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
datastores = vm.datastore.ManagedObjectReference
self.assertEqual(1, len(datastores))
- datastore = vmwareapi_fake._get_object(datastores[0])
+ datastore = vmwareapi_fake.get_object(datastores[0])
self.assertEqual(self._ds.name, datastore.get('summary.name'))
# Test that the VM's network is configured as specified
@@ -2176,7 +2187,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def _validate_flavor_extra_specs(self, flavor_extra_specs, expected):
# Validate that the extra specs are parsed correctly
flavor = objects.Flavor(name='my-flavor',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2227,7 +2238,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
flavor_extra_specs = {'quota:cpu_limit': 7,
'quota:cpu_reservation': 6}
flavor = objects.Flavor(name='my-flavor',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2280,7 +2291,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
'quota:cpu_reservation': 6,
'hw_video:ram_max_mb': 100}
flavor = objects.Flavor(name='my-flavor',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2692,7 +2703,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def test_get_storage_policy_none(self):
flavor = objects.Flavor(name='m1.small',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2706,7 +2717,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def test_get_storage_policy_extra_specs(self):
extra_specs = {'vmware:storage_policy': 'flavor-policy'}
flavor = objects.Flavor(name='m1.small',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2781,7 +2792,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def test_get_instance_metadata(self):
flavor = objects.Flavor(id=7,
name='m1.small',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
@@ -2796,7 +2807,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.small\n"
- "flavor:memory_mb:6\n"
+ "flavor:memory_mb:8\n"
"flavor:vcpus:28\n"
"flavor:ephemeral_gb:8128\n"
"flavor:root_gb:496\n"
@@ -2913,7 +2924,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def test_get_cores_per_socket(self):
extra_specs = {'hw:cpu_sockets': 7}
flavor = objects.Flavor(name='m1.small',
- memory_mb=6,
+ memory_mb=8,
vcpus=28,
root_gb=496,
ephemeral_gb=8128,
diff --git a/nova/tests/unit/virt/vmwareapi/test_volumeops.py b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
index 0a051d62f5..003cbb9283 100644
--- a/nova/tests/unit/virt/vmwareapi/test_volumeops.py
+++ b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
@@ -12,7 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
+
+import ddt
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_vmware import exceptions as oslo_vmw_exceptions
from oslo_vmware import vim_util as vutil
@@ -26,11 +28,12 @@ from nova.tests.unit import fake_instance
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import constants
-from nova.virt.vmwareapi import driver
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import volumeops
+@ddt.ddt
class VMwareVolumeOpsTestCase(test.NoDBTestCase):
def setUp(self):
@@ -38,7 +41,7 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
super(VMwareVolumeOpsTestCase, self).setUp()
vmwareapi_fake.reset()
stubs.set_stubs(self)
- self._session = driver.VMwareAPISession()
+ self._session = session.VMwareAPISession()
self._context = context.RequestContext('fake_user', 'fake_project')
self._volumeops = volumeops.VMwareVolumeOps(self._session)
@@ -141,8 +144,7 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
- get_volume_ref.assert_called_once_with(
- connection_info['data']['volume'])
+ get_volume_ref.assert_called_once_with(connection_info['data'])
self.assertTrue(get_vmdk_info.called)
get_vm_state.assert_called_once_with(self._volumeops._session,
instance)
@@ -265,8 +267,7 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
- get_volume_ref.assert_called_once_with(
- connection_info['data']['volume'])
+ get_volume_ref.assert_called_once_with(connection_info['data'])
get_vmdk_backed_disk_device.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data'])
adapter_type = vm_util.CONTROLLER_TO_ADAPTER_TYPE.get(
@@ -315,8 +316,7 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
get_vm_ref.assert_called_once_with(self._volumeops._session,
instance)
- get_volume_ref.assert_called_once_with(
- connection_info['data']['volume'])
+ get_volume_ref.assert_called_once_with(connection_info['data'])
get_vmdk_backed_disk_device.assert_called_once_with(
mock.sentinel.vm_ref, connection_info['data'])
get_vm_state.assert_called_once_with(self._volumeops._session,
@@ -406,6 +406,57 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
get_rdm_disk.assert_called_once_with(hardware_devices, disk_uuid)
self.assertFalse(detach_disk_from_vm.called)
+ @mock.patch.object(vm_util, 'get_vm_ref')
+ @mock.patch.object(vm_util, 'get_vm_state')
+ @mock.patch.object(vm_util, 'detach_fcd')
+ def _test__detach_volume_fcd(
+ self, detach_fcd, get_vm_state, get_vm_ref,
+ adapter_type=constants.ADAPTER_TYPE_IDE, powered_off=True):
+ vm_ref = mock.sentinel.vm_ref
+ get_vm_ref.return_value = vm_ref
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE:
+ get_vm_state.return_value = (
+ power_state.SHUTDOWN if powered_off else power_state.RUNNING)
+
+ fcd_id = mock.sentinel.fcd_id
+ ds_ref_val = mock.sentinel.ds_ref_val
+ connection_info = {'data': {'id': fcd_id,
+ 'ds_ref_val': ds_ref_val,
+ 'adapter_type': adapter_type}}
+ instance = mock.sentinel.instance
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE and not powered_off:
+ self.assertRaises(exception.Invalid,
+ self._volumeops._detach_volume_fcd,
+ connection_info,
+ instance)
+ detach_fcd.assert_not_called()
+ else:
+ self._volumeops._detach_volume_fcd(connection_info, instance)
+ detach_fcd.assert_called_once_with(
+ self._volumeops._session, vm_ref, fcd_id)
+
+ @ddt.data(
+ constants.ADAPTER_TYPE_BUSLOGIC, constants.ADAPTER_TYPE_IDE,
+ constants.ADAPTER_TYPE_LSILOGICSAS, constants.ADAPTER_TYPE_PARAVIRTUAL)
+ def test_detach_volume_fcd_powered_off_instance(self, adapter_type):
+ self._test__detach_volume_fcd(adapter_type=adapter_type)
+
+ @ddt.data(
+ constants.ADAPTER_TYPE_BUSLOGIC, constants.ADAPTER_TYPE_IDE,
+ constants.ADAPTER_TYPE_LSILOGICSAS, constants.ADAPTER_TYPE_PARAVIRTUAL)
+ def test_detach_volume_fcd_powered_on_instance(self, adapter_type):
+ self._test__detach_volume_fcd(adapter_type=adapter_type,
+ powered_off=False)
+
+ @mock.patch.object(volumeops.VMwareVolumeOps, '_detach_volume_fcd')
+ def test_detach_volume_fcd(self, detach_volume_fcd):
+ connection_info = {'driver_volume_type': constants.DISK_FORMAT_FCD}
+ instance = mock.sentinel.instance
+ self._volumeops.detach_volume(connection_info, instance)
+ detach_volume_fcd.assert_called_once_with(connection_info, instance)
+
def _test_attach_volume_vmdk(self, adapter_type=None):
connection_info = {'driver_volume_type': constants.DISK_FORMAT_VMDK,
'serial': 'volume-fake-id',
@@ -444,8 +495,7 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
get_vm_ref.assert_called_once_with(self._volumeops._session,
self._instance)
- get_volume_ref.assert_called_once_with(
- connection_info['data']['volume'])
+ get_volume_ref.assert_called_once_with(connection_info['data'])
self.assertTrue(get_vmdk_info.called)
attach_disk_to_vm.assert_called_once_with(
vm_ref, self._instance, adapter_type,
@@ -498,6 +548,126 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
constants.ADAPTER_TYPE_PARAVIRTUAL):
self._test_attach_volume_vmdk(adapter_type)
+ @mock.patch.object(vm_util, 'allocate_controller_key_and_unit_number')
+ def test_get_controller_key_and_unit(
+ self, allocate_controller_key_and_unit_number):
+ key = mock.sentinel.key
+ unit = mock.sentinel.unit
+ allocate_controller_key_and_unit_number.return_value = (
+ key, unit, None)
+
+ with mock.patch.object(self._volumeops, '_session') as session:
+ devices = mock.sentinel.devices
+ session._call_method.return_value = devices
+
+ vm_ref = mock.sentinel.vm_ref
+ adapter_type = mock.sentinel.adapter_type
+ ret = self._volumeops._get_controller_key_and_unit(
+ vm_ref, adapter_type)
+ self.assertEqual((key, unit, None), ret)
+ session._call_method.assert_called_once_with(
+ vutil, 'get_object_property', vm_ref, 'config.hardware.device')
+ allocate_controller_key_and_unit_number.assert_called_once_with(
+ session.vim.client.factory, devices, adapter_type)
+
+ @mock.patch.object(volumeops.VMwareVolumeOps,
+ '_get_controller_key_and_unit')
+ @mock.patch.object(vm_util, 'reconfigure_vm')
+ @mock.patch.object(vm_util, 'attach_fcd')
+ def _test_attach_fcd(
+ self, attach_fcd, reconfigure_vm, get_controller_key_and_unit,
+ existing_controller=True):
+ key = mock.sentinel.key
+ unit = mock.sentinel.unit
+ spec = mock.sentinel.spec
+ if existing_controller:
+ get_controller_key_and_unit.return_value = (key, unit, None)
+ else:
+ get_controller_key_and_unit.side_effect = [(None, None, spec),
+ (key, unit, None)]
+
+ with mock.patch.object(self._volumeops, '_session') as session:
+ config_spec = mock.Mock()
+ session.vim.client.factory.create.return_value = config_spec
+
+ vm_ref = mock.sentinel.vm_ref
+ adapter_type = mock.sentinel.adapter_type
+ fcd_id = mock.sentinel.fcd_id
+ ds_ref_val = mock.sentinel.ds_ref_val
+ self._volumeops._attach_fcd(
+ vm_ref, adapter_type, fcd_id, ds_ref_val)
+
+ attach_fcd.assert_called_once_with(
+ session, vm_ref, fcd_id, ds_ref_val, key, unit)
+ if existing_controller:
+ get_controller_key_and_unit.assert_called_once_with(
+ vm_ref, adapter_type)
+ reconfigure_vm.assert_not_called()
+ else:
+ exp_calls = [mock.call(vm_ref, adapter_type),
+ mock.call(vm_ref, adapter_type)]
+ get_controller_key_and_unit.assert_has_calls(exp_calls)
+ self.assertEqual([spec], config_spec.deviceChange)
+ reconfigure_vm.assert_called_once_with(
+ session, vm_ref, config_spec)
+
+ def test_attach_fcd_using_existing_controller(self):
+ self._test_attach_fcd()
+
+ def test_attach_fcd_using_new_controller(self):
+ self._test_attach_fcd(existing_controller=False)
+
+ @mock.patch.object(vm_util, 'get_vm_ref')
+ @mock.patch.object(vm_util, 'get_vm_state')
+ @mock.patch.object(volumeops.VMwareVolumeOps, '_attach_fcd')
+ def _test__attach_volume_fcd(
+ self, attach_fcd, get_vm_state, get_vm_ref,
+ adapter_type=constants.ADAPTER_TYPE_IDE, powered_off=True):
+ vm_ref = mock.sentinel.vm_ref
+ get_vm_ref.return_value = vm_ref
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE:
+ get_vm_state.return_value = (
+ power_state.SHUTDOWN if powered_off else power_state.RUNNING)
+
+ fcd_id = mock.sentinel.fcd_id
+ ds_ref_val = mock.sentinel.ds_ref_val
+ connection_info = {'data': {'id': fcd_id,
+ 'ds_ref_val': ds_ref_val,
+ 'adapter_type': adapter_type}}
+ instance = mock.sentinel.instance
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE and not powered_off:
+ self.assertRaises(exception.Invalid,
+ self._volumeops._attach_volume_fcd,
+ connection_info,
+ instance)
+ attach_fcd.assert_not_called()
+ else:
+ self._volumeops._attach_volume_fcd(connection_info, instance)
+ attach_fcd.assert_called_once_with(
+ vm_ref, adapter_type, fcd_id, ds_ref_val)
+
+ @ddt.data(
+ constants.ADAPTER_TYPE_BUSLOGIC, constants.ADAPTER_TYPE_IDE,
+ constants.ADAPTER_TYPE_LSILOGICSAS, constants.ADAPTER_TYPE_PARAVIRTUAL)
+ def test_attach_volume_fcd_powered_off_instance(self, adapter_type):
+ self._test__attach_volume_fcd(adapter_type=adapter_type)
+
+ @ddt.data(
+ constants.ADAPTER_TYPE_BUSLOGIC, constants.ADAPTER_TYPE_IDE,
+ constants.ADAPTER_TYPE_LSILOGICSAS, constants.ADAPTER_TYPE_PARAVIRTUAL)
+ def test_attach_volume_fcd_powered_on_instance(self, adapter_type):
+ self._test__attach_volume_fcd(adapter_type=adapter_type,
+ powered_off=False)
+
+ @mock.patch.object(volumeops.VMwareVolumeOps, '_attach_volume_fcd')
+ def test_attach_volume_fcd(self, attach_volume_fcd):
+ connection_info = {'driver_volume_type': constants.DISK_FORMAT_FCD}
+ instance = mock.sentinel.instance
+ self._volumeops.attach_volume(connection_info, instance)
+ attach_volume_fcd.assert_called_once_with(connection_info, instance)
+
def test_attach_volume_iscsi(self):
for adapter_type in (None, constants.DEFAULT_ADAPTER_TYPE,
constants.ADAPTER_TYPE_BUSLOGIC,
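The FCD attach and detach tests added above all encode the same invariant: an FCD volume behind an IDE adapter can only be attached or detached while the instance is powered off, otherwise exception.Invalid is raised and the vm_util helper is never called. A minimal sketch of that guard, using a hypothetical helper name (the real driver code may structure this differently):

from nova.compute import power_state
from nova import exception
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vm_util


def _ensure_fcd_ide_powered_off(session, instance, adapter_type):
    # IDE devices cannot be hot-plugged, so an FCD volume on an IDE adapter
    # may only be attached or detached while the instance is powered off.
    if adapter_type != constants.ADAPTER_TYPE_IDE:
        return
    if vm_util.get_vm_state(session, instance) != power_state.SHUTDOWN:
        raise exception.Invalid(
            'FCD volumes on an IDE adapter require a powered off instance')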
diff --git a/nova/tests/unit/virt/zvm/__init__.py b/nova/tests/unit/virt/zvm/__init__.py
index e69de29bb2..a93e19e1be 100644
--- a/nova/tests/unit/virt/zvm/__init__.py
+++ b/nova/tests/unit/virt/zvm/__init__.py
@@ -0,0 +1,20 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+try:
+ import zvmconnector # noqa: F401
+except ImportError:
+ raise unittest.SkipTest(
+ "The 'zVMCloudConnector' dependency is not installed."
+ )
diff --git a/nova/tests/unit/virt/zvm/test_driver.py b/nova/tests/unit/virt/zvm/test_driver.py
index 85a8a5227c..a5a129331d 100644
--- a/nova/tests/unit/virt/zvm/test_driver.py
+++ b/nova/tests/unit/virt/zvm/test_driver.py
@@ -13,8 +13,9 @@
# under the License.
import copy
-import mock
import os
+from unittest import mock
+
from oslo_utils.fixture import uuidsentinel
from nova.compute import provider_tree
diff --git a/nova/tests/unit/virt/zvm/test_guest.py b/nova/tests/unit/virt/zvm/test_guest.py
index 029f211ea4..c786270715 100644
--- a/nova/tests/unit/virt/zvm/test_guest.py
+++ b/nova/tests/unit/virt/zvm/test_guest.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova.compute import power_state as compute_power_state
from nova import context
diff --git a/nova/tests/unit/virt/zvm/test_hypervisor.py b/nova/tests/unit/virt/zvm/test_hypervisor.py
index d2081d49e2..c816ca57f6 100644
--- a/nova/tests/unit/virt/zvm/test_hypervisor.py
+++ b/nova/tests/unit/virt/zvm/test_hypervisor.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from nova import context
from nova import exception
diff --git a/nova/tests/unit/virt/zvm/test_utils.py b/nova/tests/unit/virt/zvm/test_utils.py
index 60893759b9..77747855f4 100644
--- a/nova/tests/unit/virt/zvm/test_utils.py
+++ b/nova/tests/unit/virt/zvm/test_utils.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-import mock
+from unittest import mock
from zvmconnector import connector
diff --git a/nova/tests/unit/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py
index 6aa89cafd5..e53ebe3cb8 100644
--- a/nova/tests/unit/volume/test_cinder.py
+++ b/nova/tests/unit/volume/test_cinder.py
@@ -13,13 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
+from unittest import mock
+
from cinderclient import api_versions as cinder_api_versions
from cinderclient import exceptions as cinder_exception
from cinderclient.v3 import limits as cinder_limits
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import session
from keystoneclient import exceptions as keystone_exception
-import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
@@ -520,16 +521,15 @@ class CinderApiTestCase(test.NoDBTestCase):
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete_failed(self, mock_cinderclient, mock_log):
mock_cinderclient.return_value.attachments.delete.side_effect = (
- cinder_exception.NotFound(404, '404'))
+ cinder_exception.BadRequest(400, '400'))
attachment_id = uuids.attachment
- ex = self.assertRaises(exception.VolumeAttachmentNotFound,
+ ex = self.assertRaises(exception.InvalidInput,
self.api.attachment_delete,
self.ctx,
attachment_id)
- self.assertEqual(404, ex.code)
- self.assertIn(attachment_id, str(ex))
+ self.assertEqual(400, ex.code)
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=exception.CinderAPIVersionNotAvailable(
@@ -546,6 +546,16 @@ class CinderApiTestCase(test.NoDBTestCase):
skip_version_check=True)
@mock.patch('nova.volume.cinder.cinderclient')
+ def test_attachment_delete_not_found(self, mock_cinderclient):
+ mock_cinderclient.return_value.attachments.delete.side_effect = (
+ cinder_exception.ClientException(404))
+
+ attachment_id = uuids.attachment
+ self.api.attachment_delete(self.ctx, attachment_id)
+
+ self.assertEqual(1, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete_internal_server_error(self, mock_cinderclient):
mock_cinderclient.return_value.attachments.delete.side_effect = (
cinder_exception.ClientException(500))
@@ -569,6 +579,29 @@ class CinderApiTestCase(test.NoDBTestCase):
self.assertEqual(2, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient')
+ def test_attachment_delete_gateway_timeout(self, mock_cinderclient):
+ mock_cinderclient.return_value.attachments.delete.side_effect = (
+ cinder_exception.ClientException(504))
+
+ self.assertRaises(cinder_exception.ClientException,
+ self.api.attachment_delete,
+ self.ctx, uuids.attachment_id)
+
+ self.assertEqual(5, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_attachment_delete_gateway_timeout_do_not_raise(
+ self, mock_cinderclient):
+ # generate exception, and then have a normal return on the next retry
+ mock_cinderclient.return_value.attachments.delete.side_effect = [
+ cinder_exception.ClientException(504), None]
+
+ attachment_id = uuids.attachment
+ self.api.attachment_delete(self.ctx, attachment_id)
+
+ self.assertEqual(2, mock_cinderclient.call_count)
+
+ @mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete_bad_request_exception(self, mock_cinderclient):
mock_cinderclient.return_value.attachments.delete.side_effect = (
cinder_exception.BadRequest(400))
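Taken together, the attachment_delete tests above pin down the behaviour the API layer is expected to provide: a 404 from Cinder is swallowed (the attachment is already gone), a 400 is translated to InvalidInput, and a 504 is retried, with up to five attempts before the ClientException is re-raised. A rough sketch of the 404/504 handling only, written as a hypothetical stand-alone helper rather than the actual nova.volume.cinder code:

from cinderclient import exceptions as cinder_exception


def delete_attachment_with_retries(client, attachment_id, max_attempts=5):
    # Retry only on 504 (gateway timeout); give up after max_attempts.
    for attempt in range(1, max_attempts + 1):
        try:
            client.attachments.delete(attachment_id)
            return
        except cinder_exception.ClientException as exc:
            if exc.code == 404:
                # Attachment already gone: nothing left to delete.
                return
            if exc.code == 504 and attempt < max_attempts:
                continue
            raise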
@@ -1046,6 +1079,17 @@ class CinderApiTestCase(test.NoDBTestCase):
mock_volumes.get_encryption_metadata.assert_called_once_with(
{'encryption_key_id': 'fake_key'})
+ @mock.patch('nova.volume.cinder.cinderclient')
+ def test_volume_reimage(self, mock_cinderclient):
+ mock_reimage = mock.MagicMock()
+ mock_volumes = mock.MagicMock(reimage=mock_reimage)
+ mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
+ self.api.reimage_volume(
+ self.ctx, uuids.volume_id, uuids.image_id,
+ reimage_reserved=True)
+ mock_cinderclient.assert_called_once_with(self.ctx, '3.68')
+ mock_reimage.assert_called_with(uuids.volume_id, uuids.image_id, True)
+
def test_translate_cinder_exception_no_error(self):
my_func = mock.Mock()
my_func.__name__ = 'my_func'
diff --git a/nova/utils.py b/nova/utils.py
index ec5e6c9248..b5d45c58b5 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -29,6 +29,7 @@ import shutil
import tempfile
import eventlet
+from eventlet import tpool
from keystoneauth1 import loading as ks_loading
import netaddr
from openstack import connection
@@ -631,15 +632,13 @@ def _serialize_profile_info():
return trace_info
-def spawn(func, *args, **kwargs):
- """Passthrough method for eventlet.spawn.
-
- This utility exists so that it can be stubbed for testing without
- interfering with the service spawns.
+def pass_context(runner, func, *args, **kwargs):
+ """Generalised passthrough method
- It will also grab the context from the threadlocal store and add it to
- the store on the new thread. This allows for continuity in logging the
- context when using this method to spawn a new thread.
+ It will grab the context from the threadlocal store and add it to
+ the store on the runner. This allows for continuity in logging the
+ context when using this method to spawn a new thread through the
+ runner function
"""
_context = common_context.get_current()
profiler_info = _serialize_profile_info()
@@ -654,11 +653,11 @@ def spawn(func, *args, **kwargs):
profiler.init(**profiler_info)
return func(*args, **kwargs)
- return eventlet.spawn(context_wrapper, *args, **kwargs)
+ return runner(context_wrapper, *args, **kwargs)
-def spawn_n(func, *args, **kwargs):
- """Passthrough method for eventlet.spawn_n.
+def spawn(func, *args, **kwargs):
+ """Passthrough method for eventlet.spawn.
This utility exists so that it can be stubbed for testing without
interfering with the service spawns.
@@ -667,25 +666,26 @@ def spawn_n(func, *args, **kwargs):
the store on the new thread. This allows for continuity in logging the
context when using this method to spawn a new thread.
"""
- _context = common_context.get_current()
- profiler_info = _serialize_profile_info()
- @functools.wraps(func)
- def context_wrapper(*args, **kwargs):
- # NOTE: If update_store is not called after spawn_n it won't be
- # available for the logger to pull from threadlocal storage.
- if _context is not None:
- _context.update_store()
- if profiler_info and profiler:
- profiler.init(**profiler_info)
- func(*args, **kwargs)
+ return pass_context(eventlet.spawn, func, *args, **kwargs)
+
+
+def spawn_n(func, *args, **kwargs):
+ """Passthrough method for eventlet.spawn_n.
+
+ This utility exists so that it can be stubbed for testing without
+ interfering with the service spawns.
- eventlet.spawn_n(context_wrapper, *args, **kwargs)
+ It will also grab the context from the threadlocal store and add it to
+ the store on the new thread. This allows for continuity in logging the
+ context when using this method to spawn a new thread.
+ """
+ pass_context(eventlet.spawn_n, func, *args, **kwargs)
def tpool_execute(func, *args, **kwargs):
"""Run func in a native thread"""
- eventlet.tpool.execute(func, *args, **kwargs)
+ return pass_context(tpool.execute, func, *args, **kwargs)
def is_none_string(val):
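The refactor above collapses spawn(), spawn_n() and tpool_execute() into a single pass_context() helper: the caller supplies the runner (eventlet.spawn, eventlet.spawn_n or tpool.execute) and pass_context() wraps the target function so the request context and profiler info travel with it into the new thread. A stripped-down sketch of the pattern, standing alone without the nova plumbing:

import functools
import threading

_current = threading.local()


def pass_context(runner, func, *args, **kwargs):
    # Capture whatever context the calling thread holds right now ...
    ctx = getattr(_current, 'context', None)

    @functools.wraps(func)
    def context_wrapper(*a, **kw):
        # ... and re-install it in the thread started by the runner, so
        # logging keyed off the thread-local context stays continuous.
        if ctx is not None:
            _current.context = ctx
        return func(*a, **kw)

    return runner(context_wrapper, *args, **kwargs)


# The public helpers then reduce to thin wrappers, e.g.:
# spawn = functools.partial(pass_context, eventlet.spawn)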
diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py
index 4a41703174..28a866a817 100644
--- a/nova/virt/block_device.py
+++ b/nova/virt/block_device.py
@@ -227,9 +227,70 @@ class DriverSwapBlockDevice(DriverBlockDevice):
})
+class DriverImageBlockDevice(DriverBlockDevice):
+ _valid_source = 'image'
+ _proxy_as_attr_inherited = set(['image_id'])
+ _new_only_fields = set([
+ 'disk_bus',
+ 'device_type',
+ 'guest_format',
+ 'boot_index',
+ 'encrypted',
+ 'encryption_secret_uuid',
+ 'encryption_format',
+ 'encryption_options'
+ ])
+ _fields = set([
+ 'device_name',
+ 'size']) | _new_only_fields
+ _legacy_fields = (
+ _fields - _new_only_fields | set(['num', 'virtual_name']))
+ _update_on_save = {
+ 'disk_bus': None,
+ 'device_name': None,
+ 'device_type': None,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None,
+ }
+
+ def _transform(self):
+ if (not self._bdm_obj.get('source_type') == 'image' or
+ not self._bdm_obj.get('destination_type') == 'local'):
+ raise _InvalidType
+ self.update({
+ 'device_name': self._bdm_obj.device_name,
+ 'size': self._bdm_obj.volume_size or 0,
+ 'disk_bus': self._bdm_obj.disk_bus,
+ 'device_type': self._bdm_obj.device_type,
+ 'guest_format': self._bdm_obj.guest_format,
+ 'image_id': self._bdm_obj.image_id,
+ 'boot_index': 0,
+ 'encrypted': self._bdm_obj.encrypted,
+ 'encryption_secret_uuid': self._bdm_obj.encryption_secret_uuid,
+ 'encryption_format': self._bdm_obj.encryption_format,
+ 'encryption_options': self._bdm_obj.encryption_options
+ })
+
+
class DriverEphemeralBlockDevice(DriverBlockDevice):
- _new_only_fields = set(['disk_bus', 'device_type', 'guest_format'])
+ _new_only_fields = set([
+ 'disk_bus',
+ 'device_type',
+ 'guest_format',
+ 'encrypted',
+ 'encryption_secret_uuid',
+ 'encryption_format',
+ 'encryption_options'])
_fields = set(['device_name', 'size']) | _new_only_fields
+ _update_on_save = {
+ 'disk_bus': None,
+ 'device_name': None,
+ 'device_type': None,
+ 'encryption_secret_uuid': None,
+ 'encryption_format': None,
+ 'encryption_options': None,
+ }
def _transform(self):
if not block_device.new_format_is_ephemeral(self._bdm_obj):
@@ -239,7 +300,11 @@ class DriverEphemeralBlockDevice(DriverBlockDevice):
'size': self._bdm_obj.volume_size or 0,
'disk_bus': self._bdm_obj.disk_bus,
'device_type': self._bdm_obj.device_type,
- 'guest_format': self._bdm_obj.guest_format
+ 'guest_format': self._bdm_obj.guest_format,
+ 'encrypted': self._bdm_obj.encrypted,
+ 'encryption_secret_uuid': self._bdm_obj.encryption_secret_uuid,
+ 'encryption_format': self._bdm_obj.encryption_format,
+ 'encryption_options': self._bdm_obj.encryption_options
})
@@ -802,15 +867,15 @@ def _convert_block_devices(device_type, block_device_mapping):
convert_swap = functools.partial(_convert_block_devices,
DriverSwapBlockDevice)
+convert_local_images = functools.partial(_convert_block_devices,
+ DriverImageBlockDevice)
convert_ephemerals = functools.partial(_convert_block_devices,
DriverEphemeralBlockDevice)
-
convert_volumes = functools.partial(_convert_block_devices,
DriverVolumeBlockDevice)
-
convert_snapshots = functools.partial(_convert_block_devices,
DriverVolSnapshotBlockDevice)
@@ -897,9 +962,15 @@ def get_swap(transformed_list):
return None
-_IMPLEMENTED_CLASSES = (DriverSwapBlockDevice, DriverEphemeralBlockDevice,
- DriverVolumeBlockDevice, DriverVolSnapshotBlockDevice,
- DriverVolImageBlockDevice, DriverVolBlankBlockDevice)
+_IMPLEMENTED_CLASSES = (
+ DriverSwapBlockDevice,
+ DriverEphemeralBlockDevice,
+ DriverVolumeBlockDevice,
+ DriverVolSnapshotBlockDevice,
+ DriverVolImageBlockDevice,
+ DriverVolBlankBlockDevice,
+ DriverImageBlockDevice
+)
def is_implemented(bdm):
@@ -912,6 +983,10 @@ def is_implemented(bdm):
return False
+def is_local_image(bdm):
+ return bdm.source_type == 'image' and bdm.destination_type == 'local'
+
+
def is_block_device_mapping(bdm):
return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank') and
bdm.destination_type == 'volume' and
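DriverImageBlockDevice only accepts BDMs whose source_type is 'image' and destination_type is 'local', mirroring the is_local_image() predicate, and convert_local_images is the usual functools.partial over _convert_block_devices. A rough illustration of how the driver-side converters partition a mixed BDM list (simplified; the real code goes through each class's _transform(), which silently skips entries that raise _InvalidType):

def _split_bdms(bdms):
    image = [b for b in bdms
             if b.source_type == 'image' and b.destination_type == 'local']
    ephemerals = [b for b in bdms
                  if b.source_type == 'blank' and
                  b.destination_type == 'local' and b.guest_format != 'swap']
    swap = [b for b in bdms
            if b.source_type == 'blank' and
            b.destination_type == 'local' and b.guest_format == 'swap']
    volumes = [b for b in bdms if b.destination_type == 'volume']
    return image, ephemerals, swap, volumes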
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index b20e0c6bf7..5d42a392d8 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -20,7 +20,9 @@ Driver base-classes:
types that support that contract
"""
+import itertools
import sys
+import typing as ty
import os_resource_classes as orc
import os_traits
@@ -32,6 +34,7 @@ from nova import context as nova_context
from nova.i18n import _
from nova import objects
from nova.virt import event as virtevent
+import nova.virt.node
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -44,6 +47,7 @@ def get_block_device_info(instance, block_device_mapping):
of a dict containing the following keys:
- root_device_name: device name of the root disk
+ - image: An instance of DriverImageBlockDevice or None
- ephemerals: a (potentially empty) list of DriverEphemeralBlockDevice
instances
- swap: An instance of DriverSwapBlockDevice or None
@@ -52,18 +56,18 @@ def get_block_device_info(instance, block_device_mapping):
specialized subclasses.
"""
from nova.virt import block_device as virt_block_device
-
- block_device_info = {
+ return {
'root_device_name': instance.root_device_name,
+ 'image': virt_block_device.convert_local_images(
+ block_device_mapping),
'ephemerals': virt_block_device.convert_ephemerals(
block_device_mapping),
'block_device_mapping':
- virt_block_device.convert_all_volumes(*block_device_mapping)
+ virt_block_device.convert_all_volumes(*block_device_mapping),
+ 'swap':
+ virt_block_device.get_swap(
+ virt_block_device.convert_swap(block_device_mapping))
}
- swap_list = virt_block_device.convert_swap(block_device_mapping)
- block_device_info['swap'] = virt_block_device.get_swap(swap_list)
-
- return block_device_info
def block_device_info_get_root_device(block_device_info):
@@ -81,6 +85,14 @@ def swap_is_usable(swap):
return swap and swap['device_name'] and swap['swap_size'] > 0
+def block_device_info_get_image(block_device_info):
+ block_device_info = block_device_info or {}
+ # get_disk_mapping() supports block_device_info=None and thus requires that
+ # we return a list here.
+ image = block_device_info.get('image') or []
+ return image
+
+
def block_device_info_get_ephemerals(block_device_info):
block_device_info = block_device_info or {}
ephemerals = block_device_info.get('ephemerals') or []
@@ -93,6 +105,19 @@ def block_device_info_get_mapping(block_device_info):
return block_device_mapping
+def block_device_info_get_encrypted_disks(
+ block_device_info: ty.Mapping[str, ty.Any],
+) -> ty.List['nova.virt.block_device.DriverBlockDevice']:
+ block_device_info = block_device_info or {}
+ return [
+ driver_bdm for driver_bdm in itertools.chain(
+ block_device_info.get('image', []),
+ block_device_info.get('ephemerals', []),
+ )
+ if driver_bdm.get('encrypted')
+ ]
+
+
# NOTE(aspiers): When adding new capabilities, ensure they are
# mirrored in ComputeDriver.capabilities, and that the corresponding
# values should always be standard traits in os_traits. If something
@@ -126,11 +151,17 @@ CAPABILITY_TRAITS_MAP = {
"supports_secure_boot": os_traits.COMPUTE_SECURITY_UEFI_SECURE_BOOT,
"supports_socket_pci_numa_affinity":
os_traits.COMPUTE_SOCKET_PCI_NUMA_AFFINITY,
+ "supports_remote_managed_ports": os_traits.COMPUTE_REMOTE_MANAGED_PORTS,
+ "supports_ephemeral_encryption": os_traits.COMPUTE_EPHEMERAL_ENCRYPTION,
+ "supports_ephemeral_encryption_luks":
+ os_traits.COMPUTE_EPHEMERAL_ENCRYPTION_LUKS,
+ "supports_ephemeral_encryption_plain":
+ os_traits.COMPUTE_EPHEMERAL_ENCRYPTION_PLAIN,
}
def _check_image_type_exclude_list(capability, supported):
- """Enforce the exclusion list on image_type capabilites.
+ """Enforce the exclusion list on image_type capabilities.
:param capability: The supports_image_type_foo capability being checked
:param supported: The flag indicating whether the virt driver *can*
@@ -194,6 +225,12 @@ class ComputeDriver(object):
"supports_vtpm": False,
"supports_secure_boot": False,
"supports_socket_pci_numa_affinity": False,
+ "supports_remote_managed_ports": False,
+
+ # Ephemeral encryption support flags
+ "supports_ephemeral_encryption": False,
+ "supports_ephemeral_encryption_luks": False,
+ "supports_ephemeral_encryption_plain": False,
# Image type support flags
"supports_image_type_aki": False,
@@ -297,7 +334,8 @@ class ComputeDriver(object):
admin_password, allocations, bdms, detach_block_devices,
attach_block_devices, network_info=None,
evacuate=False, block_device_info=None,
- preserve_ephemeral=False, accel_uuids=None):
+ preserve_ephemeral=False, accel_uuids=None,
+ reimage_boot_volume=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -335,6 +373,7 @@ class ComputeDriver(object):
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
:param accel_uuids: Accelerator UUIDs.
+ :param reimage_boot_volume: Re-image the volume backed instance.
"""
raise NotImplementedError()
@@ -1557,6 +1596,11 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
+ def get_nodenames_by_uuid(self, refresh=False):
+ """Returns a dict of {uuid: nodename} for all managed nodes."""
+ nodename = self.get_available_nodes()[0]
+ return {nova.virt.node.get_local_node_uuid(): nodename}
+
def node_is_available(self, nodename):
"""Return whether this compute service manages a particular node."""
if nodename in self.get_available_nodes():
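With the new 'image' key, get_block_device_info() returns a dict holding root_device_name, image, ephemerals, block_device_mapping and swap, and block_device_info_get_encrypted_disks() walks the image and ephemeral entries looking for ones flagged as encrypted. A quick usage sketch; image_bdm, eph_bdm and handle() are placeholders for converted driver BDMs and driver-specific handling:

from nova.virt import driver

block_device_info = {
    'root_device_name': '/dev/vda',
    'image': [image_bdm],            # DriverImageBlockDevice, or []
    'ephemerals': [eph_bdm],         # DriverEphemeralBlockDevice list
    'block_device_mapping': [],      # attached volumes
    'swap': None,
}

# A virt driver with ephemeral encryption support can then find the local
# disks that need an encryption secret before creating the guest:
for bdm in driver.block_device_info_get_encrypted_disks(block_device_info):
    handle(bdm['encryption_format'], bdm['encryption_secret_uuid'])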
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 41524b69d2..bf7dc8fc72 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -32,6 +32,7 @@ import fixtures
import os_resource_classes as orc
from oslo_log import log as logging
from oslo_serialization import jsonutils
+from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import versionutils
from nova.compute import power_state
@@ -48,6 +49,7 @@ from nova.objects import migrate_data
from nova.virt import driver
from nova.virt import hardware
from nova.virt.ironic import driver as ironic
+import nova.virt.node
from nova.virt import virtapi
CONF = nova.conf.CONF
@@ -116,6 +118,7 @@ class FakeDriver(driver.ComputeDriver):
"supports_trusted_certs": True,
"supports_pcpus": False,
"supports_accelerators": True,
+ "supports_remote_managed_ports": True,
# Supported image types
"supports_image_type_raw": True,
@@ -159,8 +162,8 @@ class FakeDriver(driver.ComputeDriver):
self._host = host
# NOTE(gibi): this is unnecessary complex and fragile but this is
# how many current functional sample tests expect the node name.
- self._nodes = (['fake-mini'] if self._host == 'compute'
- else [self._host])
+ self._set_nodes(['fake-mini'] if self._host == 'compute'
+ else [self._host])
def _set_nodes(self, nodes):
# NOTE(gibi): this is not part of the driver interface but used
@@ -503,6 +506,12 @@ class FakeDriver(driver.ComputeDriver):
host_status['host_hostname'] = nodename
host_status['host_name_label'] = nodename
host_status['cpu_info'] = jsonutils.dumps(cpu_info)
+ # NOTE(danms): Because the fake driver runs on the same host
+ # in tests, potentially with multiple nodes, we need to
+ # control our node uuids. Make sure we return a unique and
+ # consistent uuid for each node we are responsible for to
+ # avoid the persistent local node identity from taking over.
+ host_status['uuid'] = str(getattr(uuids, 'node_%s' % nodename))
return host_status
def update_provider_tree(self, provider_tree, nodename, allocations=None):
@@ -645,6 +654,10 @@ class FakeDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return self._nodes
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {str(getattr(uuids, 'node_%s' % n)): n
+ for n in self.get_available_nodes()}
+
def instance_on_disk(self, instance):
return False
@@ -763,7 +776,7 @@ class PredictableNodeUUIDDriver(SmallFakeDriver):
PredictableNodeUUIDDriver, self).get_available_resource(nodename)
# This is used in ComputeNode.update_from_virt_driver which is called
# from the ResourceTracker when creating a ComputeNode.
- resources['uuid'] = uuid.uuid5(uuid.NAMESPACE_DNS, nodename)
+ resources['uuid'] = str(uuid.uuid5(uuid.NAMESPACE_DNS, nodename))
return resources
@@ -890,6 +903,36 @@ class FakeLiveMigrateDriverWithNestedCustomResources(
class FakeDriverWithPciResources(SmallFakeDriver):
+ """NOTE: this driver provides symmetric compute nodes. Each compute will
+ have the same resources with the same addresses. It is dangerous because
+ using this driver can hide issues where, in an asymmetric environment,
+ nova fails to update entities according to the host specific addresses
+ (e.g. pci_slot of the neutron port bindings).
+
+ The current non virt driver specific functional test environment has many
+ shortcomings, making it really hard to simulate host specific virt drivers.
+
+ 1) The virt driver is instantiated by the service logic from the name of
+ the driver class. This makes passing input to the driver instance from the
+ test at init time pretty impossible. This could be solved with some
+ fixtures around nova.virt.driver.load_compute_driver()
+
+ 2) The compute service accesses the hypervisor not only via the virt
+ interface but also reads the sysfs of the host. So simply providing a fake
+ virt driver instance is not enough to isolate simulated compute services
+ that are running on the same host. Also, these low level sysfs reads do not
+ carry host specific information in the call params, so simply mocking the
+ low level call does not give a way to provide host specific return values.
+
+ 3) CONF is global, and it is read dynamically by the driver. So
+ providing host specific CONF to driver instances without race conditions
+ between the drivers is extremely hard, especially if periodic tasks are
+ enabled.
+
+ The libvirt based functional test env under nova.tests.functional.libvirt
+ has better support for creating asymmetric environments, so please consider
+ using that instead if possible.
+ """
PCI_ADDR_PF1 = '0000:01:00.0'
PCI_ADDR_PF1_VF1 = '0000:01:00.1'
@@ -905,7 +948,7 @@ class FakeDriverWithPciResources(SmallFakeDriver):
def setUp(self):
super(FakeDriverWithPciResources.
FakeDriverWithPciResourcesConfigFixture, self).setUp()
- # Set passthrough_whitelist before the compute node starts to match
+ # Set device_spec before the compute node starts to match
# with the PCI devices reported by this fake driver.
# NOTE(gibi): 0000:01:00 is tagged to physnet1 and therefore not a
@@ -920,7 +963,7 @@ class FakeDriverWithPciResources(SmallFakeDriver):
# Having two PFs on the same physnet will allow us to test the
# placement allocation - physical allocation matching based on the
# bandwidth allocation in the future.
- CONF.set_override('passthrough_whitelist', override=[
+ CONF.set_override('device_spec', override=[
jsonutils.dumps(
{
"address": {
@@ -954,6 +997,19 @@ class FakeDriverWithPciResources(SmallFakeDriver):
],
group='pci')
+ # These mocks should be removed after bug
+ # https://bugs.launchpad.net/nova/+bug/1961587 has been fixed and
+ # every SRIOV device related information is transferred through the
+ # virt driver and the PciDevice object instead of queried with
+ # sysfs calls by the network.neutron.API code.
+ self.useFixture(fixtures.MockPatch(
+ 'nova.pci.utils.get_mac_by_pci_address',
+ return_value='52:54:00:1e:59:c6'))
+
+ self.useFixture(fixtures.MockPatch(
+ 'nova.pci.utils.get_vf_num_by_pci_address',
+ return_value=1))
+
def get_available_resource(self, nodename):
host_status = super(
FakeDriverWithPciResources, self).get_available_resource(nodename)
@@ -1055,3 +1111,42 @@ class FakeDriverWithCaching(FakeDriver):
else:
self.cached_images.add(image_id)
return True
+
+
+class EphEncryptionDriver(MediumFakeDriver):
+ capabilities = dict(
+ FakeDriver.capabilities,
+ supports_ephemeral_encryption=True)
+
+
+class EphEncryptionDriverLUKS(MediumFakeDriver):
+ capabilities = dict(
+ FakeDriver.capabilities,
+ supports_ephemeral_encryption=True,
+ supports_ephemeral_encryption_luks=True)
+
+
+class EphEncryptionDriverPLAIN(MediumFakeDriver):
+ capabilities = dict(
+ FakeDriver.capabilities,
+ supports_ephemeral_encryption=True,
+ supports_ephemeral_encryption_plain=True)
+
+
+class FakeDriverWithoutFakeNodes(FakeDriver):
+ """FakeDriver that behaves like a real single-node driver.
+
+ This behaves like a real virt driver from the perspective of its
+ nodes, with a stable nodename and use of the global node identity
+ stuff to provide a stable node UUID.
+ """
+
+ def get_available_resource(self, nodename):
+ resources = super().get_available_resource(nodename)
+ resources['uuid'] = nova.virt.node.get_local_node_uuid()
+ return resources
+
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {
+ nova.virt.node.get_local_node_uuid(): self.get_available_nodes()[0]
+ }
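The fake drivers above lean on oslo's uuidsentinel fixture for node identity: each distinct attribute name yields one UUID that stays stable for the life of the process, so 'node_<nodename>' always maps back to the same value. For example:

from oslo_utils.fixture import uuidsentinel as uuids

first = getattr(uuids, 'node_fake-mini')
second = getattr(uuids, 'node_fake-mini')
assert first == second             # same sentinel name, same UUID
assert first != uuids.node_other   # different name, different UUID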
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index 994be56418..9693e405d3 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -869,7 +869,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
instance_cell.pcpuset)
cpuset_reserved = _get_reserved(
sibling_sets[1], pinning, num_cpu_reserved=num_cpu_reserved)
- if not pinning or (num_cpu_reserved and not cpuset_reserved):
+ if pinning is None or (num_cpu_reserved and not cpuset_reserved):
continue
break
@@ -895,7 +895,7 @@ def _pack_instance_onto_cores(host_cell, instance_cell,
cpuset_reserved = _get_reserved(
sibling_set, pinning, num_cpu_reserved=num_cpu_reserved)
- if not pinning or (num_cpu_reserved and not cpuset_reserved):
+ if pinning is None or (num_cpu_reserved and not cpuset_reserved):
return
LOG.debug('Selected cores for pinning: %s, in cell %s', pinning,
host_cell.id)
@@ -1213,10 +1213,13 @@ def _check_for_mem_encryption_requirement_conflicts(
"image %(image_name)s which has hw_mem_encryption property "
"explicitly set to %(image_val)s"
)
+ # image_meta.name is not set if image object represents root
+ # Cinder volume.
+ image_name = (image_meta.name if 'name' in image_meta else None)
data = {
'flavor_name': flavor.name,
'flavor_val': flavor_mem_enc_str,
- 'image_name': image_meta.name,
+ 'image_name': image_name,
'image_val': image_mem_enc,
}
raise exception.FlavorImageConflict(emsg % data)
@@ -1228,10 +1231,15 @@ def _check_mem_encryption_uses_uefi_image(requesters, image_meta):
emsg = _(
"Memory encryption requested by %(requesters)s but image "
- "%(image_name)s doesn't have 'hw_firmware_type' property set to 'uefi'"
+ "%(image_name)s doesn't have 'hw_firmware_type' property set to "
+ "'uefi' or volume-backed instance was requested"
)
+ # image_meta.name is not set if the image object represents a root Cinder
+ # volume. In this case FlavorImageConflict should still be raised, but
+ # image_meta.name can't be extracted.
+ image_name = (image_meta.name if 'name' in image_meta else None)
data = {'requesters': " and ".join(requesters),
- 'image_name': image_meta.name}
+ 'image_name': image_name}
raise exception.FlavorImageConflict(emsg % data)
@@ -1260,12 +1268,14 @@ def _check_mem_encryption_machine_type(image_meta, machine_type=None):
if mach_type is None:
return
+ # image_meta.name is not set if image object represents root Cinder volume.
+ image_name = (image_meta.name if 'name' in image_meta else None)
# Could be something like pc-q35-2.11 if a specific version of the
# machine type is required, so do substring matching.
if 'q35' not in mach_type:
raise exception.InvalidMachineType(
mtype=mach_type,
- image_id=image_meta.id, image_name=image_meta.name,
+ image_id=image_meta.id, image_name=image_name,
reason=_("q35 type is required for SEV to work"))
@@ -1337,6 +1347,48 @@ def _get_constraint_mappings_from_flavor(flavor, key, func):
return hw_numa_map or None
+def get_locked_memory_constraint(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> ty.Optional[bool]:
+ """Validate and return the requested locked memory.
+
+ :param flavor: ``nova.objects.Flavor`` instance
+ :param image_meta: ``nova.objects.ImageMeta`` instance
+ :raises: exception.LockMemoryForbidden if mem_page_size is not set
+ while a locked_memory value is provided in the image or flavor.
+ :returns: The locked memory flag requested.
+ """
+ mem_page_size_flavor, mem_page_size_image = _get_flavor_image_meta(
+ 'mem_page_size', flavor, image_meta)
+
+ locked_memory_flavor, locked_memory_image = _get_flavor_image_meta(
+ 'locked_memory', flavor, image_meta)
+
+ if locked_memory_flavor is not None:
+ # locked_memory_image is boolean type already
+ locked_memory_flavor = strutils.bool_from_string(locked_memory_flavor)
+
+ if locked_memory_image is not None and (
+ locked_memory_flavor != locked_memory_image
+ ):
+ # We don't allow different values in the flavor and the image
+ raise exception.FlavorImageLockedMemoryConflict(
+ image=locked_memory_image, flavor=locked_memory_flavor)
+
+ locked_memory = locked_memory_flavor
+
+ else:
+ locked_memory = locked_memory_image
+
+ if locked_memory and not (
+ mem_page_size_flavor or mem_page_size_image
+ ):
+ raise exception.LockMemoryForbidden()
+
+ return locked_memory
+
+
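get_locked_memory_constraint() merges the flavor and image settings (assuming the usual hw: extra spec and hw_ image property prefixes applied by _get_flavor_image_meta), rejects conflicting values, and refuses locked memory unless a page size is also requested. A rough usage sketch:

from nova import objects
from nova.virt import hardware

flavor = objects.Flavor(
    name='pinned', vcpus=2, memory_mb=2048,
    extra_specs={'hw:locked_memory': 'true', 'hw:mem_page_size': 'large'})
image_meta = objects.ImageMeta.from_dict({'properties': {}})

hardware.get_locked_memory_constraint(flavor, image_meta)  # -> True
# Dropping hw:mem_page_size (with no page size on the image either) would
# raise LockMemoryForbidden; an image property disagreeing with the flavor
# would raise FlavorImageLockedMemoryConflict.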
def _get_numa_cpu_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
@@ -1784,6 +1836,57 @@ def get_pci_numa_policy_constraint(
return policy
+def get_pmu_constraint(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> ty.Optional[bool]:
+ """Validate and return the requested vPMU configuration.
+
+ This one's a little different since we don't return False in the default
+ case: the PMU should only be configured if explicit configuration is
+ provided, otherwise we leave it to the hypervisor.
+
+ :param flavor: ``nova.objects.Flavor`` instance
+ :param image_meta: ``nova.objects.ImageMeta`` instance
+ :raises: nova.exception.FlavorImageConflict if a value is specified in both
+ the flavor and the image, but the values do not match
+ :raises: nova.exception.Invalid if a value or combination of values is
+ invalid
+ :returns: True if the virtual Performance Monitoring Unit must be enabled,
+ False if it should be disabled, or None if unconfigured.
+ """
+ flavor_value_str, image_value = _get_flavor_image_meta(
+ 'pmu', flavor, image_meta)
+
+ flavor_value = None
+ if flavor_value_str is not None:
+ flavor_value = strutils.bool_from_string(flavor_value_str)
+
+ if (
+ image_value is not None and
+ flavor_value is not None and
+ image_value != flavor_value
+ ):
+ msg = _(
+ "Flavor %(flavor_name)s has %(prefix)s:%(key)s extra spec "
+ "explicitly set to %(flavor_val)s, conflicting with image "
+ "%(image_name)s which has %(prefix)s_%(key)s explicitly set to "
+ "%(image_val)s."
+ )
+ raise exception.FlavorImageConflict(
+ msg % {
+ 'prefix': 'hw',
+ 'key': 'pmu',
+ 'flavor_name': flavor.name,
+ 'flavor_val': flavor_value,
+ 'image_name': image_meta.name,
+ 'image_val': image_value,
+ },
+ )
+
+ return flavor_value if flavor_value is not None else image_value
+
+
def get_vif_multiqueue_constraint(
flavor: 'objects.Flavor',
image_meta: 'objects.ImageMeta',
@@ -2056,6 +2159,8 @@ def numa_get_constraints(flavor, image_meta):
pagesize = _get_numa_pagesize_constraint(flavor, image_meta)
vpmems = get_vpmems(flavor)
+ get_locked_memory_constraint(flavor, image_meta)
+
# If 'hw:cpu_dedicated_mask' is not found in flavor extra specs, the
# 'dedicated_cpus' variable is None, while we hope it being an empty set.
dedicated_cpus = dedicated_cpus or set()
@@ -2200,6 +2305,7 @@ def _numa_cells_support_network_metadata(
def numa_fit_instance_to_host(
host_topology: 'objects.NUMATopology',
instance_topology: 'objects.InstanceNUMATopology',
+ provider_mapping: ty.Optional[ty.Dict[str, ty.List[str]]],
limits: ty.Optional['objects.NUMATopologyLimit'] = None,
pci_requests: ty.Optional['objects.InstancePCIRequests'] = None,
pci_stats: ty.Optional[stats.PciDeviceStats] = None,
@@ -2215,6 +2321,12 @@ def numa_fit_instance_to_host(
:param host_topology: objects.NUMATopology object to fit an
instance on
:param instance_topology: objects.InstanceNUMATopology to be fitted
+ :param provider_mapping: A dict keyed by RequestGroup requester_id,
+ to a list of resource provider UUIDs which provide resources
+ for that RequestGroup. If it is None then it signals that the
+ InstancePCIRequest objects already store a mapping per request.
+ I.e.: we are called _after_ the scheduler made allocations for this
+ request in placement.
:param limits: objects.NUMATopologyLimits that defines limits
:param pci_requests: instance pci_requests
:param pci_stats: pci_stats for the host
@@ -2244,21 +2356,99 @@ def numa_fit_instance_to_host(
host_cells = host_topology.cells
- # If PCI device(s) are not required, prefer host cells that don't have
- # devices attached. Presence of a given numa_node in a PCI pool is
- # indicative of a PCI device being associated with that node
- if not pci_requests and pci_stats:
- # TODO(stephenfin): pci_stats can't be None here but mypy can't figure
- # that out for some reason
- host_cells = sorted(host_cells, key=lambda cell: cell.id in [
- pool['numa_node'] for pool in pci_stats.pools]) # type: ignore
+ # We need to perform all optimizations only if the number of instance
+ # cells is less than the number of host cells. If they are equal, we'll
+ # use all cells and no sorting of the cells list is needed.
+ if len(host_topology) > len(instance_topology):
+ pack = CONF.compute.packing_host_numa_cells_allocation_strategy
+ # To balance NUMA cells usage based on several parameters,
+ # several sorts are performed on the host_cells list to move less used
+ # cells to the beginning of the host_cells list (when the pack variable
+ # is set to 'False'). When pack is set to 'True', the most used cells
+ # will be put at the beginning of the host_cells list.
+
+ # The first sort is based on memory usage. cell.avail_memory returns the
+ # free memory for the cell. Reverse the sorting to get cells with more
+ # free memory first when pack is 'False'.
+ host_cells = sorted(
+ host_cells,
+ reverse=not pack,
+ key=lambda cell: cell.avail_memory)
+
+ # The next sort is based on available dedicated or shared CPUs.
+ # cpu_policy is set to the same value in all cells so we use the
+ # first cell in the list (it exists if instance_topology is defined)
+ # to get the cpu_policy.
+ if instance_topology.cells[0].cpu_policy in (
+ None, fields.CPUAllocationPolicy.SHARED):
+ # sort based on used CPUs
+ host_cells = sorted(
+ host_cells,
+ reverse=pack,
+ key=lambda cell: cell.cpu_usage)
+ else:
+ # sort based on presence of pinned CPUs
+ host_cells = sorted(
+ host_cells,
+ reverse=not pack,
+ key=lambda cell: len(cell.free_pcpus))
+
+ # Perform sort only if pci_stats exists
+ if pci_stats:
+ # Create dict with numa cell id as key
+ # and total number of free pci devices as value.
+ total_pci_in_cell: ty.Dict[int, int] = {}
+ for pool in pci_stats.pools:
+ if pool['numa_node'] in list(total_pci_in_cell):
+ total_pci_in_cell[pool['numa_node']] += pool['count']
+ else:
+ total_pci_in_cell[pool['numa_node']] = pool['count']
+ # For backward compatibility we will always 'spread':
+ # we always move host cells with PCI to the beginning if PCI is
+ # requested by the VM and move host cells with PCI to the end of the
+ # list if PCI isn't requested by the VM.
+ if pci_requests:
+ host_cells = sorted(
+ host_cells,
+ reverse=True,
+ key=lambda cell: total_pci_in_cell.get(cell.id, 0))
+ else:
+ host_cells = sorted(
+ host_cells,
+ key=lambda cell: total_pci_in_cell.get(cell.id, 0))
+
+ # a set of host_cell.id, instance_cell.id pairs where we already checked
+ # that the instance cell does not fit
+ not_fit_cache = set()
+ # a set of host_cell.id, instance_cell.id pairs where we already checked
+ # that the instance cell does fit
+ fit_cache = set()
for host_cell_perm in itertools.permutations(
host_cells, len(instance_topology)):
chosen_instance_cells: ty.List['objects.InstanceNUMACell'] = []
chosen_host_cells: ty.List['objects.NUMACell'] = []
for host_cell, instance_cell in zip(
host_cell_perm, instance_topology.cells):
+
+ cell_pair = (host_cell.id, instance_cell.id)
+
+ # if we already checked this pair and they did not fit, then there is
+ # no need to check again; just move on to the next permutation
+ if cell_pair in not_fit_cache:
+ break
+
+ # if we already checked this pair and they fit before, then they
+ # will fit now too. So no need to check again. Just continue with
+ # the next cell pair in the permutation.
+ if cell_pair in fit_cache:
+ chosen_host_cells.append(host_cell)
+ # Normally this would have been done by _numa_fit_instance_cell
+ # but we optimized that out here based on the cache.
+ instance_cell.id = host_cell.id
+ chosen_instance_cells.append(instance_cell)
+ continue
+
try:
cpuset_reserved = 0
if (instance_topology.emulator_threads_isolated and
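The new host-cell ordering above boils down to: when the instance needs fewer NUMA cells than the host offers, sort the host cells by free memory, then by CPU usage (or free pinnable CPUs), then by the number of available PCI devices, with CONF.compute.packing_host_numa_cells_allocation_strategy flipping between packing onto the busiest cells and spreading onto the emptiest ones. A toy illustration of the same idea for the spread, no-PCI-requested case, using plain dicts instead of NUMACell objects:

def order_host_cells(cells, pack=False):
    # cells: [{'id': 0, 'avail_memory': ..., 'cpu_usage': ..., 'pci': ...}]
    # Python's sort is stable, so the last sort applied is the primary key.
    cells = sorted(cells, reverse=not pack, key=lambda c: c['avail_memory'])
    cells = sorted(cells, reverse=pack, key=lambda c: c['cpu_usage'])
    # With no PCI devices requested, cells that do have PCI devices are
    # pushed towards the end of the list.
    cells = sorted(cells, key=lambda c: c['pci'])
    return cells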
@@ -2275,17 +2465,24 @@ def numa_fit_instance_to_host(
# This exception will been raised if instance cell's
# custom pagesize is not supported with host cell in
# _numa_cell_supports_pagesize_request function.
+
+ # cache the result
+ not_fit_cache.add(cell_pair)
break
if got_cell is None:
+ # cache the result
+ not_fit_cache.add(cell_pair)
break
chosen_host_cells.append(host_cell)
chosen_instance_cells.append(got_cell)
+ # cache the result
+ fit_cache.add(cell_pair)
if len(chosen_instance_cells) != len(host_cell_perm):
continue
if pci_requests and pci_stats and not pci_stats.support_requests(
- pci_requests, chosen_instance_cells):
+ pci_requests, provider_mapping, chosen_instance_cells):
continue
if network_metadata and not _numa_cells_support_network_metadata(
@@ -2386,6 +2583,7 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
cpuset=host_cell.cpuset,
pcpuset=host_cell.pcpuset,
memory=host_cell.memory,
+ socket=host_cell.socket,
cpu_usage=0,
memory_usage=0,
mempages=host_cell.mempages,
@@ -2410,8 +2608,10 @@ def numa_usage_from_instance_numa(host_topology, instance_topology,
None, fields.CPUAllocationPolicy.SHARED,
):
continue
-
- pinned_cpus = set(instance_cell.cpu_pinning.values())
+ if instance_cell.cpu_pinning:
+ pinned_cpus = set(instance_cell.cpu_pinning.values())
+ else:
+ pinned_cpus = set()
if instance_cell.cpuset_reserved:
pinned_cpus |= instance_cell.cpuset_reserved
@@ -2462,3 +2662,73 @@ def check_hw_rescue_props(image_meta):
"""
hw_rescue_props = ['hw_rescue_device', 'hw_rescue_bus']
return any(key in image_meta.properties for key in hw_rescue_props)
+
+
+def get_ephemeral_encryption_constraint(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> bool:
+ """Get the ephemeral encryption constrants based on the flavor and image.
+
+ :param flavor: an objects.Flavor object
+ :param image_meta: an objects.ImageMeta object
+ :raises: nova.exception.FlavorImageConflict
+ :returns: boolean indicating whether encryption of guest ephemeral storage
+ was requested
+ """
+ flavor_eph_encryption_str, image_eph_encryption = _get_flavor_image_meta(
+ 'ephemeral_encryption', flavor, image_meta)
+
+ flavor_eph_encryption = None
+ if flavor_eph_encryption_str is not None:
+ flavor_eph_encryption = strutils.bool_from_string(
+ flavor_eph_encryption_str)
+
+ # Check for conflicts between explicit requirements regarding
+ # ephemeral encryption.
+ # TODO(layrwood): make _check_for_mem_encryption_requirement_conflicts
+ # generic and reuse here
+ if (
+ flavor_eph_encryption is not None and
+ image_eph_encryption is not None and
+ flavor_eph_encryption != image_eph_encryption
+ ):
+ emsg = _(
+ "Flavor %(flavor_name)s has hw:ephemeral_encryption extra spec "
+ "explicitly set to %(flavor_val)s, conflicting with "
+ "image %(image_name)s which has hw_eph_encryption property "
+ "explicitly set to %(image_val)s"
+ )
+ data = {
+ 'flavor_name': flavor.name,
+ 'flavor_val': flavor_eph_encryption_str,
+ 'image_name': image_meta.name,
+ 'image_val': image_eph_encryption,
+ }
+ raise exception.FlavorImageConflict(emsg % data)
+
+ return flavor_eph_encryption or image_eph_encryption
+
+
+def get_ephemeral_encryption_format(
+ flavor: 'objects.Flavor',
+ image_meta: 'objects.ImageMeta',
+) -> ty.Optional[str]:
+ """Get the ephemeral encryption format.
+
+ :param flavor: an objects.Flavor object
+ :param image_meta: an objects.ImageMeta object
+ :raises: nova.exception.FlavorImageConflict or nova.exception.Invalid
+ :returns: BlockDeviceEncryptionFormatType or None
+ """
+ eph_format = _get_unique_flavor_image_meta(
+ 'ephemeral_encryption_format', flavor, image_meta)
+ if eph_format:
+ if eph_format not in fields.BlockDeviceEncryptionFormatType.ALL:
+ allowed = fields.BlockDeviceEncryptionFormatType.ALL
+ raise exception.Invalid(
+ f"Invalid ephemeral encryption format {eph_format}. "
+ f"Allowed values: {', '.join(allowed)}"
+ )
+ return eph_format
+ return None
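Both ephemeral-encryption helpers follow the flavor/image merging pattern used elsewhere in this module (assuming the hw: extra spec and hw_ image property prefixes added by _get_flavor_image_meta): the boolean from flavor and image must agree, and the format must be one of the BlockDeviceEncryptionFormatType values. A rough usage sketch:

from nova import objects
from nova.virt import hardware

flavor = objects.Flavor(
    name='encrypted-eph', vcpus=2, memory_mb=2048,
    extra_specs={'hw:ephemeral_encryption': 'true',
                 'hw:ephemeral_encryption_format': 'luks'})
image_meta = objects.ImageMeta.from_dict({'properties': {}})

hardware.get_ephemeral_encryption_constraint(flavor, image_meta)  # -> True
hardware.get_ephemeral_encryption_format(flavor, image_meta)      # -> 'luks'
# A format outside fields.BlockDeviceEncryptionFormatType.ALL raises
# exception.Invalid, and a flavor/image disagreement on the boolean raises
# FlavorImageConflict.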
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 350e59e295..ba18c85cf7 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -103,6 +103,7 @@ class HyperVDriver(driver.ComputeDriver):
"supports_pcpus": False,
"supports_accelerators": False,
"supports_secure_boot": True,
+ "supports_remote_managed_ports": False,
# Supported image types
"supports_image_type_vhd": True,
@@ -145,6 +146,14 @@ class HyperVDriver(driver.ComputeDriver):
'in Rocky.')
def init_host(self, host):
+ LOG.warning(
+ 'The hyperv driver is not tested by the OpenStack project nor '
+ 'does it have clear maintainer(s) and thus its quality can not be '
+ 'ensured. It should be considered experimental and may be removed '
+ 'in a future release. If you are using the driver in production '
+ 'please let us know via the openstack-discuss mailing list.'
+ )
+
self._serialconsoleops.start_console_handlers()
event_handler = eventhandler.InstanceEventHandler(
state_change_callback=self.emit_event)
diff --git a/nova/virt/hyperv/serialproxy.py b/nova/virt/hyperv/serialproxy.py
index 4f8a99dcf6..d12fb8bf6e 100644
--- a/nova/virt/hyperv/serialproxy.py
+++ b/nova/virt/hyperv/serialproxy.py
@@ -46,7 +46,7 @@ class SerialProxy(threading.Thread):
def __init__(self, instance_name, addr, port, input_queue,
output_queue, client_connected):
super(SerialProxy, self).__init__()
- self.setDaemon(True)
+ self.daemon = True
self._instance_name = instance_name
self._addr = addr
@@ -99,7 +99,7 @@ class SerialProxy(threading.Thread):
workers = []
for job in [self._get_data, self._send_data]:
worker = threading.Thread(target=job)
- worker.setDaemon(True)
+ worker.daemon = True
worker.start()
workers.append(worker)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 5358f3766a..f13c872290 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -110,6 +110,34 @@ def get_info(context, image_href):
return IMAGE_API.get(context, image_href)
+def check_vmdk_image(image_id, data):
+ # Check some rules about VMDK files. Specifically we want to make
+ # sure that the "create-type" of the image is one that we allow.
+ # Some types of VMDK files can reference files outside the disk
+ # image and we do not want to allow those for obvious reasons.
+
+ types = CONF.compute.vmdk_allowed_types
+
+ if not len(types):
+ LOG.warning('Refusing to allow VMDK image as vmdk_allowed_'
+ 'types is empty')
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ try:
+ create_type = data.format_specific['data']['create-type']
+ except KeyError:
+ msg = _('Unable to determine VMDK create-type')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+ if create_type not in CONF.compute.vmdk_allowed_types:
+ LOG.warning('Refusing to process VMDK file with create-type of %r '
+ 'which is not in allowed set of: %s', create_type,
+ ','.join(CONF.compute.vmdk_allowed_types))
+ msg = _('Invalid VMDK create-type specified')
+ raise exception.ImageUnacceptable(image_id=image_id, reason=msg)
+
+
def fetch_to_raw(context, image_href, path, trusted_certs=None):
path_tmp = "%s.part" % path
fetch(context, image_href, path_tmp, trusted_certs)
@@ -129,6 +157,9 @@ def fetch_to_raw(context, image_href, path, trusted_certs=None):
reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") %
{'fmt': fmt, 'backing_file': backing_file}))
+ if fmt == 'vmdk':
+ check_vmdk_image(image_href, data)
+
if fmt != "raw" and CONF.force_raw_images:
staged = "%s.converted" % path
LOG.debug("%s was %s, converting to raw", image_href, fmt)
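check_vmdk_image() rejects any VMDK whose create-type is not listed in CONF.compute.vmdk_allowed_types, because some create-types (flat or raw-device-map descriptors, for instance) can reference files outside the uploaded image. The check keys off the 'format-specific' data reported by qemu-img; a small illustration with a faked inspection result (the allowed list shown is an assumption, not necessarily the shipped default):

from collections import namedtuple

# Shape of the inspection data the check relies on, mirroring the
# "format-specific" section of qemu-img info --output=json.
FakeData = namedtuple('FakeData', ['format_specific'])
data = FakeData(format_specific={
    'type': 'vmdk', 'data': {'create-type': 'vmfsRawDeviceMap'}})

allowed = ['streamOptimized', 'monolithicSparse']   # assumed allowed set
create_type = data.format_specific['data']['create-type']
print(create_type in allowed)   # False -> ImageUnacceptable would be raised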
diff --git a/nova/virt/interfaces.template b/nova/virt/interfaces.template
index ee78a1fc60..453ac43a8f 100644
--- a/nova/virt/interfaces.template
+++ b/nova/virt/interfaces.template
@@ -9,6 +9,7 @@ iface lo inet loopback
{% for ifc in interfaces %}
auto {{ ifc.name }}
+{% if ifc.address %}
iface {{ ifc.name }} inet static
hwaddress ether {{ ifc.hwaddress }}
address {{ ifc.address }}
@@ -20,11 +21,11 @@ iface {{ ifc.name }} inet static
{% if ifc.dns %}
dns-nameservers {{ ifc.dns }}
{% endif %}
+{% endif %}
{% if use_ipv6 %}
-{% if libvirt_virt_type == 'lxc' %}
{% if ifc.address_v6 %}
+{% if libvirt_virt_type == 'lxc' %}
post-up ip -6 addr add {{ ifc.address_v6 }}/{{ifc.netmask_v6 }} dev ${IFACE}
-{% endif %}
{% if ifc.gateway_v6 %}
post-up ip -6 route add default via {{ ifc.gateway_v6 }} dev ${IFACE}
{% endif %}
@@ -41,4 +42,5 @@ iface {{ ifc.name }} inet6 static
{% endif %}
{% endif %}
{% endif %}
+{% endif %}
{% endfor %}
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index 2a4fd39fda..77fefb81ea 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -20,13 +20,13 @@ bare metal resources.
"""
import base64
-from distutils import version
import gzip
import shutil
import tempfile
import time
from urllib import parse as urlparse
+import microversion_parse
from openstack import exceptions as sdk_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
@@ -164,6 +164,7 @@ class IronicDriver(virt_driver.ComputeDriver):
"supports_trusted_certs": False,
"supports_pcpus": False,
"supports_accelerators": False,
+ "supports_remote_managed_ports": False,
# Image type support flags
"supports_image_type_aki": False,
@@ -396,6 +397,18 @@ class IronicDriver(virt_driver.ComputeDriver):
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance.uuid)
node = self._get_node(node_uuid)
+
+ # It's possible this node has just moved from deleting
+ # to cleaning. Placement will update the inventory
+ # as all reserved, but this instance might have got here
+ # before that happened and after the previous allocation
+ # got deleted. We trigger a re-schedule to another node.
+ if (self._node_resources_used(node) or
+ self._node_resources_unavailable(node)):
+ msg = "Chosen ironic node %s is not available" % node_uuid
+ LOG.info(msg, instance=instance)
+ raise exception.ComputeResourcesUnavailable(reason=msg)
+
self._set_instance_id(node, instance)
def failed_spawn_cleanup(self, instance):
@@ -741,7 +754,7 @@ class IronicDriver(virt_driver.ComputeDriver):
# baremetal nodes. Depending on the version of Ironic,
# this can be as long as 2-10 seconds per every thousand
# nodes, and this call may retrieve all nodes in a deployment,
- # depending on if any filter paramters are applied.
+ # depending on if any filter parameters are applied.
return self._get_node_list(fields=_NODE_FIELDS, **kwargs)
# NOTE(jroll) if partition_key is set, we need to limit nodes that
@@ -826,6 +839,13 @@ class IronicDriver(virt_driver.ComputeDriver):
return node_uuids
+ def get_nodenames_by_uuid(self, refresh=False):
+ nodes = self.get_available_nodes(refresh=refresh)
+ # We use the uuid for compute_node.uuid and
+ # compute_node.hypervisor_hostname, so the dict keys and values are
+ # the same.
+ return dict(zip(nodes, nodes))
+
def update_provider_tree(self, provider_tree, nodename, allocations=None):
"""Update a ProviderTree object with current resource provider and
inventory information.
@@ -873,15 +893,25 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
# nodename is the ironic node's UUID.
node = self._node_from_cache(nodename)
+
reserved = False
- if (not self._node_resources_used(node) and
- self._node_resources_unavailable(node)):
- LOG.debug('Node %(node)s is not ready for a deployment, '
- 'reporting resources as reserved for it. Node\'s '
- 'provision state is %(prov)s, power state is '
- '%(power)s and maintenance is %(maint)s.',
- {'node': node.uuid, 'prov': node.provision_state,
- 'power': node.power_state, 'maint': node.maintenance})
+ if self._node_resources_unavailable(node):
+            # Operators might mark a node as in maintenance,
+            # even when an instance is on the node;
+            # either way, let's mark this as reserved.
+ reserved = True
+
+ if (self._node_resources_used(node) and
+ not CONF.workarounds.skip_reserve_in_use_ironic_nodes):
+            # Mark resources as reserved once we have
+            # an instance here.
+            # When the allocation is deleted, most likely
+            # automatic cleaning will start, so we keep the node
+            # reserved until it becomes available again.
+            # In the case without automatic cleaning, once
+            # the allocation is removed in placement the node
+            # also stays reserved until we notice on
+            # the next periodic that it's actually available.
reserved = True
info = self._node_resource(node)
@@ -1629,7 +1659,8 @@ class IronicDriver(virt_driver.ComputeDriver):
admin_password, allocations, bdms, detach_block_devices,
attach_block_devices, network_info=None,
evacuate=False, block_device_info=None,
- preserve_ephemeral=False, accel_uuids=None):
+ preserve_ephemeral=False, accel_uuids=None,
+ reimage_boot_volume=False):
"""Rebuild/redeploy an instance.
This version of rebuild() allows for supporting the option to
@@ -1670,7 +1701,13 @@ class IronicDriver(virt_driver.ComputeDriver):
:param preserve_ephemeral: Boolean value; if True the ephemeral
must be preserved on rebuild.
:param accel_uuids: Accelerator UUIDs. Ignored by this driver.
+ :param reimage_boot_volume: Re-image the volume backed instance.
"""
+ if reimage_boot_volume:
+ raise exception.NovaException(
+ _("Ironic doesn't support rebuilding volume backed "
+ "instances."))
+
LOG.debug('Rebuild called for instance', instance=instance)
instance.task_state = task_states.REBUILD_SPAWNING
@@ -2056,7 +2093,7 @@ class IronicDriver(virt_driver.ComputeDriver):
return None
def _can_send_version(self, min_version=None, max_version=None):
- """Validate if the suppplied version is available in the API."""
+ """Validate if the supplied version is available in the API."""
# NOTE(TheJulia): This will effectively just be a pass if no
         # version negotiation has occurred, since there is no way for
# us to know without explicitly otherwise requesting that
@@ -2066,13 +2103,17 @@ class IronicDriver(virt_driver.ComputeDriver):
if self.ironicclient.is_api_version_negotiated:
current_api_version = self.ironicclient.current_api_version
if (min_version and
- version.StrictVersion(current_api_version) <
- version.StrictVersion(min_version)):
+ microversion_parse.parse_version_string(
+ current_api_version) <
+ microversion_parse.parse_version_string(
+ min_version)):
raise exception.IronicAPIVersionNotAvailable(
version=min_version)
if (max_version and
- version.StrictVersion(current_api_version) >
- version.StrictVersion(max_version)):
+ microversion_parse.parse_version_string(
+ current_api_version) >
+ microversion_parse.parse_version_string(
+ max_version)):
raise exception.IronicAPIVersionNotAvailable(
version=max_version)
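microversion_parse compares versions numerically, which is the property the removed distutils StrictVersion calls relied on. A small illustration of the comparison now performed in _can_send_version():

import microversion_parse

current = microversion_parse.parse_version_string('1.46')
minimum = microversion_parse.parse_version_string('1.9')
# Parsed versions compare as (major, minor) tuples, so 1.46 > 1.9,
# unlike a plain string comparison where '1.46' < '1.9'.
assert current > minimum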
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index f86a9c461c..4efc6fbaeb 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -73,6 +73,7 @@ import itertools
import operator
from oslo_config import cfg
+from oslo_serialization import jsonutils
from nova import block_device
@@ -400,6 +401,16 @@ def get_info_from_bdm(instance, virt_type, image_meta, bdm,
# NOTE(ndipanov): libvirt starts ordering from 1, not 0
bdm_info['boot_index'] = str(boot_index + 1)
+    # If the device is encrypted, pass through the secret, format and options
+ if bdm.get('encrypted'):
+ bdm_info['encrypted'] = bdm.get('encrypted')
+ bdm_info['encryption_secret_uuid'] = bdm.get('encryption_secret_uuid')
+ bdm_info['encryption_format'] = bdm.get('encryption_format')
+ encryption_options = bdm.get('encryption_options')
+ if encryption_options:
+ bdm_info['encryption_options'] = jsonutils.loads(
+ encryption_options)
+
return bdm_info
@@ -414,13 +425,7 @@ def get_device_name(bdm):
def get_root_info(instance, virt_type, image_meta, root_bdm,
disk_bus, cdrom_bus, root_device_name=None):
- # NOTE (ndipanov): This is a hack to avoid considering an image
- # BDM with local target, as we don't support them
- # yet. Only applies when passed non-driver format
- no_root_bdm = (not root_bdm or (
- root_bdm.get('source_type') == 'image' and
- root_bdm.get('destination_type') == 'local'))
- if no_root_bdm:
+ if root_bdm is None:
# NOTE(mriedem): In case the image_meta object was constructed from
# an empty dict, like in the case of evacuate, we have to first check
# if disk_format is set on the ImageMeta object.
@@ -452,10 +457,13 @@ def default_device_names(virt_type, context, instance, block_device_info,
image_meta):
get_disk_info(virt_type, instance, image_meta, block_device_info)
- for driver_bdm in itertools.chain(block_device_info['ephemerals'],
- [block_device_info['swap']] if
- block_device_info['swap'] else [],
- block_device_info['block_device_mapping']):
+ for driver_bdm in itertools.chain(
+ block_device_info['image'],
+ block_device_info['ephemerals'],
+ [block_device_info['swap']] if
+ block_device_info['swap'] else [],
+ block_device_info['block_device_mapping']
+ ):
driver_bdm.save()
@@ -563,41 +571,48 @@ def _get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, image_meta,
:returns: Disk mapping for the given instance.
"""
mapping = {}
- pre_assigned_device_names = \
- [block_device.strip_dev(get_device_name(bdm)) for bdm in itertools.chain(
+
+ driver_bdms = itertools.chain(
+ driver.block_device_info_get_image(block_device_info),
driver.block_device_info_get_ephemerals(block_device_info),
[driver.block_device_info_get_swap(block_device_info)],
- driver.block_device_info_get_mapping(block_device_info))
- if get_device_name(bdm)]
-
- # NOTE (ndipanov): root_bdm can be None when we boot from image
- # as there is no driver representation of local targeted images
- # and they will not be in block_device_info list.
- root_bdm = block_device.get_root_bdm(
- driver.block_device_info_get_mapping(block_device_info))
+ driver.block_device_info_get_mapping(block_device_info)
+ )
+ pre_assigned_device_names = [
+ block_device.strip_dev(get_device_name(bdm))
+ for bdm in driver_bdms if get_device_name(bdm)
+ ]
+
+ # Try to find the root driver bdm, either an image based disk or volume
+ root_bdm = None
+ if any(driver.block_device_info_get_image(block_device_info)):
+ root_bdm = driver.block_device_info_get_image(block_device_info)[0]
+ elif driver.block_device_info_get_mapping(block_device_info):
+ root_bdm = block_device.get_root_bdm(
+ driver.block_device_info_get_mapping(block_device_info))
root_device_name = block_device.strip_dev(
driver.block_device_info_get_root_device(block_device_info))
root_info = get_root_info(
instance, virt_type, image_meta, root_bdm,
disk_bus, cdrom_bus, root_device_name)
-
mapping['root'] = root_info
- # NOTE (ndipanov): This implicitly relies on image->local BDMs not
- # being considered in the driver layer - so missing
- # bdm with boot_index 0 means - use image, unless it was
- # overridden. This can happen when using legacy syntax and
- # no root_device_name is set on the instance.
- if not root_bdm and not block_device.volume_in_mapping(root_info['dev'],
- block_device_info):
- mapping['disk'] = root_info
- elif root_bdm:
- # NOTE (ft): If device name is not set in root bdm, root_info has a
- # generated one. We have to copy device name to root bdm to prevent its
- # second generation in loop through bdms. If device name is already
- # set, nothing is changed.
+
+ # NOTE (ft): If device name is not set in root bdm, root_info has a
+ # generated one. We have to copy device name to root bdm to prevent its
+ # second generation in loop through bdms. If device name is already
+ # set, nothing is changed.
+    # NOTE(melwitt): root_bdm can be None in the case of an ISO root device,
+    # for example.
+ if root_bdm:
update_bdm(root_bdm, root_info)
+ if (
+ driver.block_device_info_get_image(block_device_info) or
+ root_bdm is None
+ ):
+ mapping['disk'] = root_info
+
default_eph = get_default_ephemeral_info(instance, disk_bus,
block_device_info, mapping)
if default_eph:
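default_device_names() now also walks the image-backed root disk, so every driver BDM gets a device name persisted. An illustrative shape of the block_device_info dict that the chain above iterates; the values are placeholders, not real data.

import itertools

block_device_info = {
    'image': [{'device_name': '/dev/vda', 'boot_index': 0}],  # newly chained
    'ephemerals': [],
    'swap': None,
    'block_device_mapping': [],
}
driver_bdms = list(itertools.chain(
    block_device_info['image'],
    block_device_info['ephemerals'],
    [block_device_info['swap']] if block_device_info['swap'] else [],
    block_device_info['block_device_mapping']))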
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index 7129933f34..231283b8dd 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -24,6 +24,7 @@ helpers for populating up config object instances.
"""
import time
+import typing as ty
from collections import OrderedDict
from lxml import etree
@@ -32,6 +33,7 @@ from oslo_utils import units
from nova import exception
from nova.i18n import _
+from nova.objects import fields
from nova.pci import utils as pci_utils
from nova.virt import hardware
@@ -45,9 +47,12 @@ class LibvirtConfigObject(object):
def __init__(self, **kwargs):
super(LibvirtConfigObject, self).__init__()
- self.root_name = kwargs.get("root_name")
- self.ns_prefix = kwargs.get('ns_prefix')
- self.ns_uri = kwargs.get('ns_uri')
+ self.root_name = kwargs.pop("root_name")
+ self.ns_prefix = kwargs.pop("ns_prefix", None)
+ self.ns_uri = kwargs.pop("ns_uri", None)
+
+ # handle programmer error
+ assert not kwargs
def _new_node(self, node_name, **kwargs):
if self.ns_uri is None:
@@ -63,9 +68,6 @@ class LibvirtConfigObject(object):
child.text = str(value)
return child
- def get_yes_no_str(self, value):
- return 'yes' if value else 'no'
-
def format_dom(self):
return self._new_node(self.root_name)
@@ -84,6 +86,25 @@ class LibvirtConfigObject(object):
pretty_print=pretty_print)
return xml_str
+ @classmethod
+ def parse_on_off_str(self, value: ty.Optional[str]) -> bool:
+ if value is not None and value not in ('on', 'off'):
+ msg = _(
+ "Element should contain either 'on' or 'off'; "
+ "found: '%(value)s'"
+ )
+ raise exception.InvalidInput(msg % {'value': value})
+
+ return value == 'on'
+
+ @classmethod
+ def get_yes_no_str(self, value: bool) -> str:
+ return 'yes' if value else 'no'
+
+ @classmethod
+ def get_on_off_str(self, value: bool) -> str:
+ return 'on' if value else 'off'
+
def __repr__(self):
return self.to_xml(pretty_print=False)
@@ -1532,7 +1553,8 @@ class LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice):
class LibvirtConfigGuestDiskEncryptionSecret(LibvirtConfigObject):
def __init__(self, **kwargs):
- super(LibvirtConfigGuestDiskEncryptionSecret, self).__init__(**kwargs)
+ super(LibvirtConfigGuestDiskEncryptionSecret, self).__init__(
+ root_name='diskencryptionsecret', **kwargs)
self.type = None
self.uuid = None
@@ -1552,7 +1574,8 @@ class LibvirtConfigGuestDiskEncryption(LibvirtConfigObject):
"""
def __init__(self, **kwargs):
- super(LibvirtConfigGuestDiskEncryption, self).__init__(**kwargs)
+ super(LibvirtConfigGuestDiskEncryption, self).__init__(
+ root_name='diskencryption', **kwargs)
self.format = None
self.secret = None
@@ -1575,7 +1598,8 @@ class LibvirtConfigGuestDiskEncryption(LibvirtConfigObject):
class LibvirtConfigGuestDiskMirror(LibvirtConfigObject):
def __init__(self, **kwargs):
- super(LibvirtConfigGuestDiskMirror, self).__init__(**kwargs)
+ super(LibvirtConfigGuestDiskMirror, self).__init__(
+ root_name='diskmirror', **kwargs)
self.ready = None
def parse_dom(self, xmldoc):
@@ -1585,6 +1609,8 @@ class LibvirtConfigGuestDiskMirror(LibvirtConfigObject):
class LibvirtConfigGuestIDMap(LibvirtConfigObject):
def __init__(self, **kwargs):
+ if 'root_name' not in kwargs:
+ kwargs['root_name'] = 'id'
super(LibvirtConfigGuestIDMap, self).__init__(**kwargs)
self.start = 0
self.target = 0
@@ -1912,6 +1938,8 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
if self.net_type == 'direct':
self.source_dev = c.get('dev')
self.source_mode = c.get('mode', 'private')
+ elif self.net_type == 'vdpa':
+ self.source_dev = c.get('dev')
elif self.net_type == 'vhostuser':
self.vhostuser_type = c.get('type')
self.vhostuser_mode = c.get('mode')
@@ -2019,6 +2047,12 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
self.keymap = None
self.listen = None
+ self.image_compression = None
+ self.jpeg_compression = None
+ self.zlib_compression = None
+ self.playback_compression = None
+ self.streaming_mode = None
+
def format_dom(self):
dev = super(LibvirtConfigGuestGraphics, self).format_dom()
@@ -2029,6 +2063,24 @@ class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice):
if self.listen:
dev.set("listen", self.listen)
+ if self.type == "spice":
+ if self.image_compression is not None:
+ dev.append(etree.Element(
+ 'image', compression=self.image_compression))
+ if self.jpeg_compression is not None:
+ dev.append(etree.Element(
+ 'jpeg', compression=self.jpeg_compression))
+ if self.zlib_compression is not None:
+ dev.append(etree.Element(
+ 'zlib', compression=self.zlib_compression))
+ if self.playback_compression is not None:
+ dev.append(etree.Element(
+ 'playback', compression=self.get_on_off_str(
+ self.playback_compression)))
+ if self.streaming_mode is not None:
+ dev.append(etree.Element(
+ 'streaming', mode=self.streaming_mode))
+
return dev
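For reference, the SPICE sub-elements emitted above map onto libvirt's <graphics type='spice'> schema. A standalone lxml sketch with made-up values, showing the on/off conversion applied to playback compression:

from lxml import etree

dev = etree.Element('graphics', type='spice')
dev.append(etree.Element('image', compression='auto_glz'))
dev.append(etree.Element('playback', compression='on'))   # get_on_off_str(True)
dev.append(etree.Element('streaming', mode='filter'))
print(etree.tostring(dev, pretty_print=True).decode())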
@@ -2168,13 +2220,14 @@ class LibvirtConfigGuestPCIeRootPortController(LibvirtConfigGuestController):
class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice):
def __init__(self, **kwargs):
- super(LibvirtConfigGuestHostdev, self).\
- __init__(root_name="hostdev", **kwargs)
- self.mode = kwargs.get('mode')
- self.type = kwargs.get('type')
+ super(LibvirtConfigGuestHostdev, self).__init__(
+ root_name="hostdev", **kwargs,
+ )
+ self.mode = None
+ self.type = None
# managed attribute is only used by PCI devices but mediated devices
# need to say managed=no
- self.managed = kwargs.get('managed', 'yes')
+ self.managed = "yes"
def format_dom(self):
dev = super(LibvirtConfigGuestHostdev, self).format_dom()
@@ -2194,8 +2247,11 @@ class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice):
class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
super(LibvirtConfigGuestHostdevPCI, self).\
- __init__(mode='subsystem', type='pci',
- **kwargs)
+ __init__(**kwargs)
+
+ self.mode = 'subsystem'
+ self.type = 'pci'
+
# These are returned from libvirt as hexadecimal strings with 0x prefix
# even if they have a different meaningful range: domain 16 bit,
# bus 8 bit, slot 5 bit, and function 3 bit
@@ -2252,10 +2308,14 @@ class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev):
class LibvirtConfigGuestHostdevMDEV(LibvirtConfigGuestHostdev):
def __init__(self, **kwargs):
- super(LibvirtConfigGuestHostdevMDEV, self).__init__(
- mode='subsystem', type='mdev', managed='no', **kwargs)
+ super(LibvirtConfigGuestHostdevMDEV, self).__init__(**kwargs)
+
+ self.mode = 'subsystem'
+ self.type = 'mdev'
+ self.managed = 'no'
+
# model attribute is only supported by mediated devices
- self.model = kwargs.get('model', 'vfio-pci')
+ self.model = 'vfio-pci'
self.uuid = None
def format_dom(self):
@@ -2688,6 +2748,19 @@ class LibvirtConfigGuestFeatureKvmHidden(LibvirtConfigGuestFeature):
return root
+class LibvirtConfigGuestFeatureSMM(LibvirtConfigGuestFeature):
+
+ def __init__(self, **kwargs):
+ super(LibvirtConfigGuestFeatureSMM, self).__init__("smm", **kwargs)
+
+ def format_dom(self):
+ root = super(LibvirtConfigGuestFeatureSMM, self).format_dom()
+
+ root.append(etree.Element("smm", state="on"))
+
+ return root
+
+
class LibvirtConfigGuestFeaturePMU(LibvirtConfigGuestFeature):
def __init__(self, state, **kwargs):
@@ -2704,6 +2777,18 @@ class LibvirtConfigGuestFeaturePMU(LibvirtConfigGuestFeature):
return root
+class LibvirtConfigGuestFeatureIOAPIC(LibvirtConfigGuestFeature):
+
+ def __init__(self, **kwargs):
+ super().__init__("ioapic", **kwargs)
+ self.driver = "qemu"
+
+ def format_dom(self):
+ root = super().format_dom()
+ root.set('driver', self.driver)
+ return root
+
+
class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
# QEMU requires at least this value to be set
@@ -2719,6 +2804,15 @@ class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
self.vapic = False
self.spinlocks = False
self.spinlock_retries = self.MIN_SPINLOCK_RETRIES
+ self.vpindex = False
+ self.runtime = False
+ self.synic = False
+ self.reset = False
+ self.frequencies = False
+ self.reenlightenment = False
+ self.tlbflush = False
+ self.ipi = False
+ self.evmcs = False
self.vendorid_spoof = False
self.vendorid = self.SPOOFED_VENDOR_ID
@@ -2735,6 +2829,24 @@ class LibvirtConfigGuestFeatureHyperV(LibvirtConfigGuestFeature):
if self.vendorid_spoof:
root.append(etree.Element("vendor_id", state="on",
value=self.vendorid))
+ if self.vpindex:
+ root.append(etree.Element('vpindex', state='on'))
+ if self.runtime:
+ root.append(etree.Element('runtime', state='on'))
+ if self.synic:
+ root.append(etree.Element('synic', state='on'))
+ if self.reset:
+ root.append(etree.Element('reset', state='on'))
+ if self.frequencies:
+ root.append(etree.Element('frequencies', state='on'))
+ if self.reenlightenment:
+ root.append(etree.Element('reenlightenment', state='on'))
+ if self.tlbflush:
+ root.append(etree.Element('tlbflush', state='on'))
+ if self.ipi:
+ root.append(etree.Element('ipi', state='on'))
+ if self.evmcs:
+ root.append(etree.Element('evmcs', state='on'))
return root
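Each new Hyper-V enlightenment simply appends a child element with state='on' under <hyperv>. A hedged sketch of the resulting fragment for a handful of the flags:

from lxml import etree

hyperv = etree.Element('hyperv')
for flag in ('vpindex', 'runtime', 'synic', 'tlbflush', 'ipi'):
    hyperv.append(etree.Element(flag, state='on'))
print(etree.tostring(hyperv, pretty_print=True).decode())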
@@ -2810,6 +2922,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self.os_init_path = None
self.os_boot_dev = []
self.os_smbios = None
+ self.os_arch = None
self.os_mach_type = None
self.os_bootmenu = False
self.devices = []
@@ -2852,6 +2965,8 @@ class LibvirtConfigGuest(LibvirtConfigObject):
os.set("firmware", self.os_firmware)
type_node = self._text_node("type", self.os_type)
+ if self.os_arch is not None:
+ type_node.set("arch", self.os_arch)
if self.os_mach_type is not None:
type_node.set("machine", self.os_mach_type)
os.append(type_node)
@@ -3029,6 +3144,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
# LibvirtConfigGuestGidMap
# LibvirtConfigGuestCPU
# LibvirtConfigGuestVPMEM
+ # LibvirtConfigGuestIOMMU
for c in xmldoc:
if c.tag == 'devices':
for d in c:
@@ -3056,6 +3172,10 @@ class LibvirtConfigGuest(LibvirtConfigObject):
obj = LibvirtConfigGuestVPMEM()
obj.parse_dom(d)
self.devices.append(obj)
+ elif d.tag == 'iommu':
+ obj = LibvirtConfigGuestIOMMU()
+ obj.parse_dom(d)
+ self.devices.append(obj)
if c.tag == 'idmap':
for idmap in c:
obj = None
@@ -3080,7 +3200,10 @@ class LibvirtConfigGuest(LibvirtConfigObject):
else:
self._parse_basic_props(c)
- def add_device(self, dev):
+ def add_feature(self, dev: LibvirtConfigGuestFeature) -> None:
+ self.features.append(dev)
+
+ def add_device(self, dev: LibvirtConfigGuestDevice) -> None:
self.devices.append(dev)
def add_perf_event(self, event):
@@ -3130,6 +3253,7 @@ class LibvirtConfigNodeDevice(LibvirtConfigObject):
self.pci_capability = None
self.mdev_information = None
self.vdpa_capability = None
+ self.vpd_capability = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDevice, self).parse_dom(xmldoc)
@@ -3183,6 +3307,7 @@ class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject):
self.numa_node = None
self.fun_capability = []
self.mdev_capability = []
+ self.vpd_capability = None
self.interface = None
self.address = None
self.link_state = None
@@ -3225,6 +3350,10 @@ class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject):
mdevcap = LibvirtConfigNodeDeviceMdevCapableSubFunctionCap()
mdevcap.parse_dom(c)
self.mdev_capability.append(mdevcap)
+ elif c.tag == "capability" and c.get('type') in ('vpd',):
+ vpdcap = LibvirtConfigNodeDeviceVpdCap()
+ vpdcap.parse_dom(c)
+ self.vpd_capability = vpdcap
def pci_address(self):
return "%04x:%02x:%02x.%01x" % (
@@ -3277,6 +3406,7 @@ class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
root_name="capability", **kwargs)
self.type = None
self.iommu_group = None
+ self.uuid = None
def parse_dom(self, xmldoc):
super(LibvirtConfigNodeDeviceMdevInformation,
@@ -3286,6 +3416,103 @@ class LibvirtConfigNodeDeviceMdevInformation(LibvirtConfigObject):
self.type = c.get('id')
if c.tag == "iommuGroup":
self.iommu_group = int(c.get('number'))
+ if c.tag == "uuid":
+ self.uuid = c.text
+
+
+class LibvirtConfigNodeDeviceVpdCap(LibvirtConfigObject):
+
+ def __init__(self, **kwargs):
+ super().__init__(
+ root_name="capability", **kwargs)
+ self._card_name = None
+ self._change_level = None
+ self._manufacture_id = None
+ self._part_number = None
+ self._serial_number = None
+ self._asset_tag = None
+ self._ro_vendor_fields = {}
+ self._rw_vendor_fields = {}
+ self._rw_system_fields = {}
+
+ @staticmethod
+ def _process_custom_field(fields_dict, field_element):
+ index = field_element.get('index')
+ if index:
+ fields_dict[index] = field_element.text
+
+ def _parse_ro_fields(self, fields_element):
+ for e in fields_element:
+ if e.tag == 'change_level':
+ self._change_level = e.text
+ elif e.tag == 'manufacture_id':
+ self._manufacture_id = e.text
+ elif e.tag == 'part_number':
+ self._part_number = e.text
+ elif e.tag == 'serial_number':
+ self._serial_number = e.text
+ elif e.tag == 'vendor_field':
+ self._process_custom_field(self._ro_vendor_fields, e)
+
+ def _parse_rw_fields(self, fields_element):
+ for e in fields_element:
+ if e.tag == 'asset_tag':
+ self._asset_tag = e.text
+ elif e.tag == 'vendor_field':
+ self._process_custom_field(self._rw_vendor_fields, e)
+ elif e.tag == 'system_field':
+ self._process_custom_field(self._rw_system_fields, e)
+
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigNodeDeviceVpdCap, self).parse_dom(xmldoc)
+ for c in xmldoc:
+ if c.tag == "name":
+ self._card_name = c.text
+ if c.tag == "fields":
+ access = c.get('access')
+ if access:
+ if access == 'readonly':
+ self._parse_ro_fields(c)
+ elif access == 'readwrite':
+ self._parse_rw_fields(c)
+ else:
+ continue
+
+ @property
+ def card_name(self):
+ return self._card_name
+
+ @property
+ def change_level(self):
+ return self._change_level
+
+ @property
+ def manufacture_id(self):
+ return self._manufacture_id
+
+ @property
+ def part_number(self):
+ return self._part_number
+
+ @property
+ def card_serial_number(self):
+ return self._serial_number
+
+ @property
+ def asset_tag(self):
+ return self._asset_tag
+
+ @property
+ def ro_vendor_fields(self):
+ return self._ro_vendor_fields
+
+ @property
+ def rw_vendor_fields(self):
+ return self._rw_vendor_fields
+
+ @property
+ def rw_system_fields(self):
+ return self._rw_system_fields
class LibvirtConfigGuestRng(LibvirtConfigGuestDevice):
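The VPD parser above can be exercised against a hand-written nodedev capability snippet; the XML below follows libvirt's documented <capability type='vpd'> layout but is fabricated sample data.

from lxml import etree

from nova.virt.libvirt import config as vconfig

xml = (
    "<capability type='vpd'>"
    "  <name>ExampleNIC</name>"
    "  <fields access='readonly'>"
    "    <serial_number>SN123</serial_number>"
    "    <vendor_field index='0'>custom</vendor_field>"
    "  </fields>"
    "</capability>")
cap = vconfig.LibvirtConfigNodeDeviceVpdCap()
cap.parse_dom(etree.fromstring(xml))
print(cap.card_name, cap.card_serial_number, cap.ro_vendor_fields)
# ExampleNIC SN123 {'0': 'custom'}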
@@ -3468,11 +3695,11 @@ class LibvirtConfigGuestVPMEM(LibvirtConfigGuestDevice):
self.model = "nvdimm"
self.access = "shared"
- self.source_path = kwargs.get("devpath", "")
- self.align_size = kwargs.get("align_kb", 0)
+ self.source_path = ""
+ self.align_size = 0
self.pmem = True
- self.target_size = kwargs.get("size_kb", 0)
+ self.target_size = 0
self.target_node = 0
self.label_size = 2 * units.Ki
@@ -3518,6 +3745,53 @@ class LibvirtConfigGuestVPMEM(LibvirtConfigGuestDevice):
self.target_size = sub.text
+class LibvirtConfigGuestIOMMU(LibvirtConfigGuestDevice):
+ """https://libvirt.org/formatdomain.html#iommu-devices"""
+
+ def __init__(self, **kwargs):
+ super().__init__(root_name="iommu", **kwargs)
+
+ self.model: str = fields.VIOMMUModel.AUTO
+ self.interrupt_remapping: bool = False
+ self.caching_mode: bool = False
+ self.eim: bool = False
+ self.iotlb: bool = False
+
+ def format_dom(self):
+ iommu = super().format_dom()
+ iommu.set("model", self.model)
+
+ driver = etree.Element("driver")
+ driver.set("intremap", self.get_on_off_str(self.interrupt_remapping))
+ driver.set("caching_mode", self.get_on_off_str(self.caching_mode))
+
+        # aw_bits is only set by the driver when the libvirt version
+        # satisfies MIN_LIBVIRT_VIOMMU_AW_BITS; when it is unset, the
+        # aw_bits attribute is not supported.
+ if hasattr(self, "aw_bits"):
+ driver.set("aw_bits", str(self.aw_bits))
+ driver.set("eim", self.get_on_off_str(self.eim))
+ driver.set("iotlb", self.get_on_off_str(self.iotlb))
+ iommu.append(driver)
+
+ return iommu
+
+ def parse_dom(self, xmldoc):
+ super().parse_dom(xmldoc)
+ self.model = xmldoc.get("model")
+
+ driver = xmldoc.find("./driver")
+ if driver:
+ self.interrupt_remapping = self.parse_on_off_str(
+ driver.get("intremap"))
+ self.caching_mode = self.parse_on_off_str(
+ driver.get("caching_mode"))
+ if driver.get("aw_bits") is not None:
+ self.aw_bits = int(driver.get("aw_bits"))
+ self.iotlb = self.parse_on_off_str(driver.get("iotlb"))
+ self.eim = self.parse_on_off_str(driver.get("eim"))
+
+
class LibvirtConfigGuestMetaNovaPorts(LibvirtConfigObject):
def __init__(self, ports=None):
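A rough sketch of the guest XML the new vIOMMU device class produces, assuming an Intel model with interrupt remapping and IOTLB enabled; the commented output is approximate.

from nova.virt.libvirt import config as vconfig

iommu = vconfig.LibvirtConfigGuestIOMMU()
iommu.model = 'intel'
iommu.interrupt_remapping = True
iommu.eim = True
iommu.iotlb = True
print(iommu.to_xml())
# <iommu model="intel">
#   <driver intremap="on" caching_mode="off" eim="on" iotlb="on"/>
# </iommu>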
diff --git a/nova/db/main/legacy_migrations/versions/__init__.py b/nova/virt/libvirt/cpu/__init__.py
index e69de29bb2..e69de29bb2 100644
--- a/nova/db/main/legacy_migrations/versions/__init__.py
+++ b/nova/virt/libvirt/cpu/__init__.py
diff --git a/nova/virt/libvirt/cpu/api.py b/nova/virt/libvirt/cpu/api.py
new file mode 100644
index 0000000000..1c17458d6b
--- /dev/null
+++ b/nova/virt/libvirt/cpu/api.py
@@ -0,0 +1,157 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from dataclasses import dataclass
+
+from oslo_log import log as logging
+
+import nova.conf
+from nova import exception
+from nova.i18n import _
+from nova import objects
+from nova.virt import hardware
+from nova.virt.libvirt.cpu import core
+
+LOG = logging.getLogger(__name__)
+
+CONF = nova.conf.CONF
+
+
+@dataclass
+class Core:
+ """Class to model a CPU core as reported by sysfs.
+
+ It may be a physical CPU core or a hardware thread on a shared CPU core
+ depending on if the system supports SMT.
+ """
+
+ # NOTE(sbauza): ident is a mandatory field.
+ # The CPU core id/number
+ ident: int
+
+ @property
+ def online(self) -> bool:
+ return core.get_online(self.ident)
+
+ @online.setter
+ def online(self, state: bool) -> None:
+ if state:
+ core.set_online(self.ident)
+ else:
+ core.set_offline(self.ident)
+
+ def __hash__(self):
+ return hash(self.ident)
+
+ def __eq__(self, other):
+ return self.ident == other.ident
+
+ def __str__(self):
+ return str(self.ident)
+
+ @property
+ def governor(self) -> str:
+ return core.get_governor(self.ident)
+
+ def set_high_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_high)
+
+ def set_low_governor(self) -> None:
+ core.set_governor(self.ident, CONF.libvirt.cpu_power_governor_low)
+
+
+def power_up(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_up = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = True
+ else:
+ pcpu.set_high_governor()
+ powered_up.add(str(pcpu))
+ LOG.debug("Cores powered up : %s", powered_up)
+
+
+def power_down(instance: objects.Instance) -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if instance.numa_topology is None:
+ return
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ pcpus = instance.numa_topology.cpu_pinning
+ powered_down = set()
+ for pcpu in pcpus:
+ if pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ powered_down.add(str(pcpu))
+ LOG.debug("Cores powered down : %s", powered_down)
+
+
+def power_down_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ if (CONF.libvirt.cpu_power_management and
+ not CONF.compute.cpu_dedicated_set
+ ):
+ msg = _("'[compute]/cpu_dedicated_set' is mandatory to be set if "
+ "'[libvirt]/cpu_power_management' is set."
+ "Please provide the CPUs that can be pinned or don't use the "
+ "power management if you only use shared CPUs.")
+ raise exception.InvalidConfiguration(msg)
+
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ pcpu.online = False
+ else:
+ pcpu.set_low_governor()
+ LOG.debug("Cores powered down : %s", cpu_dedicated_set)
+
+
+def validate_all_dedicated_cpus() -> None:
+ if not CONF.libvirt.cpu_power_management:
+ return
+ cpu_dedicated_set = hardware.get_cpu_dedicated_set() or set()
+ governors = set()
+ cpu_states = set()
+ for pcpu in cpu_dedicated_set:
+ pcpu = Core(pcpu)
+        # we need to collect the governor strategies and the CPU states
+ governors.add(pcpu.governor)
+ cpu_states.add(pcpu.online)
+ if CONF.libvirt.cpu_power_management_strategy == 'cpu_state':
+ # all the cores need to have the same governor strategy
+ if len(governors) > 1:
+ msg = _("All the cores need to have the same governor strategy"
+ "before modifying the CPU states. You can reboot the "
+ "compute node if you prefer.")
+ raise exception.InvalidConfiguration(msg)
+ elif CONF.libvirt.cpu_power_management_strategy == 'governor':
+ # all the cores need to be online
+ if False in cpu_states:
+ msg = _("All the cores need to be online before modifying the "
+ "governor strategy.")
+ raise exception.InvalidConfiguration(msg)
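The power management path above only engages when the operator opts in; a hypothetical nova.conf fragment matching the options referenced in this module (the values are examples only):

[compute]
cpu_dedicated_set = 4-15

[libvirt]
cpu_power_management = True
cpu_power_management_strategy = cpu_state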
diff --git a/nova/virt/libvirt/cpu/core.py b/nova/virt/libvirt/cpu/core.py
new file mode 100644
index 0000000000..782f028fee
--- /dev/null
+++ b/nova/virt/libvirt/cpu/core.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import typing as ty
+
+from oslo_log import log as logging
+
+from nova import exception
+from nova import filesystem
+import nova.privsep
+from nova.virt import hardware
+
+LOG = logging.getLogger(__name__)
+
+AVAILABLE_PATH = '/sys/devices/system/cpu/present'
+
+CPU_PATH_TEMPLATE = '/sys/devices/system/cpu/cpu%(core)s'
+
+
+def get_available_cores() -> ty.Set[int]:
+ cores = filesystem.read_sys(AVAILABLE_PATH)
+ return hardware.parse_cpu_spec(cores) if cores else set()
+
+
+def exists(core: int) -> bool:
+ return core in get_available_cores()
+
+
+def gen_cpu_path(core: int) -> str:
+ if not exists(core):
+ LOG.warning('Unable to access CPU: %s', core)
+        raise ValueError('CPU: %(core)s does not exist' % {'core': core})
+ return CPU_PATH_TEMPLATE % {'core': core}
+
+
+def get_online(core: int) -> bool:
+ try:
+ online = filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'online')).strip()
+ except exception.FileNotFound:
+ # The online file may not exist if we haven't written it yet.
+ # By default, this means that the CPU is online.
+ online = '1'
+ return online == '1'
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_online(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='1')
+ return get_online(core)
+
+
+def set_offline(core: int) -> bool:
+ filesystem.write_sys(os.path.join(gen_cpu_path(core), 'online'), data='0')
+ return not get_online(core)
+
+
+def get_governor(core: int) -> str:
+ return filesystem.read_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor')).strip()
+
+
+@nova.privsep.sys_admin_pctxt.entrypoint
+def set_governor(core: int, governor: str) -> str:
+ filesystem.write_sys(
+ os.path.join(gen_cpu_path(core), 'cpufreq/scaling_governor'),
+ data=governor)
+ return get_governor(core)
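The helpers in this module are thin wrappers over sysfs; an illustrative use, where the reported values obviously depend on the host:

from nova.virt.libvirt.cpu import core

print(core.get_available_cores())        # e.g. {0, 1, ..., 15}
if core.exists(5):
    print(core.gen_cpu_path(5))          # /sys/devices/system/cpu/cpu5
    print(core.get_online(5), core.get_governor(5))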
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 2ea493d452..fe48960296 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -96,7 +96,6 @@ from nova import objects
from nova.objects import diagnostics as diagnostics_obj
from nova.objects import fields
from nova.objects import migrate_data as migrate_data_obj
-from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.libvirt
import nova.privsep.path
@@ -115,6 +114,7 @@ from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
+from nova.virt.libvirt.cpu import api as libvirt_cpu
from nova.virt.libvirt import designer
from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
@@ -188,6 +188,7 @@ VOLUME_DRIVERS = {
'vzstorage': 'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver', # noqa:E501
'storpool': 'nova.virt.libvirt.volume.storpool.LibvirtStorPoolVolumeDriver', # noqa:E501
'nvmeof': 'nova.virt.libvirt.volume.nvme.LibvirtNVMEVolumeDriver',
+ 'lightos': 'nova.virt.libvirt.volume.lightos.LibvirtLightOSVolumeDriver',
}
@@ -220,6 +221,12 @@ MIN_QEMU_VERSION = (4, 2, 0)
NEXT_MIN_LIBVIRT_VERSION = (7, 0, 0)
NEXT_MIN_QEMU_VERSION = (5, 2, 0)
+# vIOMMU driver attribute aw_bits minimal support version.
+MIN_LIBVIRT_VIOMMU_AW_BITS = (6, 5, 0)
+
+# vIOMMU model value `virtio` minimal support version
+MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL = (8, 3, 0)
+
MIN_LIBVIRT_AARCH64_CPU_COMPARE = (6, 9, 0)
# Virtuozzo driver support
@@ -243,6 +250,16 @@ LIBVIRT_PERF_EVENT_PREFIX = 'VIR_PERF_PARAM_'
MIN_LIBVIRT_VDPA = (6, 9, 0)
MIN_QEMU_VDPA = (5, 1, 0)
+REGISTER_IMAGE_PROPERTY_DEFAULTS = [
+ 'hw_machine_type',
+ 'hw_cdrom_bus',
+ 'hw_disk_bus',
+ 'hw_input_bus',
+ 'hw_pointer_model',
+ 'hw_video_model',
+ 'hw_vif_model',
+]
+
class AsyncDeviceEventsHandler:
"""A synchornization point between libvirt events an clients waiting for
@@ -396,6 +413,8 @@ class LibvirtDriver(driver.ComputeDriver):
not CONF.force_raw_images)
requires_ploop_image = CONF.libvirt.virt_type == 'parallels'
+ self.image_backend = imagebackend.Backend(CONF.use_cow_images)
+
self.capabilities = {
"has_imagecache": True,
"supports_evacuate": True,
@@ -423,6 +442,10 @@ class LibvirtDriver(driver.ComputeDriver):
"supports_bfv_rescue": True,
"supports_vtpm": CONF.libvirt.swtpm_enabled,
"supports_socket_pci_numa_affinity": True,
+ "supports_ephemeral_encryption":
+ self.image_backend.backend().SUPPORTS_LUKS,
+ "supports_ephemeral_encryption_luks":
+ self.image_backend.backend().SUPPORTS_LUKS,
}
super(LibvirtDriver, self).__init__(virtapi)
@@ -447,7 +470,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
- self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
@@ -796,6 +818,18 @@ class LibvirtDriver(driver.ComputeDriver):
"force_raw_images to True.")
raise exception.InvalidConfiguration(msg)
+        # NOTE(sbauza): We first verify whether the dedicated CPU performance
+        # settings were previously modified by Nova. Note that this can raise
+        # an exception if either the governor strategies differ between the
+        # cores or some cores are offline.
+ libvirt_cpu.validate_all_dedicated_cpus()
+        # NOTE(sbauza): We power down all dedicated CPUs, but if some
+        # instances exist that are pinned to some CPUs, then we'll later
+        # power those CPUs back up when rebooting the instance in
+        # _init_instance().
+        # Note that this can raise an exception if the config options are
+        # wrongly modified.
+ libvirt_cpu.power_down_all_dedicated_cpus()
+
# TODO(sbauza): Remove this code once mediated devices are persisted
# across reboots.
self._recreate_assigned_mediated_devices()
@@ -804,7 +838,9 @@ class LibvirtDriver(driver.ComputeDriver):
self._check_vtpm_support()
- self._register_instance_machine_type()
+ # Set REGISTER_IMAGE_PROPERTY_DEFAULTS in the instance system_metadata
+ # to default values for properties that have not already been set.
+ self._register_all_undefined_instance_details()
def _update_host_specific_capabilities(self) -> None:
"""Update driver capabilities based on capabilities of the host."""
@@ -812,36 +848,118 @@ class LibvirtDriver(driver.ComputeDriver):
# or UEFI bootloader support in this manner
self.capabilities.update({
'supports_secure_boot': self._host.supports_secure_boot,
+ 'supports_remote_managed_ports':
+ self._host.supports_remote_managed_ports
})
- def _register_instance_machine_type(self):
- """Register the machine type of instances on this host
+ def _register_all_undefined_instance_details(self) -> None:
+ """Register the default image properties of instances on this host
For each instance found on this host by InstanceList.get_by_host ensure
- a machine type is registered within the system metadata of the instance
+ REGISTER_IMAGE_PROPERTY_DEFAULTS are registered within the system
+ metadata of the instance
"""
context = nova_context.get_admin_context()
hostname = self._host.get_hostname()
+ for instance in objects.InstanceList.get_by_host(
+ context, hostname, expected_attrs=['flavor', 'system_metadata']
+ ):
+ try:
+ self._register_undefined_instance_details(context, instance)
+ except Exception:
+ LOG.exception('Ignoring unknown failure while attempting '
+ 'to save the defaults for unregistered image '
+ 'properties', instance=instance)
- for instance in objects.InstanceList.get_by_host(context, hostname):
- # NOTE(lyarwood): Skip if hw_machine_type is set already in the
- # image_meta of the instance. Note that this value comes from the
- # system metadata of the instance where it is stored under the
- # image_hw_machine_type key.
- if instance.image_meta.properties.get('hw_machine_type'):
- continue
+ def _register_undefined_instance_details(
+ self,
+ context: nova_context.RequestContext,
+ instance: 'objects.Instance',
+ ) -> None:
+ # Find any unregistered image properties against this instance
+ unregistered_image_props = [
+ p for p in REGISTER_IMAGE_PROPERTY_DEFAULTS
+ if f"image_{p}" not in instance.system_metadata
+ ]
- # Fetch and record the machine type from the config
- hw_machine_type = libvirt_utils.get_machine_type(
- instance.image_meta)
- # NOTE(lyarwood): As above this updates
- # image_meta.properties.hw_machine_type within the instance and
- # will be returned the next time libvirt_utils.get_machine_type is
- # called for the instance image meta.
- instance.system_metadata['image_hw_machine_type'] = hw_machine_type
- instance.save()
- LOG.debug("Instance machine_type updated to %s", hw_machine_type,
- instance=instance)
+ # Return if there's nothing left to register for this instance
+ if not unregistered_image_props:
+ return
+
+ LOG.debug(f'Attempting to register defaults for the following '
+ f'image properties: {unregistered_image_props}',
+ instance=instance)
+
+ # NOTE(lyarwood): Only build disk_info once per instance if we need it
+ # for hw_{disk,cdrom}_bus to avoid pulling bdms from the db etc.
+ requires_disk_info = ['hw_disk_bus', 'hw_cdrom_bus']
+ disk_info = None
+ if set(requires_disk_info) & set(unregistered_image_props):
+ bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ context, instance.uuid)
+ block_device_info = driver.get_block_device_info(instance, bdms)
+ disk_info = blockinfo.get_disk_info(
+ CONF.libvirt.virt_type, instance, instance.image_meta,
+ block_device_info)
+
+ # Only pull the guest config once per instance if we need it for
+ # hw_pointer_model or hw_input_bus.
+ requires_guest_config = ['hw_pointer_model', 'hw_input_bus']
+ guest_config = None
+ if set(requires_guest_config) & set(unregistered_image_props):
+ guest_config = self._host.get_guest(instance).get_config()
+
+ for image_prop in unregistered_image_props:
+ try:
+ default_value = self._find_default_for_image_property(
+ instance, image_prop, disk_info, guest_config)
+ instance.system_metadata[f"image_{image_prop}"] = default_value
+
+ LOG.debug(f'Found default for {image_prop} of {default_value}',
+ instance=instance)
+ except Exception:
+ LOG.exception(f'Ignoring unknown failure while attempting '
+ f'to find the default of {image_prop}',
+ instance=instance)
+ instance.save()
+
+ def _find_default_for_image_property(
+ self,
+ instance: 'objects.Instance',
+ image_property: str,
+ disk_info: ty.Optional[ty.Dict[str, ty.Any]],
+ guest_config: ty.Optional[vconfig.LibvirtConfigGuest],
+ ) -> ty.Optional[str]:
+ if image_property == 'hw_machine_type':
+ return libvirt_utils.get_machine_type(instance.image_meta)
+
+ if image_property == 'hw_disk_bus' and disk_info:
+ return disk_info.get('disk_bus')
+
+ if image_property == 'hw_cdrom_bus' and disk_info:
+ return disk_info.get('cdrom_bus')
+
+ if image_property == 'hw_input_bus' and guest_config:
+ _, default_input_bus = self._get_pointer_bus_and_model(
+ guest_config, instance.image_meta)
+ return default_input_bus
+
+ if image_property == 'hw_pointer_model' and guest_config:
+ default_pointer_model, _ = self._get_pointer_bus_and_model(
+ guest_config, instance.image_meta)
+ # hw_pointer_model is of type PointerModelType ('usbtablet' instead
+ # of 'tablet')
+ if default_pointer_model == 'tablet':
+ default_pointer_model = 'usbtablet'
+ return default_pointer_model
+
+ if image_property == 'hw_video_model':
+ return self._get_video_type(instance.image_meta)
+
+ if image_property == 'hw_vif_model':
+ return self.vif_driver.get_vif_model(instance.image_meta)
+
+ return None
def _prepare_cpu_flag(self, flag):
# NOTE(kchamart) This helper method will be used while computing
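After _register_undefined_instance_details() runs, every property in REGISTER_IMAGE_PROPERTY_DEFAULTS is pinned in the instance system_metadata under an image_ prefix. A sketch of the resulting keys with made-up values; the real defaults come from disk_info, the guest config and the image meta of each instance.

registered_defaults = {
    'image_hw_machine_type': 'pc-q35-6.2',
    'image_hw_disk_bus': 'virtio',
    'image_hw_cdrom_bus': 'sata',
    'image_hw_input_bus': 'usb',
    'image_hw_pointer_model': 'usbtablet',
    'image_hw_video_model': 'virtio',
    'image_hw_vif_model': 'virtio',
}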
@@ -884,33 +1002,26 @@ class LibvirtDriver(driver.ComputeDriver):
msg = _("The cpu_models option is required when cpu_mode=custom")
raise exception.Invalid(msg)
- cpu = vconfig.LibvirtConfigGuestCPU()
- for model in models:
- cpu.model = self._get_cpu_model_mapping(model)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured CPU model: %(model)s is not "
- "compatible with host CPU. Please correct your "
- "config and try again. %(e)s") % {
- 'model': model, 'e': e})
- raise exception.InvalidCPUInfo(msg)
-
- # Use guest CPU model to check the compatibility between guest CPU and
- # configured extra_flags
- cpu = vconfig.LibvirtConfigGuestCPU()
- cpu.model = self._host.get_capabilities().host.cpu.model
- for flag in set(x.lower() for x in CONF.libvirt.cpu_model_extra_flags):
- cpu_feature = self._prepare_cpu_flag(flag)
- cpu.add_feature(cpu_feature)
- try:
- self._compare_cpu(cpu, self._get_cpu_info(), None)
- except exception.InvalidCPUInfo as e:
- msg = (_("Configured extra flag: %(flag)s it not correct, or "
- "the host CPU does not support this flag. Please "
- "correct the config and try again. %(e)s") % {
- 'flag': flag, 'e': e})
- raise exception.InvalidCPUInfo(msg)
+ if not CONF.workarounds.skip_cpu_compare_at_startup:
+ # Use guest CPU model to check the compatibility between
+ # guest CPU and configured extra_flags
+ for model in models:
+ cpu = vconfig.LibvirtConfigGuestCPU()
+ cpu.model = self._get_cpu_model_mapping(model)
+ for flag in set(x.lower() for
+ x in CONF.libvirt.cpu_model_extra_flags):
+ cpu_feature = self._prepare_cpu_flag(flag)
+ cpu.add_feature(cpu_feature)
+ try:
+ self._compare_cpu(cpu, self._get_cpu_info(), None)
+ except exception.InvalidCPUInfo as e:
+ msg = (_("Configured CPU model: %(model)s "
+ "and CPU Flags %(flags)s ar not "
+ "compatible with host CPU. Please correct your "
+ "config and try again. %(e)s") % {
+ 'model': model, 'e': e,
+ 'flags': CONF.libvirt.cpu_model_extra_flags})
+ raise exception.InvalidCPUInfo(msg)
def _check_vtpm_support(self) -> None:
# TODO(efried): A key manager must be configured to create/retrieve
@@ -1414,6 +1525,8 @@ class LibvirtDriver(driver.ComputeDriver):
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
+        # We're sure the instance is gone, so we can power down its cores
+ libvirt_cpu.power_down(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, destroy_secrets=True):
@@ -2472,7 +2585,7 @@ class LibvirtDriver(driver.ComputeDriver):
# ServerRescueNegativeTestJSON.test_rescued_vm_detach_volume
# Log a warning and let the upper layer detect that the device is
# still attached and retry
- LOG.error(
+ LOG.warning(
'Waiting for libvirt event about the detach of '
'device %s with device alias %s from instance %s is timed '
'out.', device_name, dev.alias, instance_uuid)
@@ -2616,7 +2729,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
block_device.resize(new_size)
- def _resize_attached_encrypted_volume(self, original_new_size,
+ def _resize_attached_encrypted_volume(self, context, original_new_size,
block_device, instance,
connection_info, encryption):
# TODO(lyarwood): Also handle the dm-crpyt encryption providers of
@@ -2662,6 +2775,17 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.exception('Unknown error when attempting to find the '
'payload_offset for LUKSv1 encrypted disk '
'%s.', path, instance=instance)
+
+ else: # os-brick encryptor driver
+ encryptor = self._get_volume_encryptor(connection_info, encryption)
+ decrypted_device_new_size = encryptor.extend_volume(context,
+ **encryption)
+ if decrypted_device_new_size is None:
+ raise exception.VolumeExtendFailed(
+ volume_id=block_device._disk,
+ reason="Encryptor extend failed."
+ )
+
# NOTE(lyarwood): Resize the decrypted device within the instance to
# the calculated size as with normal volumes.
self._resize_attached_volume(
@@ -2710,7 +2834,7 @@ class LibvirtDriver(driver.ComputeDriver):
context, self._volume_api, volume_id, connection_info)
if encryption:
self._resize_attached_encrypted_volume(
- new_size, dev, instance,
+ context, new_size, dev, instance,
connection_info, encryption)
else:
self._resize_attached_volume(
@@ -3055,11 +3179,16 @@ class LibvirtDriver(driver.ComputeDriver):
current_power_state = guest.get_power_state(self._host)
+ libvirt_cpu.power_up(instance)
# TODO(stephenfin): Any reason we couldn't use 'self.resume' here?
guest.launch(pause=current_power_state == power_state.PAUSED)
self._attach_pci_devices(
- guest, pci_manager.get_instance_pci_devs(instance))
+ guest,
+ instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
self._attach_direct_passthrough_ports(context, instance, guest)
def _can_set_admin_password(self, image_meta):
@@ -3135,7 +3264,13 @@ class LibvirtDriver(driver.ComputeDriver):
'[Error Code %(error_code)s] %(ex)s')
% {'instance_name': instance.name,
'error_code': error_code, 'ex': err_msg})
- raise exception.InternalError(msg)
+
+ if error_code == libvirt.VIR_ERR_AGENT_UNRESPONSIVE:
+ msg += (", libvirt cannot connect to the qemu-guest-agent"
+ " inside the instance.")
+ raise exception.InstanceQuiesceFailed(reason=msg)
+ else:
+ raise exception.InternalError(msg)
def quiesce(self, context, instance, image_meta):
"""Freeze the guest filesystems to prepare for snapshot.
@@ -3173,17 +3308,18 @@ class LibvirtDriver(driver.ComputeDriver):
format=source_format,
basename=False)
disk_delta = out_path + '.delta'
- libvirt_utils.create_cow_image(src_back_path, disk_delta,
- src_disk_size)
+ libvirt_utils.create_image(
+ disk_delta, 'qcow2', src_disk_size, backing_file=src_back_path)
- quiesced = False
try:
- self._set_quiesced(context, instance, image_meta, True)
- quiesced = True
+ self._can_quiesce(instance, image_meta)
except exception.NovaException as err:
- if self._requires_quiesce(image_meta):
+ if image_meta.properties.get('os_require_quiesce', False):
+ LOG.error('Quiescing instance failed but image property '
+ '"os_require_quiesce" is set: %(reason)s.',
+ {'reason': err}, instance=instance)
raise
- LOG.info('Skipping quiescing instance: %(reason)s.',
+ LOG.info('Quiescing instance not available: %(reason)s.',
{'reason': err}, instance=instance)
try:
@@ -3204,12 +3340,24 @@ class LibvirtDriver(driver.ComputeDriver):
while not dev.is_job_complete():
time.sleep(0.5)
+ finally:
+ quiesced = False
+ try:
+                # NOTE: The filesystem freeze is applied after the disk
+                # mirroring completes in order to minimize the freeze time.
+                # Once the mirror between both disks has finished, it keeps
+                # syncing continuously and is stopped after abort_job().
+ self.quiesce(context, instance, image_meta)
+ quiesced = True
+ except exception.NovaException as err:
+ LOG.info('Skipping quiescing instance: %(reason)s.',
+ {'reason': err}, instance=instance)
+
dev.abort_job()
nova.privsep.path.chown(disk_delta, uid=os.getuid())
- finally:
self._host.write_instance_config(xml)
if quiesced:
- self._set_quiesced(context, instance, image_meta, False)
+ self.unquiesce(context, instance, image_meta)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
@@ -3897,7 +4045,12 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.debug("Shutting down instance from state %s", state,
instance=instance)
- guest.shutdown()
+ try:
+ guest.shutdown()
+ except libvirt.libvirtError as e:
+ LOG.debug("Ignoring libvirt exception from shutdown request: %s",
+ encodeutils.exception_to_unicode(e),
+ instance=instance)
retry_countdown = retry_interval
for sec in range(timeout):
@@ -3977,8 +4130,12 @@ class LibvirtDriver(driver.ComputeDriver):
"""Suspend the specified instance."""
guest = self._host.get_guest(instance)
- self._detach_pci_devices(guest,
- pci_manager.get_instance_pci_devs(instance))
+ self._detach_pci_devices(
+ guest,
+ instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
self._detach_direct_passthrough_ports(context, instance, guest)
self._detach_mediated_devices(guest)
guest.save_memory_state()
@@ -3996,8 +4153,12 @@ class LibvirtDriver(driver.ComputeDriver):
guest = self._create_guest_with_network(
context, xml, instance, network_info, block_device_info,
vifs_already_plugged=True)
- self._attach_pci_devices(guest,
- pci_manager.get_instance_pci_devs(instance))
+ self._attach_pci_devices(
+ guest,
+ instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ),
+ )
self._attach_direct_passthrough_ports(
context, instance, guest, network_info)
self._attach_mediated_devices(guest, mdevs)
@@ -4244,6 +4405,11 @@ class LibvirtDriver(driver.ComputeDriver):
else:
LOG.info("Instance spawned successfully.", instance=instance)
+ # Finally register defaults for any undefined image properties so that
+ # future changes by QEMU, libvirt or within this driver don't change
+ # the ABI of the instance.
+ self._register_undefined_instance_details(context, instance)
+
def _get_console_output_file(self, instance, console_log):
bytes_to_read = MAX_CONSOLE_BYTES
log_data = b"" # The last N read bytes
@@ -4377,7 +4543,7 @@ class LibvirtDriver(driver.ComputeDriver):
'%dG' % ephemeral_size,
specified_fs)
return
- libvirt_utils.create_image('raw', target, '%dG' % ephemeral_size)
+ libvirt_utils.create_image(target, 'raw', f'{ephemeral_size}G')
# Run as root only for block devices.
disk_api.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
@@ -4386,7 +4552,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_swap(target, swap_mb, context=None):
"""Create a swap file of specified size."""
- libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
+ libvirt_utils.create_image(target, 'raw', f'{swap_mb}M')
nova.privsep.fs.unprivileged_mkfs('swap', target)
@staticmethod
@@ -4509,12 +4675,16 @@ class LibvirtDriver(driver.ComputeDriver):
ignore_bdi_for_swap=False):
booted_from_volume = self._is_booted_from_volume(block_device_info)
- def image(fname, image_type=CONF.libvirt.images_type):
- return self.image_backend.by_name(instance,
- fname + suffix, image_type)
+ def image(
+ fname, image_type=CONF.libvirt.images_type, disk_info_mapping=None
+ ):
+ return self.image_backend.by_name(
+ instance, fname + suffix, image_type,
+ disk_info_mapping=disk_info_mapping)
- def raw(fname):
- return image(fname, image_type='raw')
+ def raw(fname, disk_info_mapping=None):
+ return image(
+ fname, image_type='raw', disk_info_mapping=disk_info_mapping)
created_instance_dir = True
@@ -4528,13 +4698,11 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.debug("Creating instance directory", instance=instance)
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
- LOG.info('Creating image', instance=instance)
+ LOG.info('Creating image(s)', instance=instance)
flavor = instance.get_flavor()
swap_mb = 0
if 'disk.swap' in disk_mapping:
- mapping = disk_mapping['disk.swap']
-
if ignore_bdi_for_swap:
# This is a workaround to support legacy swap resizing,
# which does not touch swap size specified in bdm,
@@ -4548,12 +4716,17 @@ class LibvirtDriver(driver.ComputeDriver):
# leaving the work with bdm only.
swap_mb = flavor['swap']
else:
+ disk_info_mapping = disk_mapping['disk.swap']
+ disk_device = disk_info_mapping['dev']
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
- elif (flavor['swap'] > 0 and
- not block_device.volume_in_mapping(
- mapping['dev'], block_device_info)):
+ elif (
+ flavor['swap'] > 0 and
+ not block_device.volume_in_mapping(
+ disk_device, block_device_info,
+ )
+ ):
swap_mb = flavor['swap']
if swap_mb > 0:
@@ -4586,8 +4759,8 @@ class LibvirtDriver(driver.ComputeDriver):
image_id=disk_images['ramdisk_id'])
created_disks = self._create_and_inject_local_root(
- context, instance, booted_from_volume, suffix, disk_images,
- injection_info, fallback_from_host)
+ context, instance, disk_mapping, booted_from_volume, suffix,
+ disk_images, injection_info, fallback_from_host)
# Lookup the filesystem type if required
os_type_with_default = nova.privsep.fs.get_fs_type_for_os_type(
@@ -4600,7 +4773,9 @@ class LibvirtDriver(driver.ComputeDriver):
vm_mode = fields.VMMode.get_from_instance(instance)
ephemeral_gb = instance.flavor.ephemeral_gb
if 'disk.local' in disk_mapping:
- disk_image = image('disk.local')
+ disk_info_mapping = disk_mapping['disk.local']
+ disk_image = image(
+ 'disk.local', disk_info_mapping=disk_info_mapping)
# Short circuit the exists() tests if we already created a disk
created_disks = created_disks or not disk_image.exists()
@@ -4619,7 +4794,9 @@ class LibvirtDriver(driver.ComputeDriver):
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
- disk_image = image(blockinfo.get_eph_disk(idx))
+ disk_name = blockinfo.get_eph_disk(idx)
+ disk_info_mapping = disk_mapping[disk_name]
+ disk_image = image(disk_name, disk_info_mapping=disk_info_mapping)
# Short circuit the exists() tests if we already created a disk
created_disks = created_disks or not disk_image.exists()
@@ -4658,7 +4835,7 @@ class LibvirtDriver(driver.ComputeDriver):
return (created_instance_dir, created_disks)
- def _create_and_inject_local_root(self, context, instance,
+ def _create_and_inject_local_root(self, context, instance, disk_mapping,
booted_from_volume, suffix, disk_images,
injection_info, fallback_from_host):
created_disks = False
@@ -4668,9 +4845,6 @@ class LibvirtDriver(driver.ComputeDriver):
injection_info is not None and
CONF.libvirt.inject_partition != -2)
- # NOTE(ndipanov): Even if disk_mapping was passed in, which
- # currently happens only on rescue - we still don't want to
- # create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images['image_id'])
size = instance.flavor.root_gb * units.Gi
@@ -4678,8 +4852,10 @@ class LibvirtDriver(driver.ComputeDriver):
if size == 0 or suffix == '.rescue':
size = None
- backend = self.image_backend.by_name(instance, 'disk' + suffix,
- CONF.libvirt.images_type)
+ disk_name = 'disk' + suffix
+ disk_info_mapping = disk_mapping[disk_name]
+ backend = self.image_backend.by_name(
+ instance, disk_name, disk_info_mapping=disk_info_mapping)
created_disks = not backend.exists()
if instance.task_state == task_states.RESIZE_FINISH:
@@ -4857,16 +5033,18 @@ class LibvirtDriver(driver.ComputeDriver):
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
- for hdev in [d for d in guest_config.devices
- if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
+ for hdev in [
+ d for d in guest_config.devices
+ if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)
+ ]:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev.address)
- if [int(x, 16) for x in hdbsf] ==\
- [int(x, 16) for x in dbsf]:
- raise exception.PciDeviceDetachFailed(reason=
- "timeout",
- dev=dev)
-
+ if (
+ [int(x, 16) for x in hdbsf] ==
+ [int(x, 16) for x in dbsf]
+ ):
+ raise exception.PciDeviceDetachFailed(
+ reason="timeout", dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
@@ -4914,33 +5092,76 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
guest.attach_device(cfg)
+ # TODO(sean-k-mooney): we should try to converge this function with
+ # _detach_direct_passthrough_vifs, which does the same operation
+ # correctly for live migration
def _detach_direct_passthrough_ports(self, context, instance, guest):
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_direct_passthrough_port(network_info):
- # In case of VNIC_TYPES_DIRECT_PASSTHROUGH ports we create
- # pci request per direct passthrough port. Therefore we can trust
- # that pci_slot value in the vif is correct.
- direct_passthrough_pci_addresses = [
+
+ attached_via_hostdev_element = []
+ attached_via_interface_element = []
+
+ for vif in network_info:
+ if vif['profile'].get('pci_slot') is None:
+ # this is not an SR-IOV interface so skip it
+ continue
+
+ if (vif['vnic_type'] not in
+ network_model.VNIC_TYPES_DIRECT_PASSTHROUGH):
+ continue
+
+ cfg = self.vif_driver.get_config(
+ instance, vif, instance.image_meta, instance.flavor,
+ CONF.libvirt.virt_type)
+ LOG.debug(f'Detaching type: {type(cfg)}, data: {cfg}')
+ if isinstance(cfg, vconfig.LibvirtConfigGuestHostdevPCI):
+ attached_via_hostdev_element.append(vif)
+ else:
+ attached_via_interface_element.append(vif)
+
+ pci_devs = instance.get_pci_devices()
+ hostdev_pci_addresses = {
vif['profile']['pci_slot']
- for vif in network_info
- if (vif['vnic_type'] in
- network_model.VNIC_TYPES_DIRECT_PASSTHROUGH and
- vif['profile'].get('pci_slot') is not None)
+ for vif in attached_via_hostdev_element
+ }
+ direct_passthrough_pci_addresses = [
+ pci_dev for pci_dev in pci_devs
+ if pci_dev.address in hostdev_pci_addresses
]
- # use detach_pci_devices to avoid failure in case of
- # multiple guest direct passthrough ports with the same MAC
- # (protection use-case, ports are on different physical
- # interfaces)
- pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
- direct_passthrough_pci_addresses = (
- [pci_dev for pci_dev in pci_devs
- if pci_dev.address in direct_passthrough_pci_addresses])
+ # FIXME(sean-k-mooney): I am using _detach_pci_devices because
+ # of the previous comment introduced by change-id:
+ # I3a45b1fb41e8e446d1f25d7a1d77991c8bf2a1ed
+ # in relation to bug 1563874, however I'm not convinced that
+ # patch was correct so we should reevaluate whether we should do this.
+ # The intent of using _detach_pci_devices is to somehow cater for the
+ # use case where multiple ports have the same MAC address, however
+ # _detach_pci_devices can only remove devices that are attached as
+ # hostdev elements, not those attached via the interface element.
+ # So using it for all devices would break vnic-type direct when
+ # using the sriov_nic_agent ml2 driver or vifs of vnic_type vdpa.
+ # Since PF ports can't share a MAC, that suggests this use case was
+ # for hardware-offloaded OVS. Many NICs do not allow two VFs to have
+ # the same MAC on different VLANs due to the ordering of the VLAN and
+ # MAC filters in their static packet processing pipeline, so it is
+ # unclear if this will work in any non-OVS offload case. We should
+ # look into this more closely, as from my testing in this patch we
+ # appear to use the interface element for hardware-offloaded OVS too.
+ # InfiniBand and vnic_type direct-physical ports do need this code
+ # path; neither of those can have duplicate MACs...
self._detach_pci_devices(guest, direct_passthrough_pci_addresses)
+ # for ports that are attached with interface elements we cannot use
+ # _detach_pci_devices so we use detach_interface
+ for vif in attached_via_interface_element:
+ self.detach_interface(context, instance, vif)
+
def _update_compute_provider_status(self, context, service):
"""Calls the ComputeVirtAPI.update_compute_provider_status method
@@ -5018,6 +5239,43 @@ class LibvirtDriver(driver.ComputeDriver):
else:
mount.get_manager().host_down()
+ def _check_emulation_arch(self, image_meta):
+ # NOTE(chateaulav) In order to support emulation via qemu,
+ # there are required metadata properties that need to be applied
+ # to the designated glance image. The config drive is not
+ # supported. This leverages the hw_architecture and
+ # hw_emulation_architecture image_meta fields to allow for
+ # emulation to take advantage of all physical multiarch work
+ # being done.
+ #
+ # aarch64 emulation support metadata values:
+ # 'hw_emulation_architecture=aarch64'
+ # 'hw_firmware_type=uefi'
+ # 'hw_machine_type=virt'
+ #
+ # ppc64le emulation support metadata values:
+ # 'hw_emulation_architecture=ppc64le'
+ # 'hw_machine_type=pseries'
+ #
+ # s390x emulation support metadata values:
+ # 'hw_emulation_architecture=s390x'
+ # 'hw_machine_type=s390-ccw-virtio'
+ # 'hw_video_model=virtio'
+ #
+ # TODO(chateaulav) Further Work to be done:
+ # testing mips functionality while waiting on redhat libvirt
+ # patch https://listman.redhat.com/archives/libvir-list/
+ # 2016-May/msg00197.html
+ #
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1432101
+ emulation_arch = image_meta.properties.get("hw_emulation_architecture")
+ if emulation_arch:
+ arch = emulation_arch
+ else:
+ arch = libvirt_utils.get_arch(image_meta)
+
+ return arch
+
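For reference, a minimal sketch (not part of the patch) of how the helper above resolves the architecture; the property names come from the comment block, while the ImageMeta construction and values are illustrative only:

    props = {'hw_emulation_architecture': 'aarch64',
             'hw_firmware_type': 'uefi',
             'hw_machine_type': 'virt'}
    image_meta = objects.ImageMeta.from_dict({'properties': props})
    # _check_emulation_arch(image_meta) returns 'aarch64' here; with the
    # property unset it falls back to libvirt_utils.get_arch(image_meta).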
def _get_cpu_model_mapping(self, model):
"""Get the CPU model mapping
@@ -5158,7 +5416,7 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_guest_cpu_config(self, flavor, image_meta,
guest_cpu_numa_config, instance_numa_topology):
- arch = libvirt_utils.get_arch(image_meta)
+ arch = self._check_emulation_arch(image_meta)
cpu = self._get_guest_cpu_model_config(flavor, arch)
if cpu is None:
@@ -5171,14 +5429,38 @@ class LibvirtDriver(driver.ComputeDriver):
cpu.threads = topology.threads
cpu.numa = guest_cpu_numa_config
+ caps = self._host.get_capabilities()
+ if arch != caps.host.cpu.arch:
+ # Try emulating. Other arch configs will go here
+ cpu.mode = None
+ if arch == fields.Architecture.AARCH64:
+ cpu.model = "cortex-a57"
+ elif arch == fields.Architecture.PPC64LE:
+ cpu.model = "POWER8"
+ # TODO(chateaulav): re-evaluate when libvirtd adds overall
+ # RISCV support as a supported architecture. As there are no
+ # cpu models associated, this simply assigns X vcpus to the
+ # guest according to the flavor. The same issue should be
+ # present with mipsel due to the same limitation, but this has
+ # not been tested.
+ elif arch == fields.Architecture.MIPSEL:
+ cpu = None
+
return cpu
def _get_guest_disk_config(
self, instance, name, disk_mapping, flavor, image_type=None,
boot_order=None,
):
+ # NOTE(artom) To pass unit tests, wherein the code here is loaded
+ # *before* any config with self.flags() is done, we need to have the
+ # default inline in the method, and not in the kwarg declaration.
+ if image_type is None:
+ image_type = CONF.libvirt.images_type
disk_unit = None
- disk = self.image_backend.by_name(instance, name, image_type)
+ disk_info_mapping = disk_mapping[name]
+ disk = self.image_backend.by_name(
+ instance, name, image_type, disk_info_mapping=disk_info_mapping)
if (name == 'disk.config' and image_type == 'rbd' and
not disk.exists()):
# This is likely an older config drive that has not been migrated
@@ -5187,21 +5469,26 @@ class LibvirtDriver(driver.ComputeDriver):
# remove this fall back once we know all config drives are in rbd.
# NOTE(vladikr): make sure that the flat image exist, otherwise
# the image will be created after the domain definition.
- flat_disk = self.image_backend.by_name(instance, name, 'flat')
+ flat_disk = self.image_backend.by_name(
+ instance, name, 'flat', disk_info_mapping=disk_info_mapping)
if flat_disk.exists():
disk = flat_disk
LOG.debug('Config drive not found in RBD, falling back to the '
'instance directory', instance=instance)
- disk_info = disk_mapping[name]
- if 'unit' in disk_mapping and disk_info['bus'] == 'scsi':
+ # The 'unit' key is global to the disk_mapping (rather than for an
+ # individual disk) because it is used solely to track the incrementing
+ # unit number.
+ if 'unit' in disk_mapping and disk_info_mapping['bus'] == 'scsi':
disk_unit = disk_mapping['unit']
- disk_mapping['unit'] += 1 # Increments for the next disk added
+ disk_mapping['unit'] += 1 # Increments for the next disk
conf = disk.libvirt_info(
- disk_info, self.disk_cachemode, flavor['extra_specs'],
- disk_unit=disk_unit, boot_order=boot_order)
+ self.disk_cachemode, flavor['extra_specs'], disk_unit=disk_unit,
+ boot_order=boot_order)
return conf
- def _get_guest_fs_config(self, instance, name, image_type=None):
+ def _get_guest_fs_config(
+ self, instance, name, image_type=CONF.libvirt.images_type
+ ):
disk = self.image_backend.by_name(instance, name, image_type)
return disk.libvirt_fs_info("/", "ploop")
@@ -5514,15 +5801,11 @@ class LibvirtDriver(driver.ComputeDriver):
if not is_able or CONF.libvirt.virt_type not in ('lxc', 'kvm', 'qemu'):
return
- if guest.cputune is None:
- guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
- # Setting the default cpu.shares value to be a value
- # dependent on the number of vcpus
- guest.cputune.shares = 1024 * guest.vcpus
-
for name in cputuning:
key = "quota:cpu_" + name
if key in flavor.extra_specs:
+ if guest.cputune is None:
+ guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
setattr(guest.cputune, name,
int(flavor.extra_specs[key]))
@@ -5857,7 +6140,7 @@ class LibvirtDriver(driver.ComputeDriver):
clk.add_timer(tmrtc)
hpet = image_meta.properties.get('hw_time_hpet', False)
- guestarch = libvirt_utils.get_arch(image_meta)
+ guestarch = self._check_emulation_arch(image_meta)
if guestarch in (fields.Architecture.I686,
fields.Architecture.X86_64):
# NOTE(rfolco): HPET is a hardware timer for x86 arch.
@@ -5888,9 +6171,9 @@ class LibvirtDriver(driver.ComputeDriver):
image_meta.properties.get('img_hide_hypervisor_id'))
if CONF.libvirt.virt_type in ('qemu', 'kvm'):
- guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
+ guest.add_feature(vconfig.LibvirtConfigGuestFeatureACPI())
if not CONF.workarounds.libvirt_disable_apic:
- guest.features.append(vconfig.LibvirtConfigGuestFeatureAPIC())
+ guest.add_feature(vconfig.LibvirtConfigGuestFeatureAPIC())
if CONF.libvirt.virt_type in ('qemu', 'kvm') and os_type == 'windows':
hv = vconfig.LibvirtConfigGuestFeatureHyperV()
@@ -5902,6 +6185,15 @@ class LibvirtDriver(driver.ComputeDriver):
# with Microsoft
hv.spinlock_retries = 8191
hv.vapic = True
+ hv.vpindex = True
+ hv.runtime = True
+ hv.synic = True
+ hv.reset = True
+ hv.frequencies = True
+ hv.reenlightenment = True
+ hv.tlbflush = True
+ hv.ipi = True
+ hv.evmcs = True
# NOTE(kosamara): Spoofing the vendor_id aims to allow the nvidia
# driver to work on windows VMs. At the moment, the nvidia driver
@@ -5920,28 +6212,21 @@ class LibvirtDriver(driver.ComputeDriver):
if CONF.libvirt.virt_type in ("qemu", "kvm"):
# vmcoreinfo support is x86, ARM-only for now
- guestarch = libvirt_utils.get_arch(image_meta)
+ guestarch = self._check_emulation_arch(image_meta)
if guestarch in (
fields.Architecture.I686, fields.Architecture.X86_64,
fields.Architecture.AARCH64,
):
- guest.features.append(
+ guest.add_feature(
vconfig.LibvirtConfigGuestFeatureVMCoreInfo())
if hide_hypervisor_id:
- guest.features.append(
+ guest.add_feature(
vconfig.LibvirtConfigGuestFeatureKvmHidden())
- # NOTE(sean-k-mooney): we validate that the image and flavor
- # cannot have conflicting values in the compute API
- # so we just use the values directly. If it is not set in
- # either the flavor or image pmu will be none and we should
- # not generate the element to allow qemu to decide if a vPMU
- # should be provided for backwards compatibility.
- pmu = (flavor.extra_specs.get('hw:pmu') or
- image_meta.properties.get('hw_pmu'))
+ pmu = hardware.get_pmu_constraint(flavor, image_meta)
if pmu is not None:
- guest.features.append(
+ guest.add_feature(
vconfig.LibvirtConfigGuestFeaturePMU(pmu))
def _check_number_of_serial_console(self, num_ports):
@@ -5958,53 +6243,76 @@ class LibvirtDriver(driver.ComputeDriver):
def _add_video_driver(self, guest, image_meta, flavor):
video = vconfig.LibvirtConfigGuestVideo()
- # NOTE(ldbragst): The following logic sets the video.type
+ video.type = self._get_video_type(image_meta) or video.type
+ # Set video memory, only if the flavor's limit is set
+ video_ram = image_meta.properties.get('hw_video_ram', 0)
+ max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
+ if video_ram > max_vram:
+ raise exception.RequestedVRamTooHigh(req_vram=video_ram,
+ max_vram=max_vram)
+ if max_vram and video_ram:
+ video.vram = video_ram * units.Mi // units.Ki
+ guest.add_device(video)
+
+ # NOTE(sean-k-mooney): return the video device we added
+ # for simpler testing.
+ return video
+
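As a worked example of the VRAM handling above (the property and extra spec names are taken from the code; the numeric values are illustrative only):

    video_ram, max_vram = 8, 16                    # MiB, from image / flavor
    assert video_ram <= max_vram                   # else RequestedVRamTooHigh
    vram_kib = video_ram * units.Mi // units.Ki    # 8 MiB -> 8192 KiB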
+ def _get_video_type(
+ self,
+ image_meta: objects.ImageMeta,
+ ) -> ty.Optional[str]:
+ # NOTE(ldbragst): The following logic returns the video type
# depending on supported defaults given the architecture,
- # virtualization type, and features. The video.type attribute can
+ # virtualization type, and features. The video type can
# be overridden by the user with image_meta.properties, which
- # is carried out in the next if statement below this one.
- guestarch = libvirt_utils.get_arch(image_meta)
+ # is carried out first.
+ if image_meta.properties.get('hw_video_model'):
+ video_type = image_meta.properties.hw_video_model
+ if not self._video_model_supported(video_type):
+ raise exception.InvalidVideoMode(model=video_type)
+ return video_type
+
+ guestarch = self._check_emulation_arch(image_meta)
if CONF.libvirt.virt_type == 'parallels':
- video.type = 'vga'
+ return 'vga'
+
# NOTE(kchamart): 'virtio' is a sensible default whether or not
# the guest has the native kernel driver (called "virtio-gpu" in
# Linux) -- i.e. if the guest has the VirtIO GPU driver, it'll
# be used; otherwise, the 'virtio' model will gracefully
# fall back to VGA compatibility mode.
- elif (guestarch in (fields.Architecture.I686,
- fields.Architecture.X86_64) and not
- CONF.spice.enabled):
- video.type = 'virtio'
- elif guestarch in (fields.Architecture.PPC,
- fields.Architecture.PPC64,
- fields.Architecture.PPC64LE):
+ if (
+ guestarch in (
+ fields.Architecture.I686,
+ fields.Architecture.X86_64
+ ) and not CONF.spice.enabled
+ ):
+ return 'virtio'
+
+ if (
+ guestarch in (
+ fields.Architecture.PPC,
+ fields.Architecture.PPC64,
+ fields.Architecture.PPC64LE
+ )
+ ):
# NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default
# so use 'vga' instead when running on Power hardware.
- video.type = 'vga'
- elif guestarch == fields.Architecture.AARCH64:
+ return 'vga'
+
+ if guestarch == fields.Architecture.AARCH64:
# NOTE(kevinz): Only virtio device type is supported by AARCH64
# so use 'virtio' instead when running on AArch64 hardware.
- video.type = 'virtio'
+ return 'virtio'
+ elif guestarch == fields.Architecture.MIPSEL:
+ return 'virtio'
elif CONF.spice.enabled:
- video.type = 'qxl'
- if image_meta.properties.get('hw_video_model'):
- video.type = image_meta.properties.hw_video_model
- if not self._video_model_supported(video.type):
- raise exception.InvalidVideoMode(model=video.type)
-
- # Set video memory, only if the flavor's limit is set
- video_ram = image_meta.properties.get('hw_video_ram', 0)
- max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
- if video_ram > max_vram:
- raise exception.RequestedVRamTooHigh(req_vram=video_ram,
- max_vram=max_vram)
- if max_vram and video_ram:
- video.vram = video_ram * units.Mi // units.Ki
- guest.add_device(video)
+ return 'qxl'
- # NOTE(sean-k-mooney): return the video device we added
- # for simpler testing.
- return video
+ # NOTE(lyarwood): Return None and fall back to the default
+ # LibvirtConfigGuestVideo.type, which is currently virtio
+ return None
def _add_qga_device(self, guest, instance):
qga = vconfig.LibvirtConfigGuestChannel()
@@ -6135,6 +6443,11 @@ class LibvirtDriver(driver.ComputeDriver):
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.locked = True
+ if hardware.get_locked_memory_constraint(flavor, image_meta):
+ if not membacking:
+ membacking = vconfig.LibvirtConfigGuestMemoryBacking()
+ membacking.locked = True
+
return membacking
def _get_memory_backing_hugepages_support(self, inst_topology, numatune):
@@ -6239,12 +6552,21 @@ class LibvirtDriver(driver.ComputeDriver):
flavor: 'objects.Flavor',
) -> None:
if CONF.libvirt.virt_type in ("kvm", "qemu"):
- arch = libvirt_utils.get_arch(image_meta)
+ caps = self._host.get_capabilities()
+ host_arch = caps.host.cpu.arch
+ arch = self._check_emulation_arch(image_meta)
+ guest.os_arch = self._check_emulation_arch(image_meta)
+ if arch != host_arch:
+ # If emulating, downgrade to qemu
+ guest.virt_type = "qemu"
+
if arch in (fields.Architecture.I686, fields.Architecture.X86_64):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
mach_type = libvirt_utils.get_machine_type(image_meta)
+ self._host._check_machine_type(caps, mach_type)
+
guest.os_mach_type = mach_type
hw_firmware_type = image_meta.properties.get('hw_firmware_type')
@@ -6292,9 +6614,10 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_loader_secure = False
try:
- loader, nvram_template = self._host.get_loader(
+ loader, nvram_template, requires_smm = (
+ self._host.get_loader(
arch, mach_type,
- has_secure_boot=guest.os_loader_secure)
+ has_secure_boot=guest.os_loader_secure))
except exception.UEFINotSupported as exc:
if guest.os_loader_secure:
# we raise a specific exception if we requested secure
@@ -6306,6 +6629,11 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_loader_type = 'pflash'
guest.os_nvram_template = nvram_template
+ # if the feature set says we need SMM then enable it
+ if requires_smm:
+ guest.features.append(
+ vconfig.LibvirtConfigGuestFeatureSMM())
+
# NOTE(lyarwood): If the machine type isn't recorded in the stashed
# image metadata then record it through the system metadata table.
# This will allow the host configuration to change in the future
@@ -6380,13 +6708,25 @@ class LibvirtDriver(driver.ComputeDriver):
self._create_consoles_qemu_kvm(
guest_cfg, instance, flavor, image_meta)
- def _is_s390x_guest(self, image_meta):
- s390x_archs = (fields.Architecture.S390, fields.Architecture.S390X)
- return libvirt_utils.get_arch(image_meta) in s390x_archs
+ def _is_mipsel_guest(self, image_meta: 'objects.ImageMeta') -> bool:
+ archs = (fields.Architecture.MIPSEL, fields.Architecture.MIPS64EL)
+ return self._check_emulation_arch(image_meta) in archs
+
+ def _is_s390x_guest(self, image_meta: 'objects.ImageMeta') -> bool:
+ archs = (fields.Architecture.S390, fields.Architecture.S390X)
+ return self._check_emulation_arch(image_meta) in archs
- def _is_ppc64_guest(self, image_meta):
+ def _is_ppc64_guest(self, image_meta: 'objects.ImageMeta') -> bool:
archs = (fields.Architecture.PPC64, fields.Architecture.PPC64LE)
- return libvirt_utils.get_arch(image_meta) in archs
+ return self._check_emulation_arch(image_meta) in archs
+
+ def _is_aarch64_guest(self, image_meta: 'objects.ImageMeta') -> bool:
+ arch = fields.Architecture.AARCH64
+ return self._check_emulation_arch(image_meta) == arch
+
+ def _is_x86_guest(self, image_meta: 'objects.ImageMeta') -> bool:
+ archs = (fields.Architecture.I686, fields.Architecture.X86_64)
+ return self._check_emulation_arch(image_meta) in archs
def _create_consoles_qemu_kvm(self, guest_cfg, instance, flavor,
image_meta):
@@ -6555,7 +6895,19 @@ class LibvirtDriver(driver.ComputeDriver):
# controller (x86 gets one by default)
usbhost.model = None
if not self._guest_needs_usb(guest, image_meta):
- usbhost.model = 'none'
+ archs = (
+ fields.Architecture.PPC,
+ fields.Architecture.PPC64,
+ fields.Architecture.PPC64LE,
+ )
+ if self._check_emulation_arch(image_meta) in archs:
+ # NOTE(chateaulav): during actual testing and implementation
+ # ppc wanted None here, as this removes the controller from the
+ # domain xml entirely, whereas 'none' adds it but then disables
+ # it, causing libvirt errors and instances failing to build
+ usbhost.model = None
+ else:
+ usbhost.model = 'none'
guest.add_device(usbhost)
def _guest_add_pcie_root_ports(self, guest):
@@ -6581,38 +6933,28 @@ class LibvirtDriver(driver.ComputeDriver):
"""
caps = self._host.get_capabilities()
- # TODO(kchamart) In the third 'if' conditional below, for 'x86'
- # arch, we're assuming: when 'os_mach_type' is 'None', you'll
- # have "pc" machine type. That assumption, although it is
- # correct for the "forseeable future", it will be invalid when
- # libvirt / QEMU changes the default machine types.
- #
- # From libvirt 4.7.0 onwards (September 2018), it will ensure
- # that *if* 'pc' is available, it will be used as the default --
- # to not break existing applications. (Refer:
- # https://libvirt.org/git/?p=libvirt.git;a=commit;h=26cfb1a3
- # --"qemu: ensure default machine types don't change if QEMU
- # changes").
- #
- # But even if libvirt (>=v4.7.0) handled the default case,
- # relying on such assumptions is not robust. Instead we should
- # get the default machine type for a given architecture reliably
- # -- by Nova setting it explicitly (we already do it for Arm /
- # AArch64 & s390x). A part of this bug is being tracked here:
- # https://bugs.launchpad.net/nova/+bug/1780138).
-
# Add PCIe root port controllers for PCI Express machines
# but only if their amount is configured
if not CONF.libvirt.num_pcie_ports:
return False
- if (caps.host.cpu.arch == fields.Architecture.AARCH64 and
- guest.os_mach_type.startswith('virt')):
+
+ # Only certain architectures and machine types can handle PCIe ports;
+ # the latter will be handled by libvirt.utils.get_machine_type
+
+ if (
+ caps.host.cpu.arch == fields.Architecture.AARCH64 and
+ guest.os_mach_type.startswith('virt')
+ ):
return True
- if (caps.host.cpu.arch == fields.Architecture.X86_64 and
- guest.os_mach_type is not None and
- 'q35' in guest.os_mach_type):
+
+ if (
+ caps.host.cpu.arch == fields.Architecture.X86_64 and
+ guest.os_mach_type is not None and
+ 'q35' in guest.os_mach_type
+ ):
return True
+
return False
def _get_guest_config(self, instance, network_info, image_meta,
@@ -6763,6 +7105,8 @@ class LibvirtDriver(driver.ComputeDriver):
if vpmems:
self._guest_add_vpmems(guest, vpmems)
+ self._guest_add_iommu_device(guest, image_meta, flavor)
+
return guest
def _get_ordered_vpmems(self, instance, flavor):
@@ -6787,8 +7131,10 @@ class LibvirtDriver(driver.ComputeDriver):
size_kb = vpmem.size // units.Ki
align_kb = vpmem.align // units.Ki
- vpmem_config = vconfig.LibvirtConfigGuestVPMEM(
- devpath=vpmem.devpath, size_kb=size_kb, align_kb=align_kb)
+ vpmem_config = vconfig.LibvirtConfigGuestVPMEM()
+ vpmem_config.source_path = vpmem.devpath
+ vpmem_config.target_size = size_kb
+ vpmem_config.align_size = align_kb
# max memory size needs contain vpmem size
guest.max_memory_size += size_kb
@@ -6925,11 +7271,13 @@ class LibvirtDriver(driver.ComputeDriver):
def _guest_add_pci_devices(self, guest, instance):
if CONF.libvirt.virt_type in ('qemu', 'kvm'):
# Get all generic PCI devices (non-SR-IOV).
- for pci_dev in pci_manager.get_instance_pci_devs(instance):
+ for pci_dev in instance.get_pci_devices(
+ source=objects.InstancePCIRequest.FLAVOR_ALIAS
+ ):
guest.add_device(self._get_guest_pci_device(pci_dev))
else:
# PCI devices is only supported for QEMU/KVM hypervisor
- if pci_manager.get_instance_pci_devs(instance, 'all'):
+ if instance.get_pci_devices():
raise exception.PciDeviceUnsupportedHypervisor(
type=CONF.libvirt.virt_type
)
@@ -6968,18 +7316,21 @@ class LibvirtDriver(driver.ComputeDriver):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.listen = CONF.spice.server_listen
+ graphics.image_compression = CONF.spice.image_compression
+ graphics.jpeg_compression = CONF.spice.jpeg_compression
+ graphics.zlib_compression = CONF.spice.zlib_compression
+ graphics.playback_compression = CONF.spice.playback_compression
+ graphics.streaming_mode = CONF.spice.streaming_mode
guest.add_device(graphics)
add_video_driver = True
return add_video_driver
- def _guest_add_pointer_device(self, guest, image_meta):
- """Build the pointer device to add to the instance.
-
- The configuration is determined by examining the 'hw_input_bus' image
- metadata property, the 'hw_pointer_model' image metadata property, and
- the '[DEFAULT] pointer_model' config option in that order.
- """
+ def _get_pointer_bus_and_model(
+ self,
+ guest: vconfig.LibvirtConfigGuest,
+ image_meta: objects.ImageMeta,
+ ) -> ty.Tuple[ty.Optional[str], ty.Optional[str]]:
pointer_bus = image_meta.properties.get('hw_input_bus')
pointer_model = image_meta.properties.get('hw_pointer_model')
@@ -6993,7 +7344,7 @@ class LibvirtDriver(driver.ComputeDriver):
else:
# If the user hasn't requested anything and the host config says to
# use something other than a USB tablet, there's nothing to do
- return
+ return None, None
# For backward compatibility, we don't want to error out if the host
# configuration requests a USB tablet but the virtual machine mode is
@@ -7003,7 +7354,7 @@ class LibvirtDriver(driver.ComputeDriver):
'USB tablet requested for guests on non-HVM host; '
'in order to accept this request the machine mode should '
'be configured as HVM.')
- return
+ return None, None
# Ditto for using a USB tablet when the SPICE agent is enabled, since
# that has a paravirt mouse builtin which drastically reduces overhead;
@@ -7017,15 +7368,32 @@ class LibvirtDriver(driver.ComputeDriver):
'USB tablet requested for guests but the SPICE agent is '
'enabled; ignoring request in favour of default '
'configuration.')
- return
+ return None, None
- pointer = vconfig.LibvirtConfigGuestInput()
- pointer.type = pointer_model
- pointer.bus = pointer_bus
- guest.add_device(pointer)
+ return pointer_model, pointer_bus
- # returned for unit testing purposes
- return pointer
+ def _guest_add_pointer_device(
+ self,
+ guest: vconfig.LibvirtConfigGuest,
+ image_meta: objects.ImageMeta
+ ) -> None:
+ """Build the pointer device to add to the instance.
+
+ The configuration is determined by examining the 'hw_input_bus' image
+ metadata property, the 'hw_pointer_model' image metadata property, and
+ the '[DEFAULT] pointer_model' config option in that order.
+ """
+ pointer_model, pointer_bus = self._get_pointer_bus_and_model(
+ guest, image_meta)
+
+ if pointer_model and pointer_bus:
+ pointer = vconfig.LibvirtConfigGuestInput()
+ pointer.type = pointer_model
+ pointer.bus = pointer_bus
+ guest.add_device(pointer)
+
+ # returned for unit testing purposes
+ return pointer
def _guest_add_keyboard_device(self, guest, image_meta):
"""Add keyboard for graphical console use."""
@@ -7037,7 +7405,7 @@ class LibvirtDriver(driver.ComputeDriver):
# libvirt will automatically add a PS2 keyboard)
# TODO(stephenfin): We might want to do this for other non-x86
# architectures
- arch = libvirt_utils.get_arch(image_meta)
+ arch = self._check_emulation_arch(image_meta)
if arch != fields.Architecture.AARCH64:
return None
@@ -7051,6 +7419,92 @@ class LibvirtDriver(driver.ComputeDriver):
# returned for unit testing purposes
return keyboard
+ def _get_iommu_model(
+ self,
+ guest: vconfig.LibvirtConfigGuest,
+ image_meta: 'objects.ImageMeta',
+ flavor: 'objects.Flavor',
+ ) -> ty.Optional[str]:
+ model = flavor.extra_specs.get(
+ 'hw:viommu_model') or image_meta.properties.get(
+ 'hw_viommu_model')
+ if not model:
+ return None
+
+ is_x86 = self._is_x86_guest(image_meta)
+ is_aarch64 = self._is_aarch64_guest(image_meta)
+
+ if is_x86:
+ if guest.os_mach_type is not None and not (
+ 'q35' in guest.os_mach_type
+ ):
+ arch = self._check_emulation_arch(image_meta)
+ mtype = guest.os_mach_type if (
+ guest.os_mach_type is not None
+ ) else "unknown"
+ raise exception.InvalidVIOMMUMachineType(
+ mtype=mtype, arch=arch)
+ elif is_aarch64:
+ if guest.os_mach_type is not None and not (
+ 'virt' in guest.os_mach_type
+ ):
+ arch = self._check_emulation_arch(image_meta)
+ mtype = guest.os_mach_type if (
+ guest.os_mach_type is not None
+ ) else "unknown"
+ raise exception.InvalidVIOMMUMachineType(
+ mtype=mtype, arch=arch)
+ else:
+ raise exception.InvalidVIOMMUArchitecture(
+ arch=self._check_emulation_arch(image_meta))
+
+ if model == fields.VIOMMUModel.AUTO:
+ if self._host.has_min_version(MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL):
+ model = fields.VIOMMUModel.VIRTIO
+ elif self._is_x86_guest(image_meta) and (
+ guest.os_mach_type is not None and 'q35' in guest.os_mach_type
+ ):
+ model = fields.VIOMMUModel.INTEL
+ else:
+ # AArch64
+ model = fields.VIOMMUModel.SMMUV3
+ return model
+
+ def _guest_add_iommu_device(
+ self,
+ guest: vconfig.LibvirtConfigGuest,
+ image_meta: 'objects.ImageMeta',
+ flavor: 'objects.Flavor',
+ ) -> None:
+ """Add a virtual IOMMU device to allow e.g. vfio-pci usage."""
+ if CONF.libvirt.virt_type not in ('qemu', 'kvm'):
+ # vIOMMU requires QEMU
+ return
+
+ iommu = vconfig.LibvirtConfigGuestIOMMU()
+
+ iommu.model = self._get_iommu_model(guest, image_meta, flavor)
+ if iommu.model is None:
+ return
+
+ iommu.interrupt_remapping = True
+ iommu.caching_mode = True
+ iommu.iotlb = True
+
+ # As the QEMU-supported values are 39 and 48, we default to the
+ # larger width (48) and do not expose this to the end user.
+ if self._host.has_min_version(MIN_LIBVIRT_VIOMMU_AW_BITS):
+ iommu.aw_bits = 48
+
+ if guest.os_mach_type is not None and 'q35' in guest.os_mach_type:
+ iommu.eim = True
+ else:
+ iommu.eim = False
+ guest.add_device(iommu)
+
+ ioapic = vconfig.LibvirtConfigGuestFeatureIOAPIC()
+ guest.add_feature(ioapic)
+
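For context, a rough sketch of how a vIOMMU would be requested under this change; the extra spec name comes from _get_iommu_model() above, while the flavor construction itself is only illustrative:

    flavor = objects.Flavor(extra_specs={'hw:viommu_model': 'auto'})
    # 'auto' resolves to virtio on libvirt >= MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL,
    # otherwise to intel for q35 x86 guests and smmuv3 for AArch64 'virt'
    # guests; non-q35/non-virt machine types raise InvalidVIOMMUMachineType.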
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta, rescue=None,
block_device_info=None,
@@ -7182,7 +7636,7 @@ class LibvirtDriver(driver.ComputeDriver):
instance: 'objects.Instance',
power_on: bool = True,
pause: bool = False,
- post_xml_callback: ty.Callable = None,
+ post_xml_callback: ty.Optional[ty.Callable] = None,
) -> libvirt_guest.Guest:
"""Create a Guest from XML.
@@ -7208,6 +7662,7 @@ class LibvirtDriver(driver.ComputeDriver):
post_xml_callback()
if power_on or pause:
+ libvirt_cpu.power_up(instance)
guest.launch(pause=pause)
return guest
@@ -7230,7 +7685,8 @@ class LibvirtDriver(driver.ComputeDriver):
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
return [('network-vif-plugged', vif['id'])
- for vif in network_info if vif.get('active', True) is False]
+ for vif in network_info if vif.get('active', True) is False and
+ vif['vnic_type'] != network_model.VNIC_TYPE_REMOTE_MANAGED]
def _create_guest_with_network(
self,
@@ -7241,7 +7697,7 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info: ty.Optional[ty.Dict[str, ty.Any]],
power_on: bool = True,
vifs_already_plugged: bool = False,
- post_xml_callback: ty.Callable = None,
+ post_xml_callback: ty.Optional[ty.Callable] = None,
external_events: ty.Optional[ty.List[ty.Tuple[str, str]]] = None,
cleanup_instance_dir: bool = False,
cleanup_instance_disks: bool = False,
@@ -7273,17 +7729,9 @@ class LibvirtDriver(driver.ComputeDriver):
pause=pause, power_on=power_on,
post_xml_callback=post_xml_callback)
except eventlet.timeout.Timeout:
- # We never heard from Neutron
- LOG.warning(
- 'Timeout waiting for %(events)s for instance with '
- 'vm_state %(vm_state)s and task_state %(task_state)s',
- {
- 'events': events,
- 'vm_state': instance.vm_state,
- 'task_state': instance.task_state,
- },
- instance=instance)
-
+ # We did not receive all expected events from Neutron; a warning
+ # has already been logged by wait_for_instance_event, but we still
+ # need to decide whether the issue is fatal.
if CONF.vif_plugging_is_fatal:
# NOTE(stephenfin): don't worry, guest will be in scope since
# we can only hit this branch if the VIF plug timed out
@@ -7319,15 +7767,18 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.compute.cpu_dedicated_set:
return set()
- online_cpus = self._host.get_online_cpus()
+ if CONF.libvirt.cpu_power_management:
+ available_cpus = self._host.get_available_cpus()
+ else:
+ available_cpus = self._host.get_online_cpus()
dedicated_cpus = hardware.get_cpu_dedicated_set()
- if not dedicated_cpus.issubset(online_cpus):
+ if not dedicated_cpus.issubset(available_cpus):
msg = _("Invalid '[compute] cpu_dedicated_set' config: one or "
- "more of the configured CPUs is not online. Online "
- "cpuset(s): %(online)s, configured cpuset(s): %(req)s")
+ "more of the configured CPUs is not available. Available "
+ "cpuset(s): %(available)s, configured cpuset(s): %(req)s")
raise exception.Invalid(msg % {
- 'online': sorted(online_cpus),
+ 'available': sorted(available_cpus),
'req': sorted(dedicated_cpus)})
return dedicated_cpus
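A minimal sketch of the configuration this check now validates, using the self.flags() test helper referenced elsewhere in this patch; the CPU range and values are illustrative only:

    self.flags(cpu_dedicated_set='4-15', group='compute')
    self.flags(cpu_power_management=True, group='libvirt')
    # With power management enabled the dedicated set is checked against all
    # CPUs present on the host, not only the currently online ones.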
@@ -7530,15 +7981,7 @@ class LibvirtDriver(driver.ComputeDriver):
device_address = self._get_pci_id_from_libvirt_name(device_address)
if not device_address:
return
- try:
- return self.pgpu_type_mapping.get(device_address)
- except KeyError:
- LOG.warning("No mdev type was configured for PCI address: %s",
- device_address)
- # We accept to return None instead of raising an exception
- # because we prefer the callers to return the existing exceptions
- # in case we can't find a specific pGPU
- return
+ return self.pgpu_type_mapping.get(device_address)
def _get_resource_class_for_device(self, device_address):
"""Returns the resource class for the inventory of this device.
@@ -7738,13 +8181,33 @@ class LibvirtDriver(driver.ComputeDriver):
dev.name(): dev for dev in
self._host.list_all_devices(flags=dev_flags)
}
- net_devs = [dev for dev in devices.values() if "net" in dev.listCaps()]
+
+ # NOTE(mnaser): The listCaps() function can raise an exception if
+ # the device disappeared while we're looping, so this
+ # helper returns an empty list rather than raising.
+ # That will remove the device from Nova's resource
+ # tracker, but that is OK since the device disappeared.
+ def _safe_list_caps(dev):
+ try:
+ return dev.listCaps()
+ except libvirt.libvirtError:
+ return []
+
+ net_devs = [
+ dev for dev in devices.values() if "net" in _safe_list_caps(dev)
+ ]
vdpa_devs = [
- dev for dev in devices.values() if "vdpa" in dev.listCaps()
+ dev for dev in devices.values() if "vdpa" in _safe_list_caps(dev)
]
+ pci_devs = {
+ name: dev for name, dev in devices.items()
+ if "pci" in _safe_list_caps(dev)}
pci_info = [
- self._host._get_pcidev_info(name, dev, net_devs, vdpa_devs)
- for name, dev in devices.items() if "pci" in dev.listCaps()
+ self._host._get_pcidev_info(
+ name, dev, net_devs,
+ vdpa_devs, list(pci_devs.values())
+ )
+ for name, dev in pci_devs.items()
]
return jsonutils.dumps(pci_info)
@@ -7793,15 +8256,52 @@ class LibvirtDriver(driver.ComputeDriver):
def _get_mediated_device_information(self, devname):
"""Returns a dict of a mediated device."""
- virtdev = self._host.device_lookup_by_name(devname)
+ # LP #1951656 - In Libvirt 7.7, the mdev name now includes the PCI
+ # address of the parent device (e.g. mdev_<uuid>_<pci_address>)
+ # because mdevctl allows multiple mediated devices with the same UUID
+ # to be defined (only one can be active at a time). Since the guest
+ # information doesn't have the parent ID, try to look up which
+ # available mediated device matches the UUID. If multiple devices
+ # match the UUID, then this is an error condition.
+ try:
+ virtdev = self._host.device_lookup_by_name(devname)
+ except libvirt.libvirtError as ex:
+ if ex.get_error_code() != libvirt.VIR_ERR_NO_NODE_DEVICE:
+ raise
+ mdevs = [dev for dev in self._host.list_mediated_devices()
+ if dev.startswith(devname)]
+ # If no matching devices are found, simply raise the original
+ # exception indicating that no devices are found.
+ if not mdevs:
+ raise
+ elif len(mdevs) > 1:
+ msg = ("The mediated device name %(devname)s refers to a UUID "
+ "that is present in multiple libvirt mediated devices. "
+ "Matching libvirt mediated devices are %(devices)s. "
+ "Mediated device UUIDs must be unique for Nova." %
+ {'devname': devname,
+ 'devices': ', '.join(mdevs)})
+ raise exception.InvalidLibvirtMdevConfig(reason=msg)
+
+ LOG.debug('Found requested device %s as %s. Using that.',
+ devname, mdevs[0])
+ virtdev = self._host.device_lookup_by_name(mdevs[0])
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
+ # Starting with Libvirt 7.3, the uuid information is available in the
+ # node device information. If it's there, use that. Otherwise,
+ # fall back to the previous behavior of parsing the uuid from the
+ # devname.
+ if cfgdev.mdev_information.uuid:
+ mdev_uuid = cfgdev.mdev_information.uuid
+ else:
+ mdev_uuid = libvirt_utils.mdev_name2uuid(cfgdev.name)
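For illustration, the name matching above copes with the libvirt >= 7.7 naming scheme roughly as follows (the UUID and PCI address here are made up):

    devname = 'mdev_b2107403_110c_45b0_af87_32cc91597b8a'    # from guest XML
    node_dev = 'mdev_b2107403_110c_45b0_af87_32cc91597b8a_0000_41_00_0'
    assert node_dev.startswith(devname)  # full node device name used for lookup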
device = {
"dev_id": cfgdev.name,
- # name is like mdev_00ead764_fdc0_46b6_8db9_2963f5c815b4
- "uuid": libvirt_utils.mdev_name2uuid(cfgdev.name),
+ "uuid": mdev_uuid,
# the physical GPU PCI device
"parent": cfgdev.parent,
"type": cfgdev.mdev_information.type,
@@ -7889,6 +8389,7 @@ class LibvirtDriver(driver.ComputeDriver):
:param requested_types: Filter out the result for only mediated devices
having those types.
"""
+ LOG.debug('Searching for available mdevs...')
allocated_mdevs = self._get_all_assigned_mediated_devices()
mdevs = self._get_mediated_devices(requested_types)
available_mdevs = set()
@@ -7904,6 +8405,7 @@ class LibvirtDriver(driver.ComputeDriver):
available_mdevs.add(mdev["uuid"])
available_mdevs -= set(allocated_mdevs)
+ LOG.info('Available mdevs at: %s.', available_mdevs)
return available_mdevs
def _create_new_mediated_device(self, parent, uuid=None):
@@ -7915,6 +8417,7 @@ class LibvirtDriver(driver.ComputeDriver):
:returns: the newly created mdev UUID or None if not possible
"""
+ LOG.debug('Attempting to create new mdev...')
supported_types = self.supported_vgpu_types
# Try to see if we can still create a new mediated device
devices = self._get_mdev_capable_devices(supported_types)
@@ -7926,6 +8429,7 @@ class LibvirtDriver(driver.ComputeDriver):
# The device is not the one that was called, not creating
# the mdev
continue
+ LOG.debug('Trying on: %s.', dev_name)
dev_supported_type = self._get_vgpu_type_per_pgpu(dev_name)
if dev_supported_type and device['types'][
dev_supported_type]['availableInstances'] > 0:
@@ -7935,7 +8439,13 @@ class LibvirtDriver(driver.ComputeDriver):
pci_addr = "{}:{}:{}.{}".format(*dev_name[4:].split('_'))
chosen_mdev = nova.privsep.libvirt.create_mdev(
pci_addr, dev_supported_type, uuid=uuid)
+ LOG.info('Created mdev: %s on pGPU: %s.',
+ chosen_mdev, pci_addr)
return chosen_mdev
+ LOG.debug('Failed: No available instances on device.')
+ LOG.info('Failed to create mdev. '
+ 'No free space found among the following devices: %s.',
+ [dev['dev_id'] for dev in devices])
@utils.synchronized(VGPU_RESOURCE_SEMAPHORE)
def _allocate_mdevs(self, allocations):
@@ -8018,6 +8528,8 @@ class LibvirtDriver(driver.ComputeDriver):
# Take the first available mdev
chosen_mdev = mdevs_available.pop()
else:
+ LOG.debug('No available mdevs were found. '
+ 'Creating a new one...')
chosen_mdev = self._create_new_mediated_device(parent_device)
if not chosen_mdev:
# If we can't find devices having available VGPUs, just raise
@@ -8025,6 +8537,7 @@ class LibvirtDriver(driver.ComputeDriver):
reason='mdev-capable resource is not available')
else:
chosen_mdevs.append(chosen_mdev)
+ LOG.info('Allocated mdev: %s.', chosen_mdev)
return chosen_mdevs
def _detach_mediated_devices(self, guest):
@@ -8501,6 +9014,7 @@ class LibvirtDriver(driver.ComputeDriver):
traits.update(self._get_storage_bus_traits())
traits.update(self._get_video_model_traits())
traits.update(self._get_vif_model_traits())
+ traits.update(self._get_iommu_model_traits())
traits.update(self._get_tpm_traits())
_, invalid_traits = ot.check_traits(traits)
@@ -9026,6 +9540,7 @@ class LibvirtDriver(driver.ComputeDriver):
data["hypervisor_type"] = self._host.get_driver_type()
data["hypervisor_version"] = self._host.get_version()
data["hypervisor_hostname"] = self._host.get_hostname()
+ data["uuid"] = self._host.get_node_uuid()
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
@@ -9112,15 +9627,16 @@ class LibvirtDriver(driver.ComputeDriver):
disk_available_mb = (
(disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb)
- # Compare CPU
- try:
- if not instance.vcpu_model or not instance.vcpu_model.model:
- source_cpu_info = src_compute_info['cpu_info']
- self._compare_cpu(None, source_cpu_info, instance)
- else:
- self._compare_cpu(instance.vcpu_model, None, instance)
- except exception.InvalidCPUInfo as e:
- raise exception.MigrationPreCheckError(reason=e)
+ if not CONF.workarounds.skip_cpu_compare_on_dest:
+ # Compare CPU
+ try:
+ if not instance.vcpu_model or not instance.vcpu_model.model:
+ source_cpu_info = src_compute_info['cpu_info']
+ self._compare_cpu(None, source_cpu_info, instance)
+ else:
+ self._compare_cpu(instance.vcpu_model, None, instance)
+ except exception.InvalidCPUInfo as e:
+ raise exception.MigrationPreCheckError(reason=e)
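A minimal sketch of opting into the new workaround, again via the self.flags() test helper; whether to enable it is deployment specific:

    self.flags(skip_cpu_compare_on_dest=True, group='workarounds')
    # The destination then skips _compare_cpu() entirely during the
    # pre-live-migration checks.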
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file(instance)
@@ -9155,7 +9671,7 @@ class LibvirtDriver(driver.ComputeDriver):
# populate it if we are using multiple port bindings.
# TODO(stephenfin): Remove once we can do this unconditionally in X or
# later
- if self._network_api.supports_port_binding_extension(context):
+ if self._network_api.has_port_binding_extension(context):
data.vifs = (
migrate_data_obj.VIFMigrateData.create_skeleton_migrate_vifs(
instance.get_network_info()))
@@ -9478,7 +9994,7 @@ class LibvirtDriver(driver.ComputeDriver):
try:
cpu_xml = cpu.to_xml()
LOG.debug("cpu compare xml: %s", cpu_xml, instance=instance)
- ret = self._host.compare_cpu(cpu_xml)
+ ret = self._host.compare_hypervisor_cpu(cpu_xml)
except libvirt.libvirtError as e:
error_code = e.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
@@ -9555,7 +10071,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
# 'dest' will be substituted into 'migration_uri' so ensure
- # it does't contain any characters that could be used to
+ # it doesn't contain any characters that could be used to
# exploit the URI accepted by libvirt
if not libvirt_utils.is_valid_hostname(dest):
raise exception.InvalidHostname(hostname=dest)
@@ -10192,10 +10708,13 @@ class LibvirtDriver(driver.ComputeDriver):
:param instance: the instance being migrated
:param migrate_data: a LibvirtLiveMigrateData object
"""
- network_info = network_model.NetworkInfo(
- [vif.source_vif for vif in migrate_data.vifs
- if "source_vif" in vif and vif.source_vif])
- self._reattach_instance_vifs(context, instance, network_info)
+ # NOTE(artom) migrate_data.vifs might not be set if our Neutron doesn't
+ # have the multiple port bindings extension.
+ if 'vifs' in migrate_data and migrate_data.vifs:
+ network_info = network_model.NetworkInfo(
+ [vif.source_vif for vif in migrate_data.vifs
+ if "source_vif" in vif and vif.source_vif])
+ self._reattach_instance_vifs(context, instance, network_info)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
@@ -10476,14 +10995,13 @@ class LibvirtDriver(driver.ComputeDriver):
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
- libvirt_utils.create_image(info['type'], instance_disk,
- info['virt_disk_size'])
+ libvirt_utils.create_image(
+ instance_disk, info['type'], info['virt_disk_size'])
elif info['backing_file']:
# Creating backing file follows same way as spawning instances.
cache_name = os.path.basename(info['backing_file'])
- disk = self.image_backend.by_name(instance, instance_disk,
- CONF.libvirt.images_type)
+ disk = self.image_backend.by_name(instance, instance_disk)
if cache_name.startswith('ephemeral'):
# The argument 'size' is used by image.cache to
# validate disk size retrieved from cache against
@@ -10559,16 +11077,37 @@ class LibvirtDriver(driver.ComputeDriver):
if not CONF.workarounds.enable_qemu_monitor_announce_self:
return
- LOG.info('Sending announce-self command to QEMU monitor',
- instance=instance)
+ current_attempt = 0
- try:
- guest = self._host.get_guest(instance)
- guest.announce_self()
- except Exception:
- LOG.warning('Failed to send announce-self command to QEMU monitor',
- instance=instance)
- LOG.exception()
+ max_attempts = (
+ CONF.workarounds.qemu_monitor_announce_self_count)
+ # qemu_monitor_announce_self_interval is specified in seconds
+ announce_pause = (
+ CONF.workarounds.qemu_monitor_announce_self_interval)
+
+ while current_attempt < max_attempts:
+ # Increment attempt
+ current_attempt += 1
+
+ # Only use announce_pause after the first attempt to avoid
+ # pausing before calling announce_self for the first attempt
+ if current_attempt != 1:
+ greenthread.sleep(announce_pause)
+
+ LOG.info('Sending announce-self command to QEMU monitor. '
+ 'Attempt %(current_attempt)s of %(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ try:
+ guest = self._host.get_guest(instance)
+ guest.announce_self()
+ except Exception:
+ LOG.warning('Failed to send announce-self command to '
+ 'QEMU monitor. Attempt %(current_attempt)s of '
+ '%(max_attempts)s',
+ {'current_attempt': current_attempt,
+ 'max_attempts': max_attempts}, instance=instance)
+ LOG.exception()
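A sketch of the knobs driving the retry loop above, using the same self.flags() helper; the option names are taken from the CONF lookups in the code, the count and interval values are illustrative:

    self.flags(enable_qemu_monitor_announce_self=True,
               qemu_monitor_announce_self_count=3,
               qemu_monitor_announce_self_interval=1,
               group='workarounds')
    # announce_self() is then attempted up to 3 times, sleeping 1 second
    # between attempts (no sleep before the first attempt).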
def post_live_migration_at_destination(self, context,
instance,
@@ -10818,6 +11357,9 @@ class LibvirtDriver(driver.ComputeDriver):
def get_available_nodes(self, refresh=False):
return [self._host.get_hostname()]
+ def get_nodenames_by_uuid(self, refresh=False):
+ return {self._host.get_node_uuid(): self._host.get_hostname()}
+
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
return self._host.get_cpu_stats()
@@ -10973,6 +11515,9 @@ class LibvirtDriver(driver.ComputeDriver):
disk_info = self._get_instance_disk_info(instance, block_device_info)
try:
+ # If cleanup failed in previous resize attempts we try to remedy
+ # that before a resize is tried again
+ self._cleanup_failed_instance_base(inst_base_resize)
os.rename(inst_base, inst_base_resize)
# if we are migrating the instance with shared instance path then
# create the directory. If it is a remote node the directory
@@ -11196,9 +11741,9 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.debug("finish_migration finished successfully.", instance=instance)
- def _cleanup_failed_migration(self, inst_base):
- """Make sure that a failed migrate doesn't prevent us from rolling
- back in a revert.
+ def _cleanup_failed_instance_base(self, inst_base):
+ """Make sure that a failed migrate or resize doesn't prevent us from
+ rolling back in a revert or retrying a resize.
"""
try:
shutil.rmtree(inst_base)
@@ -11254,7 +11799,7 @@ class LibvirtDriver(driver.ComputeDriver):
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
- self._cleanup_failed_migration(inst_base)
+ self._cleanup_failed_instance_base(inst_base)
os.rename(inst_base_resize, inst_base)
root_disk = self.image_backend.by_name(instance, 'disk')
@@ -11771,6 +12316,30 @@ class LibvirtDriver(driver.ComputeDriver):
in supported_models for model in all_models
}
+ def _get_iommu_model_traits(self) -> ty.Dict[str, bool]:
+ """Get iommu model traits based on the currently enabled virt_type.
+ Not all traits generated by this function may be valid and the result
+ should be validated.
+ :return: A dict of trait names mapped to boolean values.
+ """
+ dom_caps = self._host.get_domain_capabilities()
+ supported_models: ty.Set[str] = {fields.VIOMMUModel.AUTO}
+ # our min version of qemu/libvirt supports q35 and virt machine types.
+ # They also support the smmuv3 and intel iommu models so if the qemu
+ # binary is available we can report the trait.
+ if fields.Architecture.AARCH64 in dom_caps:
+ supported_models.add(fields.VIOMMUModel.SMMUV3)
+ if fields.Architecture.X86_64 in dom_caps:
+ supported_models.add(fields.VIOMMUModel.INTEL)
+ # the virtio iommu model requires a newer libvirt than our min
+ # libvirt so we need to check the version explicitly.
+ if self._host.has_min_version(MIN_LIBVIRT_VIOMMU_VIRTIO_MODEL):
+ supported_models.add(fields.VIOMMUModel.VIRTIO)
+ return {
+ f'COMPUTE_VIOMMU_MODEL_{model.replace("-", "_").upper()}': model
+ in supported_models for model in fields.VIOMMUModel.ALL
+ }
+
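For reference, a sketch of the trait names the helper above produces; which values end up True depends on host support:

    # e.g. {'COMPUTE_VIOMMU_MODEL_AUTO': True,
    #       'COMPUTE_VIOMMU_MODEL_INTEL': True,
    #       'COMPUTE_VIOMMU_MODEL_SMMUV3': False,
    #       'COMPUTE_VIOMMU_MODEL_VIRTIO': False}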
def _get_storage_bus_traits(self) -> ty.Dict[str, bool]:
"""Get storage bus traits based on the currently enabled virt_type.
diff --git a/nova/virt/libvirt/event.py b/nova/virt/libvirt/event.py
index a7d2a3624f..56951dc11c 100644
--- a/nova/virt/libvirt/event.py
+++ b/nova/virt/libvirt/event.py
@@ -9,6 +9,8 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import typing as ty
+
from nova.virt import event
@@ -22,7 +24,10 @@ class LibvirtEvent(event.InstanceEvent):
class DeviceEvent(LibvirtEvent):
"""Base class for device related libvirt events"""
- def __init__(self, uuid: str, dev: str, timestamp: float = None):
+ def __init__(self,
+ uuid: str,
+ dev: str,
+ timestamp: ty.Optional[float] = None):
super().__init__(uuid, timestamp)
self.dev = dev
diff --git a/nova/virt/libvirt/guest.py b/nova/virt/libvirt/guest.py
index 53080e41f0..c40c3c4a7f 100644
--- a/nova/virt/libvirt/guest.py
+++ b/nova/virt/libvirt/guest.py
@@ -254,8 +254,17 @@ class Guest(object):
"""
if cfg:
+ LOG.debug(f'looking for interface given config: {cfg}')
interfaces = self.get_all_devices(
type(cfg), from_persistent_config)
+ if not interfaces:
+ LOG.debug(f'No interface of type: {type(cfg)} found in domain')
+ return None
+ # FIXME(sean-k-mooney): we should be able to print the list of
+ # interfaces, however some tests use incomplete objects that can't
+ # be printed due to incomplete mocks or defects in the libvirt
+ # fixture. Let's address this later.
+ # LOG.debug(f'within interfaces: {list(interfaces)}')
for interface in interfaces:
# NOTE(leehom) LibvirtConfigGuest get from domain and
# LibvirtConfigGuest generated by
@@ -264,6 +273,16 @@ class Guest(object):
# equality check based on available information on nova side
if cfg == interface:
return interface
+ else:
+ # NOTE(sean-k-mooney): {list(interfaces)} could be used
+ # instead of self._domain.XMLDesc(0) once all tests have
+ # printable interfaces see the comment above ^.
+ # While the XML is more verbose it should always work
+ # for our current test suite and in production code.
+ LOG.debug(
+ f'interface for config: {cfg} '
+ f'not found in domain: {self._domain.XMLDesc(0)}'
+ )
return None
def get_vcpus_info(self):
@@ -533,7 +552,7 @@ class Guest(object):
:param no_metadata: Make snapshot without remembering it
:param disk_only: Disk snapshot, no system checkpoint
:param reuse_ext: Reuse any existing external files
- :param quiesce: Use QGA to quiece all mounted file systems
+ :param quiesce: Use QGA to quiesce all mounted file systems
"""
flags = no_metadata and (
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA or 0)
@@ -655,6 +674,7 @@ class Guest(object):
stats = self._domain.jobStats()
return JobInfo(**stats)
except libvirt.libvirtError as ex:
+ errmsg = ex.get_error_message()
if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
# Remote libvirt doesn't support new API
LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
@@ -667,6 +687,12 @@ class Guest(object):
# away completely
LOG.debug("Domain has shutdown/gone away: %s", ex)
return JobInfo(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
+ elif (ex.get_error_code() == libvirt.VIR_ERR_INTERNAL_ERROR and
+ errmsg and "migration was active, "
+ "but no RAM info was set" in errmsg):
+ LOG.debug("Migration is active or completed but "
+ "virDomainGetJobStats is missing ram: %s", ex)
+ return JobInfo(type=libvirt.VIR_DOMAIN_JOB_NONE)
else:
LOG.debug("Failed to get job stats: %s", ex)
raise
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index 5c39dd320f..1ae86d9f47 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -31,6 +31,7 @@ from collections import defaultdict
import fnmatch
import glob
import inspect
+from lxml import etree
import operator
import os
import queue
@@ -46,6 +47,7 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import importutils
+from oslo_utils import strutils
from oslo_utils import units
from oslo_utils import versionutils
@@ -64,6 +66,7 @@ from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt import utils as libvirt_utils
+import nova.virt.node # noqa
if ty.TYPE_CHECKING:
import libvirt
@@ -136,6 +139,7 @@ class Host(object):
self._caps = None
self._domain_caps = None
self._hostname = None
+ self._node_uuid = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
@@ -488,7 +492,7 @@ class Host(object):
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
- self._event_thread.setDaemon(True)
+ self._event_thread.daemon = True
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
@@ -736,6 +740,14 @@ class Host(object):
return doms
+ def get_available_cpus(self):
+ """Get the set of CPUs that exist on the host.
+
+ :returns: set of CPUs, raises libvirtError on error
+ """
+ cpus, cpu_map, online = self.get_connection().getCPUMap()
+ return {cpu for cpu in range(cpus)}
+
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
@@ -1057,6 +1069,12 @@ class Host(object):
{'old': self._hostname, 'new': hostname})
return self._hostname
+ def get_node_uuid(self):
+ """Returns the UUID of this node."""
+ if not self._node_uuid:
+ self._node_uuid = nova.virt.node.get_local_node_uuid()
+ return self._node_uuid
+
def find_secret(self, usage_type, usage_id):
"""Find a secret.
@@ -1197,6 +1215,25 @@ class Host(object):
stats["frequency"] = self._get_hardware_info()[3]
return stats
+ def _check_machine_type(self, caps, mach_type):
+ """Validate if hw machine type is in capabilities of the host
+
+ :param caps: host capabilities
+ :param mach_type: machine type
+ """
+ possible_machine_types = []
+
+ caps_tree = etree.fromstring(str(caps))
+ for guest in caps_tree.findall('guest'):
+ for machine in guest.xpath('arch/machine'):
+ possible_machine_types.append(machine.text)
+
+ if mach_type not in possible_machine_types:
+ raise exception.InvalidMachineType(
+ message="'%s' is not valid/supported machine type, "
+ "Supported machine types are: %s" % (
+ mach_type, possible_machine_types))
+
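For illustration, the XPath above walks a host capabilities fragment of roughly this shape (the machine type names are examples only):

    # <capabilities>
    #   <guest>
    #     <arch name='x86_64'>
    #       <machine>pc-i440fx-6.2</machine>
    #       <machine>pc-q35-6.2</machine>
    #     </arch>
    #   </guest>
    # </capabilities>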
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
@@ -1229,12 +1266,65 @@ class Host(object):
cfgdev.parse_str(xmlstr)
return cfgdev.pci_capability.features
+ def _get_vf_parent_pci_vpd_info(
+ self,
+ vf_device: 'libvirt.virNodeDevice',
+ parent_pf_name: str,
+ candidate_devs: ty.List['libvirt.virNodeDevice']
+ ) -> ty.Optional[vconfig.LibvirtConfigNodeDeviceVpdCap]:
+ """Returns PCI VPD info of a parent device of a PCI VF.
+
+ :param vf_device: a VF device object to use for lookup.
+ :param str parent_pf_name: parent PF name formatted as pci_dddd_bb_ss_f
+ :param candidate_devs: devices that could be parent devs for the VF.
+ :returns: A VPD capability object of a parent device.
+ """
+ parent_dev = next(
+ (dev for dev in candidate_devs if dev.name() == parent_pf_name),
+ None
+ )
+ if parent_dev is None:
+ return None
+
+ xmlstr = parent_dev.XMLDesc(0)
+ cfgdev = vconfig.LibvirtConfigNodeDevice()
+ cfgdev.parse_str(xmlstr)
+ return cfgdev.pci_capability.vpd_capability
+
+ @staticmethod
+ def _get_vpd_card_serial_number(
+ dev: 'libvirt.virNodeDevice',
+ ) -> ty.Optional[ty.List[str]]:
+ """Returns a card serial number stored in PCI VPD (if present)."""
+ xmlstr = dev.XMLDesc(0)
+ cfgdev = vconfig.LibvirtConfigNodeDevice()
+ cfgdev.parse_str(xmlstr)
+ vpd_cap = cfgdev.pci_capability.vpd_capability
+ if not vpd_cap:
+ return None
+ return vpd_cap.card_serial_number
+
+ def _get_pf_details(self, device: dict, pci_address: str) -> dict:
+ if device.get('dev_type') != fields.PciDeviceType.SRIOV_PF:
+ return {}
+
+ try:
+ return {
+ 'mac_address': pci_utils.get_mac_by_pci_address(pci_address)
+ }
+ except exception.PciDeviceNotFoundById:
+ LOG.debug(
+ 'Cannot get MAC address of the PF %s. It is probably attached '
+ 'to a guest already', pci_address)
+ return {}
+
def _get_pcidev_info(
self,
devname: str,
dev: 'libvirt.virNodeDevice',
net_devs: ty.List['libvirt.virNodeDevice'],
vdpa_devs: ty.List['libvirt.virNodeDevice'],
+ pci_devs: ty.List['libvirt.virNodeDevice'],
) -> ty.Dict[str, ty.Union[str, dict]]:
"""Returns a dict of PCI device."""
@@ -1297,23 +1387,112 @@ class Host(object):
return {'dev_type': fields.PciDeviceType.STANDARD}
+ def _get_vpd_details(
+ device_dict: dict,
+ device: 'libvirt.virNodeDevice',
+ pci_devs: ty.List['libvirt.virNodeDevice']
+ ) -> ty.Dict[str, ty.Any]:
+ """Get information from PCI VPD (if present).
+
+ PCI/PCIe devices may include the optional VPD capability. It may
+ contain useful information such as a unique serial number
+ assigned at the factory.
+
+ If a device is a VF and it does not contain the VPD capability,
+ a parent device's VPD is used (if present) as a fallback to
+ retrieve the unique add-in card number. Whether a VF exposes
+ the VPD capability or not may be controlled via a vendor-specific
+ firmware setting.
+ """
+ vpd_info: ty.Dict[str, ty.Any] = {}
+ # At the time of writing only the serial number had a clear
+ # use-case. However, the set of fields may be extended.
+ card_serial_number = self._get_vpd_card_serial_number(device)
+
+ if (not card_serial_number and
+ device_dict.get('dev_type') == fields.PciDeviceType.SRIOV_VF
+ ):
+ # Format the address of a physical function to use underscores
+ # since that's how Libvirt formats the <name> element content.
+ pf_addr = device_dict.get('parent_addr')
+ if not pf_addr:
+ LOG.warning("A VF device dict does not have a parent PF "
+ "address in it which is unexpected. Skipping "
+ "serial number retrieval")
+ return vpd_info
+
+ formatted_addr = pf_addr.replace('.', '_').replace(':', '_')
+ vpd_cap = self._get_vf_parent_pci_vpd_info(
+ device, f'pci_{formatted_addr}', pci_devs)
+ if vpd_cap is not None:
+ card_serial_number = vpd_cap.card_serial_number
+
+ if card_serial_number:
+ vpd_info = {'card_serial_number': card_serial_number}
+ return vpd_info
+
+ def _get_sriov_netdev_details(
+ device_dict: dict,
+ device: 'libvirt.virNodeDevice',
+ ) -> ty.Dict[str, ty.Dict[str, ty.Any]]:
+ """Get SR-IOV related information"""
+ sriov_info: ty.Dict[str, ty.Any] = {}
+
+ if device_dict.get('dev_type') != fields.PciDeviceType.SRIOV_VF:
+ return sriov_info
+
+ pf_addr = device_dict['parent_addr']
+
+ # A netdev VF may be associated with a PF which does not have a
+ # netdev as described in LP #1915255.
+ try:
+ sriov_info.update({
+ 'pf_mac_address': pci_utils.get_mac_by_pci_address(pf_addr)
+ })
+ except exception.PciDeviceNotFoundById:
+ LOG.debug(f'Could not get a PF mac for {pf_addr}')
+ # For the purposes for which Nova currently uses this information,
+ # both a PF MAC and a VF number are needed, so we return
+ # an empty dict if a PF MAC is not available.
+ return {}
+
+ vf_num = pci_utils.get_vf_num_by_pci_address(
+ device_dict['address'])
+
+ sriov_info.update({'vf_num': vf_num})
+ return sriov_info
+
def _get_device_capabilities(
device_dict: dict,
device: 'libvirt.virNodeDevice',
+ pci_devs: ty.List['libvirt.virNodeDevice'],
net_devs: ty.List['libvirt.virNodeDevice']
- ) -> ty.Dict[str, ty.Dict[str, ty.Any]]:
+ ) -> ty.Dict[str, ty.Any]:
"""Get PCI VF device's additional capabilities.
If a PCI device is a virtual function, this function reads the PCI
parent's network capabilities (must be always a NIC device) and
appends this information to the device's dictionary.
"""
- caps: ty.Dict[str, ty.Dict[str, ty.Any]] = {}
+ caps: ty.Dict[str, ty.Any] = {}
if device_dict.get('dev_type') == fields.PciDeviceType.SRIOV_VF:
pcinet_info = self._get_pcinet_info(device, net_devs)
if pcinet_info:
- return {'capabilities': {'network': pcinet_info}}
+ caps['network'] = pcinet_info
+ # Only attempt to get SR-IOV details if a VF is a netdev
+ # because there are no use cases for other dev types yet.
+ sriov_caps = _get_sriov_netdev_details(device_dict, dev)
+ if sriov_caps:
+ caps['sriov'] = sriov_caps
+
+ vpd_info = _get_vpd_details(device_dict, device, pci_devs)
+ if vpd_info:
+ caps['vpd'] = vpd_info
+
+ if caps:
+ return {'capabilities': caps}
+
return caps
xmlstr = dev.XMLDesc(0)
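For illustration only (the feature list, MAC address and serial number below are invented), a VF that exposes both SR-IOV and VPD data would now yield a device entry shaped roughly like this:

# Hypothetical result of _get_device_capabilities() for an SR-IOV VF.
device_capabilities = {
    'capabilities': {
        'network': ['rx', 'tx', 'sg', 'tso', 'gso', 'gro', 'rxvlan', 'txvlan'],
        'sriov': {'pf_mac_address': 'fa:16:3e:aa:bb:cc', 'vf_num': 1},
        'vpd': {'card_serial_number': 'MT2113X00000'},
    }
}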
@@ -1339,7 +1518,9 @@ class Host(object):
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(
_get_device_type(cfgdev, address, dev, net_devs, vdpa_devs))
- device.update(_get_device_capabilities(device, dev, net_devs))
+ device.update(_get_device_capabilities(device, dev,
+ pci_devs, net_devs))
+ device.update(self._get_pf_details(device, address))
return device
def get_vdpa_nodedev_by_address(
@@ -1361,7 +1542,7 @@ class Host(object):
vdpa_devs = [
dev for dev in devices.values() if "vdpa" in dev.listCaps()]
pci_info = [
- self._get_pcidev_info(name, dev, [], vdpa_devs) for name, dev
+ self._get_pcidev_info(name, dev, [], vdpa_devs, []) for name, dev
in devices.items() if "pci" in dev.listCaps()]
parent_dev = next(
dev for dev in pci_info if dev['address'] == pci_address)
@@ -1401,7 +1582,7 @@ class Host(object):
def list_mediated_devices(self, flags=0):
"""Lookup mediated devices.
- :returns: a list of virNodeDevice instance
+ :returns: a list of strings with the names of the mediated devices
"""
return self._list_devices("mdev", flags=flags)
@@ -1440,21 +1621,66 @@ class Host(object):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
+ def compare_hypervisor_cpu(self, xmlDesc, flags=0):
+ """Compares the given CPU description with the CPU provided by
+ the host hypervisor. This is different from the older method,
+ compare_cpu(), which compares a given CPU definition with the
+ host CPU without considering the abilities of the host
+ hypervisor. Apart from @xmlDesc, all other parameters to the
+ compareHypervisorCPU API are optional (libvirt will choose
+ sensible defaults).
+ """
+ emulator = None
+ arch = None
+ machine = None
+ virttype = None
+ return self.get_connection().compareHypervisorCPU(
+ emulator, arch, machine, virttype, xmlDesc, flags)
+
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
+ return self._has_cgroupsv1_cpu_controller() or \
+ self._has_cgroupsv2_cpu_controller()
+
+ def _has_cgroupsv1_cpu_controller(self):
+ LOG.debug(f"Searching host: '{self.get_hostname()}' "
+ "for CPU controller through CGroups V1...")
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
+ LOG.debug("CPU controller found on host.")
+ return True
+ LOG.debug("CPU controller missing on host.")
+ return False
+ except IOError as ex:
+ LOG.debug(f"Search failed due to: '{ex}'. "
+ "Maybe the host is not running under CGroups V1. "
+ "Deemed host to be missing controller by this approach.")
+ return False
+
+ def _has_cgroupsv2_cpu_controller(self):
+ LOG.debug(f"Searching host: '{self.get_hostname()}' "
+ "for CPU controller through CGroups V2...")
+ try:
+ with open("/sys/fs/cgroup/cgroup.controllers", "r") as fd:
+ for line in fd.readlines():
+ bits = line.split()
+ if "cpu" in bits:
+ LOG.debug("CPU controller found on host.")
return True
+ LOG.debug("CPU controller missing on host.")
return False
- except IOError:
+ except IOError as ex:
+ LOG.debug(f"Search failed due to: '{ex}'. "
+ "Maybe the host is not running under CGroups V2. "
+ "Deemed host to be missing controller by this approach.")
return False
def get_canonical_machine_type(self, arch, machine) -> str:
@@ -1570,9 +1796,9 @@ class Host(object):
return False
with open(SEV_KERNEL_PARAM_FILE) as f:
- contents = f.read()
- LOG.debug("%s contains [%s]", SEV_KERNEL_PARAM_FILE, contents)
- return contents == "1\n"
+ content = f.read()
+ LOG.debug("%s contains [%s]", SEV_KERNEL_PARAM_FILE, content)
+ return strutils.bool_from_string(content)
@property
def supports_amd_sev(self) -> bool:
@@ -1616,6 +1842,23 @@ class Host(object):
return self._supports_amd_sev
@property
+ def supports_remote_managed_ports(self) -> bool:
+ """Determine if the host supports remote managed ports.
+
+ Returns a boolean indicating whether remote managed ports are
+ possible to use on this host.
+
+ The check is based on the Libvirt version which added support for
+ parsing and exposing PCI VPD, since the use of remote managed ports
+ depends on a card serial number (if present in the VPD).
+ https://libvirt.org/news.html#v7-9-0-2021-11-01
+
+ The actual presence of a card serial number for a particular device
+ is meant to be checked elsewhere.
+ """
+ return self.has_min_version(lv_ver=(7, 9, 0))
+
+ @property
def loaders(self) -> ty.List[dict]:
"""Retrieve details of loader configuration for the host.
@@ -1642,11 +1885,11 @@ class Host(object):
arch: str,
machine: str,
has_secure_boot: bool,
- ) -> ty.Tuple[str, str]:
+ ) -> ty.Tuple[str, str, bool]:
"""Get loader for the specified architecture and machine type.
- :returns: A tuple of the bootloader executable path and the NVRAM
- template path.
+ :returns: A tuple of the bootloader executable path, the NVRAM
+ template path and a bool indicating whether we need to enable SMM.
"""
machine = self.get_canonical_machine_type(arch, machine)
@@ -1676,6 +1919,7 @@ class Host(object):
return (
loader['mapping']['executable']['filename'],
loader['mapping']['nvram-template']['filename'],
+ 'requires-smm' in loader['features'],
)
raise exception.UEFINotSupported()
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 08bad69489..0a64ef43dd 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -81,14 +81,24 @@ def _update_utime_ignore_eacces(path):
class Image(metaclass=abc.ABCMeta):
SUPPORTS_CLONE = False
-
- def __init__(self, path, source_type, driver_format, is_block_dev=False):
+ SUPPORTS_LUKS = False
+
+ def __init__(
+ self,
+ path,
+ source_type,
+ driver_format,
+ is_block_dev=False,
+ disk_info_mapping=None
+ ):
"""Image initialization.
:param path: libvirt's representation of the path of this disk.
:param source_type: block or file
:param driver_format: raw or qcow2
:param is_block_dev:
+ :param disk_info_mapping: disk_info['mapping'][device] metadata
+ specific to this image generated by nova.virt.libvirt.blockinfo.
"""
if (CONF.ephemeral_storage_encryption.enabled and
not self._supports_encryption()):
@@ -105,6 +115,8 @@ class Image(metaclass=abc.ABCMeta):
self.is_block_dev = is_block_dev
self.preallocate = False
+ self.disk_info_mapping = disk_info_mapping
+
# NOTE(dripton): We store lines of json (path, disk_format) in this
# file, for some image types, to prevent attacks based on changing the
# disk_format.
@@ -145,22 +157,23 @@ class Image(metaclass=abc.ABCMeta):
pass
def libvirt_info(
- self, disk_info, cache_mode, extra_specs, boot_order=None,
- disk_unit=None,
+ self, cache_mode, extra_specs, boot_order=None, disk_unit=None,
):
"""Get `LibvirtConfigGuestDisk` filled for this image.
- :disk_info: Metadata generated by libvirt.blockinfo.get_disk_mapping
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
:boot_order: Disk device boot order
"""
- disk_bus = disk_info['bus']
+ if self.disk_info_mapping is None:
+ raise AttributeError(
+ 'Image must have disk_info_mapping to call libvirt_info()')
+ disk_bus = self.disk_info_mapping['bus']
info = vconfig.LibvirtConfigGuestDisk()
info.source_type = self.source_type
- info.source_device = disk_info['type']
+ info.source_device = self.disk_info_mapping['type']
info.target_bus = disk_bus
- info.target_dev = disk_info['dev']
+ info.target_dev = self.disk_info_mapping['dev']
info.driver_cache = cache_mode
info.driver_discard = self.discard_mode
info.driver_io = self.driver_io
@@ -522,11 +535,16 @@ class Flat(Image):
when creating a disk from a qcow2 if force_raw_images is not set in config.
"""
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
self.disk_name = disk_name
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Flat, self).__init__(path, "file", "raw", is_block_dev=False)
+ super().__init__(
+ path, "file", "raw", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.preallocate = (
strutils.to_slug(CONF.preallocate_images) == 'space')
@@ -568,15 +586,21 @@ class Flat(Image):
def copy_raw_image(base, target, size):
libvirt_utils.copy_image(base, target)
if size:
- image = imgmodel.LocalFileImage(target,
- self.driver_format)
- disk.extend(image, size)
+ self.resize_image(size)
generating = 'image_id' not in kwargs
if generating:
if not self.exists():
# Generating image in place
prepare_template(target=self.path, *args, **kwargs)
+
+ # NOTE(plibeau): extend the disk when the image is no longer
+ # accessible by the customer but the base image is still
+ # available on the source compute during a resize of the
+ # instance.
+ else:
+ if size:
+ self.resize_image(size)
else:
if not os.path.exists(base):
prepare_template(target=base, *args, **kwargs)
@@ -608,10 +632,15 @@ class Flat(Image):
class Qcow2(Image):
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Qcow2, self).__init__(path, "file", "qcow2", is_block_dev=False)
+ super().__init__(
+ path, "file", "qcow2", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.preallocate = (
strutils.to_slug(CONF.preallocate_images) == 'space')
@@ -625,7 +654,8 @@ class Qcow2(Image):
@utils.synchronized(filename, external=True, lock_path=self.lock_path)
def create_qcow2_image(base, target, size):
- libvirt_utils.create_cow_image(base, target, size)
+ libvirt_utils.create_image(
+ target, 'qcow2', size, backing_file=base)
# Download the unmodified base image unless we already have a copy.
if not os.path.exists(base):
@@ -689,7 +719,10 @@ class Lvm(Image):
def escape(filename):
return filename.replace('_', '__')
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None,
+ disk_info_mapping=None
+ ):
self.ephemeral_key_uuid = instance.get('ephemeral_key_uuid')
if self.ephemeral_key_uuid is not None:
@@ -718,7 +751,10 @@ class Lvm(Image):
self.lv_path = os.path.join('/dev', self.vg, self.lv)
path = '/dev/mapper/' + dmcrypt.volume_name(self.lv)
- super(Lvm, self).__init__(path, "block", "raw", is_block_dev=True)
+ super(Lvm, self).__init__(
+ path, "block", "raw", is_block_dev=True,
+ disk_info_mapping=disk_info_mapping
+ )
# TODO(sbauza): Remove the config option usage and default the
# LVM logical volume creation to preallocate the full size only.
@@ -826,7 +862,9 @@ class Rbd(Image):
SUPPORTS_CLONE = True
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
if not CONF.libvirt.images_rbd_pool:
raise RuntimeError(_('You should specify'
' images_rbd_pool'
@@ -848,31 +886,32 @@ class Rbd(Image):
if self.driver.ceph_conf:
path += ':conf=' + self.driver.ceph_conf
- super(Rbd, self).__init__(path, "block", "rbd", is_block_dev=False)
+ super().__init__(
+ path, "block", "rbd", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.discard_mode = CONF.libvirt.hw_disk_discard
def libvirt_info(
- self, disk_info, cache_mode, extra_specs, boot_order=None,
- disk_unit=None,
+ self, cache_mode, extra_specs, boot_order=None, disk_unit=None
):
"""Get `LibvirtConfigGuestDisk` filled for this image.
- :disk_info: Metadata generated by libvirt.blockinfo.get_disk_mapping
:cache_mode: Caching mode for this image
:extra_specs: Instance type extra specs dict.
:boot_order: Disk device boot order
"""
info = vconfig.LibvirtConfigGuestDisk()
- disk_bus = disk_info['bus']
+ disk_bus = self.disk_info_mapping['bus']
hosts, ports = self.driver.get_mon_addrs()
- info.source_device = disk_info['type']
+ info.source_device = self.disk_info_mapping['type']
info.driver_format = 'raw'
info.driver_cache = cache_mode
info.driver_discard = self.discard_mode
info.target_bus = disk_bus
- info.target_dev = disk_info['dev']
+ info.target_dev = self.disk_info_mapping['dev']
info.source_type = 'network'
info.source_protocol = 'rbd'
info.source_name = '%s/%s' % (self.driver.pool, self.rbd_name)
@@ -1189,10 +1228,15 @@ class Rbd(Image):
class Ploop(Image):
- def __init__(self, instance=None, disk_name=None, path=None):
+ def __init__(
+ self, instance=None, disk_name=None, path=None, disk_info_mapping=None
+ ):
path = (path or os.path.join(libvirt_utils.get_instance_path(instance),
disk_name))
- super(Ploop, self).__init__(path, "file", "ploop", is_block_dev=False)
+ super().__init__(
+ path, "file", "ploop", is_block_dev=False,
+ disk_info_mapping=disk_info_mapping
+ )
self.resolve_driver_format()
@@ -1295,18 +1339,25 @@ class Backend(object):
raise RuntimeError(_('Unknown image_type=%s') % image_type)
return image
- def by_name(self, instance, name, image_type=None):
+ def by_name(self, instance, name, image_type=None, disk_info_mapping=None):
"""Return an Image object for a disk with the given name.
:param instance: the instance which owns this disk
:param name: The name of the disk
:param image_type: (Optional) Image type.
Default is CONF.libvirt.images_type.
+ :param disk_info_mapping: (Optional) Disk info mapping dict
:return: An Image object for the disk with given name and instance.
:rtype: Image
"""
+ # NOTE(artom) To pass functional tests, wherein the code here is loaded
+ # *before* any config with self.flags() is done, we need to have the
+ # default inline in the method, and not in the kwarg declaration.
+ image_type = image_type or CONF.libvirt.images_type
backend = self.backend(image_type)
- return backend(instance=instance, disk_name=name)
+ return backend(
+ instance=instance, disk_name=name,
+ disk_info_mapping=disk_info_mapping)
def by_libvirt_path(self, instance, path, image_type=None):
"""Return an Image object for a disk with the given libvirt path.
diff --git a/nova/virt/libvirt/migration.py b/nova/virt/libvirt/migration.py
index 8cea9f2983..4726111a76 100644
--- a/nova/virt/libvirt/migration.py
+++ b/nova/virt/libvirt/migration.py
@@ -62,6 +62,7 @@ def get_updated_guest_xml(instance, guest, migrate_data, get_volume_config,
xml_doc, migrate_data, instance, get_volume_config)
xml_doc = _update_perf_events_xml(xml_doc, migrate_data)
xml_doc = _update_memory_backing_xml(xml_doc, migrate_data)
+ xml_doc = _update_quota_xml(instance, xml_doc)
if get_vif_config is not None:
xml_doc = _update_vif_xml(xml_doc, migrate_data, get_vif_config)
if 'dst_numa_info' in migrate_data:
@@ -71,6 +72,18 @@ def get_updated_guest_xml(instance, guest, migrate_data, get_volume_config,
return etree.tostring(xml_doc, encoding='unicode')
+def _update_quota_xml(instance, xml_doc):
+ flavor_shares = instance.flavor.extra_specs.get('quota:cpu_shares')
+ cputune = xml_doc.find('./cputune')
+ shares = xml_doc.find('./cputune/shares')
+ if shares is not None and not flavor_shares:
+ cputune.remove(shares)
+ # Remove the cputune element entirely if it has no children left.
+ if cputune is not None and not list(cputune):
+ xml_doc.remove(cputune)
+ return xml_doc
+
+
def _update_device_resources_xml(xml_doc, new_resources):
vpmems = []
for resource in new_resources:
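As a standalone illustration of the transformation _update_quota_xml() applies (the XML fragment is made up): when the flavor carries no quota:cpu_shares extra spec, a stale <shares> element, and an emptied <cputune>, are dropped from the migrated domain XML.

from lxml import etree

xml_doc = etree.fromstring(
    "<domain><cputune><shares>1024</shares></cputune></domain>")

flavor_shares = None  # flavor.extra_specs has no 'quota:cpu_shares'
cputune = xml_doc.find('./cputune')
shares = xml_doc.find('./cputune/shares')
if shares is not None and not flavor_shares:
    cputune.remove(shares)
if cputune is not None and not list(cputune):
    xml_doc.remove(cputune)

print(etree.tostring(xml_doc, encoding='unicode'))  # <domain/>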
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index da2a6e8b8a..e1298ee5c8 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -22,6 +22,7 @@ import grp
import os
import pwd
import re
+import tempfile
import typing as ty
import uuid
@@ -110,55 +111,99 @@ VTPM_DIR = '/var/lib/libvirt/swtpm/'
def create_image(
- disk_format: str, path: str, size: ty.Union[str, int],
+ path: str,
+ disk_format: str,
+ disk_size: ty.Optional[ty.Union[str, int]],
+ backing_file: ty.Optional[str] = None,
+ encryption: ty.Optional[ty.Dict[str, ty.Any]] = None
) -> None:
- """Create a disk image
-
- :param disk_format: Disk image format (as known by qemu-img)
+ """Disk image creation with qemu-img
:param path: Desired location of the disk image
- :param size: Desired size of disk image. May be given as an int or
- a string. If given as an int, it will be interpreted
- as bytes. If it's a string, it should consist of a number
- with an optional suffix ('K' for Kibibytes,
- M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
- If no suffix is given, it will be interpreted as bytes.
+ :param disk_format: Disk image format (as known by qemu-img)
+ :param disk_size: Desired size of disk image. May be given as an int or
+ a string. If given as an int, it will be interpreted as bytes. If it's
+ a string, it should consist of a number with an optional suffix ('K'
+ for Kibibytes, M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
+ If no suffix is given, it will be interpreted as bytes.
+ Can be None in the case of a COW image.
+ :param backing_file: (Optional) Backing file to use.
+ :param encryption: (Optional) Dict detailing various encryption attributes
+ such as the format and passphrase.
"""
- processutils.execute('qemu-img', 'create', '-f', disk_format, path, size)
-
+ cmd = [
+ 'env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'create', '-f', disk_format
+ ]
-def create_cow_image(
- backing_file: ty.Optional[str], path: str, size: ty.Optional[int] = None,
-) -> None:
- """Create COW image
-
- Creates a COW image with the given backing file
-
- :param backing_file: Existing image on which to base the COW image
- :param path: Desired location of the COW image
- """
- base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
- cow_opts = []
if backing_file:
base_details = images.qemu_img_info(backing_file)
- cow_opts += ['backing_file=%s' % backing_file]
- cow_opts += ['backing_fmt=%s' % base_details.file_format]
- else:
- base_details = None
- # Explicitly inherit the value of 'cluster_size' property of a qcow2
- # overlay image from its backing file. This can be useful in cases
- # when people create a base image with a non-default 'cluster_size'
- # value or cases when images were created with very old QEMU
- # versions which had a different default 'cluster_size'.
- if base_details and base_details.cluster_size is not None:
- cow_opts += ['cluster_size=%s' % base_details.cluster_size]
- if size is not None:
- cow_opts += ['size=%s' % size]
- if cow_opts:
+ cow_opts = [
+ f'backing_file={backing_file}',
+ f'backing_fmt={base_details.file_format}'
+ ]
+ # Explicitly inherit the value of 'cluster_size' property of a qcow2
+ # overlay image from its backing file. This can be useful in cases when
+ # people create a base image with a non-default 'cluster_size' value or
+ # cases when images were created with very old QEMU versions which had
+ # a different default 'cluster_size'.
+ if base_details.cluster_size is not None:
+ cow_opts += [f'cluster_size={base_details.cluster_size}']
+
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
- cow_opts = ['-o', csv_opts]
- cmd = base_cmd + cow_opts + [path]
- processutils.execute(*cmd)
+ cmd += ['-o', csv_opts]
+
+ # Disk size can be None in the case of a COW image
+ disk_size_arg = [str(disk_size)] if disk_size is not None else []
+
+ if encryption:
+ with tempfile.NamedTemporaryFile(mode='tr+', encoding='utf-8') as f:
+ # Write out the passphrase secret to a temp file
+ f.write(encryption.get('secret'))
+
+ # Ensure the secret is written to disk, we can't .close() here as
+ # that removes the file when using NamedTemporaryFile
+ f.flush()
+
+ # The basic options include the secret and encryption format
+ encryption_opts = [
+ '--object', f"secret,id=sec,file={f.name}",
+ '-o', 'encrypt.key-secret=sec',
+ '-o', f"encrypt.format={encryption.get('format')}",
+ ]
+ # Supported luks options:
+ # cipher-alg=<str> - Name of cipher algorithm and key length
+ # cipher-mode=<str> - Name of encryption cipher mode
+ # hash-alg=<str> - Name of hash algorithm to use for PBKDF
+ # iter-time=<num> - Time to spend in PBKDF in milliseconds
+ # ivgen-alg=<str> - Name of IV generator algorithm
+ # ivgen-hash-alg=<str> - Name of IV generator hash algorithm
+ #
+ # NOTE(melwitt): Sensible defaults (that match the qemu defaults)
+ # are hardcoded at this time for simplicity and consistency when
+ # instances are migrated. Configuration of luks options could be
+ # added in a future release.
+ encryption_options = {
+ 'cipher-alg': 'aes-256',
+ 'cipher-mode': 'xts',
+ 'hash-alg': 'sha256',
+ 'iter-time': 2000,
+ 'ivgen-alg': 'plain64',
+ 'ivgen-hash-alg': 'sha256',
+ }
+
+ for option, value in encryption_options.items():
+ encryption_opts += [
+ '-o',
+ f'encrypt.{option}={value}',
+ ]
+
+ # We need to execute the command while the NamedTemporaryFile still
+ # exists
+ cmd += encryption_opts + [path] + disk_size_arg
+ processutils.execute(*cmd)
+ else:
+ cmd += [path] + disk_size_arg
+ processutils.execute(*cmd)
def create_ploop_image(
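A hedged usage sketch of the consolidated helper (paths and sizes are hypothetical; the backing file is assumed to exist so qemu-img can probe its format and cluster size):

from nova.virt.libvirt import utils as libvirt_utils

# qcow2 overlay on an existing base image; disk_size may be None for a
# pure COW overlay.
libvirt_utils.create_image(
    path='/var/lib/nova/instances/INSTANCE_UUID/disk',
    disk_format='qcow2',
    disk_size=10 * 1024 ** 3,
    backing_file='/var/lib/nova/instances/_base/abc123')

# With encryption, a LUKS-formatted image is created instead, e.g.
# encryption={'format': 'luks', 'secret': 'passphrase'}.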
@@ -216,8 +261,8 @@ def copy_image(
dest: str,
host: ty.Optional[str] = None,
receive: bool = False,
- on_execute: ty.Callable = None,
- on_completion: ty.Callable = None,
+ on_execute: ty.Optional[ty.Callable] = None,
+ on_completion: ty.Optional[ty.Callable] = None,
compression: bool = True,
) -> None:
"""Copy a disk image to an existing directory
@@ -526,6 +571,9 @@ def get_cpu_model_from_arch(arch: str) -> str:
mode = 'qemu32'
elif arch == obj_fields.Architecture.PPC64LE:
mode = 'POWER8'
+ # TODO(chateaulav): Testing of emulated archs ongoing
+ # elif arch == obj_fields.Architecture.MIPSEL:
+ # mode = '24Kf-mips-cpu'
# NOTE(kevinz): In aarch64, cpu model 'max' will offer the capabilities
# that all the stuff it can currently emulate, both for "TCG" and "KVM"
elif arch == obj_fields.Architecture.AARCH64:
@@ -568,6 +616,7 @@ def get_default_machine_type(arch: str) -> ty.Optional[str]:
default_mtypes = {
obj_fields.Architecture.ARMV7: "virt",
obj_fields.Architecture.AARCH64: "virt",
+ obj_fields.Architecture.PPC64LE: "pseries",
obj_fields.Architecture.S390: "s390-ccw-virtio",
obj_fields.Architecture.S390X: "s390-ccw-virtio",
obj_fields.Architecture.I686: "pc",
@@ -577,17 +626,31 @@ def get_default_machine_type(arch: str) -> ty.Optional[str]:
def mdev_name2uuid(mdev_name: str) -> str:
- """Convert an mdev name (of the form mdev_<uuid_with_underscores>) to a
- uuid (of the form 8-4-4-4-12).
+ """Convert an mdev name (of the form mdev_<uuid_with_underscores> or
+ mdev_<uuid_with_underscores>_<pciaddress>) to a uuid
+ (of the form 8-4-4-4-12).
+
+ :param mdev_name: the name of the mdev to parse the UUID from
+ :returns: string containing the uuid
"""
- return str(uuid.UUID(mdev_name[5:].replace('_', '-')))
+ mdev_uuid = mdev_name[5:].replace('_', '-')
+ # Unconditionally remove the PCI address from the name
+ mdev_uuid = mdev_uuid[:36]
+ return str(uuid.UUID(mdev_uuid))
+
+def mdev_uuid2name(mdev_uuid: str, parent: ty.Optional[str] = None) -> str:
+ """Convert an mdev uuid (of the form 8-4-4-4-12) and optionally its parent
+ device to a name (of the form mdev_<uuid_with_underscores>[_<pciid>]).
-def mdev_uuid2name(mdev_uuid: str) -> str:
- """Convert an mdev uuid (of the form 8-4-4-4-12) to a name (of the form
- mdev_<uuid_with_underscores>).
+ :param mdev_uuid: the uuid of the mediated device
+ :param parent: the parent device id for the mediated device
+ :returns: name of the mdev to reference in libvirt
"""
- return "mdev_" + mdev_uuid.replace('-', '_')
+ name = "mdev_" + mdev_uuid.replace('-', '_')
+ if parent and parent.startswith('pci_'):
+ name = name + parent[4:]
+ return name
def get_flags_by_flavor_specs(flavor: 'objects.Flavor') -> ty.Set[str]:
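For example (the name below is a hypothetical mdev node device name from a newer libvirt, which appends the parent PCI address): mdev_name2uuid() now ignores the trailing address, and mdev_uuid2name() accepts the optional parent to rebuild such a name.

from nova.virt.libvirt import utils as libvirt_utils

name = 'mdev_b2107403_110c_45b0_af6d_8ebf156c73a3_0000_41_00_0'
print(libvirt_utils.mdev_name2uuid(name))
# b2107403-110c-45b0-af6d-8ebf156c73a3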
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 85c83572e1..6a7daa6b54 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -623,7 +623,7 @@ class LibvirtGenericVIFDriver(object):
# 2. libvirt driver does not change mac address for macvtap VNICs
# or Alternatively does not rely on recreating libvirt's nodev
# name from the current mac address set on the netdevice.
- # See: virt.libvrit.driver.LibvirtDriver._get_pcinet_info
+ # See: virt.libvirt.driver.LibvirtDriver._get_pcinet_info
if vif['vnic_type'] == network_model.VNIC_TYPE_MACVTAP:
set_vf_interface_vlan(
vif['profile']['pci_slot'],
diff --git a/nova/virt/libvirt/volume/fibrechannel.py b/nova/virt/libvirt/volume/fibrechannel.py
index b50db3aa1c..22c65e99c0 100644
--- a/nova/virt/libvirt/volume/fibrechannel.py
+++ b/nova/virt/libvirt/volume/fibrechannel.py
@@ -79,7 +79,6 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
"""Extend the volume."""
LOG.debug("calling os-brick to extend FC Volume", instance=instance)
new_size = self.connector.extend_volume(connection_info['data'])
- LOG.debug("Extend FC Volume %s; new_size=%s",
- connection_info['data']['device_path'],
+ LOG.debug("Extend FC Volume: new_size=%s",
new_size, instance=instance)
return new_size
diff --git a/nova/virt/libvirt/volume/lightos.py b/nova/virt/libvirt/volume/lightos.py
new file mode 100644
index 0000000000..d6d393994e
--- /dev/null
+++ b/nova/virt/libvirt/volume/lightos.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2016-2020 Lightbits Labs Ltd.
+# Copyright (C) 2020 Intel Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import nova.conf
+from nova import utils
+from nova.virt.libvirt.volume import volume as libvirt_volume
+from os_brick import initiator
+from os_brick.initiator import connector
+from oslo_log import log as logging
+
+
+LOG = logging.getLogger(__name__)
+CONF = nova.conf.CONF
+
+
+class LibvirtLightOSVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
+ """Driver to attach NVMe volumes to libvirt."""
+ VERSION = '2.3.12'
+
+ def __init__(self, connection):
+ super(LibvirtLightOSVolumeDriver, self).__init__(connection)
+ self.connector = connector.InitiatorConnector.factory(
+ initiator.LIGHTOS,
+ root_helper=utils.get_root_helper(),
+ device_scan_attempts=CONF.libvirt.num_nvme_discover_tries)
+
+ def connect_volume(self, connection_info, instance):
+ device_info = self.connector.connect_volume(connection_info['data'])
+ LOG.debug("Connecting NVMe volume with device_info %s", device_info)
+ connection_info['data']['device_path'] = device_info['path']
+
+ def disconnect_volume(self, connection_info, instance):
+ """Detach the volume from the instance."""
+ LOG.debug("Disconnecting NVMe disk. instance:%s, volume_id:%s",
+ connection_info.get("instance", ""),
+ connection_info.get("volume_id", ""))
+ self.connector.disconnect_volume(connection_info['data'], None)
+ super(LibvirtLightOSVolumeDriver, self).disconnect_volume(
+ connection_info, instance)
+
+ def extend_volume(self, connection_info, instance, requested_size=None):
+ """Extend the volume."""
+ LOG.debug("calling os-brick to extend LightOS Volume."
+ "instance:%s, volume_id:%s",
+ connection_info.get("instance", ""),
+ connection_info.get("volume_id", ""))
+ new_size = self.connector.extend_volume(connection_info['data'])
+ LOG.debug("Extend LightOS Volume %s; new_size=%s",
+ connection_info['data']['device_path'], new_size)
+ return new_size
diff --git a/nova/virt/libvirt/volume/nvme.py b/nova/virt/libvirt/volume/nvme.py
index fefaaf434d..7436552812 100644
--- a/nova/virt/libvirt/volume/nvme.py
+++ b/nova/virt/libvirt/volume/nvme.py
@@ -33,6 +33,7 @@ class LibvirtNVMEVolumeDriver(libvirt_volume.LibvirtVolumeDriver):
self.connector = connector.InitiatorConnector.factory(
initiator.NVME, utils.get_root_helper(),
+ use_multipath=CONF.libvirt.volume_use_multipath,
device_scan_attempts=CONF.libvirt.num_nvme_discover_tries)
def connect_volume(self, connection_info, instance):
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index 6ea91e2221..0ab3ddc4c1 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -263,12 +263,19 @@ def _get_eth_link(vif, ifc_num):
'id': link_id,
'vif_id': vif['id'],
'type': nic_type,
- 'mtu': vif['network']['meta'].get('mtu'),
+ 'mtu': _get_link_mtu(vif),
'ethernet_mac_address': vif.get('address'),
}
return link
+def _get_link_mtu(vif):
+ for subnet in vif['network']['subnets']:
+ if subnet['meta'].get('dhcp_server'):
+ return None
+ return vif['network']['meta'].get('mtu')
+
+
def _get_nets(vif, subnet, version, net_num, link_id):
"""Get networks for the given VIF and subnet
diff --git a/nova/virt/node.py b/nova/virt/node.py
new file mode 100644
index 0000000000..4cb3d0a573
--- /dev/null
+++ b/nova/virt/node.py
@@ -0,0 +1,108 @@
+# Copyright 2022 Red Hat, inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import os
+import uuid
+
+from oslo_utils import uuidutils
+
+import nova.conf
+from nova import exception
+
+CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+COMPUTE_ID_FILE = 'compute_id'
+LOCAL_NODE_UUID = None
+
+
+def write_local_node_uuid(node_uuid):
+ # We only ever write an identity file in the CONF.state_path
+ # location
+ fn = os.path.join(CONF.state_path, COMPUTE_ID_FILE)
+
+ # Try to create the identity file and write our uuid into it. Fail
+ # if the file exists (since it shouldn't if we made it here).
+ try:
+ with open(fn, 'x') as f:
+ f.write(node_uuid)
+ except FileExistsError:
+ # If the file exists, we must either fail or re-survey all the
+ # potential files. If we just read and return it, it could be
+ # inconsistent with files in the other locations.
+ raise exception.InvalidNodeConfiguration(
+ reason='Identity file %s appeared unexpectedly' % fn)
+ except Exception as e:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to write uuid to %s: %s' % (fn, e))
+
+ LOG.info('Wrote node identity %s to %s', node_uuid, fn)
+
+
+def read_local_node_uuid():
+ locations = ([os.path.dirname(f) for f in CONF.config_file] +
+ [CONF.state_path])
+
+ uuids = []
+ found = []
+ for location in locations:
+ fn = os.path.join(location, COMPUTE_ID_FILE)
+ try:
+ # UUIDs should be 36 characters in canonical format. Read
+ # a little more to be graceful about whitespace in/around
+ # the actual value we want to read. However, it must parse
+ # to a legit UUID once we strip the whitespace.
+ with open(fn) as f:
+ content = f.read(40)
+ node_uuid = str(uuid.UUID(content.strip()))
+ except FileNotFoundError:
+ continue
+ except ValueError:
+ raise exception.InvalidNodeConfiguration(
+ reason='Unable to parse UUID from %s' % fn)
+ uuids.append(node_uuid)
+ found.append(fn)
+
+ if uuids:
+ # Any identities we found must be consistent, or we fail
+ first = uuids[0]
+ for i, (node_uuid, fn) in enumerate(zip(uuids, found)):
+ if node_uuid != first:
+ raise exception.InvalidNodeConfiguration(
+ reason='UUID %s in %s does not match %s' % (
+ node_uuid, fn, uuids[i - 1]))
+ LOG.info('Determined node identity %s from %s', first, found[0])
+ return first
+ else:
+ return None
+
+
+def get_local_node_uuid():
+ """Read or create local node uuid file.
+
+ :returns: UUID string read from file, or generated
+ """
+ global LOCAL_NODE_UUID
+
+ if LOCAL_NODE_UUID is not None:
+ return LOCAL_NODE_UUID
+
+ node_uuid = read_local_node_uuid()
+ if not node_uuid:
+ node_uuid = uuidutils.generate_uuid()
+ LOG.info('Generated node identity %s', node_uuid)
+ write_local_node_uuid(node_uuid)
+
+ LOCAL_NODE_UUID = node_uuid
+ return node_uuid
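A hedged usage sketch (it assumes nova's CONF is initialised and CONF.state_path is writable): the first call either reads an existing compute_id file or generates and persists a new UUID, and later calls return the cached value.

from nova.virt import node

node_uuid = node.get_local_node_uuid()           # reads or creates compute_id
assert node.get_local_node_uuid() == node_uuid   # served from the module cache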
diff --git a/nova/virt/powervm/__init__.py b/nova/virt/powervm/__init__.py
deleted file mode 100644
index 9780cb4856..0000000000
--- a/nova/virt/powervm/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova.virt.powervm import driver
-
-PowerVMDriver = driver.PowerVMDriver
diff --git a/nova/virt/powervm/disk/__init__.py b/nova/virt/powervm/disk/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/virt/powervm/disk/__init__.py
+++ /dev/null
diff --git a/nova/virt/powervm/disk/driver.py b/nova/virt/powervm/disk/driver.py
deleted file mode 100644
index 2ba083736e..0000000000
--- a/nova/virt/powervm/disk/driver.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-import oslo_log.log as logging
-import pypowervm.const as pvm_const
-import pypowervm.tasks.scsi_mapper as tsk_map
-import pypowervm.util as pvm_u
-import pypowervm.wrappers.virtual_io_server as pvm_vios
-
-from nova import exception
-from nova.virt.powervm import mgmt
-from nova.virt.powervm import vm
-
-LOG = logging.getLogger(__name__)
-
-
-class DiskType(object):
- BOOT = 'boot'
- IMAGE = 'image'
-
-
-class IterableToFileAdapter(object):
- """A degenerate file-like so that an iterable can be read like a file.
-
- The Glance client returns an iterable, but PowerVM requires a file. This
- is the adapter between the two.
- """
-
- def __init__(self, iterable):
- self.iterator = iterable.__iter__()
- self.remaining_data = ''
-
- def read(self, size):
- chunk = self.remaining_data
- try:
- while not chunk:
- chunk = next(self.iterator)
- except StopIteration:
- return ''
- return_value = chunk[0:size]
- self.remaining_data = chunk[size:]
- return return_value
-
-
-class DiskAdapter(metaclass=abc.ABCMeta):
-
- capabilities = {
- 'shared_storage': False,
- 'has_imagecache': False,
- 'snapshot': False,
- }
-
- def __init__(self, adapter, host_uuid):
- """Initialize the DiskAdapter.
-
- :param adapter: The pypowervm adapter.
- :param host_uuid: The UUID of the PowerVM host.
- """
- self._adapter = adapter
- self._host_uuid = host_uuid
- self.mp_uuid = mgmt.mgmt_uuid(self._adapter)
-
- @abc.abstractproperty
- def _vios_uuids(self):
- """List the UUIDs of the Virtual I/O Servers hosting the storage."""
- raise NotImplementedError()
-
- @abc.abstractmethod
- def _disk_match_func(self, disk_type, instance):
- """Return a matching function to locate the disk for an instance.
-
- :param disk_type: One of the DiskType enum values.
- :param instance: The instance whose disk is to be found.
- :return: Callable suitable for the match_func parameter of the
- pypowervm.tasks.scsi_mapper.find_maps method, with the
- following specification:
- def match_func(storage_elem)
- param storage_elem: A backing storage element wrapper (VOpt,
- VDisk, PV, or LU) to be analyzed.
- return: True if the storage_elem's mapping should be included;
- False otherwise.
- """
- raise NotImplementedError()
-
- def get_bootdisk_path(self, instance, vios_uuid):
- """Find the local path for the instance's boot disk.
-
- :param instance: nova.objects.instance.Instance object owning the
- requested disk.
- :param vios_uuid: PowerVM UUID of the VIOS to search for mappings.
- :return: Local path for instance's boot disk.
- """
- vm_uuid = vm.get_pvm_uuid(instance)
- match_func = self._disk_match_func(DiskType.BOOT, instance)
- vios_wrap = pvm_vios.VIOS.get(self._adapter, uuid=vios_uuid,
- xag=[pvm_const.XAG.VIO_SMAP])
- maps = tsk_map.find_maps(vios_wrap.scsi_mappings,
- client_lpar_id=vm_uuid, match_func=match_func)
- if maps:
- return maps[0].server_adapter.backing_dev_name
- return None
-
- def _get_bootdisk_iter(self, instance):
- """Return an iterator of (storage_elem, VIOS) tuples for the instance.
-
- This method returns an iterator of (storage_elem, VIOS) tuples, where
- storage_element is a pypowervm storage element wrapper associated with
- the instance boot disk and VIOS is the wrapper of the Virtual I/O
- server owning that storage element.
-
- :param instance: nova.objects.instance.Instance object owning the
- requested disk.
- :return: Iterator of tuples of (storage_elem, VIOS).
- """
- lpar_wrap = vm.get_instance_wrapper(self._adapter, instance)
- match_func = self._disk_match_func(DiskType.BOOT, instance)
- for vios_uuid in self._vios_uuids:
- vios_wrap = pvm_vios.VIOS.get(
- self._adapter, uuid=vios_uuid, xag=[pvm_const.XAG.VIO_SMAP])
- for scsi_map in tsk_map.find_maps(
- vios_wrap.scsi_mappings, client_lpar_id=lpar_wrap.id,
- match_func=match_func):
- yield scsi_map.backing_storage, vios_wrap
-
- def connect_instance_disk_to_mgmt(self, instance):
- """Connect an instance's boot disk to the management partition.
-
- :param instance: The instance whose boot disk is to be mapped.
- :return stg_elem: The storage element (LU, VDisk, etc.) that was mapped
- :return vios: The EntryWrapper of the VIOS from which the mapping was
- made.
- :raise InstanceDiskMappingFailed: If the mapping could not be done.
- """
- for stg_elem, vios in self._get_bootdisk_iter(instance):
- msg_args = {'disk_name': stg_elem.name, 'vios_name': vios.name}
-
- # Create a new mapping. NOTE: If there's an existing mapping on
- # the other VIOS but not this one, we'll create a second mapping
- # here. It would take an extreme sequence of events to get to that
- # point, and the second mapping would be harmless anyway. The
- # alternative would be always checking all VIOSes for existing
- # mappings, which increases the response time of the common case by
- # an entire GET of VIOS+VIO_SMAP.
- LOG.debug("Mapping boot disk %(disk_name)s to the management "
- "partition from Virtual I/O Server %(vios_name)s.",
- msg_args, instance=instance)
- try:
- tsk_map.add_vscsi_mapping(self._host_uuid, vios, self.mp_uuid,
- stg_elem)
- # If that worked, we're done. add_vscsi_mapping logged.
- return stg_elem, vios
- except Exception:
- LOG.exception("Failed to map boot disk %(disk_name)s to the "
- "management partition from Virtual I/O Server "
- "%(vios_name)s.", msg_args, instance=instance)
- # Try the next hit, if available.
- # We either didn't find the boot dev, or failed all attempts to map it.
- raise exception.InstanceDiskMappingFailed(instance_name=instance.name)
-
- @abc.abstractmethod
- def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
- """Disconnect a disk from the management partition.
-
- :param vios_uuid: The UUID of the Virtual I/O Server serving the
- mapping.
- :param disk_name: The name of the disk to unmap.
- """
- raise NotImplementedError()
-
- @abc.abstractproperty
- def capacity(self):
- """Capacity of the storage in gigabytes.
-
- Default is to make the capacity arbitrarily large.
- """
- raise NotImplementedError()
-
- @abc.abstractproperty
- def capacity_used(self):
- """Capacity of the storage in gigabytes that is used.
-
- Default is to say none of it is used.
- """
- raise NotImplementedError()
-
- @staticmethod
- def _get_disk_name(disk_type, instance, short=False):
- """Generate a name for a virtual disk associated with an instance.
-
- :param disk_type: One of the DiskType enum values.
- :param instance: The instance for which the disk is to be created.
- :param short: If True, the generated name will be limited to 15
- characters (the limit for virtual disk). If False, it
- will be limited by the API (79 characters currently).
- :return: The sanitized file name for the disk.
- """
- prefix = '%s_' % (disk_type[0] if short else disk_type)
- base = ('%s_%s' % (instance.name[:8], instance.uuid[:4]) if short
- else instance.name)
- return pvm_u.sanitize_file_name_for_api(
- base, prefix=prefix, max_len=pvm_const.MaxLen.VDISK_NAME if short
- else pvm_const.MaxLen.FILENAME_DEFAULT)
-
- @abc.abstractmethod
- def detach_disk(self, instance):
- """Detaches the storage adapters from the image disk.
-
- :param instance: instance to detach the image for.
- :return: A list of all the backing storage elements that were
- detached from the I/O Server and VM.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def delete_disks(self, storage_elems):
- """Removes the disks specified by the mappings.
-
- :param storage_elems: A list of the storage elements that are to be
- deleted. Derived from the return value from
- detach_disk.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def create_disk_from_image(self, context, instance, image_meta):
- """Creates a disk and copies the specified image to it.
-
- Cleans up created disk if an error occurs.
- :param context: nova context used to retrieve image from glance
- :param instance: instance to create the disk for.
- :param image_meta: nova.objects.ImageMeta object with the metadata of
- the image of the instance.
- :return: The backing pypowervm storage object that was created.
- """
- raise NotImplementedError()
-
- @abc.abstractmethod
- def attach_disk(self, instance, disk_info, stg_ftsk):
- """Attaches the disk image to the Virtual Machine.
-
- :param instance: nova instance to attach the disk to.
- :param disk_info: The pypowervm storage element returned from
- create_disk_from_image. Ex. VOptMedia, VDisk, LU,
- or PV.
- :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
- I/O Operations. If provided, the Virtual I/O Server
- mapping updates will be added to the FeedTask. This
- defers the updates to some later point in time. If
- the FeedTask is not provided, the updates will be run
- immediately when this method is executed.
- """
- raise NotImplementedError()
diff --git a/nova/virt/powervm/disk/localdisk.py b/nova/virt/powervm/disk/localdisk.py
deleted file mode 100644
index e8d2ff4f46..0000000000
--- a/nova/virt/powervm/disk/localdisk.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Copyright 2013 OpenStack Foundation
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import oslo_log.log as logging
-from pypowervm import const as pvm_const
-from pypowervm.tasks import scsi_mapper as tsk_map
-from pypowervm.tasks import storage as tsk_stg
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-
-from nova import conf
-from nova import exception
-from nova.image import glance
-from nova.virt.powervm.disk import driver as disk_dvr
-from nova.virt.powervm import vm
-
-LOG = logging.getLogger(__name__)
-CONF = conf.CONF
-IMAGE_API = glance.API()
-
-
-class LocalStorage(disk_dvr.DiskAdapter):
-
- def __init__(self, adapter, host_uuid):
- super(LocalStorage, self).__init__(adapter, host_uuid)
-
- self.capabilities = {
- 'shared_storage': False,
- 'has_imagecache': False,
- # NOTE(efried): 'snapshot' capability set dynamically below.
- }
-
- # Query to get the Volume Group UUID
- if not CONF.powervm.volume_group_name:
- raise exception.OptRequiredIfOtherOptValue(
- if_opt='disk_driver', if_value='localdisk',
- then_opt='volume_group_name')
- self.vg_name = CONF.powervm.volume_group_name
- vios_w, vg_w = tsk_stg.find_vg(adapter, self.vg_name)
- self._vios_uuid = vios_w.uuid
- self.vg_uuid = vg_w.uuid
- # Set the 'snapshot' capability dynamically. If we're hosting I/O on
- # the management partition, we can snapshot. If we're hosting I/O on
- # traditional VIOS, we are limited by the fact that a VSCSI device
- # can't be mapped to two partitions (the VIOS and the management) at
- # once.
- self.capabilities['snapshot'] = self.mp_uuid == self._vios_uuid
- LOG.info("Local Storage driver initialized: volume group: '%s'",
- self.vg_name)
-
- @property
- def _vios_uuids(self):
- """List the UUIDs of the Virtual I/O Servers hosting the storage.
-
- For localdisk, there's only one.
- """
- return [self._vios_uuid]
-
- @staticmethod
- def _disk_match_func(disk_type, instance):
- """Return a matching function to locate the disk for an instance.
-
- :param disk_type: One of the DiskType enum values.
- :param instance: The instance whose disk is to be found.
- :return: Callable suitable for the match_func parameter of the
- pypowervm.tasks.scsi_mapper.find_maps method.
- """
- disk_name = LocalStorage._get_disk_name(
- disk_type, instance, short=True)
- return tsk_map.gen_match_func(pvm_stg.VDisk, names=[disk_name])
-
- @property
- def capacity(self):
- """Capacity of the storage in gigabytes."""
- vg_wrap = self._get_vg_wrap()
- return float(vg_wrap.capacity)
-
- @property
- def capacity_used(self):
- """Capacity of the storage in gigabytes that is used."""
- vg_wrap = self._get_vg_wrap()
- # Subtract available from capacity
- return float(vg_wrap.capacity) - float(vg_wrap.available_size)
-
- def delete_disks(self, storage_elems):
- """Removes the specified disks.
-
- :param storage_elems: A list of the storage elements that are to be
- deleted. Derived from the return value from
- detach_disk.
- """
- # All of localdisk is done against the volume group. So reload
- # that (to get new etag) and then update against it.
- tsk_stg.rm_vg_storage(self._get_vg_wrap(), vdisks=storage_elems)
-
- def detach_disk(self, instance):
- """Detaches the storage adapters from the image disk.
-
- :param instance: Instance to disconnect the image for.
- :return: A list of all the backing storage elements that were
- disconnected from the I/O Server and VM.
- """
- lpar_uuid = vm.get_pvm_uuid(instance)
-
- # Build the match function
- match_func = tsk_map.gen_match_func(pvm_stg.VDisk)
-
- vios_w = pvm_vios.VIOS.get(
- self._adapter, uuid=self._vios_uuid, xag=[pvm_const.XAG.VIO_SMAP])
-
- # Remove the mappings.
- mappings = tsk_map.remove_maps(
- vios_w, lpar_uuid, match_func=match_func)
-
- # Update the VIOS with the removed mappings.
- vios_w.update()
-
- return [x.backing_storage for x in mappings]
-
- def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
- """Disconnect a disk from the management partition.
-
- :param vios_uuid: The UUID of the Virtual I/O Server serving the
- mapping.
- :param disk_name: The name of the disk to unmap.
- """
- tsk_map.remove_vdisk_mapping(self._adapter, vios_uuid, self.mp_uuid,
- disk_names=[disk_name])
- LOG.info("Unmapped boot disk %(disk_name)s from the management "
- "partition from Virtual I/O Server %(vios_name)s.",
- {'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
- 'vios_name': vios_uuid})
-
- def create_disk_from_image(self, context, instance, image_meta):
- """Creates a disk and copies the specified image to it.
-
- Cleans up the created disk if an error occurs.
-
- :param context: nova context used to retrieve image from glance
- :param instance: instance to create the disk for.
- :param image_meta: The metadata of the image of the instance.
- :return: The backing pypowervm storage object that was created.
- """
- LOG.info('Create disk.', instance=instance)
-
- return self._upload_image(context, instance, image_meta)
-
- # TODO(esberglu): Copy vdisk when implementing image cache.
-
- def _upload_image(self, context, instance, image_meta):
- """Upload a new image.
-
- :param context: Nova context used to retrieve image from glance.
- :param image_meta: The metadata of the image of the instance.
- :return: The virtual disk containing the image.
- """
-
- img_name = self._get_disk_name(disk_dvr.DiskType.BOOT, instance,
- short=True)
-
- # TODO(esberglu) Add check for cached image when adding imagecache.
-
- return tsk_stg.upload_new_vdisk(
- self._adapter, self._vios_uuid, self.vg_uuid,
- disk_dvr.IterableToFileAdapter(
- IMAGE_API.download(context, image_meta.id)), img_name,
- image_meta.size, d_size=image_meta.size,
- upload_type=tsk_stg.UploadType.IO_STREAM,
- file_format=image_meta.disk_format)[0]
-
- def attach_disk(self, instance, disk_info, stg_ftsk):
- """Attaches the disk image to the Virtual Machine.
-
- :param instance: nova instance to connect the disk to.
- :param disk_info: The pypowervm storage element returned from
- create_disk_from_image. Ex. VOptMedia, VDisk, LU,
- or PV.
- :param stg_ftsk: The pypowervm transaction FeedTask for the
- I/O Operations. The Virtual I/O Server mapping updates
- will be added to the FeedTask. This defers the updates
- to some later point in time.
- """
- lpar_uuid = vm.get_pvm_uuid(instance)
-
- def add_func(vios_w):
- LOG.info("Adding logical volume disk connection to VIOS %(vios)s.",
- {'vios': vios_w.name}, instance=instance)
- mapping = tsk_map.build_vscsi_mapping(
- self._host_uuid, vios_w, lpar_uuid, disk_info)
- return tsk_map.add_map(vios_w, mapping)
-
- stg_ftsk.wrapper_tasks[self._vios_uuid].add_functor_subtask(add_func)
-
- def _get_vg_wrap(self):
- return pvm_stg.VG.get(self._adapter, uuid=self.vg_uuid,
- parent_type=pvm_vios.VIOS,
- parent_uuid=self._vios_uuid)
diff --git a/nova/virt/powervm/disk/ssp.py b/nova/virt/powervm/disk/ssp.py
deleted file mode 100644
index e7cdc9cf6c..0000000000
--- a/nova/virt/powervm/disk/ssp.py
+++ /dev/null
@@ -1,258 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import random
-
-import oslo_log.log as logging
-from pypowervm import const as pvm_const
-from pypowervm import exceptions as pvm_exc
-from pypowervm.tasks import cluster_ssp as tsk_cs
-from pypowervm.tasks import partition as tsk_par
-from pypowervm.tasks import scsi_mapper as tsk_map
-from pypowervm.tasks import storage as tsk_stg
-import pypowervm.util as pvm_u
-import pypowervm.wrappers.cluster as pvm_clust
-import pypowervm.wrappers.storage as pvm_stg
-
-from nova import exception
-from nova.image import glance
-from nova.virt.powervm.disk import driver as disk_drv
-from nova.virt.powervm import vm
-
-LOG = logging.getLogger(__name__)
-
-IMAGE_API = glance.API()
-
-
-class SSPDiskAdapter(disk_drv.DiskAdapter):
- """Provides a disk adapter for Shared Storage Pools.
-
- Shared Storage Pools are a clustered file system technology that can link
- together Virtual I/O Servers.
-
- This adapter provides the connection for nova ephemeral storage (not
- Cinder) to connect to virtual machines.
- """
-
- capabilities = {
- 'shared_storage': True,
- # NOTE(efried): Whereas the SSP disk driver definitely does image
- # caching, it's not through the nova.virt.imagecache.ImageCacheManager
- # API. Setting `has_imagecache` to True here would have the side
- # effect of having a periodic task try to call this class's
- # manage_image_cache method (not implemented here; and a no-op in the
- # superclass) which would be harmless, but unnecessary.
- 'has_imagecache': False,
- 'snapshot': True,
- }
-
- def __init__(self, adapter, host_uuid):
- """Initialize the SSPDiskAdapter.
-
- :param adapter: pypowervm.adapter.Adapter for the PowerVM REST API.
- :param host_uuid: PowerVM UUID of the managed system.
- """
- super(SSPDiskAdapter, self).__init__(adapter, host_uuid)
-
- try:
- self._clust = pvm_clust.Cluster.get(self._adapter)[0]
- self._ssp = pvm_stg.SSP.get_by_href(
- self._adapter, self._clust.ssp_uri)
- self._tier = tsk_stg.default_tier_for_ssp(self._ssp)
- except pvm_exc.Error:
- LOG.exception("A unique PowerVM Cluster and Shared Storage Pool "
- "is required in the default Tier.")
- raise exception.NotFound()
-
- LOG.info(
- "SSP Storage driver initialized. Cluster '%(clust_name)s'; "
- "SSP '%(ssp_name)s'; Tier '%(tier_name)s'",
- {'clust_name': self._clust.name, 'ssp_name': self._ssp.name,
- 'tier_name': self._tier.name})
-
- @property
- def capacity(self):
- """Capacity of the storage in gigabytes."""
-        # Retrieving the Tier is faster (because we don't have to refresh
-        # the LUs).
- return float(self._tier.refresh().capacity)
-
- @property
- def capacity_used(self):
- """Capacity of the storage in gigabytes that is used."""
- self._ssp = self._ssp.refresh()
- return float(self._ssp.capacity) - float(self._ssp.free_space)
-
- def detach_disk(self, instance):
- """Detaches the storage adapters from the disk.
-
- :param instance: instance from which to detach the image.
- :return: A list of all the backing storage elements that were detached
- from the I/O Server and VM.
- """
- stg_ftsk = tsk_par.build_active_vio_feed_task(
- self._adapter, name='ssp', xag=[pvm_const.XAG.VIO_SMAP])
-
- lpar_uuid = vm.get_pvm_uuid(instance)
- match_func = tsk_map.gen_match_func(pvm_stg.LU)
-
- def rm_func(vwrap):
- LOG.info("Removing SSP disk connection to VIOS %s.",
- vwrap.name, instance=instance)
- return tsk_map.remove_maps(vwrap, lpar_uuid,
- match_func=match_func)
-
- # Remove the mapping from *each* VIOS on the LPAR's host.
- # The LPAR's host has to be self._host_uuid, else the PowerVM API will
- # fail.
- #
- # Note - this may not be all the VIOSes on the system...just the ones
- # in the SSP cluster.
- #
- # The mappings will normally be the same on all VIOSes, unless a VIOS
- # was down when a disk was added. So for the return value, we need to
- # collect the union of all relevant mappings from all VIOSes.
- lu_set = set()
- for vios_uuid in self._vios_uuids:
- # Add the remove for the VIO
- stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)
-
- # Find the active LUs so that a delete op knows what to remove.
- vios_w = stg_ftsk.wrapper_tasks[vios_uuid].wrapper
- mappings = tsk_map.find_maps(vios_w.scsi_mappings,
- client_lpar_id=lpar_uuid,
- match_func=match_func)
- if mappings:
- lu_set.update([x.backing_storage for x in mappings])
-
- stg_ftsk.execute()
-
- return list(lu_set)
-
- def delete_disks(self, storage_elems):
- """Removes the disks specified by the mappings.
-
- :param storage_elems: A list of the storage elements (LU
- ElementWrappers) that are to be deleted. Derived
- from the return value from detach_disk.
- """
- tsk_stg.rm_tier_storage(storage_elems, tier=self._tier)
-
- def create_disk_from_image(self, context, instance, image_meta):
- """Creates a boot disk and links the specified image to it.
-
- If the specified image has not already been uploaded, an Image LU is
- created for it. A Disk LU is then created for the instance and linked
- to the Image LU.
-
- :param context: nova context used to retrieve image from glance
- :param instance: instance to create the disk for.
- :param nova.objects.ImageMeta image_meta:
- The metadata of the image of the instance.
- :return: The backing pypowervm LU storage object that was created.
- """
- LOG.info('SSP: Create boot disk from image %s.', image_meta.id,
- instance=instance)
-
- image_lu = tsk_cs.get_or_upload_image_lu(
- self._tier, pvm_u.sanitize_file_name_for_api(
- image_meta.name, prefix=disk_drv.DiskType.IMAGE + '_',
- suffix='_' + image_meta.checksum),
- random.choice(self._vios_uuids), disk_drv.IterableToFileAdapter(
- IMAGE_API.download(context, image_meta.id)), image_meta.size,
- upload_type=tsk_stg.UploadType.IO_STREAM)
-
- boot_lu_name = pvm_u.sanitize_file_name_for_api(
- instance.name, prefix=disk_drv.DiskType.BOOT + '_')
-
- LOG.info('SSP: Disk name is %s', boot_lu_name, instance=instance)
-
- return tsk_stg.crt_lu(
- self._tier, boot_lu_name, instance.flavor.root_gb,
- typ=pvm_stg.LUType.DISK, clone=image_lu)[1]
-
- def attach_disk(self, instance, disk_info, stg_ftsk):
- """Connects the disk image to the Virtual Machine.
-
- :param instance: nova instance to which to attach the disk.
- :param disk_info: The pypowervm storage element returned from
- create_disk_from_image. Ex. VOptMedia, VDisk, LU,
- or PV.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
- """
- # Create the LU structure
- lu = pvm_stg.LU.bld_ref(self._adapter, disk_info.name, disk_info.udid)
- lpar_uuid = vm.get_pvm_uuid(instance)
-
- # This is the delay apply mapping
- def add_func(vios_w):
- LOG.info("Attaching SSP disk from VIOS %s.",
- vios_w.name, instance=instance)
- mapping = tsk_map.build_vscsi_mapping(
- self._host_uuid, vios_w, lpar_uuid, lu)
- return tsk_map.add_map(vios_w, mapping)
-
- # Add the mapping to *each* VIOS on the LPAR's host.
- # The LPAR's host has to be self._host_uuid, else the PowerVM API will
- # fail.
- #
- # Note: this may not be all the VIOSes on the system - just the ones
- # in the SSP cluster.
- for vios_uuid in self._vios_uuids:
- stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func)
-
- @property
- def _vios_uuids(self):
- """List the UUIDs of our cluster's VIOSes on this host.
-
- (If a VIOS is not on this host, we can't interact with it, even if its
- URI and therefore its UUID happen to be available in the pypowervm
- wrapper.)
-
- :return: A list of VIOS UUID strings.
- """
- ret = []
- for n in self._clust.nodes:
- # Skip any nodes that we don't have the VIOS uuid or uri
- if not (n.vios_uuid and n.vios_uri):
- continue
- if self._host_uuid == pvm_u.get_req_path_uuid(
- n.vios_uri, preserve_case=True, root=True):
- ret.append(n.vios_uuid)
- return ret
-
- def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
- """Disconnect a disk from the management partition.
-
- :param vios_uuid: The UUID of the Virtual I/O Server serving the
- mapping.
- :param disk_name: The name of the disk to unmap.
- """
- tsk_map.remove_lu_mapping(self._adapter, vios_uuid, self.mp_uuid,
- disk_names=[disk_name])
- LOG.info("Unmapped boot disk %(disk_name)s from the management "
-                 "partition on Virtual I/O Server %(vios_uuid)s.",
- {'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
- 'vios_uuid': vios_uuid})
-
- @staticmethod
- def _disk_match_func(disk_type, instance):
- """Return a matching function to locate the disk for an instance.
-
- :param disk_type: One of the DiskType enum values.
- :param instance: The instance whose disk is to be found.
- :return: Callable suitable for the match_func parameter of the
- pypowervm.tasks.scsi_mapper.find_maps method.
- """
- disk_name = SSPDiskAdapter._get_disk_name(disk_type, instance)
- return tsk_map.gen_match_func(pvm_stg.LU, names=[disk_name])
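
The detach_disk method above collects the backing LUs from every VIOS in the cluster and returns their union, since a VIOS that was down when a disk was attached may be missing some mappings. A small sketch of that dedup step, using hypothetical mapping objects in place of pypowervm wrappers:

    from collections import namedtuple

    # Hypothetical stand-ins for pypowervm SCSI mappings and backing LUs.
    Mapping = namedtuple('Mapping', 'backing_storage')

    vios_mappings = {
        'vios1': [Mapping('lu_boot'), Mapping('lu_data')],
        'vios2': [Mapping('lu_boot')],  # missed lu_data while it was down
    }

    lu_set = set()
    for vios_uuid, mappings in vios_mappings.items():
        lu_set.update(m.backing_storage for m in mappings)

    print(sorted(lu_set))  # ['lu_boot', 'lu_data'] - the union across VIOSes
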
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
deleted file mode 100644
index 0f94a3b75b..0000000000
--- a/nova/virt/powervm/driver.py
+++ /dev/null
@@ -1,708 +0,0 @@
-# Copyright 2014, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Connection to PowerVM hypervisor through NovaLink."""
-
-import os_resource_classes as orc
-from oslo_log import log as logging
-from oslo_utils import excutils
-from oslo_utils import importutils
-from pypowervm import adapter as pvm_apt
-from pypowervm import const as pvm_const
-from pypowervm import exceptions as pvm_exc
-from pypowervm.helpers import log_helper as log_hlp
-from pypowervm.helpers import vios_busy as vio_hlp
-from pypowervm.tasks import partition as pvm_par
-from pypowervm.tasks import storage as pvm_stor
-from pypowervm.tasks import vterm as pvm_vterm
-from pypowervm.wrappers import managed_system as pvm_ms
-from taskflow.patterns import linear_flow as tf_lf
-
-from nova.compute import task_states
-from nova import conf as cfg
-from nova.console import type as console_type
-from nova import exception as exc
-from nova.i18n import _
-from nova.image import glance
-from nova.virt import configdrive
-from nova.virt import driver
-from nova.virt.powervm import host as pvm_host
-from nova.virt.powervm.tasks import base as tf_base
-from nova.virt.powervm.tasks import image as tf_img
-from nova.virt.powervm.tasks import network as tf_net
-from nova.virt.powervm.tasks import storage as tf_stg
-from nova.virt.powervm.tasks import vm as tf_vm
-from nova.virt.powervm import vm
-from nova.virt.powervm import volume
-from nova.virt.powervm.volume import fcvscsi
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-DISK_ADPT_NS = 'nova.virt.powervm.disk'
-DISK_ADPT_MAPPINGS = {
- 'localdisk': 'localdisk.LocalStorage',
- 'ssp': 'ssp.SSPDiskAdapter'
-}
-
-
-class PowerVMDriver(driver.ComputeDriver):
- """PowerVM NovaLink Implementation of Compute Driver.
-
- https://wiki.openstack.org/wiki/PowerVM
- """
-
- def __init__(self, virtapi):
- # NOTE(edmondsw) some of these will be dynamic in future, so putting
- # capabilities on the instance rather than on the class.
- self.capabilities = {
- 'has_imagecache': False,
- 'supports_bfv_rescue': False,
- 'supports_evacuate': False,
- 'supports_migrate_to_same_host': False,
- 'supports_attach_interface': True,
- 'supports_device_tagging': False,
- 'supports_tagged_attach_interface': False,
- 'supports_tagged_attach_volume': False,
- 'supports_extend_volume': True,
- 'supports_multiattach': False,
- 'supports_trusted_certs': False,
- 'supports_pcpus': False,
- 'supports_accelerators': False,
- 'supports_vtpm': False,
- 'supports_secure_boot': False,
- 'supports_socket_pci_numa_affinity': False,
-
- # Supported image types
- "supports_image_type_aki": False,
- "supports_image_type_ami": False,
- "supports_image_type_ari": False,
- "supports_image_type_iso": False,
- "supports_image_type_qcow2": False,
- "supports_image_type_raw": True,
- "supports_image_type_vdi": False,
- "supports_image_type_vhd": False,
- "supports_image_type_vhdx": False,
- "supports_image_type_vmdk": False,
- "supports_image_type_ploop": False,
- }
- super(PowerVMDriver, self).__init__(virtapi)
-
- def init_host(self, host):
- """Initialize anything that is necessary for the driver to function.
-
- Includes catching up with currently running VMs on the given host.
- """
-        LOG.warning(
-            'The powervm virt driver is deprecated and may be removed in a '
-            'future release. The driver is not tested by the OpenStack '
-            'project nor does it have clear maintainers, and thus its '
-            'quality cannot be ensured. If you are using the driver in '
-            'production, please let us know on the openstack-discuss '
-            'mailing list or on IRC.'
-        )
-
- # Build the adapter. May need to attempt the connection multiple times
- # in case the PowerVM management API service is starting.
- # TODO(efried): Implement async compute service enable/disable like
- # I73a34eb6e0ca32d03e54d12a5e066b2ed4f19a61
- self.adapter = pvm_apt.Adapter(
- pvm_apt.Session(conn_tries=60),
- helpers=[log_hlp.log_helper, vio_hlp.vios_busy_retry_helper])
- # Make sure the Virtual I/O Server(s) are available.
- pvm_par.validate_vios_ready(self.adapter)
- self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
-
- # Do a scrub of the I/O plane to make sure the system is in good shape
- LOG.info("Clearing stale I/O connections on driver init.")
- pvm_stor.ComprehensiveScrub(self.adapter).execute()
-
- # Initialize the disk adapter
- self.disk_dvr = importutils.import_object_ns(
- DISK_ADPT_NS, DISK_ADPT_MAPPINGS[CONF.powervm.disk_driver.lower()],
- self.adapter, self.host_wrapper.uuid)
- self.image_api = glance.API()
-
- LOG.info("The PowerVM compute driver has been initialized.")
-
- @staticmethod
- def _log_operation(op, instance):
- """Log entry point of driver operations."""
- LOG.info('Operation: %(op)s. Virtual machine display name: '
- '%(display_name)s, name: %(name)s',
- {'op': op, 'display_name': instance.display_name,
- 'name': instance.name}, instance=instance)
-
- def get_info(self, instance, use_cache=True):
- """Get the current status of an instance.
-
- :param instance: nova.objects.instance.Instance object
- :param use_cache: unused in this driver
- :returns: An InstanceInfo object.
- """
- return vm.get_vm_info(self.adapter, instance)
-
- def list_instances(self):
- """Return the names of all the instances known to the virt host.
-
- :return: VM Names as a list.
- """
- return vm.get_lpar_names(self.adapter)
-
- def get_available_nodes(self, refresh=False):
- """Returns nodenames of all nodes managed by the compute service.
-
-        This method is for multi-compute-node support. If a driver supports
-        multiple compute nodes, this method returns a list of nodenames
-        managed by the service. Otherwise, this method should return
-        [hypervisor_hostname].
- """
-
- return [CONF.host]
-
- def get_available_resource(self, nodename):
- """Retrieve resource information.
-
- This method is called when nova-compute launches, and as part of a
- periodic task.
-
- :param nodename: Node from which the caller wants to get resources.
- A driver that manages only one node can safely ignore
- this.
- :return: Dictionary describing resources.
- """
- # Do this here so it refreshes each time this method is called.
- self.host_wrapper = pvm_ms.System.get(self.adapter)[0]
- return self._get_available_resource()
-
- def _get_available_resource(self):
- # Get host information
- data = pvm_host.build_host_resource_from_ms(self.host_wrapper)
-
- # Add the disk information
- data["local_gb"] = self.disk_dvr.capacity
- data["local_gb_used"] = self.disk_dvr.capacity_used
-
- return data
-
- def update_provider_tree(self, provider_tree, nodename, allocations=None):
- """Update a ProviderTree with current provider and inventory data.
-
- :param nova.compute.provider_tree.ProviderTree provider_tree:
- A nova.compute.provider_tree.ProviderTree object representing all
- the providers in the tree associated with the compute node, and any
- sharing providers (those with the ``MISC_SHARES_VIA_AGGREGATE``
- trait) associated via aggregate with any of those providers (but
- not *their* tree- or aggregate-associated providers), as currently
- known by placement.
- :param nodename:
- String name of the compute node (i.e.
- ComputeNode.hypervisor_hostname) for which the caller is requesting
- updated provider information.
- :param allocations: Currently ignored by this driver.
- """
- # Get (legacy) resource information. Same as get_available_resource,
- # but we don't need to refresh self.host_wrapper as it was *just*
- # refreshed by get_available_resource in the resource tracker's
- # update_available_resource flow.
- data = self._get_available_resource()
-
-        # NOTE(yikun): If the inv record does not exist, the allocation_ratio
-        # will use the CONF.xxx_allocation_ratio value if xxx_allocation_ratio
-        # is set, and fall back to the initial_xxx_allocation_ratio
-        # otherwise.
- inv = provider_tree.data(nodename).inventory
- ratios = self._get_allocation_ratios(inv)
- # TODO(efried): Fix these to reflect something like reality
- cpu_reserved = CONF.reserved_host_cpus
- mem_reserved = CONF.reserved_host_memory_mb
- disk_reserved = self._get_reserved_host_disk_gb_from_config()
-
- inventory = {
- orc.VCPU: {
- 'total': data['vcpus'],
- 'max_unit': data['vcpus'],
- 'allocation_ratio': ratios[orc.VCPU],
- 'reserved': cpu_reserved,
- },
- orc.MEMORY_MB: {
- 'total': data['memory_mb'],
- 'max_unit': data['memory_mb'],
- 'allocation_ratio': ratios[orc.MEMORY_MB],
- 'reserved': mem_reserved,
- },
- orc.DISK_GB: {
- # TODO(efried): Proper DISK_GB sharing when SSP driver in play
- 'total': int(data['local_gb']),
- 'max_unit': int(data['local_gb']),
- 'allocation_ratio': ratios[orc.DISK_GB],
- 'reserved': disk_reserved,
- },
- }
- provider_tree.update_inventory(nodename, inventory)
-
- def spawn(self, context, instance, image_meta, injected_files,
- admin_password, allocations, network_info=None,
- block_device_info=None, power_on=True, accel_info=None):
- """Create a new instance/VM/domain on the virtualization platform.
-
- Once this successfully completes, the instance should be
- running (power_state.RUNNING).
-
- If this fails, any partial instance should be completely
- cleaned up, and the virtualization platform should be in the state
- that it was before this call began.
-
- :param context: security context
- :param instance: nova.objects.instance.Instance
- This function should use the data there to guide
- the creation of the new instance.
- :param nova.objects.ImageMeta image_meta:
- The metadata of the image of the instance.
- :param injected_files: User files to inject into instance.
- :param admin_password: Administrator password to set in instance.
- :param allocations: Information about resources allocated to the
- instance via placement, of the form returned by
- SchedulerReportClient.get_allocations_for_consumer.
- :param network_info: instance network information
- :param block_device_info: Information about block devices to be
- attached to the instance.
- :param power_on: True if the instance should be powered on, False
- otherwise
- """
- self._log_operation('spawn', instance)
- # Define the flow
- flow_spawn = tf_lf.Flow("spawn")
-
- # This FeedTask accumulates VIOS storage connection operations to be
- # run in parallel. Include both SCSI and fibre channel mappings for
- # the scrubber.
- stg_ftsk = pvm_par.build_active_vio_feed_task(
- self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})
-
- flow_spawn.add(tf_vm.Create(
- self.adapter, self.host_wrapper, instance, stg_ftsk))
-
- # Create a flow for the IO
- flow_spawn.add(tf_net.PlugVifs(
- self.virtapi, self.adapter, instance, network_info))
- flow_spawn.add(tf_net.PlugMgmtVif(
- self.adapter, instance))
-
- # Create the boot image.
- flow_spawn.add(tf_stg.CreateDiskForImg(
- self.disk_dvr, context, instance, image_meta))
- # Connects up the disk to the LPAR
- flow_spawn.add(tf_stg.AttachDisk(
- self.disk_dvr, instance, stg_ftsk=stg_ftsk))
-
- # Extract the block devices.
- bdms = driver.block_device_info_get_mapping(block_device_info)
-
- # Determine if there are volumes to connect. If so, add a connection
- # for each type.
- for bdm, vol_drv in self._vol_drv_iter(context, instance, bdms,
- stg_ftsk=stg_ftsk):
- # Connect the volume. This will update the connection_info.
- flow_spawn.add(tf_stg.AttachVolume(vol_drv))
-
- # If the config drive is needed, add those steps. Should be done
- # after all the other I/O.
- if configdrive.required_by(instance):
- flow_spawn.add(tf_stg.CreateAndConnectCfgDrive(
- self.adapter, instance, injected_files, network_info,
- stg_ftsk, admin_pass=admin_password))
-
- # Add the transaction manager flow at the end of the 'I/O
- # connection' tasks. This will run all the connections in parallel.
- flow_spawn.add(stg_ftsk)
-
- # Last step is to power on the system.
- flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))
-
- # Run the flow.
- tf_base.run(flow_spawn, instance=instance)
-
- def destroy(self, context, instance, network_info, block_device_info=None,
- destroy_disks=True):
- """Destroy the specified instance from the Hypervisor.
-
- If the instance is not found (for example if networking failed), this
- function should still succeed. It's probably a good idea to log a
- warning in that case.
-
- :param context: security context
- :param instance: Instance object as returned by DB layer.
- :param network_info: instance network information
- :param block_device_info: Information about block devices that should
- be detached from the instance.
- :param destroy_disks: Indicates if disks should be destroyed
- """
- # TODO(thorst, efried) Add resize checks for destroy
-
- self._log_operation('destroy', instance)
-
- def _setup_flow_and_run():
- # Define the flow
- flow = tf_lf.Flow("destroy")
-
- # Power Off the LPAR. If its disks are about to be deleted, issue a
- # hard shutdown.
- flow.add(tf_vm.PowerOff(self.adapter, instance,
- force_immediate=destroy_disks))
-
- # The FeedTask accumulates storage disconnection tasks to be run in
- # parallel.
- stg_ftsk = pvm_par.build_active_vio_feed_task(
- self.adapter, xag=[pvm_const.XAG.VIO_SMAP])
-
- # Call the unplug VIFs task. While CNAs get removed from the LPAR
- # directly on the destroy, this clears up the I/O Host side.
- flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))
-
- # Add the disconnect/deletion of the vOpt to the transaction
- # manager.
- if configdrive.required_by(instance):
- flow.add(tf_stg.DeleteVOpt(
- self.adapter, instance, stg_ftsk=stg_ftsk))
-
- # Extract the block devices.
- bdms = driver.block_device_info_get_mapping(block_device_info)
-
- # Determine if there are volumes to detach. If so, remove each
- # volume (within the transaction manager)
- for bdm, vol_drv in self._vol_drv_iter(
- context, instance, bdms, stg_ftsk=stg_ftsk):
- flow.add(tf_stg.DetachVolume(vol_drv))
-
- # Detach the disk storage adapters
- flow.add(tf_stg.DetachDisk(self.disk_dvr, instance))
-
- # Accumulated storage disconnection tasks next
- flow.add(stg_ftsk)
-
- # Delete the storage disks
- if destroy_disks:
- flow.add(tf_stg.DeleteDisk(self.disk_dvr))
-
- # TODO(thorst, efried) Add LPAR id based scsi map clean up task
- flow.add(tf_vm.Delete(self.adapter, instance))
-
- # Build the engine & run!
- tf_base.run(flow, instance=instance)
-
- try:
- _setup_flow_and_run()
- except exc.InstanceNotFound:
- LOG.debug('VM was not found during destroy operation.',
- instance=instance)
- return
- except pvm_exc.Error as e:
- LOG.exception("PowerVM error during destroy.", instance=instance)
- # Convert to a Nova exception
- raise exc.InstanceTerminationFailure(reason=str(e))
-
- def snapshot(self, context, instance, image_id, update_task_state):
- """Snapshots the specified instance.
-
- :param context: security context
- :param instance: nova.objects.instance.Instance
- :param image_id: Reference to a pre-created image that will hold the
- snapshot.
- :param update_task_state: Callback function to update the task_state
- on the instance while the snapshot operation progresses. The
- function takes a task_state argument and an optional
- expected_task_state kwarg which defaults to
- nova.compute.task_states.IMAGE_SNAPSHOT. See
- nova.objects.instance.Instance.save for expected_task_state usage.
- """
-
- if not self.disk_dvr.capabilities.get('snapshot'):
- raise exc.NotSupportedWithOption(
- message=_("The snapshot operation is not supported in "
- "conjunction with a [powervm]/disk_driver setting "
- "of %s.") % CONF.powervm.disk_driver)
-
- self._log_operation('snapshot', instance)
-
- # Define the flow.
- flow = tf_lf.Flow("snapshot")
-
- # Notify that we're starting the process.
- flow.add(tf_img.UpdateTaskState(update_task_state,
- task_states.IMAGE_PENDING_UPLOAD))
-
- # Connect the instance's boot disk to the management partition, and
- # scan the scsi bus and bring the device into the management partition.
- flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance))
-
- # Notify that the upload is in progress.
- flow.add(tf_img.UpdateTaskState(
- update_task_state, task_states.IMAGE_UPLOADING,
- expected_state=task_states.IMAGE_PENDING_UPLOAD))
-
- # Stream the disk to glance.
- flow.add(tf_img.StreamToGlance(context, self.image_api, image_id,
- instance))
-
- # Disconnect the boot disk from the management partition and delete the
- # device.
- flow.add(tf_stg.RemoveInstanceDiskFromMgmt(self.disk_dvr, instance))
-
- # Run the flow.
- tf_base.run(flow, instance=instance)
-
- def power_off(self, instance, timeout=0, retry_interval=0):
- """Power off the specified instance.
-
- :param instance: nova.objects.instance.Instance
- :param timeout: time to wait for GuestOS to shutdown
- :param retry_interval: How often to signal guest while
- waiting for it to shutdown
- """
- self._log_operation('power_off', instance)
- force_immediate = (timeout == 0)
- timeout = timeout or None
- vm.power_off(self.adapter, instance, force_immediate=force_immediate,
- timeout=timeout)
-
- def power_on(self, context, instance, network_info,
- block_device_info=None, accel_info=None):
- """Power on the specified instance.
-
- :param instance: nova.objects.instance.Instance
- """
- self._log_operation('power_on', instance)
- vm.power_on(self.adapter, instance)
-
- def reboot(self, context, instance, network_info, reboot_type,
- block_device_info=None, bad_volumes_callback=None,
- accel_info=None):
- """Reboot the specified instance.
-
- After this is called successfully, the instance's state
- goes back to power_state.RUNNING. The virtualization
- platform should ensure that the reboot action has completed
- successfully even in cases in which the underlying domain/vm
- is paused or halted/stopped.
-
- :param instance: nova.objects.instance.Instance
- :param network_info: `nova.network.models.NetworkInfo` object
- describing the network metadata.
- :param reboot_type: Either a HARD or SOFT reboot
- :param block_device_info: Info pertaining to attached volumes
- :param bad_volumes_callback: Function to handle any bad volumes
- encountered
- :param accel_info: List of accelerator request dicts. The exact
- data struct is doc'd in nova/virt/driver.py::spawn().
- """
- self._log_operation(reboot_type + ' reboot', instance)
- vm.reboot(self.adapter, instance, reboot_type == 'HARD')
- # pypowervm exceptions are sufficient to indicate real failure.
- # Otherwise, pypowervm thinks the instance is up.
-
- def attach_interface(self, context, instance, image_meta, vif):
- """Attach an interface to the instance."""
- self.plug_vifs(instance, [vif])
-
- def detach_interface(self, context, instance, vif):
- """Detach an interface from the instance."""
- self.unplug_vifs(instance, [vif])
-
- def plug_vifs(self, instance, network_info):
- """Plug VIFs into networks."""
- self._log_operation('plug_vifs', instance)
-
- # Define the flow
- flow = tf_lf.Flow("plug_vifs")
-
- # Get the LPAR Wrapper
- flow.add(tf_vm.Get(self.adapter, instance))
-
- # Run the attach
- flow.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance,
- network_info))
-
- # Run the flow
- try:
- tf_base.run(flow, instance=instance)
- except exc.InstanceNotFound:
- raise exc.VirtualInterfacePlugException(
- _("Plug vif failed because instance %s was not found.")
- % instance.name)
- except Exception:
- LOG.exception("PowerVM error plugging vifs.", instance=instance)
- raise exc.VirtualInterfacePlugException(
- _("Plug vif failed because of an unexpected error."))
-
- def unplug_vifs(self, instance, network_info):
- """Unplug VIFs from networks."""
- self._log_operation('unplug_vifs', instance)
-
- # Define the flow
- flow = tf_lf.Flow("unplug_vifs")
-
- # Run the detach
- flow.add(tf_net.UnplugVifs(self.adapter, instance, network_info))
-
- # Run the flow
- try:
- tf_base.run(flow, instance=instance)
- except exc.InstanceNotFound:
-            LOG.warning('VM was not found during unplug operation; it has '
-                        'possibly already been deleted.', instance=instance)
- except Exception:
- LOG.exception("PowerVM error trying to unplug vifs.",
- instance=instance)
- raise exc.InterfaceDetachFailed(instance_uuid=instance.uuid)
-
- def get_vnc_console(self, context, instance):
- """Get connection info for a vnc console.
-
- :param context: security context
- :param instance: nova.objects.instance.Instance
-
- :return: An instance of console.type.ConsoleVNC
- """
- self._log_operation('get_vnc_console', instance)
- lpar_uuid = vm.get_pvm_uuid(instance)
-
- # Build the connection to the VNC.
- host = CONF.vnc.server_proxyclient_address
- # TODO(thorst, efried) Add the x509 certificate support when it lands
-
- try:
- # Open up a remote vterm
- port = pvm_vterm.open_remotable_vnc_vterm(
- self.adapter, lpar_uuid, host, vnc_path=lpar_uuid)
- # Note that the VNC viewer will wrap the internal_access_path with
- # the HTTP content.
- return console_type.ConsoleVNC(host=host, port=port,
- internal_access_path=lpar_uuid)
- except pvm_exc.HttpError as e:
- with excutils.save_and_reraise_exception(logger=LOG) as sare:
- # If the LPAR was not found, raise a more descriptive error
- if e.response.status == 404:
- sare.reraise = False
- raise exc.InstanceNotFound(instance_id=instance.uuid)
-
- def attach_volume(self, context, connection_info, instance, mountpoint,
- disk_bus=None, device_type=None, encryption=None):
- """Attach the volume to the instance using the connection_info.
-
- :param context: security context
- :param connection_info: Volume connection information from the block
- device mapping
- :param instance: nova.objects.instance.Instance
- :param mountpoint: Unused
- :param disk_bus: Unused
- :param device_type: Unused
- :param encryption: Unused
- """
- self._log_operation('attach_volume', instance)
-
- # Define the flow
- flow = tf_lf.Flow("attach_volume")
-
- # Build the driver
- vol_drv = volume.build_volume_driver(self.adapter, instance,
- connection_info)
-
- # Add the volume attach to the flow.
- flow.add(tf_stg.AttachVolume(vol_drv))
-
- # Run the flow
- tf_base.run(flow, instance=instance)
-
- # The volume connector may have updated the system metadata. Save
- # the instance to persist the data. Spawn/destroy auto saves instance,
- # but the attach does not. Detach does not need this save - as the
- # detach flows do not (currently) modify system metadata. May need
- # to revise in the future as volume connectors evolve.
- instance.save()
-
- def detach_volume(self, context, connection_info, instance, mountpoint,
- encryption=None):
- """Detach the volume attached to the instance.
-
- :param context: security context
- :param connection_info: Volume connection information from the block
- device mapping
- :param instance: nova.objects.instance.Instance
- :param mountpoint: Unused
- :param encryption: Unused
- """
- self._log_operation('detach_volume', instance)
-
- # Define the flow
- flow = tf_lf.Flow("detach_volume")
-
- # Get a volume adapter for this volume
- vol_drv = volume.build_volume_driver(self.adapter, instance,
- connection_info)
-
- # Add a task to detach the volume
- flow.add(tf_stg.DetachVolume(vol_drv))
-
- # Run the flow
- tf_base.run(flow, instance=instance)
-
- def extend_volume(self, context, connection_info, instance,
- requested_size):
- """Extend the disk attached to the instance.
-
- :param context: security context
- :param dict connection_info: The connection for the extended volume.
- :param nova.objects.instance.Instance instance:
- The instance whose volume gets extended.
- :param int requested_size: The requested new volume size in bytes.
- :return: None
- """
-
- vol_drv = volume.build_volume_driver(
- self.adapter, instance, connection_info)
- vol_drv.extend_volume()
-
- def _vol_drv_iter(self, context, instance, bdms, stg_ftsk=None):
- """Yields a bdm and volume driver.
-
- :param context: security context
- :param instance: nova.objects.instance.Instance
- :param bdms: block device mappings
- :param stg_ftsk: storage FeedTask
- """
- # Get a volume driver for each volume
- for bdm in bdms or []:
- conn_info = bdm.get('connection_info')
- vol_drv = volume.build_volume_driver(self.adapter, instance,
- conn_info, stg_ftsk=stg_ftsk)
- yield bdm, vol_drv
-
- def get_volume_connector(self, instance):
- """Get connector information for the instance for attaching to volumes.
-
- Connector information is a dictionary representing information about
- the system that will be making the connection.
-
- :param instance: nova.objects.instance.Instance
- """
- # Put the values in the connector
- connector = {}
- wwpn_list = fcvscsi.wwpns(self.adapter)
-
- if wwpn_list is not None:
- connector["wwpns"] = wwpn_list
- connector["multipath"] = False
- connector['host'] = CONF.host
- connector['initiator'] = None
-
- return connector
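
For reference, get_volume_connector above produces a plain dictionary; with Fibre Channel WWPNs available it would look roughly like the following sketch (the WWPN and host values are invented):

    # Hypothetical example of the connector returned by get_volume_connector
    # when fcvscsi.wwpns() finds two physical ports.
    connector = {
        'wwpns': ['10000090fa1b2c3d', '10000090fa1b2c3e'],  # invented values
        'multipath': False,
        'host': 'compute-node-1',   # CONF.host on this hypervisor
        'initiator': None,          # no iSCSI initiator for this driver
    }
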
diff --git a/nova/virt/powervm/host.py b/nova/virt/powervm/host.py
deleted file mode 100644
index 2b206fee41..0000000000
--- a/nova/virt/powervm/host.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright 2014, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import math
-
-from oslo_serialization import jsonutils
-
-from nova import conf as cfg
-from nova.objects import fields
-
-
-CONF = cfg.CONF
-
-# PowerVM hypervisor info
-# Normally, the hypervisor version is a string in the form of '8.0.0' and is
-# converted to an int with nova.virt.utils.convert_version_to_int(); however,
-# there isn't currently a mechanism to retrieve the exact version.
-# Complicating this is the fact that nova conductor only allows live migration
-# from the source host to the destination if the source is equal to or less
-# than the destination version. PowerVM live migration limitations are
-# checked by the PowerVM capabilities flags and not specific version levels.
-# For that reason, we'll just publish the major level.
-IBM_POWERVM_HYPERVISOR_VERSION = 8
-
-# The types of LPARS that are supported.
-POWERVM_SUPPORTED_INSTANCES = [
- (fields.Architecture.PPC64, fields.HVType.PHYP, fields.VMMode.HVM),
- (fields.Architecture.PPC64LE, fields.HVType.PHYP, fields.VMMode.HVM)]
-
-
-def build_host_resource_from_ms(ms_w):
- """Build the host resource dict from a ManagedSystem PowerVM wrapper.
-
- :param ms_w: The pypowervm System wrapper describing the managed system.
- """
- data = {}
- # Calculate the vcpus
- proc_units = ms_w.proc_units_configurable
- pu_used = float(proc_units) - float(ms_w.proc_units_avail)
- data['vcpus'] = int(math.ceil(float(proc_units)))
- data['vcpus_used'] = int(math.ceil(pu_used))
- data['memory_mb'] = ms_w.memory_configurable
- data['memory_mb_used'] = (ms_w.memory_configurable -
- ms_w.memory_free)
- data["hypervisor_type"] = fields.HVType.PHYP
- data["hypervisor_version"] = IBM_POWERVM_HYPERVISOR_VERSION
- data["hypervisor_hostname"] = CONF.host
- data["cpu_info"] = jsonutils.dumps({'vendor': 'ibm', 'arch': 'ppc64'})
- data["numa_topology"] = None
- data["supported_instances"] = POWERVM_SUPPORTED_INSTANCES
- stats = {'proc_units': '%.2f' % float(proc_units),
- 'proc_units_used': '%.2f' % pu_used,
- 'memory_region_size': ms_w.memory_region_size}
- data["stats"] = stats
- return data
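
As a worked example of the arithmetic in build_host_resource_from_ms, assume a hypothetical managed system reporting 16.0 configurable processor units with 10.5 still available, and 65536 MB of configurable memory with 40960 MB free:

    import math

    proc_units = 16.0          # ms_w.proc_units_configurable (assumed)
    proc_units_avail = 10.5    # ms_w.proc_units_avail (assumed)

    pu_used = proc_units - proc_units_avail          # 5.5
    vcpus = int(math.ceil(proc_units))               # 16
    vcpus_used = int(math.ceil(pu_used))             # 6

    memory_mb = 65536          # ms_w.memory_configurable (assumed)
    memory_free = 40960        # ms_w.memory_free (assumed)
    memory_mb_used = memory_mb - memory_free         # 24576

    stats = {'proc_units': '%.2f' % proc_units,      # '16.00'
             'proc_units_used': '%.2f' % pu_used}    # '5.50'
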
diff --git a/nova/virt/powervm/image.py b/nova/virt/powervm/image.py
deleted file mode 100644
index b4636b0f11..0000000000
--- a/nova/virt/powervm/image.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Utilities related to glance image management for the PowerVM driver."""
-
-from nova import utils
-
-
-def stream_blockdev_to_glance(context, image_api, image_id, metadata, devpath):
- """Stream the entire contents of a block device to a glance image.
-
- :param context: Nova security context.
- :param image_api: Handle to the glance image API.
- :param image_id: UUID of the prepared glance image.
- :param metadata: Dictionary of metadata for the image.
- :param devpath: String path to device file of block device to be uploaded,
- e.g. "/dev/sde".
- """
- # Make the device file owned by the current user for the duration of the
- # operation.
- with utils.temporary_chown(devpath), open(devpath, 'rb') as stream:
- # Stream it. This is synchronous.
- image_api.update(context, image_id, metadata, stream)
-
-
-def generate_snapshot_metadata(context, image_api, image_id, instance):
- """Generate a metadata dictionary for an instance snapshot.
-
- :param context: Nova security context.
- :param image_api: Handle to the glance image API.
- :param image_id: UUID of the prepared glance image.
- :param instance: The Nova instance whose disk is to be snapshotted.
- :return: A dict of metadata suitable for image_api.update.
- """
- image = image_api.get(context, image_id)
-
- # TODO(esberglu): Update this to v2 metadata
- metadata = {
- 'name': image['name'],
- 'status': 'active',
- 'disk_format': 'raw',
- 'container_format': 'bare',
- 'properties': {
- 'image_location': 'snapshot',
- 'image_state': 'available',
- 'owner_id': instance.project_id,
- }
- }
- return metadata
diff --git a/nova/virt/powervm/media.py b/nova/virt/powervm/media.py
deleted file mode 100644
index f57ddd332d..0000000000
--- a/nova/virt/powervm/media.py
+++ /dev/null
@@ -1,237 +0,0 @@
-# Copyright 2015, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import os
-import tempfile
-
-from oslo_log import log as logging
-from oslo_utils import excutils
-from pypowervm import const as pvm_const
-from pypowervm.tasks import scsi_mapper as tsk_map
-from pypowervm.tasks import storage as tsk_stg
-from pypowervm.tasks import vopt as tsk_vopt
-from pypowervm import util as pvm_util
-from pypowervm.wrappers import storage as pvm_stg
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-import retrying
-from taskflow import task
-
-from nova.api.metadata import base as instance_metadata
-from nova.network import model as network_model
-from nova.virt import configdrive
-from nova.virt.powervm import vm
-
-
-LOG = logging.getLogger(__name__)
-
-_LLA_SUBNET = "fe80::/64"
-# TODO(efried): CONF these (maybe)
-_VOPT_VG = 'rootvg'
-_VOPT_SIZE_GB = 1
-
-
-class ConfigDrivePowerVM(object):
-
- def __init__(self, adapter):
- """Creates the config drive manager for PowerVM.
-
- :param adapter: The pypowervm adapter to communicate with the system.
- """
- self.adapter = adapter
-
- # Validate that the virtual optical exists
- self.vios_uuid, self.vg_uuid = tsk_vopt.validate_vopt_repo_exists(
- self.adapter, vopt_media_volume_group=_VOPT_VG,
- vopt_media_rep_size=_VOPT_SIZE_GB)
-
- @staticmethod
- def _sanitize_network_info(network_info):
- """Will sanitize the network info for the config drive.
-
- Newer versions of cloud-init look at the vif type information in
- the network info and utilize it to determine what to do. There are
- a limited number of vif types, and it seems to be built on the idea
- that the neutron vif type is the cloud init vif type (which is not
- quite right).
-
- This sanitizes the network info that gets passed into the config
-        drive to work properly with cloud-init.
- """
- network_info = copy.deepcopy(network_info)
-
- # OVS is the only supported vif type. All others (SEA, PowerVM SR-IOV)
- # will default to generic vif.
- for vif in network_info:
- if vif.get('type') != 'ovs':
- LOG.debug('Changing vif type from %(type)s to vif for vif '
- '%(id)s.', {'type': vif.get('type'),
- 'id': vif.get('id')})
- vif['type'] = 'vif'
- return network_info
-
- def _create_cfg_dr_iso(self, instance, injected_files, network_info,
- iso_path, admin_pass=None):
- """Creates an ISO file that contains the injected files.
-
- Used for config drive.
-
- :param instance: The VM instance from OpenStack.
- :param injected_files: A list of file paths that will be injected into
- the ISO.
- :param network_info: The network_info from the nova spawn method.
- :param iso_path: The absolute file path for the new ISO
- :param admin_pass: Optional password to inject for the VM.
- """
- LOG.info("Creating config drive.", instance=instance)
- extra_md = {}
- if admin_pass is not None:
- extra_md['admin_pass'] = admin_pass
-
- # Sanitize the vifs for the network config
- network_info = self._sanitize_network_info(network_info)
-
- inst_md = instance_metadata.InstanceMetadata(instance,
- content=injected_files,
- extra_md=extra_md,
- network_info=network_info)
-
- with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
- LOG.info("Config drive ISO being built in %s.", iso_path,
- instance=instance)
-
-            # There may be an OSError exception when creating the config
-            # drive. If so, retry the operation before raising.
- @retrying.retry(retry_on_exception=lambda exc: isinstance(
- exc, OSError), stop_max_attempt_number=2)
- def _make_cfg_drive(iso_path):
- cdb.make_drive(iso_path)
-
- try:
- _make_cfg_drive(iso_path)
- except OSError:
- with excutils.save_and_reraise_exception(logger=LOG):
- LOG.exception("Config drive ISO could not be built",
- instance=instance)
-
- def create_cfg_drv_vopt(self, instance, injected_files, network_info,
- stg_ftsk, admin_pass=None, mgmt_cna=None):
- """Create the config drive virtual optical and attach to VM.
-
- :param instance: The VM instance from OpenStack.
- :param injected_files: A list of file paths that will be injected into
- the ISO.
- :param network_info: The network_info from the nova spawn method.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
- :param admin_pass: (Optional) password to inject for the VM.
- :param mgmt_cna: (Optional) The management (RMC) CNA wrapper.
- """
- # If there is a management client network adapter, then we should
- # convert that to a VIF and add it to the network info
- if mgmt_cna is not None:
- network_info = copy.deepcopy(network_info)
- network_info.append(self._mgmt_cna_to_vif(mgmt_cna))
-
- # Pick a file name for when we upload the media to VIOS
- file_name = pvm_util.sanitize_file_name_for_api(
- instance.uuid.replace('-', ''), prefix='cfg_', suffix='.iso',
- max_len=pvm_const.MaxLen.VOPT_NAME)
-
- # Create and upload the media
- with tempfile.NamedTemporaryFile(mode='rb') as fh:
- self._create_cfg_dr_iso(instance, injected_files, network_info,
- fh.name, admin_pass=admin_pass)
- vopt, f_uuid = tsk_stg.upload_vopt(
- self.adapter, self.vios_uuid, fh, file_name,
- os.path.getsize(fh.name))
-
- # Define the function to build and add the mapping
- def add_func(vios_w):
- LOG.info("Adding cfg drive mapping to Virtual I/O Server %s.",
- vios_w.name, instance=instance)
- mapping = tsk_map.build_vscsi_mapping(
- None, vios_w, vm.get_pvm_uuid(instance), vopt)
- return tsk_map.add_map(vios_w, mapping)
-
- # Add the subtask to create the mapping when the FeedTask runs
- stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(add_func)
-
- def _mgmt_cna_to_vif(self, cna):
- """Converts the mgmt CNA to VIF format for network injection."""
- mac = vm.norm_mac(cna.mac)
- ipv6_link_local = self._mac_to_link_local(mac)
-
- subnet = network_model.Subnet(
- version=6, cidr=_LLA_SUBNET,
- ips=[network_model.FixedIP(address=ipv6_link_local)])
- network = network_model.Network(id='mgmt', subnets=[subnet],
- injected='yes')
- return network_model.VIF(id='mgmt_vif', address=mac,
- network=network)
-
- @staticmethod
- def _mac_to_link_local(mac):
- # Convert the address to IPv6. The first step is to separate out the
- # mac address
- splits = mac.split(':')
-
- # Create EUI-64 id per RFC 4291 Appendix A
- splits.insert(3, 'ff')
- splits.insert(4, 'fe')
-
- # Create modified EUI-64 id via bit flip per RFC 4291 Appendix A
- splits[0] = "%.2x" % (int(splits[0], 16) ^ 0b00000010)
-
-        # Convert to the IPv6 link-local format. The prefix is fe80::. Join
-        # the octet pairs together to form the 16-bit groups.
- ll = ['fe80:']
- ll.extend([splits[x] + splits[x + 1]
- for x in range(0, len(splits), 2)])
- return ':'.join(ll)
-
- def dlt_vopt(self, instance, stg_ftsk):
- """Deletes the virtual optical and scsi mappings for a VM.
-
- :param instance: The nova instance whose VOpt(s) are to be removed.
- :param stg_ftsk: A FeedTask. The actions to modify the storage will be
- added as batched functions onto the FeedTask.
- """
- lpar_uuid = vm.get_pvm_uuid(instance)
-
- # The matching function for find_maps, remove_maps
- match_func = tsk_map.gen_match_func(pvm_stg.VOptMedia)
-
- # Add a function to remove the mappings
- stg_ftsk.wrapper_tasks[self.vios_uuid].add_functor_subtask(
- tsk_map.remove_maps, lpar_uuid, match_func=match_func)
-
-        # Find the VOpt device from the mappings
- media_mappings = tsk_map.find_maps(
- stg_ftsk.get_wrapper(self.vios_uuid).scsi_mappings,
- client_lpar_id=lpar_uuid, match_func=match_func)
- media_elems = [x.backing_storage for x in media_mappings]
-
- def rm_vopt():
- LOG.info("Removing virtual optical storage.",
- instance=instance)
- vg_wrap = pvm_stg.VG.get(self.adapter, uuid=self.vg_uuid,
- parent_type=pvm_vios.VIOS,
- parent_uuid=self.vios_uuid)
- tsk_stg.rm_vg_storage(vg_wrap, vopts=media_elems)
-
- # Add task to remove the media if it exists
- if media_elems:
- stg_ftsk.add_post_execute(task.FunctorTask(rm_vopt))
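
The _mac_to_link_local helper above builds a modified EUI-64 link-local address (RFC 4291, Appendix A). Traced by hand for a made-up MAC address, the steps look like this:

    # Worked example of the _mac_to_link_local conversion (MAC value invented).
    mac = 'fa:16:3e:12:34:56'
    # 1. split on ':'        -> fa 16 3e 12 34 56
    # 2. insert 'ff', 'fe'   -> fa 16 3e ff fe 12 34 56   (EUI-64)
    # 3. flip the U/L bit    -> f8 16 3e ff fe 12 34 56   (0xfa ^ 0x02)
    # 4. prefix 'fe80:' and join the octet pairs:
    link_local = 'fe80::f816:3eff:fe12:3456'
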
diff --git a/nova/virt/powervm/mgmt.py b/nova/virt/powervm/mgmt.py
deleted file mode 100644
index a62fa29bde..0000000000
--- a/nova/virt/powervm/mgmt.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Utilities related to the PowerVM management partition.
-
-The management partition is a special LPAR that runs the PowerVM REST API
-service. It itself appears through the REST API as a LogicalPartition of type
-aixlinux, but with the is_mgmt_partition property set to True.
-The PowerVM Nova Compute service runs on the management partition.
-"""
-import glob
-import os
-from os import path
-
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-from pypowervm.tasks import partition as pvm_par
-import retrying
-
-from nova import exception
-import nova.privsep.path
-
-
-LOG = logging.getLogger(__name__)
-
-_MP_UUID = None
-
-
-@lockutils.synchronized("mgmt_lpar_uuid")
-def mgmt_uuid(adapter):
-    """Returns the management partition's UUID."""
- global _MP_UUID
- if not _MP_UUID:
- _MP_UUID = pvm_par.get_this_partition(adapter).uuid
- return _MP_UUID
-
-
-def discover_vscsi_disk(mapping, scan_timeout=300):
- """Bring a mapped device into the management partition and find its name.
-
- Based on a VSCSIMapping, scan the appropriate virtual SCSI host bus,
- causing the operating system to discover the mapped device. Find and
- return the path of the newly-discovered device based on its UDID in the
- mapping.
-
- Note: scanning the bus will cause the operating system to discover *all*
- devices on that bus. However, this method will only return the path for
- the specific device from the input mapping, based on its UDID.
-
- :param mapping: The pypowervm.wrappers.virtual_io_server.VSCSIMapping
- representing the mapping of the desired disk to the
- management partition.
- :param scan_timeout: The maximum number of seconds after scanning to wait
- for the specified device to appear.
- :return: The udev-generated ("/dev/sdX") name of the discovered disk.
- :raise NoDiskDiscoveryException: If the disk did not appear after the
- specified timeout.
- :raise UniqueDiskDiscoveryException: If more than one disk appears with the
- expected UDID.
- """
- # Calculate the Linux slot number from the client adapter slot number.
- lslot = 0x30000000 | mapping.client_adapter.lpar_slot_num
- # We'll match the device ID based on the UDID, which is actually the last
- # 32 chars of the field we get from PowerVM.
- udid = mapping.backing_storage.udid[-32:]
-
- LOG.debug("Trying to discover VSCSI disk with UDID %(udid)s on slot "
- "%(slot)x.", {'udid': udid, 'slot': lslot})
-
- # Find the special file to scan the bus, and scan it.
- # This glob should yield exactly one result, but use the loop just in case.
- for scanpath in glob.glob(
- '/sys/bus/vio/devices/%x/host*/scsi_host/host*/scan' % lslot):
- # Writing '- - -' to this sysfs file triggers bus rescan
- nova.privsep.path.writefile(scanpath, 'a', '- - -')
-
- # Now see if our device showed up. If so, we can reliably match it based
- # on its Linux ID, which ends with the disk's UDID.
- dpathpat = '/dev/disk/by-id/*%s' % udid
-
- # The bus scan is asynchronous. Need to poll, waiting for the device to
- # spring into existence. Stop when glob finds at least one device, or
- # after the specified timeout. Sleep 1/4 second between polls.
- @retrying.retry(retry_on_result=lambda result: not result, wait_fixed=250,
- stop_max_delay=scan_timeout * 1000)
- def _poll_for_dev(globpat):
- return glob.glob(globpat)
- try:
- disks = _poll_for_dev(dpathpat)
- except retrying.RetryError as re:
- raise exception.NoDiskDiscoveryException(
- bus=lslot, udid=udid, polls=re.last_attempt.attempt_number,
- timeout=scan_timeout)
- # If we get here, _poll_for_dev returned a nonempty list. If not exactly
- # one entry, this is an error.
- if len(disks) != 1:
- raise exception.UniqueDiskDiscoveryException(path_pattern=dpathpat,
- count=len(disks))
-
- # The by-id path is a symlink. Resolve to the /dev/sdX path
- dpath = path.realpath(disks[0])
- LOG.debug("Discovered VSCSI disk with UDID %(udid)s on slot %(slot)x at "
- "path %(devname)s.",
- {'udid': udid, 'slot': lslot, 'devname': dpath})
- return dpath
-
-
-def remove_block_dev(devpath, scan_timeout=10):
- """Remove a block device from the management partition.
-
- This method causes the operating system of the management partition to
- delete the device special files associated with the specified block device.
-
- :param devpath: Any path to the block special file associated with the
- device to be removed.
- :param scan_timeout: The maximum number of seconds after scanning to wait
- for the specified device to disappear.
- :raise InvalidDevicePath: If the specified device or its 'delete' special
- file cannot be found.
- :raise DeviceDeletionException: If the deletion was attempted, but the
- device special file is still present
- afterward.
- """
- # Resolve symlinks, if any, to get to the /dev/sdX path
- devpath = path.realpath(devpath)
- try:
- os.stat(devpath)
- except OSError:
- raise exception.InvalidDevicePath(path=devpath)
- devname = devpath.rsplit('/', 1)[-1]
- delpath = '/sys/block/%s/device/delete' % devname
- try:
- os.stat(delpath)
- except OSError:
- raise exception.InvalidDevicePath(path=delpath)
- LOG.debug("Deleting block device %(devpath)s from the management "
- "partition via special file %(delpath)s.",
- {'devpath': devpath, 'delpath': delpath})
- # Writing '1' to this sysfs file deletes the block device and rescans.
- nova.privsep.path.writefile(delpath, 'a', '1')
-
- # The bus scan is asynchronous. Need to poll, waiting for the device to
- # disappear. Stop when stat raises OSError (dev file not found) - which is
- # success - or after the specified timeout (which is failure). Sleep 1/4
- # second between polls.
- @retrying.retry(retry_on_result=lambda result: result, wait_fixed=250,
- stop_max_delay=scan_timeout * 1000)
- def _poll_for_del(statpath):
- try:
- os.stat(statpath)
- return True
- except OSError:
- # Device special file is absent, as expected
- return False
- try:
- _poll_for_del(devpath)
- except retrying.RetryError as re:
- # stat just kept returning (dev file continued to exist).
- raise exception.DeviceDeletionException(
- devpath=devpath, polls=re.last_attempt.attempt_number,
- timeout=scan_timeout)
- # Else stat raised - the device disappeared - all done.
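
Both helpers above follow the same poll-until-stable pattern built on the retrying library: a predicate is evaluated every 250 ms until it reports the desired state or the timeout expires. A stripped-down sketch of that pattern; the device path being polled for is invented:

    import glob

    import retrying


    # Poll until glob finds at least one match, checking every 250 ms and
    # giving up after 10 seconds (raises retrying.RetryError on timeout).
    @retrying.retry(retry_on_result=lambda result: not result, wait_fixed=250,
                    stop_max_delay=10 * 1000)
    def poll_for_device(globpat):
        return glob.glob(globpat)


    try:
        disks = poll_for_device('/dev/disk/by-id/*feedfacedeadbeef')  # invented
    except retrying.RetryError:
        print('device never appeared')
    else:
        print('found: %s' % disks)
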
diff --git a/nova/virt/powervm/tasks/__init__.py b/nova/virt/powervm/tasks/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/virt/powervm/tasks/__init__.py
+++ /dev/null
diff --git a/nova/virt/powervm/tasks/base.py b/nova/virt/powervm/tasks/base.py
deleted file mode 100644
index 07714d5b8f..0000000000
--- a/nova/virt/powervm/tasks/base.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2016, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from oslo_log import log as logging
-from taskflow import engines as tf_eng
-from taskflow.listeners import timing as tf_tm
-
-
-LOG = logging.getLogger(__name__)
-
-
-def run(flow, instance=None):
- """Run a TaskFlow Flow with task timing and logging with instance.
-
- :param flow: A taskflow.flow.Flow to run.
- :param instance: A nova instance, for logging.
- :return: The result of taskflow.engines.run(), a dictionary of named
- results of the Flow's execution.
- """
- def log_with_instance(*args, **kwargs):
- """Wrapper for LOG.info(*args, **kwargs, instance=instance)."""
- if instance is not None:
- kwargs['instance'] = instance
- LOG.info(*args, **kwargs)
-
- eng = tf_eng.load(flow)
- with tf_tm.PrintingDurationListener(eng, printer=log_with_instance):
- return eng.run()
diff --git a/nova/virt/powervm/tasks/image.py b/nova/virt/powervm/tasks/image.py
deleted file mode 100644
index 4f8fe4ba18..0000000000
--- a/nova/virt/powervm/tasks/image.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from taskflow import task
-
-from nova.virt.powervm import image
-
-
-LOG = logging.getLogger(__name__)
-
-
-class UpdateTaskState(task.Task):
-
- def __init__(self, update_task_state, task_state, expected_state=None):
- """Invoke the update_task_state callback with the desired arguments.
-
- :param update_task_state: update_task_state callable passed into
- snapshot.
- :param task_state: The new task state (from nova.compute.task_states)
- to set.
- :param expected_state: Optional. The expected state of the task prior
- to this request.
- """
- self.update_task_state = update_task_state
- self.task_state = task_state
- self.kwargs = {}
- if expected_state is not None:
- # We only want to pass expected state if it's not None! That's so
- # we take the update_task_state method's default.
- self.kwargs['expected_state'] = expected_state
- super(UpdateTaskState, self).__init__(
- name='update_task_state_%s' % task_state)
-
- def execute(self):
- self.update_task_state(self.task_state, **self.kwargs)
-
-
-class StreamToGlance(task.Task):
-
- """Task around streaming a block device to glance."""
-
- def __init__(self, context, image_api, image_id, instance):
- """Initialize the flow for streaming a block device to glance.
-
- Requires: disk_path: Path to the block device file for the instance's
- boot disk.
- :param context: Nova security context.
- :param image_api: Handle to the glance API.
- :param image_id: UUID of the prepared glance image.
- :param instance: Instance whose backing device is being captured.
- """
- self.context = context
- self.image_api = image_api
- self.image_id = image_id
- self.instance = instance
- super(StreamToGlance, self).__init__(name='stream_to_glance',
- requires='disk_path')
-
- def execute(self, disk_path):
- metadata = image.generate_snapshot_metadata(
- self.context, self.image_api, self.image_id, self.instance)
- LOG.info("Starting stream of boot device (local blockdev %(devpath)s) "
- "to glance image %(img_id)s.",
- {'devpath': disk_path, 'img_id': self.image_id},
- instance=self.instance)
- image.stream_blockdev_to_glance(self.context, self.image_api,
- self.image_id, metadata, disk_path)
diff --git a/nova/virt/powervm/tasks/network.py b/nova/virt/powervm/tasks/network.py
deleted file mode 100644
index d96ff25d9d..0000000000
--- a/nova/virt/powervm/tasks/network.py
+++ /dev/null
@@ -1,259 +0,0 @@
-# Copyright 2015, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import eventlet
-from oslo_log import log as logging
-from pypowervm.tasks import cna as pvm_cna
-from pypowervm.wrappers import managed_system as pvm_ms
-from pypowervm.wrappers import network as pvm_net
-from taskflow import task
-
-from nova import conf as cfg
-from nova import exception
-from nova.virt.powervm import vif
-from nova.virt.powervm import vm
-
-LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-
-SECURE_RMC_VSWITCH = 'MGMTSWITCH'
-SECURE_RMC_VLAN = 4094
-
-
-class PlugVifs(task.Task):
-
- """The task to plug the Virtual Network Interfaces to a VM."""
-
- def __init__(self, virt_api, adapter, instance, network_infos):
- """Create the task.
-
- Provides 'vm_cnas' - the list of the Virtual Machine's Client Network
- Adapters as they stand after all VIFs are plugged. May be None, in
- which case the Task requiring 'vm_cnas' should discover them afresh.
-
- :param virt_api: The VirtAPI for the operation.
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance.
- :param network_infos: The network information containing the nova
- VIFs to create.
- """
- self.virt_api = virt_api
- self.adapter = adapter
- self.instance = instance
- self.network_infos = network_infos or []
- self.crt_network_infos, self.update_network_infos = [], []
- # Cache of CNAs that is filled on initial _vif_exists() call.
- self.cnas = None
-
- super(PlugVifs, self).__init__(
- name='plug_vifs', provides='vm_cnas', requires=['lpar_wrap'])
-
- def _vif_exists(self, network_info):
- """Does the instance have a CNA for a given net?
-
- :param network_info: A network information dict. This method expects
- it to contain key 'address' (MAC address).
- :return: True if a CNA with the network_info's MAC address exists on
- the instance. False otherwise.
- """
- if self.cnas is None:
- self.cnas = vm.get_cnas(self.adapter, self.instance)
- vifs = self.cnas
-
- return network_info['address'] in [vm.norm_mac(v.mac) for v in vifs]
-
- def execute(self, lpar_wrap):
- # Check to see if the LPAR is OK to add VIFs to.
- modifiable, reason = lpar_wrap.can_modify_io()
- if not modifiable:
- LOG.error("Unable to create VIF(s) for instance in the system's "
- "current state. The reason from the system is: %s",
- reason, instance=self.instance)
- raise exception.VirtualInterfaceCreateException()
-
- # There are two types of network infos: one for newly created VIFs, and
- # one for VIFs that already exist but need to be re-plugged.
- for network_info in self.network_infos:
- if self._vif_exists(network_info):
- self.update_network_infos.append(network_info)
- else:
- self.crt_network_infos.append(network_info)
-
- # If there are no vifs to create or update, then just exit immediately.
- if not self.crt_network_infos and not self.update_network_infos:
- return []
-
- # For existing VIFs that we just need to update, run the plug but do
- # not wait for the neutron event as that likely won't be sent (it was
- # already done).
- for network_info in self.update_network_infos:
- LOG.info("Updating VIF with mac %s for instance.",
- network_info['address'], instance=self.instance)
- vif.plug(self.adapter, self.instance, network_info, new_vif=False)
-
- # For the new VIFs, run the creates (and wait for the events back)
- try:
- with self.virt_api.wait_for_instance_event(
- self.instance, self._get_vif_events(),
- deadline=CONF.vif_plugging_timeout,
- error_callback=self._vif_callback_failed):
- for network_info in self.crt_network_infos:
- LOG.info('Creating VIF with mac %s for instance.',
- network_info['address'], instance=self.instance)
- new_vif = vif.plug(
- self.adapter, self.instance, network_info,
- new_vif=True)
- if self.cnas is not None:
- self.cnas.append(new_vif)
- except eventlet.timeout.Timeout:
- LOG.error('Error waiting for VIF to be created for instance.',
- instance=self.instance)
- raise exception.VirtualInterfaceCreateException()
-
- return self.cnas
-
- def _vif_callback_failed(self, event_name, instance):
- LOG.error('VIF Plug failure for callback on event %s for instance.',
- event_name, instance=self.instance)
- if CONF.vif_plugging_is_fatal:
- raise exception.VirtualInterfaceCreateException()
-
- def _get_vif_events(self):
- """Returns the VIF events that need to be received for a VIF plug.
-
- In order for a VIF plug to be successful, certain events should be
- received from other components within the OpenStack ecosystem. This
- method returns the events nova expects neutron to send for a given deploy.
- """
- # See libvirt's driver.py -> _get_neutron_events method for
- # more information.
- if CONF.vif_plugging_is_fatal and CONF.vif_plugging_timeout:
- return [('network-vif-plugged', network_info['id'])
- for network_info in self.crt_network_infos
- if not network_info.get('active', True)]
-
- def revert(self, lpar_wrap, result, flow_failures):
- if not self.network_infos:
- return
-
- LOG.warning('VIF creation being rolled back for instance.',
- instance=self.instance)
-
- # Get the current adapters on the system
- cna_w_list = vm.get_cnas(self.adapter, self.instance)
- for network_info in self.crt_network_infos:
- try:
- vif.unplug(self.adapter, self.instance, network_info,
- cna_w_list=cna_w_list)
- except Exception:
- LOG.exception("An exception occurred during an unplug in the "
- "vif rollback. Ignoring.",
- instance=self.instance)
-
-
-class UnplugVifs(task.Task):
-
- """The task to unplug Virtual Network Interfaces from a VM."""
-
- def __init__(self, adapter, instance, network_infos):
- """Create the task.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance.
- :param network_infos: The network information containing the nova
- VIFs to unplug.
- """
- self.adapter = adapter
- self.instance = instance
- self.network_infos = network_infos or []
-
- super(UnplugVifs, self).__init__(name='unplug_vifs')
-
- def execute(self):
- # If the LPAR is not in an OK state for deleting, then throw an
- # error up front.
- lpar_wrap = vm.get_instance_wrapper(self.adapter, self.instance)
- modifiable, reason = lpar_wrap.can_modify_io()
- if not modifiable:
- LOG.error("Unable to remove VIFs from instance in the system's "
- "current state. The reason reported by the system is: "
- "%s", reason, instance=self.instance)
- raise exception.VirtualInterfaceUnplugException(reason=reason)
-
- # Get all the current Client Network Adapters (CNA) on the VM itself.
- cna_w_list = vm.get_cnas(self.adapter, self.instance)
-
- # Walk through the VIFs and delete the corresponding CNA on the VM.
- for network_info in self.network_infos:
- vif.unplug(self.adapter, self.instance, network_info,
- cna_w_list=cna_w_list)
-
-
-class PlugMgmtVif(task.Task):
-
- """The task to plug the Management VIF into a VM."""
-
- def __init__(self, adapter, instance):
- """Create the task.
-
- Requires 'vm_cnas' from PlugVifs. If None, this Task will retrieve the
- VM's list of CNAs.
-
- Provides the mgmt_cna. This may be None if no management device was
- created. This is the CNA of the mgmt vif for the VM.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance.
- """
- self.adapter = adapter
- self.instance = instance
-
- super(PlugMgmtVif, self).__init__(
- name='plug_mgmt_vif', provides='mgmt_cna', requires=['vm_cnas'])
-
- def execute(self, vm_cnas):
- LOG.info('Plugging the Management Network Interface to instance.',
- instance=self.instance)
- # Determine if we need to create the secure RMC VIF. This should only
- # be needed if there is not a VIF on the secure RMC vSwitch
- vswitch = None
- vswitches = pvm_net.VSwitch.search(
- self.adapter, parent_type=pvm_ms.System.schema_type,
- parent_uuid=self.adapter.sys_uuid, name=SECURE_RMC_VSWITCH)
- if len(vswitches) == 1:
- vswitch = vswitches[0]
-
- if vswitch is None:
- LOG.warning('No management VIF created for instance due to lack '
- 'of Management Virtual Switch', instance=self.instance)
- return None
-
- # This next check verifies that there are no existing NICs on the
- # vSwitch, so that the VM does not end up with multiple RMC VIFs.
- if vm_cnas is None:
- has_mgmt_vif = vm.get_cnas(self.adapter, self.instance,
- vswitch_uri=vswitch.href)
- else:
- has_mgmt_vif = vswitch.href in [cna.vswitch_uri for cna in vm_cnas]
-
- if has_mgmt_vif:
- LOG.debug('Management VIF already created for instance',
- instance=self.instance)
- return None
-
- lpar_uuid = vm.get_pvm_uuid(self.instance)
- return pvm_cna.crt_cna(self.adapter, None, lpar_uuid, SECURE_RMC_VLAN,
- vswitch=SECURE_RMC_VSWITCH, crt_vswitch=True)
diff --git a/nova/virt/powervm/tasks/storage.py b/nova/virt/powervm/tasks/storage.py
deleted file mode 100644
index 24449a1bef..0000000000
--- a/nova/virt/powervm/tasks/storage.py
+++ /dev/null
@@ -1,429 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from pypowervm import exceptions as pvm_exc
-from pypowervm.tasks import scsi_mapper as pvm_smap
-from taskflow import task
-from taskflow.types import failure as task_fail
-
-from nova import exception
-from nova.virt import block_device
-from nova.virt.powervm import media
-from nova.virt.powervm import mgmt
-
-LOG = logging.getLogger(__name__)
-
-
-class AttachVolume(task.Task):
-
- """The task to attach a volume to an instance."""
-
- def __init__(self, vol_drv):
- """Create the task.
-
- :param vol_drv: The volume driver. Ties the storage to a connection
- type (ex. vSCSI).
- """
- self.vol_drv = vol_drv
- self.vol_id = block_device.get_volume_id(self.vol_drv.connection_info)
-
- super(AttachVolume, self).__init__(name='attach_vol_%s' % self.vol_id)
-
- def execute(self):
- LOG.info('Attaching volume %(vol)s.', {'vol': self.vol_id},
- instance=self.vol_drv.instance)
- self.vol_drv.attach_volume()
-
- def revert(self, result, flow_failures):
- LOG.warning('Rolling back attachment for volume %(vol)s.',
- {'vol': self.vol_id}, instance=self.vol_drv.instance)
-
- # Note that the rollback is *instant*. Resetting the FeedTask ensures
- # immediate rollback.
- self.vol_drv.reset_stg_ftsk()
- try:
- # We attempt to detach in case we 'partially attached'. In
- # the attach scenario, perhaps one of the Virtual I/O Servers
- # was attached. This attempts to clear anything out to make sure
- # the terminate attachment runs smoothly.
- self.vol_drv.detach_volume()
- except exception.VolumeDetachFailed:
- # Does not block due to being in the revert flow.
- LOG.exception("Unable to detach volume %s during rollback.",
- self.vol_id, instance=self.vol_drv.instance)
-
-
-class DetachVolume(task.Task):
-
- """The task to detach a volume from an instance."""
-
- def __init__(self, vol_drv):
- """Create the task.
-
- :param vol_drv: The volume driver. Ties the storage to a connection
- type (ex. vSCSI).
- """
- self.vol_drv = vol_drv
- self.vol_id = self.vol_drv.connection_info['data']['volume_id']
-
- super(DetachVolume, self).__init__(name='detach_vol_%s' % self.vol_id)
-
- def execute(self):
- LOG.info('Detaching volume %(vol)s.',
- {'vol': self.vol_id}, instance=self.vol_drv.instance)
- self.vol_drv.detach_volume()
-
- def revert(self, result, flow_failures):
- LOG.warning('Reattaching volume %(vol)s on detach rollback.',
- {'vol': self.vol_id}, instance=self.vol_drv.instance)
-
- # Note that the rollback is *instant*. Resetting the FeedTask ensures
- # immediate rollback.
- self.vol_drv.reset_stg_ftsk()
- try:
- # We try to reattach the volume here so that it maintains its
- # linkage (in the hypervisor) to the VM. This makes it easier for
- # operators to understand the linkage between the VMs and volumes
- # in error scenarios. This is simply useful for debug purposes
- # if there is an operational error.
- self.vol_drv.attach_volume()
- except exception.VolumeAttachFailed:
- # Does not block due to being in the revert flow. See above.
- LOG.exception("Unable to reattach volume %s during rollback.",
- self.vol_id, instance=self.vol_drv.instance)
-
-
-class CreateDiskForImg(task.Task):
-
- """The Task to create the disk from an image in the storage."""
-
- def __init__(self, disk_dvr, context, instance, image_meta):
- """Create the Task.
-
- Provides the 'disk_dev_info' for other tasks. Comes from the disk_dvr
- create_disk_from_image method.
-
- :param disk_dvr: The storage driver.
- :param context: The context passed into the driver method.
- :param instance: The nova instance.
- :param nova.objects.ImageMeta image_meta:
- The metadata of the image of the instance.
- """
- super(CreateDiskForImg, self).__init__(
- name='create_disk_from_img', provides='disk_dev_info')
- self.disk_dvr = disk_dvr
- self.instance = instance
- self.context = context
- self.image_meta = image_meta
-
- def execute(self):
- return self.disk_dvr.create_disk_from_image(
- self.context, self.instance, self.image_meta)
-
- def revert(self, result, flow_failures):
- # If there is no result, or it's a direct failure, then there isn't
- # anything to delete.
- if result is None or isinstance(result, task_fail.Failure):
- return
-
- # Run the delete. The result is a single disk. Wrap into list
- # as the method works with plural disks.
- try:
- self.disk_dvr.delete_disks([result])
- except pvm_exc.Error:
- # Don't allow revert exceptions to interrupt the revert flow.
- LOG.exception("Disk deletion failed during revert. Ignoring.",
- instance=self.instance)
-
-
-class AttachDisk(task.Task):
-
- """The task to attach the disk to the instance."""
-
- def __init__(self, disk_dvr, instance, stg_ftsk):
- """Create the Task for the attach disk to instance method.
-
- Requires the disk information via the 'disk_dev_info' requirement
- (provided by the CreateDiskForImg task).
-
- :param disk_dvr: The disk driver.
- :param instance: The nova instance.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
- """
- super(AttachDisk, self).__init__(
- name='attach_disk', requires=['disk_dev_info'])
- self.disk_dvr = disk_dvr
- self.instance = instance
- self.stg_ftsk = stg_ftsk
-
- def execute(self, disk_dev_info):
- self.disk_dvr.attach_disk(self.instance, disk_dev_info, self.stg_ftsk)
-
- def revert(self, disk_dev_info, result, flow_failures):
- try:
- self.disk_dvr.detach_disk(self.instance)
- except pvm_exc.Error:
- # Don't allow revert exceptions to interrupt the revert flow.
- LOG.exception("Disk detach failed during revert. Ignoring.",
- instance=self.instance)
-
-
-class DetachDisk(task.Task):
-
- """The task to detach the disk storage from the instance."""
-
- def __init__(self, disk_dvr, instance):
- """Creates the Task to detach the storage adapters.
-
- Provides the stor_adpt_mappings. A list of pypowervm
- VSCSIMappings or VFCMappings (depending on the storage adapter).
-
- :param disk_dvr: The DiskAdapter for the VM.
- :param instance: The nova instance.
- """
- super(DetachDisk, self).__init__(
- name='detach_disk', provides='stor_adpt_mappings')
- self.instance = instance
- self.disk_dvr = disk_dvr
-
- def execute(self):
- return self.disk_dvr.detach_disk(self.instance)
-
-
-class DeleteDisk(task.Task):
-
- """The task to delete the backing storage."""
-
- def __init__(self, disk_dvr):
- """Creates the Task to delete the disk storage from the system.
-
- Requires the stor_adpt_mappings.
-
- :param disk_dvr: The DiskAdapter for the VM.
- """
- super(DeleteDisk, self).__init__(
- name='delete_disk', requires=['stor_adpt_mappings'])
- self.disk_dvr = disk_dvr
-
- def execute(self, stor_adpt_mappings):
- self.disk_dvr.delete_disks(stor_adpt_mappings)
-
-
-class CreateAndConnectCfgDrive(task.Task):
-
- """The task to create the config drive."""
-
- def __init__(self, adapter, instance, injected_files,
- network_info, stg_ftsk, admin_pass=None):
- """Create the Task that creates and connects the config drive.
-
- Requires the 'mgmt_cna'
-
- :param adapter: The adapter for the pypowervm API
- :param instance: The nova instance
- :param injected_files: A list of file paths that will be injected into
- the ISO.
- :param network_info: The network_info from the nova spawn method.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
- :param admin_pass (Optional, Default None): Password to inject for the
- VM.
- """
- super(CreateAndConnectCfgDrive, self).__init__(
- name='cfg_drive', requires=['mgmt_cna'])
- self.adapter = adapter
- self.instance = instance
- self.injected_files = injected_files
- self.network_info = network_info
- self.stg_ftsk = stg_ftsk
- self.ad_pass = admin_pass
- self.mb = None
-
- def execute(self, mgmt_cna):
- self.mb = media.ConfigDrivePowerVM(self.adapter)
- self.mb.create_cfg_drv_vopt(self.instance, self.injected_files,
- self.network_info, self.stg_ftsk,
- admin_pass=self.ad_pass, mgmt_cna=mgmt_cna)
-
- def revert(self, mgmt_cna, result, flow_failures):
- # No media builder, nothing to do
- if self.mb is None:
- return
-
- # Delete the virtual optical media. We don't care if it fails
- try:
- self.mb.dlt_vopt(self.instance, self.stg_ftsk)
- except pvm_exc.Error:
- LOG.exception('VOpt removal (as part of reversion) failed.',
- instance=self.instance)
-
-
-class DeleteVOpt(task.Task):
-
- """The task to delete the virtual optical."""
-
- def __init__(self, adapter, instance, stg_ftsk=None):
- """Creates the Task to delete the instance's virtual optical media.
-
- :param adapter: The adapter for the pypowervm API
- :param instance: The nova instance.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
- """
- super(DeleteVOpt, self).__init__(name='vopt_delete')
- self.adapter = adapter
- self.instance = instance
- self.stg_ftsk = stg_ftsk
-
- def execute(self):
- media_builder = media.ConfigDrivePowerVM(self.adapter)
- media_builder.dlt_vopt(self.instance, stg_ftsk=self.stg_ftsk)
-
-
-class InstanceDiskToMgmt(task.Task):
-
- """The task to connect an instance's disk to the management partition.
-
- This task will connect the instance's disk to the management partition and
- discover it. We do these two pieces together because their reversion
- happens in the same order.
- """
-
- def __init__(self, disk_dvr, instance):
- """Create the Task for connecting boot disk to mgmt partition.
-
- Provides:
- stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
- connected.
- vios_wrap: The Virtual I/O Server wrapper from which the storage
- element was mapped.
- disk_path: The local path to the mapped-and-discovered device, e.g.
- '/dev/sde'.
-
- :param disk_dvr: The disk driver.
- :param instance: The nova instance whose boot disk is to be connected.
- """
- super(InstanceDiskToMgmt, self).__init__(
- name='instance_disk_to_mgmt',
- provides=['stg_elem', 'vios_wrap', 'disk_path'])
- self.disk_dvr = disk_dvr
- self.instance = instance
- self.stg_elem = None
- self.vios_wrap = None
- self.disk_path = None
-
- def execute(self):
- """Map the instance's boot disk and discover it."""
-
- # Search for boot disk on the NovaLink partition.
- if self.disk_dvr.mp_uuid in self.disk_dvr._vios_uuids:
- dev_name = self.disk_dvr.get_bootdisk_path(
- self.instance, self.disk_dvr.mp_uuid)
- if dev_name is not None:
- return None, None, dev_name
-
- self.stg_elem, self.vios_wrap = (
- self.disk_dvr.connect_instance_disk_to_mgmt(self.instance))
- new_maps = pvm_smap.find_maps(
- self.vios_wrap.scsi_mappings, client_lpar_id=self.disk_dvr.mp_uuid,
- stg_elem=self.stg_elem)
- if not new_maps:
- raise exception.NewMgmtMappingNotFoundException(
- stg_name=self.stg_elem.name, vios_name=self.vios_wrap.name)
-
- # new_maps should be length 1, but even if it's not - i.e. we somehow
- # matched more than one mapping of the same dev to the management
- # partition from the same VIOS - it is safe to use the first one.
- mapping = new_maps[0]
- # Scan the SCSI bus, discover the disk, find its canonical path.
- LOG.info("Discovering device and path for mapping of %(dev_name)s "
- "on the management partition.",
- {'dev_name': self.stg_elem.name}, instance=self.instance)
- self.disk_path = mgmt.discover_vscsi_disk(mapping)
- return self.stg_elem, self.vios_wrap, self.disk_path
-
- def revert(self, result, flow_failures):
- """Unmap the disk and then remove it from the management partition.
-
- We use this order so that, if another thread scans the SCSI bus after the
- device is removed, the mapping is already gone and the device cannot be
- rediscovered.
- """
- if self.vios_wrap is None or self.stg_elem is None:
- # We never even got connected - nothing to do.
- return
- LOG.warning("Unmapping boot disk %(disk_name)s from the management "
- "partition via Virtual I/O Server %(vioname)s.",
- {'disk_name': self.stg_elem.name,
- 'vioname': self.vios_wrap.name}, instance=self.instance)
- self.disk_dvr.disconnect_disk_from_mgmt(self.vios_wrap.uuid,
- self.stg_elem.name)
-
- if self.disk_path is None:
- # We did not discover the disk - nothing else to do.
- return
- LOG.warning("Removing disk %(dpath)s from the management partition.",
- {'dpath': self.disk_path}, instance=self.instance)
- try:
- mgmt.remove_block_dev(self.disk_path)
- except pvm_exc.Error:
- # Don't allow revert exceptions to interrupt the revert flow.
- LOG.exception("Remove disk failed during revert. Ignoring.",
- instance=self.instance)
-
-
-class RemoveInstanceDiskFromMgmt(task.Task):
-
- """Unmap and remove an instance's boot disk from the mgmt partition."""
-
- def __init__(self, disk_dvr, instance):
- """Create task to unmap and remove an instance's boot disk from mgmt.
-
- Requires (from InstanceDiskToMgmt):
- stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
- connected.
- vios_wrap: The Virtual I/O Server wrapper.
- (pypowervm.wrappers.virtual_io_server.VIOS) from which the
- storage element was mapped.
- disk_path: The local path to the mapped-and-discovered device, e.g.
- '/dev/sde'.
- :param disk_dvr: The disk driver.
- :param instance: The nova instance whose boot disk is to be removed.
- """
- self.disk_dvr = disk_dvr
- self.instance = instance
- super(RemoveInstanceDiskFromMgmt, self).__init__(
- name='remove_inst_disk_from_mgmt',
- requires=['stg_elem', 'vios_wrap', 'disk_path'])
-
- def execute(self, stg_elem, vios_wrap, disk_path):
- """Unmap and remove an instance's boot disk from the mgmt partition.
-
- Input parameters ('requires') provided by InstanceDiskToMgmt task.
- :param stg_elem: The storage element wrapper (pypowervm LU, PV, etc.)
- to be disconnected.
- :param vios_wrap: The Virtual I/O Server wrapper from which the
- mapping is to be removed.
- :param disk_path: The local path to the disk device to be removed, e.g.
- '/dev/sde'
- """
- # stg_elem is None if boot disk was not mapped to management partition.
- if stg_elem is None:
- return
- LOG.info("Unmapping boot disk %(disk_name)s from the management "
- "partition via Virtual I/O Server %(vios_name)s.",
- {'disk_name': stg_elem.name, 'vios_name': vios_wrap.name},
- instance=self.instance)
- self.disk_dvr.disconnect_disk_from_mgmt(vios_wrap.uuid, stg_elem.name)
- LOG.info("Removing disk %(disk_path)s from the management partition.",
- {'disk_path': disk_path}, instance=self.instance)
- mgmt.remove_block_dev(disk_path)
diff --git a/nova/virt/powervm/tasks/vm.py b/nova/virt/powervm/tasks/vm.py
deleted file mode 100644
index 59cd1a2377..0000000000
--- a/nova/virt/powervm/tasks/vm.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from pypowervm import exceptions as pvm_exc
-from pypowervm.tasks import storage as pvm_stg
-from taskflow import task
-from taskflow.types import failure as task_fail
-
-from nova.virt.powervm import vm
-
-
-LOG = logging.getLogger(__name__)
-
-
-class Get(task.Task):
-
- """The task for getting a VM entry."""
-
- def __init__(self, adapter, instance):
- """Creates the Task for getting a VM entry.
-
- Provides the 'lpar_wrap' for other tasks.
-
- :param adapter: The adapter for the pypowervm API
- :param instance: The nova instance.
- """
- super(Get, self).__init__(name='get_vm', provides='lpar_wrap')
- self.adapter = adapter
- self.instance = instance
-
- def execute(self):
- return vm.get_instance_wrapper(self.adapter, self.instance)
-
-
-class Create(task.Task):
- """The task for creating a VM."""
-
- def __init__(self, adapter, host_wrapper, instance, stg_ftsk):
- """Creates the Task for creating a VM.
-
- The revert method only needs to do something for failed rebuilds.
- Since the rebuild and build methods have different flows, it is
- necessary to clean up the destination LPAR on fails during rebuild.
-
- The revert method is not implemented for build because the compute
- manager calls the driver destroy operation for spawn errors. By
- not deleting the lpar, it's a cleaner flow through the destroy
- operation and accomplishes the same result.
-
- Any stale storage associated with the new VM's (possibly recycled) ID
- will be cleaned up. The cleanup work will be delegated to the FeedTask
- represented by the stg_ftsk parameter.
-
- :param adapter: The adapter for the pypowervm API
- :param host_wrapper: The managed system wrapper
- :param instance: The nova instance.
- :param stg_ftsk: FeedTask to defer storage connectivity operations.
- """
- super(Create, self).__init__(name='crt_vm', provides='lpar_wrap')
- self.instance = instance
- self.adapter = adapter
- self.host_wrapper = host_wrapper
- self.stg_ftsk = stg_ftsk
-
- def execute(self):
- wrap = vm.create_lpar(self.adapter, self.host_wrapper, self.instance)
- # Get rid of any stale storage and/or mappings associated with the new
- # LPAR's ID, so it doesn't accidentally have access to something it
- # oughtn't.
- LOG.info('Scrubbing stale storage.', instance=self.instance)
- pvm_stg.add_lpar_storage_scrub_tasks([wrap.id], self.stg_ftsk,
- lpars_exist=True)
- return wrap
-
-
-class PowerOn(task.Task):
- """The task to power on the instance."""
-
- def __init__(self, adapter, instance):
- """Create the Task for the power on of the LPAR.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance.
- """
- super(PowerOn, self).__init__(name='pwr_vm')
- self.adapter = adapter
- self.instance = instance
-
- def execute(self):
- vm.power_on(self.adapter, self.instance)
-
- def revert(self, result, flow_failures):
- if isinstance(result, task_fail.Failure):
- # The power on itself failed...can't power off.
- LOG.debug('Power on failed. Not performing power off.',
- instance=self.instance)
- return
-
- LOG.warning('Powering off instance.', instance=self.instance)
- try:
- vm.power_off(self.adapter, self.instance, force_immediate=True)
- except pvm_exc.Error:
- # Don't raise revert exceptions
- LOG.exception("Power-off failed during revert.",
- instance=self.instance)
-
-
-class PowerOff(task.Task):
- """The task to power off a VM."""
-
- def __init__(self, adapter, instance, force_immediate=False):
- """Creates the Task to power off an LPAR.
-
- :param adapter: The adapter for the pypowervm API
- :param instance: The nova instance.
- :param force_immediate: Boolean. Perform a VSP hard power off.
- """
- super(PowerOff, self).__init__(name='pwr_off_vm')
- self.instance = instance
- self.adapter = adapter
- self.force_immediate = force_immediate
-
- def execute(self):
- vm.power_off(self.adapter, self.instance,
- force_immediate=self.force_immediate)
-
-
-class Delete(task.Task):
- """The task to delete the instance from the system."""
-
- def __init__(self, adapter, instance):
- """Create the Task to delete the VM from the system.
-
- :param adapter: The adapter for the pypowervm API.
- :param instance: The nova instance.
- """
- super(Delete, self).__init__(name='dlt_vm')
- self.adapter = adapter
- self.instance = instance
-
- def execute(self):
- vm.delete_lpar(self.adapter, self.instance)
diff --git a/nova/virt/powervm/vif.py b/nova/virt/powervm/vif.py
deleted file mode 100644
index 8ab591a15d..0000000000
--- a/nova/virt/powervm/vif.py
+++ /dev/null
@@ -1,373 +0,0 @@
-# Copyright 2016, 2017 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-from oslo_log import log
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-from oslo_utils import importutils
-from pypowervm import exceptions as pvm_ex
-from pypowervm.tasks import cna as pvm_cna
-from pypowervm.tasks import partition as pvm_par
-from pypowervm.wrappers import event as pvm_evt
-
-from nova import exception
-from nova.network import model as network_model
-from nova.virt.powervm import vm
-
-LOG = log.getLogger(__name__)
-
-NOVALINK_VSWITCH = 'NovaLinkVEABridge'
-
-# Provider tag for custom events from this module
-EVENT_PROVIDER_ID = 'NOVA_PVM_VIF'
-
-VIF_TYPE_PVM_SEA = 'pvm_sea'
-VIF_TYPE_PVM_OVS = 'ovs'
-VIF_MAPPING = {VIF_TYPE_PVM_SEA:
- 'nova.virt.powervm.vif.PvmSeaVifDriver',
- VIF_TYPE_PVM_OVS:
- 'nova.virt.powervm.vif.PvmOvsVifDriver'}
-
-
-def _build_vif_driver(adapter, instance, vif):
- """Returns the appropriate VIF Driver for the given VIF.
-
- :param adapter: The pypowervm adapter API interface.
- :param instance: The nova instance.
- :param vif: The virtual interface.
- :return: The appropriate PvmVifDriver for the VIF.
- """
- if vif.get('type') is None:
- LOG.exception("Failed to build vif driver. Missing vif type.",
- instance=instance)
- raise exception.VirtualInterfacePlugException()
-
- # Check the type to the implementations
- if VIF_MAPPING.get(vif['type']):
- return importutils.import_object(
- VIF_MAPPING.get(vif['type']), adapter, instance)
-
- # No matching implementation, raise error.
- LOG.exception("Failed to build vif driver. Invalid vif type provided.",
- instance=instance)
- raise exception.VirtualInterfacePlugException()
-
-
-def _push_vif_event(adapter, action, vif_w, instance, vif_type):
- """Push a custom event to the REST server for a vif action (plug/unplug).
-
- This event prompts the neutron agent to mark the port up or down. It is
- consumed by custom neutron agents (e.g. Shared Ethernet Adapter)
-
- :param adapter: The pypowervm adapter.
- :param action: The action taken on the vif - either 'plug' or 'unplug'
- :param vif_w: The pypowervm wrapper of the affected vif (CNA, VNIC, etc.)
- :param instance: The nova instance for the event
- :param vif_type: The type of event source (pvm_sea, ovs, bridge,
- pvm_sriov etc)
- """
- data = vif_w.href
- detail = jsonutils.dumps(dict(provider=EVENT_PROVIDER_ID, action=action,
- mac=vif_w.mac, type=vif_type))
- event = pvm_evt.Event.bld(adapter, data, detail)
- try:
- event = event.create()
- LOG.debug('Pushed custom event for consumption by neutron agent: %s',
- str(event), instance=instance)
- except Exception:
- with excutils.save_and_reraise_exception(logger=LOG):
- LOG.exception('Custom VIF event push failed. %s', str(event),
- instance=instance)
-
-
-def plug(adapter, instance, vif, new_vif=True):
- """Plugs a virtual interface (network) into a VM.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance object.
- :param vif: The virtual interface to plug into the instance.
- :param new_vif: (Optional, Default: True) If set, indicates that it is
- a brand new VIF. If False, it indicates that the VIF
- is already on the client but should be treated on the
- bridge.
- :return: The wrapper (CNA) representing the plugged virtual network. None
- if the vnet was not created.
- """
- vif_drv = _build_vif_driver(adapter, instance, vif)
-
- try:
- vnet_w = vif_drv.plug(vif, new_vif=new_vif)
- except pvm_ex.HttpError:
- LOG.exception('VIF plug failed for instance.', instance=instance)
- raise exception.VirtualInterfacePlugException()
- # Other exceptions are (hopefully) custom VirtualInterfacePlugException
- # generated lower in the call stack.
-
- # Push a custom event if we really plugged the vif
- if vnet_w is not None:
- _push_vif_event(adapter, 'plug', vnet_w, instance, vif['type'])
-
- return vnet_w
-
-
-def unplug(adapter, instance, vif, cna_w_list=None):
- """Unplugs a virtual interface (network) from a VM.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance object.
- :param vif: The virtual interface to unplug from the instance.
- :param cna_w_list: (Optional, Default: None) The list of Client Network
- Adapters from pypowervm. Providing this input
- allows for an improvement in operation speed.
- """
- vif_drv = _build_vif_driver(adapter, instance, vif)
- try:
- vnet_w = vif_drv.unplug(vif, cna_w_list=cna_w_list)
- except pvm_ex.HttpError as he:
- LOG.exception('VIF unplug failed for instance', instance=instance)
- raise exception.VirtualInterfaceUnplugException(reason=he.args[0])
-
- # Push a custom event if we successfully unplugged the vif.
- if vnet_w:
- _push_vif_event(adapter, 'unplug', vnet_w, instance, vif['type'])
-
-
-class PvmVifDriver(metaclass=abc.ABCMeta):
- """Represents an abstract class for a PowerVM Vif Driver.
-
- A VIF Driver understands a given virtual interface type (network). It
- understands how to plug and unplug a given VIF for a virtual machine.
- """
-
- def __init__(self, adapter, instance):
- """Initializes a VIF Driver.
- :param adapter: The pypowervm adapter API interface.
- :param instance: The nova instance that the vif action will be run
- against.
- """
- self.adapter = adapter
- self.instance = instance
-
- @abc.abstractmethod
- def plug(self, vif, new_vif=True):
- """Plugs a virtual interface (network) into a VM.
-
- :param vif: The virtual interface to plug into the instance.
- :param new_vif: (Optional, Default: True) If set, indicates that it is
- a brand new VIF. If False, it indicates that the VIF
- is already on the client but should be treated on the
- bridge.
- :return: The new vif that was created. Only returned if new_vif is
- set to True. Otherwise None is expected.
- """
- pass
-
- def unplug(self, vif, cna_w_list=None):
- """Unplugs a virtual interface (network) from a VM.
-
- :param vif: The virtual interface to unplug from the instance.
- :param cna_w_list: (Optional, Default: None) The list of Client Network
- Adapters from pypowervm. Providing this input
- allows for an improvement in operation speed.
- :return cna_w: The deleted Client Network Adapter or None if the CNA
- is not found.
- """
- # This is a default implementation that most implementations will
- # require.
-
- # Need to find the adapters if they were not provided
- if not cna_w_list:
- cna_w_list = vm.get_cnas(self.adapter, self.instance)
-
- cna_w = self._find_cna_for_vif(cna_w_list, vif)
- if not cna_w:
- LOG.warning('Unable to unplug VIF with mac %(mac)s. The VIF was '
- 'not found on the instance.',
- {'mac': vif['address']}, instance=self.instance)
- return None
-
- LOG.info('Deleting VIF with mac %(mac)s.',
- {'mac': vif['address']}, instance=self.instance)
- try:
- cna_w.delete()
- except Exception as e:
- LOG.exception('Unable to unplug VIF with mac %(mac)s.',
- {'mac': vif['address']}, instance=self.instance)
- raise exception.VirtualInterfaceUnplugException(
- reason=str(e))
- return cna_w
-
- @staticmethod
- def _find_cna_for_vif(cna_w_list, vif):
- """Finds the PowerVM CNA for a given Nova VIF.
-
- :param cna_w_list: The list of Client Network Adapter wrappers from
- pypowervm.
- :param vif: The Nova Virtual Interface (virtual network interface).
- :return: The CNA that corresponds to the VIF. None if one is not
- part of the cna_w_list.
- """
- for cna_w in cna_w_list:
- if vm.norm_mac(cna_w.mac) == vif['address']:
- return cna_w
- return None
-
-
-class PvmOvsVifDriver(PvmVifDriver):
- """The Open vSwitch VIF driver for PowerVM."""
-
- def plug(self, vif, new_vif=True):
- """Plugs a virtual interface (network) into a VM.
-
- Creates a 'peer to peer' connection between the Management partition
- hosting the Linux I/O and the client VM. There will be one trunk
- adapter for a given client adapter.
-
- The device will be 'up' on the mgmt partition.
-
- Will make sure that the trunk device has the appropriate metadata (e.g.
- port id) set on it so that the Open vSwitch agent picks it up properly.
-
- :param vif: The virtual interface to plug into the instance.
- :param new_vif: (Optional, Default: True) If set, indicates that it is
- a brand new VIF. If False, it indicates that the VIF
- is already on the client but should be treated on the
- bridge.
- :return: The new vif that was created. Only returned if new_vif is
- set to True. Otherwise None is expected.
- """
-
- # Create the trunk and client adapter.
- lpar_uuid = vm.get_pvm_uuid(self.instance)
- mgmt_uuid = pvm_par.get_this_partition(self.adapter).uuid
-
- mtu = vif['network'].get_meta('mtu')
- if 'devname' in vif:
- dev_name = vif['devname']
- else:
- dev_name = ("nic" + vif['id'])[:network_model.NIC_NAME_LEN]
-
- meta_attrs = ','.join([
- 'iface-id=%s' % (vif.get('ovs_interfaceid') or vif['id']),
- 'iface-status=active',
- 'attached-mac=%s' % vif['address'],
- 'vm-uuid=%s' % self.instance.uuid])
-
- if new_vif:
- return pvm_cna.crt_p2p_cna(
- self.adapter, None, lpar_uuid, [mgmt_uuid], NOVALINK_VSWITCH,
- crt_vswitch=True, mac_addr=vif['address'], dev_name=dev_name,
- ovs_bridge=vif['network']['bridge'],
- ovs_ext_ids=meta_attrs, configured_mtu=mtu)[0]
- else:
- # Bug : https://bugs.launchpad.net/nova-powervm/+bug/1731548
- # When a host is rebooted, something is discarding tap devices for
- # VMs deployed with OVS vif. To prevent VMs losing network
- # connectivity, this is fixed by recreating the tap devices during
- # init of the nova compute service, which will call vif plug with
- # new_vif==False.
-
- # Find the CNA for this vif.
- # TODO(esberglu) improve performance by caching VIOS wrapper(s) and
- # CNA lists (in case >1 vif per VM).
- cna_w_list = vm.get_cnas(self.adapter, self.instance)
- cna_w = self._find_cna_for_vif(cna_w_list, vif)
- if not cna_w:
- LOG.warning('Unable to plug VIF with mac %s for instance. The '
- 'VIF was not found on the instance.',
- vif['address'], instance=self.instance)
- return None
-
- # Find the corresponding trunk adapter
- trunks = pvm_cna.find_trunks(self.adapter, cna_w)
- for trunk in trunks:
- # Set MTU, OVS external ids, and OVS bridge metadata
- trunk.configured_mtu = mtu
- trunk.ovs_ext_ids = meta_attrs
- trunk.ovs_bridge = vif['network']['bridge']
- # Updating the trunk adapter will cause NovaLink to reassociate
- # the tap device.
- trunk.update()
-
- def unplug(self, vif, cna_w_list=None):
- """Unplugs a virtual interface (network) from a VM.
-
- Extends the base implementation, but before calling it will remove
- the adapter from the Open vSwitch and delete the trunk.
-
- :param vif: The virtual interface to unplug from the instance.
- :param cna_w_list: (Optional, Default: None) The list of Client Network
- Adapters from pypowervm. Providing this input
- allows for an improvement in operation speed.
- :return cna_w: The deleted Client Network Adapter or None if the CNA
- is not found.
- """
- # Need to find the adapters if they were not provided
- if not cna_w_list:
- cna_w_list = vm.get_cnas(self.adapter, self.instance)
-
- # Find the CNA for this vif.
- cna_w = self._find_cna_for_vif(cna_w_list, vif)
-
- if not cna_w:
- LOG.warning('Unable to unplug VIF with mac %s for instance. The '
- 'VIF was not found on the instance.', vif['address'],
- instance=self.instance)
- return None
-
- # Find and delete the trunk adapters
- trunks = pvm_cna.find_trunks(self.adapter, cna_w)
- for trunk in trunks:
- trunk.delete()
-
- # Delete the client CNA
- return super(PvmOvsVifDriver, self).unplug(vif, cna_w_list=cna_w_list)
-
-
-class PvmSeaVifDriver(PvmVifDriver):
- """The PowerVM Shared Ethernet Adapter VIF Driver."""
-
- def plug(self, vif, new_vif=True):
- """Plugs a virtual interface (network) into a VM.
-
- This method simply creates the client network adapter into the VM.
-
- :param vif: The virtual interface to plug into the instance.
- :param new_vif: (Optional, Default: True) If set, indicates that it is
- a brand new VIF. If False, it indicates that the VIF
- is already on the client but should be treated on the
- bridge.
- :return: The new vif that was created. Only returned if new_vif is
- set to True. Otherwise None is expected.
- """
- # Do nothing if not a new VIF
- if not new_vif:
- return None
-
- lpar_uuid = vm.get_pvm_uuid(self.instance)
-
- # CNAs require a VLAN. The networking-powervm neutron agent puts this
- # in the vif details.
- vlan = int(vif['details']['vlan'])
-
- LOG.debug("Creating SEA-based VIF with VLAN %s", str(vlan),
- instance=self.instance)
- cna_w = pvm_cna.crt_cna(self.adapter, None, lpar_uuid, vlan,
- mac_addr=vif['address'])
-
- return cna_w
diff --git a/nova/virt/powervm/vm.py b/nova/virt/powervm/vm.py
deleted file mode 100644
index 2e5247551f..0000000000
--- a/nova/virt/powervm/vm.py
+++ /dev/null
@@ -1,543 +0,0 @@
-# Copyright 2014, 2017 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-from oslo_utils import excutils
-from oslo_utils import strutils as stru
-from pypowervm import exceptions as pvm_exc
-from pypowervm.helpers import log_helper as pvm_log
-from pypowervm.tasks import power
-from pypowervm.tasks import power_opts as popts
-from pypowervm.tasks import vterm
-from pypowervm import util as pvm_u
-from pypowervm.utils import lpar_builder as lpar_bldr
-from pypowervm.utils import uuid as pvm_uuid
-from pypowervm.utils import validation as pvm_vldn
-from pypowervm.wrappers import base_partition as pvm_bp
-from pypowervm.wrappers import logical_partition as pvm_lpar
-from pypowervm.wrappers import network as pvm_net
-from pypowervm.wrappers import shared_proc_pool as pvm_spp
-
-from nova.compute import power_state
-from nova import conf
-from nova import exception as exc
-from nova.i18n import _
-from nova.virt import hardware
-
-
-CONF = conf.CONF
-LOG = logging.getLogger(__name__)
-
-_POWERVM_STARTABLE_STATE = (pvm_bp.LPARState.NOT_ACTIVATED,)
-_POWERVM_STOPPABLE_STATE = (
- pvm_bp.LPARState.RUNNING, pvm_bp.LPARState.STARTING,
- pvm_bp.LPARState.OPEN_FIRMWARE, pvm_bp.LPARState.SHUTTING_DOWN,
- pvm_bp.LPARState.ERROR, pvm_bp.LPARState.RESUMING,
- pvm_bp.LPARState.SUSPENDING)
-_POWERVM_TO_NOVA_STATE = {
- pvm_bp.LPARState.MIGRATING_RUNNING: power_state.RUNNING,
- pvm_bp.LPARState.RUNNING: power_state.RUNNING,
- pvm_bp.LPARState.STARTING: power_state.RUNNING,
- # map open firmware state to active since it can be shut down
- pvm_bp.LPARState.OPEN_FIRMWARE: power_state.RUNNING,
- # It is running until it is off.
- pvm_bp.LPARState.SHUTTING_DOWN: power_state.RUNNING,
- # It is running until the suspend completes
- pvm_bp.LPARState.SUSPENDING: power_state.RUNNING,
-
- pvm_bp.LPARState.MIGRATING_NOT_ACTIVE: power_state.SHUTDOWN,
- pvm_bp.LPARState.NOT_ACTIVATED: power_state.SHUTDOWN,
-
- pvm_bp.LPARState.UNKNOWN: power_state.NOSTATE,
- pvm_bp.LPARState.HARDWARE_DISCOVERY: power_state.NOSTATE,
- pvm_bp.LPARState.NOT_AVAILBLE: power_state.NOSTATE,
-
- # While resuming, we should be considered suspended still. Only once
- # resumed will we be active (which is represented by the RUNNING state)
- pvm_bp.LPARState.RESUMING: power_state.SUSPENDED,
- pvm_bp.LPARState.SUSPENDED: power_state.SUSPENDED,
-
- pvm_bp.LPARState.ERROR: power_state.CRASHED}
-
-
-def get_cnas(adapter, instance, **search):
- """Returns the (possibly filtered) current CNAs on the instance.
-
- The Client Network Adapters are the Ethernet adapters for a VM.
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance.
- :param search: Keyword arguments for CNA.search. If omitted, all CNAs are
- returned.
- :return: The CNA wrappers that represent the ClientNetworkAdapters on the VM.
- """
- meth = pvm_net.CNA.search if search else pvm_net.CNA.get
-
- return meth(adapter, parent_type=pvm_lpar.LPAR,
- parent_uuid=get_pvm_uuid(instance), **search)
-
-
-def get_lpar_names(adp):
- """Get a list of the LPAR names.
-
- :param adp: A pypowervm.adapter.Adapter instance for the PowerVM API.
- :return: A list of string names of the PowerVM Logical Partitions.
- """
- return [x.name for x in pvm_lpar.LPAR.search(adp, is_mgmt_partition=False)]
-
-
-def get_pvm_uuid(instance):
- """Get the corresponding PowerVM VM uuid of an instance uuid.
-
- Maps an OpenStack instance UUID to a PowerVM UUID. The mapping between
- the Nova instance UUID and the PowerVM UUID is one to one. This method
- runs the conversion algorithm against the instance's UUID to produce the
- PowerVM UUID.
-
- :param instance: nova.objects.instance.Instance.
- :return: The PowerVM UUID for the LPAR corresponding to the instance.
- """
- return pvm_uuid.convert_uuid_to_pvm(instance.uuid).upper()
-
-
-def get_instance_wrapper(adapter, instance):
- """Get the LPAR wrapper for a given Nova instance.
-
- :param adapter: The adapter for the pypowervm API
- :param instance: The nova instance.
- :return: The pypowervm logical_partition wrapper.
- """
- pvm_inst_uuid = get_pvm_uuid(instance)
- try:
- return pvm_lpar.LPAR.get(adapter, uuid=pvm_inst_uuid)
- except pvm_exc.Error as e:
- with excutils.save_and_reraise_exception(logger=LOG) as sare:
- LOG.exception("Failed to retrieve LPAR associated with instance.",
- instance=instance)
- if e.response is not None and e.response.status == 404:
- sare.reraise = False
- raise exc.InstanceNotFound(instance_id=pvm_inst_uuid)
-
-
-def power_on(adapter, instance):
- """Powers on a VM.
-
- :param adapter: A pypowervm.adapter.Adapter.
- :param instance: The nova instance to power on.
- :raises: InstancePowerOnFailure
- """
- # Synchronize power-on and power-off ops on a given instance
- with lockutils.lock('power_%s' % instance.uuid):
- entry = get_instance_wrapper(adapter, instance)
- # Get the current state and see if we can start the VM
- if entry.state in _POWERVM_STARTABLE_STATE:
- # Now start the lpar
- try:
- power.power_on(entry, None)
- except pvm_exc.Error as e:
- LOG.exception("PowerVM error during power_on.",
- instance=instance)
- raise exc.InstancePowerOnFailure(reason=str(e))
-
-
-def power_off(adapter, instance, force_immediate=False, timeout=None):
- """Powers off a VM.
-
- :param adapter: A pypowervm.adapter.Adapter.
- :param instance: The nova instance to power off.
- :param timeout: (Optional, Default None) How long to wait for the job
- to complete. If None, the default timeout from pypowervm's
- power off method is used.
- :param force_immediate: (Optional, Default False) Should it be immediately
- shut down.
- :raises: InstancePowerOffFailure
- """
- # Synchronize power-on and power-off ops on a given instance
- with lockutils.lock('power_%s' % instance.uuid):
- entry = get_instance_wrapper(adapter, instance)
- # Get the current state and see if we can stop the VM
- LOG.debug("Powering off request for instance in state %(state)s. "
- "Force Immediate Flag: %(force)s.",
- {'state': entry.state, 'force': force_immediate},
- instance=instance)
- if entry.state in _POWERVM_STOPPABLE_STATE:
- # Now stop the lpar
- try:
- LOG.debug("Power off executing.", instance=instance)
- kwargs = {'timeout': timeout} if timeout else {}
- if force_immediate:
- power.PowerOp.stop(
- entry, opts=popts.PowerOffOpts().vsp_hard(), **kwargs)
- else:
- power.power_off_progressive(entry, **kwargs)
- except pvm_exc.Error as e:
- LOG.exception("PowerVM error during power_off.",
- instance=instance)
- raise exc.InstancePowerOffFailure(reason=str(e))
- else:
- LOG.debug("Power off not required for instance %(inst)s.",
- {'inst': instance.name})
-
-
-def reboot(adapter, instance, hard):
- """Reboots a VM.
-
- :param adapter: A pypowervm.adapter.Adapter.
- :param instance: The nova instance to reboot.
- :param hard: Boolean True if hard reboot, False otherwise.
- :raises: InstanceRebootFailure
- """
- # Synchronize power-on and power-off ops on a given instance
- with lockutils.lock('power_%s' % instance.uuid):
- try:
- entry = get_instance_wrapper(adapter, instance)
- if entry.state != pvm_bp.LPARState.NOT_ACTIVATED:
- if hard:
- power.PowerOp.stop(
- entry, opts=popts.PowerOffOpts().vsp_hard().restart())
- else:
- power.power_off_progressive(entry, restart=True)
- else:
- # pypowervm does NOT throw an exception if "already down".
- # Any other exception from pypowervm is a legitimate failure;
- # let it raise up.
- # If we get here, pypowervm thinks the instance is down.
- power.power_on(entry, None)
- except pvm_exc.Error as e:
- LOG.exception("PowerVM error during reboot.", instance=instance)
- raise exc.InstanceRebootFailure(reason=str(e))
-
-
-def delete_lpar(adapter, instance):
- """Delete an LPAR.
-
- :param adapter: The adapter for the pypowervm API.
- :param instance: The nova instance corresponding to the lpar to delete.
- """
- lpar_uuid = get_pvm_uuid(instance)
- # Attempt to delete the VM. To avoid failures due to open vterm, we will
- # attempt to close the vterm before issuing the delete.
- try:
- LOG.info('Deleting virtual machine.', instance=instance)
- # Ensure any vterms are closed. Will no-op otherwise.
- vterm.close_vterm(adapter, lpar_uuid)
- # Run the LPAR delete
- resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
- LOG.info('Virtual machine delete status: %d', resp.status,
- instance=instance)
- return resp
- except pvm_exc.HttpError as e:
- with excutils.save_and_reraise_exception(logger=LOG) as sare:
- if e.response and e.response.status == 404:
- # LPAR is already gone - don't fail
- sare.reraise = False
- LOG.info('Virtual Machine not found', instance=instance)
- else:
- LOG.error('HttpError deleting virtual machine.',
- instance=instance)
- except pvm_exc.Error:
- with excutils.save_and_reraise_exception(logger=LOG):
- # Attempting to close vterm did not help so raise exception
- LOG.error('Virtual machine delete failed: LPARID=%s', lpar_uuid)
-
-
-def create_lpar(adapter, host_w, instance):
- """Create an LPAR based on the host based on the instance.
-
- :param adapter: The adapter for the pypowervm API.
- :param host_w: The host's System wrapper.
- :param instance: The nova instance.
- :return: The LPAR wrapper response from the API.
- """
- try:
- # Translate the nova flavor into a PowerVM Wrapper Object.
- lpar_b = VMBuilder(host_w, adapter).lpar_builder(instance)
- pending_lpar_w = lpar_b.build()
- # Run validation against it. This is just for nice(r) error messages.
- pvm_vldn.LPARWrapperValidator(pending_lpar_w,
- host_w).validate_all()
- # Create it. The API returns a new wrapper with the actual system data.
- return pending_lpar_w.create(parent=host_w)
- except lpar_bldr.LPARBuilderException as e:
- # Raise the BuildAbortException since LPAR failed to build
- raise exc.BuildAbortException(instance_uuid=instance.uuid, reason=e)
- except pvm_exc.HttpError as he:
- # Raise the API exception
- LOG.exception("PowerVM HttpError creating LPAR.", instance=instance)
- raise exc.PowerVMAPIFailed(inst_name=instance.name, reason=he)
-
-
-def _translate_vm_state(pvm_state):
- """Find the current state of the lpar.
-
- :return: The appropriate integer state value from power_state, converted
- from the PowerVM state.
- """
- if pvm_state is None:
- return power_state.NOSTATE
- try:
- return _POWERVM_TO_NOVA_STATE[pvm_state.lower()]
- except KeyError:
- return power_state.NOSTATE
-
-
-def get_vm_qp(adapter, lpar_uuid, qprop=None, log_errors=True):
- """Returns one or all quick properties of an LPAR.
-
- :param adapter: The pypowervm adapter.
- :param lpar_uuid: The (powervm) UUID for the LPAR.
- :param qprop: The quick property key to return. If specified, that single
- property value is returned. If None/unspecified, all quick
- properties are returned in a dictionary.
- :param log_errors: Indicator whether to log REST data after an exception
- :return: Either a single quick property value or a dictionary of all quick
- properties.
- """
- kwds = dict(root_id=lpar_uuid, suffix_type='quick', suffix_parm=qprop)
- if not log_errors:
- # Remove the log helper from the list of helpers.
- # Note that adapter.helpers returns a copy - the .remove doesn't affect
- # the adapter's original helpers list.
- helpers = adapter.helpers
- try:
- helpers.remove(pvm_log.log_helper)
- except ValueError:
- # It's not an error if we didn't find it.
- pass
- kwds['helpers'] = helpers
- try:
- resp = adapter.read(pvm_lpar.LPAR.schema_type, **kwds)
- except pvm_exc.HttpError as e:
- with excutils.save_and_reraise_exception(logger=LOG) as sare:
- # 404 error indicates the LPAR has been deleted
- if e.response and e.response.status == 404:
- sare.reraise = False
- raise exc.InstanceNotFound(instance_id=lpar_uuid)
- # else raise the original exception
- return jsonutils.loads(resp.body)
-
-
-def get_vm_info(adapter, instance):
- """Get the InstanceInfo for an instance.
-
- :param adapter: The pypowervm.adapter.Adapter for the PowerVM REST API.
- :param instance: nova.objects.instance.Instance object
- :returns: An InstanceInfo object.
- """
- pvm_uuid = get_pvm_uuid(instance)
- pvm_state = get_vm_qp(adapter, pvm_uuid, 'PartitionState')
- nova_state = _translate_vm_state(pvm_state)
- return hardware.InstanceInfo(nova_state)
-
-
-def norm_mac(mac):
- """Normalizes a MAC address from pypowervm format to OpenStack.
-
- That means that the format will be converted to lower case and will
- have colons added.
-
- :param mac: A pypowervm mac address. Ex. 1234567890AB
- :return: A mac that matches the standard neutron format.
- Ex. 12:34:56:78:90:ab
- """
- # Need the replacement if the mac is already normalized.
- mac = mac.lower().replace(':', '')
- return ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2))
-
-
-class VMBuilder(object):
- """Converts a Nova Instance/Flavor into a pypowervm LPARBuilder."""
- _PVM_PROC_COMPAT = 'powervm:processor_compatibility'
- _PVM_UNCAPPED = 'powervm:uncapped'
- _PVM_DED_SHAR_MODE = 'powervm:dedicated_sharing_mode'
- _PVM_SHAR_PROC_POOL = 'powervm:shared_proc_pool_name'
- _PVM_SRR_CAPABILITY = 'powervm:srr_capability'
-
- # Map of PowerVM extra specs to the lpar builder attributes.
- # '' is used for attributes that are not implemented yet.
- # None means there is no direct attribute mapping and must
- # be handled individually
- _ATTRS_MAP = {
- 'powervm:min_mem': lpar_bldr.MIN_MEM,
- 'powervm:max_mem': lpar_bldr.MAX_MEM,
- 'powervm:min_vcpu': lpar_bldr.MIN_VCPU,
- 'powervm:max_vcpu': lpar_bldr.MAX_VCPU,
- 'powervm:proc_units': lpar_bldr.PROC_UNITS,
- 'powervm:min_proc_units': lpar_bldr.MIN_PROC_U,
- 'powervm:max_proc_units': lpar_bldr.MAX_PROC_U,
- 'powervm:dedicated_proc': lpar_bldr.DED_PROCS,
- 'powervm:shared_weight': lpar_bldr.UNCAPPED_WEIGHT,
- 'powervm:availability_priority': lpar_bldr.AVAIL_PRIORITY,
- _PVM_UNCAPPED: None,
- _PVM_DED_SHAR_MODE: None,
- _PVM_PROC_COMPAT: None,
- _PVM_SHAR_PROC_POOL: None,
- _PVM_SRR_CAPABILITY: None,
- }
-
- _DED_SHARING_MODES_MAP = {
- 'share_idle_procs': pvm_bp.DedicatedSharingMode.SHARE_IDLE_PROCS,
- 'keep_idle_procs': pvm_bp.DedicatedSharingMode.KEEP_IDLE_PROCS,
- 'share_idle_procs_active':
- pvm_bp.DedicatedSharingMode.SHARE_IDLE_PROCS_ACTIVE,
- 'share_idle_procs_always':
- pvm_bp.DedicatedSharingMode.SHARE_IDLE_PROCS_ALWAYS,
- }
-
- def __init__(self, host_w, adapter):
- """Initialize the converter.
-
- :param host_w: The host System wrapper.
- :param adapter: The pypowervm.adapter.Adapter for the PowerVM REST API.
- """
- self.adapter = adapter
- self.host_w = host_w
- kwargs = dict(proc_units_factor=CONF.powervm.proc_units_factor)
- self.stdz = lpar_bldr.DefaultStandardize(host_w, **kwargs)
-
- def lpar_builder(self, inst):
- """Returns the pypowervm LPARBuilder for a given Nova flavor.
-
- :param inst: the VM instance
- """
- attrs = self._format_flavor(inst)
- # TODO(thorst, efried) Add in IBMi attributes
- return lpar_bldr.LPARBuilder(self.adapter, attrs, self.stdz)
-
- def _format_flavor(self, inst):
- """Returns the pypowervm format of the flavor.
-
- :param inst: The Nova VM instance.
- :return: A dict that can be used by the LPAR builder.
- """
- # The attrs are what is sent to pypowervm to convert the lpar.
- attrs = {
- lpar_bldr.NAME: pvm_u.sanitize_partition_name_for_api(inst.name),
- # The uuid is only actually set on a create of an LPAR
- lpar_bldr.UUID: get_pvm_uuid(inst),
- lpar_bldr.MEM: inst.flavor.memory_mb,
- lpar_bldr.VCPU: inst.flavor.vcpus,
- # Set the srr capability to True by default
- lpar_bldr.SRR_CAPABLE: True}
-
- # Loop through the extra specs and process powervm keys
- for key in inst.flavor.extra_specs.keys():
- # If it is not a valid key, then can skip.
- if not self._is_pvm_valid_key(key):
- continue
-
- # Look for the mapping to the lpar builder
- bldr_key = self._ATTRS_MAP.get(key)
-
- # Check for no direct mapping, if the value is none, need to
- # derive the complex type
- if bldr_key is None:
- self._build_complex_type(key, attrs, inst.flavor)
- else:
- # We found a direct mapping
- attrs[bldr_key] = inst.flavor.extra_specs[key]
-
- return attrs
-
- def _is_pvm_valid_key(self, key):
- """Will return if this is a valid PowerVM key.
-
- :param key: The powervm key.
- :return: True if valid key. False if non-powervm key and should be
- skipped.
- """
- # If not a powervm key, then it is not 'pvm_valid'
- if not key.startswith('powervm:'):
- return False
-
- # Check if this is a valid attribute
- if key not in self._ATTRS_MAP:
- # Could be a key from a future release - warn, but ignore.
- LOG.warning("Unhandled PowerVM key '%s'.", key)
- return False
-
- return True
-
- def _build_complex_type(self, key, attrs, flavor):
- """If a key does not directly map, this method derives the right value.
-
- Some types are complex, in that the flavor may have one key that maps
- to several different attributes in the lpar builder. This method
- handles the complex types.
-
- :param key: The flavor's key.
- :param attrs: The attribute map to put the value into.
- :param flavor: The Nova instance flavor.
- :return: The value to put in for the key.
- """
- # Map uncapped to sharing mode
- if key == self._PVM_UNCAPPED:
- attrs[lpar_bldr.SHARING_MODE] = (
- pvm_bp.SharingMode.UNCAPPED
- if stru.bool_from_string(flavor.extra_specs[key], strict=True)
- else pvm_bp.SharingMode.CAPPED)
- elif key == self._PVM_DED_SHAR_MODE:
- # Dedicated sharing modes...map directly
- shr_mode_key = flavor.extra_specs[key]
- mode = self._DED_SHARING_MODES_MAP.get(shr_mode_key)
- if mode is None:
- raise exc.InvalidParameterValue(err=_(
- "Invalid dedicated sharing mode '%s'!") % shr_mode_key)
- attrs[lpar_bldr.SHARING_MODE] = mode
- elif key == self._PVM_SHAR_PROC_POOL:
- pool_name = flavor.extra_specs[key]
- attrs[lpar_bldr.SPP] = self._spp_pool_id(pool_name)
- elif key == self._PVM_PROC_COMPAT:
- # Handle variants of the supported values
- attrs[lpar_bldr.PROC_COMPAT] = re.sub(
- r'\+', '_Plus', flavor.extra_specs[key])
- elif key == self._PVM_SRR_CAPABILITY:
- attrs[lpar_bldr.SRR_CAPABLE] = stru.bool_from_string(
- flavor.extra_specs[key], strict=True)
- else:
- # There was no mapping or we didn't handle it. This is a BUG!
- raise KeyError(_(
- "Unhandled PowerVM key '%s'! Please report this bug.") % key)
-
- def _spp_pool_id(self, pool_name):
- """Returns the shared proc pool id for a given pool name.
-
- :param pool_name: The shared proc pool name.
- :return: The internal API id for the shared proc pool.
- """
- if (pool_name is None or
- pool_name == pvm_spp.DEFAULT_POOL_DISPLAY_NAME):
- # The default pool is 0
- return 0
-
- # Search for the pool with this name
- pool_wraps = pvm_spp.SharedProcPool.search(
- self.adapter, name=pool_name, parent=self.host_w)
-
- # Check to make sure there is a pool with the name, and only one pool.
- if len(pool_wraps) > 1:
- msg = (_('Multiple Shared Processing Pools with name %(pool)s.') %
- {'pool': pool_name})
- raise exc.ValidationError(msg)
- elif len(pool_wraps) == 0:
- msg = (_('Unable to find Shared Processing Pool %(pool)s') %
- {'pool': pool_name})
- raise exc.ValidationError(msg)
-
- # Return the singular pool id.
- return pool_wraps[0].id
diff --git a/nova/virt/powervm/volume/__init__.py b/nova/virt/powervm/volume/__init__.py
deleted file mode 100644
index bca5ce473b..0000000000
--- a/nova/virt/powervm/volume/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from nova import exception
-from nova.i18n import _
-from nova.virt.powervm.volume import fcvscsi
-
-
-def build_volume_driver(adapter, instance, conn_info, stg_ftsk=None):
- drv_type = conn_info.get('driver_volume_type')
- if drv_type != 'fibre_channel':
- reason = _("Invalid connection type of %s") % drv_type
- raise exception.InvalidVolume(reason=reason)
- return fcvscsi.FCVscsiVolumeAdapter(adapter, instance, conn_info,
- stg_ftsk=stg_ftsk)
diff --git a/nova/virt/powervm/volume/fcvscsi.py b/nova/virt/powervm/volume/fcvscsi.py
deleted file mode 100644
index cf2c6e4d25..0000000000
--- a/nova/virt/powervm/volume/fcvscsi.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright 2015, 2018 IBM Corp.
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_concurrency import lockutils
-from oslo_log import log as logging
-from pypowervm import const as pvm_const
-from pypowervm.tasks import hdisk
-from pypowervm.tasks import partition as pvm_tpar
-from pypowervm.tasks import scsi_mapper as tsk_map
-from pypowervm.utils import transaction as pvm_tx
-from pypowervm.wrappers import storage as pvm_stor
-from pypowervm.wrappers import virtual_io_server as pvm_vios
-from taskflow import task
-
-from nova import conf as cfg
-from nova import exception as exc
-from nova.i18n import _
-from nova.virt import block_device
-from nova.virt.powervm import vm
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-LOCAL_FEED_TASK = 'local_feed_task'
-UDID_KEY = 'target_UDID'
-
-# A global variable that will cache the physical WWPNs on the system.
-_vscsi_pfc_wwpns = None
-
-
-@lockutils.synchronized('vscsi_wwpns')
-def wwpns(adapter):
- """Builds the WWPNs of the adapters that will connect the ports.
-
- :return: The list of WWPNs that need to be included in the zone set.
- """
- return pvm_tpar.get_physical_wwpns(adapter, force_refresh=False)
-
-
-class FCVscsiVolumeAdapter(object):
-
- def __init__(self, adapter, instance, connection_info, stg_ftsk=None):
- """Initialize the PowerVMVolumeAdapter
-
- :param adapter: The pypowervm adapter.
- :param instance: The nova instance that the volume should attach to.
- :param connection_info: The volume connection info generated from the
- BDM. Used to determine how to attach the
- volume to the VM.
- :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
- I/O Operations. If provided, the Virtual I/O Server
- mapping updates will be added to the FeedTask. This
- defers the updates to some later point in time. If
- the FeedTask is not provided, the updates will be run
- immediately when the respective method is executed.
- """
- self.adapter = adapter
- self.instance = instance
- self.connection_info = connection_info
- self.vm_uuid = vm.get_pvm_uuid(instance)
- self.reset_stg_ftsk(stg_ftsk=stg_ftsk)
- self._pfc_wwpns = None
-
- @property
- def volume_id(self):
- """Method to return the volume id.
-
- Every driver must implement this method if the default impl will
- not work for their data.
- """
- return block_device.get_volume_id(self.connection_info)
-
- def reset_stg_ftsk(self, stg_ftsk=None):
- """Resets the pypowervm transaction FeedTask to a new value.
-
- The previous updates from the original FeedTask WILL NOT be migrated
- to this new FeedTask.
-
- :param stg_ftsk: (Optional) The pypowervm transaction FeedTask for the
- I/O Operations. If provided, the Virtual I/O Server
- mapping updates will be added to the FeedTask. This
- defers the updates to some later point in time. If
- the FeedTask is not provided, the updates will be run
- immediately when this method is executed.
- """
- if stg_ftsk is None:
- getter = pvm_vios.VIOS.getter(
- self.adapter, xag=[pvm_const.XAG.VIO_SMAP])
- self.stg_ftsk = pvm_tx.FeedTask(LOCAL_FEED_TASK, getter)
- else:
- self.stg_ftsk = stg_ftsk
-
- def _set_udid(self, udid):
- """This method will set the hdisk udid in the connection_info.
-
- :param udid: The hdisk target_udid to be stored in system_metadata
- """
- self.connection_info['data'][UDID_KEY] = udid
-
- def _get_udid(self):
- """This method will return the hdisk udid stored in connection_info.
-
- :return: The target_udid associated with the hdisk
- """
- try:
- return self.connection_info['data'][UDID_KEY]
- except (KeyError, ValueError):
- # It's common to lose our specific data in the BDM. The connection
- # information can be 'refreshed' by operations like live migrate
- # and resize
- LOG.info('Failed to retrieve target_UDID key from BDM for volume '
- 'id %s', self.volume_id, instance=self.instance)
- return None
-
- def attach_volume(self):
- """Attaches the volume."""
-
- # Check if the VM is in a state where the attach is acceptable.
- lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
- capable, reason = lpar_w.can_modify_io()
- if not capable:
- raise exc.VolumeAttachFailed(
- volume_id=self.volume_id, reason=reason)
-
- # Its about to get weird. The transaction manager has a list of
- # VIOSes. We could use those, but they only have SCSI mappings (by
- # design). They do not have storage (super expensive).
- #
- # We need the storage xag when we are determining which mappings to
- # add to the system. But we don't want to tie it to the stg_ftsk. If
- # we do, every retry, every etag gather, etc... takes MUCH longer.
- #
- # So we get the VIOSes with the storage xag here, separately, to save
- # the stg_ftsk from potentially having to run it multiple times.
- attach_ftsk = pvm_tx.FeedTask(
- 'attach_volume_to_vio', pvm_vios.VIOS.getter(
- self.adapter, xag=[pvm_const.XAG.VIO_STOR,
- pvm_const.XAG.VIO_SMAP]))
-
- # Find valid hdisks and map to VM.
- attach_ftsk.add_functor_subtask(
- self._attach_volume_to_vio, provides='vio_modified',
- flag_update=False)
-
- ret = attach_ftsk.execute()
-
- # Check the number of VIOSes
- vioses_modified = 0
- for result in ret['wrapper_task_rets'].values():
- if result['vio_modified']:
- vioses_modified += 1
-
- # Validate that a vios was found
- if vioses_modified == 0:
- msg = (_('Failed to discover valid hdisk on any Virtual I/O '
- 'Server for volume %(volume_id)s.') %
- {'volume_id': self.volume_id})
- ex_args = {'volume_id': self.volume_id, 'reason': msg}
- raise exc.VolumeAttachFailed(**ex_args)
-
- self.stg_ftsk.execute()
-
- def _attach_volume_to_vio(self, vios_w):
- """Attempts to attach a volume to a given VIO.
-
- :param vios_w: The Virtual I/O Server wrapper to attach to.
- :return: True if the volume was attached. False if the volume was
- not (could be the Virtual I/O Server does not have
- connectivity to the hdisk).
- """
- status, device_name, udid = self._discover_volume_on_vios(vios_w)
-
- if hdisk.good_discovery(status, device_name):
- # Found a hdisk on this Virtual I/O Server. Add the action to
- # map it to the VM when the stg_ftsk is executed.
- with lockutils.lock(self.volume_id):
- self._add_append_mapping(vios_w.uuid, device_name,
- tag=self.volume_id)
-
- # Save the UDID for the disk in the connection info. It is
- # used for the detach.
- self._set_udid(udid)
- LOG.debug('Added deferred task to attach device %(device_name)s '
- 'to vios %(vios_name)s.',
- {'device_name': device_name, 'vios_name': vios_w.name},
- instance=self.instance)
-
- # Valid attachment
- return True
-
- return False
-
- def extend_volume(self):
- # The compute node does not need to take any additional steps for the
- # client to see the extended volume.
- pass
-
- def _discover_volume_on_vios(self, vios_w):
- """Discovers an hdisk on a single vios for the volume.
-
- :param vios_w: VIOS wrapper to process
- :returns: Status of the volume or None
- :returns: Device name or None
- :returns: UDID or None
- """
- # Get the initiatior WWPNs, targets and Lun for the given VIOS.
- vio_wwpns, t_wwpns, lun = self._get_hdisk_itls(vios_w)
-
- # Build the ITL map and discover the hdisks on the Virtual I/O
- # Server (if any).
- itls = hdisk.build_itls(vio_wwpns, t_wwpns, lun)
- if len(itls) == 0:
- LOG.debug('No ITLs for VIOS %(vios)s for volume %(volume_id)s.',
- {'vios': vios_w.name, 'volume_id': self.volume_id},
- instance=self.instance)
- return None, None, None
-
- status, device_name, udid = hdisk.discover_hdisk(self.adapter,
- vios_w.uuid, itls)
-
- if hdisk.good_discovery(status, device_name):
- LOG.info('Discovered %(hdisk)s on vios %(vios)s for volume '
- '%(volume_id)s. Status code: %(status)s.',
- {'hdisk': device_name, 'vios': vios_w.name,
- 'volume_id': self.volume_id, 'status': status},
- instance=self.instance)
- elif status == hdisk.LUAStatus.DEVICE_IN_USE:
- LOG.warning('Discovered device %(dev)s for volume %(volume)s '
- 'on %(vios)s is in use. Error code: %(status)s.',
- {'dev': device_name, 'volume': self.volume_id,
- 'vios': vios_w.name, 'status': status},
- instance=self.instance)
-
- return status, device_name, udid
-
- def _get_hdisk_itls(self, vios_w):
- """Returns the mapped ITLs for the hdisk for the given VIOS.
-
- A PowerVM system may have multiple Virtual I/O Servers to virtualize
- the I/O to the virtual machines. Each Virtual I/O server may have their
- own set of initiator WWPNs, target WWPNs and Lun on which hdisk is
- mapped. It will determine and return the ITLs for the given VIOS.
-
- :param vios_w: A virtual I/O Server wrapper.
- :return: List of the i_wwpns that are part of the vios_w,
- :return: List of the t_wwpns that are part of the vios_w,
- :return: Target lun id of the hdisk for the vios_w.
- """
- it_map = self.connection_info['data']['initiator_target_map']
- i_wwpns = it_map.keys()
-
- active_wwpns = vios_w.get_active_pfc_wwpns()
- vio_wwpns = [x for x in i_wwpns if x in active_wwpns]
-
- t_wwpns = []
- for it_key in vio_wwpns:
- t_wwpns.extend(it_map[it_key])
- lun = self.connection_info['data']['target_lun']
-
- return vio_wwpns, t_wwpns, lun
-
- def _add_append_mapping(self, vios_uuid, device_name, tag=None):
- """Update the stg_ftsk to append the mapping to the VIOS.
-
- :param vios_uuid: The UUID of the vios for the pypowervm adapter.
- :param device_name: The hdisk device name.
- :param tag: String tag to set on the physical volume.
- """
- def add_func(vios_w):
- LOG.info("Adding vSCSI mapping to Physical Volume %(dev)s on "
- "vios %(vios)s.",
- {'dev': device_name, 'vios': vios_w.name},
- instance=self.instance)
- pv = pvm_stor.PV.bld(self.adapter, device_name, tag=tag)
- v_map = tsk_map.build_vscsi_mapping(None, vios_w, self.vm_uuid, pv)
- return tsk_map.add_map(vios_w, v_map)
- self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(add_func)
-
- def detach_volume(self):
- """Detach the volume."""
-
- # Check if the VM is in a state where the detach is acceptable.
- lpar_w = vm.get_instance_wrapper(self.adapter, self.instance)
- capable, reason = lpar_w.can_modify_io()
- if not capable:
- raise exc.VolumeDetachFailed(
- volume_id=self.volume_id, reason=reason)
-
- # Run the detach
- try:
- # See logic in attach_volume for why this new FeedTask is here.
- detach_ftsk = pvm_tx.FeedTask(
- 'detach_volume_from_vio', pvm_vios.VIOS.getter(
- self.adapter, xag=[pvm_const.XAG.VIO_STOR,
- pvm_const.XAG.VIO_SMAP]))
- # Find hdisks to detach
- detach_ftsk.add_functor_subtask(
- self._detach_vol_for_vio, provides='vio_modified',
- flag_update=False)
-
- ret = detach_ftsk.execute()
-
- # Warn if no hdisks detached.
- if not any([result['vio_modified']
- for result in ret['wrapper_task_rets'].values()]):
- LOG.warning("Detach Volume: Failed to detach the "
- "volume %(volume_id)s on ANY of the Virtual "
- "I/O Servers.", {'volume_id': self.volume_id},
- instance=self.instance)
-
- except Exception as e:
- LOG.exception('PowerVM error detaching volume from virtual '
- 'machine.', instance=self.instance)
- ex_args = {'volume_id': self.volume_id, 'reason': str(e)}
- raise exc.VolumeDetachFailed(**ex_args)
- self.stg_ftsk.execute()
-
- def _detach_vol_for_vio(self, vios_w):
- """Removes the volume from a specific Virtual I/O Server.
-
- :param vios_w: The VIOS wrapper.
- :return: True if a remove action was done against this VIOS. False
- otherwise.
- """
- LOG.debug("Detach volume %(vol)s from vios %(vios)s",
- dict(vol=self.volume_id, vios=vios_w.name),
- instance=self.instance)
- device_name = None
- udid = self._get_udid()
- try:
- if udid:
- # This will only work if vios_w has the Storage XAG.
- device_name = vios_w.hdisk_from_uuid(udid)
-
- if not udid or not device_name:
- # We lost our bdm data. We'll need to discover it.
- status, device_name, udid = self._discover_volume_on_vios(
- vios_w)
-
- # Check if the hdisk is in a bad state in the I/O Server.
- # Subsequent scrub code on future deploys will clean this up.
- if not hdisk.good_discovery(status, device_name):
- LOG.warning(
- "Detach Volume: The backing hdisk for volume "
- "%(volume_id)s on Virtual I/O Server %(vios)s is "
- "not in a valid state. This may be the result of "
- "an evacuate.",
- {'volume_id': self.volume_id, 'vios': vios_w.name},
- instance=self.instance)
- return False
-
- except Exception:
- LOG.exception(
- "Detach Volume: Failed to find disk on Virtual I/O "
- "Server %(vios_name)s for volume %(volume_id)s. Volume "
- "UDID: %(volume_uid)s.",
- {'vios_name': vios_w.name, 'volume_id': self.volume_id,
- 'volume_uid': udid, }, instance=self.instance)
- return False
-
- # We have found the device name
- LOG.info("Detach Volume: Discovered the device %(hdisk)s "
- "on Virtual I/O Server %(vios_name)s for volume "
- "%(volume_id)s. Volume UDID: %(volume_uid)s.",
- {'hdisk': device_name, 'vios_name': vios_w.name,
- 'volume_id': self.volume_id, 'volume_uid': udid},
- instance=self.instance)
-
- # Add the action to remove the mapping when the stg_ftsk is run.
- partition_id = vm.get_vm_qp(self.adapter, self.vm_uuid,
- qprop='PartitionID')
-
- with lockutils.lock(self.volume_id):
- self._add_remove_mapping(partition_id, vios_w.uuid,
- device_name)
-
- # Add a step to also remove the hdisk
- self._add_remove_hdisk(vios_w, device_name)
-
- # Found a valid element to remove
- return True
-
- def _add_remove_mapping(self, vm_uuid, vios_uuid, device_name):
- """Adds a subtask to remove the storage mapping.
-
- :param vm_uuid: The UUID of the VM instance
- :param vios_uuid: The UUID of the vios for the pypowervm adapter.
- :param device_name: The hdisk device name.
- """
- def rm_func(vios_w):
- LOG.info("Removing vSCSI mapping from physical volume %(dev)s "
- "on vios %(vios)s",
- {'dev': device_name, 'vios': vios_w.name},
- instance=self.instance)
- removed_maps = tsk_map.remove_maps(
- vios_w, vm_uuid,
- tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
- return removed_maps
- self.stg_ftsk.wrapper_tasks[vios_uuid].add_functor_subtask(rm_func)
-
- def _add_remove_hdisk(self, vio_wrap, device_name):
- """Adds a post-mapping task to remove the hdisk from the VIOS.
-
- This removal is only done after the mapping updates have completed.
-
- :param vio_wrap: The Virtual I/O Server wrapper to remove the disk
- from.
- :param device_name: The hdisk name to remove.
- """
- def rm_hdisk():
- LOG.info("Removing hdisk %(hdisk)s from Virtual I/O Server "
- "%(vios)s", {'hdisk': device_name, 'vios': vio_wrap.name},
- instance=self.instance)
- try:
- # Attempt to remove the hDisk
- hdisk.remove_hdisk(self.adapter, CONF.host, device_name,
- vio_wrap.uuid)
- except Exception:
- # If there is a failure, log it, but don't stop the process
- LOG.exception("There was an error removing the hdisk "
- "%(disk)s from Virtual I/O Server %(vios)s.",
- {'disk': device_name, 'vios': vio_wrap.name},
- instance=self.instance)
-
- # Check if there are not multiple mapping for the device
- if not self._check_host_mappings(vio_wrap, device_name):
- name = 'rm_hdisk_%s_%s' % (vio_wrap.name, device_name)
- self.stg_ftsk.add_post_execute(task.FunctorTask(
- rm_hdisk, name=name))
- else:
- LOG.info("hdisk %(disk)s is not removed from Virtual I/O Server "
- "%(vios)s because it has existing storage mappings",
- {'disk': device_name, 'vios': vio_wrap.name},
- instance=self.instance)
-
- def _check_host_mappings(self, vios_wrap, device_name):
- """Checks if the given hdisk has multiple mappings
-
- :param vio_wrap: The Virtual I/O Server wrapper to remove the disk
- from.
- :param device_name: The hdisk name to remove.
- :return: True if there are multiple instances using the given hdisk
- """
- vios_scsi_mappings = next(v.scsi_mappings for v in self.stg_ftsk.feed
- if v.uuid == vios_wrap.uuid)
- mappings = tsk_map.find_maps(
- vios_scsi_mappings, None,
- tsk_map.gen_match_func(pvm_stor.PV, names=[device_name]))
-
- LOG.debug("%(num)d storage mapping(s) found for %(dev)s on VIOS "
- "%(vios)s", {'num': len(mappings), 'dev': device_name,
- 'vios': vios_wrap.name}, instance=self.instance)
- # The mapping is still present as the task feed removes it later.
- return len(mappings) > 1
diff --git a/nova/virt/vmwareapi/constants.py b/nova/virt/vmwareapi/constants.py
index 6452434ce7..2a42174bf7 100644
--- a/nova/virt/vmwareapi/constants.py
+++ b/nova/virt/vmwareapi/constants.py
@@ -27,7 +27,8 @@ MIN_VC_OVS_VERSION = '5.5.0'
DISK_FORMAT_ISO = 'iso'
DISK_FORMAT_VMDK = 'vmdk'
DISK_FORMAT_ISCSI = 'iscsi'
-DISK_FORMATS_ALL = [DISK_FORMAT_ISO, DISK_FORMAT_VMDK]
+DISK_FORMAT_FCD = 'vstorageobject'
+DISK_FORMATS_ALL = [DISK_FORMAT_ISO, DISK_FORMAT_VMDK, DISK_FORMAT_FCD]
DISK_TYPE_THIN = 'thin'
CONTAINER_FORMAT_BARE = 'bare'
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 9934627e1e..aa728ea694 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -27,10 +27,8 @@ from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
from oslo_utils import versionutils as v_utils
-from oslo_vmware import api
from oslo_vmware import exceptions as vexc
from oslo_vmware import pbm
-from oslo_vmware import vim
from oslo_vmware import vim_util
from nova.compute import power_state
@@ -47,6 +45,7 @@ from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import host
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vim_util as nova_vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
@@ -72,6 +71,7 @@ class VMwareVCDriver(driver.ComputeDriver):
"supports_trusted_certs": False,
"supports_pcpus": False,
"supports_accelerators": False,
+ "supports_remote_managed_ports": False,
# Image type support flags
"supports_image_type_aki": False,
@@ -87,13 +87,6 @@ class VMwareVCDriver(driver.ComputeDriver):
"supports_image_type_ploop": False,
}
- # Legacy nodename is of the form: <mo id>(<cluster name>)
- # e.g. domain-26(TestCluster)
- # We assume <mo id> consists of alphanumeric, _ and -.
- # We assume cluster name is everything between the first ( and the last ).
- # We pull out <mo id> for re-use.
- LEGACY_NODENAME = re.compile(r'([\w-]+)\(.+\)')
-
# The vCenter driver includes API that acts on ESX hosts or groups
# of ESX hosts in clusters or non-cluster logical-groupings.
#
@@ -119,7 +112,7 @@ class VMwareVCDriver(driver.ComputeDriver):
_("Invalid Regular Expression %s")
% CONF.vmware.datastore_regex)
- self._session = VMwareAPISession(scheme=scheme)
+ self._session = session.VMwareAPISession(scheme=scheme)
self._check_min_version()
@@ -525,6 +518,10 @@ class VMwareVCDriver(driver.ComputeDriver):
# where cpu traits are added. In the vmware world, this is where we
# would add nested providers representing tenant VDC and similar.
+ def prepare_for_spawn(self, instance):
+ """Perform pre-checks for spawn."""
+ self._vmops.prepare_for_spawn(instance)
+
def spawn(self, context, instance, image_meta, injected_files,
admin_password, allocations, network_info=None,
block_device_info=None, power_on=True, accel_info=None):
@@ -718,50 +715,3 @@ class VMwareVCDriver(driver.ComputeDriver):
def detach_interface(self, context, instance, vif):
"""Detach an interface from the instance."""
self._vmops.detach_interface(context, instance, vif)
-
-
-class VMwareAPISession(api.VMwareAPISession):
- """Sets up a session with the VC/ESX host and handles all
- the calls made to the host.
- """
-
- def __init__(self, host_ip=CONF.vmware.host_ip,
- host_port=CONF.vmware.host_port,
- username=CONF.vmware.host_username,
- password=CONF.vmware.host_password,
- retry_count=CONF.vmware.api_retry_count,
- scheme="https",
- cacert=CONF.vmware.ca_file,
- insecure=CONF.vmware.insecure,
- pool_size=CONF.vmware.connection_pool_size):
- super(VMwareAPISession, self).__init__(
- host=host_ip,
- port=host_port,
- server_username=username,
- server_password=password,
- api_retry_count=retry_count,
- task_poll_interval=CONF.vmware.task_poll_interval,
- scheme=scheme,
- create_session=True,
- cacert=cacert,
- insecure=insecure,
- pool_size=pool_size)
-
- def _is_vim_object(self, module):
- """Check if the module is a VIM Object instance."""
- return isinstance(module, vim.Vim)
-
- def _call_method(self, module, method, *args, **kwargs):
- """Calls a method within the module specified with
- args provided.
- """
- if not self._is_vim_object(module):
- return self.invoke_api(module, method, self.vim, *args, **kwargs)
- else:
- return self.invoke_api(module, method, *args, **kwargs)
-
- def _wait_for_task(self, task_ref):
- """Return a Deferred that will give the result of the given task.
- The task is polled until it completes.
- """
- return self.wait_for_task(task_ref)
diff --git a/nova/virt/vmwareapi/session.py b/nova/virt/vmwareapi/session.py
new file mode 100644
index 0000000000..973db5760f
--- /dev/null
+++ b/nova/virt/vmwareapi/session.py
@@ -0,0 +1,157 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (c) 2012 VMware, Inc.
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import abc
+import itertools
+
+from oslo_log import log as logging
+from oslo_utils import excutils
+from oslo_vmware import api
+from oslo_vmware import exceptions as vexc
+from oslo_vmware import vim
+from oslo_vmware.vim_util import get_moref_value
+
+import nova.conf
+
+CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+
+
+class StableMoRefProxy(metaclass=abc.ABCMeta):
+ """Abstract base class which acts as a proxy
+ for Managed-Object-References (MoRef).
+ Those references are usually "stable", meaning they do not change
+ over the lifetime of the object. Usually, however, does not mean
+ always: in that case we need to fetch the reference again via some
+ search method which uses a guaranteed stable identifier (names,
+ uuids, ...).
+ """
+
+ def __init__(self, ref):
+ self.moref = ref
+
+ @property
+ def __class__(self):
+ # Suds accesses the __class__.__name__ attribute
+ # of the object to determine the xml-tag of the object
+ # so we have to fake it
+ return self.moref.__class__
+
+ @abc.abstractmethod
+ def fetch_moref(self, session):
+ """Updates the moref field or raises
+ same exception the initial search would have
+ """
+
+ def __getattr__(self, name):
+ return getattr(self.moref, name)
+
+ def __repr__(self):
+ return "StableMoRefProxy({!r})".format(self.moref)
+
+
+class MoRef(StableMoRefProxy):
+ """MoRef takes a closure to resolve the reference of a managed object
+ That closure is called again, in case we get a ManagedObjectNotFound
+ exception on said reference.
+ """
+
+ def __init__(self, closure, ref=None):
+ self._closure = closure
+ ref = ref or self._closure()
+ super().__init__(ref)
+
+ def fetch_moref(self, _):
+ self.moref = self._closure()
+
+ def __repr__(self):
+ return "MoRef({!r})".format(self.moref)
+
+
+class VMwareAPISession(api.VMwareAPISession):
+ """Sets up a session with the VC/ESX host and handles all
+ the calls made to the host.
+ """
+
+ def __init__(self, host_ip=CONF.vmware.host_ip,
+ host_port=CONF.vmware.host_port,
+ username=CONF.vmware.host_username,
+ password=CONF.vmware.host_password,
+ retry_count=CONF.vmware.api_retry_count,
+ scheme="https",
+ cacert=CONF.vmware.ca_file,
+ insecure=CONF.vmware.insecure,
+ pool_size=CONF.vmware.connection_pool_size):
+ super(VMwareAPISession, self).__init__(
+ host=host_ip,
+ port=host_port,
+ server_username=username,
+ server_password=password,
+ api_retry_count=retry_count,
+ task_poll_interval=CONF.vmware.task_poll_interval,
+ scheme=scheme,
+ create_session=True,
+ cacert=cacert,
+ insecure=insecure,
+ pool_size=pool_size)
+
+ @staticmethod
+ def _is_vim_object(module):
+ """Check if the module is a VIM Object instance."""
+ return isinstance(module, vim.Vim)
+
+ def _call_method(self, module, method, *args, **kwargs):
+ """Calls a method within the module specified with
+ args provided.
+ """
+ try:
+ if not self._is_vim_object(module):
+ return self.invoke_api(module, method, self.vim, *args,
+ **kwargs)
+ return self.invoke_api(module, method, *args, **kwargs)
+ except vexc.ManagedObjectNotFoundException as monfe:
+ with excutils.save_and_reraise_exception() as ctxt:
+ moref = monfe.details.get("obj") if monfe.details else None
+ for arg in itertools.chain(args, kwargs.values()):
+ if not isinstance(arg, StableMoRefProxy):
+ continue
+ moref_arg = get_moref_value(arg.moref)
+ if moref != moref_arg:
+ continue
+ # We have found the argument with the moref
+ # causing the exception and we can try to recover it
+ arg.fetch_moref(self)
+ if not arg.moref:
+ # We didn't recover the reference
+ ctxt.reraise = True
+ break
+ moref_arg = get_moref_value(arg.moref)
+ if moref != moref_arg:
+ # We actually recovered, so do not raise `monfe`
+ LOG.info("Replaced moref %s with %s",
+ moref, moref_arg)
+ ctxt.reraise = False
+ # We only end up here when we have recovered a moref by changing
+ # the stored value of an argument to a different value,
+ # so let's try again (and recover again if it happens more than once)
+ return self._call_method(module, method, *args, **kwargs)
+
+ def _wait_for_task(self, task_ref):
+ """Return a Deferred that will give the result of the given task.
+ The task is polled until it completes.
+ """
+ return self.wait_for_task(task_ref)
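To make the intent of the new session module concrete, here is a minimal, hypothetical usage sketch (not part of the patch itself): a caller wraps a VM reference in MoRef with a closure that can re-resolve it, and _call_method() transparently refreshes and retries if vCenter reports the stored reference as gone. The instance UUID below is a placeholder.

    from oslo_vmware import vim_util as vutil

    from nova.virt.vmwareapi import session as vm_session
    from nova.virt.vmwareapi import vm_util

    api_session = vm_session.VMwareAPISession()
    uuid = '11111111-2222-3333-4444-555555555555'  # placeholder instance UUID

    # MoRef re-runs the closure whenever the cached reference goes stale.
    vm_ref = vm_session.MoRef(
        lambda: vm_util.search_vm_ref_by_identifier(api_session, uuid))

    # On ManagedObjectNotFound, _call_method() calls fetch_moref() on the
    # proxy and retries the call with the refreshed reference.
    state = api_session._call_method(
        vutil, 'get_object_property', vm_ref, 'runtime.powerState')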
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index 01a2e18c8d..7aaf5ca827 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -20,7 +20,6 @@ The VMware API VM utility module to build SOAP object specs.
import collections
import copy
-import functools
from oslo_log import log as logging
from oslo_service import loopingcall
@@ -37,6 +36,7 @@ from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vim_util
LOG = logging.getLogger(__name__)
@@ -119,32 +119,16 @@ def vm_refs_cache_reset():
_VM_REFS_CACHE = {}
-def vm_ref_cache_delete(id):
- _VM_REFS_CACHE.pop(id, None)
+def vm_ref_cache_delete(id_):
+ _VM_REFS_CACHE.pop(id_, None)
-def vm_ref_cache_update(id, vm_ref):
- _VM_REFS_CACHE[id] = vm_ref
+def vm_ref_cache_update(id_, vm_ref):
+ _VM_REFS_CACHE[id_] = vm_ref
-def vm_ref_cache_get(id):
- return _VM_REFS_CACHE.get(id)
-
-
-def _vm_ref_cache(id, func, session, data):
- vm_ref = vm_ref_cache_get(id)
- if not vm_ref:
- vm_ref = func(session, data)
- vm_ref_cache_update(id, vm_ref)
- return vm_ref
-
-
-def vm_ref_cache_from_instance(func):
- @functools.wraps(func)
- def wrapper(session, instance):
- id = instance.uuid
- return _vm_ref_cache(id, func, session, instance)
- return wrapper
+def vm_ref_cache_get(id_):
+ return _VM_REFS_CACHE.get(id_)
# the config key which stores the VNC port
@@ -1131,15 +1115,25 @@ def _get_vm_ref_from_extraconfig(session, instance_uuid):
_get_object_for_optionvalue)
-@vm_ref_cache_from_instance
+class VmMoRefProxy(session.StableMoRefProxy):
+ def __init__(self, ref, uuid):
+ super(VmMoRefProxy, self).__init__(ref)
+ self._uuid = uuid
+
+ def fetch_moref(self, session):
+ self.moref = search_vm_ref_by_identifier(session, self._uuid)
+ if not self.moref:
+ raise exception.InstanceNotFound(instance_id=self._uuid)
+ vm_ref_cache_update(self._uuid, self.moref)
+
+
def get_vm_ref(session, instance):
- """Get reference to the VM through uuid or vm name."""
- uuid = instance.uuid
- vm_ref = (search_vm_ref_by_identifier(session, uuid) or
- _get_vm_ref_from_name(session, instance.name))
- if vm_ref is None:
- raise exception.InstanceNotFound(instance_id=uuid)
- return vm_ref
+ """Get reference to the VM through uuid."""
+ moref = vm_ref_cache_get(instance.uuid)
+ stable_ref = VmMoRefProxy(moref, instance.uuid)
+ if not moref:
+ stable_ref.fetch_moref(session)
+ return stable_ref
def search_vm_ref_by_identifier(session, identifier):
@@ -1151,8 +1145,7 @@ def search_vm_ref_by_identifier(session, identifier):
use get_vm_ref instead.
"""
vm_ref = (_get_vm_ref_from_vm_uuid(session, identifier) or
- _get_vm_ref_from_extraconfig(session, identifier) or
- _get_vm_ref_from_name(session, identifier))
+ _get_vm_ref_from_extraconfig(session, identifier))
return vm_ref
@@ -1536,8 +1529,8 @@ def find_rescue_device(hardware_devices, instance):
raise exception.NotFound(msg)
-def get_ephemeral_name(id):
- return 'ephemeral_%d.vmdk' % id
+def get_ephemeral_name(id_):
+ return 'ephemeral_%d.vmdk' % id_
def _detach_and_delete_devices_config_spec(client_factory, devices):
@@ -1619,11 +1612,11 @@ def folder_ref_cache_get(path):
return _FOLDER_PATH_REF_MAPPING.get(path)
-def _get_vm_name(display_name, id):
+def _get_vm_name(display_name, id_):
if display_name:
- return '%s (%s)' % (display_name[:41], id[:36])
- else:
- return id[:36]
+ return '%s (%s)' % (display_name[:41], id_[:36])
+
+ return id_[:36]
def rename_vm(session, vm_ref, instance):
@@ -1631,3 +1624,36 @@ def rename_vm(session, vm_ref, instance):
rename_task = session._call_method(session.vim, "Rename_Task", vm_ref,
newName=vm_name)
session._wait_for_task(rename_task)
+
+
+def _create_fcd_id_obj(client_factory, fcd_id):
+ id_obj = client_factory.create('ns0:ID')
+ id_obj.id = fcd_id
+ return id_obj
+
+
+def attach_fcd(
+ session, vm_ref, fcd_id, ds_ref_val, controller_key, unit_number
+ ):
+ client_factory = session.vim.client.factory
+ disk_id = _create_fcd_id_obj(client_factory, fcd_id)
+ ds_ref = vutil.get_moref(ds_ref_val, 'Datastore')
+ LOG.debug("Attaching fcd (id: %(fcd_id)s, datastore: %(ds_ref_val)s) to "
+ "vm: %(vm_ref)s.",
+ {'fcd_id': fcd_id,
+ 'ds_ref_val': ds_ref_val,
+ 'vm_ref': vm_ref})
+ task = session._call_method(
+ session.vim, "AttachDisk_Task", vm_ref, diskId=disk_id,
+ datastore=ds_ref, controllerKey=controller_key, unitNumber=unit_number)
+ session._wait_for_task(task)
+
+
+def detach_fcd(session, vm_ref, fcd_id):
+ client_factory = session.vim.client.factory
+ disk_id = _create_fcd_id_obj(client_factory, fcd_id)
+ LOG.debug("Detaching fcd (id: %(fcd_id)s) from vm: %(vm_ref)s.",
+ {'fcd_id': fcd_id, 'vm_ref': vm_ref})
+ task = session._call_method(
+ session.vim, "DetachDisk_Task", vm_ref, diskId=disk_id)
+ session._wait_for_task(task)
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 1225581deb..7da453bdb1 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -263,11 +263,11 @@ class VMwareVMOps(object):
parent_folder = folder_ref
return folder_ref
- def _get_folder_name(self, name, id):
+ def _get_folder_name(self, name, id_):
# Maximum folder length must be less than 80 characters.
# The 'id' length is 36. The maximum prefix for name is 40.
# We cannot truncate the 'id' as this is unique across OpenStack.
- return '%s (%s)' % (name[:40], id[:36])
+ return '%s (%s)' % (name[:40], id_[:36])
def build_virtual_machine(self, instance, image_info,
dc_info, datastore, network_info, extra_specs,
@@ -729,6 +729,12 @@ class VMwareVMOps(object):
if new_size is not None:
vi.ii.file_size = new_size
+ def prepare_for_spawn(self, instance):
+ if (int(instance.flavor.memory_mb) % 4 != 0):
+ reason = _("Memory size is not multiple of 4")
+ raise exception.InstanceUnacceptable(instance_id=instance.uuid,
+ reason=reason)
+
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
diff --git a/nova/virt/vmwareapi/volumeops.py b/nova/virt/vmwareapi/volumeops.py
index 613dc671c9..e1d60cc751 100644
--- a/nova/virt/vmwareapi/volumeops.py
+++ b/nova/virt/vmwareapi/volumeops.py
@@ -26,12 +26,30 @@ import nova.conf
from nova import exception
from nova.i18n import _
from nova.virt.vmwareapi import constants
+from nova.virt.vmwareapi import session
from nova.virt.vmwareapi import vm_util
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
+class VolumeMoRefProxy(session.StableMoRefProxy):
+ def __init__(self, connection_info_data):
+ volume_ref_value = connection_info_data.get('volume')
+ ref = None
+ if volume_ref_value:
+ ref = vutil.get_moref(volume_ref_value, 'VirtualMachine')
+ super(VolumeMoRefProxy, self).__init__(ref)
+ self._connection_info_data = connection_info_data
+
+ def fetch_moref(self, session):
+ volume_id = self._connection_info_data.get('volume_id')
+ if not volume_id:
+ volume_id = self._connection_info_data.get('name')
+ if volume_id:
+ self.moref = vm_util._get_vm_ref_from_vm_uuid(session, volume_id)
+
+
class VMwareVolumeOps(object):
"""Management class for Volume-related tasks."""
@@ -300,9 +318,10 @@ class VMwareVolumeOps(object):
connector['instance'] = vutil.get_moref_value(vm_ref)
return connector
- def _get_volume_ref(self, volume_ref_name):
- """Get the volume moref from the ref name."""
- return vutil.get_moref(volume_ref_name, 'VirtualMachine')
+ @staticmethod
+ def _get_volume_ref(connection_info_data):
+ """Get the volume moref from the "data" field in connection_info ."""
+ return VolumeMoRefProxy(connection_info_data)
def _get_vmdk_base_volume_device(self, volume_ref):
# Get the vmdk file name that the VM is pointing to
@@ -317,7 +336,7 @@ class VMwareVolumeOps(object):
LOG.debug("_attach_volume_vmdk: %s", connection_info,
instance=instance)
data = connection_info['data']
- volume_ref = self._get_volume_ref(data['volume'])
+ volume_ref = self._get_volume_ref(data)
# Get details required for adding disk device such as
# adapter_type, disk_type
@@ -367,6 +386,53 @@ class VMwareVolumeOps(object):
device_name=device_name)
LOG.debug("Attached ISCSI: %s", connection_info, instance=instance)
+ def _get_controller_key_and_unit(self, vm_ref, adapter_type):
+ LOG.debug("_get_controller_key_and_unit vm: %(vm_ref)s, adapter: "
+ "%(adapter)s.",
+ {'vm_ref': vm_ref, 'adapter': adapter_type})
+ client_factory = self._session.vim.client.factory
+ devices = self._session._call_method(vutil,
+ "get_object_property",
+ vm_ref,
+ "config.hardware.device")
+ return vm_util.allocate_controller_key_and_unit_number(
+ client_factory, devices, adapter_type)
+
+ def _attach_fcd(self, vm_ref, adapter_type, fcd_id, ds_ref_val):
+ (controller_key, unit_number,
+ controller_spec) = self._get_controller_key_and_unit(
+ vm_ref, adapter_type)
+
+ if controller_spec:
+ # No controller available to attach, create one first.
+ config_spec = self._session.vim.client.factory.create(
+ 'ns0:VirtualMachineConfigSpec')
+ config_spec.deviceChange = [controller_spec]
+ vm_util.reconfigure_vm(self._session, vm_ref, config_spec)
+ (controller_key, unit_number,
+ controller_spec) = self._get_controller_key_and_unit(
+ vm_ref, adapter_type)
+
+ vm_util.attach_fcd(
+ self._session, vm_ref, fcd_id, ds_ref_val, controller_key,
+ unit_number)
+
+ def _attach_volume_fcd(self, connection_info, instance):
+ """Attach fcd volume storage to VM instance."""
+ LOG.debug("_attach_volume_fcd: %s", connection_info, instance=instance)
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
+ data = connection_info['data']
+ adapter_type = data['adapter_type']
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE:
+ state = vm_util.get_vm_state(self._session, instance)
+ if state != power_state.SHUTDOWN:
+ raise exception.Invalid(_('%s does not support disk '
+ 'hotplug.') % adapter_type)
+
+ self._attach_fcd(vm_ref, adapter_type, data['id'], data['ds_ref_val'])
+ LOG.debug("Attached fcd: %s", connection_info, instance=instance)
+
def attach_volume(self, connection_info, instance, adapter_type=None):
"""Attach volume storage to VM instance."""
driver_type = connection_info['driver_volume_type']
@@ -376,6 +442,8 @@ class VMwareVolumeOps(object):
self._attach_volume_vmdk(connection_info, instance, adapter_type)
elif driver_type == constants.DISK_FORMAT_ISCSI:
self._attach_volume_iscsi(connection_info, instance, adapter_type)
+ elif driver_type == constants.DISK_FORMAT_FCD:
+ self._attach_volume_fcd(connection_info, instance)
else:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
@@ -503,7 +571,7 @@ class VMwareVolumeOps(object):
LOG.debug("_detach_volume_vmdk: %s", connection_info,
instance=instance)
data = connection_info['data']
- volume_ref = self._get_volume_ref(data['volume'])
+ volume_ref = self._get_volume_ref(data)
device = self._get_vmdk_backed_disk_device(vm_ref, data)
@@ -558,6 +626,20 @@ class VMwareVolumeOps(object):
self.detach_disk_from_vm(vm_ref, instance, device, destroy_disk=True)
LOG.debug("Detached ISCSI: %s", connection_info, instance=instance)
+ def _detach_volume_fcd(self, connection_info, instance):
+ """Detach fcd volume storage to VM instance."""
+ vm_ref = vm_util.get_vm_ref(self._session, instance)
+ data = connection_info['data']
+ adapter_type = data['adapter_type']
+
+ if adapter_type == constants.ADAPTER_TYPE_IDE:
+ state = vm_util.get_vm_state(self._session, instance)
+ if state != power_state.SHUTDOWN:
+ raise exception.Invalid(_('%s does not support disk '
+ 'hotplug.') % adapter_type)
+
+ vm_util.detach_fcd(self._session, vm_ref, data['id'])
+
def detach_volume(self, connection_info, instance):
"""Detach volume storage to VM instance."""
driver_type = connection_info['driver_volume_type']
@@ -567,6 +649,8 @@ class VMwareVolumeOps(object):
self._detach_volume_vmdk(connection_info, instance)
elif driver_type == constants.DISK_FORMAT_ISCSI:
self._detach_volume_iscsi(connection_info, instance)
+ elif driver_type == constants.DISK_FORMAT_FCD:
+ self._detach_volume_fcd(connection_info, instance)
else:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
@@ -580,7 +664,7 @@ class VMwareVolumeOps(object):
vm_ref = vm_util.get_vm_ref(self._session, instance)
data = connection_info['data']
# Get the volume ref
- volume_ref = self._get_volume_ref(data['volume'])
+ volume_ref = self._get_volume_ref(data)
# Pick the resource pool on which the instance resides. Move the
# volume to the datastore of the instance.
res_pool = self._get_res_pool_of_vm(vm_ref)
diff --git a/nova/virt/zvm/driver.py b/nova/virt/zvm/driver.py
index c3552ecc38..a1fa721515 100644
--- a/nova/virt/zvm/driver.py
+++ b/nova/virt/zvm/driver.py
@@ -46,6 +46,7 @@ class ZVMDriver(driver.ComputeDriver):
"""z/VM implementation of ComputeDriver."""
capabilities = {
"supports_pcpus": False,
+ "supports_remote_managed_ports": False,
# Image type support flags
"supports_image_type_aki": False,
diff --git a/nova/virt/zvm/hypervisor.py b/nova/virt/zvm/hypervisor.py
index fb7cbc5a18..8a2c49d34b 100644
--- a/nova/virt/zvm/hypervisor.py
+++ b/nova/virt/zvm/hypervisor.py
@@ -131,7 +131,7 @@ class Hypervisor(object):
def guest_get_console_output(self, name):
"""get console out put of the given instance
- :returns: The output of the console of the instace, in string format.
+ :returns: The output of the console of the instance, in string format.
"""
return self._reqh.call('guest_get_console_output', name)
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index c5de961d28..01efcfec19 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -888,19 +888,23 @@ class API(object):
@retrying.retry(stop_max_attempt_number=5,
retry_on_exception=lambda e:
(isinstance(e, cinder_exception.ClientException) and
- e.code == 500))
+ e.code in (500, 504)))
def attachment_delete(self, context, attachment_id):
try:
cinderclient(
context, '3.44', skip_version_check=True).attachments.delete(
attachment_id)
except cinder_exception.ClientException as ex:
- with excutils.save_and_reraise_exception():
- LOG.error('Delete attachment failed for attachment '
- '%(id)s. Error: %(msg)s Code: %(code)s',
- {'id': attachment_id,
- 'msg': str(ex),
- 'code': getattr(ex, 'code', None)})
+ if ex.code == 404:
+ LOG.warning('Attachment %(id)s does not exist. Ignoring.',
+ {'id': attachment_id})
+ else:
+ with excutils.save_and_reraise_exception():
+ LOG.error('Delete attachment failed for attachment '
+ '%(id)s. Error: %(msg)s Code: %(code)s',
+ {'id': attachment_id,
+ 'msg': str(ex),
+ 'code': getattr(ex, 'code', None)})
@translate_attachment_exception
def attachment_complete(self, context, attachment_id):
@@ -924,3 +928,9 @@ class API(object):
{'id': attachment_id,
'msg': str(ex),
'code': getattr(ex, 'code', None)})
+
+ @translate_volume_exception
+ def reimage_volume(self, context, volume_id, image_id,
+ reimage_reserved=False):
+ cinderclient(context, '3.68').volumes.reimage(
+ volume_id, image_id, reimage_reserved)
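A hypothetical call site for the new reimage_volume() helper (not part of this patch; the IDs are placeholders) would look roughly like this:

    from nova import context as nova_context
    from nova.volume import cinder

    ctxt = nova_context.get_admin_context()
    volume_api = cinder.API()

    # Ask Cinder (API microversion 3.68) to rewrite the boot volume with
    # the new image; reimage_reserved is forwarded to Cinder as-is.
    volume_api.reimage_volume(
        ctxt,
        volume_id='2c0a5e04-0000-4000-8000-000000000001',  # placeholder
        image_id='6a7b8c9d-0000-4000-8000-000000000002',   # placeholder
        reimage_reserved=True)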
diff --git a/nova/weights.py b/nova/weights.py
index 97dc7d3291..75d51dc5f7 100644
--- a/nova/weights.py
+++ b/nova/weights.py
@@ -19,9 +19,14 @@ Pluggable Weighing support
import abc
+from oslo_log import log as logging
+
from nova import loadables
+LOG = logging.getLogger(__name__)
+
+
def normalize(weight_list, minval=None, maxval=None):
"""Normalize the values in a list between 0 and 1.0.
@@ -128,13 +133,40 @@ class BaseWeightHandler(loadables.BaseLoader):
for weigher in weighers:
weights = weigher.weigh_objects(weighed_objs, weighing_properties)
+ LOG.debug(
+ "%s: raw weights %s",
+ weigher.__class__.__name__,
+ {(obj.obj.host, obj.obj.nodename): weight
+ for obj, weight in zip(weighed_objs, weights)}
+ )
+
# Normalize the weights
- weights = normalize(weights,
- minval=weigher.minval,
- maxval=weigher.maxval)
+ weights = list(
+ normalize(
+ weights, minval=weigher.minval, maxval=weigher.maxval))
+
+ LOG.debug(
+ "%s: normalized weights %s",
+ weigher.__class__.__name__,
+ {(obj.obj.host, obj.obj.nodename): weight
+ for obj, weight in zip(weighed_objs, weights)}
+ )
+
+ log_data = {}
for i, weight in enumerate(weights):
obj = weighed_objs[i]
- obj.weight += weigher.weight_multiplier(obj.obj) * weight
+ multiplier = weigher.weight_multiplier(obj.obj)
+ weigher_score = multiplier * weight
+ obj.weight += weigher_score
+
+ log_data[(obj.obj.host, obj.obj.nodename)] = (
+ f"{multiplier} * {weight}")
+
+ LOG.debug(
+ "%s: score (multiplier * weight) %s",
+ weigher.__class__.__name__,
+ {name: log for name, log in log_data.items()}
+ )
return sorted(weighed_objs, key=lambda x: x.weight, reverse=True)
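Purely as an illustration of the arithmetic the handler performs (numbers made up, not taken from the patch):

    raw = [200.0, 50.0, 125.0]           # raw weights from one weigher
    minval, maxval = min(raw), max(raw)  # 50.0 and 200.0

    # normalize() maps each raw value into the range [0.0, 1.0]
    normalized = [(w - minval) / (maxval - minval) for w in raw]
    # -> [1.0, 0.0, 0.5]

    multiplier = 2.0                     # weigher.weight_multiplier(obj)
    scores = [multiplier * w for w in normalized]
    # -> [2.0, 0.0, 1.0]; these per-weigher scores are what the new debug
    # logging records and what gets accumulated into obj.weight before the
    # final sort.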
diff --git a/playbooks/ceph/glance-copy-policy.yaml b/playbooks/ceph/glance-copy-policy.yaml
deleted file mode 100644
index 41654a103d..0000000000
--- a/playbooks/ceph/glance-copy-policy.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-- hosts: controller
- tasks:
- - name: create local.sh
- become: yes
- blockinfile:
- path: /opt/stack/devstack/local.sh
- create: True
- mode: 0777
- block: |
- # This policy is default to admin only in glance. Override
- # here to allow everyone and every type of image (private
- # or public) to copy. This way we will be able to test copy
- # image via non-admin as well as on private images.
- echo $'"copy_image": ""' >> /etc/glance/policy.yaml
- sudo systemctl restart 'devstack@g-*'
diff --git a/playbooks/ceph/glance-setup.yaml b/playbooks/ceph/glance-setup.yaml
new file mode 100644
index 0000000000..5792c72237
--- /dev/null
+++ b/playbooks/ceph/glance-setup.yaml
@@ -0,0 +1,39 @@
+- hosts: controller
+ tasks:
+ - name: create local.sh
+ become: yes
+ blockinfile:
+ path: /opt/stack/devstack/local.sh
+ create: True
+ mode: 0777
+ block: |
+ # Delete all existing images
+ source /opt/stack/devstack/openrc admin
+ for img in $(openstack image list -f value -c ID); do
+ openstack image show $img
+ echo Deleting $img
+ openstack image delete $img
+ done
+
+ # Inflate our cirros image to 1G raw
+ arch=$(uname -m)
+ image=$(ls /opt/stack/devstack/files/cirros*${arch}-disk.img | tail -n1)
+ rawimage="/opt/stack/devstack/files/cirros-raw.img"
+ qemu-img convert -O raw "$image" "$rawimage"
+ truncate --size $((950 << 20)) "$rawimage"
+
+ # Upload it to glance as the sole image available so tempest
+ # config will find it. Wait ten seconds after doing this
+ # before the restart below.
+ openstack image create --container-format bare --disk-format raw --public "cirros-raw" < "$rawimage"
+ sleep 10
+ openstack image list
+ openstack image show cirros-raw
+
+ # This policy defaults to admin-only in glance. Override it
+ # here to allow everyone to copy every type of image (private
+ # or public). This way we will be able to test copying an
+ # image as a non-admin as well as on private images.
+ echo $'"copy_image": ""' >> /etc/glance/policy.yaml
+ sudo systemctl restart 'devstack@g-*'
+
diff --git a/playbooks/nova-emulation/pre.yaml b/playbooks/nova-emulation/pre.yaml
new file mode 100644
index 0000000000..f763f82458
--- /dev/null
+++ b/playbooks/nova-emulation/pre.yaml
@@ -0,0 +1,35 @@
+- hosts: controller
+ tasks:
+ - name: create local.sh
+ become: yes
+ blockinfile:
+ path: /opt/stack/devstack/local.sh
+ create: True
+ mode: 0777
+ block: |
+ #!/bin/bash
+ sudo apt update
+ sudo apt install -yy qemu-system qemu-efi-aarch64 qemu-efi-arm
+ # This changes the default image metadata to enable emulation
+ source /opt/stack/devstack/openrc admin
+
+ image_id=$(openstack image list -f value -c ID | awk 'NR==1{print $1}')
+
+ echo "Setting required image metadata properties"
+ openstack image set --property hw_emulation_architecture=aarch64 ${image_id}
+ openstack image set --property hw_firmware_type=uefi ${image_id}
+ openstack image set --property hw_machine_type=virt ${image_id}
+
+ meta_props=$(openstack image show ${image_id} | grep properties | sed 's/,\||/\n/g' | grep hw_)
+
+ for prop in ${meta_props};
+ do
+ if [ -z "${prop}" ]
+ then
+ echo "Image Properties not set"
+ exit 5
+ else
+ echo ${prop}
+ fi
+ done
+
diff --git a/releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml b/releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml
new file mode 100644
index 0000000000..b5232f5ea2
--- /dev/null
+++ b/releasenotes/notes/Do-not-send-mtu-value-in-metadata-for-networks-with-enabled-dhcp-641506f2a13b540f.yaml
@@ -0,0 +1,5 @@
+---
+other:
+ - |
+ For networks which have any subnets with DHCP enabled, the MTU value is not
+ sent in the metadata. In such cases the MTU is configured through the DHCP server.
diff --git a/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml b/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml
new file mode 100644
index 0000000000..b370889171
--- /dev/null
+++ b/releasenotes/notes/add-spice-compression-support-e41676f445544e8d.yaml
@@ -0,0 +1,23 @@
+---
+features:
+ - |
+ The following SPICE-related options are added to the ``spice``
+ configuration group of a Nova configuration:
+
+ - ``image_compression``
+ - ``jpeg_compression``
+ - ``zlib_compression``
+ - ``playback_compression``
+ - ``streaming_mode``
+
+ These configuration options can be used to enable and set the
+ SPICE compression settings for libvirt (QEMU/KVM) provisioned
+ instances. Each configuration option is optional and can be set
+ explicitly to configure the associated SPICE compression setting
+ for libvirt. If none of these options are set, then none
+ of the SPICE compression settings will be configured for libvirt,
+ which corresponds to the behavior before this change. In this case,
+ the built-in defaults from the libvirt backend (e.g. QEMU) are used.
+
+ Note that these options are only taken into account if SPICE support
+ is enabled (and VNC support is disabled).
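
A hedged ``nova.conf`` sketch of how these options might be combined; the option names come from the note above, while the example values are assumptions and should be checked against the configuration reference::

    [spice]
    # SPICE must be enabled (and VNC disabled) for these settings to apply
    enabled = True
    image_compression = auto_glz      # assumed value
    jpeg_compression = auto           # assumed value
    zlib_compression = auto           # assumed value
    playback_compression = True
    streaming_mode = filter           # assumed value
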
diff --git a/releasenotes/notes/add-vmware-fcd-support-822edccb0e38bc37.yaml b/releasenotes/notes/add-vmware-fcd-support-822edccb0e38bc37.yaml
new file mode 100644
index 0000000000..677ed056f2
--- /dev/null
+++ b/releasenotes/notes/add-vmware-fcd-support-822edccb0e38bc37.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - |
+ Added support for VMware VStorageObject based volumes in
+ VMware vCenter driver. vSphere version 6.5 is required.
diff --git a/releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml b/releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml
new file mode 100644
index 0000000000..47c6b38265
--- /dev/null
+++ b/releasenotes/notes/add-volume-rebuild-b973562ea8f49347.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Added support for rebuilding a volume-backed instance with a different
+ image. This is achieved by reimaging the boot volume i.e. writing new
+ image on the boot volume at cinder side.
+ Previously rebuilding volume-backed instances with same image was
+ possible but this feature allows rebuilding volume-backed instances
+ with a different image than the existing one in the boot volume.
+ This is supported starting from API microversion 2.93.
diff --git a/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml b/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml
new file mode 100644
index 0000000000..6c5bc98046
--- /dev/null
+++ b/releasenotes/notes/allowing-target-state-for-evacuate-d4c1912c481973d6.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Starting with v2.95, any evacuated instance will be stopped at the
+ destination. The required minimum version for Nova computes is
+ 27.0.0 (antelope 2023.1). Operators can still continue using the
+ previous behavior by selecting a microversion below v2.95.
+upgrade:
+ - |
+ Operators will have to consider upgrading compute hosts to Nova
+ 27.0.0 (antelope 2023.1) in order to take advantage of the new
+ (microversion v2.95) evacuate API behavior. An exception will be
+ raised for older versions.
diff --git a/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml b/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml
new file mode 100644
index 0000000000..66890684af
--- /dev/null
+++ b/releasenotes/notes/antelope-prelude-4a99907b00e739f8.yaml
@@ -0,0 +1,51 @@
+---
+prelude: |
+ The OpenStack 2023.1 (Nova 27.0.0) release includes many new features and
+ bug fixes. Please be sure to read the upgrade section which describes the
+ required actions to upgrade your cloud from 26.0.0 (Zed) to 27.0.0 (2023.1).
+ As a reminder, OpenStack 2023.1 is our first `Skip-Level-Upgrade Release`__
+ (from now on called a `SLURP release`) where you can perform a rolling
+ upgrade of your compute services from OpenStack Yoga as an experimental
+ feature. The next SLURP release will be 2024.1.
+
+ .. __: https://governance.openstack.org/tc/resolutions/20220210-release-cadence-adjustment.html
+
+ There are a few major changes worth mentioning. This is not an exhaustive
+ list:
+
+ - The latest Compute API microversion supported for 2023.1 is `v2.95`__.
+
+ .. __: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-2023.1
+
+ - `PCI devices can now be scheduled <https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#pci-tracking-in-placement>`_
+ by Nova using the Placement API on an opt-in basis. This will help the
+ nova-scheduler service to better schedule flavors that use PCI
+ (non-Neutron related) resources, will generate fewer reschedules if an
+ instance cannot be created on a candidate, and will help the nova-scheduler
+ to not miss valid candidates if the list is too large.
+
+ - Operators can now ask Nova to `manage the power consumption of dedicated
+ CPUs <https://docs.openstack.org/nova/latest/admin/cpu-topologies.html#configuring-cpu-power-management-for-dedicated-cores>`_
+ so as to either offline them or change their governor if they're
+ currently not in use by any instance or if the instance is stopped.
+
+ - Nova will prevent unexpected compute service renames by `persisting a unique
+ compute UUID on local disk <https://docs.openstack.org/nova/latest/admin/compute-node-identification.html>`_.
+ This stored UUID will be considered the source of truth for knowing whether
+ the compute service hostname has been modified or not. As a reminder,
+ changing a compute hostname is forbidden, particularly when the compute is
+ currently running instances.
+
+ - `SPICE consoles <https://docs.openstack.org/nova/latest/admin/remote-console-access.html#spice-console>`_
+ can now be configured with compression settings which include choices of the
+ compression algorithm and the compression mode.
+
+ - Fully-Qualified Domain Names are now considered valid for an instance
+ hostname if you use the 2.94 API microversion.
+
+ - By opting into 2.95 API microversion, evacuated instances will remain
+ stopped on the destination host until manually started.
+
+ - Nova APIs now `support new RBAC policies by default <https://docs.openstack.org/nova/latest/configuration/policy.html>`_
+ and scopes. See our `Policy Concepts documentation <https://docs.openstack.org/nova/latest/configuration/policy-concepts.html>`_
+ for further details.
diff --git a/releasenotes/notes/bp-boot-vm-with-unaddressed-port-4cb05bb6dc859d98.yaml b/releasenotes/notes/bp-boot-vm-with-unaddressed-port-4cb05bb6dc859d98.yaml
new file mode 100644
index 0000000000..55c43ddd4c
--- /dev/null
+++ b/releasenotes/notes/bp-boot-vm-with-unaddressed-port-4cb05bb6dc859d98.yaml
@@ -0,0 +1,3 @@
+features:
+ - Nova now allows creating an instance with a non-deferred port that has
+ no fixed IP address if the network backend has level-2 connectivity.
diff --git a/releasenotes/notes/bp-keypair-generation-removal-3004a8643dcd1fd9.yaml b/releasenotes/notes/bp-keypair-generation-removal-3004a8643dcd1fd9.yaml
new file mode 100644
index 0000000000..7adbeb4601
--- /dev/null
+++ b/releasenotes/notes/bp-keypair-generation-removal-3004a8643dcd1fd9.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ The 2.92 microversion makes the following changes:
+
+ * Make public_key a mandatory parameter for keypair creation. This means
+ that, starting with this microversion, Nova no longer supports automatic
+ keypair generation. Only imports will be possible.
+ * Allow 2 new special characters: '@' and '.' (dot),
+ in addition to the previously allowed characters ``[a-z][A-Z][0-9][_- ]``
diff --git a/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml b/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml
new file mode 100644
index 0000000000..95422fce67
--- /dev/null
+++ b/releasenotes/notes/bp-libvirt-cpu-state-mgmt-fbc9c1f9f473003c.yaml
@@ -0,0 +1,18 @@
+---
+features:
+ - |
+ It is now possible to configure nova-compute services using the libvirt
+ driver by setting ``[libvirt]cpu_power_management`` to ``True`` in order to
+ let the service power physical CPUs down or up depending on whether those
+ CPUs are pinned to instances. In order to support this feature, the
+ compute service needs to be configured with ``[compute]cpu_dedicated_set``.
+ If so, all the related CPUs will be powered down until they are used by an
+ instance, at which point the related pinned CPUs will be powered up just
+ before starting the guest. If ``[compute]cpu_dedicated_set`` isn't set, then
+ the compute service will refuse to start.
+ By default the power strategy will offline CPUs when powering down and
+ online the CPUs when powering up, but another strategy is possible by using
+ ``[libvirt]cpu_power_management_strategy=governor``, which will instead
+ modify the related CPU governor using the ``[libvirt]cpu_power_governor_low``
+ and ``[libvirt]cpu_power_governor_high`` configuration values (respective
+ defaults being ``powersave`` and ``performance``).
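
A hedged ``nova.conf`` sketch combining the options described above; the option names are taken from the note, while the CPU range is only an illustrative assumption for a host with dedicated cores 4-15::

    [compute]
    # required for CPU power management (example range, host specific)
    cpu_dedicated_set = 4-15

    [libvirt]
    cpu_power_management = True
    # optional: adjust governors instead of offlining cores
    cpu_power_management_strategy = governor
    cpu_power_governor_low = powersave
    cpu_power_governor_high = performance
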
diff --git a/releasenotes/notes/bp-pci-device-tracking-in-placement-75ee1d20a57662f2.yaml b/releasenotes/notes/bp-pci-device-tracking-in-placement-75ee1d20a57662f2.yaml
new file mode 100644
index 0000000000..6d30f7c398
--- /dev/null
+++ b/releasenotes/notes/bp-pci-device-tracking-in-placement-75ee1d20a57662f2.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - |
+ Nova started tracking PCI devices in Placement. This is an optional feature
+ disabled by default while we are implementing inventory tracking and
+ scheduling support for both PCI passthrough devices and SR-IOV devices
+ consumed via Neutron ports. Please read our
+ `documentation <https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#pci-tracking-in-placement>`_
+ for more details on what is supported and how this feature can be enabled.
diff --git a/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml b/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml
new file mode 100644
index 0000000000..7a9e53ed26
--- /dev/null
+++ b/releasenotes/notes/bp-pci-device-tracking-in-placement-antelope-082310a2b0337e0e.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Since 26.0.0 (Zed) Nova supports tracking PCI devices in Placement. Now
+ Nova also supports scheduling flavor based PCI device requests via
+ Placement. This support is disabled by default. Please read the
+ `documentation <https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#pci-tracking-in-placement>`_
+ for more details on what is supported and how this feature can be enabled.
diff --git a/releasenotes/notes/bp-pick-guest-arch-based-on-host-arch-in-libvirt-driver-f087c3799d388bb6.yaml b/releasenotes/notes/bp-pick-guest-arch-based-on-host-arch-in-libvirt-driver-f087c3799d388bb6.yaml
new file mode 100644
index 0000000000..d10f753180
--- /dev/null
+++ b/releasenotes/notes/bp-pick-guest-arch-based-on-host-arch-in-libvirt-driver-f087c3799d388bb6.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Image metadata now includes the ``hw_emulation_architecture`` property.
+ This allows an operator to define the emulated CPU architecture for
+ an image, and nova will deploy accordingly.
+
+ See the `spec`_ for more details and reasoning.
+
+ .. _spec: https://specs.openstack.org/openstack/nova-specs/specs/yoga/approved/pick-guest-arch-based-on-host-arch-in-libvirt-driver.html \ No newline at end of file
diff --git a/releasenotes/notes/bp-policy-defaults-refresh-2-473c70f641f9f397.yaml b/releasenotes/notes/bp-policy-defaults-refresh-2-473c70f641f9f397.yaml
new file mode 100644
index 0000000000..4c796fd75f
--- /dev/null
+++ b/releasenotes/notes/bp-policy-defaults-refresh-2-473c70f641f9f397.yaml
@@ -0,0 +1,30 @@
+---
+features:
+ - |
+ The Nova policies have been modified to isolate the system and project
+ level API policies. This means system users will be allowed to perform
+ operations on system level resources and will not be allowed any
+ operation on project level resources. Project level API operations will be
+ performed by project scoped users.
+ Currently, nova supports:
+
+ * ``system admin``
+ * ``project admin``
+ * ``project member``
+ * ``project reader``
+
+ For the details on what changed from the existing policy, please refer to the
+ `RBAC new guidelines`_. We have implemented only phase-1 of the
+ `RBAC new guidelines`_.
+ Currently, scope checks and new defaults are disabled by default. You can
+ enable them by switching the below config options in the ``nova.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=True
+ enforce_scope=True
+
+ Please refer to `Policy New Defaults`_ for details about the new policy
+ defaults and the migration plan.
+
+ .. _RBAC new guidelines: https://governance.openstack.org/tc/goals/selected/consistent-and-secure-rbac.html#phase-1
+ .. _Policy New Defaults: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html
diff --git a/releasenotes/notes/bp-unified-limits-656b55863df22e16.yaml b/releasenotes/notes/bp-unified-limits-656b55863df22e16.yaml
new file mode 100644
index 0000000000..9a295f2e16
--- /dev/null
+++ b/releasenotes/notes/bp-unified-limits-656b55863df22e16.yaml
@@ -0,0 +1,43 @@
+---
+other:
+ - |
+ This release includes work in progress support for Keystone's unified
+ limits. This should not be used in production. It is included so we can
+ collect early feedback from operators around the performance of the new
+ limits system. There is currently no way to export your existing quotas
+ and import them into Keystone. There is also no proxy API to allow you
+ to update unified limits via Nova APIs. All the update APIs behave as if
+ you are using the noop driver when the unified limits quota driver is
+ configured.
+
+ When you enable unified limits, those are configured in Keystone against
+ the Nova endpoint, using the names:
+
+ * ``class:VCPU``
+ * ``servers``
+ * ``class:MEMORY_MB``
+ * ``server_metadata_items``
+ * ``server_injected_files``
+ * ``server_injected_file_content_bytes``
+ * ``server_injected_file_path_bytes``
+ * ``server_key_pairs``
+ * ``server_groups``
+ * ``server_group_members``
+
+ All other resource classes requested via flavors are also now supported as
+ unified limits. Note that nova configuration is ignored, as the default
+ limits come from the limits registered for the Nova endpoint in Keystone.
+
+ All previous quotas other than ``cores``, ``instances`` and ``ram`` are
+ still enforced, but the limit can only be changed globally in Keystone as
+ registered limits. There are no per project or per user overrides
+ possible.
+
+ Work in progress support for Keystone's unified limits
+ can be enabled via ``[quota]/driver=nova.quota.UnifiedLimitsDriver``.
+
+ A config option ``[workarounds]unified_limits_count_pcpu_as_vcpu`` is
+ available for operators who require the legacy quota usage behavior where
+ VCPU = VCPU + PCPU. Note that if ``PCPU`` is specified in the flavor
+ explicitly, it will be expected to have its own unified limit registered
+ and PCPU usage will *not* be merged into VCPU usage.
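
A hedged ``nova.conf`` sketch of opting into this work-in-progress driver, using only the option names quoted above::

    [quota]
    driver = nova.quota.UnifiedLimitsDriver

    [workarounds]
    # optional: legacy-style accounting where PCPU usage is merged into VCPU
    unified_limits_count_pcpu_as_vcpu = True
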
diff --git a/releasenotes/notes/bp-unshelve_to_host-c9047d518eb67747.yaml b/releasenotes/notes/bp-unshelve_to_host-c9047d518eb67747.yaml
new file mode 100644
index 0000000000..cde6988031
--- /dev/null
+++ b/releasenotes/notes/bp-unshelve_to_host-c9047d518eb67747.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Microversion 2.91 adds the optional parameter ``host`` to
+ the ``unshelve`` server action API.
+ Specifying a destination host is only
+ allowed for admin users, and the server status must be ``SHELVED_OFFLOADED``,
+ otherwise an HTTP 400 (Bad Request) response is returned.
+ It also allows setting ``availability_zone`` to None to unpin a server
+ from its availability zone.
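
As an illustrative sketch only (the request bodies are inferred from the feature description, not quoted from the API reference, and ``host01`` is a placeholder), an admin could target a host or unpin the availability zone with ``POST /servers/{server_id}/action`` at microversion 2.91::

    {"unshelve": {"host": "host01"}}

    {"unshelve": {"availability_zone": null}}
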
diff --git a/releasenotes/notes/bug-1942329-22b08fa4b322881d.yaml b/releasenotes/notes/bug-1942329-22b08fa4b322881d.yaml
new file mode 100644
index 0000000000..496508ca13
--- /dev/null
+++ b/releasenotes/notes/bug-1942329-22b08fa4b322881d.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ As a fix for `bug 1942329 <https://bugs.launchpad.net/neutron/+bug/1942329>`_
+ nova now updates the MAC address of ``direct-physical`` ports during
+ move operations to reflect the MAC address of the physical device on the
+ destination host. Servers that were created before this fix need to be
+ moved, or the port needs to be detached and then re-attached, to synchronize
+ the MAC address.
diff --git a/releasenotes/notes/bug-1944619-fix-live-migration-rollback.yaml b/releasenotes/notes/bug-1944619-fix-live-migration-rollback.yaml
new file mode 100644
index 0000000000..b6c68ed49f
--- /dev/null
+++ b/releasenotes/notes/bug-1944619-fix-live-migration-rollback.yaml
@@ -0,0 +1,10 @@
+---
+fixes:
+ - |
+ Instances with hardware offloaded ovs ports no longer lose connectivity
+ after failed live migrations. The driver.rollback_live_migration_at_source
+ function is no longer called during pre_live_migration rollback
+ which previously resulted in connectivity loss following a failed live
+ migration. See `Bug 1944619`_ for more details.
+
+ .. _Bug 1944619: https://bugs.launchpad.net/nova/+bug/1944619
diff --git a/releasenotes/notes/bug-1958636-smm-check-and-enable.yaml b/releasenotes/notes/bug-1958636-smm-check-and-enable.yaml
new file mode 100644
index 0000000000..81afceeb5f
--- /dev/null
+++ b/releasenotes/notes/bug-1958636-smm-check-and-enable.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ [`bug 1958636 <https://bugs.launchpad.net/nova/+bug/1958636>`_]
+ Explicitly check for and enable SMM when firmware requires it.
+ Previously we assumed libvirt would do this for us but this is
+ not true in all cases.
diff --git a/releasenotes/notes/bug-1960230-cleanup-instances-dir-resize-56282e1b436a4908.yaml b/releasenotes/notes/bug-1960230-cleanup-instances-dir-resize-56282e1b436a4908.yaml
new file mode 100644
index 0000000000..7a89c66092
--- /dev/null
+++ b/releasenotes/notes/bug-1960230-cleanup-instances-dir-resize-56282e1b436a4908.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fixed bug `1960230 <https://bugs.launchpad.net/nova/+bug/1960230>`_ that
+ prevented resize of instances that had previously failed and not been
+ cleaned up.
diff --git a/releasenotes/notes/bug-1960401-504eb255253d966a.yaml b/releasenotes/notes/bug-1960401-504eb255253d966a.yaml
new file mode 100644
index 0000000000..ef5582543a
--- /dev/null
+++ b/releasenotes/notes/bug-1960401-504eb255253d966a.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ `Bug 1960401`_ is fixed. This bug could cause invalid `BlockDeviceMappings`
+ to accumulate in the database, which prevented the respective volumes from
+ being attached again to the instance.
+
+ .. _bug 1960401: https://bugs.launchpad.net/nova/+bug/1960401
diff --git a/releasenotes/notes/bug-1967157-extend-encrypted.yaml b/releasenotes/notes/bug-1967157-extend-encrypted.yaml
new file mode 100644
index 0000000000..8ff5f6a2f9
--- /dev/null
+++ b/releasenotes/notes/bug-1967157-extend-encrypted.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ Extending attached encrypted volumes that failed before because they were
+ not being decrypted using libvirt (any other than LUKS) now works as
+ expected and the new size will be visible within the instance. See
+ `Bug 1967157`_ for more details.
+
+ .. _Bug 1967157: https://bugs.launchpad.net/nova/+bug/1967157
diff --git a/releasenotes/notes/bug-1970383-segment-scheduling-permissions-92ba907b10a9eb1c.yaml b/releasenotes/notes/bug-1970383-segment-scheduling-permissions-92ba907b10a9eb1c.yaml
new file mode 100644
index 0000000000..88495079e7
--- /dev/null
+++ b/releasenotes/notes/bug-1970383-segment-scheduling-permissions-92ba907b10a9eb1c.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ `Bug #1970383 <https://bugs.launchpad.net/nova/+bug/1970383>`_: Fixes a
+ permissions error when using the
+ 'query_placement_for_routed_network_aggregates' scheduler variable, which
+ caused a traceback on instance creation for non-admin users.
diff --git a/releasenotes/notes/bug-1978372-optimized-numa-fitting-algorithm-5d5b922b0bdbf818.yaml b/releasenotes/notes/bug-1978372-optimized-numa-fitting-algorithm-5d5b922b0bdbf818.yaml
new file mode 100644
index 0000000000..3f42f70908
--- /dev/null
+++ b/releasenotes/notes/bug-1978372-optimized-numa-fitting-algorithm-5d5b922b0bdbf818.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ The algorithm that is used to see if a multi-NUMA guest fits on
+ a multi-NUMA host has been optimized to speed up the decision
+ on hosts with a high number of NUMA nodes (> 8). For details see
+ `bug 1978372`_
+
+ .. _bug 1978372: https://bugs.launchpad.net/nova/+bug/1978372
diff --git a/releasenotes/notes/bug-1978444-db46df5f3d5ea19e.yaml b/releasenotes/notes/bug-1978444-db46df5f3d5ea19e.yaml
new file mode 100644
index 0000000000..6c19804074
--- /dev/null
+++ b/releasenotes/notes/bug-1978444-db46df5f3d5ea19e.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ `Bug #1978444 <https://bugs.launchpad.net/nova/+bug/1978444>`_: Now nova
+ retries deleting a volume attachment in case Cinder API returns
+ ``504 Gateway Timeout``. Also, ``404 Not Found`` is now ignored and
+ leaves only a warning message.
diff --git a/releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml b/releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml
new file mode 100644
index 0000000000..a5a3b7c8c2
--- /dev/null
+++ b/releasenotes/notes/bug-1981813-vnic-type-change-9f3e16fae885b57f.yaml
@@ -0,0 +1,9 @@
+---
+fixes:
+ - |
+ `Bug #1981813 <https://bugs.launchpad.net/nova/+bug/1981813>`_: Now nova
+ detects if the ``vnic_type`` of a bound port has been changed in neutron
+ and leaves an ERROR message in the compute service log as such change on a
+ bound port is not supported. Also the restart of the nova-compute service
+ will no longer crash after such a port change. Nova will log an ERROR and
+ skip the initialization of the instance with such a port during startup.
diff --git a/releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml b/releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml
new file mode 100644
index 0000000000..943aa99a43
--- /dev/null
+++ b/releasenotes/notes/bug-1982284-libvirt-handle-no-ram-info-was-set-99784934ed80fd72.yaml
@@ -0,0 +1,11 @@
+---
+other:
+ - |
+ A workaround has been added to the libvirt driver to catch and pass
+ migrations that were previously failing with the error:
+
+ ``libvirt.libvirtError: internal error: migration was active, but no RAM info was set``
+
+ See `bug 1982284`_ for more details.
+
+ .. _bug 1982284: https://bugs.launchpad.net/nova/+bug/1982284
diff --git a/releasenotes/notes/bug-1983753-update-requestspec-pci_request-for-resize-a3c6b0a979db723f.yaml b/releasenotes/notes/bug-1983753-update-requestspec-pci_request-for-resize-a3c6b0a979db723f.yaml
new file mode 100644
index 0000000000..89edd12b3d
--- /dev/null
+++ b/releasenotes/notes/bug-1983753-update-requestspec-pci_request-for-resize-a3c6b0a979db723f.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ `Bug #1941005 <https://bugs.launchpad.net/nova/+bug/1941005>`_ is fixed.
+ During resize Nova now uses the PCI requests from the new flavor to select
+ the destination host.
diff --git a/releasenotes/notes/bug-1986838-pci-double-booking-1da71ea4399db65a.yaml b/releasenotes/notes/bug-1986838-pci-double-booking-1da71ea4399db65a.yaml
new file mode 100644
index 0000000000..7200290780
--- /dev/null
+++ b/releasenotes/notes/bug-1986838-pci-double-booking-1da71ea4399db65a.yaml
@@ -0,0 +1,8 @@
+---
+fixes:
+ - |
+ `Bug #1986838 <https://bugs.launchpad.net/nova/+bug/1986838>`_: Nova now
+ correctly schedules an instance that requests multiple PCI devices via
+ multiple PCI aliases in the flavor extra_spec when multiple similar devices
+ are requested but the compute host has only one such device matching with
+ each request individually.
diff --git a/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml b/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml
new file mode 100644
index 0000000000..0941dd7450
--- /dev/null
+++ b/releasenotes/notes/bug-1996995-qemu_monitor_announce_self-add-configurables-2b2f19d238442f72.yaml
@@ -0,0 +1,28 @@
+---
+fixes:
+ - |
+ Fixes `bug 1996995`_ in which VMs live migrated on certain VXLAN Arista
+ network fabrics were inaccessible until the switch arp cache expired.
+
+ A Nova workaround option of ``enable_qemu_monitor_announce_self`` was added
+ to fix `bug 1815989`_ which when enabled would interact with the QEMU
+ monitor and force a VM to announce itself.
+
+ On certain network fabrics, VMs that are live migrated remain inaccessible
+ via the network despite the QEMU monitor announce_self command successfully
+ being called.
+
+ It was noted that on Arista VXLAN fabrics, testing showed that it required
+ several attempts of running the QEMU announce_self monitor command before
+ the switch would acknowledge a VM's new location on the fabric.
+
+ This fix introduces two operator configurable options.
+ The first option sets the number of times the QEMU monitor announce_self
+ command is called - ``qemu_announce_self_count``
+
+ The second option allows operators to set the delay between the QEMU
+ announce_self commands in seconds for subsequent announce_self commands
+ with ``qemu_announce_self_interval``
+
+ .. _`bug 1996995`: https://bugs.launchpad.net/nova/+bug/1996995
+ .. _`bug 1815989`: https://bugs.launchpad.net/nova/+bug/1815989
diff --git a/releasenotes/notes/default-host-numa-strategy-to-spread-18668c6d80154042.yaml b/releasenotes/notes/default-host-numa-strategy-to-spread-18668c6d80154042.yaml
new file mode 100644
index 0000000000..aec87dc887
--- /dev/null
+++ b/releasenotes/notes/default-host-numa-strategy-to-spread-18668c6d80154042.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+ - |
+ During the triage of https://bugs.launchpad.net/nova/+bug/1978372
+ we compared the performance of nova's NUMA allocation strategies
+ as applied to large numbers of host and guest NUMA nodes.
+ Prior to ``Xena`` nova only supported a linear packing strategy.
+ In ``Xena`` ``[compute]/packing_host_numa_cells_allocation_strategy``
+ was introduced, maintaining the previous packing behavior by default.
+ The NUMA allocation strategy now defaults to spread.
+ The old behavior can be restored by defining:
+ ``[compute]/packing_host_numa_cells_allocation_strategy=true``
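
A hedged ``nova.conf`` sketch for operators who want to keep the pre-change behavior described above::

    [compute]
    # restore the previous packing behavior (the new default is spread)
    packing_host_numa_cells_allocation_strategy = true
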
diff --git a/releasenotes/notes/deprecate-passthrough_whitelist-config-name-0530d502c960d753.yaml b/releasenotes/notes/deprecate-passthrough_whitelist-config-name-0530d502c960d753.yaml
new file mode 100644
index 0000000000..a0c707def4
--- /dev/null
+++ b/releasenotes/notes/deprecate-passthrough_whitelist-config-name-0530d502c960d753.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The [pci]passthrough_whitelist config option is renamed to
+ [pci]device_spec. The old name is deprecated and aliased to the new one.
+ The old name will be removed in a future release.
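
A hedged ``nova.conf`` sketch of the renamed option; the JSON value is an illustrative assumption, not a recommendation::

    [pci]
    # formerly [pci]passthrough_whitelist
    device_spec = { "vendor_id": "8086", "product_id": "154d" }
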
diff --git a/releasenotes/notes/deprecate-use_forwarded_for-f7b24eaf130782b9.yaml b/releasenotes/notes/deprecate-use_forwarded_for-f7b24eaf130782b9.yaml
new file mode 100644
index 0000000000..4068fedf6a
--- /dev/null
+++ b/releasenotes/notes/deprecate-use_forwarded_for-f7b24eaf130782b9.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+ - |
+ The default ``api-paste.ini`` file has been updated and now the Metadata
+ API pipeline includes the ``HTTPProxyToWSGI`` middleware.
+
+deprecations:
+ - |
+ The ``[api] use_forwarded_for`` parameter has been deprecated. Instead of
+ using this parameter, add the ``HTTPProxyToWSGI`` middleware to api
+ pipelines, and ``[oslo_middleware] enable_proxy_headers_parsing = True``
+ to nova.conf.
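
A hedged ``nova.conf`` sketch of the replacement configuration suggested above, assuming the ``HTTPProxyToWSGI`` middleware is already present in the api-paste pipeline::

    [oslo_middleware]
    enable_proxy_headers_parsing = True
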
diff --git a/releasenotes/notes/drop-python-3-6-and-3-7-cd3bf1e945f05fd3.yaml b/releasenotes/notes/drop-python-3-6-and-3-7-cd3bf1e945f05fd3.yaml
new file mode 100644
index 0000000000..74ad7fd5e0
--- /dev/null
+++ b/releasenotes/notes/drop-python-3-6-and-3-7-cd3bf1e945f05fd3.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ Python 3.6 & 3.7 support has been dropped. The minimum version of Python now
+ supported by nova is Python 3.8.
diff --git a/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml b/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml
new file mode 100644
index 0000000000..72a6f861b6
--- /dev/null
+++ b/releasenotes/notes/enable-enforce-scope-and-new-defaults-14db8c75b263b599.yaml
@@ -0,0 +1,23 @@
+---
+upgrade:
+ - |
+ The Nova service now enables the new API policy (RBAC) defaults and scope
+ checks by default. The default values of the config options
+ ``[oslo_policy] enforce_scope`` and ``[oslo_policy] enforce_new_defaults``
+ have been changed to ``True``.
+
+ This means that if you are using a system-scoped token to access the Nova
+ API, the request will fail with a 403 error code. Also, new defaults will be
+ enforced by default. To know about the new defaults of each policy
+ rule, refer to the `Policy New Defaults`_. For more detail about the Nova
+ API policies changes, refer to `Policy Concepts`_.
+
+ If you want to disable them, modify the below config option values in the
+ ``nova.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=False
+ enforce_scope=False
+
+ .. _`Policy New Defaults`: https://docs.openstack.org/nova/latest/configuration/policy.html
+ .. _`Policy Concepts`: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html
diff --git a/releasenotes/notes/extra-sorting-for-host-cells-c03e37de1e57043b.yaml b/releasenotes/notes/extra-sorting-for-host-cells-c03e37de1e57043b.yaml
new file mode 100644
index 0000000000..ea70805314
--- /dev/null
+++ b/releasenotes/notes/extra-sorting-for-host-cells-c03e37de1e57043b.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+ Extra sorting was added to the ``numa_fit_instance_to_host`` function
+ to balance usage of the hypervisor's NUMA cells. The hypervisor's NUMA
+ cells with more free resources (CPU, RAM, PCI if requested)
+ will be used first (spread strategy) when the configuration option
+ ``packing_host_numa_cells_allocation_strategy`` is set to False.
+ The default value of the ``packing_host_numa_cells_allocation_strategy``
+ option is True, which leads to the packing strategy being used.
diff --git a/releasenotes/notes/fix-group-policy-validation-with-deleted-groups-4f685fd1d6b84192.yaml b/releasenotes/notes/fix-group-policy-validation-with-deleted-groups-4f685fd1d6b84192.yaml
new file mode 100644
index 0000000000..7f7d42bd0e
--- /dev/null
+++ b/releasenotes/notes/fix-group-policy-validation-with-deleted-groups-4f685fd1d6b84192.yaml
@@ -0,0 +1,13 @@
+---
+fixes:
+ - |
+ When the server group policy validation upcall is enabled,
+ nova will assert that the policy is not violated on move operations
+ and initial instance creation. As noted in `bug 1890244`_, if a
+ server was created in a server group and that group was later deleted,
+ the validation upcall would fail due to an uncaught exception. This
+ prevented evacuate and other move operations from functioning.
+ This has now been fixed and nova will
+ ignore deleted server groups.
+
+ .. _bug 1890244: https://bugs.launchpad.net/nova/+bug/1890244
diff --git a/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml b/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml
new file mode 100644
index 0000000000..4fd2cc1ca9
--- /dev/null
+++ b/releasenotes/notes/fix-ironic-scheduler-race-08cf8aba0365f512.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ Fixed an issue where placement returned ironic nodes that had just started
+ automatic cleaning as possible valid candidates. This is done by marking all
+ ironic nodes with an instance on them as reserved, such that nova only makes
+ them available once we have double checked that Ironic reports the node as
+ available. If you don't have automatic cleaning on, this might mean it takes
+ longer than normal for Ironic nodes to become available for new instances.
+ If you want the old behaviour, use the following workaround config:
+ ``[workarounds]skip_reserve_in_use_ironic_nodes=true``
diff --git a/releasenotes/notes/greendns-34df7f9fba952bcd.yaml b/releasenotes/notes/greendns-34df7f9fba952bcd.yaml
new file mode 100644
index 0000000000..d42795747c
--- /dev/null
+++ b/releasenotes/notes/greendns-34df7f9fba952bcd.yaml
@@ -0,0 +1,14 @@
+---
+fixes:
+ - |
+ During the Havana cycle it was discovered that eventlet
+ monkey patching of greendns broke IPv6
+ (https://bugs.launchpad.net/nova/+bug/1164822).
+ Since then nova has been disabling eventlet monkey patching
+ of greendns. Eventlet addressed the IPv6 limitation in v0.17
+ with the introduction of Python 3 support in 2015. Nova
+ however continued to disable it, which can result in slow DNS
+ queries blocking the entire nova API or other binaries,
+ because socket.getaddrinfo becomes a blocking call into glibc.
+ See https://bugs.launchpad.net/nova/+bug/1964149 for
+ more details.
diff --git a/releasenotes/notes/guest-iommu-device-4795c3a060aca424.yaml b/releasenotes/notes/guest-iommu-device-4795c3a060aca424.yaml
new file mode 100644
index 0000000000..314c2c0ffe
--- /dev/null
+++ b/releasenotes/notes/guest-iommu-device-4795c3a060aca424.yaml
@@ -0,0 +1,21 @@
+---
+features:
+ - |
+ The Libvirt driver can now add a virtual IOMMU device
+ to all created guests, when running on an x86 host and using the Q35
+ machine type or on AArch64.
+
+ To enable this, provide the ``hw:viommu_model`` flavor extra spec or the
+ equivalent ``hw_viommu_model`` image metadata property; when the guest CPU
+ architecture and OS allow it, the vIOMMU will be enabled in the Libvirt
+ driver. Supported values are intel|smmuv3|virtio|auto, defaulting to
+ ``auto``. ``auto`` will automatically select ``virtio`` if Libvirt supports
+ it, else ``intel`` on x86 (Q35) and ``smmuv3`` on AArch64. The vIOMMU config
+ will raise an invalid exception if the guest architecture is neither x86
+ (Q35) nor AArch64.
+
+ Note that enabling a vIOMMU might introduce significant performance
+ overhead. You can see a performance comparison table in the
+ `AMD vIOMMU session on KVM Forum 2021`_. For this reason, vIOMMU should only
+ be enabled for workloads that require it.
+
+ .. _`AMD vIOMMU session on KVM Forum 2021`: https://static.sched.com/hosted_files/kvmforum2021/da/vIOMMU%20KVM%20Forum%202021%20-%20v4.pdf
diff --git a/releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml b/releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml
new file mode 100644
index 0000000000..85b874fb69
--- /dev/null
+++ b/releasenotes/notes/hyperv-experimental-antelope-372e18a05cafc295.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The hyperv driver is marked as experimental and may be removed in a
+ future release. The driver is not tested by the OpenStack project and
+ does not have a clear maintainer.
diff --git a/releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml b/releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml
new file mode 100644
index 0000000000..31f2c70926
--- /dev/null
+++ b/releasenotes/notes/hypervisor-version-weigher-d0bba77e720edafe.yaml
@@ -0,0 +1,20 @@
+---
+features:
+ - |
+ A new hypervisor version weigher has been added to prefer selecting hosts
+ with newer hypervisors installed. For the libvirt driver, this is the version
+ of libvirt on the compute node, not the version of QEMU. As with all
+ weighers this is enabled by default and its behavior can be modified using
+ the new ``hypervisor_version_weight_multiplier`` config option in the
+ ``filter_scheduler`` section.
+upgrade:
+ - |
+ A new hypervisor version weigher has been added that will prefer selecting
+ hosts with a newer hypervisor installed. This can help simplify rolling
+ upgrades by preferring the already upgraded hosts when moving workloads around
+ using live or cold migration. To restore the old behavior either remove
+ the weigher from the list of enabled weighers or set
+ ``[filter_scheduler] hypervisor_version_weight_multiplier=0``. The default
+ value of ``hypervisor_version_weight_multiplier`` is 1, so only a mild
+ preference is given to newer hosts; higher values will make the effect
+ more pronounced and negative values will prefer older hosts.
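
A hedged ``nova.conf`` sketch; ``2.0`` is only an example value to strengthen the preference described above, while ``0`` disables the weigher entirely::

    [filter_scheduler]
    hypervisor_version_weight_multiplier = 2.0
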
diff --git a/releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml b/releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml
new file mode 100644
index 0000000000..46ebf0bd2d
--- /dev/null
+++ b/releasenotes/notes/ignore-instance-task-state-for-evacuation-e000f141d0153638.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ If the compute service is down on the source node and a user tries to stop
+ an instance, the instance gets stuck at powering-off, and evacuation then
+ fails with: Cannot 'evacuate' instance <instance-id> while it is in
+ task_state powering-off.
+ It is now possible for evacuation to ignore the VM task state.
+ For more details see: `bug 1978983`_
+
+ .. _`bug 1978983`: https://bugs.launchpad.net/nova/+bug/1978983 \ No newline at end of file
diff --git a/releasenotes/notes/lightos-fcafefdfd0939316.yaml b/releasenotes/notes/lightos-fcafefdfd0939316.yaml
new file mode 100644
index 0000000000..dc931412b7
--- /dev/null
+++ b/releasenotes/notes/lightos-fcafefdfd0939316.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Nova now supports integration with the Lightbits Labs
+ (http://www.lightbitslabs.com) LightOS storage solution.
+ LightOS is a software-defined, cloud native,
+ high-performance, clustered scale-out and redundant NVMe/TCP storage
+ that performs like local NVMe flash. \ No newline at end of file
diff --git a/releasenotes/notes/microversion-2-94-59649401d5763286.yaml b/releasenotes/notes/microversion-2-94-59649401d5763286.yaml
new file mode 100644
index 0000000000..d0927e6f75
--- /dev/null
+++ b/releasenotes/notes/microversion-2-94-59649401d5763286.yaml
@@ -0,0 +1,22 @@
+---
+features:
+ - |
+ The 2.94 microversion has been added. This microversion extends
+ microversion 2.90 by allowing Fully Qualified Domain Names (FQDN) wherever
+ the ``hostname`` is able to be specified. This consists of creating an
+ instance (``POST /servers``), updating an instance
+ (``PUT /servers/{id}``), or rebuilding an instance
+ (``POST /servers/{server_id}/action (rebuild)``). When using an FQDN as the
+ instance hostname, the ``[api]dhcp_domain`` configuration option must be
+ set to the empty string in order for the correct FQDN to appear in the
+ ``hostname`` field in the metadata API.
+
+upgrade:
+ - |
+ In order to make use of microversion 2.94's FQDN hostnames, the
+ ``[api]dhcp_domain`` config option must be set to the empty string. If
+ this is not done, the ``hostname`` field in the metadata API will be
+ incorrect, as it will include the value of ``[api]dhcp_domain`` appended to
+ the instance's FQDN. Note that simply not setting ``[api]dhcp_domain`` is
+ not enough, as it has a default value of ``novalocal``. It must explicitly
+ be set to the empty string.
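
A hedged ``nova.conf`` sketch of the required setting; note that the value is the empty string, not an unset option::

    [api]
    # default is "novalocal"; must be explicitly emptied for 2.94 FQDNs
    dhcp_domain =
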
diff --git a/releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml b/releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml
new file mode 100644
index 0000000000..f4361477de
--- /dev/null
+++ b/releasenotes/notes/multiple-config-files-with-mod_wsgi-f114ea5fdd8b9a51.yaml
@@ -0,0 +1,14 @@
+---
+fixes:
+ - |
+ Apache mod_wsgi does not support passing command-line arguments to the WSGI
+ application that it hosts. As a result, when the nova API or metadata API
+ were run under mod_wsgi it was not possible to use multiple config files
+ or non-default file names, e.g. nova-api.conf.
+ This has been addressed by the introduction of a new, optional, environment
+ variable ``OS_NOVA_CONFIG_FILES``. ``OS_NOVA_CONFIG_FILES`` is a ``;``
+ separated list of file paths relative to ``OS_NOVA_CONFIG_DIR``.
+ When unset, the default ``api-paste.ini`` and ``nova.conf`` will be used
+ from ``/etc/nova``. This is supported for the nova API and nova metadata
+ WSGI applications.
+
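
A hedged sketch of the expected variable format; how the variable is exported to the WSGI process is deployment specific and is left as an assumption here::

    # paths are resolved relative to OS_NOVA_CONFIG_DIR (default /etc/nova)
    OS_NOVA_CONFIG_FILES="nova.conf;nova-api.conf"
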
diff --git a/releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml b/releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml
new file mode 100644
index 0000000000..72d6e763aa
--- /dev/null
+++ b/releasenotes/notes/new_locked_memory_option-b68a031779366828.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Add new ``hw:locked_memory`` extra spec and ``hw_locked_memory`` image
+ property to lock memory on a libvirt guest. Locking memory marks the guest
+ memory allocations as unmovable and unswappable.
+ The ``hw:locked_memory`` extra spec and ``hw_locked_memory`` image property
+ accept boolean values in string format, such as 'Yes' or 'false'.
+ A `LockMemoryForbidden` exception will be raised if you set the lock memory
+ value but do not also set either the flavor extra spec
+ ``hw:mem_page_size`` or the image property ``hw_mem_page_size``,
+ so we can ensure that the scheduler can actually account for this correctly
+ and prevent out of memory events.
diff --git a/releasenotes/notes/nova-manage-image-property-26b2e3eaa2ef343b.yaml b/releasenotes/notes/nova-manage-image-property-26b2e3eaa2ef343b.yaml
new file mode 100644
index 0000000000..f3de63fc21
--- /dev/null
+++ b/releasenotes/notes/nova-manage-image-property-26b2e3eaa2ef343b.yaml
@@ -0,0 +1,17 @@
+---
+features:
+ - |
+ New ``nova-manage image_property`` commands have been added to help update
+ instance image properties that have become invalidated by a change of
+ instance machine type.
+
+ * The ``nova-manage image_property show`` command can be used to show the
+ current stored image property value for a given instance and property.
+
+ * The ``nova-manage image_property set`` command can be used to update the
+ image properties stored in the database for a given instance and
+ image property.
+
+ For more detail on command usage, see the machine type documentation:
+
+ https://docs.openstack.org/nova/latest/admin/hw-machine-type.html#device-bus-and-model-image-properties
diff --git a/releasenotes/notes/pci-vpd-capability-0d8039629db4afb8.yaml b/releasenotes/notes/pci-vpd-capability-0d8039629db4afb8.yaml
new file mode 100644
index 0000000000..0ca3518351
--- /dev/null
+++ b/releasenotes/notes/pci-vpd-capability-0d8039629db4afb8.yaml
@@ -0,0 +1,20 @@
+---
+features:
+ - |
+ Add VPD capability parsing support when a PCI VPD capability is exposed
+ via node device XML in Libvirt. The XML data from Libvirt is parsed and
+ formatted into a PCI device JSON dict that is sent to the Nova API and stored
+ in the extra_info column of a PciDevice.
+
+ The code gracefully handles the lack of the capability since it is optional
+ or Libvirt may not support it in a particular release.
+
+ A serial number is extracted from PCI VPD of network devices (if present)
+ and is sent to Neutron in port updates.
+
+ Libvirt supports parsing the VPD capability from PCI/PCIe devices and
+ exposing it via nodedev XML as of 7.9.0.
+
+ - https://libvirt.org/news.html#v7-9-0-2021-11-01
+ - https://libvirt.org/drvnodedev.html#VPDCap
+
diff --git a/releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml b/releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml
new file mode 100644
index 0000000000..7e2dccbbf4
--- /dev/null
+++ b/releasenotes/notes/port-binding-removed-shelved-offloaded-f1772a64be007b24.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ [`bug 1983471 <https://bugs.launchpad.net/nova/+bug/1983471>`_]
+ When offloading a shelved instance, the compute will now remove the
+ binding so instance ports will appear as "unbound" in neutron.
diff --git a/releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml b/releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml
new file mode 100644
index 0000000000..171b07d025
--- /dev/null
+++ b/releasenotes/notes/project-reader-rbac-8a1d11b3b2e776fd.yaml
@@ -0,0 +1,36 @@
+---
+features:
+ - |
+ The Nova policies have been modified to drop the system scope. Every
+ API policy is scoped to project. This means that system scoped users
+ will get a 403 permission denied error.
+
+ Also, the project reader role is ready to use. Users with reader role
+ can only perform the read-only operations within their project. This
+ role can be used for audit purposes.
+
+ Currently, nova supports the following roles:
+
+ * ``admin`` (Legacy admin)
+ * ``project member``
+ * ``project reader``
+
+ For the details on what changed from the existing policy, please refer
+ to the `RBAC new guidelines`_. We have implemented only phase-1 of the
+ `RBAC new guidelines`_.
+ Currently, scope checks and new defaults are disabled by default. You can
+ enable them by switching the below config options in the ``nova.conf`` file::
+
+ [oslo_policy]
+ enforce_new_defaults=True
+ enforce_scope=True
+
+ We recommend enabling both scope checks and the new defaults together,
+ otherwise you may experience some late failures with unclear error
+ messages.
+
+ Please refer to `Policy New Defaults`_ for details about the new policy
+ defaults and the migration plan.
+
+ .. _`RBAC new guidelines`: https://governance.openstack.org/tc/goals/selected/consistent-and-secure-rbac.html#phase-1
+ .. _`Policy New Defaults`: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html
diff --git a/releasenotes/notes/register-defaults-for-undefined-hw-image-properties-d86bcf99f4610239.yaml b/releasenotes/notes/register-defaults-for-undefined-hw-image-properties-d86bcf99f4610239.yaml
new file mode 100644
index 0000000000..40ac1e4d85
--- /dev/null
+++ b/releasenotes/notes/register-defaults-for-undefined-hw-image-properties-d86bcf99f4610239.yaml
@@ -0,0 +1,15 @@
+other:
+ - |
+ Default image properties for device buses and models are now persisted in
+ the instance system metadata for the following image properties:
+
+ * ``hw_cdrom_bus``
+ * ``hw_disk_bus``
+ * ``hw_input_bus``
+ * ``hw_pointer_model``
+ * ``hw_video_model``
+ * ``hw_vif_model``
+
+ Instance device buses and models will now remain stable across reboots and
+ will not be changed by new defaults in libosinfo or the OpenStack Nova
+ libvirt driver.
diff --git a/releasenotes/notes/remove-default-cputune-shares-values-85d5ddf4b8e24eaa.yaml b/releasenotes/notes/remove-default-cputune-shares-values-85d5ddf4b8e24eaa.yaml
new file mode 100644
index 0000000000..9dd0987bb8
--- /dev/null
+++ b/releasenotes/notes/remove-default-cputune-shares-values-85d5ddf4b8e24eaa.yaml
@@ -0,0 +1,15 @@
+upgrade:
+ - |
+ In the libvirt driver, the default value of the ``<cputune><shares>``
+ element has been removed, and is now left to libvirt to decide. This is
+ because allowed values are platform dependant, and the previous code was
+ not guaranteed to be supported on all platforms. If any of your flavors are
+ using the quota:cpu_shares extra spec, you may need to resize to a
+ supported value before upgrading.
+
+ To facilitate the transition to no Nova default for ``<cputune><shares>``,
+ its value will be removed during live migration unless a value is set in
+ the ``quota:cpu_shares`` extra spec. This can cause temporary CPU
+ starvation for the live migrated instance if other instances on the
+ destination host still have the old default ``<cputune><shares>`` value. To
+ fix this, hard reboot, cold migrate, or live migrate the other instances.
diff --git a/releasenotes/notes/remove-powervm-6132cc10255ca205.yaml b/releasenotes/notes/remove-powervm-6132cc10255ca205.yaml
new file mode 100644
index 0000000000..50c87bbe11
--- /dev/null
+++ b/releasenotes/notes/remove-powervm-6132cc10255ca205.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ The powervm virt driver has been removed. The driver was not tested by
+ the OpenStack project nor did it have clear maintainers and thus its
+ quality could not be ensured.
diff --git a/releasenotes/notes/remove-qos-queue-vmware-nsx-extension-208d72da23e7ae49.yaml b/releasenotes/notes/remove-qos-queue-vmware-nsx-extension-208d72da23e7ae49.yaml
new file mode 100644
index 0000000000..9c5fb49af8
--- /dev/null
+++ b/releasenotes/notes/remove-qos-queue-vmware-nsx-extension-208d72da23e7ae49.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - |
+ Support for the ``qos-queue`` extension provided by the vmware-nsx neutron
+ plugin for the VMWare NSX Manager has been removed. This extension was
+ removed from the vmware-nsx project when support for NSX-MH was removed in
+ 15.0.0.
diff --git a/releasenotes/notes/remove-sqlalchemy-migrate-907c200314884d81.yaml b/releasenotes/notes/remove-sqlalchemy-migrate-907c200314884d81.yaml
new file mode 100644
index 0000000000..c08080a806
--- /dev/null
+++ b/releasenotes/notes/remove-sqlalchemy-migrate-907c200314884d81.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ The legacy ``sqlalchemy-migrate`` migrations, which have been deprecated
+ since Wallaby, have been removed. There should be no end-user impact.
diff --git a/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
new file mode 100644
index 0000000000..7e80059b80
--- /dev/null
+++ b/releasenotes/notes/rescue-volume-based-instance-c6e3fba236d90be7.yaml
@@ -0,0 +1,6 @@
+---
+fixes:
+ - |
+ Fix rescuing volume based instance by adding a check for 'hw_rescue_disk'
+ and 'hw_rescue_device' properties in image metadata before attempting
+ to rescue instance.
diff --git a/releasenotes/notes/skip-compare-cpu-on-dest-6ae419ddd61fd0f8.yaml b/releasenotes/notes/skip-compare-cpu-on-dest-6ae419ddd61fd0f8.yaml
new file mode 100644
index 0000000000..e7cd4041b1
--- /dev/null
+++ b/releasenotes/notes/skip-compare-cpu-on-dest-6ae419ddd61fd0f8.yaml
@@ -0,0 +1,24 @@
+---
+issues:
+ - |
+ Nova's use of libvirt's compareCPU() API served its purpose over the
+ years, but its design limitations break live migration in subtle
+ ways. For example, the compareCPU() API compares against the host
+ physical CPUID. Some of the features from this CPUID are not
+ exposed by KVM, and then there are some features that KVM emulates
+ that are not in the host CPUID. The latter can cause bogus live
+ migration failures.
+
+ With QEMU >=2.9 and libvirt >= 4.4.0, libvirt will do the right
+ thing in terms of CPU compatibility checks on the destination host
+ during live migration. Nova satisfies these minimum version
+ requirements by a good margin. So, this workaround provides a way to
+ skip the CPU comparison check on the destination host before
+ migrating a guest, and let libvirt handle it correctly.
+
+ This workaround will be deprecated and removed once Nova replaces
+ the older libvirt APIs with their newer counterparts. The work is
+ being tracked via this `blueprint
+ cpu-selection-with-hypervisor-consideration`_.
+
+ .. _blueprint cpu-selection-with-hypervisor-consideration: https://blueprints.launchpad.net/nova/+spec/cpu-selection-with-hypervisor-consideration
diff --git a/releasenotes/notes/skip-hypervisor-version-check-on-lm-a87f2dcb4f8bf0f2.yaml b/releasenotes/notes/skip-hypervisor-version-check-on-lm-a87f2dcb4f8bf0f2.yaml
new file mode 100644
index 0000000000..00fe6a24c7
--- /dev/null
+++ b/releasenotes/notes/skip-hypervisor-version-check-on-lm-a87f2dcb4f8bf0f2.yaml
@@ -0,0 +1,13 @@
+---
+features:
+ - |
+ Adds a workaround that allows one to disable the hypervisor
+ version check on live migration. This workaround option can be
+ useful in certain scenarios when upgrading. E.g. if you want to
+ relocate all instances off a compute node due to an emergency
+ hardware issue, and you only have another old compute node ready at
+ the time.
+
+ To enable this, use the config attribute
+ ``[workarounds]skip_hypervisor_version_check_on_lm=True`` in
+ ``nova.conf``. The option defaults to ``False``.
diff --git a/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml b/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml
new file mode 100644
index 0000000000..fdeb593bd2
--- /dev/null
+++ b/releasenotes/notes/stable-compute-uuid-08663a0955616728.yaml
@@ -0,0 +1,19 @@
+---
+features:
+ - |
+ The compute manager now uses a local file to provide node uuid persistence
+ to guard against problems with renamed services, among other things.
+ Deployers wishing to ensure that *new* compute services get a predictable
+ uuid before initial startup may provision that file and nova will use it,
+ otherwise nova will generate and write one to a `compute_id` file in
+ `CONF.state_path` the first time it starts up. Accidental renames of a
+ compute node's hostname will be detected and the manager will exit to avoid
+ database corruption. Note that none of this applies to Ironic computes, as
+ they manage nodes and uuids differently.
+upgrade:
+ - |
+ Existing compute nodes will, upon upgrade, persist the uuid of the compute
+ node assigned to their hostname at first startup. Since this must match
+ what is currently in the database, it is important to let nova provision
+ this file from its database. Nova will only persist to a `compute_id` file
+ in the `CONF.state_path` directory, which should already be writable.
diff --git a/releasenotes/notes/too-old-compute-check-code-7dbcde45cfd23394.yaml b/releasenotes/notes/too-old-compute-check-code-7dbcde45cfd23394.yaml
new file mode 100644
index 0000000000..f8e0517401
--- /dev/null
+++ b/releasenotes/notes/too-old-compute-check-code-7dbcde45cfd23394.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+ The upgrade check tooling now returns a non-zero exit code in the presence
+ of compute node services that are too old. This is to avoid situations in
+ which Nova control services fail to start after an upgrade.
diff --git a/releasenotes/notes/update-initial-allocation-defaults-94106033b66b8fa0.yaml b/releasenotes/notes/update-initial-allocation-defaults-94106033b66b8fa0.yaml
new file mode 100644
index 0000000000..937c8d1c8a
--- /dev/null
+++ b/releasenotes/notes/update-initial-allocation-defaults-94106033b66b8fa0.yaml
@@ -0,0 +1,21 @@
+---
+upgrade:
+ - |
+ In this release the default values for the initial ram and cpu allocation
+ ratios have been updated to 1.0 and 4.0 respectively. This will not
+ affect any existing compute node resource providers but the new default
+ will take effect on the creation of new resource providers.
+other:
+ - |
+ The default initial allocation ratios enabled RAM overcommit by default
+ with a factor of ``1.5``. This value was chosen early in nova's history
+ as the predominant workload was web hosting or other lightweight
+ virtualization. Similarly the default initial CPU allocation ratio
+ defaulted to 16. As more demanding workloads from telco, enterprise,
+ scientific and governmental users became the norm, the initial values we
+ had chosen became less and less correct over time. These have now been
+ updated to reflect a more reasonable default for the majority of our users.
+ As of this release the initial ram allocation value is 1.0 disabling
+ overcommit by default for new compute nodes and the initial cpu allocation
+ ratio is now 4.0 which is a more reasonable overcommit for non idle
+ workloads.
diff --git a/releasenotes/notes/update-libvirt-enlightenments-for-windows-23abea98cc1db667.yaml b/releasenotes/notes/update-libvirt-enlightenments-for-windows-23abea98cc1db667.yaml
new file mode 100644
index 0000000000..c262be1527
--- /dev/null
+++ b/releasenotes/notes/update-libvirt-enlightenments-for-windows-23abea98cc1db667.yaml
@@ -0,0 +1,21 @@
+---
+features:
+ - |
+ The following enlightenments are now added by default to the libvirt XML for Windows guests:
+
+ * vpindex
+ * runtime
+ * synic
+ * reset
+ * frequencies
+ * reenlightenment
+ * tlbflush
+ * ipi
+ * evmcs
+
+ This adds to the list of already existing enlightenments, namely:
+
+ * relaxed
+ * vapic
+ * spinlocks retries
+ * vendor_id spoofing
diff --git a/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml b/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml
new file mode 100644
index 0000000000..924e09a602
--- /dev/null
+++ b/releasenotes/notes/use-compareHypervisorCPU-b75c8f097cc73556.yaml
@@ -0,0 +1,12 @@
+---
+fixes:
+ - |
+ Nova's use of libvirt's compareCPU() API has become error-prone as
+ it doesn't take into account the host hypervisor's capabilities. With
+ QEMU >= 2.9 and libvirt >= 4.4.0, libvirt will do the right thing in
+ terms of CPU comparison checks via a new replacement API,
+ compareHypervisorCPU(). Nova's minimum required versions of QEMU and
+ libvirt exceed these requirements by a good margin.
+
+ This change replaces the usage of older API, compareCPU(), with the
+ new one, compareHypervisorCPU().
diff --git a/releasenotes/notes/use-multipath-0a0aa2b479e02370.yaml b/releasenotes/notes/use-multipath-0a0aa2b479e02370.yaml
new file mode 100644
index 0000000000..950afb0c80
--- /dev/null
+++ b/releasenotes/notes/use-multipath-0a0aa2b479e02370.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - |
+ The libvirt driver now allows using native NVMeoF multipathing for the
+ NVMeoF connector, via the ``[libvirt]/volume_use_multipath`` configuration
+ option in nova-cpu.conf, which defaults to False (disabled).
+
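A minimal illustrative sketch of enabling the option described above in
nova-cpu.conf (only the option name comes from the note; ``True`` is simply
the enabling value)::

    [libvirt]
    # Use multipath for volume connections handled by this compute service,
    # including native NVMeoF multipathing where the connector supports it.
    volume_use_multipath = True
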
diff --git a/nova/releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml b/releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml
index d6b6e45968..d6b6e45968 100644
--- a/nova/releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml
+++ b/releasenotes/notes/uwsgi-gmr-c00631db79836340.yaml
diff --git a/releasenotes/notes/validate-machine-type-0d5f3dbd1e2ace31.yaml b/releasenotes/notes/validate-machine-type-0d5f3dbd1e2ace31.yaml
new file mode 100644
index 0000000000..8f6518e57e
--- /dev/null
+++ b/releasenotes/notes/validate-machine-type-0d5f3dbd1e2ace31.yaml
@@ -0,0 +1,10 @@
+---
+fixes:
+ - |
+ Added validation for the image machine type property. APIs which use the
+ machine type for server creation, resize or rebuild will now raise an
+ InvalidMachineType exception with the message "provided machine type is
+ not supported by host" and suggest possible valid machine types in the
+ compute logs.
+ For more details see: `bug 1933097`_
+
+ .. _`bug 1933097`: https://bugs.launchpad.net/nova/+bug/1933097 \ No newline at end of file
diff --git a/releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml b/releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml
new file mode 100644
index 0000000000..2580f73d35
--- /dev/null
+++ b/releasenotes/notes/vdpa-move-ops-a7b3799807807a92.yaml
@@ -0,0 +1,11 @@
+---
+fixes:
+ - |
+ When vDPA was first introduced, move operations were implemented in the code
+ but untested either in a real environment or in functional tests. Due to
+ this gap nova elected to block move operations for instances with vDPA
+ devices. All move operations except for live migration have now been tested
+ and found to work, so the API blocks have been removed and functional tests
+ introduced. Other operations, such as suspend and live migration, require
+ code changes to support them and will be enabled as new features in the
+ future.
diff --git a/releasenotes/notes/vdpa-suspend-detach-and-live-migrate-e591e6a03a0c834d.yaml b/releasenotes/notes/vdpa-suspend-detach-and-live-migrate-e591e6a03a0c834d.yaml
new file mode 100644
index 0000000000..45092b5a00
--- /dev/null
+++ b/releasenotes/notes/vdpa-suspend-detach-and-live-migrate-e591e6a03a0c834d.yaml
@@ -0,0 +1,25 @@
+---
+features:
+ - |
+ vDPA support was first introduced in the 23.0.0 (Wallaby)
+ release with limited instance lifecycle operations. Nova now supports
+ all instance lifecycle operations including suspend, attach/detach
+ and hot-plug live migration.
+
+ QEMU and the Linux kernel do not currently support transparent
+ live migration of vDPA devices. Hot-plug live migration
+ unplugs the vDPA device on the source host before the VM is live migrated
+ and automatically hot-plugs the device on the destination after the
+ migration. While this can lead to packet loss, it enables live migration
+ to be used when needed until transparent live migration can be added
+ in a future release.
+
+ vDPA hot-plug live migration requires all compute services to be upgraded
+ to service level 63 before it is enabled. Similarly, suspend/resume needs
+ service level 63 and attach/detach requires service level 62.
+ As such these features will not be available during a rolling upgrade but
+ will become available once all hosts are upgraded to the 26.0.0 (Zed)
+ release.
+
+ With the addition of these features, all instance lifecycle operations are
+ now valid for VMs with vDPA neutron ports.
+
diff --git a/releasenotes/notes/vmware-add-ram-size-multiple-of-4-validation-9740bf60d59ce5e2.yaml b/releasenotes/notes/vmware-add-ram-size-multiple-of-4-validation-9740bf60d59ce5e2.yaml
new file mode 100644
index 0000000000..16bab162bb
--- /dev/null
+++ b/releasenotes/notes/vmware-add-ram-size-multiple-of-4-validation-9740bf60d59ce5e2.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+ - |
+ For VMware ESXi, VM memory must be a multiple of 4. Otherwise creating an
+ instance on ESXi fails with the error "VimFaultException: Memory (RAM) size
+ is invalid.". Instances will now fail to spawn if the flavor memory is not
+ a multiple of 4.
diff --git a/releasenotes/notes/vnic-type-remote-managed-b90cacf1c91df22b.yaml b/releasenotes/notes/vnic-type-remote-managed-b90cacf1c91df22b.yaml
new file mode 100644
index 0000000000..826729f378
--- /dev/null
+++ b/releasenotes/notes/vnic-type-remote-managed-b90cacf1c91df22b.yaml
@@ -0,0 +1,27 @@
+---
+features:
+ - |
+ Added support for off-path networking backends where devices exposed to the
+ hypervisor host are managed remotely (which is the case, for example, with
+ various SmartNIC DPU devices). ``VNIC_TYPE_REMOTE_MANAGED`` ports can now
+ be added to Nova instances as soon as all compute nodes are upgraded to
+ the new compute service version. In order to use this feature, VF PCI/PCIe
+ devices need to be tagged as ``remote_managed: "true"`` in the Nova config
+ in the ``passthrough_whitelist`` option.
+
+ This feature relies on Neutron being upgraded to the corresponding release
+ of OpenStack and having an appropriate backend capable of binding
+ ``VNIC_TYPE_REMOTE_MANAGED`` ports (at the time of writing, ML2 with the OVN
+ ML2 mechanism driver is the only supported backend, see the Neutron
+ documentation for more details).
+
+ Note that the PCI devices (VFs or, alternatively, their PF) must have a
+ valid PCI Vital Product Data (VPD) with a serial number present in it for
+ this feature to work properly. Also note that only VFs can be tagged as
+ ``remote_managed: "true"`` and they cannot be used for legacy SR-IOV
+ use-cases.
+
+ Nova operations on instances with ``VNIC_TYPE_REMOTE_MANAGED`` ports
+ follow the same logic as the operations on direct SR-IOV ports.
+
+ This feature is only supported with the Libvirt driver.
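
As a hedged illustration (not part of the patch itself): tagging a VF for this
feature in nova.conf could look like the following minimal sketch (the PCI
address is hypothetical; the ``remote_managed`` tag and the
``passthrough_whitelist`` option name are the ones referenced in the note)::

    [pci]
    # Tag a specific VF (hypothetical address) as remotely managed so it can
    # back VNIC_TYPE_REMOTE_MANAGED ports; only VFs may carry this tag.
    passthrough_whitelist = {"address": "0000:82:00.2", "remote_managed": "true"}
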
diff --git a/releasenotes/notes/yoga-prelude-31dd83eb18c789f6.yaml b/releasenotes/notes/yoga-prelude-31dd83eb18c789f6.yaml
new file mode 100644
index 0000000000..6bb5ac257b
--- /dev/null
+++ b/releasenotes/notes/yoga-prelude-31dd83eb18c789f6.yaml
@@ -0,0 +1,49 @@
+---
+prelude: |
+ The 25.0.0 release includes many new features and bug fixes. Please be
+ sure to read the upgrade section which describes the required actions to
+ upgrade your cloud from 24.0.0 (Xena) to 25.0.0 (Yoga).
+
+ There are a few major changes worth mentioning. This is not an exhaustive
+ list:
+
+ - The latest Compute API microversion supported for Yoga is `v2.90`__ (same
+ as the Xena release).
+
+ .. __: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html
+
+ - Experimental support is added for Keystone's `unified limits`__.
+ This will allow operators to test this feature in non-production
+ systems so we can collect early feedback about performance.
+
+ .. __: https://docs.openstack.org/keystone/latest/admin/unified-limits.html
+
+ - Keystone's policy concepts of system vs. project scope and roles have
+ been implemented in Nova and `default roles and scopes have been
+ defined`__, while legacy policies continue to be enabled by default.
+ Operators are encouraged to familiarize themselves with the new policies
+ and `enable them in advance`__ before Nova switches from the legacy roles
+ in a later release.
+
+ .. __: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html#nova-supported-scope-roles
+ .. __: https://docs.openstack.org/nova/latest/configuration/policy-concepts.html#migration-plan
+
+
+ - Support is added for network backends that leverage SmartNICs to
+ `offload the control plane from the host server`__. Accordingly, Neutron
+ needs to be `configured`__ in order to enable it correctly.
+ Security is improved by removing the control plane from the host server,
+ and overhead is reduced by leveraging the CPU and RAM resources of modern
+ SmartNIC DPUs.
+
+ .. __: https://docs.openstack.org/nova/latest/admin/networking.html#sr-iov
+ .. __: https://docs.openstack.org/neutron/latest/admin/ovn/smartnic_dpu
+
+
+ - Experimental support for `emulated architecture is now implemented`__.
+ AArch64, PPC64LE, MIPS, and s390x guest architectures are
+ available independently of the host architecture. This is strictly not
+ intended for production use for various reasons, including the lack of
+ security guarantees.
+
+ .. __: https://docs.openstack.org/nova/latest/admin/hw-emulation-architecture.html
diff --git a/releasenotes/notes/zed-prelude-a3cddb8b2ac8e293.yaml b/releasenotes/notes/zed-prelude-a3cddb8b2ac8e293.yaml
new file mode 100644
index 0000000000..71fb1fc1f0
--- /dev/null
+++ b/releasenotes/notes/zed-prelude-a3cddb8b2ac8e293.yaml
@@ -0,0 +1,46 @@
+---
+prelude: |
+ The 26.0.0 release includes many new features and bug fixes. Please be
+ sure to read the upgrade section which describes the required actions to
+ upgrade your cloud from 25.0.0 (Yoga) to 26.0.0 (Zed).
+
+ There are a few major changes worth mentioning. This is not an exhaustive
+ list:
+
+ - The latest Compute API microversion supported for Zed is `v2.93`__.
+
+ .. __: https://docs.openstack.org/nova/latest/reference/api-microversion-history.html#maximum-in-zed
+
+ - `Virtual IOMMU devices`__ can now be created and attached to an instance
+ when running on an x86 host and using the libvirt driver.
+
+ .. __: https://docs.openstack.org/nova/latest/admin/pci-passthrough.html#virtual-iommu-support
+
+ - Improved behavior for Windows guests by adding the following
+ `Hyper-V enlightenments`__ by default on all libvirt guests: `vpindex`,
+ `runtime`, `synic`, `reset`, `frequencies`, `reenlightenment`, `tlbflush`,
+ `ipi` and `evmc`.
+
+ .. __: https://libvirt.org/formatdomain.html#hypervisor-features
+
+ - All lifecycle actions are now fully supported for
+ `instances with vDPA ports`__, including vDPA hot-plug live migration,
+ suspend and attach/detach.
+
+ .. __: https://docs.openstack.org/nova/latest/admin/vdpa.html
+
+ - Volume-backed instances (instances with the root disk attached as a
+ volume) can now be rebuilt by specifying the 2.93 microversion; previously
+ such requests returned an HTTP 400 error.
+
+ - The `unshelve` instance API action now provides a new `host` parameter
+ with the 2.91 microversion (admin only).
+
+ - With microversion 2.92, you can only import a public key and not generate
+ a keypair. You can also use an extended name pattern.
+
+ - The default system scope is removed from all APIs, thereby completing
+ `phase #1 of the new RBAC guidelines`__, which are opt-in.
+
+ .. __: https://governance.openstack.org/tc/goals/selected/consistent-and-secure-rbac.html#phase-1
diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst
new file mode 100644
index 0000000000..d1238479ba
--- /dev/null
+++ b/releasenotes/source/2023.1.rst
@@ -0,0 +1,6 @@
+===========================
+2023.1 Series Release Notes
+===========================
+
+.. release-notes::
+ :branch: stable/2023.1
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 8e2b38889e..ed6f8c2d07 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,9 @@ Nova Release Notes
:maxdepth: 1
unreleased
+ 2023.1
+ zed
+ yoga
xena
wallaby
victoria
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index ee0a0128de..c0bd8bc9a8 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -1,15 +1,17 @@
# Andi Chandler <andi@gowling.com>, 2017. #zanata
# Andi Chandler <andi@gowling.com>, 2018. #zanata
# Andi Chandler <andi@gowling.com>, 2020. #zanata
+# Andi Chandler <andi@gowling.com>, 2022. #zanata
+# Andi Chandler <andi@gowling.com>, 2023. #zanata
msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-04-25 15:23+0000\n"
+"POT-Creation-Date: 2023-03-06 19:02+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2020-04-10 03:50+0000\n"
+"PO-Revision-Date: 2023-01-26 10:17+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -46,12 +48,21 @@ msgstr ""
msgid "**Filtering**"
msgstr "**Filtering**"
+msgid "**New Defaults(Admin, Member and Reader)**"
+msgstr "**New Defaults(Admin, Member and Reader)**"
+
msgid "**Other**"
msgstr "**Other**"
+msgid "**Policies granularity**"
+msgstr "**Policies granularity**"
+
msgid "**Ports**"
msgstr "**Ports**"
+msgid "**Scope**"
+msgstr "**Scope**"
+
msgid "**Sorting**"
msgstr "**Sorting**"
@@ -246,8 +257,8 @@ msgstr "16.1.7"
msgid "16.1.8"
msgstr "16.1.8"
-msgid "16.1.8-37"
-msgstr "16.1.8-37"
+msgid "16.1.8-57"
+msgstr "16.1.8-57"
msgid "17.0.0"
msgstr "17.0.0"
@@ -264,6 +275,9 @@ msgstr "17.0.12"
msgid "17.0.13"
msgstr "17.0.13"
+msgid "17.0.13-73"
+msgstr "17.0.13-73"
+
msgid "17.0.2"
msgstr "17.0.2"
@@ -276,9 +290,192 @@ msgstr "17.0.4"
msgid "17.0.5"
msgstr "17.0.5"
+msgid "17.0.6"
+msgstr "17.0.6"
+
+msgid "17.0.8"
+msgstr "17.0.8"
+
+msgid "17.0.9"
+msgstr "17.0.9"
+
+msgid "18.0.0"
+msgstr "18.0.0"
+
+msgid "18.0.1"
+msgstr "18.0.1"
+
+msgid "18.0.3"
+msgstr "18.0.3"
+
+msgid "18.1.0"
+msgstr "18.1.0"
+
+msgid "18.2.0"
+msgstr "18.2.0"
+
+msgid "18.2.1"
+msgstr "18.2.1"
+
+msgid "18.2.2"
+msgstr "18.2.2"
+
+msgid "18.2.3"
+msgstr "18.2.3"
+
+msgid "18.3.0"
+msgstr "18.3.0"
+
+msgid "18.3.0-55"
+msgstr "18.3.0-55"
+
+msgid "19.0.0"
+msgstr "19.0.0"
+
+msgid "19.0.1"
+msgstr "19.0.1"
+
+msgid "19.0.2"
+msgstr "19.0.2"
+
+msgid "19.0.3"
+msgstr "19.0.3"
+
+msgid "19.1.0"
+msgstr "19.1.0"
+
+msgid "19.2.0"
+msgstr "19.2.0"
+
+msgid "19.3.0"
+msgstr "19.3.0"
+
+msgid "19.3.2"
+msgstr "19.3.2"
+
+msgid "19.3.2-19"
+msgstr "19.3.2-19"
+
+msgid "20.0.0"
+msgstr "20.0.0"
+
+msgid "20.1.0"
+msgstr "20.1.0"
+
+msgid "20.1.1"
+msgstr "20.1.1"
+
+msgid "20.2.0"
+msgstr "20.2.0"
+
+msgid "20.3.0"
+msgstr "20.3.0"
+
+msgid "20.4.0"
+msgstr "20.4.0"
+
+msgid "20.4.1"
+msgstr "20.4.1"
+
+msgid "20.5.0"
+msgstr "20.5.0"
+
+msgid "20.6.1"
+msgstr "20.6.1"
+
msgid "204 NoContent on success"
msgstr "204 NoContent on success"
+msgid "21.0.0"
+msgstr "21.0.0"
+
+msgid "21.1.0"
+msgstr "21.1.0"
+
+msgid "21.1.1"
+msgstr "21.1.1"
+
+msgid "21.1.2"
+msgstr "21.1.2"
+
+msgid "21.2.0"
+msgstr "21.2.0"
+
+msgid "21.2.2"
+msgstr "21.2.2"
+
+msgid "21.2.3"
+msgstr "21.2.3"
+
+msgid "22.0.0"
+msgstr "22.0.0"
+
+msgid "22.0.1"
+msgstr "22.0.1"
+
+msgid "22.1.0"
+msgstr "22.1.0"
+
+msgid "22.2.1"
+msgstr "22.2.1"
+
+msgid "22.2.2"
+msgstr "22.2.2"
+
+msgid "22.3.0"
+msgstr "22.3.0"
+
+msgid "22.4.0"
+msgstr "22.4.0"
+
+msgid "23.0.0"
+msgstr "23.0.0"
+
+msgid "23.0.2"
+msgstr "23.0.2"
+
+msgid "23.1.0"
+msgstr "23.1.0"
+
+msgid "23.2.0"
+msgstr "23.2.0"
+
+msgid "23.2.1"
+msgstr "23.2.1"
+
+msgid "23.2.2"
+msgstr "23.2.2"
+
+msgid "24.0.0"
+msgstr "24.0.0"
+
+msgid "24.1.0"
+msgstr "24.1.0"
+
+msgid "24.1.1"
+msgstr "24.1.1"
+
+msgid "24.2.0"
+msgstr "24.2.0"
+
+msgid "25.0.0"
+msgstr "25.0.0"
+
+msgid "25.0.1"
+msgstr "25.0.1"
+
+msgid "25.1.0"
+msgstr "25.1.0"
+
+msgid "26.0.0"
+msgstr "26.0.0"
+
+msgid "26.1.0"
+msgstr "26.1.0"
+
+msgid "400 for unknown param for query param and for request body."
+msgstr "400 for unknown param for query param and for request body."
+
msgid "404 NotFound for missing resource provider"
msgstr "404 NotFound for missing resource provider"
@@ -289,6 +486,24 @@ msgid "409 Conflict if inventory in use or if some other request concurrently"
msgstr "409 Conflict if inventory in use or if some other request concurrently"
msgid ""
+"A ``--dry-run`` option has been added to the ``nova-manage placement "
+"heal_allocations`` CLI which allows running the command to get output "
+"without committing any changes to placement."
+msgstr ""
+"A ``--dry-run`` option has been added to the ``nova-manage placement "
+"heal_allocations`` CLI which allows running the command to get output "
+"without committing any changes to placement."
+
+msgid ""
+"A ``--force`` flag is provided to skip the above checks but caution should "
+"be taken as this could easily lead to the underlying ABI of the instance "
+"changing when moving between machine types."
+msgstr ""
+"A ``--force`` flag is provided to skip the above checks but caution should "
+"be taken as this could easily lead to the underlying ABI of the instance "
+"changing when moving between machine types."
+
+msgid ""
"A ``default_floating_pool`` configuration option has been added in the "
"``[neutron]`` group. The existing ``default_floating_pool`` option in the "
"``[DEFAULT]`` group is retained and should be used by nova-network users. "
@@ -319,6 +534,9 @@ msgstr ""
"for their eventual migration to the FilterScheduler. The CachingScheduler is "
"deprecated and could be removed as early as Stein."
+msgid "A few examples of versioned notifications that use InstancePayload:"
+msgstr "A few examples of versioned notifications that use InstancePayload:"
+
msgid "Current Series Release Notes"
msgstr "Current Series Release Notes"
@@ -328,9 +546,31 @@ msgstr "Liberty Series Release Notes"
msgid "Mitaka Series Release Notes"
msgstr "Mitaka Series Release Notes"
+msgid ""
+"New configuration option sync_power_state_pool_size has been added to set "
+"the number of greenthreads available for use to sync power states. Default "
+"value (1000) matches the previous implicit default value provided by "
+"Greenpool. This option can be used to reduce the number of concurrent "
+"requests made to the hypervisor or system with real instance power states "
+"for performance reasons."
+msgstr ""
+"New configuration option sync_power_state_pool_size has been added to set "
+"the number of greenthreads available for use to sync power states. The "
+"default value (1000) matches the previous implicit default value provided by "
+"Greenpool. This option can be used to reduce the number of concurrent "
+"requests made to the hypervisor or system with real instance power states "
+"for performance reasons."
+
msgid "Newton Series Release Notes"
msgstr "Newton Series Release Notes"
+msgid ""
+"Nova option 'use_usb_tablet' will be deprecated in favor of the global "
+"'pointer_model'."
+msgstr ""
+"Nova option 'use_usb_tablet' will be deprecated in favour of the global "
+"'pointer_model'."
+
msgid "Ocata Series Release Notes"
msgstr "Ocata Series Release Notes"
@@ -346,9 +586,120 @@ msgstr "Rocky Series Release Notes"
msgid "Stein Series Release Notes"
msgstr "Stein Series Release Notes"
+msgid ""
+"The XenServer configuration option 'iqn_prefix' has been removed. It was not "
+"used anywhere and has no effect on any code, so there should be no impact."
+msgstr ""
+"The XenServer configuration option 'iqn_prefix' has been removed. It was not "
+"used anywhere and has no effect on any code, so there should be no impact."
+
+msgid ""
+"The ``api_rate_limit`` configuration option has been removed. The option was "
+"disabled by default back in the Havana release since it's effectively broken "
+"for more than one API worker. It has been removed because the legacy v2 API "
+"code that was using it has also been removed."
+msgstr ""
+"The ``api_rate_limit`` configuration option has been removed. The option was "
+"disabled by default back in the Havana release since it's effectively broken "
+"for more than one API worker. It has been removed because the legacy v2 API "
+"code that was using it has also been removed."
+
+msgid ""
+"The ``nova-manage vm list`` command is deprecated and will be removed in the "
+"15.0.0 Ocata release. Use the ``nova list`` command from python-novaclient "
+"instead."
+msgstr ""
+"The ``nova-manage vm list`` command is deprecated and will be removed in the "
+"15.0.0 Ocata release. Use the ``nova list`` command from python-novaclient "
+"instead."
+
+msgid ""
+"The default flavors that nova has previously had are no longer created as "
+"part of the first database migration. New deployments will need to create "
+"appropriate flavors before first use."
+msgstr ""
+"The default flavours that Nova previously had are no longer created as part "
+"of the first database migration. New deployments will need to create "
+"appropriate flavours before first use."
+
+msgid ""
+"The network configuration option 'fake_call' has been removed. It hasn't "
+"been used for several cycles, and has no effect on any code, so there should "
+"be no impact."
+msgstr ""
+"The network configuration option 'fake_call' has been removed. It hasn't "
+"been used for several cycles, and has no effect on any code, so there should "
+"be no impact."
+
+msgid ""
+"These commands only work with nova-network which is itself deprecated in "
+"favor of Neutron."
+msgstr ""
+"These commands only work with nova-network which is itself deprecated in "
+"favour of Neutron."
+
msgid "Train Series Release Notes"
msgstr "Train Series Release Notes"
+msgid "Ussuri Series Release Notes"
+msgstr "Ussuri Series Release Notes"
+
+msgid "Victoria Series Release Notes"
+msgstr "Victoria Series Release Notes"
+
+msgid ""
+"Virt drivers are no longer loaded with the import_object_ns function, which "
+"means that only virt drivers in the nova.virt namespace can be loaded."
+msgstr ""
+"Virt drivers are no longer loaded with the import_object_ns function, which "
+"means that only virt drivers in the nova.virt namespace can be loaded."
+
+msgid "Wallaby Series Release Notes"
+msgstr "Wallaby Series Release Notes"
+
+msgid "Xena Series Release Notes"
+msgstr "Xena Series Release Notes"
+
+msgid "Yoga Series Release Notes"
+msgstr "Yoga Series Release Notes"
+
+msgid "Zed Series Release Notes"
+msgstr "Zed Series Release Notes"
+
+msgid "kernels 3.x: 8"
+msgstr "kernels 3.x: 8"
+
+msgid "kernels 4.x: 256"
+msgstr "kernels 4.x: 256"
+
+msgid "kernels prior to 3.0: 1"
+msgstr "kernels prior to 3.0: 1"
+
+msgid ""
+"network_allocate_retries config param now allows only positive integer "
+"values or 0."
+msgstr ""
+"network_allocate_retries config param now allows only positive integer "
+"values or 0."
+
+msgid "nova-maange account scrub"
+msgstr "nova-maange account scrub"
+
+msgid "nova-manage fixed *"
+msgstr "nova-manage fixed *"
+
+msgid "nova-manage floating *"
+msgstr "nova-manage floating *"
+
+msgid "nova-manage network *"
+msgstr "nova-manage network *"
+
+msgid "nova-manage project scrub"
+msgstr "nova-manage project scrub"
+
+msgid "nova-manage vpn *"
+msgstr "nova-manage vpn *"
+
msgid "system_metadata"
msgstr "system_metadata"
diff --git a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po
index 216ffc064f..eece7a459c 100644
--- a/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po
@@ -3,7 +3,7 @@ msgid ""
msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2020-04-25 15:23+0000\n"
+"POT-Creation-Date: 2022-09-16 12:59+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
diff --git a/releasenotes/source/yoga.rst b/releasenotes/source/yoga.rst
new file mode 100644
index 0000000000..7cd5e908a7
--- /dev/null
+++ b/releasenotes/source/yoga.rst
@@ -0,0 +1,6 @@
+=========================
+Yoga Series Release Notes
+=========================
+
+.. release-notes::
+ :branch: stable/yoga
diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst
new file mode 100644
index 0000000000..9608c05e45
--- /dev/null
+++ b/releasenotes/source/zed.rst
@@ -0,0 +1,6 @@
+========================
+Zed Series Release Notes
+========================
+
+.. release-notes::
+ :branch: stable/zed
diff --git a/requirements.txt b/requirements.txt
index 46a611b1de..e885a4a66f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,8 @@
-pbr>=5.5.1 # Apache-2.0
+# Requirements lower bounds listed here are our best effort to keep them up to
+# date, but we do not test them, so there is no guarantee they are all correct.
+# If you find any incorrect lower bounds, let us know or propose a fix.
+
+pbr>=5.8.0 # Apache-2.0
SQLAlchemy>=1.4.13 # MIT
decorator>=4.1.0 # BSD
eventlet>=0.30.1 # MIT
@@ -8,13 +12,10 @@ lxml>=4.5.0 # BSD
Routes>=2.3.1 # MIT
cryptography>=2.7 # BSD/Apache-2.0
WebOb>=1.8.2 # MIT
-# NOTE(mriedem): greenlet 0.4.14 does not work with older versions of gcc on
-# ppc64le systems, see https://github.com/python-greenlet/greenlet/issues/136.
greenlet>=0.4.15 # MIT
PasteDeploy>=1.5.0 # MIT
Paste>=2.0.2 # MIT
PrettyTable>=0.7.1 # BSD
-sqlalchemy-migrate>=0.13.0 # Apache-2.0
alembic>=1.5.0 # MIT
netaddr>=0.7.18 # BSD
netifaces>=0.10.4 # MIT
@@ -29,41 +30,38 @@ requests>=2.25.1 # Apache-2.0
stevedore>=1.20.0 # Apache-2.0
websockify>=0.9.0 # LGPLv3
oslo.cache>=1.26.0 # Apache-2.0
-oslo.concurrency>=4.4.0 # Apache-2.0
+oslo.concurrency>=5.0.1 # Apache-2.0
oslo.config>=8.6.0 # Apache-2.0
-oslo.context>=3.1.1 # Apache-2.0
-oslo.log>=4.4.0 # Apache-2.0
+oslo.context>=3.4.0 # Apache-2.0
+oslo.log>=4.6.1 # Apache-2.0
+oslo.limit>=1.5.0 # Apache-2.0
oslo.reports>=1.18.0 # Apache-2.0
-oslo.serialization>=4.1.0 # Apache-2.0
+oslo.serialization>=4.2.0 # Apache-2.0
oslo.upgradecheck>=1.3.0
-oslo.utils>=4.8.0 # Apache-2.0
+oslo.utils>=4.12.1 # Apache-2.0
oslo.db>=10.0.0 # Apache-2.0
-oslo.rootwrap>=5.8.0 # Apache-2.0
-oslo.messaging>=10.3.0 # Apache-2.0
-oslo.policy>=3.7.0 # Apache-2.0
-oslo.privsep>=2.4.0 # Apache-2.0
-oslo.i18n>=5.0.1 # Apache-2.0
-oslo.service>=2.5.0 # Apache-2.0
+oslo.rootwrap>=5.15.0 # Apache-2.0
+oslo.messaging>=14.1.0 # Apache-2.0
+oslo.policy>=3.11.0 # Apache-2.0
+oslo.privsep>=2.6.2 # Apache-2.0
+oslo.i18n>=5.1.0 # Apache-2.0
+oslo.service>=2.8.0 # Apache-2.0
rfc3986>=1.2.0 # Apache-2.0
oslo.middleware>=3.31.0 # Apache-2.0
psutil>=3.2.2 # BSD
oslo.versionedobjects>=1.35.0 # Apache-2.0
-os-brick>=4.3.1 # Apache-2.0
+os-brick>=5.2 # Apache-2.0
os-resource-classes>=1.1.0 # Apache-2.0
-os-traits>=2.5.0 # Apache-2.0
-os-vif>=1.15.2 # Apache-2.0
-os-win>=5.4.0 # Apache-2.0
+os-traits>=2.9.0 # Apache-2.0
+os-vif>=3.1.0 # Apache-2.0
castellan>=0.16.0 # Apache-2.0
microversion-parse>=0.2.1 # Apache-2.0
tooz>=1.58.0 # Apache-2.0
cursive>=0.2.1 # Apache-2.0
-pypowervm>=1.1.15 # Apache-2.0
retrying>=1.3.3,!=1.3.0 # Apache-2.0
os-service-types>=1.7.0 # Apache-2.0
-taskflow>=3.8.0 # Apache-2.0
python-dateutil>=2.7.0 # BSD
-zVMCloudConnector>=1.3.0;sys_platform!='win32' # Apache 2.0 License
futurist>=1.8.0 # Apache-2.0
openstacksdk>=0.35.0 # Apache-2.0
-dataclasses>=0.7;python_version=='3.6' # Apache 2.0 License
PyYAML>=5.1 # MIT
+packaging>=21.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 3857fd5e47..fa6f6af656 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -10,7 +10,7 @@ project_urls =
Bug Tracker = https://bugs.launchpad.net/nova/
Documentation = https://docs.openstack.org/nova/
Source Code = https://opendev.org/openstack/nova
-python_requires = >=3.6
+python_requires = >=3.8
classifiers =
Development Status :: 5 - Production/Stable
Environment :: OpenStack
@@ -20,16 +20,21 @@ classifiers =
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
- Programming Language :: Python :: 3.6
- Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
+ Programming Language :: Python :: 3.10
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
[extras]
osprofiler =
osprofiler>=1.4.0 # Apache-2.0
+zvm =
+ zVMCloudConnector>=1.3.0;sys_platform!='win32' # Apache 2.0 License
+hyperv =
+ os-win>=5.5.0 # Apache-2.0
+vmware =
+ oslo.vmware>=3.6.0 # Apache-2.0
[files]
data_files =
@@ -63,7 +68,6 @@ nova.api.extra_spec_validators =
null = nova.api.validation.extra_specs.null
os = nova.api.validation.extra_specs.os
pci_passthrough = nova.api.validation.extra_specs.pci_passthrough
- powervm = nova.api.validation.extra_specs.powervm
quota = nova.api.validation.extra_specs.quota
resources = nova.api.validation.extra_specs.resources
traits = nova.api.validation.extra_specs.traits
diff --git a/test-requirements.txt b/test-requirements.txt
index 44cb2bacf7..bbf04f5a1a 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -8,7 +8,6 @@ types-paramiko>=0.1.3 # Apache-2.0
coverage!=4.4,>=4.0 # Apache-2.0
ddt>=1.2.1 # MIT
fixtures>=3.0.0 # Apache-2.0/BSD
-mock>=3.0.0 # BSD
psycopg2-binary>=2.8 # LGPL/ZPL
PyMySQL>=0.8.0 # MIT License
python-barbicanclient>=4.5.2 # Apache-2.0
@@ -19,10 +18,7 @@ stestr>=2.0.0 # Apache-2.0
osprofiler>=1.4.0 # Apache-2.0
testresources>=2.0.0 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
-testtools>=2.2.0 # MIT
+testtools>=2.5.0 # MIT
bandit>=1.1.0 # Apache-2.0
gabbi>=1.35.0 # Apache-2.0
wsgi-intercept>=1.7.0 # MIT License
-
-# vmwareapi driver specific dependencies
-oslo.vmware>=3.6.0 # Apache-2.0
diff --git a/tools/test-setup.sh b/tools/test-setup.sh
index 96a08c4a98..fced9be5e0 100755
--- a/tools/test-setup.sh
+++ b/tools/test-setup.sh
@@ -27,13 +27,19 @@ function is_rhel8 {
cat /etc/*release | grep -q 'release 8'
}
+function is_rhel9 {
+ [ -f /usr/bin/dnf ] && \
+ cat /etc/*release | grep -q -e "Red Hat" -e "CentOS" -e "CloudLinux" && \
+ cat /etc/*release | grep -q 'release 9'
+}
+
function set_conf_line { # file regex value
sudo sh -c "grep -q -e '$2' $1 && \
sed -i 's|$2|$3|g' $1 || \
echo '$3' >> $1"
}
-if is_rhel7 || is_rhel8; then
+if is_rhel7 || is_rhel8 || is_rhel9; then
# mysql needs to be started on centos/rhel
sudo systemctl restart mariadb.service
diff --git a/tox.ini b/tox.ini
index 0777c71828..77c0d9b9d3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,13 +1,8 @@
[tox]
minversion = 3.18.0
-envlist = py39,functional,pep8
-# Automatic envs (pyXX) will only use the python version appropriate to that
-# env and ignore basepython inherited from [testenv] if we set
-# ignore_basepython_conflict.
-ignore_basepython_conflict = True
+envlist = py3,functional,pep8
[testenv]
-basepython = python3
usedevelop = True
allowlist_externals =
bash
@@ -15,6 +10,7 @@ allowlist_externals =
rm
env
make
+install_command = python -I -m pip install -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} {opts} {packages}
setenv =
VIRTUAL_ENV={envdir}
LANGUAGE=en_US
@@ -26,11 +22,19 @@ setenv =
# TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0
SQLALCHEMY_WARN_20=1
deps =
- -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
- -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
+extras =
+ zvm
+ hyperv
+ vmware
passenv =
- OS_DEBUG GENERATE_HASHES
+ OS_DEBUG
+ GENERATE_HASHES
+# NOTE(sean-k-mooney) optimization is enabled by default and when enabled
+# asserts are compiled out. Disable optimization to allow asserts in
+# nova to fire in unit and functional tests. This can be useful for
+# debugging issues with fixtures and mocks.
+ PYTHONOPTIMIZE
# there is also secret magic in subunit-trace which lets you run in a fail only
# mode. To do this define the TRACE_FAILONLY environmental variable.
commands =
@@ -38,20 +42,57 @@ commands =
env TEST_OSPROFILER=1 stestr run --combine --no-discover 'nova.tests.unit.test_profiler'
stestr slowest
+[testenv:functional{,-py38,-py39,-py310,-py311}]
+description =
+ Run functional tests.
+# As nova functional tests import the PlacementFixture from the placement
+# repository these tests are, by default, set up to run with openstack-placement
+# from pypi. In the gate, Zuul will use the installed version of placement (stable
+# branch version on stable gate run) OR the version of placement the Depends-On in
+# the commit message suggests. If you want to run the tests with latest master from
+# the placement repo, modify the dep line to point at master, example:
+# deps =
+# {[testenv]deps}
+# git+https://opendev.org/openstack/placement#egg=openstack-placement
+# If you want to run the test locally with an un-merged placement change,
+# modify the dep line to point to your dependency or pip install placement
+# into the appropriate tox virtualenv.
+# NOTE: We express the requirement here instead of test-requirements
+# because we do not want placement present during unit tests.
+deps =
+ {[testenv]deps}
+ openstack-placement>=9.0.0.0b1
+extras =
+commands =
+ stestr --test-path=./nova/tests/functional run {posargs}
+ stestr slowest
+
+[testenv:functional-without-sample-db-tests]
+description =
+ Run functional tests by excluding the API|Notification
+ sample tests and DB tests. This env is used in
+ placement-nova-tox-functional-py38 job which is defined and
+ run in placement.
+deps = {[testenv:functional]deps}
+extras =
+commands =
+ stestr --test-path=./nova/tests/functional run --exclude-regex '((?:api|notification)_sample_tests|functional\.db\.)' {posargs}
+
[testenv:mypy]
description =
Run type checks.
envdir = {toxworkdir}/shared
+extras =
commands =
bash tools/mypywrap.sh {posargs}
[testenv:pep8]
description =
Run style checks.
-envdir = {toxworkdir}/shared
deps =
{[testenv]deps}
autopep8
+extras =
commands =
{[testenv:mypy]commands}
# check if autopep8 would alter the formatting but don't actually change it
@@ -68,6 +109,7 @@ commands =
bash -c '! find doc/ -type f -name *.json | xargs -t -n1 python -m json.tool 2>&1 > /dev/null | grep -B1 -v ^python'
[testenv:autopep8]
+extras =
deps = autopep8
commands =
autopep8 --exit-code --max-line-length=79 --in-place -r nova doc setup.py
@@ -76,6 +118,7 @@ commands =
description =
Run style checks on the changes made since HEAD~. For a full run including docs, use 'pep8'
envdir = {toxworkdir}/shared
+extras =
commands =
bash tools/flake8wrap.sh -HEAD
@@ -84,72 +127,11 @@ description =
Determine whether a backport is ready to be merged by checking whether it has
already been merged to master or more recent stable branches.
deps =
+extras =
skipsdist = true
commands =
bash tools/check-cherry-picks.sh
-[testenv:functional]
-description =
- Run functional tests using python3.
-# As nova functional tests import the PlacementFixture from the placement
-# repository these tests are, by default, set up to run with openstack-placement
-# from pypi. In the gate, Zuul will use the installed version of placement (stable
-# branch version on stable gate run) OR the version of placement the Depends-On in
-# the commit message suggests. If you want to run the tests with latest master from
-# the placement repo, modify the dep line to point at master, example:
-# deps =
-# {[testenv]deps}
-# git+https://opendev.org/openstack/placement#egg=openstack-placement
-# If you want to run the test locally with an un-merged placement change,
-# modify the dep line to point to your dependency or pip install placement
-# into the appropriate tox virtualenv.
-# NOTE: We express the requirement here instead of test-requirements
-# because we do not want placement present during unit tests.
-deps =
- {[testenv]deps}
- openstack-placement>=1.0.0
-commands =
- stestr --test-path=./nova/tests/functional run {posargs}
- stestr slowest
-
-[testenv:functional-py36]
-description =
- Run functional tests using python3.6.
-deps = {[testenv:functional]deps}
-commands =
- {[testenv:functional]commands}
-
-[testenv:functional-py37]
-description =
- Run functional tests using python3.7.
-deps = {[testenv:functional]deps}
-commands =
- {[testenv:functional]commands}
-
-[testenv:functional-py38]
-description =
- Run functional tests using python3.8.
-deps = {[testenv:functional]deps}
-commands =
- {[testenv:functional]commands}
-
-[testenv:functional-without-sample-db-tests]
-description =
- Run functional tests by excluding the API|Notification
- sample tests and DB tests. This env is used in
- placement-nova-tox-functional-py38 job which is defined and
- run in placement.
-deps = {[testenv:functional]deps}
-commands =
- stestr --test-path=./nova/tests/functional run --exclude-regex '((?:api|notification)_sample_tests|functional\.db\.)' {posargs}
-
-[testenv:functional-py39]
-description =
- Run functional tests using python3.9.
-deps = {[testenv:functional]deps}
-commands =
- {[testenv:functional]commands}
-
[testenv:api-samples]
envdir = {toxworkdir}/functional
setenv =
@@ -157,17 +139,20 @@ setenv =
GENERATE_SAMPLES=True
PYTHONHASHSEED=0
deps = {[testenv:functional]deps}
+extras =
commands =
stestr --test-path=./nova/tests/functional/api_sample_tests run {posargs}
stestr slowest
[testenv:genconfig]
envdir = {toxworkdir}/shared
+extras =
commands =
oslo-config-generator --config-file=etc/nova/nova-config-generator.conf
[testenv:genpolicy]
envdir = {toxworkdir}/shared
+extras =
commands =
oslopolicy-sample-generator --config-file=etc/nova/nova-policy-generator.conf
@@ -180,6 +165,7 @@ envdir = {toxworkdir}/shared
setenv =
{[testenv]setenv}
PYTHON=coverage run --source nova --parallel-mode
+extras =
commands =
coverage erase
stestr run {posargs}
@@ -190,6 +176,7 @@ commands =
[testenv:debug]
envdir = {toxworkdir}/shared
+extras =
commands =
oslo_debug_helper {posargs}
@@ -197,6 +184,7 @@ commands =
deps =
{[testenv]deps}
-r{toxinidir}/doc/requirements.txt
+extras =
commands =
{posargs}
@@ -207,7 +195,9 @@ description =
# to install (test-)requirements.txt for docs.
deps =
-c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/requirements.txt
-r{toxinidir}/doc/requirements.txt
+extras =
commands =
rm -rf doc/build/html doc/build/doctrees
sphinx-build -W --keep-going -b html -j auto doc/source doc/build/html
@@ -219,6 +209,7 @@ description =
Build PDF documentation.
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
+extras =
commands =
rm -rf doc/build/pdf
sphinx-build -W --keep-going -b latex -j auto doc/source doc/build/pdf
@@ -229,6 +220,7 @@ description =
Generate the API guide. Called from CI scripts to test and publish to docs.openstack.org.
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
+extras =
commands =
rm -rf api-guide/build
sphinx-build -W --keep-going -b html -j auto api-guide/source api-guide/build/html
@@ -238,6 +230,7 @@ description =
Generate the API ref. Called from CI scripts to test and publish to docs.openstack.org.
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
+extras =
commands =
rm -rf api-ref/build
sphinx-build -W --keep-going -b html -j auto api-ref/source api-ref/build/html
@@ -247,6 +240,7 @@ description =
Generate release notes.
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
+extras =
commands =
rm -rf releasenotes/build
sphinx-build -W --keep-going -b html -j auto releasenotes/source releasenotes/build/html
@@ -256,6 +250,7 @@ description =
Build all documentation including API guides and refs.
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
+extras =
commands =
{[testenv:docs]commands}
{[testenv:api-guide]commands}
@@ -266,6 +261,7 @@ commands =
# NOTE(browne): This is required for the integration test job of the bandit
# project. Please do not remove.
envdir = {toxworkdir}/shared
+extras =
commands = bandit -r nova -x tests -n 5 -ll
[flake8]
@@ -294,7 +290,7 @@ exclude = .venv,.git,.tox,dist,*lib/python*,*egg,build,releasenotes
# to 25 and run 'tox -epep8'.
# 39 is currently the most complex thing we have
# TODO(jogo): get this number down to 25 or so
-max-complexity=40
+max-complexity = 40
[hacking]
import_exceptions = typing,nova.i18n
@@ -347,6 +343,9 @@ extension =
N367 = checks:do_not_alias_mock_class
N368 = checks:do_not_use_mock_class_as_new_mock_value
N369 = checks:check_lockutils_rwlocks
+ N370 = checks:check_six
+ N371 = checks:import_stock_mock
+ N372 = checks:check_set_daemon
paths =
./nova/hacking
@@ -358,12 +357,6 @@ paths =
# explicitly to avoid unnecessarily installing the checked-out repo too
usedevelop = False
deps = bindep
+extras =
commands =
bindep test
-
-[testenv:lower-constraints]
-usedevelop = False
-deps =
- -c{toxinidir}/lower-constraints.txt
- -r{toxinidir}/test-requirements.txt
- -r{toxinidir}/requirements.txt