-rw-r--r--HACKING.rst5
-rw-r--r--api-guide/source/extensions.rst150
-rw-r--r--api-guide/source/microversions.rst146
-rw-r--r--api-guide/source/server_concepts.rst265
-rw-r--r--api-guide/source/versions.rst22
-rw-r--r--devstack/tempest-dsvm-cells-rc1
-rw-r--r--devstack/tempest-dsvm-lxc-rc14
-rw-r--r--doc/api_samples/all_extensions/server-create-req.json (renamed from doc/api_samples/all_extensions/server-post-req.json)0
-rw-r--r--doc/api_samples/all_extensions/server-create-resp.json (renamed from doc/api_samples/all_extensions/server-post-resp.json)0
-rw-r--r--doc/api_samples/os-access-ips/server-put-req.json6
-rw-r--r--doc/api_samples/os-disk-config/server-update-put-req.json5
-rw-r--r--doc/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json69
-rw-r--r--doc/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json71
-rw-r--r--doc/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json (renamed from nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl)2
-rw-r--r--doc/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json22
-rw-r--r--doc/api_samples/os-server-groups/v2.13/server-groups-get-resp.json2
-rw-r--r--doc/api_samples/os-server-groups/v2.13/server-groups-list-resp.json2
-rw-r--r--doc/api_samples/os-server-groups/v2.13/server-groups-post-resp.json2
-rw-r--r--doc/api_samples/server-migrations/force_complete.json3
-rw-r--r--doc/api_samples/server-migrations/live-migrate-server.json7
-rw-r--r--doc/api_samples/servers/server-create-req.json12
-rw-r--r--doc/api_samples/servers/server-create-resp.json22
-rw-r--r--doc/api_samples/servers/server-update-req.json8
-rw-r--r--doc/api_samples/servers/server-update-resp.json (renamed from doc/api_samples/os-disk-config/server-update-put-resp.json)4
-rw-r--r--doc/api_samples/servers/v2.16/server-get-resp.json58
-rw-r--r--doc/api_samples/servers/v2.16/servers-details-resp.json60
-rw-r--r--doc/api_samples/servers/v2.17/server-action-trigger-crash-dump.json3
-rw-r--r--doc/api_samples/servers/v2.19/server-action-rebuild-resp.json57
-rw-r--r--doc/api_samples/servers/v2.19/server-action-rebuild.json13
-rw-r--r--doc/api_samples/servers/v2.19/server-get-resp.json59
-rw-r--r--doc/api_samples/servers/v2.19/server-post-req.json13
-rw-r--r--doc/api_samples/servers/v2.19/server-post-resp.json16
-rw-r--r--doc/api_samples/servers/v2.19/server-put-req.json6
-rw-r--r--doc/api_samples/servers/v2.19/server-put-resp.json (renamed from doc/api_samples/os-access-ips/server-put-resp.json)22
-rw-r--r--doc/api_samples/servers/v2.19/servers-details-resp.json61
-rw-r--r--doc/api_samples/servers/v2.19/servers-list-resp.json18
-rw-r--r--doc/api_samples/versions/v21-version-get-resp.json2
-rw-r--r--doc/api_samples/versions/versions-get-resp.json2
-rw-r--r--doc/ext/support_matrix.py34
-rw-r--r--doc/ext/versioned_notifications.py112
-rw-r--r--doc/notification_samples/service-update.json21
-rw-r--r--doc/source/code-review.rst6
-rw-r--r--doc/source/conf.py1
-rw-r--r--doc/source/development.environment.rst6
-rw-r--r--doc/source/feature_classification.rst2
-rw-r--r--doc/source/filter_scheduler.rst4
-rw-r--r--doc/source/index.rst1
-rw-r--r--doc/source/notifications.rst277
-rw-r--r--doc/source/scheduler_evolution.rst93
-rw-r--r--doc/source/support-matrix.ini49
-rw-r--r--doc/source/support-matrix.rst2
-rw-r--r--doc/source/threading.rst1
-rw-r--r--etc/nova/nova-config-generator.conf3
-rw-r--r--etc/nova/policy.json583
-rw-r--r--etc/nova/rootwrap.conf2
-rw-r--r--etc/nova/rootwrap.d/compute.filters6
-rw-r--r--etc/nova/rootwrap.d/network.filters6
-rw-r--r--nova/api/ec2/__init__.py64
-rw-r--r--nova/api/ec2/ec2utils.py6
-rw-r--r--nova/api/metadata/base.py5
-rw-r--r--nova/api/metadata/handler.py11
-rw-r--r--nova/api/openstack/__init__.py40
-rw-r--r--nova/api/openstack/api_version_request.py10
-rw-r--r--nova/api/openstack/auth.py12
-rw-r--r--nova/api/openstack/compute/aggregates.py7
-rw-r--r--nova/api/openstack/compute/availability_zone.py5
-rw-r--r--nova/api/openstack/compute/baremetal_nodes.py5
-rw-r--r--nova/api/openstack/compute/console_output.py2
-rw-r--r--nova/api/openstack/compute/extended_availability_zone.py3
-rw-r--r--nova/api/openstack/compute/extended_server_attributes.py49
-rw-r--r--nova/api/openstack/compute/extended_status.py3
-rw-r--r--nova/api/openstack/compute/extended_volumes.py3
-rw-r--r--nova/api/openstack/compute/extension_info.py2
-rw-r--r--nova/api/openstack/compute/flavors_extraspecs.py2
-rw-r--r--nova/api/openstack/compute/floating_ips.py6
-rw-r--r--nova/api/openstack/compute/image_metadata.py5
-rw-r--r--nova/api/openstack/compute/image_size.py3
-rw-r--r--nova/api/openstack/compute/instance_actions.py14
-rw-r--r--nova/api/openstack/compute/legacy_v2/contrib/aggregates.py7
-rw-r--r--nova/api/openstack/compute/legacy_v2/contrib/baremetal_nodes.py6
-rw-r--r--nova/api/openstack/compute/legacy_v2/contrib/migrations.py8
-rw-r--r--nova/api/openstack/compute/legacy_v2/contrib/rescue.py2
-rw-r--r--nova/api/openstack/compute/legacy_v2/contrib/services.py9
-rw-r--r--nova/api/openstack/compute/legacy_v2/contrib/volumes.py16
-rw-r--r--nova/api/openstack/compute/legacy_v2/servers.py2
-rw-r--r--nova/api/openstack/compute/migrations.py8
-rw-r--r--nova/api/openstack/compute/schemas/scheduler_hints.py5
-rw-r--r--nova/api/openstack/compute/schemas/server_migrations.py26
-rw-r--r--nova/api/openstack/compute/schemas/servers.py22
-rw-r--r--nova/api/openstack/compute/security_groups.py9
-rw-r--r--nova/api/openstack/compute/server_metadata.py5
-rw-r--r--nova/api/openstack/compute/server_migrations.py78
-rw-r--r--nova/api/openstack/compute/servers.py82
-rw-r--r--nova/api/openstack/compute/services.py9
-rw-r--r--nova/api/openstack/compute/views/servers.py4
-rw-r--r--nova/api/openstack/compute/volumes.py42
-rw-r--r--nova/api/openstack/rest_api_version_history.rst53
-rw-r--r--nova/api/openstack/urlmap.py6
-rw-r--r--nova/api/openstack/wsgi.py280
-rw-r--r--nova/api/opts.py1
-rw-r--r--nova/api/validation/parameter_types.py96
-rw-r--r--nova/api/validation/validators.py2
-rw-r--r--nova/availability_zones.py23
-rw-r--r--nova/block_device.py9
-rw-r--r--nova/cache_utils.py174
-rw-r--r--nova/cells/manager.py21
-rw-r--r--nova/cells/messaging.py16
-rw-r--r--nova/cells/opts.py82
-rw-r--r--nova/cells/rpc_driver.py18
-rw-r--r--nova/cells/rpcapi.py9
-rw-r--r--nova/cells/scheduler.py25
-rw-r--r--nova/cells/state.py33
-rw-r--r--nova/cells/utils.py12
-rw-r--r--nova/cells/weights/mute_child.py13
-rw-r--r--nova/cells/weights/ram_by_instance_type.py11
-rw-r--r--nova/cells/weights/weight_offset.py11
-rw-r--r--nova/cert/rpcapi.py15
-rw-r--r--nova/cmd/all.py6
-rw-r--r--nova/cmd/api_metadata.py5
-rw-r--r--nova/cmd/cert.py5
-rw-r--r--nova/cmd/compute.py5
-rw-r--r--nova/cmd/conductor.py5
-rw-r--r--nova/cmd/dhcpbridge.py14
-rw-r--r--nova/cmd/manage.py39
-rw-r--r--nova/cmd/network.py5
-rw-r--r--nova/cmd/novncproxy.py20
-rw-r--r--nova/compute/api.py397
-rw-r--r--nova/compute/cells_api.py2
-rw-r--r--nova/compute/manager.py279
-rw-r--r--nova/compute/monitors/cpu/virt_driver.py5
-rw-r--r--nova/compute/resource_tracker.py54
-rw-r--r--nova/compute/rpcapi.py107
-rw-r--r--nova/compute/stats.py3
-rw-r--r--nova/conductor/__init__.py8
-rw-r--r--nova/conductor/api.py60
-rw-r--r--nova/conductor/manager.py92
-rw-r--r--nova/conductor/rpcapi.py67
-rw-r--r--nova/conductor/tasks/live_migrate.py45
-rw-r--r--nova/conf/__init__.py24
-rw-r--r--nova/conf/availability_zone.py38
-rw-r--r--nova/conf/cells.py806
-rw-r--r--nova/conf/cert.py66
-rw-r--r--nova/conf/conductor.py60
-rw-r--r--nova/conf/ironic.py4
-rw-r--r--nova/conf/opts.py7
-rw-r--r--nova/conf/pci.py118
-rw-r--r--nova/conf/scheduler.py871
-rw-r--r--nova/conf/serial_console.py2
-rw-r--r--nova/conf/virt.py288
-rw-r--r--nova/conf/vnc.py335
-rw-r--r--nova/conf/wsgi.py92
-rw-r--r--nova/config.py9
-rw-r--r--nova/console/websocketproxy.py6
-rw-r--r--nova/consoleauth/manager.py35
-rw-r--r--nova/context.py40
-rw-r--r--nova/crypto.py22
-rw-r--r--nova/db/api.py84
-rw-r--r--nova/db/sqlalchemy/api.py3109
-rw-r--r--nova/db/sqlalchemy/api_migrations/migrate_repo/versions/005_flavors.py88
-rw-r--r--nova/db/sqlalchemy/api_models.py50
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/314_add_resource_provider_tables.py84
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/315_add_migration_progresss_detail.py30
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/316_add_disk_ratio_for_compute_nodes.py27
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/317_add_aggregate_uuid.py32
-rw-r--r--nova/db/sqlalchemy/models.py81
-rw-r--r--nova/db/sqlalchemy/utils.py2
-rw-r--r--nova/exception.py55
-rw-r--r--nova/hacking/checks.py112
-rw-r--r--nova/image/download/__init__.py2
-rw-r--r--nova/image/glance.py60
-rw-r--r--nova/keymgr/barbican.py7
-rw-r--r--nova/locale/cs/LC_MESSAGES/nova-log-critical.po14
-rw-r--r--nova/locale/cs/LC_MESSAGES/nova-log-error.po35
-rw-r--r--nova/locale/cs/LC_MESSAGES/nova-log-info.po76
-rw-r--r--nova/locale/cs/LC_MESSAGES/nova-log-warning.po63
-rw-r--r--nova/locale/cs/LC_MESSAGES/nova.po176
-rw-r--r--nova/locale/de/LC_MESSAGES/nova-log-critical.po14
-rw-r--r--nova/locale/de/LC_MESSAGES/nova-log-error.po35
-rw-r--r--nova/locale/de/LC_MESSAGES/nova-log-info.po77
-rw-r--r--nova/locale/de/LC_MESSAGES/nova.po162
-rw-r--r--nova/locale/es/LC_MESSAGES/nova-log-critical.po14
-rw-r--r--nova/locale/es/LC_MESSAGES/nova-log-error.po38
-rw-r--r--nova/locale/es/LC_MESSAGES/nova-log-info.po80
-rw-r--r--nova/locale/es/LC_MESSAGES/nova-log-warning.po34
-rw-r--r--nova/locale/es/LC_MESSAGES/nova.po257
-rw-r--r--nova/locale/es_MX/LC_MESSAGES/nova-log-critical.po14
-rw-r--r--nova/locale/fr/LC_MESSAGES/nova-log-critical.po14
-rw-r--r--nova/locale/fr/LC_MESSAGES/nova-log-error.po35
-rw-r--r--nova/locale/fr/LC_MESSAGES/nova-log-info.po80
-rw-r--r--nova/locale/fr/LC_MESSAGES/nova-log-warning.po35
-rw-r--r--nova/locale/fr/LC_MESSAGES/nova.po360
-rw-r--r--nova/locale/it/LC_MESSAGES/nova-log-error.po24
-rw-r--r--nova/locale/it/LC_MESSAGES/nova-log-info.po78
-rw-r--r--nova/locale/it/LC_MESSAGES/nova.po543
-rw-r--r--nova/locale/ja/LC_MESSAGES/nova-log-critical.po16
-rw-r--r--nova/locale/ja/LC_MESSAGES/nova.po156
-rw-r--r--nova/locale/ko_KR/LC_MESSAGES/nova.po520
-rw-r--r--nova/locale/nova-log-error.pot371
-rw-r--r--nova/locale/nova-log-info.pot461
-rw-r--r--nova/locale/nova-log-warning.pot539
-rw-r--r--nova/locale/nova.pot1715
-rw-r--r--nova/locale/pa_IN/LC_MESSAGES/nova-log-critical.po14
-rw-r--r--nova/locale/pt_BR/LC_MESSAGES/nova.po712
-rw-r--r--nova/locale/ru/LC_MESSAGES/nova.po561
-rw-r--r--nova/locale/tr_TR/LC_MESSAGES/nova-log-critical.po14
-rw-r--r--nova/locale/tr_TR/LC_MESSAGES/nova-log-error.po42
-rw-r--r--nova/locale/tr_TR/LC_MESSAGES/nova-log-info.po76
-rw-r--r--nova/locale/tr_TR/LC_MESSAGES/nova-log-warning.po45
-rw-r--r--nova/locale/tr_TR/LC_MESSAGES/nova.po155
-rw-r--r--nova/locale/zh_CN/LC_MESSAGES/nova-log-critical.po14
-rw-r--r--nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po35
-rw-r--r--nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po76
-rw-r--r--nova/locale/zh_CN/LC_MESSAGES/nova-log-warning.po33
-rw-r--r--nova/locale/zh_CN/LC_MESSAGES/nova.po178
-rw-r--r--nova/locale/zh_TW/LC_MESSAGES/nova.po509
-rw-r--r--nova/network/__init__.py2
-rw-r--r--nova/network/api.py7
-rw-r--r--nova/network/l3.py6
-rw-r--r--nova/network/linux_net.py21
-rw-r--r--nova/network/manager.py12
-rw-r--r--nova/network/model.py21
-rw-r--r--nova/network/neutronv2/api.py172
-rw-r--r--nova/network/neutronv2/constants.py1
-rw-r--r--nova/network/rpcapi.py19
-rw-r--r--nova/network/security_group/neutron_driver.py8
-rw-r--r--nova/network/security_group/openstack_driver.py2
-rw-r--r--nova/network/security_group/security_group_base.py6
-rw-r--r--nova/objects/__init__.py3
-rw-r--r--nova/objects/aggregate.py33
-rw-r--r--nova/objects/bandwidth_usage.py17
-rw-r--r--nova/objects/base.py10
-rw-r--r--nova/objects/block_device.py22
-rw-r--r--nova/objects/compute_node.py111
-rw-r--r--nova/objects/fields.py157
-rw-r--r--nova/objects/host_mapping.py57
-rw-r--r--nova/objects/image_meta.py35
-rw-r--r--nova/objects/instance.py77
-rw-r--r--nova/objects/migrate_data.py31
-rw-r--r--nova/objects/migration.py33
-rw-r--r--nova/objects/notification.py150
-rw-r--r--nova/objects/pci_device.py141
-rw-r--r--nova/objects/request_spec.py69
-rw-r--r--nova/objects/resource_provider.py197
-rw-r--r--nova/objects/security_group_rule.py9
-rw-r--r--nova/objects/service.py89
-rw-r--r--nova/objects/virtual_interface.py10
-rw-r--r--nova/openstack/common/cliutils.py12
-rw-r--r--nova/openstack/common/memorycache.py97
-rw-r--r--nova/opts.py21
-rw-r--r--nova/pci/devspec.py4
-rw-r--r--nova/pci/manager.py9
-rw-r--r--nova/pci/request.py26
-rw-r--r--nova/pci/utils.py4
-rw-r--r--nova/pci/whitelist.py12
-rw-r--r--nova/rpc.py50
-rw-r--r--nova/scheduler/driver.py30
-rw-r--r--nova/scheduler/filters/availability_zone_filter.py5
-rw-r--r--nova/scheduler/filters/compute_capabilities_filter.py2
-rw-r--r--nova/scheduler/filters/compute_filter.py3
-rw-r--r--nova/scheduler/filters/disk_filter.py7
-rw-r--r--nova/scheduler/filters/exact_core_filter.py4
-rw-r--r--nova/scheduler/filters/exact_disk_filter.py4
-rw-r--r--nova/scheduler/filters/exact_ram_filter.py4
-rw-r--r--nova/scheduler/filters/retry_filter.py5
-rw-r--r--nova/scheduler/host_manager.py4
-rw-r--r--nova/scheduler/ironic_host_manager.py1
-rw-r--r--nova/scheduler/manager.py28
-rw-r--r--nova/scheduler/utils.py16
-rw-r--r--nova/scheduler/weights/disk.py38
-rw-r--r--nova/servicegroup/api.py1
-rw-r--r--nova/servicegroup/drivers/mc.py10
-rw-r--r--nova/servicegroup/drivers/zk.py200
-rw-r--r--nova/test.py11
-rw-r--r--nova/tests/fixtures.py30
-rw-r--r--nova/tests/functional/api/client.py17
-rw-r--r--nova/tests/functional/api_paste_fixture.py11
-rw-r--r--nova/tests/functional/api_sample_tests/api_sample_base.py14
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v21_comp.json.tpl724
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-req.json.tpl (renamed from nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-post-req.json.tpl)0
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-resp.json.tpl (renamed from nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-post-resp.json.tpl)0
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-put-req.json.tpl6
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-update-put-req.json.tpl5
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json.tpl69
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json.tpl71
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json.tpl27
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json.tpl22
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-get-resp.json.tpl2
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-list-resp.json.tpl2
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-post-resp.json.tpl2
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/server-migrations/force_complete.json.tpl3
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/server-migrations/live-migrate-server.json.tpl7
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/server-create-req.json.tpl12
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/server-create-resp.json.tpl22
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/server-update-req.json.tpl8
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/server-update-resp.json.tpl (renamed from nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-update-put-resp.json.tpl)4
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.17/server-action-trigger-crash-dump.json.tpl3
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild-resp.json.tpl57
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild.json.tpl13
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-get-resp.json.tpl59
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-req.json.tpl13
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-resp.json.tpl16
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-req.json.tpl6
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-resp.json.tpl (renamed from nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-put-resp.json.tpl)6
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-details-resp.json.tpl61
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-list-resp.json.tpl18
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/versions/v21-version-get-resp.json.tpl2
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/versions/versions-get-resp.json.tpl2
-rw-r--r--nova/tests/functional/api_sample_tests/test_access_ips.py15
-rw-r--r--nova/tests/functional/api_sample_tests/test_cloudpipe.py2
-rw-r--r--nova/tests/functional/api_sample_tests/test_disk_config.py10
-rw-r--r--nova/tests/functional/api_sample_tests/test_evacuate.py8
-rw-r--r--nova/tests/functional/api_sample_tests/test_extended_server_attributes.py31
-rw-r--r--nova/tests/functional/api_sample_tests/test_extension_info.py7
-rw-r--r--nova/tests/functional/api_sample_tests/test_fixed_ips.py10
-rw-r--r--nova/tests/functional/api_sample_tests/test_flavor_access.py8
-rw-r--r--nova/tests/functional/api_sample_tests/test_flavor_manage.py2
-rw-r--r--nova/tests/functional/api_sample_tests/test_flavor_rxtx.py4
-rw-r--r--nova/tests/functional/api_sample_tests/test_hypervisors.py8
-rw-r--r--nova/tests/functional/api_sample_tests/test_instance_actions.py31
-rw-r--r--nova/tests/functional/api_sample_tests/test_keypairs.py30
-rw-r--r--nova/tests/functional/api_sample_tests/test_migrate_server.py2
-rw-r--r--nova/tests/functional/api_sample_tests/test_multinic.py2
-rw-r--r--nova/tests/functional/api_sample_tests/test_remote_consoles.py8
-rw-r--r--nova/tests/functional/api_sample_tests/test_server_groups.py8
-rw-r--r--nova/tests/functional/api_sample_tests/test_server_migrations.py52
-rw-r--r--nova/tests/functional/api_sample_tests/test_servers.py112
-rw-r--r--nova/tests/functional/api_sample_tests/test_services.py23
-rw-r--r--nova/tests/functional/api_sample_tests/test_versions.py8
-rw-r--r--nova/tests/functional/api_sample_tests/test_volumes.py11
-rw-r--r--nova/tests/functional/api_samples_test_base.py113
-rw-r--r--nova/tests/functional/db/api/test_migrations.py48
-rw-r--r--nova/tests/functional/db/test_flavor_model.py61
-rw-r--r--nova/tests/functional/db/test_request_spec.py5
-rw-r--r--nova/tests/functional/db/test_resource_provider.py112
-rw-r--r--nova/tests/functional/integrated_helpers.py3
-rw-r--r--nova/tests/functional/libvirt/test_numa_servers.py3
-rw-r--r--nova/tests/functional/libvirt/test_rt_servers.py15
-rw-r--r--nova/tests/functional/notification_sample_tests/__init__.py0
-rw-r--r--nova/tests/functional/notification_sample_tests/notification_sample_base.py92
-rw-r--r--nova/tests/functional/notification_sample_tests/test_service_update.py64
-rw-r--r--nova/tests/functional/regressions/README.rst24
-rw-r--r--nova/tests/functional/regressions/__init__.py0
-rw-r--r--nova/tests/functional/regressions/test_bug_1522536.py70
-rw-r--r--nova/tests/functional/test_instance_actions.py79
-rw-r--r--nova/tests/functional/test_servers.py228
-rwxr-xr-xnova/tests/live_migration/hooks/ceph.sh317
-rwxr-xr-xnova/tests/live_migration/hooks/nfs.sh47
-rwxr-xr-xnova/tests/live_migration/hooks/run_tests.sh95
-rwxr-xr-xnova/tests/live_migration/hooks/utils.sh72
-rw-r--r--nova/tests/unit/api/openstack/compute/legacy_v2/test_servers.py96
-rw-r--r--nova/tests/unit/api/openstack/compute/test_access_ips.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_agents.py16
-rw-r--r--nova/tests/unit/api/openstack/compute/test_aggregates.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_api.py15
-rw-r--r--nova/tests/unit/api/openstack/compute/test_attach_interfaces.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_auth.py39
-rw-r--r--nova/tests/unit/api/openstack/compute/test_availability_zone.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_cloudpipe_update.py6
-rw-r--r--nova/tests/unit/api/openstack/compute/test_config_drive.py15
-rw-r--r--nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_consoles.py9
-rw-r--r--nova/tests/unit/api/openstack/compute/test_createserverext.py18
-rw-r--r--nova/tests/unit/api/openstack/compute/test_disk_config.py26
-rw-r--r--nova/tests/unit/api/openstack/compute/test_extended_availability_zone.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_extended_server_attributes.py51
-rw-r--r--nova/tests/unit/api/openstack/compute/test_extended_status.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_extended_volumes.py5
-rw-r--r--nova/tests/unit/api/openstack/compute/test_fixed_ips.py7
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavor_access.py25
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavor_disabled.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavor_manage.py17
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavor_rxtx.py2
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavor_swap.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavorextradata.py11
-rw-r--r--nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py85
-rw-r--r--nova/tests/unit/api/openstack/compute/test_floating_ips.py8
-rw-r--r--nova/tests/unit/api/openstack/compute/test_fping.py8
-rw-r--r--nova/tests/unit/api/openstack/compute/test_hide_server_addresses.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_hosts.py12
-rw-r--r--nova/tests/unit/api/openstack/compute/test_hypervisors.py5
-rw-r--r--nova/tests/unit/api/openstack/compute/test_image_size.py4
-rw-r--r--nova/tests/unit/api/openstack/compute/test_instance_actions.py46
-rw-r--r--nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py7
-rw-r--r--nova/tests/unit/api/openstack/compute/test_keypairs.py44
-rw-r--r--nova/tests/unit/api/openstack/compute/test_microversions.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_migrations.py12
-rw-r--r--nova/tests/unit/api/openstack/compute/test_multiple_create.py43
-rw-r--r--nova/tests/unit/api/openstack/compute/test_neutron_security_groups.py73
-rw-r--r--nova/tests/unit/api/openstack/compute/test_pci.py19
-rw-r--r--nova/tests/unit/api/openstack/compute/test_plugin_framework.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_scheduler_hints.py18
-rw-r--r--nova/tests/unit/api/openstack/compute/test_security_group_default_rules.py12
-rw-r--r--nova/tests/unit/api/openstack/compute/test_security_groups.py223
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_actions.py64
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_group_quotas.py9
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_groups.py8
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_metadata.py170
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_migrations.py108
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_start_stop.py21
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_usage.py3
-rw-r--r--nova/tests/unit/api/openstack/compute/test_serversV21.py397
-rw-r--r--nova/tests/unit/api/openstack/compute/test_services.py30
-rw-r--r--nova/tests/unit/api/openstack/compute/test_shelve.py19
-rw-r--r--nova/tests/unit/api/openstack/compute/test_user_data.py21
-rw-r--r--nova/tests/unit/api/openstack/compute/test_versions.py52
-rw-r--r--nova/tests/unit/api/openstack/compute/test_volumes.py112
-rw-r--r--nova/tests/unit/api/openstack/fakes.py22
-rw-r--r--nova/tests/unit/api/openstack/test_wsgi.py98
-rw-r--r--nova/tests/unit/api_samples_test_base/__init__.py0
-rw-r--r--nova/tests/unit/api_samples_test_base/test_compare_result.py470
-rw-r--r--nova/tests/unit/cells/test_cells_messaging.py46
-rw-r--r--nova/tests/unit/cells/test_cells_state_manager.py68
-rw-r--r--nova/tests/unit/compute/test_compute.py567
-rw-r--r--nova/tests/unit/compute/test_compute_api.py324
-rw-r--r--nova/tests/unit/compute/test_compute_cells.py3
-rw-r--r--nova/tests/unit/compute/test_compute_mgr.py404
-rw-r--r--nova/tests/unit/compute/test_compute_utils.py10
-rw-r--r--nova/tests/unit/compute/test_compute_xen.py5
-rw-r--r--nova/tests/unit/compute/test_keypairs.py14
-rw-r--r--nova/tests/unit/compute/test_multiple_nodes.py13
-rw-r--r--nova/tests/unit/compute/test_resource_tracker.py61
-rw-r--r--nova/tests/unit/compute/test_resources.py4
-rw-r--r--nova/tests/unit/compute/test_rpcapi.py151
-rw-r--r--nova/tests/unit/compute/test_shelve.py20
-rw-r--r--nova/tests/unit/compute/test_stats.py26
-rw-r--r--nova/tests/unit/compute/test_tracker.py48
-rw-r--r--nova/tests/unit/compute/test_virtapi.py3
-rw-r--r--nova/tests/unit/conductor/tasks/test_live_migrate.py174
-rw-r--r--nova/tests/unit/conductor/test_conductor.py139
-rw-r--r--nova/tests/unit/conf_fixture.py10
-rw-r--r--nova/tests/unit/console/test_console.py2
-rw-r--r--nova/tests/unit/console/test_websocketproxy.py3
-rw-r--r--nova/tests/unit/consoleauth/test_consoleauth.py79
-rw-r--r--nova/tests/unit/db/fakes.py15
-rw-r--r--nova/tests/unit/db/test_db_api.py317
-rw-r--r--nova/tests/unit/db/test_migrations.py36
-rw-r--r--nova/tests/unit/db/test_models.py86
-rw-r--r--nova/tests/unit/fake_notifier.py28
-rw-r--r--nova/tests/unit/fake_policy.py3
-rw-r--r--nova/tests/unit/fake_volume.py22
-rw-r--r--nova/tests/unit/image/test_glance.py142
-rw-r--r--nova/tests/unit/image/test_transfer_modules.py2
-rw-r--r--nova/tests/unit/keymgr/test_barbican.py11
-rw-r--r--nova/tests/unit/network/security_group/test_neutron_driver.py7
-rw-r--r--nova/tests/unit/network/test_api.py4
-rw-r--r--nova/tests/unit/network/test_l3.py26
-rw-r--r--nova/tests/unit/network/test_linux_net.py13
-rw-r--r--nova/tests/unit/network/test_manager.py14
-rw-r--r--nova/tests/unit/network/test_neutronv2.py289
-rw-r--r--nova/tests/unit/network/test_rpcapi.py31
-rw-r--r--nova/tests/unit/objects/test_aggregate.py24
-rw-r--r--nova/tests/unit/objects/test_compute_node.py152
-rw-r--r--nova/tests/unit/objects/test_fields.py154
-rw-r--r--nova/tests/unit/objects/test_instance.py135
-rw-r--r--nova/tests/unit/objects/test_keypair.py74
-rw-r--r--nova/tests/unit/objects/test_migrate_data.py29
-rw-r--r--nova/tests/unit/objects/test_migration.py28
-rw-r--r--nova/tests/unit/objects/test_notification.py244
-rw-r--r--nova/tests/unit/objects/test_objects.py121
-rw-r--r--nova/tests/unit/objects/test_pci_device.py319
-rw-r--r--nova/tests/unit/objects/test_request_spec.py21
-rw-r--r--nova/tests/unit/objects/test_resource_provider.py270
-rw-r--r--nova/tests/unit/objects/test_security_group_rule.py5
-rw-r--r--nova/tests/unit/objects/test_service.py78
-rw-r--r--nova/tests/unit/pci/test_devspec.py2
-rw-r--r--nova/tests/unit/pci/test_manager.py33
-rw-r--r--nova/tests/unit/pci/test_stats.py24
-rw-r--r--nova/tests/unit/scheduler/fakes.py12
-rw-r--r--nova/tests/unit/scheduler/filters/test_disk_filters.py24
-rw-r--r--nova/tests/unit/scheduler/filters/test_exact_core_filter.py6
-rw-r--r--nova/tests/unit/scheduler/filters/test_exact_disk_filter.py6
-rw-r--r--nova/tests/unit/scheduler/filters/test_exact_ram_filter.py6
-rw-r--r--nova/tests/unit/scheduler/ironic_fakes.py12
-rw-r--r--nova/tests/unit/scheduler/test_caching_scheduler.py1
-rw-r--r--nova/tests/unit/scheduler/test_chance_scheduler.py48
-rw-r--r--nova/tests/unit/scheduler/test_filter_scheduler.py70
-rw-r--r--nova/tests/unit/scheduler/test_filters.py129
-rw-r--r--nova/tests/unit/scheduler/test_host_manager.py129
-rw-r--r--nova/tests/unit/scheduler/test_ironic_host_manager.py100
-rw-r--r--nova/tests/unit/scheduler/test_scheduler.py131
-rw-r--r--nova/tests/unit/scheduler/test_scheduler_utils.py50
-rw-r--r--nova/tests/unit/scheduler/weights/test_weights_disk.py111
-rw-r--r--nova/tests/unit/servicegroup/test_mc_servicegroup.py4
-rw-r--r--nova/tests/unit/servicegroup/test_zk_driver.py101
-rw-r--r--nova/tests/unit/test_api_validation.py59
-rw-r--r--nova/tests/unit/test_availability_zones.py8
-rw-r--r--nova/tests/unit/test_block_device.py3
-rw-r--r--nova/tests/unit/test_cache.py121
-rw-r--r--nova/tests/unit/test_cinder.py68
-rw-r--r--nova/tests/unit/test_context.py15
-rw-r--r--nova/tests/unit/test_crypto.py16
-rw-r--r--nova/tests/unit/test_exception.py8
-rw-r--r--nova/tests/unit/test_fixtures.py15
-rw-r--r--nova/tests/unit/test_hacking.py104
-rw-r--r--nova/tests/unit/test_metadata.py39
-rw-r--r--nova/tests/unit/test_notifications.py4
-rw-r--r--nova/tests/unit/test_notifier.py53
-rw-r--r--nova/tests/unit/test_nova_manage.py64
-rw-r--r--nova/tests/unit/test_policy.py138
-rw-r--r--nova/tests/unit/test_quota.py20
-rw-r--r--nova/tests/unit/test_rpc.py342
-rw-r--r--nova/tests/unit/test_utils.py12
-rw-r--r--nova/tests/unit/test_wsgi.py35
-rw-r--r--nova/tests/unit/utils.py5
-rw-r--r--nova/tests/unit/virt/disk/vfs/test_localfs.py2
-rw-r--r--nova/tests/unit/virt/fakelibosinfo.py131
-rw-r--r--nova/tests/unit/virt/hyperv/test_driver.py30
-rw-r--r--nova/tests/unit/virt/hyperv/test_eventhandler.py2
-rw-r--r--nova/tests/unit/virt/hyperv/test_hostops.py5
-rw-r--r--nova/tests/unit/virt/hyperv/test_livemigrationops.py9
-rw-r--r--nova/tests/unit/virt/hyperv/test_vmops.py48
-rw-r--r--nova/tests/unit/virt/hyperv/test_volumeops.py8
-rw-r--r--nova/tests/unit/virt/ironic/test_client_wrapper.py6
-rw-r--r--nova/tests/unit/virt/ironic/test_driver.py178
-rw-r--r--nova/tests/unit/virt/ironic/test_patcher.py2
-rw-r--r--nova/tests/unit/virt/ironic/utils.py10
-rw-r--r--nova/tests/unit/virt/libvirt/fake_libvirt_utils.py7
-rw-r--r--nova/tests/unit/virt/libvirt/fakelibvirt.py8
-rw-r--r--nova/tests/unit/virt/libvirt/storage/test_rbd.py201
-rw-r--r--nova/tests/unit/virt/libvirt/test_blockinfo.py13
-rw-r--r--nova/tests/unit/virt/libvirt/test_config.py121
-rw-r--r--nova/tests/unit/virt/libvirt/test_driver.py1180
-rw-r--r--nova/tests/unit/virt/libvirt/test_fakelibvirt.py3
-rw-r--r--nova/tests/unit/virt/libvirt/test_firewall.py79
-rw-r--r--nova/tests/unit/virt/libvirt/test_guest.py20
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagebackend.py169
-rw-r--r--nova/tests/unit/virt/libvirt/test_imagecache.py105
-rw-r--r--nova/tests/unit/virt/libvirt/test_utils.py8
-rw-r--r--nova/tests/unit/virt/libvirt/test_vif.py220
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_disco.py68
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py15
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_iscsi.py14
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_nfs.py1
-rw-r--r--nova/tests/unit/virt/libvirt/volume/test_scality.py2
-rw-r--r--nova/tests/unit/virt/test_hardware.py15
-rw-r--r--nova/tests/unit/virt/test_images.py2
-rw-r--r--nova/tests/unit/virt/test_osinfo.py89
-rw-r--r--nova/tests/unit/virt/test_virt.py7
-rw-r--r--nova/tests/unit/virt/test_virt_drivers.py26
-rw-r--r--nova/tests/unit/virt/vmwareapi/fake.py10
-rw-r--r--nova/tests/unit/virt/vmwareapi/stubs.py23
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_configdrive.py14
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_driver_api.py103
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_ds_util.py27
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_images.py18
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_network_util.py6
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_read_write_util.py3
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vif.py3
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vm_util.py36
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_vmops.py75
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_volumeops.py2
-rw-r--r--nova/tests/unit/virt/xenapi/client/test_session.py6
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_utils.py8
-rw-r--r--nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py5
-rw-r--r--nova/tests/unit/virt/xenapi/test_agent.py2
-rw-r--r--nova/tests/unit/virt/xenapi/test_vif.py189
-rw-r--r--nova/tests/unit/virt/xenapi/test_vm_utils.py88
-rw-r--r--nova/tests/unit/virt/xenapi/test_volume_utils.py1
-rw-r--r--nova/tests/unit/virt/xenapi/test_xenapi.py169
-rw-r--r--nova/tests/unit/volume/test_cinder.py39
-rw-r--r--nova/utils.py14
-rw-r--r--nova/virt/configdrive.py4
-rw-r--r--nova/virt/disk/api.py30
-rw-r--r--nova/virt/disk/mount/nbd.py12
-rw-r--r--nova/virt/driver.py125
-rw-r--r--nova/virt/fake.py10
-rw-r--r--nova/virt/firewall.py132
-rw-r--r--nova/virt/hardware.py32
-rw-r--r--nova/virt/hyperv/constants.py47
-rw-r--r--nova/virt/hyperv/driver.py12
-rw-r--r--nova/virt/hyperv/eventhandler.py2
-rw-r--r--nova/virt/hyperv/hostops.py3
-rw-r--r--nova/virt/hyperv/imagecache.py5
-rw-r--r--nova/virt/hyperv/livemigrationops.py32
-rw-r--r--nova/virt/hyperv/snapshotops.py13
-rw-r--r--nova/virt/hyperv/vmops.py34
-rw-r--r--nova/virt/hyperv/volumeops.py12
-rw-r--r--nova/virt/images.py14
-rw-r--r--nova/virt/ironic/client_wrapper.py6
-rw-r--r--nova/virt/ironic/driver.py101
-rw-r--r--nova/virt/ironic/patcher.py6
-rw-r--r--nova/virt/libvirt/blockinfo.py8
-rw-r--r--nova/virt/libvirt/config.py100
-rw-r--r--nova/virt/libvirt/driver.py797
-rw-r--r--nova/virt/libvirt/firewall.py18
-rw-r--r--nova/virt/libvirt/guest.py18
-rw-r--r--nova/virt/libvirt/host.py4
-rw-r--r--nova/virt/libvirt/imagebackend.py177
-rw-r--r--nova/virt/libvirt/imagecache.py28
-rw-r--r--nova/virt/libvirt/storage/rbd_utils.py169
-rw-r--r--nova/virt/libvirt/utils.py16
-rw-r--r--nova/virt/libvirt/vif.py117
-rw-r--r--nova/virt/libvirt/volume/disco.py67
-rw-r--r--nova/virt/libvirt/volume/fibrechannel.py1
-rw-r--r--nova/virt/libvirt/volume/glusterfs.py2
-rw-r--r--nova/virt/libvirt/volume/iscsi.py1
-rw-r--r--nova/virt/libvirt/volume/nfs.py3
-rw-r--r--nova/virt/libvirt/volume/quobyte.py2
-rw-r--r--nova/virt/netutils.py13
-rw-r--r--nova/virt/opts.py13
-rw-r--r--nova/virt/osinfo.py136
-rw-r--r--nova/virt/virtapi.py6
-rw-r--r--nova/virt/vmwareapi/driver.py5
-rw-r--r--nova/virt/vmwareapi/ds_util.py21
-rw-r--r--nova/virt/vmwareapi/images.py8
-rw-r--r--nova/virt/vmwareapi/vm_util.py14
-rw-r--r--nova/virt/vmwareapi/vmops.py43
-rw-r--r--nova/virt/xenapi/driver.py25
-rw-r--r--nova/virt/xenapi/fake.py5
-rw-r--r--nova/virt/xenapi/firewall.py70
-rw-r--r--nova/virt/xenapi/host.py4
-rw-r--r--nova/virt/xenapi/image/glance.py2
-rw-r--r--nova/virt/xenapi/pool.py3
-rw-r--r--nova/virt/xenapi/vif.py75
-rw-r--r--nova/virt/xenapi/vm_utils.py23
-rw-r--r--nova/virt/xenapi/vmops.py28
-rw-r--r--nova/virt/xenapi/volume_utils.py2
-rw-r--r--nova/vnc/__init__.py59
-rw-r--r--nova/vnc/xvp_proxy.py21
-rw-r--r--nova/volume/cinder.py71
-rw-r--r--nova/volume/encryptors/base.py1
-rw-r--r--nova/wsgi.py53
-rw-r--r--openstack-common.conf1
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost2
-rw-r--r--releasenotes/notes/add-novnc-proxy-config-to-vnc-group-f5bb68740f623744.yaml4
-rw-r--r--releasenotes/notes/add-xvp-config-to-vnc-group-349cca99f05fcfd3.yaml5
-rw-r--r--releasenotes/notes/aggregate-uuid-generation-1f029af7a9af519b.yaml7
-rw-r--r--releasenotes/notes/attach-detach-vol-for-shelved-and-shelved-offloaded-instances-93f70cfd49299f05.yaml8
-rw-r--r--releasenotes/notes/block-live-migrate-with-attached-volumes-ee02afbfe46937c7.yaml5
-rw-r--r--releasenotes/notes/bp-boot-from-uefi-b413b96017db76dd.yaml3
-rw-r--r--releasenotes/notes/bp-get-valid-server-state-a817488f4c8d3822.yaml8
-rw-r--r--releasenotes/notes/bp-instance-crash-dump-7ccbba7799dc66f9.yaml4
-rw-r--r--releasenotes/notes/bp-rbd-instance-snapshots-130e860b726ddc16.yaml12
-rw-r--r--releasenotes/notes/bp-split-network-plane-for-live-migration-40bc127734173759.yaml10
-rw-r--r--releasenotes/notes/bp-virt-driver-cpu-thread-pinning-1aaeeb6648f8e009.yaml9
-rw-r--r--releasenotes/notes/compute_upgrade_levels_auto-97acebc7b45b76df.yaml6
-rw-r--r--releasenotes/notes/config_scheduler_driver-e751ae392bc1a1d0.yaml9
-rw-r--r--releasenotes/notes/config_scheduler_host_manager_driver-a543a74ea70f5e90.yaml9
-rw-r--r--releasenotes/notes/disk-weight-scheduler-98647f9c6317d21d.yaml8
-rw-r--r--releasenotes/notes/disk_ratio_to_rt-b6224ab8c0272d86.yaml19
-rw-r--r--releasenotes/notes/force-live-migration-be5a10cd9c8eb981.yaml4
-rw-r--r--releasenotes/notes/instance-actions-read-deleted-instances-18bbb327924b66c7.yaml9
-rw-r--r--releasenotes/notes/instance-hostname-used-to-populate-ports-dns-name-08341ec73dc076c0.yaml22
-rw-r--r--releasenotes/notes/libvirt-deprecate-migration-flags-config-4ba1e2d6c9ef09ff.yaml9
-rw-r--r--releasenotes/notes/libvirt-live-migration-flags-mangling-a2407a31ddf17427.yaml12
-rw-r--r--releasenotes/notes/libvirt-live-migration-new-tunneled-option-d7ebb1eb1e95e683.yaml7
-rw-r--r--releasenotes/notes/live_migration_uri-dependent-on-virt_type-595c46c2310f45c3.yaml7
-rw-r--r--releasenotes/notes/lock_policy-75bea372036acbd5.yaml6
-rw-r--r--releasenotes/notes/neutron-ovs-bridge-name-7b3477103622f4cc.yaml4
-rw-r--r--releasenotes/notes/optional_project_id-6aebf1cb394d498f.yaml16
-rw-r--r--releasenotes/notes/request-spec-api-db-b9cc6e0624d563c5.yaml19
-rw-r--r--releasenotes/notes/service-status-notification-e137297f5d5aa45d.yaml9
-rw-r--r--releasenotes/notes/soft-affinity-for-server-group-f45e191bd8cdbd15.yaml7
-rw-r--r--releasenotes/notes/switch-to-oslo-cache-7114a0ab2dea52df.yaml9
-rw-r--r--releasenotes/notes/upgrade_rootwrap_compute_filters-428ca239f2e4e63d.yaml13
-rw-r--r--releasenotes/notes/user-settable-server-description-89dcfc75677e31bc.yaml16
-rw-r--r--releasenotes/notes/versioned-notifications-423f4d8d2a3992c6.yaml9
-rw-r--r--releasenotes/notes/vmware_limits-16edee7a9ad023bc.yaml40
-rw-r--r--releasenotes/notes/xen_rename-03edd9b78f3e81e5.yaml5
-rw-r--r--releasenotes/notes/zookeeper-servicegroup-driver-removed-c3bcaa6f9fe976ed.yaml15
-rw-r--r--requirements.txt81
-rwxr-xr-xrun_tests.sh4
-rw-r--r--setup.cfg15
-rw-r--r--test-requirements.txt32
-rw-r--r--tests-py3.txt29
-rw-r--r--tools/ebtables.workaround35
-rwxr-xr-xtools/reserve-migrations.py75
-rwxr-xr-xtools/xenserver/rotate_xen_guest_logs.sh10
-rw-r--r--tox.ini71
670 files changed, 29097 insertions, 13361 deletions
diff --git a/HACKING.rst b/HACKING.rst
index 881d8de006..397cf52245 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -53,6 +53,11 @@ Nova Specific Commandments
- [N340] Check nova.utils.spawn() is used instead of greenthread.spawn() and eventlet.spawn()
- [N341] contextlib.nested is deprecated
- [N342] Config options should be in the central location ``nova/conf/``
+- [N343] Check for common double word typos
+- [N344] Python 3: do not use dict.iteritems.
+- [N345] Python 3: do not use dict.iterkeys.
+- [N346] Python 3: do not use dict.itervalues.
+- [N347] Provide enough help text for config options
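+
+For example, N344-N346 flag the Python 2-only dict iterator methods; the
+non-iterator forms work on both Python 2 and 3. A minimal sketch:
+
+.. code::
+
+    flags = {'locked': True, 'deleted': False}
+    # N344: use items() instead of iteritems(); items() exists on Python 3.
+    for key, value in flags.items():
+        print(key, value)
+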
Creating Unit Tests
-------------------
diff --git a/api-guide/source/extensions.rst b/api-guide/source/extensions.rst
index c9615f0dc8..90b6f41e35 100644
--- a/api-guide/source/extensions.rst
+++ b/api-guide/source/extensions.rst
@@ -2,146 +2,10 @@
Extensions
==========
-The OpenStack Compute API v2.0 is extensible. Extensions serve two purposes:
-They allow the introduction of new features in the API without requiring
-a version change and they allow the introduction of vendor specific
-niche functionality. Applications can programmatically list available
-extensions by performing a **GET** on the ``/extensions`` URI. Note that
-this is a versioned request; that is, an extension available in one API
-version might not be available in another.
-
-Extensions may also be queried individually by their unique alias. This
-provides the simplest method of checking if an extension is available
-because an unavailable extension issues an itemNotFound (404)
-response.
-
-Extensions may define new data types, parameters, actions, headers,
-states, and resources.
-
-NOTE: Extensions is a deprecated concept in Nova and their support
-will be removed in a future version. If your product or cloud relies
-on extensions you should work on getting those features into the main
-upstream project.
-
-Important
-~~~~~~~~~
-
-Applications should ignore response data that contains extension
-elements. An extended state should always be treated as an ``UNKNOWN``
-state if the application does not support the extension. Applications
-should also verify that an extension is available before submitting an
-extended request.
-
-
-**Example: Extended server: JSON response**
-
-.. code::
-
- {
- "servers": [
- {
- "id": "52415800-8b69-11e0-9b19-734f6af67565",
- "tenant_id": "1234",
- "user_id": "5678",
- "name": "sample-server",
- "updated": "2010-10-10T12:00:00Z",
- "created": "2010-08-10T12:00:00Z",
- "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0",
- "status": "BUILD",
- "progress": 60,
- "accessIPv4" : "67.23.10.132",
- "accessIPv6" : "::babe:67.23.10.132",
- "image" : {
- "id": "52415800-8b69-11e0-9b19-734f6f006e54",
- "links": [
- {
- "rel": "self",
- "href": "http://servers.api.openstack.org/v2/1234/images/52415800-8b69-11e0-9b19-734f6f006e54"
- },
- {
- "rel": "bookmark",
- "href": "http://servers.api.openstack.org/1234/images/52415800-8b69-11e0-9b19-734f6f006e54"
- }
- ]
- },
- "flavor" : {
- "id": "52415800-8b69-11e0-9b19-734f216543fd",
- "links": [
- {
- "rel": "self",
- "href": "http://servers.api.openstack.org/v2/1234/flavors/52415800-8b69-11e0-9b19-734f216543fd"
- },
- {
- "rel": "bookmark",
- "href": "http://servers.api.openstack.org/1234/flavors/52415800-8b69-11e0-9b19-734f216543fd"
- }
- ]
- },
- "addresses": {
- "public" : [
- {
- "version": 4,
- "addr": "67.23.10.132"
- },
- {
- "version": 6,
- "addr": "::babe:67.23.10.132"
- },
- {
- "version": 4,
- "addr": "67.23.10.131"
- },
- {
- "version": 6,
- "addr": "::babe:4317:0A83"
- }
- ],
- "private" : [
- {
- "version": 4,
- "addr": "10.176.42.16"
- },
- {
- "version": 6,
- "addr": "::babe:10.176.42.16"
- }
- ]
- },
- "metadata": {
- "Server Label": "Web Head 1",
- "Image Version": "2.1"
- },
- "links": [
- {
- "rel": "self",
- "href": "http://servers.api.openstack.org/v2/1234/servers/52415800-8b69-11e0-9b19-734f6af67565"
- },
- {
- "rel": "bookmark",
- "href": "http://servers.api.openstack.org/1234/servers/52415800-8b69-11e0-9b19-734f6af67565"
- }
- ],
- "RS-CBS:volumes": [
- {
- "name": "OS",
- "href": "https://cbs.api.rackspacecloud.com/12934/volumes/19"
- },
- {
- "name": "Work",
- "href": "https://cbs.api.rackspacecloud.com/12934/volumes/23"
- }
- ]
- }
- ]
- }
-
-
-**Example: Extended action: JSON response**
-
-.. code::
-
- {
- "RS-CBS:attach-volume":{
- "href":"https://cbs.api.rackspacecloud.com/12934/volumes/19"
- }
- }
+Extensions are a deprecated concept in Nova. Support for extensions will be
+removed in a future release. In order to keep backwards-compatibility with
+legacy V2 API users, the ``extension_info`` API will remain as part of the
+Compute API. However, API extensions will not be supported anymore;
+there is only one standard API now. For the current V2.1 API, microversions
+are the new mechanism for implementing API features and changes. For more
+detail about microversions, please refer to :doc:`microversions`.
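+
+A legacy client can still list the remaining extension records. A minimal
+sketch using the Python ``requests`` library (the endpoint URL and token are
+assumptions for illustration):
+
+.. code::
+
+    import requests
+
+    # Hypothetical endpoint and token for illustration only.
+    resp = requests.get(
+        'http://openstack.example.com/v2.1/extensions',
+        headers={'X-Auth-Token': 'TOKEN'})
+    for ext in resp.json()['extensions']:
+        print(ext['alias'], ext['name'])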
diff --git a/api-guide/source/microversions.rst b/api-guide/source/microversions.rst
index d496ad909d..54fc5d7b9b 100644
--- a/api-guide/source/microversions.rst
+++ b/api-guide/source/microversions.rst
@@ -15,24 +15,128 @@
Microversions
=============
-API v2.1 supports Microversions: small, documented changes to the API. A user
-can use Microversions to discover the latest API version supported in their
-cloud. A cloud that is upgraded to support newer versions will still support
-all older versions to maintain the backward compatibility for those users who
-depend on older versions. Users can also discover new features easily with
-Microversions, so that they can benefit from all the advantages and
-improvements of the current cloud.
-
-There are multiple cases which you can resolve with Microversions:
-
-Legacy v2 API user with new cloud
-=================================
-
-The minimum version of Microversions is `2.1`, which is a version compatible
-with the legacy v2 API. The legacy v2 API user doesn't need to worry that their
-older client software will be broken when their cloud is upgraded with new
-versions. And the cloud operator doesn't need to worry that upgrading their
-cloud to newer versions will break any user with older clients that don't
-expect these changes.
-
-TODO: add more use-cases for Microversions
+API v2.1 supports microversions: small, documented changes to the API. A user
+can use microversions to discover the latest API microversion supported in
+their cloud. A cloud that is upgraded to support newer microversions will still
+support all older microversions to maintain the backward compatibility for
+those users who depend on older microversions. Users can also discover new
+features easily with microversions, so that they can benefit from all the
+advantages and improvements of the current cloud.
+
+There are multiple cases which you can resolve with microversions:
+
+- **Older clients with new cloud**
+
+Before an old client talks to a newer cloud, it can check the cloud's minimum
+supported microversion to verify that the cloud is still compatible with the
+old API. This prevents the old client from breaking on backwards-incompatible
+API changes.
+
+Currently the minimum supported microversion is `2.1`, which is compatible
+with the legacy v2 API. That means legacy v2 API users don't need to worry
+that their older client software will be broken when their cloud is upgraded
+to newer versions. And the cloud operator doesn't need to worry that
+upgrading their cloud to newer versions will break any user with older
+clients that don't expect these changes.
+
+- **User discovery of available features between clouds**
+
+New features can be discovered through microversions. The user client should
+check the supported microversions first, and enable new features only when
+the cloud supports them. In this way, the same client can work with clouds
+that have deployed different microversions simultaneously.
+
+Version Discovery
+=================
+
+The Version API will return the minimum and maximum microversions. These values
+are used by the client to discover the API's supported microversion(s).
+
+Requests to '/' will get version info for all endpoints. A response would look
+as follows::
+
+ {
+ "versions": [
+ {
+ "id": "v2.0",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/",
+ "rel": "self"
+ }
+ ],
+ "status": "SUPPORTED",
+ "version": "",
+ "min_version": "",
+ "updated": "2011-01-21T11:33:21Z"
+ },
+ {
+ "id": "v2.1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2.1/",
+ "rel": "self"
+ }
+ ],
+ "status": "CURRENT",
+ "version": "2.14",
+ "min_version": "2.1",
+ "updated": "2013-07-23T11:33:21Z"
+ }
+ ]
+ }
+
+"version" is the maximum microversion, "min_version" is the minimum
+microversion. If the value is the empty string, it means this endpoint doesn't
+support microversions; it is a legacy v2 API endpoint -- for example, the
+endpoint `http://openstack.example.com/v2/` in the above sample. The endpoint
+`http://openstack.example.com/v2.1/` supports microversions; the maximum
+microversion is '2.14', and the minimum microversion is '2.1'. The client
+should specify a microversion between (and including) the minimum and maximum
+microversion to access the endpoint.
+
+You can also obtain specific endpoint version information by performing a GET
+on the base version URL (e.g., `http://openstack.example.com/v2.1/`). You can
+get more information about the version API at :doc:`versions`.
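+
+For example, a client can fetch the document above and work out which
+microversions each endpoint supports (a minimal sketch using the Python
+``requests`` library; the endpoint URL is an assumption):
+
+.. code::
+
+    import requests
+
+    # Hypothetical endpoint for illustration only.
+    resp = requests.get('http://openstack.example.com/')
+    for version in resp.json()['versions']:
+        # An empty min_version means a legacy endpoint without microversions.
+        if version['min_version']:
+            print('%s supports microversions %s through %s'
+                  % (version['id'], version['min_version'], version['version']))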
+
+Client Interaction
+==================
+
+A client specifies the microversion of the API they want by using the following
+HTTP header::
+
+ X-OpenStack-Nova-API-Version: 2.4
+
+This acts conceptually like the "Accept" header. Semantically this means:
+
+* If `X-OpenStack-Nova-API-Version` is not provided, act as if the minimum
+ supported microversion was specified.
+
+* If `X-OpenStack-Nova-API-Version` is provided, respond with the API at
+ that microversion. If that's outside of the range of microversions supported,
+ return 406 Not Acceptable.
+
+* If `X-OpenStack-Nova-API-Version` is ``latest`` (special keyword), act as
+ if maximum was specified.
+
+.. warning:: The ``latest`` value is mostly meant for integration testing and
+ would be dangerous to rely on in client code since microversions are not
+ following semver and therefore backward compatibility is not guaranteed.
+ Clients should always require a specific microversion but limit what is
+   acceptable to the microversion range that they understand at the time.
+
+This means that out of the box, an old client without any knowledge of
+microversions can work with an OpenStack installation with microversions
+support.
+
+Two extra headers are always returned in the response:
+
+* X-OpenStack-Nova-API-Version: microversion_number
+* Vary: X-OpenStack-Nova-API-Version
+
+The first header specifies the microversion number of the API which was
+executed.
+
+The second header is used as a hint to caching proxies that the response
+is also dependent on the X-OpenStack-Nova-API-Version and not just
+the body and query parameters. See :rfc:`2616` section 14.44 for details.
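+
+For example, a request pinned to microversion 2.4, and a check of the two
+response headers, might look like this (a minimal sketch using the Python
+``requests`` library; the endpoint URL and token are assumptions):
+
+.. code::
+
+    import requests
+
+    # Hypothetical endpoint and token for illustration only.
+    resp = requests.get(
+        'http://openstack.example.com/v2.1/servers',
+        headers={'X-Auth-Token': 'TOKEN',
+                 'X-OpenStack-Nova-API-Version': '2.4'})
+    # Echoes the microversion that actually served the request.
+    print(resp.headers['X-OpenStack-Nova-API-Version'])
+    # Hint for caching proxies, per RFC 2616 section 14.44.
+    print(resp.headers['Vary'])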
diff --git a/api-guide/source/server_concepts.rst b/api-guide/source/server_concepts.rst
index a2317f50bc..d836debd2b 100644
--- a/api-guide/source/server_concepts.rst
+++ b/api-guide/source/server_concepts.rst
@@ -125,8 +125,260 @@ operations on the server.
Server query
~~~~~~~~~~~~
-TODO: We should introduce that there are multiple methods to filter the
-response of list servers.
+Nova allows both general users and administrators to filter the server
+query result by using query options.
+
+For general users, ``reservation_id``, ``name``, ``status``, ``image``,
+``flavor``, ``ip``, ``changes-since``, ``all_tenants`` and
+``ip6`` (microversion 2.5) are the supported options. Any other
+option is silently ignored by nova, with only a debug log.
+
+For administrators, more fields can be used. Usually a filter operates
+on the database schema definition of ``class Instance``; e.g. since there
+is a field named 'locked' in the schema, the filter can use 'locked' as a
+search option to filter servers. Also, there are some special options,
+such as ``changes-since``, which are interpreted by nova itself.
+
+- **General user & administrator supported options**
+  The options supported for general users are listed above; an administrator
+  can use almost all of the options as well, except the parameters used for
+  sorting and pagination.
+
+.. code::
+
+    Precondition:
+    there are 2 servers in the cloud with the following info:
+
+ "servers":[
+ {
+ "name": "t1",
+ "locked": "true",
+ ...
+ }
+ {
+ "name":"t2",
+ "locked": "false",
+ ...
+    }
+    ]
+
+ **Example: General user query server with administrator only options**
+
+.. code::
+
+    Request with non-administrator context:
+    GET /servers/detail?locked=1
+    Note that 'locked' is not returned through the API layer
+
+ Response:
+ {
+ "servers":[
+ {
+ "name": "t1",
+ ...
+ }
+ {
+ "name":"t2",
+ ...
+ }
+ ]
+ }
+
+ **Example: Administrator query server with administrator only options**
+
+.. code::
+
+ Request with administrator context:
+ GET /servers/detail?locked=1
+
+ Response:
+ {
+ "servers":[
+ {
+ "name": "t1",
+ ...
+ }
+ ]
+ }
+
+- **Exact matching and regex matching of the search options**
+
+  Depending on the name of a filter, matching for that filter is performed
+  using either exact matching or regular expression matching.
+  ``project_id``, ``user_id``, ``image_ref``, ``vm_state``,
+  ``instance_type_id``, ``uuid``, ``metadata``, ``host`` and
+  ``system_metadata`` are the options that use exact matching when
+  filtering; the remaining options are matched as regular expressions.
+
+ **Example: User query server using exact matching on host**
+
+.. code::
+
+ Precondition:
+ Request with administrator context:
+ GET /servers/detail
+
+ Response:
+
+ {
+ "servers":[
+ {
+ "name": "t1",
+ "OS-EXT-SRV-ATTR:host": "devstack"
+ ...
+ }
+ {
+ "name": "t2",
+ "OS-EXT-SRV-ATTR:host": "devstack1"
+ ...
+ }
+ ]
+ }
+
+ Request with administrator context:
+ GET /servers/detail?host=devstack
+
+ Response:
+
+ {
+ "servers":[
+ {
+ "name": "t1",
+ "OS-EXT-SRV-ATTR:host": "devstack"
+ ...
+ }
+ ]
+ }
+
+ **Example: Query server using regex matching on name**
+
+.. code::
+
+ Precondition:
+ Request with administrator context:
+ GET /servers/detail
+
+ Response:
+
+ {
+ "servers":[
+ {
+ "name": "test11",
+ ...
+ }
+ {
+ "name": "test21",
+ ...
+ }
+ {
+ "name": "t1",
+ ...
+ }
+ {
+ "name": "t14",
+ ...
+ }
+ ]
+ }
+
+ Request with administrator context:
+ GET /servers/detail?name=t1
+
+ Response:
+
+ {
+ "servers":[
+ {
+ "name": "test11",
+ ...
+ }
+ {
+ "name": "t1",
+ ...
+ }
+ {
+ "name": "t14",
+ ...
+ }
+ ]
+ }
+
+ **Example: User query server using exact matching on host and
+ regex matching on name**
+
+.. code::
+
+ Precondition:
+ Request with administrator context:
+ GET /servers/detail
+
+ Response:
+
+ {
+ "servers":[
+ {
+ "name": "test1",
+ "OS-EXT-SRV-ATTR:host": "devstack"
+ ...
+ }
+ {
+ "name": "t2",
+ "OS-EXT-SRV-ATTR:host": "devstack1"
+ ...
+ }
+ {
+ "name": "test3",
+ "OS-EXT-SRV-ATTR:host": "devstack1"
+ ...
+ }
+ ]
+ }
+
+ Request with administrator context:
+ GET /servers/detail?host=devstack1&name=test
+
+ Response:
+
+ {
+ "servers":[
+ {
+ "name": "test3",
+ "OS-EXT-SRV-ATTR:host": "devstack1"
+ ...
+ }
+ ]
+ }
+
+- **Special keys are used to tweak the query**
+  ``changes-since`` returns instances updated after the given time,
+  ``deleted`` returns (or excludes) deleted instances, and ``soft_deleted``
+  modifies the behavior of ``deleted`` to either include or exclude instances
+  whose vm_state is SOFT_DELETED. Please see:
+  :doc:`polling_changes-since_parameter`
+
+  **Example: User query server with the special key changes-since**
+
+.. code::
+
+ Precondition:
+ GET /servers/detail
+
+ Response:
+    {
+    "servers":[
+    {
+    "name": "t1",
+    "updated": "2015-12-15T15:55:52Z"
+    ...
+    }
+    {
+    "name": "t2",
+    "updated": "2015-12-17T15:55:52Z"
+    ...
+    }
+    ]
+    }
+
+ GET /servers/detail?changes-since='2015-12-16T15:55:52Z'
+
+ Response:
+    {
+    "servers":[
+    {
+    "name": "t2",
+    "updated": "2015-12-17T15:55:52Z"
+    ...
+    }
+    ]
+    }
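+
+Putting the filters together, a client-side query might look like this (a
+minimal sketch using the Python ``requests`` library; the endpoint URL and
+token are assumptions):
+
+.. code::
+
+    import requests
+
+    # Hypothetical endpoint and token for illustration only.
+    resp = requests.get(
+        'http://openstack.example.com/v2.1/servers/detail',
+        params={'host': 'devstack1', 'name': 'test'},
+        headers={'X-Auth-Token': 'TOKEN'})
+    for server in resp.json()['servers']:
+        print(server['name'])
+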
Server actions
~~~~~~~~~~~~~~~
@@ -173,7 +425,7 @@ Server actions
spawned in the virt layer and revert all changes, the original server
will still be used from then on.
- Also, there there is a periodic task configured by configuration option
+ Also, there is a periodic task configured by configuration option
resize_confirm_window(in seconds), if this value is not 0, nova compute
will check whether the server is in resized state longer than
value of resize_confirm_window, it will automatically confirm the resize
@@ -294,6 +546,13 @@ Server actions
Administrators may use this to evacuate servers from a host that needs to
undergo maintenance tasks.
+- **Trigger crash dump**
+
+  Trigger crash dump is usually utilized by either an administrator or the
+  server's owner. It injects an NMI into the given server, which typically
+  causes the server's kernel to dump its memory image to a file and then
+  reboot. This feature therefore depends on the server's NMI settings.
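+
+  For example, at microversion 2.17 the action can be requested as follows
+  (a minimal sketch using the Python ``requests`` library; the endpoint URL,
+  server id and token are assumptions):
+
+  .. code::
+
+      import requests
+
+      # Hypothetical endpoint, server id and token for illustration only.
+      requests.post(
+          'http://openstack.example.com/v2.1/servers/SERVER_ID/action',
+          json={'trigger_crash_dump': None},
+          headers={'X-Auth-Token': 'TOKEN',
+                   'X-OpenStack-Nova-API-Version': '2.17'})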
+
Server passwords
~~~~~~~~~~~~~~~~
diff --git a/api-guide/source/versions.rst b/api-guide/source/versions.rst
index 9ca2feeae7..9399044b44 100644
--- a/api-guide/source/versions.rst
+++ b/api-guide/source/versions.rst
@@ -85,30 +85,23 @@ links and MIME types to available versions.
]
}
-The API with ``CURRENT`` status is the newest API and continue improved by the
-Nova project. The API with ``SUPPORTED`` is the old API and new features is
-frozen. The API with ``DEPRECATED`` status is the API will be removed in the
+The API with ``CURRENT`` status is the newest API and continues to be improved by the
+Nova project. The API with ``SUPPORTED`` status is the old API, where new features are
+frozen. The API with ``DEPRECATED`` status is the API that will be removed in the
foreseeable future. Providers should work with developers and partners to
ensure there is adequate time to migrate to the new version before deprecated
-versions are discontinued. For any API which is under development and didn't
-release yet, the API status is ``EXPERIMENTAL``.
+versions are discontinued. For any API which is under development but not
+yet released, the API status is ``EXPERIMENTAL``.
Your application can programmatically determine available API versions
by performing a **GET** on the root URL (i.e. with the version and
-everything to the right of it truncated) returned from the
-authentication system.
+everything following that truncated) returned from the authentication system.
You can also obtain additional information about a specific version by
performing a **GET** on the base version URL (such as,
``https://servers.api.openstack.org/v2.1/``). Version request URLs must
always end with a trailing slash (``/``). If you omit the slash, the
-server might respond with a 302 redirection request. Format extensions
-can be placed after the slash (such as,
-``https://servers.api.openstack.org/v2.1/.json``).
-
-.. note:: This special case does not hold true for other API requests. In
- general, requests such as ``/servers.json`` and ``/servers/.json`` are
- handled equivalently.
+server might respond with a 302 redirection request.
For examples of the list versions and get version details requests and
responses, see `*API versions*
@@ -118,4 +111,3 @@ The detailed version response contains pointers to both a human-readable
and a machine-processable description of the API service. The
machine-processable description is written in the Web Application
Description Language (WADL).
-
diff --git a/devstack/tempest-dsvm-cells-rc b/devstack/tempest-dsvm-cells-rc
index 15a9ff94f5..f2b97f9d96 100644
--- a/devstack/tempest-dsvm-cells-rc
+++ b/devstack/tempest-dsvm-cells-rc
@@ -76,6 +76,7 @@ r="$r|(?:tempest\.scenario\.test_encrypted_cinder_volumes\.TestEncryptedCinderVo
r="$r|(?:tempest\.thirdparty\.boto\.test_ec2_network\.EC2NetworkTest\.test_disassociate_not_associated_floating_ip)"
r="$r|(?:tempest\.scenario\.test_server_basic_ops\.TestServerBasicOps\.test_server_basicops)"
r="$r|(?:tempest\.scenario\.test_snapshot_pattern\.TestSnapshotPattern\.test_snapshot_pattern)"
+r="$r|(?:tempest\.scenario\.test_rebuild_instance_with_volume\.TestRebuildInstanceWithVolume\.test_rebuild_instance_with_volume)"
r="$r|(?:tempest\.api\.compute\.admin\.test_hosts\.HostsAdminTestJSON\.test_show_host_detail)"
r="$r|(?:tempest\.api\.compute\.test_tenant_networks\.ComputeTenantNetworksTest\.test_list_show_tenant_networks)"
# https://bugs.launchpad.net/nova/+bug/1489581
diff --git a/devstack/tempest-dsvm-lxc-rc b/devstack/tempest-dsvm-lxc-rc
index 9ed53b3d53..6b2820684f 100644
--- a/devstack/tempest-dsvm-lxc-rc
+++ b/devstack/tempest-dsvm-lxc-rc
@@ -30,6 +30,20 @@
r="^(?!.*"
r="$r(?:.*\[.*\bslow\b.*\])"
+
+# NOTE(thomasem): Skipping these tests due to Ubuntu bug:
+# https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1536280.
+# These exclusions should be able to be removed once that bug is addressed.
+r="$r|(?:tempest\.api\.compute\.servers\.test_server_personality\.ServerPersonalityTestJSON\.test_rebuild_server_with_personality)"
+r="$r|(?:tempest\.api\.compute\.admin\.test_servers\.ServersAdminTestJSON\.test_rebuild_server_in_error_state)"
+r="$r|(?:tempest\.api\.compute\.servers\.test_list_server_filters\.ListServerFiltersTestJSON\.test_list_servers_filter_by_shutoff_status)"
+r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_lock_unlock_server)"
+r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_rebuild_server)"
+r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_rebuild_server_in_stop_state)"
+r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_stop_start_server)"
+r="$r|(?:tempest\.api\.compute\.servers\.test_server_actions\.ServerActionsTestJSON\.test_get_console_output_server_id_in_shutoff_status)"
+r="$r|(?:tempest\.api\.compute\.servers\.test_servers\.ServersTestJSON\.test_update_server_name_in_stop_state)"
+r="$r|(?:tempest\.api\.compute\.servers\.test_disk_config\.ServerDiskConfigTestJSON*)"
r="$r|(?:tempest\.api\.compute\.servers\.test_delete_server\.DeleteServersTestJSON*)"
r="$r).*$"
diff --git a/doc/api_samples/all_extensions/server-post-req.json b/doc/api_samples/all_extensions/server-create-req.json
index 439f9b97ac..439f9b97ac 100644
--- a/doc/api_samples/all_extensions/server-post-req.json
+++ b/doc/api_samples/all_extensions/server-create-req.json
diff --git a/doc/api_samples/all_extensions/server-post-resp.json b/doc/api_samples/all_extensions/server-create-resp.json
index d20092ebc5..d20092ebc5 100644
--- a/doc/api_samples/all_extensions/server-post-resp.json
+++ b/doc/api_samples/all_extensions/server-create-resp.json
diff --git a/doc/api_samples/os-access-ips/server-put-req.json b/doc/api_samples/os-access-ips/server-put-req.json
deleted file mode 100644
index e0af5a9e49..0000000000
--- a/doc/api_samples/os-access-ips/server-put-req.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "server": {
- "accessIPv4": "4.3.2.1",
- "accessIPv6": "80fe::"
- }
-} \ No newline at end of file
diff --git a/doc/api_samples/os-disk-config/server-update-put-req.json b/doc/api_samples/os-disk-config/server-update-put-req.json
deleted file mode 100644
index 898ab886ad..0000000000
--- a/doc/api_samples/os-disk-config/server-update-put-req.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "server": {
- "OS-DCF:diskConfig": "AUTO"
- }
-} \ No newline at end of file
diff --git a/doc/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json b/doc/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json
new file mode 100644
index 0000000000..bbcc25fc95
--- /dev/null
+++ b/doc/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json
@@ -0,0 +1,69 @@
+{
+ "server": {
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-16T02:55:07Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "3bf189131c61d0e71b0a8686a897a0f50d1693b48c47b721fe77155b",
+ "id": "c278163e-36f9-4cf2-b1ac-80db4c63f7a8",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/c278163e-36f9-4cf2-b1ac-80db4c63f7a8",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/c278163e-36f9-4cf2-b1ac-80db4c63f7a8",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "OS-EXT-SRV-ATTR:host": "c5f474bf81474f9dbbc404d5b2e4e9b3",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:reservation_id": "r-12345678",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:kernel_id": null,
+ "OS-EXT-SRV-ATTR:ramdisk_id": null,
+ "OS-EXT-SRV-ATTR:user_data": null,
+ "locked": false,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "progress": 0,
+ "status": "ACTIVE",
+ "host_status": "UP",
+ "tenant_id": "openstack",
+ "updated": "2013-09-16T02:55:08Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json b/doc/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json
new file mode 100644
index 0000000000..47c163a194
--- /dev/null
+++ b/doc/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json
@@ -0,0 +1,71 @@
+{
+ "servers": [
+ {
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-16T02:55:03Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "63cf07a9fd82e1d2294926ec5c0d2e1e0ca449224246df75e16f23dc",
+ "id": "a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/a8c1c13d-ec7e-47c7-b4ff-077f72c1ca46",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "OS-EXT-SRV-ATTR:host": "bc8efe4fdb7148a4bb921a2b03d17de6",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "fake-mini",
+ "OS-EXT-SRV-ATTR:instance_name": "instance-00000001",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:reservation_id": "r-12345678",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:kernel_id": null,
+ "OS-EXT-SRV-ATTR:ramdisk_id": null,
+ "OS-EXT-SRV-ATTR:user_data": null,
+ "locked": false,
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "progress": 0,
+ "status": "ACTIVE",
+ "host_status": "UP",
+ "tenant_id": "openstack",
+ "updated": "2013-09-16T02:55:05Z",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl b/doc/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json
index f259deefdb..cbb6236c4e 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/instance-instance-get-resp.json.tpl
+++ b/doc/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json
@@ -24,4 +24,4 @@
"start_time": "2012-12-05T00:00:00.000000",
"user_id": "789"
}
-} \ No newline at end of file
+}
diff --git a/doc/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json b/doc/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json
new file mode 100644
index 0000000000..b9ae31ac53
--- /dev/null
+++ b/doc/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json
@@ -0,0 +1,22 @@
+{
+ "instanceActions": [
+ {
+ "action": "resize",
+ "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13",
+ "message": "",
+ "project_id": "842",
+ "request_id": "req-25517360-b757-47d3-be45-0e8d2a01b36a",
+ "start_time": "2012-12-05T01:00:00.000000",
+ "user_id": "789"
+ },
+ {
+ "action": "reboot",
+ "instance_uuid": "b48316c5-71e8-45e4-9884-6c78055b9b13",
+ "message": "",
+ "project_id": "147",
+ "request_id": "req-3293a3f1-b44c-4609-b8d2-d81b105636b8",
+ "start_time": "2012-12-05T00:00:00.000000",
+ "user_id": "789"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-server-groups/v2.13/server-groups-get-resp.json b/doc/api_samples/os-server-groups/v2.13/server-groups-get-resp.json
index e4f7f7d707..4f9e2248fc 100644
--- a/doc/api_samples/os-server-groups/v2.13/server-groups-get-resp.json
+++ b/doc/api_samples/os-server-groups/v2.13/server-groups-get-resp.json
@@ -5,7 +5,7 @@
"policies": ["anti-affinity"],
"members": [],
"metadata": {},
- "project_id": "c7c9f4f175e247acb56c108fd724d667",
+ "project_id": "openstack",
"user_id": "fake"
}
}
diff --git a/doc/api_samples/os-server-groups/v2.13/server-groups-list-resp.json b/doc/api_samples/os-server-groups/v2.13/server-groups-list-resp.json
index bc1dab68ca..d330e4df46 100644
--- a/doc/api_samples/os-server-groups/v2.13/server-groups-list-resp.json
+++ b/doc/api_samples/os-server-groups/v2.13/server-groups-list-resp.json
@@ -6,7 +6,7 @@
"policies": ["anti-affinity"],
"members": [],
"metadata": {},
- "project_id": "c7c9f4f175e247acb56c108fd724d667",
+ "project_id": "openstack",
"user_id": "fake"
}
]
diff --git a/doc/api_samples/os-server-groups/v2.13/server-groups-post-resp.json b/doc/api_samples/os-server-groups/v2.13/server-groups-post-resp.json
index e4f7f7d707..4f9e2248fc 100644
--- a/doc/api_samples/os-server-groups/v2.13/server-groups-post-resp.json
+++ b/doc/api_samples/os-server-groups/v2.13/server-groups-post-resp.json
@@ -5,7 +5,7 @@
"policies": ["anti-affinity"],
"members": [],
"metadata": {},
- "project_id": "c7c9f4f175e247acb56c108fd724d667",
+ "project_id": "openstack",
"user_id": "fake"
}
}
diff --git a/doc/api_samples/server-migrations/force_complete.json b/doc/api_samples/server-migrations/force_complete.json
new file mode 100644
index 0000000000..e2adb7b5a0
--- /dev/null
+++ b/doc/api_samples/server-migrations/force_complete.json
@@ -0,0 +1,3 @@
+{
+ "force_complete": null
+}
diff --git a/doc/api_samples/server-migrations/live-migrate-server.json b/doc/api_samples/server-migrations/live-migrate-server.json
new file mode 100644
index 0000000000..251863d785
--- /dev/null
+++ b/doc/api_samples/server-migrations/live-migrate-server.json
@@ -0,0 +1,7 @@
+{
+ "os-migrateLive": {
+ "host": "01c0cadef72d47e28a672a76060d492c",
+ "block_migration": false,
+ "disk_over_commit": false
+ }
+}
diff --git a/doc/api_samples/servers/server-create-req.json b/doc/api_samples/servers/server-create-req.json
new file mode 100644
index 0000000000..ca53f3a7c2
--- /dev/null
+++ b/doc/api_samples/servers/server-create-req.json
@@ -0,0 +1,12 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/doc/api_samples/servers/server-create-resp.json b/doc/api_samples/servers/server-create-resp.json
new file mode 100644
index 0000000000..c60685f4a8
--- /dev/null
+++ b/doc/api_samples/servers/server-create-resp.json
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "6NpUwoz2QDRN",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/doc/api_samples/servers/server-update-req.json b/doc/api_samples/servers/server-update-req.json
new file mode 100644
index 0000000000..8e24d867f8
--- /dev/null
+++ b/doc/api_samples/servers/server-update-req.json
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "4.3.2.1",
+ "accessIPv6": "80fe::",
+ "OS-DCF:diskConfig": "AUTO",
+ "name" : "new-server-test"
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/os-disk-config/server-update-put-resp.json b/doc/api_samples/servers/server-update-resp.json
index ce6e08455a..11496a66e6 100644
--- a/doc/api_samples/os-disk-config/server-update-put-resp.json
+++ b/doc/api_samples/servers/server-update-resp.json
@@ -1,8 +1,8 @@
{
"server": {
"OS-DCF:diskConfig": "AUTO",
- "accessIPv4": "",
- "accessIPv6": "",
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
"addresses": {
"private": [
{
diff --git a/doc/api_samples/servers/v2.16/server-get-resp.json b/doc/api_samples/servers/v2.16/server-get-resp.json
new file mode 100644
index 0000000000..7b09e16463
--- /dev/null
+++ b/doc/api_samples/servers/v2.16/server-get-resp.json
@@ -0,0 +1,58 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "92154fab69d5883ba2c8622b7e65f745dd33257221c07af363c51b29",
+ "id": "0e44cc9c-e052-415d-afbf-469b0d384170",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/0e44cc9c-e052-415d-afbf-469b0d384170",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "host_status": "UP",
+ "tenant_id": "openstack",
+ "updated": "2013-09-03T04:01:33Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.16/servers-details-resp.json b/doc/api_samples/servers/v2.16/servers-details-resp.json
new file mode 100644
index 0000000000..5256384042
--- /dev/null
+++ b/doc/api_samples/servers/v2.16/servers-details-resp.json
@@ -0,0 +1,60 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2013-09-03T04:01:32Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "bcf92836fc9ed4203a75cb0337afc7f917d2be504164b995c2334b25",
+ "id": "f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/f5dc173b-6804-445a-a6d8-c705dad5b5eb",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "host_status": "UP",
+ "tenant_id": "openstack",
+ "updated": "2013-09-03T04:01:32Z",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/v2.17/server-action-trigger-crash-dump.json b/doc/api_samples/servers/v2.17/server-action-trigger-crash-dump.json
new file mode 100644
index 0000000000..968c7c05e8
--- /dev/null
+++ b/doc/api_samples/servers/v2.17/server-action-trigger-crash-dump.json
@@ -0,0 +1,3 @@
+{
+ "trigger_crash_dump": null
+}
diff --git a/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json b/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json
new file mode 100644
index 0000000000..bf182c5fa1
--- /dev/null
+++ b/doc/api_samples/servers/v2.19/server-action-rebuild-resp.json
@@ -0,0 +1,57 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "seekr3t",
+ "created": "2013-11-14T06:29:00Z",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "28d8d56f0e3a77e20891f455721cbb68032e017045e20aa5dfc6cb66",
+ "id": "a0a80a94-3d81-4a10-822a-daa0cf9e870b",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/a0a80a94-3d81-4a10-822a-daa0cf9e870b",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "foobar",
+ "description" : "description of foobar",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2013-11-14T06:29:02Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.19/server-action-rebuild.json b/doc/api_samples/servers/v2.19/server-action-rebuild.json
new file mode 100644
index 0000000000..76dbb0f6f5
--- /dev/null
+++ b/doc/api_samples/servers/v2.19/server-action-rebuild.json
@@ -0,0 +1,13 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "1.2.3.4",
+ "accessIPv6" : "80fe::",
+ "imageRef" : "70a599e0-31e7-49b7-b260-868f441e862b",
+ "name" : "foobar",
+ "description" : "description of foobar",
+ "adminPass" : "seekr3t",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ }
+ }
+}
diff --git a/doc/api_samples/servers/v2.19/server-get-resp.json b/doc/api_samples/servers/v2.19/server-get-resp.json
new file mode 100644
index 0000000000..8acab59366
--- /dev/null
+++ b/doc/api_samples/servers/v2.19/server-get-resp.json
@@ -0,0 +1,59 @@
+{
+ "server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2015-12-07T17:24:14Z",
+ "description": "new-server-description",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "c656e68b04b483cfc87cdbaa2346557b174ec1cb6be6afbd2a0133a0",
+ "id": "ddb205dc-717e-496e-8e96-88a3b31b075d",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/ddb205dc-717e-496e-8e96-88a3b31b075d",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/ddb205dc-717e-496e-8e96-88a3b31b075d",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2015-12-07T17:24:15Z",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/servers/v2.19/server-post-req.json b/doc/api_samples/servers/v2.19/server-post-req.json
new file mode 100644
index 0000000000..24cdb9c2e5
--- /dev/null
+++ b/doc/api_samples/servers/v2.19/server-post-req.json
@@ -0,0 +1,13 @@
+{
+ "server" : {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "name" : "new-server-test",
+ "description" : "new-server-description",
+ "imageRef" : "http://glance.openstack.example.com/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "flavorRef" : "http://openstack.example.com/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/servers/v2.19/server-post-resp.json b/doc/api_samples/servers/v2.19/server-post-resp.json
new file mode 100644
index 0000000000..5994b55f6c
--- /dev/null
+++ b/doc/api_samples/servers/v2.19/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "rySfUy7xL4C5",
+ "id": "19923676-e78b-46fb-af62-a5942aece2ac",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/19923676-e78b-46fb-af62-a5942aece2ac",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/19923676-e78b-46fb-af62-a5942aece2ac",
+ "rel": "bookmark"
+ }
+ ]
+ }
+} \ No newline at end of file
diff --git a/doc/api_samples/servers/v2.19/server-put-req.json b/doc/api_samples/servers/v2.19/server-put-req.json
new file mode 100644
index 0000000000..cf6ceef7b5
--- /dev/null
+++ b/doc/api_samples/servers/v2.19/server-put-req.json
@@ -0,0 +1,6 @@
+{
+ "server" : {
+ "name" : "updated-server-test",
+ "description" : "updated-server-description"
+ }
+}
diff --git a/doc/api_samples/os-access-ips/server-put-resp.json b/doc/api_samples/servers/v2.19/server-put-resp.json
index bf99434259..79c57982fa 100644
--- a/doc/api_samples/os-access-ips/server-put-resp.json
+++ b/doc/api_samples/servers/v2.19/server-put-resp.json
@@ -1,5 +1,7 @@
{
"server": {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
"addresses": {
"private": [
{
@@ -8,7 +10,8 @@
}
]
},
- "created": "2013-11-06T08:11:57Z",
+ "created": "2015-12-07T19:19:36Z",
+ "description": "updated-server-description",
"flavor": {
"id": "1",
"links": [
@@ -18,8 +21,8 @@
}
]
},
- "hostId": "ea0fd522e5bc2fea872429b331304a6f930f2d9aa2a5dc95b3c6061a",
- "id": "fea9595c-ce6e-4565-987e-2d301fe056ac",
+ "hostId": "4e17a358ca9bbc8ac6e215837b6410c0baa21b2463fefe3e8f712b31",
+ "id": "c509708e-f0c6-461f-b2b3-507547959eb2",
"image": {
"id": "70a599e0-31e7-49b7-b260-868f441e862b",
"links": [
@@ -31,24 +34,23 @@
},
"links": [
{
- "href": "http://openstack.example.com/v2/openstack/servers/fea9595c-ce6e-4565-987e-2d301fe056ac",
+ "href": "http://openstack.example.com/v2/openstack/servers/c509708e-f0c6-461f-b2b3-507547959eb2",
"rel": "self"
},
{
- "href": "http://openstack.example.com/openstack/servers/fea9595c-ce6e-4565-987e-2d301fe056ac",
+ "href": "http://openstack.example.com/openstack/servers/c509708e-f0c6-461f-b2b3-507547959eb2",
"rel": "bookmark"
}
],
+ "locked": false,
"metadata": {
"My Server Name": "Apache1"
},
- "name": "new-server-test",
- "accessIPv4": "4.3.2.1",
- "accessIPv6": "80fe::",
+ "name": "updated-server-test",
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
- "updated": "2013-11-06T08:11:58Z",
+ "updated": "2015-12-07T19:19:36Z",
"user_id": "fake"
}
-}
+} \ No newline at end of file
diff --git a/doc/api_samples/servers/v2.19/servers-details-resp.json b/doc/api_samples/servers/v2.19/servers-details-resp.json
new file mode 100644
index 0000000000..3f1a38c919
--- /dev/null
+++ b/doc/api_samples/servers/v2.19/servers-details-resp.json
@@ -0,0 +1,61 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "1.2.3.4",
+ "accessIPv6": "80fe::",
+ "addresses": {
+ "private": [
+ {
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "addr": "192.168.0.3",
+ "version": 4
+ }
+ ]
+ },
+ "created": "2015-12-07T19:54:48Z",
+ "description": "new-server-description",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "a672ab12738567bfcb852c846d66a6ce5c3555b42d73db80bdc6f1a4",
+ "id": "91965362-fd86-4543-8ce1-c17074d2984d",
+ "image": {
+ "id": "70a599e0-31e7-49b7-b260-868f441e862b",
+ "links": [
+ {
+ "href": "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/91965362-fd86-4543-8ce1-c17074d2984d",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/91965362-fd86-4543-8ce1-c17074d2984d",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "2015-12-07T19:54:49Z",
+ "user_id": "fake"
+ }
+ ]
+}
diff --git a/doc/api_samples/servers/v2.19/servers-list-resp.json b/doc/api_samples/servers/v2.19/servers-list-resp.json
new file mode 100644
index 0000000000..ffaccb8a7c
--- /dev/null
+++ b/doc/api_samples/servers/v2.19/servers-list-resp.json
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "78d95942-8805-4597-b1af-3d0e38330758",
+ "links": [
+ {
+ "href": "http://openstack.example.com/v2/openstack/servers/78d95942-8805-4597-b1af-3d0e38330758",
+ "rel": "self"
+ },
+ {
+ "href": "http://openstack.example.com/openstack/servers/78d95942-8805-4597-b1af-3d0e38330758",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json
index cba453de14..2f23b09a08 100644
--- a/doc/api_samples/versions/v21-version-get-resp.json
+++ b/doc/api_samples/versions/v21-version-get-resp.json
@@ -19,7 +19,7 @@
}
],
"status": "CURRENT",
- "version": "2.15",
+ "version": "2.22",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json
index fea7835f2a..c37948ab98 100644
--- a/doc/api_samples/versions/versions-get-resp.json
+++ b/doc/api_samples/versions/versions-get-resp.json
@@ -22,7 +22,7 @@
}
],
"status": "CURRENT",
- "version": "2.15",
+ "version": "2.22",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/ext/support_matrix.py b/doc/ext/support_matrix.py
index 2cad277d68..67e9b44dfb 100644
--- a/doc/ext/support_matrix.py
+++ b/doc/ext/support_matrix.py
@@ -115,9 +115,8 @@ class SupportMatrixTarget(object):
class SupportMatrixDirective(rst.Directive):
- option_spec = {
- 'support-matrix': six.text_type,
- }
+ # The argument is the filename, e.g. support-matrix.ini
+ required_arguments = 1
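+
+ # Example use from an .rst file (a sketch; assumes the directive is
+ # registered as "support_matrix" in setup()):
+ #
+ # .. support_matrix:: support-matrix.ini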
def run(self):
matrix = self._load_support_matrix()
@@ -132,8 +131,7 @@ class SupportMatrixDirective(rst.Directive):
cfg = configparser.SafeConfigParser()
env = self.state.document.settings.env
- fname = self.options.get("support-matrix",
- "support-matrix.ini")
+ fname = self.arguments[0]
rel_fpath, fpath = env.relfn2path(fname)
with open(fpath) as fp:
cfg.readfp(fp)
@@ -143,9 +141,17 @@ class SupportMatrixDirective(rst.Directive):
env.note_dependency(rel_fpath)
matrix = SupportMatrix()
+ matrix.targets = self._get_targets(cfg)
+ matrix.features = self._get_features(cfg, matrix.targets)
+ return matrix
+
+ def _get_targets(self, cfg):
# The 'targets' section is special - it lists all the
# hypervisors that this file records data for
+
+ targets = {}
+
for item in cfg.options("targets"):
if not item.startswith("driver-impl-"):
continue
@@ -176,10 +182,16 @@ class SupportMatrixDirective(rst.Directive):
raise Exception("'%s' field is malformed in '[%s]' section" %
(item, "DEFAULT"))
- matrix.targets[key] = target
+ targets[key] = target
+
+ return targets
+ def _get_features(self, cfg, targets):
# All sections except 'targets' describe some feature of
# the Nova hypervisor driver implementation
+
+ features = []
+
for section in cfg.sections():
if section == "targets":
continue
@@ -227,7 +239,7 @@ class SupportMatrixDirective(rst.Directive):
continue
key = item[12:]
- if key not in matrix.targets:
+ if key not in targets:
raise Exception(
"Driver impl '%s' in '[%s]' not declared" %
(item, section))
@@ -244,19 +256,19 @@ class SupportMatrixDirective(rst.Directive):
if cfg.has_option(section, noteskey):
notes = cfg.get(section, noteskey)
- target = matrix.targets[key]
+ target = targets[key]
impl = SupportMatrixImplementation(status,
notes)
feature.implementations[target.key] = impl
- for key in matrix.targets:
+ for key in targets:
if key not in feature.implementations:
raise Exception("'%s' missing in '[%s]' section" %
(target.key, section))
- matrix.features.append(feature)
+ features.append(feature)
- return matrix
+ return features
def _build_markup(self, matrix):
"""Constructs the docutils content for the support matrix
diff --git a/doc/ext/versioned_notifications.py b/doc/ext/versioned_notifications.py
new file mode 100644
index 0000000000..5f7fe5ed18
--- /dev/null
+++ b/doc/ext/versioned_notifications.py
@@ -0,0 +1,112 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+This provides a sphinx extension that lists the implemented versioned
+notifications in the developer documentation.
+
+It is used via a single directive in the .rst file
+
+ .. versioned_notifications::
+
+"""
+
+from sphinx.util.compat import Directive
+from docutils import nodes
+
+from nova.objects import base
+from nova.objects import notification
+
+
+def full_name(cls):
+ return cls.__module__ + '.' + cls.__name__
+
+
+class VersionedNotificationDirective(Directive):
+
+ LINK_PREFIX = 'https://git.openstack.org/cgit/openstack/nova/plain/'
+ SAMPLE_ROOT = 'doc/notification_samples/'
+
+ def run(self):
+ notifications = self._collect_notifications()
+ return self._build_markup(notifications)
+
+ def _collect_notifications(self):
+ notifications = []
+ ovos = base.NovaObjectRegistry.obj_classes()
+ for name, cls in ovos.items():
+ cls = cls[0]
+ if (issubclass(cls, notification.NotificationBase) and
+ cls != notification.NotificationBase):
+
+ payload_name = cls.fields['payload'].objname
+ payload_cls = ovos[payload_name][0]
+
+ notifications.append((full_name(cls), full_name(payload_cls),
+ cls.sample))
+ return notifications
+
+ def _build_markup(self, notifications):
+ content = []
+ cols = ['Notification class', 'Payload class', 'Sample file link']
+ table = nodes.table()
+ content.append(table)
+ group = nodes.tgroup(cols=len(cols))
+ table.append(group)
+
+ head = nodes.thead()
+ group.append(head)
+
+ for i in range(len(cols)):
+ group.append(nodes.colspec(colwidth=1))
+
+ body = nodes.tbody()
+ group.append(body)
+
+ # fill the table header
+ row = nodes.row()
+ body.append(row)
+ for col_name in cols:
+ col = nodes.entry()
+ row.append(col)
+ text = nodes.strong(text=col_name)
+ col.append(text)
+
+ # fill the table content, one notification per row
+ for name, payload, sample in notifications:
+ row = nodes.row()
+ body.append(row)
+ col = nodes.entry()
+ row.append(col)
+ text = nodes.literal(text=name)
+ col.append(text)
+
+ col = nodes.entry()
+ row.append(col)
+ text = nodes.literal(text=payload)
+ col.append(text)
+
+ col = nodes.entry()
+ row.append(col)
+ ref = nodes.reference(refuri=self.LINK_PREFIX +
+ self.SAMPLE_ROOT + sample)
+ txt = nodes.inline()
+ col.append(txt)
+ txt.append(ref)
+ ref.append(nodes.literal(text=sample))
+
+ return content
+
+
+def setup(app):
+ app.add_directive('versioned_notifications',
+ VersionedNotificationDirective)
diff --git a/doc/notification_samples/service-update.json b/doc/notification_samples/service-update.json
new file mode 100644
index 0000000000..219dec9ae2
--- /dev/null
+++ b/doc/notification_samples/service-update.json
@@ -0,0 +1,21 @@
+{
+ "priority": "INFO",
+ "payload": {
+ "nova_object.namespace": "nova",
+ "nova_object.name": "ServiceStatusPayload",
+ "nova_object.version": "1.0",
+ "nova_object.data": {
+ "host": "host1",
+ "disabled": false,
+ "last_seen_up": "2012-10-29T13:42:05Z",
+ "binary": "nova-compute",
+ "topic": "compute",
+ "disabled_reason": null,
+ "report_count": 1,
+ "forced_down": false,
+ "version": 7
+ }
+ },
+ "event_type": "service.update",
+ "publisher_id": "nova-compute:host1"
+}
diff --git a/doc/source/code-review.rst b/doc/source/code-review.rst
index cbe2745108..bd32aecc22 100644
--- a/doc/source/code-review.rst
+++ b/doc/source/code-review.rst
@@ -268,3 +268,9 @@ Three sections are left intentionally unexplained (``prelude``, ``issues`` and
``other``). Those are targeted to be filled in close to the release time for
providing details about the soon-ish release. Don't use them unless you know
exactly what you are doing.
+
+
+Notifications
+=============
+* Every new notification type shall use the new versioned notification
+ infrastructure documented in :doc:`notifications`
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 797b09cbac..af23503ad2 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -35,6 +35,7 @@ extensions = ['sphinx.ext.autodoc',
'oslosphinx',
"ext.support_matrix",
'oslo_config.sphinxconfiggen',
+ 'ext.versioned_notifications'
]
config_generator_config_file = '../../etc/nova/nova-config-generator.conf'
diff --git a/doc/source/development.environment.rst b/doc/source/development.environment.rst
index 3cd0466490..ce5d044b00 100644
--- a/doc/source/development.environment.rst
+++ b/doc/source/development.environment.rst
@@ -83,16 +83,16 @@ Install the prerequisite packages.
On Ubuntu::
- sudo apt-get install python-dev libssl-dev python-pip git-core libxml2-dev libxslt-dev pkg-config libffi-dev libpq-dev libmysqlclient-dev graphviz libsqlite3-dev python-tox
+ sudo apt-get install python-dev libssl-dev python-pip git-core libxml2-dev libxslt-dev pkg-config libffi-dev libpq-dev libmysqlclient-dev graphviz libsqlite3-dev python-tox python3-dev python3 gettext
On Fedora-based distributions (e.g., Fedora/RHEL/CentOS/Scientific Linux)::
- sudo yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel postgresql-devel libffi-devel libvirt-devel graphviz sqlite-devel
+ sudo yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel postgresql-devel libffi-devel libvirt-devel graphviz sqlite-devel python3-devel python3 gettext
sudo pip-python install tox
On openSUSE-based distributions (SLES 12, openSUSE 13.1, Factory or Tumbleweed)::
- sudo zypper in gcc git libffi-devel libmysqlclient-devel libvirt-devel libxslt-devel postgresql-devel python-devel python-pip python-tox python-virtualenv
+ sudo zypper in gcc git libffi-devel libmysqlclient-devel libvirt-devel libxslt-devel postgresql-devel python-devel python-pip python-tox python-virtualenv python3-devel python3 gettext-runtime
Mac OS X Systems
diff --git a/doc/source/feature_classification.rst b/doc/source/feature_classification.rst
index 603560faeb..d51dadc054 100644
--- a/doc/source/feature_classification.rst
+++ b/doc/source/feature_classification.rst
@@ -110,7 +110,7 @@ With little testing there are likely to be many unknown bugs.
For a feature to be considered complete, we must have:
* Complete API docs (concept and REST call definition)
-* Complete Adminstrator docs
+* Complete Administrator docs
* Tempest tests that define if the feature works correctly
* Has enough functionality, and works reliably enough to be useful
in real world scenarios
diff --git a/doc/source/filter_scheduler.rst b/doc/source/filter_scheduler.rst
index d3fbe0bd0f..e69e4a1d6a 100644
--- a/doc/source/filter_scheduler.rst
+++ b/doc/source/filter_scheduler.rst
@@ -379,6 +379,9 @@ The Filter Scheduler weighs hosts based on the config option
Sort with the largest weight winning. If the multiplier is negative, the
host with least RAM available will win (useful for stacking hosts, instead
of spreading).
+* |DiskWeigher| Hosts are weighted and sorted by free disk space with the largest
+ weight winning. If the multiplier is negative, the host with the least disk
+ space available will win (useful for stacking hosts, instead of spreading).
* |MetricsWeigher| This weigher can compute the weight based on the compute node
host's various metrics. The to-be weighed metrics and their weighing ratio
are specified in the configuration file as the followings::
@@ -455,3 +458,4 @@ in :mod:`nova.tests.scheduler`.
.. |IoOpsWeigher| replace:: :class:`IoOpsWeigher <nova.scheduler.weights.io_ops.IoOpsWeigher>`
.. |ServerGroupSoftAffinityWeigher| replace:: :class:`ServerGroupSoftAffinityWeigher <nova.scheduler.weights.affinity.ServerGroupSoftAffinityWeigher>`
.. |ServerGroupSoftAntiAffinityWeigher| replace:: :class:`ServerGroupSoftAntiAffinityWeigher <nova.scheduler.weights.affinity.ServerGroupSoftAntiAffinityWeigher>`
+.. |DiskWeigher| replace:: :class:`DiskWeigher <nova.scheduler.weights.disk.DiskWeigher>`
diff --git a/doc/source/index.rst b/doc/source/index.rst
index e46843ad75..abb09e2521 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -157,6 +157,7 @@ Open Development.
block_device_mapping
addmethod.openstackapi
conductor
+ notifications
Architecture Evolution Plans
-----------------------------
diff --git a/doc/source/notifications.rst b/doc/source/notifications.rst
new file mode 100644
index 0000000000..f8d6579783
--- /dev/null
+++ b/doc/source/notifications.rst
@@ -0,0 +1,277 @@
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+Notifications in Nova
+=====================
+Similarly to other OpenStack services, Nova emits notifications to the message
+bus with the Notifier class provided by oslo.messaging [1]_. From the
+notification consumer point of view a notification consists of two parts: an
+envelope with a fixed structure defined by oslo.messaging and a payload defined
+by the service emitting the notification. The envelope format is the
+following::
+
+ {
+ "priority": <string, selected from a predefined list by the sender>,
+ "event_type": <string, defined by the sender>,
+ "timestamp": <string, the isotime of when the notification emitted>,
+ "publisher_id": <string, defined by the sender>,
+ "message_id": <uuid, generated by oslo>,
+ "payload": <json serialized dict, defined by the sender>
+ }
+
+There are two types of notifications in Nova: legacy notifications which have
+an unversioned payload and newer notifications which have a versioned payload.
+
+Unversioned notifications
+-------------------------
+Nova code uses the nova.rpc.get_notifier call to get a configured
+oslo.messaging Notifier object and it uses the oslo provided functions on the
+Notifier object to emit notifications. The configuration of the returned
+Notifier object depends on the parameters of the get_notifier call and the
+value of the oslo.messaging configuration options `notification_driver` and
+`notification_topics`. There are notification configuration options in Nova
+which are specific for certain notification types like
+`notify_on_state_change`, `notify_api_faults`, `default_notification_level`,
+etc.
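+
+A minimal sketch of emitting such a legacy notification (the event type and
+payload contents are illustrative)::
+
+ notifier = nova.rpc.get_notifier(service='compute', host='host1')
+ notifier.info(context, 'compute.instance.update',
+ {'display_name': 'myserver', 'vm_state': 'active'})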
+
+The structure of the payload of the unversioned notifications is defined in the
+code that emits the notification and no documentation or enforced backward
+compatibility contract exists for that format.
+
+
+Versioned notifications
+-----------------------
+The versioned notification concept was created to fix the shortcomings of the
+unversioned notifications. The envelope structure of the emitted notification
+is the same as in the unversioned notification case as it is provided by
+oslo.messaging. However, the payload is not a free-form dictionary but a
+serialized oslo versionedobject [2]_.
+
+.. _service.update:
+
+For example the wire format of the `service.update` notification looks like the
+following::
+
+ {
+ "priority":"INFO",
+ "payload":{
+ "nova_object.namespace":"nova",
+ "nova_object.name":"ServiceStatusPayload",
+ "nova_object.version":"1.0",
+ "nova_object.data":{
+ "host":"host1",
+ "disabled":false,
+ "last_seen_up":null,
+ "binary":"nova-compute",
+ "topic":"compute",
+ "disabled_reason":null,
+ "report_count":1,
+ "forced_down":false,
+ "version":2
+ }
+ },
+ "event_type":"service.update",
+ "publisher_id":"nova-compute:host1"
+ }
+
+The serialized oslo versionedobject as a payload provides a version number to
+the consumer so the consumer can detect if the structure of the payload has
+changed. Nova provides the following contract regarding the versioned
+notification payload:
+
+* the payload version defined by the `nova_object.version` field of the
+ payload will be increased if and only if the syntax or the semantics of the
+ `nova_object.data` field of the payload is changed.
+* a minor version bump indicates a backward compatible change which means that
+ only new fields are added to the payload so a well written consumer can still
+ consume the new payload without any change.
+* a major version bump indicates a backward incompatible change of the payload
+ which can mean removed fields, type changes, etc., in the payload.
+
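+For example (version numbers illustrative): adding a new optional field to a
+payload bumps its minor version, say from 1.0 to 1.1, while removing a field
+or changing a field's type requires a major bump to 2.0.
+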
+There is a Nova configuration parameter `notification_format` that can be used
+to specify which notifications are emitted by Nova. The possible values are
+`unversioned`, `versioned`, `both` and the default value is `both`.
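+
+For example, to emit only the new-style notifications, a minimal ``nova.conf``
+sketch (the ``[DEFAULT]`` section placement is an assumption)::
+
+ [DEFAULT]
+ notification_format = versioned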
+
+How to add a new versioned notification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To support the above contract, every versioned notification in the Nova code
+is modeled with oslo versionedobjects. Every versioned notification class
+shall inherit from `nova.objects.notification.NotificationBase`, which
+already defines three mandatory fields of the notification: `event_type`,
+`publisher_id` and `priority`.
+`payload` with an appropriate payload type. The payload object of the
+notifications shall inherit from the
+`nova.objects.notification.NotificationPayloadBase` class and shall define the
+fields of the payload as versionedobject fields. The base classes are described
+in [3]_.
+
+The following code example defines the necessary model classes for a new
+notification `myobject.update`::
+
+ @notification.notification_sample('myobject-update.json')
+ @base.NovaObjectRegistry.register
+ class MyObjectNotification(notification.NotificationBase):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'payload': fields.ObjectField('MyObjectUpdatePayload')
+ }
+
+
+ @base.NovaObjectRegistry.register
+ class MyObjectUpdatePayload(notification.NotificationPayloadBase):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+ fields = {
+ 'some_data': fields.StringField(),
+ 'another_data': fields.StringField(),
+ }
+
+
+After that the notification can be populated and emitted with the following
+code::
+
+ payload = MyObjectUpdatePayload(some_data="foo", another_data="bar")
+ MyObjectNotification(
+ publisher=notification.NotificationPublisher.from_service_obj(
+ <nova.objects.service.Service instance that emits the notification>),
+ event_type=notification.EventType(
+ object='myobject',
+ action=fields.NotificationAction.UPDATE),
+ priority=fields.NotificationPriority.INFO,
+ payload=payload).emit(context)
+
+The above code will generate the following notification on the wire::
+
+ {
+ "priority":"INFO",
+ "payload":{
+ "nova_object.namespace":"nova",
+ "nova_object.name":"MyObjectUpdatePayload",
+ "nova_object.version":"1.0",
+ "nova_object.data":{
+ "some_data":"foo",
+ "another_data":"bar",
+ }
+ },
+ "event_type":"myobject.update",
+ "publisher_id":"<the name of the service>:<the host where the service runs>"
+ }
+
+
+It is possible to reuse an existing versionedobject as a notification
+payload by adding a `SCHEMA` field for the payload class that defines a mapping
+between the fields of existing objects and the fields of the new payload
+object. For example, the `service.update` notification reuses the existing
+`nova.objects.service.Service` object when defining the notification's payload::
+
+ @notification.notification_sample('service-update.json')
+ @base.NovaObjectRegistry.register
+ class ServiceStatusNotification(notification.NotificationBase):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'payload': fields.ObjectField('ServiceStatusPayload')
+ }
+
+ @base.NovaObjectRegistry.register
+ class ServiceStatusPayload(notification.NotificationPayloadBase):
+ SCHEMA = {
+ 'host': ('service', 'host'),
+ 'binary': ('service', 'binary'),
+ 'topic': ('service', 'topic'),
+ 'report_count': ('service', 'report_count'),
+ 'disabled': ('service', 'disabled'),
+ 'disabled_reason': ('service', 'disabled_reason'),
+ 'availability_zone': ('service', 'availability_zone'),
+ 'last_seen_up': ('service', 'last_seen_up'),
+ 'forced_down': ('service', 'forced_down'),
+ 'version': ('service', 'version')
+ }
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+ fields = {
+ 'host': fields.StringField(nullable=True),
+ 'binary': fields.StringField(nullable=True),
+ 'topic': fields.StringField(nullable=True),
+ 'report_count': fields.IntegerField(),
+ 'disabled': fields.BooleanField(),
+ 'disabled_reason': fields.StringField(nullable=True),
+ 'availability_zone': fields.StringField(nullable=True),
+ 'last_seen_up': fields.DateTimeField(nullable=True),
+ 'forced_down': fields.BooleanField(),
+ 'version': fields.IntegerField(),
+ }
+
+ def populate_schema(self, service):
+ super(ServiceStatusPayload, self).populate_schema(service=service)
+
+If the `SCHEMA` field is defined then the payload object needs to be populated
+with the `populate_schema` call before it can be emitted::
+
+ payload = ServiceStatusPayload()
+ payload.populate_schema(service=<nova.object.service.Service object>)
+ ServiceStatusNotification(
+ publisher=notification.NotificationPublisher.from_service_obj(
+ <nova.object.service.Service object>),
+ event_type=notification.EventType(
+ object='service',
+ action=fields.NotificationAction.UPDATE),
+ priority=fields.NotificationPriority.INFO,
+ payload=payload).emit(context)
+
+The above code will emit the :ref:`already shown notification<service.update>`
+on the wire.
+
+Every item in the `SCHEMA` has the syntax of::
+
+ <payload field name which needs to be filled>:
+ (<name of the parameter of the populate_schema call>,
+ <the name of a field of the parameter object>)
+
+The mapping defined in the `SCHEMA` field has the following semantics. When
+the `populate_schema` function is called, the content of the `SCHEMA` field is
+enumerated and the value of the field of the pointed parameter object is copied
+to the requested payload field. So in the above example the `host` field of
+the payload object is populated from the value of the `host` field of the
+`service` object that is passed as a parameter to the `populate_schema` call.
+
+A notification payload object can reuse fields from multiple existing
+objects. Also a notification can have both new and reused fields in its
+payload.
+
+Note that the notification's publisher instance can be created in two
+different ways. It can be created by instantiating the `NotificationPublisher`
+object with a `host` and a `binary` string parameter, or it can be generated
+from a `Service` object by calling the `NotificationPublisher.from_service_obj`
+function.
+
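+A short sketch of the two options (values are illustrative)::
+
+ publisher = notification.NotificationPublisher(host='host1',
+ binary='nova-compute')
+ # or, generated from an existing Service object:
+ publisher = notification.NotificationPublisher.from_service_obj(service)
+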
+Versioned notifications shall have a sample file stored under the
+`doc/notification_samples` directory and the notification object shall be
+decorated with the `notification_sample` decorator. For example, the
+`service.update` notification has a sample file stored in
+`doc/notification_samples/service-update.json` and the
+ServiceStatusNotification class is decorated accordingly.
+
+Existing versioned notifications
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. versioned_notifications::
+
+
+
+.. [1] http://docs.openstack.org/developer/oslo.messaging/notifier.html
+.. [2] http://docs.openstack.org/developer/oslo.versionedobjects
+.. [3] http://docs.openstack.org/developer/nova/devref/api/nova.objects.notification.html
diff --git a/doc/source/scheduler_evolution.rst b/doc/source/scheduler_evolution.rst
index f3b0e8e594..43f1ffd5ba 100644
--- a/doc/source/scheduler_evolution.rst
+++ b/doc/source/scheduler_evolution.rst
@@ -11,42 +11,43 @@
License for the specific language governing permissions and limitations
under the License.
-====================
+===================
Scheduler Evolution
-====================
+===================
-The scheduler evolution has been a priority item for both the kilo and liberty
+Evolving the scheduler has been a priority item over several
releases: http://specs.openstack.org/openstack/nova-specs/#priorities
-Over time the scheduler and the rest of nova have become very tightly
-coupled. This effort is focusing on a better separation of concerns between
-the nova-scheduler and the rest of Nova.
+The scheduler has become tightly coupled with the rest of nova,
+limiting its capabilities, accuracy, flexibility and maintainability.
+The goal of scheduler evolution is to bring about a better separation of
+concerns between scheduling functionality and the rest of nova.
Once this effort has completed, its conceivable that the nova-scheduler could
-become a separate git repo, outside of Nova but within the compute project.
-But this is not the current focus of this effort.
+become a separate git repo, outside of nova but within the compute project.
+This is not the current focus.
Problem Use Cases
==================
Many users are wanting to do more advanced things with the scheduler, but the
-current architecture is just not ready to support those in a maintainable way.
-Lets look at a few key use cases that need to be easier to support once this
-initial work is complete.
+current architecture is not ready to support those use cases in a maintainable way.
+A few examples will help to illustrate where the scheduler falls
+short:
Cross Project Affinity
-----------------------
-It is possible that when you boot from a volume, you want it to pick a compute
-node that is close to that volume, automatically.
-There are similar use cases around a pre-created port and needing to be in a
-particular location for the best performance of that port.
+It can be desirable, when booting from a volume, to use a compute node
+that is close to the shared storage where that volume is. Similarly, for
+the sake of performance, it can be desirable to use a compute node that
+is in a particular location in relation to a pre-created port.
Accessing Aggregates in Filters and Weights
--------------------------------------------
-Any DB access in a filter or weight seriously slows down the scheduler.
-Until the end of kilo, there was no way to deal with the scheduler access
+Any DB access in a filter or weight slows down the scheduler. Until the
+end of kilo, there was no way to deal with the scheduler accessing
information about aggregates without querying the DB in every call to
host_passes() in a filter.
@@ -57,22 +58,21 @@ For certain use cases, radically different schedulers may perform much better
than the filter scheduler. We should not block this innovation. It is
unreasonable to assume a single scheduler will work for all use cases.
-However, we really need a single strong scheduler interface, to enable these
-sorts of innovation in a maintainable way.
+However, to enable this kind of innovation in a maintainable way, a
+single strong scheduler interface is required.
Project Scale issues
---------------------
-There are interesting ideas for new schedulers, like the solver scheduler.
-There are frequently requests to add new scheduler filters and weights for
-to look at various different aspects of the compute host.
-Currently the Nova team just doesn't have the bandwidth to deal with all these
+There are many interesting ideas for new schedulers, like the solver scheduler,
+and frequent requests to add new filters and weights to the scheduling system.
+The current nova team does not have the bandwidth to deal with all these
requests. A dedicated scheduler team could work on these items independently
-from the rest of Nova.
+of the rest of nova.
-The problem we currently have, is that the nova-scheduler code is not separate
-from the rest of Nova, so its not currently possible to work on the scheduler
-in isolation. We need a stable interface before we can make the split.
+The tight coupling that currently exists makes it impossible to work
+on the scheduler in isolation. A stable interface is required before
+the code can be split out.
Key areas we are evolving
==========================
@@ -83,17 +83,17 @@ the scheduler evolution work.
Fixing the Scheduler DB model
------------------------------
-We need the Nova and scheduler data models to be independent of each other.
+We need the nova and scheduler data models to be independent of each other.
The first step is breaking the link between the ComputeNode and Service
DB tables. In theory where the Service information is stored should be
pluggable through the service group API, and should be independent of the
scheduler service. For example, it could be managed via zookeeper rather
-than polling the Nova DB.
+than polling the nova DB.
-There are also places where filters and weights call into the Nova DB to
+There are also places where filters and weights call into the nova DB to
find out information about aggregates. This needs to be sent to the
-scheduler, rather than reading directly form the nova database.
+scheduler, rather than reading directly from the nova database.
Versioning Scheduler Placement Interfaces
------------------------------------------
@@ -105,7 +105,9 @@ backwards compatibility needed for live-upgrades.
Luckily we already have the oslo.versionedobjects infrastructure we can use
to model this data in a way that can be versioned across releases.
-This effort is mostly focusing around the request_spec.
+This effort is mostly focused on the request_spec. See, for
+example, `this spec`_.
+
Sending host and node stats to the scheduler
---------------------------------------------
@@ -133,30 +135,33 @@ Resource Tracker
The recent work to add support for NUMA and PCI pass through has shown we
have no good pattern to extend the resource tracker. Ideally we want to keep
-the innovation inside the Nova tree, but we also need it to be easier.
+the innovation inside the nova tree, but we also need it to be easier.
This is very related to the effort to re-think how we model resources, as
-covered by the discussion.
+covered by discussion about `resource providers`_.
Parallelism and Concurrency
----------------------------
The current design of the nova-scheduler is very racy, and can lead to
-excessive numbers of build retries before the correct host is found.
-The recent NUMA features are particularly impacted by how the scheduler
-currently works.
-All this has lead to many people only running a single nova-scheduler
-process configured to use a very small greenthread pool.
+excessive numbers of build retries before the correct host is found. The
+recent NUMA features are particularly impacted by how the scheduler
+works. All this has led to many people running only a single
+nova-scheduler process configured to use a very small greenthread pool.
The work on cells v2 will mean that we soon need the scheduler to scale for
much larger problems. The current scheduler works best with less than 1k nodes
but we will need the scheduler to work with at least 10k nodes.
Various ideas have been discussed to reduce races when running multiple
-nova-scheduler processes.
-One idea is to use two-phase commit "style" resource tracker claims.
-Another idea involves using incremental updates so it is more efficient to
-keep the scheduler's state up to date, potentially using Kafka.
+nova-scheduler processes. One idea is to use two-phase commit "style"
+resource tracker claims. Another idea involves using incremental updates
+so it is more efficient to keep the scheduler's state up to date,
+potentially using Kafka.
-For more details, see the backlog spec that describes more of the details
+For more details, see the `backlog spec`_ that describes more of the details
around this problem.
+
+.. _this spec: http://specs.openstack.org/openstack/nova-specs/specs/kilo/approved/sched-select-destinations-use-request-spec-object.html
+.. _resource providers: https://blueprints.launchpad.net/nova/+spec/resource-providers
+.. _backlog spec: http://specs.openstack.org/openstack/nova-specs/specs/backlog/approved/parallel-scheduler.html
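
As a rough illustration of the two-phase-commit "style" claim idea above,
here is a minimal standalone sketch (not Nova code; all names are
invented): phase one tentatively reserves capacity, phase two either
confirms or aborts, so two racing schedulers cannot both win a host::

    import threading


    class HostClaims(object):
        """Hypothetical per-host claim table for two-phase-style claims."""

        def __init__(self, capacity):
            self.capacity = capacity
            self.pending = {}
            self.lock = threading.Lock()

        def prepare(self, claim_id, amount):
            # Phase 1: tentatively reserve capacity. A scheduler that
            # arrives second sees the reduced capacity and retries
            # elsewhere instead of over-committing this host.
            with self.lock:
                if amount > self.capacity:
                    return False
                self.capacity -= amount
                self.pending[claim_id] = amount
                return True

        def commit(self, claim_id):
            # Phase 2a: the build succeeded; the reservation becomes real.
            with self.lock:
                self.pending.pop(claim_id)

        def abort(self, claim_id):
            # Phase 2b: the build failed; give the capacity back.
            with self.lock:
                self.capacity += self.pending.pop(claim_id)
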
diff --git a/doc/source/support-matrix.ini b/doc/source/support-matrix.ini
index 6929b3dcb2..edb017ce50 100644
--- a/doc/source/support-matrix.ini
+++ b/doc/source/support-matrix.ini
@@ -533,6 +533,10 @@ driver-impl-libvirt-kvm-ppc64=complete
driver-impl-libvirt-kvm-s390x=complete
driver-impl-libvirt-qemu-x86=complete
driver-impl-libvirt-lxc=complete
+driver-notes-libvirt-lxc=Fails in latest Ubuntu Trusty kernel
+ from security repository (3.13.0-76-generic), but works in upstream
+ 3.13.x kernels as well as default Ubuntu Trusty latest kernel
+ (3.13.0-58-generic).
driver-impl-libvirt-xen=complete
driver-impl-vmware=complete
driver-impl-hyperv=complete
@@ -540,6 +544,28 @@ driver-impl-ironic=complete
driver-impl-libvirt-vz-vm=complete
driver-impl-libvirt-vz-ct=complete
+[operation.trigger-crash-dump]
+title=Trigger crash dump
+status=optional
+notes=The trigger crash dump operation is a mechanism for triggering
+ a crash dump in an instance by injecting an NMI (Non-maskable Interrupt)
+ into the instance. It provides a means to dump the production memory image
+ as a dump file, which is useful for debugging. This operation is
+ therefore considered optional to support.
+cli=
+driver-impl-xenserver=missing
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-kvm-ppc64=complete
+driver-impl-libvirt-kvm-s390x=complete
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=missing
+driver-impl-hyperv=missing
+driver-impl-ironic=missing
+driver-impl-libvirt-vz-vm=missing
+driver-impl-libvirt-vz-ct=missing
+
[operation.unpause]
title=Resume instance CPUs (unpause)
status=optional
@@ -622,7 +648,7 @@ driver-impl-libvirt-lxc=complete
driver-impl-libvirt-xen=complete
driver-impl-vmware=complete
driver-impl-hyperv=complete
-driver-impl-ironic=missing
+driver-impl-ironic=complete
driver-impl-libvirt-vz-vm=complete
driver-impl-libvirt-vz-ct=missing
@@ -759,7 +785,7 @@ driver-impl-vmware=missing
driver-impl-hyperv=missing
driver-notes-hyperv=Will be complete when this review is merged:
https://review.openstack.org/#/c/145004/
-driver-impl-ironic=unknown
+driver-impl-ironic=missing
driver-impl-libvirt-vz-vm=missing
driver-impl-libvirt-vz-ct=missing
@@ -1030,3 +1056,22 @@ driver-impl-hyperv=missing
driver-impl-ironic=missing
driver-impl-libvirt-vz-vm=complete
driver-impl-libvirt-vz-ct=complete
+
+[operation.uefi-boot]
+title=UEFI boot
+status=optional
+notes=This allows users to boot a guest with UEFI firmware.
+cli=
+driver-impl-xenserver=missing
+driver-impl-libvirt-kvm-x86=complete
+driver-impl-libvirt-kvm-ppc64=missing
+driver-impl-libvirt-kvm-s390x=missing
+driver-impl-libvirt-qemu-x86=complete
+driver-impl-libvirt-lxc=missing
+driver-impl-libvirt-xen=missing
+driver-impl-vmware=missing
+driver-impl-hyperv=missing
+driver-impl-ironic=partial
+driver-notes-ironic=Depends on hardware support.
+driver-impl-libvirt-vz-vm=missing
+driver-impl-libvirt-vz-ct=missing
diff --git a/doc/source/support-matrix.rst b/doc/source/support-matrix.rst
index fa3d2a482a..ccf9090f47 100644
--- a/doc/source/support-matrix.rst
+++ b/doc/source/support-matrix.rst
@@ -39,4 +39,4 @@ following general guiding principles were applied
optional feature becoming mandatory at a later date, based on other
principles above.
-.. support_matrix::
+.. support_matrix:: support-matrix.ini
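
The directive now takes the ini file name as an argument. A minimal
sketch of what such a docutils directive looks like (not the actual
doc/ext/support_matrix.py code; the paragraph body is a placeholder)::

    from docutils import nodes
    from docutils.parsers import rst
    from docutils.parsers.rst import directives


    class SupportMatrixDirective(rst.Directive):
        # One required argument: the ini file name, e.g. support-matrix.ini
        required_arguments = 1

        def run(self):
            ini_file = self.arguments[0]
            # The real extension parses the ini and renders a table; this
            # placeholder only shows where the argument arrives.
            return [nodes.paragraph(text="matrix source: %s" % ini_file)]


    directives.register_directive('support_matrix', SupportMatrixDirective)
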
diff --git a/doc/source/threading.rst b/doc/source/threading.rst
index a5fcbd88f3..8fbc973172 100644
--- a/doc/source/threading.rst
+++ b/doc/source/threading.rst
@@ -31,6 +31,7 @@ delays in the case that there is only a single green thread::
from eventlet import greenthread
...
greenthread.sleep(0)
+
In current code, time.sleep(0) does the same thing as greenthread.sleep(0) if
the time module is patched through eventlet.monkey_patch(). To be explicit, we recommend
contributors use ``greenthread.sleep()`` instead of ``time.sleep()``.
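
A minimal sketch of the recommended pattern (assuming eventlet is
installed; process() is a stand-in for real per-item work)::

    import eventlet
    eventlet.monkey_patch()  # patches time.sleep() and friends

    from eventlet import greenthread


    def process(item):
        pass  # stand-in for real per-item work


    def long_running_job(items):
        for item in items:
            process(item)
            greenthread.sleep(0)  # explicit cooperative yield
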
diff --git a/etc/nova/nova-config-generator.conf b/etc/nova/nova-config-generator.conf
index ce4eee8463..49e7abc8d9 100644
--- a/etc/nova/nova-config-generator.conf
+++ b/etc/nova/nova-config-generator.conf
@@ -4,12 +4,13 @@ wrap_width = 79
namespace = nova
namespace = nova.conf
namespace = nova.api
+namespace = nova.cache_utils
namespace = nova.cells
namespace = nova.compute
namespace = nova.network
namespace = nova.network.neutronv2
namespace = nova.virt
-namespace = nova.openstack.common.memorycache
+namespace = oslo.cache
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy
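
With the namespaces above in place, the sample configuration can be
regenerated by pointing the generator at this file, e.g.
``oslo-config-generator --config-file etc/nova/nova-config-generator.conf``.
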
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index 071b0568e5..d6b072d50b 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -5,25 +5,25 @@
"cells_scheduler_filter:TargetCellFilter": "is_admin:True",
- "compute:create": "",
- "compute:create:attach_network": "",
- "compute:create:attach_volume": "",
+ "compute:create": "rule:admin_or_owner",
+ "compute:create:attach_network": "rule:admin_or_owner",
+ "compute:create:attach_volume": "rule:admin_or_owner",
"compute:create:forced_host": "is_admin:True",
- "compute:get": "",
- "compute:get_all": "",
+ "compute:get": "rule:admin_or_owner",
+ "compute:get_all": "rule:admin_or_owner",
"compute:get_all_tenants": "is_admin:True",
- "compute:update": "",
+ "compute:update": "rule:admin_or_owner",
- "compute:get_instance_metadata": "",
- "compute:get_all_instance_metadata": "",
- "compute:get_all_instance_system_metadata": "",
- "compute:update_instance_metadata": "",
- "compute:delete_instance_metadata": "",
+ "compute:get_instance_metadata": "rule:admin_or_owner",
+ "compute:get_all_instance_metadata": "rule:admin_or_owner",
+ "compute:get_all_instance_system_metadata": "rule:admin_or_owner",
+ "compute:update_instance_metadata": "rule:admin_or_owner",
+ "compute:delete_instance_metadata": "rule:admin_or_owner",
- "compute:get_diagnostics": "",
- "compute:get_instance_diagnostics": "",
+ "compute:get_diagnostics": "rule:admin_or_owner",
+ "compute:get_instance_diagnostics": "rule:admin_or_owner",
"compute:start": "rule:admin_or_owner",
"compute:stop": "rule:admin_or_owner",
@@ -32,61 +32,61 @@
"compute:unlock": "rule:admin_or_owner",
"compute:unlock_override": "rule:admin_api",
- "compute:get_vnc_console": "",
- "compute:get_spice_console": "",
- "compute:get_rdp_console": "",
- "compute:get_serial_console": "",
- "compute:get_mks_console": "",
- "compute:get_console_output": "",
+ "compute:get_vnc_console": "rule:admin_or_owner",
+ "compute:get_spice_console": "rule:admin_or_owner",
+ "compute:get_rdp_console": "rule:admin_or_owner",
+ "compute:get_serial_console": "rule:admin_or_owner",
+ "compute:get_mks_console": "rule:admin_or_owner",
+ "compute:get_console_output": "rule:admin_or_owner",
- "compute:reset_network": "",
- "compute:inject_network_info": "",
- "compute:add_fixed_ip": "",
- "compute:remove_fixed_ip": "",
+ "compute:reset_network": "rule:admin_or_owner",
+ "compute:inject_network_info": "rule:admin_or_owner",
+ "compute:add_fixed_ip": "rule:admin_or_owner",
+ "compute:remove_fixed_ip": "rule:admin_or_owner",
- "compute:attach_volume": "",
- "compute:detach_volume": "",
- "compute:swap_volume": "",
+ "compute:attach_volume": "rule:admin_or_owner",
+ "compute:detach_volume": "rule:admin_or_owner",
+ "compute:swap_volume": "rule:admin_or_owner",
- "compute:attach_interface": "",
- "compute:detach_interface": "",
+ "compute:attach_interface": "rule:admin_or_owner",
+ "compute:detach_interface": "rule:admin_or_owner",
- "compute:set_admin_password": "",
+ "compute:set_admin_password": "rule:admin_or_owner",
- "compute:rescue": "",
- "compute:unrescue": "",
+ "compute:rescue": "rule:admin_or_owner",
+ "compute:unrescue": "rule:admin_or_owner",
- "compute:suspend": "",
- "compute:resume": "",
+ "compute:suspend": "rule:admin_or_owner",
+ "compute:resume": "rule:admin_or_owner",
- "compute:pause": "",
- "compute:unpause": "",
+ "compute:pause": "rule:admin_or_owner",
+ "compute:unpause": "rule:admin_or_owner",
- "compute:shelve": "",
- "compute:shelve_offload": "",
- "compute:unshelve": "",
+ "compute:shelve": "rule:admin_or_owner",
+ "compute:shelve_offload": "rule:admin_or_owner",
+ "compute:unshelve": "rule:admin_or_owner",
- "compute:snapshot": "",
- "compute:snapshot_volume_backed": "",
- "compute:backup": "",
+ "compute:snapshot": "rule:admin_or_owner",
+ "compute:snapshot_volume_backed": "rule:admin_or_owner",
+ "compute:backup": "rule:admin_or_owner",
- "compute:resize": "",
- "compute:confirm_resize": "",
- "compute:revert_resize": "",
+ "compute:resize": "rule:admin_or_owner",
+ "compute:confirm_resize": "rule:admin_or_owner",
+ "compute:revert_resize": "rule:admin_or_owner",
- "compute:rebuild": "",
- "compute:reboot": "",
+ "compute:rebuild": "rule:admin_or_owner",
+ "compute:reboot": "rule:admin_or_owner",
"compute:delete": "rule:admin_or_owner",
"compute:soft_delete": "rule:admin_or_owner",
"compute:force_delete": "rule:admin_or_owner",
- "compute:security_groups:add_to_instance": "",
- "compute:security_groups:remove_from_instance": "",
+ "compute:security_groups:add_to_instance": "rule:admin_or_owner",
+ "compute:security_groups:remove_from_instance": "rule:admin_or_owner",
- "compute:restore": "",
+ "compute:restore": "rule:admin_or_owner",
- "compute:volume_snapshot_create": "",
- "compute:volume_snapshot_delete": "",
+ "compute:volume_snapshot_create": "rule:admin_or_owner",
+ "compute:volume_snapshot_delete": "rule:admin_or_owner",
"admin_api": "is_admin:True",
"compute_extension:accounts": "rule:admin_api",
@@ -105,95 +105,95 @@
"compute_extension:admin_actions:migrate": "rule:admin_api",
"compute_extension:aggregates": "rule:admin_api",
"compute_extension:agents": "rule:admin_api",
- "compute_extension:attach_interfaces": "",
+ "compute_extension:attach_interfaces": "rule:admin_or_owner",
"compute_extension:baremetal_nodes": "rule:admin_api",
"compute_extension:cells": "rule:admin_api",
"compute_extension:cells:create": "rule:admin_api",
"compute_extension:cells:delete": "rule:admin_api",
"compute_extension:cells:update": "rule:admin_api",
"compute_extension:cells:sync_instances": "rule:admin_api",
- "compute_extension:certificates": "",
+ "compute_extension:certificates": "rule:admin_or_owner",
"compute_extension:cloudpipe": "rule:admin_api",
"compute_extension:cloudpipe_update": "rule:admin_api",
- "compute_extension:config_drive": "",
- "compute_extension:console_output": "",
- "compute_extension:consoles": "",
- "compute_extension:createserverext": "",
- "compute_extension:deferred_delete": "",
- "compute_extension:disk_config": "",
+ "compute_extension:config_drive": "rule:admin_or_owner",
+ "compute_extension:console_output": "rule:admin_or_owner",
+ "compute_extension:consoles": "rule:admin_or_owner",
+ "compute_extension:createserverext": "rule:admin_or_owner",
+ "compute_extension:deferred_delete": "rule:admin_or_owner",
+ "compute_extension:disk_config": "rule:admin_or_owner",
"compute_extension:evacuate": "rule:admin_api",
"compute_extension:extended_server_attributes": "rule:admin_api",
- "compute_extension:extended_status": "",
- "compute_extension:extended_availability_zone": "",
- "compute_extension:extended_ips": "",
- "compute_extension:extended_ips_mac": "",
- "compute_extension:extended_vif_net": "",
- "compute_extension:extended_volumes": "",
+ "compute_extension:extended_status": "rule:admin_or_owner",
+ "compute_extension:extended_availability_zone": "rule:admin_or_owner",
+ "compute_extension:extended_ips": "rule:admin_or_owner",
+ "compute_extension:extended_ips_mac": "rule:admin_or_owner",
+ "compute_extension:extended_vif_net": "rule:admin_or_owner",
+ "compute_extension:extended_volumes": "rule:admin_or_owner",
"compute_extension:fixed_ips": "rule:admin_api",
- "compute_extension:flavor_access": "",
+ "compute_extension:flavor_access": "rule:admin_or_owner",
"compute_extension:flavor_access:addTenantAccess": "rule:admin_api",
"compute_extension:flavor_access:removeTenantAccess": "rule:admin_api",
- "compute_extension:flavor_disabled": "",
- "compute_extension:flavor_rxtx": "",
- "compute_extension:flavor_swap": "",
- "compute_extension:flavorextradata": "",
- "compute_extension:flavorextraspecs:index": "",
- "compute_extension:flavorextraspecs:show": "",
+ "compute_extension:flavor_disabled": "rule:admin_or_owner",
+ "compute_extension:flavor_rxtx": "rule:admin_or_owner",
+ "compute_extension:flavor_swap": "rule:admin_or_owner",
+ "compute_extension:flavorextradata": "rule:admin_or_owner",
+ "compute_extension:flavorextraspecs:index": "rule:admin_or_owner",
+ "compute_extension:flavorextraspecs:show": "rule:admin_or_owner",
"compute_extension:flavorextraspecs:create": "rule:admin_api",
"compute_extension:flavorextraspecs:update": "rule:admin_api",
"compute_extension:flavorextraspecs:delete": "rule:admin_api",
"compute_extension:flavormanage": "rule:admin_api",
- "compute_extension:floating_ip_dns": "",
- "compute_extension:floating_ip_pools": "",
- "compute_extension:floating_ips": "",
+ "compute_extension:floating_ip_dns": "rule:admin_or_owner",
+ "compute_extension:floating_ip_pools": "rule:admin_or_owner",
+ "compute_extension:floating_ips": "rule:admin_or_owner",
"compute_extension:floating_ips_bulk": "rule:admin_api",
- "compute_extension:fping": "",
+ "compute_extension:fping": "rule:admin_or_owner",
"compute_extension:fping:all_tenants": "rule:admin_api",
"compute_extension:hide_server_addresses": "is_admin:False",
"compute_extension:hosts": "rule:admin_api",
"compute_extension:hypervisors": "rule:admin_api",
- "compute_extension:image_size": "",
- "compute_extension:instance_actions": "",
+ "compute_extension:image_size": "rule:admin_or_owner",
+ "compute_extension:instance_actions": "rule:admin_or_owner",
"compute_extension:instance_actions:events": "rule:admin_api",
"compute_extension:instance_usage_audit_log": "rule:admin_api",
- "compute_extension:keypairs": "",
- "compute_extension:keypairs:index": "",
- "compute_extension:keypairs:show": "",
- "compute_extension:keypairs:create": "",
- "compute_extension:keypairs:delete": "",
- "compute_extension:multinic": "",
+ "compute_extension:keypairs": "rule:admin_or_owner",
+ "compute_extension:keypairs:index": "rule:admin_or_owner",
+ "compute_extension:keypairs:show": "rule:admin_or_owner",
+ "compute_extension:keypairs:create": "rule:admin_or_owner",
+ "compute_extension:keypairs:delete": "rule:admin_or_owner",
+ "compute_extension:multinic": "rule:admin_or_owner",
"compute_extension:networks": "rule:admin_api",
- "compute_extension:networks:view": "",
+ "compute_extension:networks:view": "rule:admin_or_owner",
"compute_extension:networks_associate": "rule:admin_api",
- "compute_extension:os-tenant-networks": "",
- "compute_extension:quotas:show": "",
+ "compute_extension:os-tenant-networks": "rule:admin_or_owner",
+ "compute_extension:quotas:show": "rule:admin_or_owner",
"compute_extension:quotas:update": "rule:admin_api",
"compute_extension:quotas:delete": "rule:admin_api",
- "compute_extension:quota_classes": "",
- "compute_extension:rescue": "",
+ "compute_extension:quota_classes": "rule:admin_or_owner",
+ "compute_extension:rescue": "rule:admin_or_owner",
"compute_extension:security_group_default_rules": "rule:admin_api",
- "compute_extension:security_groups": "",
+ "compute_extension:security_groups": "rule:admin_or_owner",
"compute_extension:server_diagnostics": "rule:admin_api",
- "compute_extension:server_groups": "",
- "compute_extension:server_password": "",
- "compute_extension:server_usage": "",
+ "compute_extension:server_groups": "rule:admin_or_owner",
+ "compute_extension:server_password": "rule:admin_or_owner",
+ "compute_extension:server_usage": "rule:admin_or_owner",
"compute_extension:services": "rule:admin_api",
- "compute_extension:shelve": "",
+ "compute_extension:shelve": "rule:admin_or_owner",
"compute_extension:shelveOffload": "rule:admin_api",
"compute_extension:simple_tenant_usage:show": "rule:admin_or_owner",
"compute_extension:simple_tenant_usage:list": "rule:admin_api",
- "compute_extension:unshelve": "",
+ "compute_extension:unshelve": "rule:admin_or_owner",
"compute_extension:users": "rule:admin_api",
- "compute_extension:virtual_interfaces": "",
- "compute_extension:virtual_storage_arrays": "",
- "compute_extension:volumes": "",
- "compute_extension:volume_attachments:index": "",
- "compute_extension:volume_attachments:show": "",
- "compute_extension:volume_attachments:create": "",
- "compute_extension:volume_attachments:update": "",
- "compute_extension:volume_attachments:delete": "",
- "compute_extension:volumetypes": "",
- "compute_extension:availability_zone:list": "",
+ "compute_extension:virtual_interfaces": "rule:admin_or_owner",
+ "compute_extension:virtual_storage_arrays": "rule:admin_or_owner",
+ "compute_extension:volumes": "rule:admin_or_owner",
+ "compute_extension:volume_attachments:index": "rule:admin_or_owner",
+ "compute_extension:volume_attachments:show": "rule:admin_or_owner",
+ "compute_extension:volume_attachments:create": "rule:admin_or_owner",
+ "compute_extension:volume_attachments:update": "rule:admin_or_owner",
+ "compute_extension:volume_attachments:delete": "rule:admin_or_owner",
+ "compute_extension:volumetypes": "rule:admin_or_owner",
+ "compute_extension:availability_zone:list": "rule:admin_or_owner",
"compute_extension:availability_zone:detail": "rule:admin_api",
"compute_extension:used_limits_for_admin": "rule:admin_api",
"compute_extension:migrations:index": "rule:admin_api",
@@ -202,82 +202,86 @@
"compute_extension:console_auth_tokens": "rule:admin_api",
"compute_extension:os-server-external-events:create": "rule:admin_api",
- "network:get_all": "",
- "network:get": "",
- "network:create": "",
- "network:delete": "",
- "network:associate": "",
- "network:disassociate": "",
- "network:get_vifs_by_instance": "",
- "network:allocate_for_instance": "",
- "network:deallocate_for_instance": "",
- "network:validate_networks": "",
- "network:get_instance_uuids_by_ip_filter": "",
- "network:get_instance_id_by_floating_address": "",
- "network:setup_networks_on_host": "",
- "network:get_backdoor_port": "",
+ "network:get_all": "rule:admin_or_owner",
+ "network:get": "rule:admin_or_owner",
+ "network:create": "rule:admin_or_owner",
+ "network:delete": "rule:admin_or_owner",
+ "network:associate": "rule:admin_or_owner",
+ "network:disassociate": "rule:admin_or_owner",
+ "network:get_vifs_by_instance": "rule:admin_or_owner",
+ "network:allocate_for_instance": "rule:admin_or_owner",
+ "network:deallocate_for_instance": "rule:admin_or_owner",
+ "network:validate_networks": "rule:admin_or_owner",
+ "network:get_instance_uuids_by_ip_filter": "rule:admin_or_owner",
+ "network:get_instance_id_by_floating_address": "rule:admin_or_owner",
+ "network:setup_networks_on_host": "rule:admin_or_owner",
+ "network:get_backdoor_port": "rule:admin_or_owner",
- "network:get_floating_ip": "",
- "network:get_floating_ip_pools": "",
- "network:get_floating_ip_by_address": "",
- "network:get_floating_ips_by_project": "",
- "network:get_floating_ips_by_fixed_address": "",
- "network:allocate_floating_ip": "",
- "network:associate_floating_ip": "",
- "network:disassociate_floating_ip": "",
- "network:release_floating_ip": "",
- "network:migrate_instance_start": "",
- "network:migrate_instance_finish": "",
+ "network:get_floating_ip": "rule:admin_or_owner",
+ "network:get_floating_ip_pools": "rule:admin_or_owner",
+ "network:get_floating_ip_by_address": "rule:admin_or_owner",
+ "network:get_floating_ips_by_project": "rule:admin_or_owner",
+ "network:get_floating_ips_by_fixed_address": "rule:admin_or_owner",
+ "network:allocate_floating_ip": "rule:admin_or_owner",
+ "network:associate_floating_ip": "rule:admin_or_owner",
+ "network:disassociate_floating_ip": "rule:admin_or_owner",
+ "network:release_floating_ip": "rule:admin_or_owner",
+ "network:migrate_instance_start": "rule:admin_or_owner",
+ "network:migrate_instance_finish": "rule:admin_or_owner",
- "network:get_fixed_ip": "",
- "network:get_fixed_ip_by_address": "",
- "network:add_fixed_ip_to_instance": "",
- "network:remove_fixed_ip_from_instance": "",
- "network:add_network_to_project": "",
- "network:get_instance_nw_info": "",
+ "network:get_fixed_ip": "rule:admin_or_owner",
+ "network:get_fixed_ip_by_address": "rule:admin_or_owner",
+ "network:add_fixed_ip_to_instance": "rule:admin_or_owner",
+ "network:remove_fixed_ip_from_instance": "rule:admin_or_owner",
+ "network:add_network_to_project": "rule:admin_or_owner",
+ "network:get_instance_nw_info": "rule:admin_or_owner",
- "network:get_dns_domains": "",
- "network:add_dns_entry": "",
- "network:modify_dns_entry": "",
- "network:delete_dns_entry": "",
- "network:get_dns_entries_by_address": "",
- "network:get_dns_entries_by_name": "",
- "network:create_private_dns_domain": "",
- "network:create_public_dns_domain": "",
- "network:delete_dns_domain": "",
+ "network:get_dns_domains": "rule:admin_or_owner",
+ "network:add_dns_entry": "rule:admin_or_owner",
+ "network:modify_dns_entry": "rule:admin_or_owner",
+ "network:delete_dns_entry": "rule:admin_or_owner",
+ "network:get_dns_entries_by_address": "rule:admin_or_owner",
+ "network:get_dns_entries_by_name": "rule:admin_or_owner",
+ "network:create_private_dns_domain": "rule:admin_or_owner",
+ "network:create_public_dns_domain": "rule:admin_or_owner",
+ "network:delete_dns_domain": "rule:admin_or_owner",
"network:attach_external_network": "rule:admin_api",
- "network:get_vif_by_mac_address": "",
+ "network:get_vif_by_mac_address": "rule:admin_or_owner",
"os_compute_api:servers:detail:get_all_tenants": "is_admin:True",
"os_compute_api:servers:index:get_all_tenants": "is_admin:True",
- "os_compute_api:servers:confirm_resize": "",
- "os_compute_api:servers:create": "",
- "os_compute_api:servers:create:attach_network": "",
- "os_compute_api:servers:create:attach_volume": "",
+ "os_compute_api:servers:confirm_resize": "rule:admin_or_owner",
+ "os_compute_api:servers:create": "rule:admin_or_owner",
+ "os_compute_api:servers:create:attach_network": "rule:admin_or_owner",
+ "os_compute_api:servers:create:attach_volume": "rule:admin_or_owner",
"os_compute_api:servers:create:forced_host": "rule:admin_api",
- "os_compute_api:servers:delete": "",
- "os_compute_api:servers:update": "",
- "os_compute_api:servers:detail": "",
- "os_compute_api:servers:index": "",
- "os_compute_api:servers:reboot": "",
- "os_compute_api:servers:rebuild": "",
- "os_compute_api:servers:resize": "",
- "os_compute_api:servers:revert_resize": "",
- "os_compute_api:servers:show": "",
- "os_compute_api:servers:create_image": "",
- "os_compute_api:servers:create_image:allow_volume_backed": "",
+ "os_compute_api:servers:delete": "rule:admin_or_owner",
+ "os_compute_api:servers:update": "rule:admin_or_owner",
+ "os_compute_api:servers:detail": "rule:admin_or_owner",
+ "os_compute_api:servers:index": "rule:admin_or_owner",
+ "os_compute_api:servers:reboot": "rule:admin_or_owner",
+ "os_compute_api:servers:rebuild": "rule:admin_or_owner",
+ "os_compute_api:servers:resize": "rule:admin_or_owner",
+ "os_compute_api:servers:revert_resize": "rule:admin_or_owner",
+ "os_compute_api:servers:show": "rule:admin_or_owner",
+ "os_compute_api:servers:show:host_status": "rule:admin_api",
+ "os_compute_api:servers:create_image": "rule:admin_or_owner",
+ "os_compute_api:servers:create_image:allow_volume_backed": "rule:admin_or_owner",
"os_compute_api:servers:start": "rule:admin_or_owner",
"os_compute_api:servers:stop": "rule:admin_or_owner",
- "os_compute_api:os-access-ips:discoverable": "",
- "os_compute_api:os-access-ips": "",
+ "os_compute_api:servers:trigger_crash_dump": "rule:admin_or_owner",
+ "os_compute_api:servers:migrations:force_complete": "rule:admin_api",
+ "os_compute_api:servers:discoverable": "@",
+ "os_compute_api:os-access-ips:discoverable": "@",
+ "os_compute_api:os-access-ips": "rule:admin_or_owner",
"os_compute_api:os-admin-actions": "rule:admin_api",
- "os_compute_api:os-admin-actions:discoverable": "",
+ "os_compute_api:os-admin-actions:discoverable": "@",
"os_compute_api:os-admin-actions:reset_network": "rule:admin_api",
"os_compute_api:os-admin-actions:inject_network_info": "rule:admin_api",
"os_compute_api:os-admin-actions:reset_state": "rule:admin_api",
- "os_compute_api:os-admin-password": "",
- "os_compute_api:os-admin-password:discoverable": "",
- "os_compute_api:os-aggregates:discoverable": "",
+ "os_compute_api:os-admin-password": "rule:admin_or_owner",
+ "os_compute_api:os-admin-password:discoverable": "@",
+ "os_compute_api:os-aggregates:discoverable": "@",
"os_compute_api:os-aggregates:index": "rule:admin_api",
"os_compute_api:os-aggregates:create": "rule:admin_api",
"os_compute_api:os-aggregates:show": "rule:admin_api",
@@ -287,199 +291,198 @@
"os_compute_api:os-aggregates:remove_host": "rule:admin_api",
"os_compute_api:os-aggregates:set_metadata": "rule:admin_api",
"os_compute_api:os-agents": "rule:admin_api",
- "os_compute_api:os-agents:discoverable": "",
- "os_compute_api:os-attach-interfaces": "",
- "os_compute_api:os-attach-interfaces:discoverable": "",
+ "os_compute_api:os-agents:discoverable": "@",
+ "os_compute_api:os-attach-interfaces": "rule:admin_or_owner",
+ "os_compute_api:os-attach-interfaces:discoverable": "@",
"os_compute_api:os-baremetal-nodes": "rule:admin_api",
- "os_compute_api:os-baremetal-nodes:discoverable": "",
- "os_compute_api:os-block-device-mapping-v1:discoverable": "",
+ "os_compute_api:os-baremetal-nodes:discoverable": "@",
+ "os_compute_api:os-block-device-mapping-v1:discoverable": "@",
"os_compute_api:os-cells": "rule:admin_api",
"os_compute_api:os-cells:create": "rule:admin_api",
"os_compute_api:os-cells:delete": "rule:admin_api",
"os_compute_api:os-cells:update": "rule:admin_api",
"os_compute_api:os-cells:sync_instances": "rule:admin_api",
- "os_compute_api:os-cells:discoverable": "",
- "os_compute_api:os-certificates:create": "",
- "os_compute_api:os-certificates:show": "",
- "os_compute_api:os-certificates:discoverable": "",
+ "os_compute_api:os-cells:discoverable": "@",
+ "os_compute_api:os-certificates:create": "rule:admin_or_owner",
+ "os_compute_api:os-certificates:show": "rule:admin_or_owner",
+ "os_compute_api:os-certificates:discoverable": "@",
"os_compute_api:os-cloudpipe": "rule:admin_api",
- "os_compute_api:os-cloudpipe:discoverable": "",
- "os_compute_api:os-config-drive": "",
- "os_compute_api:os-consoles:discoverable": "",
- "os_compute_api:os-consoles:create": "",
- "os_compute_api:os-consoles:delete": "",
- "os_compute_api:os-consoles:index": "",
- "os_compute_api:os-consoles:show": "",
- "os_compute_api:os-console-output:discoverable": "",
- "os_compute_api:os-console-output": "",
- "os_compute_api:os-remote-consoles": "",
- "os_compute_api:os-remote-consoles:discoverable": "",
- "os_compute_api:os-create-backup:discoverable": "",
+ "os_compute_api:os-cloudpipe:discoverable": "@",
+ "os_compute_api:os-config-drive": "rule:admin_or_owner",
+ "os_compute_api:os-consoles:discoverable": "@",
+ "os_compute_api:os-consoles:create": "rule:admin_or_owner",
+ "os_compute_api:os-consoles:delete": "rule:admin_or_owner",
+ "os_compute_api:os-consoles:index": "rule:admin_or_owner",
+ "os_compute_api:os-consoles:show": "rule:admin_or_owner",
+ "os_compute_api:os-console-output:discoverable": "@",
+ "os_compute_api:os-console-output": "rule:admin_or_owner",
+ "os_compute_api:os-remote-consoles": "rule:admin_or_owner",
+ "os_compute_api:os-remote-consoles:discoverable": "@",
+ "os_compute_api:os-create-backup:discoverable": "@",
"os_compute_api:os-create-backup": "rule:admin_or_owner",
- "os_compute_api:os-deferred-delete": "",
- "os_compute_api:os-deferred-delete:discoverable": "",
- "os_compute_api:os-disk-config": "",
- "os_compute_api:os-disk-config:discoverable": "",
+ "os_compute_api:os-deferred-delete": "rule:admin_or_owner",
+ "os_compute_api:os-deferred-delete:discoverable": "@",
+ "os_compute_api:os-disk-config": "rule:admin_or_owner",
+ "os_compute_api:os-disk-config:discoverable": "@",
"os_compute_api:os-evacuate": "rule:admin_api",
- "os_compute_api:os-evacuate:discoverable": "",
+ "os_compute_api:os-evacuate:discoverable": "@",
"os_compute_api:os-extended-server-attributes": "rule:admin_api",
- "os_compute_api:os-extended-server-attributes:discoverable": "",
- "os_compute_api:os-extended-status": "",
- "os_compute_api:os-extended-status:discoverable": "",
- "os_compute_api:os-extended-availability-zone": "",
- "os_compute_api:os-extended-availability-zone:discoverable": "",
- "os_compute_api:extensions": "",
- "os_compute_api:extensions:discoverable": "",
- "os_compute_api:extension_info:discoverable": "",
- "os_compute_api:os-extended-volumes": "",
- "os_compute_api:os-extended-volumes:discoverable": "",
+ "os_compute_api:os-extended-server-attributes:discoverable": "@",
+ "os_compute_api:os-extended-status": "rule:admin_or_owner",
+ "os_compute_api:os-extended-status:discoverable": "@",
+ "os_compute_api:os-extended-availability-zone": "rule:admin_or_owner",
+ "os_compute_api:os-extended-availability-zone:discoverable": "@",
+ "os_compute_api:extensions": "rule:admin_or_owner",
+ "os_compute_api:extensions:discoverable": "@",
+ "os_compute_api:extension_info:discoverable": "@",
+ "os_compute_api:os-extended-volumes": "rule:admin_or_owner",
+ "os_compute_api:os-extended-volumes:discoverable": "@",
"os_compute_api:os-fixed-ips": "rule:admin_api",
- "os_compute_api:os-fixed-ips:discoverable": "",
- "os_compute_api:os-flavor-access": "",
- "os_compute_api:os-flavor-access:discoverable": "",
+ "os_compute_api:os-fixed-ips:discoverable": "@",
+ "os_compute_api:os-flavor-access": "rule:admin_or_owner",
+ "os_compute_api:os-flavor-access:discoverable": "@",
"os_compute_api:os-flavor-access:remove_tenant_access": "rule:admin_api",
"os_compute_api:os-flavor-access:add_tenant_access": "rule:admin_api",
- "os_compute_api:os-flavor-rxtx": "",
- "os_compute_api:os-flavor-rxtx:discoverable": "",
- "os_compute_api:flavors": "",
- "os_compute_api:flavors:discoverable": "",
- "os_compute_api:os-flavor-extra-specs:discoverable": "",
- "os_compute_api:os-flavor-extra-specs:index": "",
- "os_compute_api:os-flavor-extra-specs:show": "",
+ "os_compute_api:os-flavor-rxtx": "rule:admin_or_owner",
+ "os_compute_api:os-flavor-rxtx:discoverable": "@",
+ "os_compute_api:flavors": "rule:admin_or_owner",
+ "os_compute_api:flavors:discoverable": "@",
+ "os_compute_api:os-flavor-extra-specs:discoverable": "@",
+ "os_compute_api:os-flavor-extra-specs:index": "rule:admin_or_owner",
+ "os_compute_api:os-flavor-extra-specs:show": "rule:admin_or_owner",
"os_compute_api:os-flavor-extra-specs:create": "rule:admin_api",
"os_compute_api:os-flavor-extra-specs:update": "rule:admin_api",
"os_compute_api:os-flavor-extra-specs:delete": "rule:admin_api",
- "os_compute_api:os-flavor-manage:discoverable": "",
+ "os_compute_api:os-flavor-manage:discoverable": "@",
"os_compute_api:os-flavor-manage": "rule:admin_api",
- "os_compute_api:os-floating-ip-dns": "",
- "os_compute_api:os-floating-ip-dns:discoverable": "",
+ "os_compute_api:os-floating-ip-dns": "rule:admin_or_owner",
+ "os_compute_api:os-floating-ip-dns:discoverable": "@",
"os_compute_api:os-floating-ip-dns:domain:update": "rule:admin_api",
"os_compute_api:os-floating-ip-dns:domain:delete": "rule:admin_api",
- "os_compute_api:os-floating-ip-pools": "",
- "os_compute_api:os-floating-ip-pools:discoverable": "",
- "os_compute_api:os-floating-ips": "",
- "os_compute_api:os-floating-ips:discoverable": "",
+ "os_compute_api:os-floating-ip-pools": "rule:admin_or_owner",
+ "os_compute_api:os-floating-ip-pools:discoverable": "@",
+ "os_compute_api:os-floating-ips": "rule:admin_or_owner",
+ "os_compute_api:os-floating-ips:discoverable": "@",
"os_compute_api:os-floating-ips-bulk": "rule:admin_api",
- "os_compute_api:os-floating-ips-bulk:discoverable": "",
- "os_compute_api:os-fping": "",
- "os_compute_api:os-fping:discoverable": "",
+ "os_compute_api:os-floating-ips-bulk:discoverable": "@",
+ "os_compute_api:os-fping": "rule:admin_or_owner",
+ "os_compute_api:os-fping:discoverable": "@",
"os_compute_api:os-fping:all_tenants": "rule:admin_api",
"os_compute_api:os-hide-server-addresses": "is_admin:False",
- "os_compute_api:os-hide-server-addresses:discoverable": "",
+ "os_compute_api:os-hide-server-addresses:discoverable": "@",
"os_compute_api:os-hosts": "rule:admin_api",
- "os_compute_api:os-hosts:discoverable": "",
+ "os_compute_api:os-hosts:discoverable": "@",
"os_compute_api:os-hypervisors": "rule:admin_api",
- "os_compute_api:os-hypervisors:discoverable": "",
- "os_compute_api:images:discoverable": "",
- "os_compute_api:image-size": "",
- "os_compute_api:image-size:discoverable": "",
- "os_compute_api:os-instance-actions": "",
- "os_compute_api:os-instance-actions:discoverable": "",
+ "os_compute_api:os-hypervisors:discoverable": "@",
+ "os_compute_api:images:discoverable": "@",
+ "os_compute_api:image-size": "rule:admin_or_owner",
+ "os_compute_api:image-size:discoverable": "@",
+ "os_compute_api:os-instance-actions": "rule:admin_or_owner",
+ "os_compute_api:os-instance-actions:discoverable": "@",
"os_compute_api:os-instance-actions:events": "rule:admin_api",
"os_compute_api:os-instance-usage-audit-log": "rule:admin_api",
- "os_compute_api:os-instance-usage-audit-log:discoverable": "",
- "os_compute_api:ips:discoverable": "",
+ "os_compute_api:os-instance-usage-audit-log:discoverable": "@",
+ "os_compute_api:ips:discoverable": "@",
"os_compute_api:ips:index": "rule:admin_or_owner",
"os_compute_api:ips:show": "rule:admin_or_owner",
- "os_compute_api:os-keypairs:discoverable": "",
- "os_compute_api:os-keypairs": "",
+ "os_compute_api:os-keypairs:discoverable": "@",
+ "os_compute_api:os-keypairs": "rule:admin_or_owner",
"os_compute_api:os-keypairs:index": "rule:admin_api or user_id:%(user_id)s",
"os_compute_api:os-keypairs:show": "rule:admin_api or user_id:%(user_id)s",
"os_compute_api:os-keypairs:create": "rule:admin_api or user_id:%(user_id)s",
"os_compute_api:os-keypairs:delete": "rule:admin_api or user_id:%(user_id)s",
- "os_compute_api:limits:discoverable": "",
- "os_compute_api:limits": "",
- "os_compute_api:os-lock-server:discoverable": "",
+ "os_compute_api:limits:discoverable": "@",
+ "os_compute_api:limits": "rule:admin_or_owner",
+ "os_compute_api:os-lock-server:discoverable": "@",
"os_compute_api:os-lock-server:lock": "rule:admin_or_owner",
"os_compute_api:os-lock-server:unlock": "rule:admin_or_owner",
"os_compute_api:os-lock-server:unlock:unlock_override": "rule:admin_api",
- "os_compute_api:os-migrate-server:discoverable": "",
+ "os_compute_api:os-migrate-server:discoverable": "@",
"os_compute_api:os-migrate-server:migrate": "rule:admin_api",
"os_compute_api:os-migrate-server:migrate_live": "rule:admin_api",
- "os_compute_api:os-multinic": "",
- "os_compute_api:os-multinic:discoverable": "",
+ "os_compute_api:os-multinic": "rule:admin_or_owner",
+ "os_compute_api:os-multinic:discoverable": "@",
"os_compute_api:os-networks": "rule:admin_api",
- "os_compute_api:os-networks:view": "",
- "os_compute_api:os-networks:discoverable": "",
+ "os_compute_api:os-networks:view": "rule:admin_or_owner",
+ "os_compute_api:os-networks:discoverable": "@",
"os_compute_api:os-networks-associate": "rule:admin_api",
- "os_compute_api:os-networks-associate:discoverable": "",
- "os_compute_api:os-pause-server:discoverable": "",
+ "os_compute_api:os-networks-associate:discoverable": "@",
+ "os_compute_api:os-pause-server:discoverable": "@",
"os_compute_api:os-pause-server:pause": "rule:admin_or_owner",
"os_compute_api:os-pause-server:unpause": "rule:admin_or_owner",
- "os_compute_api:os-pci:pci_servers": "",
- "os_compute_api:os-pci:discoverable": "",
+ "os_compute_api:os-pci:pci_servers": "rule:admin_or_owner",
+ "os_compute_api:os-pci:discoverable": "@",
"os_compute_api:os-pci:index": "rule:admin_api",
"os_compute_api:os-pci:detail": "rule:admin_api",
"os_compute_api:os-pci:show": "rule:admin_api",
- "os_compute_api:os-personality:discoverable": "",
- "os_compute_api:os-preserve-ephemeral-rebuild:discoverable": "",
- "os_compute_api:os-quota-sets:discoverable": "",
+ "os_compute_api:os-personality:discoverable": "@",
+ "os_compute_api:os-preserve-ephemeral-rebuild:discoverable": "@",
+ "os_compute_api:os-quota-sets:discoverable": "@",
"os_compute_api:os-quota-sets:show": "rule:admin_or_owner",
- "os_compute_api:os-quota-sets:defaults": "",
+ "os_compute_api:os-quota-sets:defaults": "@",
"os_compute_api:os-quota-sets:update": "rule:admin_api",
"os_compute_api:os-quota-sets:delete": "rule:admin_api",
"os_compute_api:os-quota-sets:detail": "rule:admin_api",
"os_compute_api:os-quota-class-sets:update": "rule:admin_api",
"os_compute_api:os-quota-class-sets:show": "is_admin:True or quota_class:%(quota_class)s",
- "os_compute_api:os-quota-class-sets:discoverable": "",
- "os_compute_api:os-rescue": "",
- "os_compute_api:os-rescue:discoverable": "",
- "os_compute_api:os-scheduler-hints:discoverable": "",
- "os_compute_api:os-security-group-default-rules:discoverable": "",
+ "os_compute_api:os-quota-class-sets:discoverable": "@",
+ "os_compute_api:os-rescue": "rule:admin_or_owner",
+ "os_compute_api:os-rescue:discoverable": "@",
+ "os_compute_api:os-scheduler-hints:discoverable": "@",
+ "os_compute_api:os-security-group-default-rules:discoverable": "@",
"os_compute_api:os-security-group-default-rules": "rule:admin_api",
- "os_compute_api:os-security-groups": "",
- "os_compute_api:os-security-groups:discoverable": "",
+ "os_compute_api:os-security-groups": "rule:admin_or_owner",
+ "os_compute_api:os-security-groups:discoverable": "@",
"os_compute_api:os-server-diagnostics": "rule:admin_api",
- "os_compute_api:os-server-diagnostics:discoverable": "",
- "os_compute_api:os-server-password": "",
- "os_compute_api:os-server-password:discoverable": "",
- "os_compute_api:os-server-usage": "",
- "os_compute_api:os-server-usage:discoverable": "",
- "os_compute_api:os-server-groups": "",
- "os_compute_api:os-server-groups:discoverable": "",
+ "os_compute_api:os-server-diagnostics:discoverable": "@",
+ "os_compute_api:os-server-password": "rule:admin_or_owner",
+ "os_compute_api:os-server-password:discoverable": "@",
+ "os_compute_api:os-server-usage": "rule:admin_or_owner",
+ "os_compute_api:os-server-usage:discoverable": "@",
+ "os_compute_api:os-server-groups": "rule:admin_or_owner",
+ "os_compute_api:os-server-groups:discoverable": "@",
"os_compute_api:os-services": "rule:admin_api",
- "os_compute_api:os-services:discoverable": "",
- "os_compute_api:server-metadata:discoverable": "",
+ "os_compute_api:os-services:discoverable": "@",
+ "os_compute_api:server-metadata:discoverable": "@",
"os_compute_api:server-metadata:index": "rule:admin_or_owner",
"os_compute_api:server-metadata:show": "rule:admin_or_owner",
"os_compute_api:server-metadata:delete": "rule:admin_or_owner",
"os_compute_api:server-metadata:create": "rule:admin_or_owner",
"os_compute_api:server-metadata:update": "rule:admin_or_owner",
"os_compute_api:server-metadata:update_all": "rule:admin_or_owner",
- "os_compute_api:servers:discoverable": "",
- "os_compute_api:os-shelve:shelve": "",
- "os_compute_api:os-shelve:shelve:discoverable": "",
+ "os_compute_api:os-shelve:shelve": "rule:admin_or_owner",
+ "os_compute_api:os-shelve:shelve:discoverable": "@",
"os_compute_api:os-shelve:shelve_offload": "rule:admin_api",
- "os_compute_api:os-simple-tenant-usage:discoverable": "",
+ "os_compute_api:os-simple-tenant-usage:discoverable": "@",
"os_compute_api:os-simple-tenant-usage:show": "rule:admin_or_owner",
"os_compute_api:os-simple-tenant-usage:list": "rule:admin_api",
- "os_compute_api:os-suspend-server:discoverable": "",
+ "os_compute_api:os-suspend-server:discoverable": "@",
"os_compute_api:os-suspend-server:suspend": "rule:admin_or_owner",
"os_compute_api:os-suspend-server:resume": "rule:admin_or_owner",
"os_compute_api:os-tenant-networks": "rule:admin_or_owner",
- "os_compute_api:os-tenant-networks:discoverable": "",
- "os_compute_api:os-shelve:unshelve": "",
- "os_compute_api:os-user-data:discoverable": "",
- "os_compute_api:os-virtual-interfaces": "",
- "os_compute_api:os-virtual-interfaces:discoverable": "",
- "os_compute_api:os-volumes": "",
- "os_compute_api:os-volumes:discoverable": "",
- "os_compute_api:os-volumes-attachments:index": "",
- "os_compute_api:os-volumes-attachments:show": "",
- "os_compute_api:os-volumes-attachments:create": "",
- "os_compute_api:os-volumes-attachments:update": "",
- "os_compute_api:os-volumes-attachments:delete": "",
- "os_compute_api:os-volumes-attachments:discoverable": "",
- "os_compute_api:os-availability-zone:list": "",
- "os_compute_api:os-availability-zone:discoverable": "",
+ "os_compute_api:os-tenant-networks:discoverable": "@",
+ "os_compute_api:os-shelve:unshelve": "rule:admin_or_owner",
+ "os_compute_api:os-user-data:discoverable": "@",
+ "os_compute_api:os-virtual-interfaces": "rule:admin_or_owner",
+ "os_compute_api:os-virtual-interfaces:discoverable": "@",
+ "os_compute_api:os-volumes": "rule:admin_or_owner",
+ "os_compute_api:os-volumes:discoverable": "@",
+ "os_compute_api:os-volumes-attachments:index": "rule:admin_or_owner",
+ "os_compute_api:os-volumes-attachments:show": "rule:admin_or_owner",
+ "os_compute_api:os-volumes-attachments:create": "rule:admin_or_owner",
+ "os_compute_api:os-volumes-attachments:update": "rule:admin_or_owner",
+ "os_compute_api:os-volumes-attachments:delete": "rule:admin_or_owner",
+ "os_compute_api:os-volumes-attachments:discoverable": "@",
+ "os_compute_api:os-availability-zone:list": "rule:admin_or_owner",
+ "os_compute_api:os-availability-zone:discoverable": "@",
"os_compute_api:os-availability-zone:detail": "rule:admin_api",
"os_compute_api:os-used-limits": "rule:admin_api",
- "os_compute_api:os-used-limits:discoverable": "",
+ "os_compute_api:os-used-limits:discoverable": "@",
"os_compute_api:os-migrations:index": "rule:admin_api",
- "os_compute_api:os-migrations:discoverable": "",
+ "os_compute_api:os-migrations:discoverable": "@",
"os_compute_api:os-assisted-volume-snapshots:create": "rule:admin_api",
"os_compute_api:os-assisted-volume-snapshots:delete": "rule:admin_api",
- "os_compute_api:os-assisted-volume-snapshots:discoverable": "",
+ "os_compute_api:os-assisted-volume-snapshots:discoverable": "@",
"os_compute_api:os-console-auth-tokens": "rule:admin_api",
"os_compute_api:os-server-external-events:create": "rule:admin_api"
}
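
As a standalone sketch of the three rule forms used above (not Nova
code; the admin_or_owner definition here is illustrative): "@" always
passes, "rule:x" dereferences another named rule, and expressions like
is_admin:True or project_id:%(project_id)s are checked against the
request credentials and target::

    from oslo_config import cfg
    from oslo_policy import policy

    rules = policy.Rules.from_dict({
        "admin_or_owner": "is_admin:True or project_id:%(project_id)s",
        "os_compute_api:servers:show": "rule:admin_or_owner",
        "os_compute_api:servers:discoverable": "@",
    })
    enforcer = policy.Enforcer(cfg.CONF, rules=rules, use_conf=False)

    creds = {"is_admin": False, "project_id": "p1"}
    # The owner of the target project passes rule:admin_or_owner.
    assert enforcer.enforce("os_compute_api:servers:show",
                            {"project_id": "p1"}, creds)
    # "@" matches every request, even an unprivileged one.
    assert enforcer.enforce("os_compute_api:servers:discoverable", {}, creds)
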
diff --git a/etc/nova/rootwrap.conf b/etc/nova/rootwrap.conf
index 5dc07891e0..c6fd5642b7 100644
--- a/etc/nova/rootwrap.conf
+++ b/etc/nova/rootwrap.conf
@@ -10,7 +10,7 @@ filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
-exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
+exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/sbin,/usr/local/bin
# Enable logging to syslog
# Default value is False
diff --git a/etc/nova/rootwrap.d/compute.filters b/etc/nova/rootwrap.d/compute.filters
index 6d65fb0f74..c846b89ecd 100644
--- a/etc/nova/rootwrap.d/compute.filters
+++ b/etc/nova/rootwrap.d/compute.filters
@@ -92,6 +92,9 @@ vrouter-port-control: CommandFilter, vrouter-port-control, root
# nova/virt/libvirt/vif.py: 'ebrctl', ...
ebrctl: CommandFilter, ebrctl, root
+# nova/virt/libvirt/vif.py: 'mm-ctl', ...
+mm-ctl: CommandFilter, mm-ctl, root
+
# nova/network/linux_net.py: 'ovs-ofctl', ....
ovs-ofctl: CommandFilter, ovs-ofctl, root
@@ -243,3 +246,6 @@ ploop: CommandFilter, ploop, root
# nova/virt/libvirt/utils.py: 'xend', 'status'
xend: CommandFilter, xend, root
+
+# nova/virt/libvirt/utils.py:
+touch: CommandFilter, touch, root
diff --git a/etc/nova/rootwrap.d/network.filters b/etc/nova/rootwrap.d/network.filters
index 527ab40c27..52b7130ea8 100644
--- a/etc/nova/rootwrap.d/network.filters
+++ b/etc/nova/rootwrap.d/network.filters
@@ -42,9 +42,6 @@ ivs-ctl: CommandFilter, ivs-ctl, root
# nova/virt/libvirt/vif.py: 'ifc_ctl', ...
ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root
-# nova/virt/libvirt/vif.py: 'mm-ctl', ...
-mm-ctl: CommandFilter, mm-ctl, root
-
# nova/network/linux_net.py: 'ebtables', '-D' ...
# nova/network/linux_net.py: 'ebtables', '-I' ...
ebtables: CommandFilter, ebtables, root
@@ -89,3 +86,6 @@ sysctl: CommandFilter, sysctl, root
# nova/network/linux_net.py: 'conntrack'
conntrack: CommandFilter, conntrack, root
+
+# nova/network/linux_net.py: 'fp-vdev'
+fp-vdev: CommandFilter, fp-vdev, root
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 95792796fa..f9c0a9a599 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -17,33 +17,69 @@
import webob.dec
import webob.exc
+from oslo_log import log as logging
+
+from nova.i18n import _LW
from nova import wsgi
+LOG = logging.getLogger(__name__)
+
+_DEPRECATED_MIDDLEWARE = (
+ '%s has been deprecated and removed from Nova in Mitaka. '
+ 'You will need to remove lines referencing it in your paste.ini before '
+ 'upgrading to Newton or your cloud will break.')
+
_DEPRECATION_MESSAGE = ('The in tree EC2 API has been removed in Mitaka. '
'Please remove entries from api-paste.ini')
+# NOTE(sdague): this whole file is safe to remove in Newton. We just
+# needed a release cycle for it.
+
class DeprecatedMiddleware(wsgi.Middleware):
def __init__(self, *args, **kwargs):
super(DeprecatedMiddleware, self).__init__(args[0])
+ LOG.warn(_LW(_DEPRECATED_MIDDLEWARE % type(self).__name__)) # noqa
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
- return webob.exc.HTTPException(message=_DEPRECATION_MESSAGE)
+ # deprecated middleware needs to be a no op, not an exception
+ return req.get_response(self.application)
+
+
+class FaultWrapper(DeprecatedMiddleware):
+ pass
+
+
+class Lockout(DeprecatedMiddleware):
+ pass
+
+
+class EC2KeystoneAuth(DeprecatedMiddleware):
+ pass
+
+
+class NoAuth(DeprecatedMiddleware):
+ pass
+
+
+class Requestify(DeprecatedMiddleware):
+ pass
+
+
+class Authorizer(DeprecatedMiddleware):
+ pass
+
+
+class RequestLogging(DeprecatedMiddleware):
+ pass
+
+
+class Validator(DeprecatedMiddleware):
+ pass
-class DeprecatedApplication(wsgi.Application):
+class Executor(wsgi.Application):
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
- return webob.exc.HTTPException(message=_DEPRECATION_MESSAGE)
-
-
-FaultWrapper = DeprecatedMiddleware
-RequestLogging = DeprecatedMiddleware
-Lockout = DeprecatedMiddleware
-EC2KeystoneAuth = DeprecatedMiddleware
-NoAuth = DeprecatedMiddleware
-Requestify = DeprecatedMiddleware
-Authorizer = DeprecatedMiddleware
-Validator = DeprecatedMiddleware
-Executor = DeprecatedApplication
+ return webob.exc.HTTPNotFound(explanation=_DEPRECATION_MESSAGE)
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index 87d415cc41..3f9af9b7fa 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -22,6 +22,7 @@ from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
+from nova import cache_utils
from nova import context
from nova import exception
from nova.i18n import _
@@ -29,7 +30,6 @@ from nova.i18n import _LI
from nova.network import model as network_model
from nova import objects
from nova.objects import base as obj_base
-from nova.openstack.common import memorycache
LOG = logging.getLogger(__name__)
# NOTE(vish): cache mapping for one week
@@ -42,13 +42,13 @@ def memoize(func):
def memoizer(context, reqid):
global _CACHE
if not _CACHE:
- _CACHE = memorycache.get_client()
+ _CACHE = cache_utils.get_client(expiration_time=_CACHE_TIME)
key = "%s:%s" % (func.__name__, reqid)
key = str(key)
value = _CACHE.get(key)
if value is None:
value = func(context, reqid)
- _CACHE.set(key, value, time=_CACHE_TIME)
+ _CACHE.set(key, value)
return value
return memoizer
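
The behavioural shift here, based only on the calls visible above (the
key and value are invented): the TTL now belongs to the client rather
than to each set() call::

    from nova import cache_utils

    _CACHE_TIME = 7 * 24 * 60 * 60  # one week, matching the module above

    client = cache_utils.get_client(expiration_time=_CACHE_TIME)
    client.set("memoizer:req-1", "i-0001")  # expires _CACHE_TIME s from now
    value = client.get("memoizer:req-1")    # None on a miss or after expiry
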
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index 7260695381..5d216b2075 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -479,11 +479,11 @@ class InstanceMetadata(object):
path = 'openstack/%s/%s' % (version, VD_JSON_NAME)
yield (path, self.lookup(path))
- for (cid, content) in six.iteritems(self.content):
if self._check_version(LIBERTY, version, ALL_OPENSTACK_VERSIONS):
path = 'openstack/%s/%s' % (version, NW_JSON_NAME)
yield (path, self.lookup(path))
+ for (cid, content) in six.iteritems(self.content):
yield ('%s/%s/%s' % ("openstack", CONTENT_DIR, cid), content)
@@ -544,7 +544,8 @@ def get_metadata_by_address(address):
def get_metadata_by_instance_id(instance_id, address, ctxt=None):
ctxt = ctxt or context.get_admin_context()
instance = objects.Instance.get_by_uuid(
- ctxt, instance_id, expected_attrs=['ec2_ids', 'flavor', 'info_cache'])
+ ctxt, instance_id, expected_attrs=['ec2_ids', 'flavor', 'info_cache',
+ 'metadata', 'system_metadata'])
return InstanceMetadata(instance, address)
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index 29ec73baed..7cda30ce98 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -26,13 +26,13 @@ import webob.dec
import webob.exc
from nova.api.metadata import base
+from nova import cache_utils
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.network.neutronv2 import api as neutronapi
-from nova.openstack.common import memorycache
from nova import utils
from nova import wsgi
@@ -72,7 +72,8 @@ class MetadataRequestHandler(wsgi.Application):
"""Serve metadata."""
def __init__(self):
- self._cache = memorycache.get_client()
+ self._cache = cache_utils.get_client(
+ expiration_time=CONF.metadata_cache_expiration)
def get_metadata_by_remote_address(self, address):
if not address:
@@ -90,7 +91,7 @@ class MetadataRequestHandler(wsgi.Application):
return None
if CONF.metadata_cache_expiration > 0:
- self._cache.set(cache_key, data, CONF.metadata_cache_expiration)
+ self._cache.set(cache_key, data)
return data
@@ -107,7 +108,7 @@ class MetadataRequestHandler(wsgi.Application):
return None
if CONF.metadata_cache_expiration > 0:
- self._cache.set(cache_key, data, CONF.metadata_cache_expiration)
+ self._cache.set(cache_key, data)
return data
@@ -254,7 +255,7 @@ class MetadataRequestHandler(wsgi.Application):
instance_id = instance_data['device_id']
tenant_id = instance_data['tenant_id']
- # instance_data is unicode-encoded, while memorycache doesn't like
+ # instance_data is unicode-encoded, while cache_utils doesn't like
# that. Therefore we convert to str
if isinstance(instance_id, six.text_type):
instance_id = instance_id.encode('utf-8')
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index ab1f864ae6..690396e427 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -60,7 +60,13 @@ api_opts = [
'list. Specify the extension aliases here. '
'This option will be removed in the near future. '
'After that point you have to run all of the API.',
- deprecated_for_removal=True, deprecated_group='osapi_v21')
+ deprecated_for_removal=True, deprecated_group='osapi_v21'),
+ cfg.StrOpt('project_id_regex',
+ default=None,
+ help='DEPRECATED: The validation regex for project_ids '
+ 'used in URLs. This defaults to [0-9a-f\-]+ if not set, '
+ 'which matches normal UUIDs created by Keystone.',
+ deprecated_for_removal=True, deprecated_group='osapi_v21')
]
api_opts_group = cfg.OptGroup(name='osapi_v21', title='API v2.1 Options')
@@ -196,14 +202,40 @@ class APIMapper(routes.Mapper):
class ProjectMapper(APIMapper):
def resource(self, member_name, collection_name, **kwargs):
+ # NOTE(sdague): project_id parameter is only valid if it's hex
+ # or hex + dashes (note: integers are a subset of this). This
+ # is required to handle our overlapping routes issues.
+ project_id_regex = '[0-9a-f\-]+'
+ if CONF.osapi_v21.project_id_regex:
+ project_id_regex = CONF.osapi_v21.project_id_regex
+
+ project_id_token = '{project_id:%s}' % project_id_regex
+ if 'parent_resource' not in kwargs:
+ kwargs['path_prefix'] = '%s/' % project_id_token
+ else:
+ parent_resource = kwargs['parent_resource']
+ p_collection = parent_resource['collection_name']
+ p_member = parent_resource['member_name']
+ kwargs['path_prefix'] = '%s/%s/:%s_id' % (
+ project_id_token,
+ p_collection,
+ p_member)
+ routes.Mapper.resource(
+ self,
+ member_name,
+ collection_name,
+ **kwargs)
+
+ # while we are in transition mode, create additional routes
+ # for the resource that do not include project_id.
if 'parent_resource' not in kwargs:
- kwargs['path_prefix'] = '{project_id}/'
+ del kwargs['path_prefix']
else:
parent_resource = kwargs['parent_resource']
p_collection = parent_resource['collection_name']
p_member = parent_resource['member_name']
- kwargs['path_prefix'] = '{project_id}/%s/:%s_id' % (p_collection,
- p_member)
+ kwargs['path_prefix'] = '%s/:%s_id' % (p_collection,
+ p_member)
routes.Mapper.resource(self, member_name,
collection_name,
**kwargs)
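
A standalone sketch of the inline-regex technique used above (not Nova
code): the {name:regex} syntax in the routes library constrains what a
path segment can match, so hex-style project ids take the project
route while the bare path falls through to the transitional
project-less route::

    import routes

    mapper = routes.Mapper()
    mapper.connect(r'/{project_id:[0-9a-f\-]+}/servers', action='detail')
    mapper.connect('/servers', action='detail_no_project')

    # Hex-ish id: matched and captured by the first route.
    print(mapper.match('/8e6e5bc46f3f4cb6a02e8c0bdf2d6c58/servers'))
    # Bare path: only the transitional project-less route matches.
    print(mapper.match('/servers'))
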
diff --git a/nova/api/openstack/api_version_request.py b/nova/api/openstack/api_version_request.py
index 47fa7083d1..5a31625773 100644
--- a/nova/api/openstack/api_version_request.py
+++ b/nova/api/openstack/api_version_request.py
@@ -57,6 +57,14 @@ REST_API_VERSION_HISTORY = """REST API Version History:
* 2.14 - Remove onSharedStorage from evacuate request body and remove
adminPass from the response body
* 2.15 - Add soft-affinity and soft-anti-affinity policies
+ * 2.16 - Exposes host_status for servers/detail and servers/{server_id}
+ * 2.17 - Add trigger_crash_dump to server actions
+ * 2.18 - Makes project_id optional in v2.1
+ * 2.19 - Allow user to set and get the server description
+ * 2.20 - Add attach and detach volume operations for instances in shelved
+ and shelved_offloaded state
+ * 2.21 - Make os-instance-actions read deleted instances
+ * 2.22 - Add API to force live migration to complete
"""
# The minimum and maximum versions of the API supported
@@ -65,7 +73,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = "2.1"
-_MAX_API_VERSION = "2.15"
+_MAX_API_VERSION = "2.22"
DEFAULT_API_VERSION = _MIN_API_VERSION
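In rough terms (an illustrative sketch, not Nova's APIVersionRequest class), the microversion gate is a bounded comparison against the min/max pair updated above:

    # Illustrative only; the real class parses "2.x" strings and supports
    # richer matching semantics.
    _MIN = (2, 1)
    _MAX = (2, 22)

    def is_supported(requested, min_version=_MIN, max_version=_MAX):
        # Tuples compare lexicographically: (2, 16) <= (2, 22) < (2, 23).
        return min_version <= requested <= max_version

    assert is_supported((2, 16))
    assert not is_supported((2, 23))  # above the new _MAX_API_VERSION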
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 61b9773d1b..bbaf74d20d 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -74,10 +74,14 @@ class NoAuthMiddleware(NoAuthMiddlewareBase):
return self.base_call(req, True, always_admin=False)
-# TODO(johnthetubaguy) this should be removed in the M release
-class NoAuthMiddlewareV3(NoAuthMiddlewareBase):
- """Return a fake token if one isn't specified."""
+class NoAuthMiddlewareV2_18(NoAuthMiddlewareBase):
+ """Return a fake token if one isn't specified.
+
+ This provides a version of the middleware which does not add
+ project_id into server management urls.
+
+ """
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
- return self.base_call(req, False)
+ return self.base_call(req, False, always_admin=False)
diff --git a/nova/api/openstack/compute/aggregates.py b/nova/api/openstack/compute/aggregates.py
index 5dd4067df6..8f7f0ef75d 100644
--- a/nova/api/openstack/compute/aggregates.py
+++ b/nova/api/openstack/compute/aggregates.py
@@ -209,8 +209,11 @@ class AggregateController(wsgi.Controller):
def _build_aggregate_items(self, aggregate):
keys = aggregate.obj_fields
for key in keys:
- if (aggregate.obj_attr_is_set(key)
- or key in aggregate.obj_extra_fields):
+ # NOTE(danms): Skip the uuid field because we have no microversion
+ # to expose it
+ if ((aggregate.obj_attr_is_set(key)
+ or key in aggregate.obj_extra_fields) and
+ key != 'uuid'):
yield key, getattr(aggregate, key)
diff --git a/nova/api/openstack/compute/availability_zone.py b/nova/api/openstack/compute/availability_zone.py
index 7eb02b762d..97b2520ed8 100644
--- a/nova/api/openstack/compute/availability_zone.py
+++ b/nova/api/openstack/compute/availability_zone.py
@@ -12,16 +12,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
-
from nova.api.openstack.compute.schemas import availability_zone as schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import availability_zones
+import nova.conf
from nova import objects
from nova import servicegroup
-CONF = cfg.CONF
+CONF = nova.conf.CONF
ALIAS = "os-availability-zone"
ATTRIBUTE_NAME = "availability_zone"
authorize = extensions.os_compute_authorizer(ALIAS)
diff --git a/nova/api/openstack/compute/baremetal_nodes.py b/nova/api/openstack/compute/baremetal_nodes.py
index f8ea93d0fa..faef487f1a 100644
--- a/nova/api/openstack/compute/baremetal_nodes.py
+++ b/nova/api/openstack/compute/baremetal_nodes.py
@@ -16,19 +16,18 @@
"""The bare-metal admin extension."""
-from oslo_config import cfg
from oslo_utils import importutils
import webob
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
+import nova.conf
from nova.i18n import _
ironic_client = importutils.try_import('ironicclient.client')
ironic_exc = importutils.try_import('ironicclient.exc')
-CONF = cfg.CONF
ALIAS = "os-baremetal-nodes"
authorize = extensions.os_compute_authorizer(ALIAS)
@@ -39,6 +38,7 @@ node_ext_fields = ['uuid', 'task_state', 'updated_at', 'pxe_config_path']
interface_fields = ['id', 'address', 'datapath_id', 'port_no']
+CONF = nova.conf.CONF
CONF.import_opt('api_version',
'nova.virt.ironic.driver',
group='ironic')
@@ -54,7 +54,6 @@ CONF.import_opt('admin_password',
CONF.import_opt('admin_tenant_name',
'nova.virt.ironic.driver',
group='ironic')
-CONF.import_opt('compute_driver', 'nova.virt.driver')
def _check_ironic_client_enabled():
diff --git a/nova/api/openstack/compute/console_output.py b/nova/api/openstack/compute/console_output.py
index 2e06552ae3..268fa42a94 100644
--- a/nova/api/openstack/compute/console_output.py
+++ b/nova/api/openstack/compute/console_output.py
@@ -35,7 +35,7 @@ class ConsoleOutputController(wsgi.Controller):
super(ConsoleOutputController, self).__init__(*args, **kwargs)
self.compute_api = compute.API(skip_policy_check=True)
- @extensions.expected_errors((400, 404, 409, 501))
+ @extensions.expected_errors((404, 409, 501))
@wsgi.action('os-getConsoleOutput')
@validation.schema(console_output.get_console_output)
def get_console_output(self, req, id, body):
diff --git a/nova/api/openstack/compute/extended_availability_zone.py b/nova/api/openstack/compute/extended_availability_zone.py
index 57ffced3ea..f4337ac3fb 100644
--- a/nova/api/openstack/compute/extended_availability_zone.py
+++ b/nova/api/openstack/compute/extended_availability_zone.py
@@ -26,6 +26,9 @@ PREFIX = "OS-EXT-AZ"
class ExtendedAZController(wsgi.Controller):
def _extend_server(self, context, server, instance):
+ # NOTE(mriedem): The OS-EXT-AZ prefix should not be used for new
+ # attributes after v2.1. They are only in v2.1 for backward compat
+ # with v2.0.
key = "%s:availability_zone" % PREFIX
az = avail_zone.get_instance_availability_zone(context, instance)
server[key] = az or ''
diff --git a/nova/api/openstack/compute/extended_server_attributes.py b/nova/api/openstack/compute/extended_server_attributes.py
index 867f4b9e97..66061f2b9f 100644
--- a/nova/api/openstack/compute/extended_server_attributes.py
+++ b/nova/api/openstack/compute/extended_server_attributes.py
@@ -17,16 +17,18 @@
from nova.api.openstack import api_version_request
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
-
+from nova import compute
ALIAS = "os-extended-server-attributes"
authorize = extensions.os_compute_soft_authorizer(ALIAS)
+soft_authorize = extensions.os_compute_soft_authorizer('servers')
class ExtendedServerAttributesController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(ExtendedServerAttributesController, self).__init__(*args,
**kwargs)
+ self.compute_api = compute.API(skip_policy_check=True)
def _extend_server(self, context, server, instance, req):
key = "OS-EXT-SRV-ATTR:hypervisor_hostname"
@@ -34,6 +36,11 @@ class ExtendedServerAttributesController(wsgi.Controller):
properties = ['host', 'name']
if api_version_request.is_supported(req, min_version='2.3'):
+ # NOTE(mriedem): These will use the OS-EXT-SRV-ATTR prefix below
+            # and that's OK for microversion 2.3, which is kept compatible
+            # with v2.0 for the ec2 API split out from Nova. After this,
+            # however, new microversions should not be using the
+            # OS-EXT-SRV-ATTR prefix.
properties += ['reservation_id', 'launch_index',
'hostname', 'kernel_id', 'ramdisk_id',
'root_device_name', 'user_data']
@@ -41,29 +48,59 @@ class ExtendedServerAttributesController(wsgi.Controller):
if attr == 'name':
key = "OS-EXT-SRV-ATTR:instance_%s" % attr
else:
+ # NOTE(mriedem): Nothing after microversion 2.3 should use the
+ # OS-EXT-SRV-ATTR prefix for the attribute key name.
key = "OS-EXT-SRV-ATTR:%s" % attr
server[key] = instance[attr]
+ def _server_host_status(self, context, server, instance, req):
+ host_status = self.compute_api.get_instance_host_status(instance)
+ server['host_status'] = host_status
+
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
+ authorize_extend = False
+ authorize_host_status = False
if authorize(context):
+ authorize_extend = True
+ if (api_version_request.is_supported(req, min_version='2.16') and
+ soft_authorize(context, action='show:host_status')):
+ authorize_host_status = True
+ if authorize_extend or authorize_host_status:
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show' method.
- self._extend_server(context, server, db_instance, req)
+ if authorize_extend:
+ self._extend_server(context, server, db_instance, req)
+ if authorize_host_status:
+ self._server_host_status(context, server, db_instance, req)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
+ authorize_extend = False
+ authorize_host_status = False
if authorize(context):
+ authorize_extend = True
+ if (api_version_request.is_supported(req, min_version='2.16') and
+ soft_authorize(context, action='show:host_status')):
+ authorize_host_status = True
+ if authorize_extend or authorize_host_status:
servers = list(resp_obj.obj['servers'])
+ instances = req.get_db_instances()
+            # The instances are guaranteed to be in the cache due to
+            # the core API adding them in its 'detail' method.
+ if authorize_host_status:
+ host_statuses = self.compute_api.get_instances_host_statuses(
+ instances.values())
for server in servers:
- db_instance = req.get_db_instance(server['id'])
- # server['id'] is guaranteed to be in the cache due to
- # the core API adding it in its 'detail' method.
- self._extend_server(context, server, db_instance, req)
+ if authorize_extend:
+ instance = instances[server['id']]
+ self._extend_server(context, server, instance, req)
+ if authorize_host_status:
+ server['host_status'] = host_statuses[server['id']]
class ExtendedServerAttributes(extensions.V21APIExtensionBase):
diff --git a/nova/api/openstack/compute/extended_status.py b/nova/api/openstack/compute/extended_status.py
index 1d49be241a..13ec50f944 100644
--- a/nova/api/openstack/compute/extended_status.py
+++ b/nova/api/openstack/compute/extended_status.py
@@ -30,6 +30,9 @@ class ExtendedStatusController(wsgi.Controller):
# to make it same as V2. If needed it can be added with
# microversion.
for state in ['task_state', 'vm_state', 'power_state']:
+ # NOTE(mriedem): The OS-EXT-STS prefix should not be used for new
+ # attributes after v2.1. They are only in v2.1 for backward compat
+ # with v2.0.
key = "%s:%s" % ('OS-EXT-STS', state)
server[key] = instance[state]
diff --git a/nova/api/openstack/compute/extended_volumes.py b/nova/api/openstack/compute/extended_volumes.py
index 321a8d1b54..49037795d9 100644
--- a/nova/api/openstack/compute/extended_volumes.py
+++ b/nova/api/openstack/compute/extended_volumes.py
@@ -35,6 +35,9 @@ class ExtendedVolumesController(wsgi.Controller):
volume_attached['delete_on_termination'] = (
bdm['delete_on_termination'])
volumes_attached.append(volume_attached)
+ # NOTE(mriedem): The os-extended-volumes prefix should not be used for
+ # new attributes after v2.1. They are only in v2.1 for backward compat
+ # with v2.0.
key = "%s:volumes_attached" % ExtendedVolumes.alias
server[key] = volumes_attached
diff --git a/nova/api/openstack/compute/extension_info.py b/nova/api/openstack/compute/extension_info.py
index 7fd395ebe0..45e8195ea8 100644
--- a/nova/api/openstack/compute/extension_info.py
+++ b/nova/api/openstack/compute/extension_info.py
@@ -82,7 +82,7 @@ v21_to_v2_extension_list_mapping = {
v2_extension_suppress_list = ['servers', 'images', 'versions', 'flavors',
'os-block-device-mapping-v1', 'os-consoles',
'extensions', 'image-metadata', 'ips', 'limits',
- 'server-metadata'
+ 'server-metadata', 'server-migrations'
]
# v2.1 plugins which should appear under a different name in v2
diff --git a/nova/api/openstack/compute/flavors_extraspecs.py b/nova/api/openstack/compute/flavors_extraspecs.py
index 716f347323..6482cfe67d 100644
--- a/nova/api/openstack/compute/flavors_extraspecs.py
+++ b/nova/api/openstack/compute/flavors_extraspecs.py
@@ -52,7 +52,7 @@ class FlavorExtraSpecsController(wsgi.Controller):
raise webob.exc.HTTPBadRequest(
explanation=error.format_message())
- @extensions.expected_errors(())
+ @extensions.expected_errors(404)
def index(self, req, flavor_id):
"""Returns the list of extra specs for a given flavor."""
context = req.environ['nova.context']
diff --git a/nova/api/openstack/compute/floating_ips.py b/nova/api/openstack/compute/floating_ips.py
index 109d0759a6..96c08256d2 100644
--- a/nova/api/openstack/compute/floating_ips.py
+++ b/nova/api/openstack/compute/floating_ips.py
@@ -71,7 +71,8 @@ def get_instance_by_floating_ip_addr(self, context, address):
raise webob.exc.HTTPConflict(explanation=ex.format_message())
if instance_id:
- return common.get_instance(self.compute_api, context, instance_id)
+ return common.get_instance(self.compute_api, context, instance_id,
+ expected_attrs=['flavor'])
def disassociate_floating_ip(self, context, instance, address):
@@ -193,7 +194,8 @@ class FloatingIPActionController(wsgi.Controller):
address = body['addFloatingIp']['address']
- instance = common.get_instance(self.compute_api, context, id)
+ instance = common.get_instance(self.compute_api, context, id,
+ expected_attrs=['flavor'])
cached_nwinfo = compute_utils.get_nw_info_for_instance(instance)
if not cached_nwinfo:
LOG.warning(
diff --git a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py
index 0d2f450fc4..f9b0f43bb0 100644
--- a/nova/api/openstack/compute/image_metadata.py
+++ b/nova/api/openstack/compute/image_metadata.py
@@ -154,3 +154,8 @@ class ImageMetadata(extensions.V21APIExtensionBase):
"/{project_id}/images/{image_id}/metadata",
controller=wsgi_resource,
action='update_all', conditions={"method": ['PUT']})
+ # Also connect the non project_id route
+ mapper.connect("metadata",
+ "/images/{image_id}/metadata",
+ controller=wsgi_resource,
+ action='update_all', conditions={"method": ['PUT']})
diff --git a/nova/api/openstack/compute/image_size.py b/nova/api/openstack/compute/image_size.py
index d4f8d71726..f76b874b3d 100644
--- a/nova/api/openstack/compute/image_size.py
+++ b/nova/api/openstack/compute/image_size.py
@@ -24,6 +24,9 @@ authorize = extensions.os_compute_soft_authorizer(ALIAS)
class ImageSizeController(wsgi.Controller):
def _extend_image(self, image, image_cache):
+ # NOTE(mriedem): The OS-EXT-* prefix should not be used for new
+ # attributes after v2.1. They are only in v2.1 for backward compat
+ # with v2.0.
key = "OS-EXT-IMG-SIZE:size"
image[key] = image_cache['size']
diff --git a/nova/api/openstack/compute/instance_actions.py b/nova/api/openstack/compute/instance_actions.py
index b915ea8dd7..a84c5b1c0e 100644
--- a/nova/api/openstack/compute/instance_actions.py
+++ b/nova/api/openstack/compute/instance_actions.py
@@ -20,6 +20,7 @@ from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova.i18n import _
+from nova import utils
ALIAS = "os-instance-actions"
authorize = extensions.os_compute_authorizer(ALIAS)
@@ -49,11 +50,20 @@ class InstanceActionsController(wsgi.Controller):
event[key] = event_raw.get(key)
return event
+ @wsgi.Controller.api_version("2.1", "2.20")
+ def _get_instance(self, req, context, server_id):
+ return common.get_instance(self.compute_api, context, server_id)
+
+ @wsgi.Controller.api_version("2.21") # noqa
+ def _get_instance(self, req, context, server_id):
+ with utils.temporary_mutation(context, read_deleted='yes'):
+ return common.get_instance(self.compute_api, context, server_id)
+
@extensions.expected_errors(404)
def index(self, req, server_id):
"""Returns the list of actions recorded for a given instance."""
context = req.environ["nova.context"]
- instance = common.get_instance(self.compute_api, context, server_id)
+ instance = self._get_instance(req, context, server_id)
authorize(context, target=instance)
actions_raw = self.action_api.actions_get(context, instance)
actions = [self._format_action(action) for action in actions_raw]
@@ -63,7 +73,7 @@ class InstanceActionsController(wsgi.Controller):
def show(self, req, server_id, id):
"""Return data about the given instance action."""
context = req.environ['nova.context']
- instance = common.get_instance(self.compute_api, context, server_id)
+ instance = self._get_instance(req, context, server_id)
authorize(context, target=instance)
action = self.action_api.action_get_by_request_id(context, instance,
id)
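The 2.21 branch above leans on nova.utils.temporary_mutation to widen the query scope just for the lookup. A simplified sketch of that context-manager pattern (the real helper also handles dict-style targets and missing attributes):

    import contextlib

    @contextlib.contextmanager
    def temporary_mutation(obj, **kwargs):
        # Save current values, apply overrides, and restore the originals
        # on exit, even if the body raises.
        saved = {name: getattr(obj, name) for name in kwargs}
        for name, value in kwargs.items():
            setattr(obj, name, value)
        try:
            yield
        finally:
            for name, value in saved.items():
                setattr(obj, name, value)

So `with temporary_mutation(context, read_deleted='yes'):` makes deleted instances visible only for the duration of the get_instance call.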
diff --git a/nova/api/openstack/compute/legacy_v2/contrib/aggregates.py b/nova/api/openstack/compute/legacy_v2/contrib/aggregates.py
index 6e0a0a31fe..1c2fb8bb51 100644
--- a/nova/api/openstack/compute/legacy_v2/contrib/aggregates.py
+++ b/nova/api/openstack/compute/legacy_v2/contrib/aggregates.py
@@ -278,8 +278,11 @@ class AggregateController(object):
def _build_aggregate_items(self, aggregate):
keys = aggregate.obj_fields
for key in keys:
- if (aggregate.obj_attr_is_set(key)
- or key in aggregate.obj_extra_fields):
+ # NOTE(danms): Skip the uuid field because we have no microversion
+ # to expose it
+ if ((aggregate.obj_attr_is_set(key)
+ or key in aggregate.obj_extra_fields) and
+ key != 'uuid'):
yield key, getattr(aggregate, key)
diff --git a/nova/api/openstack/compute/legacy_v2/contrib/baremetal_nodes.py b/nova/api/openstack/compute/legacy_v2/contrib/baremetal_nodes.py
index 3ce1610091..ad1fda8350 100644
--- a/nova/api/openstack/compute/legacy_v2/contrib/baremetal_nodes.py
+++ b/nova/api/openstack/compute/legacy_v2/contrib/baremetal_nodes.py
@@ -15,12 +15,12 @@
"""The bare-metal admin extension with Ironic Proxy."""
-from oslo_config import cfg
from oslo_utils import importutils
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
+import nova.conf
from nova.i18n import _
ironic_client = importutils.try_import('ironicclient.client')
@@ -35,8 +35,7 @@ node_ext_fields = ['uuid', 'task_state', 'updated_at', 'pxe_config_path']
interface_fields = ['id', 'address', 'datapath_id', 'port_no']
-CONF = cfg.CONF
-
+CONF = nova.conf.CONF
CONF.import_opt('api_version',
'nova.virt.ironic.driver',
group='ironic')
@@ -52,7 +51,6 @@ CONF.import_opt('admin_password',
CONF.import_opt('admin_tenant_name',
'nova.virt.ironic.driver',
group='ironic')
-CONF.import_opt('compute_driver', 'nova.virt.driver')
def _check_ironic_client_enabled():
diff --git a/nova/api/openstack/compute/legacy_v2/contrib/migrations.py b/nova/api/openstack/compute/legacy_v2/contrib/migrations.py
index 91a5493c77..472dbf6dfa 100644
--- a/nova/api/openstack/compute/legacy_v2/contrib/migrations.py
+++ b/nova/api/openstack/compute/legacy_v2/contrib/migrations.py
@@ -31,6 +31,10 @@ def output(migrations_obj):
From a MigrationsList's object this method returns a list of
primitive objects with the only necessary fields.
"""
+ detail_keys = ['memory_total', 'memory_processed', 'memory_remaining',
+ 'disk_total', 'disk_processed', 'disk_remaining']
+    # NOTE(Shaohe Feng): We need to leverage oslo.versionedobjects here;
+    # then we can pass the target version to its obj_to_primitive.
objects = obj_base.obj_to_primitive(migrations_obj)
objects = [x for x in objects if not x['hidden']]
for obj in objects:
@@ -38,6 +42,10 @@ def output(migrations_obj):
del obj['deleted_at']
del obj['migration_type']
del obj['hidden']
+ if 'memory_total' in obj:
+ for key in detail_keys:
+ del obj[key]
+
return objects
diff --git a/nova/api/openstack/compute/legacy_v2/contrib/rescue.py b/nova/api/openstack/compute/legacy_v2/contrib/rescue.py
index 1e04d7c8bc..1b4e91c0cd 100644
--- a/nova/api/openstack/compute/legacy_v2/contrib/rescue.py
+++ b/nova/api/openstack/compute/legacy_v2/contrib/rescue.py
@@ -14,7 +14,6 @@
"""The rescue mode extension."""
-from oslo_config import cfg
import webob
from webob import exc
@@ -26,7 +25,6 @@ from nova import exception
from nova import utils
-CONF = cfg.CONF
authorize = exts.extension_authorizer('compute', 'rescue')
diff --git a/nova/api/openstack/compute/legacy_v2/contrib/services.py b/nova/api/openstack/compute/legacy_v2/contrib/services.py
index c7e92db390..625a10157d 100644
--- a/nova/api/openstack/compute/legacy_v2/contrib/services.py
+++ b/nova/api/openstack/compute/legacy_v2/contrib/services.py
@@ -34,6 +34,8 @@ class ServiceController(object):
self.ext_mgr = ext_mgr
def _get_services(self, req):
+ api_services = ('nova-osapi_compute', 'nova-ec2', 'nova-metadata')
+
context = req.environ['nova.context']
authorize(context)
@@ -41,8 +43,11 @@ class ServiceController(object):
# permission checks
nova_context.require_admin_context(context)
- services = self.host_api.service_get_all(
- context, set_zones=True)
+ services = [
+ s
+ for s in self.host_api.service_get_all(context, set_zones=True)
+ if s['binary'] not in api_services
+ ]
host = ''
if 'host' in req.GET:
diff --git a/nova/api/openstack/compute/legacy_v2/contrib/volumes.py b/nova/api/openstack/compute/legacy_v2/contrib/volumes.py
index 52e46738f6..6b0e9966e2 100644
--- a/nova/api/openstack/compute/legacy_v2/contrib/volumes.py
+++ b/nova/api/openstack/compute/legacy_v2/contrib/volumes.py
@@ -59,9 +59,21 @@ def _translate_volume_summary_view(context, vol):
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
+ # NOTE(ildikov): The attachments field in the volume info that
+ # Cinder sends is converted to an OrderedDict with the
+ # instance_uuid as key to make it easier for the multiattach
+ # feature to check the required information. Multiattach will
+            # be enabled in the Nova API in Newton.
+ # The format looks like the following:
+ # attachments = {'instance_uuid': {
+ # 'attachment_id': 'attachment_uuid',
+            #                   'mountpoint': '/dev/sda'
+ # }
+ # }
+        attachment = list(vol['attachments'].items())[0]
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
- vol['instance_uuid'],
- vol['mountpoint'])]
+ attachment[0],
+ attachment[1].get('mountpoint'))]
else:
d['attachments'] = [{}]
diff --git a/nova/api/openstack/compute/legacy_v2/servers.py b/nova/api/openstack/compute/legacy_v2/servers.py
index 751c744a83..9efa3f3051 100644
--- a/nova/api/openstack/compute/legacy_v2/servers.py
+++ b/nova/api/openstack/compute/legacy_v2/servers.py
@@ -84,6 +84,7 @@ CREATE_EXCEPTIONS = {
exception.InstanceExists: exc.HTTPConflict,
exception.NoUniqueMatch: exc.HTTPConflict,
exception.Invalid: exc.HTTPBadRequest,
+ exception.InstanceGroupNotFound: exc.HTTPBadRequest,
}
CREATE_EXCEPTIONS_MSGS = {
@@ -233,6 +234,7 @@ class Controller(wsgi.Controller):
instance_list = objects.InstanceList()
if is_detail:
+ instance_list._context = context
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
diff --git a/nova/api/openstack/compute/migrations.py b/nova/api/openstack/compute/migrations.py
index 919d0b86e2..265f23b059 100644
--- a/nova/api/openstack/compute/migrations.py
+++ b/nova/api/openstack/compute/migrations.py
@@ -29,6 +29,10 @@ def output(migrations_obj):
From a MigrationsList's object this method returns a list of
primitive objects with the only necessary fields.
"""
+ detail_keys = ['memory_total', 'memory_processed', 'memory_remaining',
+ 'disk_total', 'disk_processed', 'disk_remaining']
+    # NOTE(Shaohe Feng): We need to leverage oslo.versionedobjects here;
+    # then we can pass the target version to its obj_to_primitive.
objects = obj_base.obj_to_primitive(migrations_obj)
objects = [x for x in objects if not x['hidden']]
for obj in objects:
@@ -36,6 +40,10 @@ def output(migrations_obj):
del obj['deleted_at']
del obj['migration_type']
del obj['hidden']
+ if 'memory_total' in obj:
+ for key in detail_keys:
+ del obj[key]
+
return objects
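A tiny self-contained example (hypothetical data) of the filtering above: when a migration primitive carries the live-migration progress fields, every detail key is stripped from the summary view:

    detail_keys = ['memory_total', 'memory_processed', 'memory_remaining',
                   'disk_total', 'disk_processed', 'disk_remaining']

    migration = {'id': 1, 'status': 'migrating',
                 'memory_total': 1024, 'memory_processed': 512,
                 'memory_remaining': 512, 'disk_total': 0,
                 'disk_processed': 0, 'disk_remaining': 0}

    if 'memory_total' in migration:
        for key in detail_keys:
            del migration[key]

    print(migration)  # only 'id' and 'status' remain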
diff --git a/nova/api/openstack/compute/schemas/scheduler_hints.py b/nova/api/openstack/compute/schemas/scheduler_hints.py
index 7cfd498446..40aa1f31b1 100644
--- a/nova/api/openstack/compute/schemas/scheduler_hints.py
+++ b/nova/api/openstack/compute/schemas/scheduler_hints.py
@@ -19,9 +19,8 @@ _hints = {
'type': 'object',
'properties': {
'group': {
- # NOTE: The value of 'group' is stored to value which is
- # defined as varchar(255) in instance_system_metadata table.
- 'type': 'string', 'maxLength': 255,
+ 'type': 'string',
+ 'format': 'uuid'
},
'different_host': {
# NOTE: The value of 'different_host' is the set of server
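Declaring 'format': 'uuid' only has teeth when a format checker is wired in (Nova registers its own checkers in nova.api.validation). A self-contained sketch with the jsonschema library, registering the checker explicitly as an assumption rather than Nova's exact wiring:

    import uuid

    import jsonschema

    checker = jsonschema.FormatChecker()

    @checker.checks('uuid')
    def _is_uuid(value):
        try:
            uuid.UUID(value)
            return True
        except (TypeError, ValueError):
            return False

    schema = {'type': 'object',
              'properties': {'group': {'type': 'string', 'format': 'uuid'}}}
    validator = jsonschema.Draft4Validator(schema, format_checker=checker)

    validator.validate({'group': str(uuid.uuid4())})  # passes
    # validator.validate({'group': 'not-a-uuid'})     # ValidationError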
diff --git a/nova/api/openstack/compute/schemas/server_migrations.py b/nova/api/openstack/compute/schemas/server_migrations.py
new file mode 100644
index 0000000000..201c543e68
--- /dev/null
+++ b/nova/api/openstack/compute/schemas/server_migrations.py
@@ -0,0 +1,26 @@
+# Copyright 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+force_complete = {
+ 'type': 'object',
+ 'properties': {
+ 'force_complete': {
+ 'type': 'null'
+ }
+ },
+ 'required': ['force_complete'],
+ 'additionalProperties': False,
+}
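The schema accepts exactly one key whose value must be JSON null. A quick hedged check with the jsonschema library (nova.api.validation applies such schemas in roughly this way):

    import jsonschema

    force_complete = {
        'type': 'object',
        'properties': {'force_complete': {'type': 'null'}},
        'required': ['force_complete'],
        'additionalProperties': False,
    }

    jsonschema.validate({'force_complete': None}, force_complete)  # passes
    # Each of these would raise jsonschema.ValidationError:
    #   {'force_complete': True}          - value must be null
    #   {}                                - the key is required
    #   {'force_complete': None, 'x': 1}  - no extra properties allowed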
diff --git a/nova/api/openstack/compute/schemas/servers.py b/nova/api/openstack/compute/schemas/servers.py
index 4393243600..054ec259dd 100644
--- a/nova/api/openstack/compute/schemas/servers.py
+++ b/nova/api/openstack/compute/schemas/servers.py
@@ -58,6 +58,11 @@ base_create_v20['properties']['server'][
'properties']['name'] = parameter_types.name_with_leading_trailing_spaces
+base_create_v219 = copy.deepcopy(base_create)
+base_create_v219['properties']['server'][
+ 'properties']['description'] = parameter_types.description
+
+
base_update = {
'type': 'object',
'properties': {
@@ -78,6 +83,9 @@ base_update_v20 = copy.deepcopy(base_update)
base_update_v20['properties']['server'][
'properties']['name'] = parameter_types.name_with_leading_trailing_spaces
+base_update_v219 = copy.deepcopy(base_update)
+base_update_v219['properties']['server'][
+ 'properties']['description'] = parameter_types.description
base_rebuild = {
'type': 'object',
@@ -104,6 +112,9 @@ base_rebuild_v20 = copy.deepcopy(base_rebuild)
base_rebuild_v20['properties']['rebuild'][
'properties']['name'] = parameter_types.name_with_leading_trailing_spaces
+base_rebuild_v219 = copy.deepcopy(base_rebuild)
+base_rebuild_v219['properties']['rebuild'][
+ 'properties']['description'] = parameter_types.description
base_resize = {
'type': 'object',
@@ -161,3 +172,14 @@ reboot = {
'required': ['reboot'],
'additionalProperties': False
}
+
+trigger_crash_dump = {
+ 'type': 'object',
+ 'properties': {
+ 'trigger_crash_dump': {
+ 'type': 'null'
+ }
+ },
+ 'required': ['trigger_crash_dump'],
+ 'additionalProperties': False
+}
diff --git a/nova/api/openstack/compute/security_groups.py b/nova/api/openstack/compute/security_groups.py
index 28554a8fca..5dd8801c72 100644
--- a/nova/api/openstack/compute/security_groups.py
+++ b/nova/api/openstack/compute/security_groups.py
@@ -438,7 +438,10 @@ class SecurityGroupsOutputController(wsgi.Controller):
if not len(servers):
return
key = "security_groups"
- context = _authorize_context(req)
+ context = req.environ['nova.context']
+ if not softauth(context):
+ return
+
if not openstack_driver.is_neutron_security_groups():
for server in servers:
instance = req.get_db_instance(server['id'])
@@ -472,8 +475,6 @@ class SecurityGroupsOutputController(wsgi.Controller):
ATTRIBUTE_NAME, [{'name': 'default'}])
def _show(self, req, resp_obj):
- if not softauth(req.environ['nova.context']):
- return
if 'server' in resp_obj.obj:
self._extend_servers(req, [resp_obj.obj['server']])
@@ -487,8 +488,6 @@ class SecurityGroupsOutputController(wsgi.Controller):
@wsgi.extends
def detail(self, req, resp_obj):
- if not softauth(req.environ['nova.context']):
- return
self._extend_servers(req, list(resp_obj.obj['servers']))
diff --git a/nova/api/openstack/compute/server_metadata.py b/nova/api/openstack/compute/server_metadata.py
index cbec6d5dd3..f7be44f97c 100644
--- a/nova/api/openstack/compute/server_metadata.py
+++ b/nova/api/openstack/compute/server_metadata.py
@@ -191,3 +191,8 @@ class ServerMetadata(extensions.V21APIExtensionBase):
"/{project_id}/servers/{server_id}/metadata",
controller=wsgi_resource,
action='update_all', conditions={"method": ['PUT']})
+ # Also connect the non project_id routes
+ mapper.connect("metadata",
+ "/servers/{server_id}/metadata",
+ controller=wsgi_resource,
+ action='update_all', conditions={"method": ['PUT']})
diff --git a/nova/api/openstack/compute/server_migrations.py b/nova/api/openstack/compute/server_migrations.py
new file mode 100644
index 0000000000..caa37c1e0a
--- /dev/null
+++ b/nova/api/openstack/compute/server_migrations.py
@@ -0,0 +1,78 @@
+# Copyright 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova.api.openstack import common
+from nova.api.openstack.compute.schemas import server_migrations
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api import validation
+from nova import compute
+from nova import exception
+
+ALIAS = 'servers:migrations'
+authorize = extensions.os_compute_authorizer(ALIAS)
+
+
+class ServerMigrationsController(wsgi.Controller):
+ """The server migrations API controller for the OpenStack API."""
+
+ def __init__(self):
+ self.compute_api = compute.API(skip_policy_check=True)
+ super(ServerMigrationsController, self).__init__()
+
+ @wsgi.Controller.api_version("2.22")
+ @wsgi.response(202)
+ @extensions.expected_errors((400, 403, 404, 409))
+ @wsgi.action('force_complete')
+ @validation.schema(server_migrations.force_complete)
+ def _force_complete(self, req, id, server_id, body):
+ context = req.environ['nova.context']
+ authorize(context, action='force_complete')
+
+ instance = common.get_instance(self.compute_api, context, server_id)
+ try:
+ self.compute_api.live_migrate_force_complete(context, instance, id)
+ except exception.InstanceNotFound as e:
+ raise exc.HTTPNotFound(explanation=e.format_message())
+ except (exception.MigrationNotFoundByStatus,
+ exception.InvalidMigrationState,
+ exception.MigrationNotFoundForInstance) as e:
+ raise exc.HTTPBadRequest(explanation=e.format_message())
+ except exception.InstanceIsLocked as e:
+ raise exc.HTTPConflict(explanation=e.format_message())
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(
+ state_error, 'force_complete', server_id)
+
+
+class ServerMigrations(extensions.V21APIExtensionBase):
+ """Server Migrations API."""
+ name = "ServerMigrations"
+ alias = 'server-migrations'
+ version = 1
+
+ def get_resources(self):
+ parent = {'member_name': 'server',
+ 'collection_name': 'servers'}
+ member_actions = {'action': 'POST'}
+ resources = [extensions.ResourceExtension(
+ 'migrations', ServerMigrationsController(),
+ parent=parent, member_actions=member_actions)]
+ return resources
+
+ def get_controller_extensions(self):
+ return []
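A hypothetical client call (URL, token, and IDs are placeholders) that exercises the new action; per the controller above, success returns 202 with no body:

    import requests  # assumed available

    resp = requests.post(
        'http://nova-api:8774/v2.1/servers/SERVER_UUID/migrations/1/action',
        json={'force_complete': None},
        headers={'X-Auth-Token': 'TOKEN',
                 'X-OpenStack-Nova-API-Version': '2.22'})
    assert resp.status_code == 202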
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index fda67f30cb..68cf726d26 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -82,6 +82,10 @@ class ServersController(wsgi.Controller):
schema_server_update_v20 = schema_servers.base_update_v20
schema_server_rebuild_v20 = schema_servers.base_rebuild_v20
+ schema_server_create_v219 = schema_servers.base_create_v219
+ schema_server_update_v219 = schema_servers.base_update_v219
+ schema_server_rebuild_v219 = schema_servers.base_rebuild_v219
+
@staticmethod
def _add_location(robj):
# Just in case...
@@ -207,6 +211,9 @@ class ServersController(wsgi.Controller):
propagate_map_exceptions=True)
if list(self.create_schema_manager):
self.create_schema_manager.map(self._create_extension_schema,
+ self.schema_server_create_v219,
+ '2.19')
+ self.create_schema_manager.map(self._create_extension_schema,
self.schema_server_create, '2.1')
self.create_schema_manager.map(self._create_extension_schema,
self.schema_server_create_v20,
@@ -224,6 +231,9 @@ class ServersController(wsgi.Controller):
propagate_map_exceptions=True)
if list(self.update_schema_manager):
self.update_schema_manager.map(self._update_extension_schema,
+ self.schema_server_update_v219,
+ '2.19')
+ self.update_schema_manager.map(self._update_extension_schema,
self.schema_server_update, '2.1')
self.update_schema_manager.map(self._update_extension_schema,
self.schema_server_update_v20,
@@ -241,6 +251,9 @@ class ServersController(wsgi.Controller):
propagate_map_exceptions=True)
if list(self.rebuild_schema_manager):
self.rebuild_schema_manager.map(self._rebuild_extension_schema,
+ self.schema_server_rebuild_v219,
+ '2.19')
+ self.rebuild_schema_manager.map(self._rebuild_extension_schema,
self.schema_server_rebuild, '2.1')
self.rebuild_schema_manager.map(self._rebuild_extension_schema,
self.schema_server_rebuild_v20,
@@ -404,6 +417,7 @@ class ServersController(wsgi.Controller):
instance_list = objects.InstanceList()
if is_detail:
+ instance_list._context = context
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
@@ -420,7 +434,7 @@ class ServersController(wsgi.Controller):
:param is_detail: True if you plan on showing the details of the
instance in the response, False otherwise.
"""
- expected_attrs = ['flavor', 'pci_devices']
+ expected_attrs = ['flavor', 'pci_devices', 'numa_topology']
if is_detail:
expected_attrs = self._view_builder.get_show_expected_attrs(
expected_attrs)
@@ -513,7 +527,8 @@ class ServersController(wsgi.Controller):
@wsgi.response(202)
@extensions.expected_errors((400, 403, 409, 413))
@validation.schema(schema_server_create_v20, '2.0', '2.0')
- @validation.schema(schema_server_create, '2.1')
+ @validation.schema(schema_server_create, '2.1', '2.18')
+ @validation.schema(schema_server_create_v219, '2.19')
def create(self, req, body):
"""Creates a new server for a given user."""
@@ -522,6 +537,16 @@ class ServersController(wsgi.Controller):
password = self._get_server_admin_password(server_dict)
name = common.normalize_name(server_dict['name'])
+ if api_version_request.is_supported(req, min_version='2.19'):
+ if 'description' in server_dict:
+ # This is allowed to be None
+ description = server_dict['description']
+ else:
+ # No default description
+ description = None
+ else:
+ description = name
+
# Arguments to be passed to instance create function
create_kwargs = {}
@@ -595,7 +620,7 @@ class ServersController(wsgi.Controller):
inst_type,
image_uuid,
display_name=name,
- display_description=name,
+ display_description=description,
availability_zone=availability_zone,
forced_host=host, forced_node=node,
metadata=server_dict.get('metadata', {}),
@@ -647,6 +672,7 @@ class ServersController(wsgi.Controller):
exception.NetworkRequiresSubnet,
exception.NetworkNotFound,
exception.NetworkDuplicated,
+ exception.InvalidBDM,
exception.InvalidBDMSnapshot,
exception.InvalidBDMVolume,
exception.InvalidBDMImage,
@@ -663,7 +689,8 @@ class ServersController(wsgi.Controller):
exception.ImageNUMATopologyCPUOutOfRange,
exception.ImageNUMATopologyCPUDuplicates,
exception.ImageNUMATopologyCPUsUnassigned,
- exception.ImageNUMATopologyMemoryOutOfRange) as error:
+ exception.ImageNUMATopologyMemoryOutOfRange,
+ exception.InstanceGroupNotFound) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.InstanceExists,
@@ -766,7 +793,8 @@ class ServersController(wsgi.Controller):
@extensions.expected_errors((400, 404))
@validation.schema(schema_server_update_v20, '2.0', '2.0')
- @validation.schema(schema_server_update, '2.1')
+ @validation.schema(schema_server_update, '2.1', '2.18')
+ @validation.schema(schema_server_update_v219, '2.19')
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
@@ -778,6 +806,10 @@ class ServersController(wsgi.Controller):
update_dict['display_name'] = common.normalize_name(
body['server']['name'])
+ if 'description' in body['server']:
+ # This is allowed to be None (remove description)
+ update_dict['display_description'] = body['server']['description']
+
if list(self.update_extension_manager):
self.update_extension_manager.map(self._update_extension_point,
body['server'], update_dict)
@@ -872,15 +904,6 @@ class ServersController(wsgi.Controller):
except exception.QuotaError as error:
raise exc.HTTPForbidden(
explanation=error.format_message())
- except exception.FlavorNotFound:
- msg = _("Unable to locate requested flavor.")
- raise exc.HTTPBadRequest(explanation=msg)
- except exception.CannotResizeToSameFlavor:
- msg = _("Resize requires a flavor change.")
- raise exc.HTTPBadRequest(explanation=msg)
- except (exception.CannotResizeDisk,
- exception.AutoDiskConfigDisabledByImage) as e:
- raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
@@ -894,8 +917,11 @@ class ServersController(wsgi.Controller):
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
- except (exception.NoValidHost,
- exception.AutoDiskConfigDisabledByImage) as e:
+ except (exception.AutoDiskConfigDisabledByImage,
+ exception.CannotResizeDisk,
+ exception.CannotResizeToSameFlavor,
+ exception.FlavorNotFound,
+ exception.NoValidHost) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.Invalid:
msg = _("Invalid instance image.")
@@ -971,7 +997,8 @@ class ServersController(wsgi.Controller):
@extensions.expected_errors((400, 403, 404, 409, 413))
@wsgi.action('rebuild')
@validation.schema(schema_server_rebuild_v20, '2.0', '2.0')
- @validation.schema(schema_server_rebuild, '2.1')
+ @validation.schema(schema_server_rebuild, '2.1', '2.18')
+ @validation.schema(schema_server_rebuild_v219, '2.19')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
rebuild_dict = body['rebuild']
@@ -987,6 +1014,7 @@ class ServersController(wsgi.Controller):
attr_map = {
'name': 'display_name',
+ 'description': 'display_description',
'metadata': 'metadata',
}
@@ -1162,6 +1190,26 @@ class ServersController(wsgi.Controller):
common.raise_http_conflict_for_instance_invalid_state(state_error,
'stop', id)
+ @wsgi.Controller.api_version("2.17")
+ @wsgi.response(202)
+ @extensions.expected_errors((400, 404, 409))
+ @wsgi.action('trigger_crash_dump')
+ @validation.schema(schema_servers.trigger_crash_dump)
+ def _action_trigger_crash_dump(self, req, id, body):
+ """Trigger crash dump in an instance"""
+ context = req.environ['nova.context']
+ instance = self._get_instance(context, id)
+ authorize(context, instance, 'trigger_crash_dump')
+ try:
+ self.compute_api.trigger_crash_dump(context, instance)
+ except exception.InstanceInvalidState as state_error:
+ common.raise_http_conflict_for_instance_invalid_state(state_error,
+ 'trigger_crash_dump', id)
+ except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
+ raise webob.exc.HTTPConflict(explanation=e.format_message())
+ except exception.NMINotSupported as e:
+ raise webob.exc.HTTPBadRequest(explanation=e.format_message())
+
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
diff --git a/nova/api/openstack/compute/services.py b/nova/api/openstack/compute/services.py
index f2c9a67217..0c21442dd1 100644
--- a/nova/api/openstack/compute/services.py
+++ b/nova/api/openstack/compute/services.py
@@ -39,9 +39,16 @@ class ServiceController(wsgi.Controller):
"disable-log-reason": self._disable_log_reason}
def _get_services(self, req):
+ api_services = ('nova-osapi_compute', 'nova-ec2', 'nova-metadata')
+
context = req.environ['nova.context']
authorize(context)
- _services = self.host_api.service_get_all(context, set_zones=True)
+
+ _services = [
+ s
+ for s in self.host_api.service_get_all(context, set_zones=True)
+ if s['binary'] not in api_services
+ ]
host = ''
if 'host' in req.GET:
diff --git a/nova/api/openstack/compute/views/servers.py b/nova/api/openstack/compute/views/servers.py
index 7d72cbc7ae..14947f5173 100644
--- a/nova/api/openstack/compute/views/servers.py
+++ b/nova/api/openstack/compute/views/servers.py
@@ -309,4 +309,8 @@ class ViewBuilderV21(ViewBuilder):
server["server"]["locked"] = (True if instance["locked_by"]
else False)
+ if api_version_request.is_supported(request, min_version="2.19"):
+ server["server"]["description"] = instance.get(
+ "display_description")
+
return server
diff --git a/nova/api/openstack/compute/volumes.py b/nova/api/openstack/compute/volumes.py
index 3db1106e1f..7dc2caaebe 100644
--- a/nova/api/openstack/compute/volumes.py
+++ b/nova/api/openstack/compute/volumes.py
@@ -18,12 +18,14 @@
from oslo_utils import strutils
from webob import exc
+from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import volumes as volumes_schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
+from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova import objects
@@ -55,9 +57,21 @@ def _translate_volume_summary_view(context, vol):
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
+ # NOTE(ildikov): The attachments field in the volume info that
+ # Cinder sends is converted to an OrderedDict with the
+ # instance_uuid as key to make it easier for the multiattach
+ # feature to check the required information. Multiattach will
+            # be enabled in the Nova API in Newton.
+ # The format looks like the following:
+ # attachments = {'instance_uuid': {
+ # 'attachment_id': 'attachment_uuid',
+            #                   'mountpoint': '/dev/sda'
+ # }
+ # }
+        attachment = list(vol['attachments'].items())[0]
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
- vol['instance_uuid'],
- vol['mountpoint'])]
+ attachment[0],
+ attachment[1].get('mountpoint'))]
else:
d['attachments'] = [{}]
@@ -212,6 +226,19 @@ def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
return d
+def _check_request_version(req, min_version, method, server_id, server_state):
+ if not api_version_request.is_supported(req, min_version=min_version):
+ exc_inv = exception.InstanceInvalidState(
+ attr='vm_state',
+ instance_uuid=server_id,
+ state=server_state,
+ method=method)
+ common.raise_http_conflict_for_instance_invalid_state(
+ exc_inv,
+ method,
+ server_id)
+
+
class VolumeAttachmentController(wsgi.Controller):
"""The volume attachment API controller for the OpenStack API.
@@ -278,6 +305,12 @@ class VolumeAttachmentController(wsgi.Controller):
device = body['volumeAttachment'].get('device')
instance = common.get_instance(self.compute_api, context, server_id)
+
+ if instance.vm_state in (vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED):
+ _check_request_version(req, '2.20', 'attach_volume',
+ server_id, instance.vm_state)
+
try:
device = self.compute_api.attach_volume(context, instance,
volume_id, device)
@@ -371,7 +404,10 @@ class VolumeAttachmentController(wsgi.Controller):
volume_id = id
instance = common.get_instance(self.compute_api, context, server_id)
-
+ if instance.vm_state in (vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED):
+ _check_request_version(req, '2.20', 'detach_volume',
+ server_id, instance.vm_state)
try:
volume = self.volume_api.get(context, volume_id)
except exception.VolumeNotFound as e:
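A condensed sketch (simplified names, stand-in exception) of the gate above: pre-2.20 requests must not attach or detach volumes on shelved servers, and are turned into the same conflict a bad vm_state would produce:

    SHELVED_STATES = ('shelved', 'shelved_offloaded')

    class Conflict(Exception):
        """Stand-in for the HTTP 409 the real handler raises."""

    def check_volume_op(vm_state, requested, min_version=(2, 20)):
        if vm_state in SHELVED_STATES and requested < min_version:
            raise Conflict('operation not allowed while vm_state is %s '
                           'below microversion 2.20' % vm_state)

    check_volume_op('active', (2, 1))    # allowed at any version
    check_volume_op('shelved', (2, 20))  # allowed: new microversion
    # check_volume_op('shelved', (2, 19))  would raise Conflict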
diff --git a/nova/api/openstack/rest_api_version_history.rst b/nova/api/openstack/rest_api_version_history.rst
index d3a67a2fb1..5cd44edb59 100644
--- a/nova/api/openstack/rest_api_version_history.rst
+++ b/nova/api/openstack/rest_api_version_history.rst
@@ -37,8 +37,8 @@ user documentation.
Fixes status code for ``os-keypairs`` delete method from 202 to 204
-2.3
----
+2.3 (Maximum in Kilo)
+---------------------
Exposed additional attributes in ``os-extended-server-attributes``:
``reservation_id``, ``launch_index``, ``ramdisk_id``, ``kernel_id``, ``hostname``,
@@ -124,8 +124,8 @@ user documentation.
Exposed attribute ``forced_down`` for ``os-services``.
Added ability to change the ``forced_down`` attribute by calling an update.
-2.12
-----
+2.12 (Maximum in Liberty)
+-------------------------
Exposes VIF ``net-id`` attribute in ``os-virtual-interfaces``.
User will be able to get Virtual Interfaces ``net-id`` in Virtual Interfaces
@@ -150,3 +150,48 @@ user documentation.
From this version of the API users can choose 'soft-affinity' and
'soft-anti-affinity' rules too for server-groups.
+
+2.16
+----
+
+  Exposes the new host_status attribute for servers/detail and
+  servers/{server_id}, giving the ability to get the nova-compute status
+  when querying servers. By default, this is only exposed to cloud
+  administrators.
+
+2.17
+----
+
+  Add a new API for triggering a crash dump in an instance. Different
+  operating systems in the instance may need different configurations to
+  trigger the crash dump.
+
+2.18
+----
+ Establishes a set of routes that makes project_id an optional construct in v2.1.
+
+2.19
+----
+ Allow the user to set and get the server description.
+ The user will be able to set the description when creating, rebuilding,
+ or updating a server, and get the description as part of the server details.
+
+2.20
+----
+  From this version of the API users can attach and detach volumes to/from
+  instances that are in the shelved or shelved_offloaded state.
+
+2.21
+----
+
+ The ``os-instance-actions`` API now returns information from deleted
+ instances.
+
+2.22
+----
+
+  A new resource, servers:migrations, is added, along with a new API to
+  force a live migration to complete::
+
+ POST /servers/<uuid>/migrations/<id>/action
+ {
+ "force_complete": null
+ }
diff --git a/nova/api/openstack/urlmap.py b/nova/api/openstack/urlmap.py
index 9c35bd31fa..8482f9b8e5 100644
--- a/nova/api/openstack/urlmap.py
+++ b/nova/api/openstack/urlmap.py
@@ -19,10 +19,10 @@ from oslo_log import log as logging
import paste.urlmap
import six
-if six.PY3:
- from urllib import request as urllib2
-else:
+if six.PY2:
import urllib2
+else:
+ from urllib import request as urllib2
from nova.api.openstack import wsgi
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 256d1bfc34..1a530162f0 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -86,6 +86,17 @@ def get_media_map():
return dict(_MEDIA_TYPE_MAP.items())
+# NOTE(rlrossit): This function allows a get on both a dict-like and an
+# object-like object. cache_db_items() is used on both versioned objects and
+# dicts, so the function can't be totally changed over to [] syntax, nor
+# can it be changed over to use getattr().
+def item_get(item, item_key):
+ if hasattr(item, '__getitem__'):
+ return item[item_key]
+ else:
+ return getattr(item, item_key)
+
+
class Request(wsgi.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
@@ -105,7 +116,7 @@ class Request(wsgi.Request):
"""
db_items = self._extension_data['db_items'].setdefault(key, {})
for item in items:
- db_items[item[item_key]] = item
+ db_items[item_get(item, item_key)] = item
def get_db_items(self, key):
"""Allow an API extension to get previously stored objects within
@@ -260,17 +271,7 @@ class ActionDispatcher(object):
raise NotImplementedError()
-class TextDeserializer(ActionDispatcher):
- """Default request body deserialization."""
-
- def deserialize(self, datastring, action='default'):
- return self.dispatch(datastring, action=action)
-
- def default(self, datastring):
- return {}
-
-
-class JSONDeserializer(TextDeserializer):
+class JSONDeserializer(ActionDispatcher):
def _from_json(self, datastring):
try:
@@ -279,59 +280,23 @@ class JSONDeserializer(TextDeserializer):
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
+ def deserialize(self, datastring, action='default'):
+ return self.dispatch(datastring, action=action)
+
def default(self, datastring):
return {'body': self._from_json(datastring)}
-class DictSerializer(ActionDispatcher):
- """Default request body serialization."""
+class JSONDictSerializer(ActionDispatcher):
+ """Default JSON request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
- return ""
-
-
-class JSONDictSerializer(DictSerializer):
- """Default JSON request body serialization."""
-
- def default(self, data):
return six.text_type(jsonutils.dumps(data))
-def serializers(**serializers):
- """Attaches serializers to a method.
-
- This decorator associates a dictionary of serializers with a
- method. Note that the function attributes are directly
- manipulated; the method is not wrapped.
- """
-
- def decorator(func):
- if not hasattr(func, 'wsgi_serializers'):
- func.wsgi_serializers = {}
- func.wsgi_serializers.update(serializers)
- return func
- return decorator
-
-
-def deserializers(**deserializers):
- """Attaches deserializers to a method.
-
- This decorator associates a dictionary of deserializers with a
- method. Note that the function attributes are directly
- manipulated; the method is not wrapped.
- """
-
- def decorator(func):
- if not hasattr(func, 'wsgi_deserializers'):
- func.wsgi_deserializers = {}
- func.wsgi_deserializers.update(deserializers)
- return func
- return decorator
-
-
def response(code):
"""Attaches response code to a method.
@@ -347,29 +312,21 @@ def response(code):
class ResponseObject(object):
- """Bundles a response object with appropriate serializers.
+ """Bundles a response object
- Object that app methods may return in order to bind alternate
- serializers with a response object to be serialized. Its use is
- optional.
+ Object that app methods may return in order to allow its response
+    to be modified by extensions in the code. Its use is optional (and
+    you should only use it if you really know what you are doing).
"""
- def __init__(self, obj, code=None, headers=None, **serializers):
- """Binds serializers with an object.
-
- Takes keyword arguments akin to the @serializer() decorator
- for specifying serializers. Serializers specified will be
- given preference over default serializers or method-specific
- serializers on return.
- """
+ def __init__(self, obj, code=None, headers=None):
+ """Builds a response object."""
self.obj = obj
- self.serializers = serializers
self._default_code = 200
self._code = code
self._headers = headers or {}
- self.serializer = None
- self.media_type = None
+ self.serializer = JSONDictSerializer()
def __getitem__(self, key):
"""Retrieves a header with the given name."""
@@ -386,76 +343,14 @@ class ResponseObject(object):
del self._headers[key.lower()]
- def _bind_method_serializers(self, meth_serializers):
- """Binds method serializers with the response object.
-
- Binds the method serializers with the response object.
- Serializers specified to the constructor will take precedence
- over serializers specified to this method.
-
- :param meth_serializers: A dictionary with keys mapping to
- response types and values containing
- serializer objects.
- """
-
- # We can't use update because that would be the wrong
- # precedence
- for mtype, serializer in meth_serializers.items():
- self.serializers.setdefault(mtype, serializer)
-
- def get_serializer(self, content_type, default_serializers=None):
- """Returns the serializer for the wrapped object.
-
- Returns the serializer for the wrapped object subject to the
- indicated content type. If no serializer matching the content
- type is attached, an appropriate serializer drawn from the
- default serializers will be used. If no appropriate
- serializer is available, raises InvalidContentType.
- """
-
- default_serializers = default_serializers or {}
-
- try:
- mtype = get_media_map().get(content_type, content_type)
- if mtype in self.serializers:
- return mtype, self.serializers[mtype]
- else:
- return mtype, default_serializers[mtype]
- except (KeyError, TypeError):
- raise exception.InvalidContentType(content_type=content_type)
-
- def preserialize(self, content_type, default_serializers=None):
- """Prepares the serializer that will be used to serialize.
-
- Determines the serializer that will be used and prepares an
- instance of it for later call. This allows the serializer to
- be accessed by extensions for, e.g., template extension.
- """
-
- mtype, serializer = self.get_serializer(content_type,
- default_serializers)
- self.media_type = mtype
- self.serializer = serializer()
-
- def attach(self, **kwargs):
- """Attach slave templates to serializers."""
-
- if self.media_type in kwargs:
- self.serializer.attach(kwargs[self.media_type])
-
- def serialize(self, request, content_type, default_serializers=None):
+ def serialize(self, request, content_type):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
- if self.serializer:
- serializer = self.serializer
- else:
- _mtype, _serializer = self.get_serializer(content_type,
- default_serializers)
- serializer = _serializer()
+ serializer = self.serializer
body = None
if self.obj is not None:
@@ -486,8 +381,12 @@ class ResponseObject(object):
return self._headers.copy()
-def action_peek_json(body):
- """Determine action to invoke."""
+def action_peek(body):
+ """Determine action to invoke.
+
+    This looks inside the JSON body and pulls out the action method
+    name.
+ """
try:
decoded = jsonutils.loads(body)
@@ -500,7 +399,7 @@ def action_peek_json(body):
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
- # Return the action and the decoded body...
+ # Return the action name
return list(decoded.keys())[0]
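A rough standalone illustration (json in place of oslo's jsonutils, ValueError in place of MalformedRequestBody): action_peek just pulls out the lone top-level key of an action body:

    import json

    def action_peek(body):
        decoded = json.loads(body)
        if len(decoded) == 0:
            raise ValueError('no body keys')
        if len(decoded) > 1:
            raise ValueError('too many body keys')
        return list(decoded.keys())[0]

    print(action_peek('{"force_complete": null}'))      # 'force_complete'
    print(action_peek('{"trigger_crash_dump": null}'))  # 'trigger_crash_dump'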
@@ -561,13 +460,9 @@ class Resource(wsgi.Application):
"""
support_api_request_version = False
- def __init__(self, controller, action_peek=None, inherits=None,
- **deserializers):
+ def __init__(self, controller, inherits=None):
""":param controller: object that implement methods created by routes
lib
- :param action_peek: dictionary of routines for peeking into an
- action request body to determine the
- desired action
:param inherits: another resource object that this resource should
inherit extensions from. Any action extensions that
are applied to the parent resource will also apply
@@ -576,15 +471,8 @@ class Resource(wsgi.Application):
self.controller = controller
- default_deserializers = dict(json=JSONDeserializer)
- default_deserializers.update(deserializers)
-
- self.default_deserializers = default_deserializers
self.default_serializers = dict(json=JSONDictSerializer)
- self.action_peek = dict(json=action_peek_json)
- self.action_peek.update(action_peek or {})
-
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
@@ -647,31 +535,36 @@ class Resource(wsgi.Application):
return args
def get_body(self, request):
- try:
- content_type = request.get_content_type()
- except exception.InvalidContentType:
- LOG.debug("Unrecognized Content-Type provided in request")
- return None, b''
+ content_type = request.get_content_type()
return content_type, request.body
- def deserialize(self, meth, content_type, body):
- meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
- try:
- mtype = get_media_map().get(content_type, content_type)
- if mtype in meth_deserializers:
- deserializer = meth_deserializers[mtype]
- else:
- deserializer = self.default_deserializers[mtype]
- except (KeyError, TypeError):
- raise exception.InvalidContentType(content_type=content_type)
-
- if (hasattr(deserializer, 'want_controller')
- and deserializer.want_controller):
- return deserializer(self.controller).deserialize(body)
- else:
- return deserializer().deserialize(body)
-
+ def deserialize(self, body):
+ return JSONDeserializer().deserialize(body)
+
+    # NOTE(sdague): I didn't start the fire; however, here is what all
+    # of this is about.
+ #
+ # In the legacy v2 code stack, extensions could extend actions
+ # with a generator that let 1 method be split into a top and
+ # bottom half. The top half gets executed before the main
+ # processing of the request (so effectively gets to modify the
+ # request before it gets to the main method).
+ #
+ # Returning a response triggers a shortcut to fail out. The
+ # response will nearly always be a failure condition, as it ends
+ # up skipping further processing one level up from here.
+ #
+ # It then passes the list of extensions on, in reverse order.
+ # post_process will run through all of those, again with the same
+ # basic logic.
+ #
+ # In tree this is only used in the legacy v2 stack, and only by
+ # the DiskConfig and SchedulerHints extensions, from what I can see.
+ #
+ # pre_process_extensions can be removed when the legacy v2 code
+ # goes away. post_process_extensions can be massively simplified
+ # at that point.
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
@@ -699,7 +592,8 @@ class Resource(wsgi.Application):
# Regular functions only perform post-processing
post.append(ext)
- # Run post-processing in the reverse order
+ # A None response means we keep going. We reverse the
+ # extension list for post-processing.
return None, reversed(post)
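A hypothetical action extension showing the generator protocol the NOTE above describes (illustrative only, not an in-tree extension): pre_process_extensions runs the code up to the yield, and post_process_extensions later sends the response object back into the generator.

    def example_extension(req, **kwargs):
        # top half: runs before the main handler and may inspect or
        # mutate the incoming request
        resp_obj = yield
        # bottom half: runs in post_process_extensions once the main
        # handler has produced a response object (assuming obj is a
        # dict, as it is for JSON responses)
        resp_obj.obj['example'] = True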
def post_process_extensions(self, extensions, resp_obj, request,
@@ -759,8 +653,15 @@ class Resource(wsgi.Application):
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
- content_type, body = self.get_body(request)
- accept = request.best_match_content_type()
+
+ # NOTE(sdague): we filter out InvalidContentTypes early so we
+ # know everything is good from here on out.
+ try:
+ content_type, body = self.get_body(request)
+ accept = request.best_match_content_type()
+ except exception.InvalidContentType:
+ msg = _("Unsupported Content-Type")
+ return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
@@ -805,10 +706,7 @@ class Resource(wsgi.Application):
if request.content_length == 0:
contents = {'body': None}
else:
- contents = self.deserialize(meth, content_type, body)
- except exception.InvalidContentType:
- msg = _("Unsupported Content-Type")
- return Fault(webob.exc.HTTPBadRequest(explanation=msg))
+ contents = self.deserialize(body)
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
@@ -851,19 +749,14 @@ class Resource(wsgi.Application):
# Run post-processing extensions
if resp_obj:
# Do a preserialize to set up the response object
- serializers = getattr(meth, 'wsgi_serializers', {})
- resp_obj._bind_method_serializers(serializers)
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
- resp_obj.preserialize(accept, self.default_serializers)
-
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
- response = resp_obj.serialize(request, accept,
- self.default_serializers)
+ response = resp_obj.serialize(request, accept)
if hasattr(response, 'headers'):
for hdr, val in list(response.headers.items()):
@@ -892,7 +785,6 @@ class Resource(wsgi.Application):
def _get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
-
# Look up the method
try:
if not self.controller:
@@ -908,9 +800,7 @@ class Resource(wsgi.Application):
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
- # OK, it's an action; figure out which action...
- mtype = get_media_map().get(content_type)
- action_name = self.action_peek[mtype](body)
+ action_name = action_peek(body)
else:
action_name = action
@@ -1195,14 +1085,9 @@ class Fault(webob.exc.HTTPException):
self.wrapped_exc.headers['Vary'] = \
API_VERSION_REQUEST_HEADER
- content_type = req.best_match_content_type()
- serializer = {
- 'application/json': JSONDictSerializer(),
- }[content_type]
-
- self.wrapped_exc.content_type = content_type
+ self.wrapped_exc.content_type = 'application/json'
self.wrapped_exc.charset = 'UTF-8'
- self.wrapped_exc.text = serializer.serialize(fault_data)
+ self.wrapped_exc.text = JSONDictSerializer().serialize(fault_data)
return self.wrapped_exc
@@ -1239,20 +1124,15 @@ class RateLimitFault(webob.exc.HTTPException):
to our error format.
"""
user_locale = request.best_match_language()
- content_type = request.best_match_content_type()
self.content['overLimit']['message'] = \
i18n.translate(self.content['overLimit']['message'], user_locale)
self.content['overLimit']['details'] = \
i18n.translate(self.content['overLimit']['details'], user_locale)
- serializer = {
- 'application/json': JSONDictSerializer(),
- }[content_type]
-
- content = serializer.serialize(self.content)
+ content = JSONDictSerializer().serialize(self.content)
self.wrapped_exc.charset = 'UTF-8'
- self.wrapped_exc.content_type = content_type
+ self.wrapped_exc.content_type = "application/json"
self.wrapped_exc.text = content
return self.wrapped_exc
diff --git a/nova/api/opts.py b/nova/api/opts.py
index 492b58637a..8976afcbe2 100644
--- a/nova/api/opts.py
+++ b/nova/api/opts.py
@@ -108,7 +108,6 @@ import nova.scheduler.weights.metrics
import nova.scheduler.weights.ram
import nova.service
import nova.servicegroup.api
-import nova.servicegroup.drivers.zk
import nova.spice
import nova.utils
import nova.vnc
diff --git a/nova/api/validation/parameter_types.py b/nova/api/validation/parameter_types.py
index 1e55df42a6..9362aaf4ce 100644
--- a/nova/api/validation/parameter_types.py
+++ b/nova/api/validation/parameter_types.py
@@ -48,31 +48,72 @@ def _get_all_chars():
# empty string is tested. Otherwise it is not deterministic which
# constraint fails and this causes issues for some unittests when
# PYTHONHASHSEED is set randomly.
-def _get_printable(exclude=None):
- if exclude is None:
- exclude = []
- return ''.join(c for c in _get_all_chars()
- if _is_printable(c) and c not in exclude)
+def _build_regex_range(ws=True, invert=False, exclude=None):
+ """Build a range regex for a set of characters in utf8.
-_printable_ws = ''.join(c for c in _get_all_chars()
- if unicodedata.category(c) == "Zs")
+ This builds a valid range regex for characters in utf8 by
+ iterating the entire space and building up a set of x-y ranges for
+ all the characters we find which are valid.
+ :param ws: should we include whitespace in this range.
+ :param invert: invert the logic, matching everything outside the class.
+ :param exclude: any characters we want to exclude.
-def _get_printable_no_ws(exclude=None):
+ The inversion is useful when we want to generate a set of ranges
+ which is everything that's not a certain class. For instance,
+ produce all the non-printable characters as a set of ranges.
+ """
if exclude is None:
exclude = []
- return ''.join(c for c in _get_all_chars()
- if _is_printable(c) and
- unicodedata.category(c) != "Zs" and
- c not in exclude)
+ regex = ""
+ # are we currently in a range
+ in_range = False
+ # last character we found, for closing ranges
+ last = None
+ # last character we added to the regex; this lets us know that we
+ # already have B in the range, which means we don't need to close
+ # it out with B-B. While the latter seems to work, it's kind of bad form.
+ last_added = None
+
+ def valid_char(char):
+ if char in exclude:
+ result = False
+ elif ws:
+ result = _is_printable(char)
+ else:
+ # Zs is the unicode class for space characters, of which
+ # there are about 10 in this range.
+ result = (_is_printable(char) and
+ unicodedata.category(char) != "Zs")
+ if invert is True:
+ return not result
+ return result
+
+ # iterate through the entire character range.
+ for c in _get_all_chars():
+ if valid_char(c):
+ if not in_range:
+ regex += re.escape(c)
+ last_added = c
+ in_range = True
+ else:
+ if in_range and last != last_added:
+ regex += "-" + re.escape(last)
+ in_range = False
+ last = c
+ else:
+ if in_range:
+ regex += "-" + re.escape(c)
+ return regex
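As a toy illustration of the range-building logic, over printable ASCII only and with a hypothetical helper name (the real function walks the full character space):

    import re

    def tiny_regex_range(valid):
        out, start, prev = '', None, None
        for c in (chr(i) for i in range(32, 127)):
            if valid(c):
                if start is None:
                    # open a new run with its first character
                    start = prev = c
                    out += re.escape(c)
                else:
                    prev = c
            else:
                if start is not None and prev != start:
                    # close out a multi-character run as x-y
                    out += '-' + re.escape(prev)
                start = None
        if start is not None and prev != start:
            out += '-' + re.escape(prev)
        return out

    print(tiny_regex_range(str.isalnum))  # -> 0-9A-Za-z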
valid_name_regex_base = '^(?![%s])[%s]*(?<![%s])$'
valid_name_regex = valid_name_regex_base % (
- re.escape(_printable_ws), re.escape(_get_printable()),
- re.escape(_printable_ws))
+ _build_regex_range(ws=False, invert=True),
+ _build_regex_range(),
+ _build_regex_range(ws=False, invert=True))
# This regex allows leading/trailing whitespace
@@ -82,27 +123,34 @@ valid_name_leading_trailing_spaces_regex_base = (
valid_cell_name_regex = valid_name_regex_base % (
- re.escape(_printable_ws),
- re.escape(_get_printable(exclude=['!', '.', '@'])),
- re.escape(_printable_ws))
+ _build_regex_range(ws=False, invert=True),
+ _build_regex_range(exclude=['!', '.', '@']),
+ _build_regex_range(ws=False, invert=True))
# cell's name disallows '!', '.' and '@'.
valid_cell_name_leading_trailing_spaces_regex = (
valid_name_leading_trailing_spaces_regex_base % {
- 'ws': re.escape(_printable_ws),
- 'no_ws': re.escape(_get_printable_no_ws(exclude=['!', '.', '@']))})
+ 'ws': _build_regex_range(exclude=['!', '.', '@']),
+ 'no_ws': _build_regex_range(ws=False, exclude=['!', '.', '@'])})
valid_name_leading_trailing_spaces_regex = (
valid_name_leading_trailing_spaces_regex_base % {
- 'ws': re.escape(_printable_ws),
- 'no_ws': re.escape(_get_printable_no_ws())})
+ 'ws': _build_regex_range(),
+ 'no_ws': _build_regex_range(ws=False)})
valid_name_regex_obj = re.compile(valid_name_regex, re.UNICODE)
+valid_description_regex_base = '^[%s]*$'
+
+
+valid_description_regex = valid_description_regex_base % (
+ _build_regex_range())
+
+
boolean = {
'type': ['boolean', 'string'],
'enum': [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on',
@@ -175,6 +223,12 @@ name_with_leading_trailing_spaces = {
}
+description = {
+ 'type': ['string', 'null'], 'minLength': 0, 'maxLength': 255,
+ 'pattern': valid_description_regex,
+}
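A sketch of how a schema like description is enforced, using the jsonschema library that nova's validators build on; the pattern here is simplified to printable ASCII for illustration:

    import jsonschema

    desc_schema = {'type': ['string', 'null'], 'maxLength': 255,
                   'pattern': '^[ -~]*$'}  # simplified stand-in pattern
    jsonschema.validate('my server', desc_schema)  # passes
    try:
        jsonschema.validate('bad\x07value', desc_schema)
    except jsonschema.ValidationError as err:
        print(err.message)  # pattern mismatch on the control character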
+
+
tcp_udp_port = {
'type': ['integer', 'string'], 'pattern': '^[0-9]*$',
'minimum': 0, 'maximum': 65535,
diff --git a/nova/api/validation/validators.py b/nova/api/validation/validators.py
index 2ae5dfe907..27899b76e4 100644
--- a/nova/api/validation/validators.py
+++ b/nova/api/validation/validators.py
@@ -80,7 +80,7 @@ def _soft_validate_additional_properties(validator,
instance,
schema):
"""This validator function is used for legacy v2 compatible mode in v2.1.
- This will skip all the addtional properties checking but keep check the
+ This will skip all the additional properties checking but keep checking the
'patternProperties'. 'patternProperties' is used for metadata API.
If there are not any properties on the instance that are not specified in
diff --git a/nova/availability_zones.py b/nova/availability_zones.py
index dbd8697c37..4ae4f3ef36 100644
--- a/nova/availability_zones.py
+++ b/nova/availability_zones.py
@@ -16,35 +16,24 @@
"""Availability zone helper functions."""
import collections
+import nova.conf
-from oslo_config import cfg
-
+from nova import cache_utils
from nova import objects
-from nova.openstack.common import memorycache
# NOTE(vish): azs don't change that often, so cache them for an hour to
# avoid hitting the db multiple times on every request.
AZ_CACHE_SECONDS = 60 * 60
MC = None
-availability_zone_opts = [
- cfg.StrOpt('internal_service_availability_zone',
- default='internal',
- help='The availability_zone to show internal services under'),
- cfg.StrOpt('default_availability_zone',
- default='nova',
- help='Default compute node availability_zone'),
- ]
-
-CONF = cfg.CONF
-CONF.register_opts(availability_zone_opts)
+CONF = nova.conf.CONF
def _get_cache():
global MC
if MC is None:
- MC = memorycache.get_client()
+ MC = cache_utils.get_client(expiration_time=AZ_CACHE_SECONDS)
return MC
@@ -113,7 +102,7 @@ def update_host_availability_zone_cache(context, host, availability_zone=None):
cache = _get_cache()
cache_key = _make_cache_key(host)
cache.delete(cache_key)
- cache.set(cache_key, availability_zone, AZ_CACHE_SECONDS)
+ cache.set(cache_key, availability_zone)
def get_availability_zones(context, get_only_available=False,
@@ -195,5 +184,5 @@ def get_instance_availability_zone(context, instance):
if not az:
elevated = context.elevated()
az = get_host_availability_zone(elevated, host)
- cache.set(cache_key, az, AZ_CACHE_SECONDS)
+ cache.set(cache_key, az)
return az
diff --git a/nova/block_device.py b/nova/block_device.py
index cf670a6486..31dd9bda73 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -15,18 +15,17 @@
import re
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
import six
+import nova.conf
from nova import exception
from nova.i18n import _
from nova import utils
from nova.virt import driver
-CONF = cfg.CONF
-CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
@@ -494,7 +493,7 @@ _pref = re.compile('^((x?v|s|h)d)')
def strip_prefix(device_name):
"""remove both leading /dev/ and xvd or sd or vd or hd."""
device_name = strip_dev(device_name)
- return _pref.sub('', device_name)
+ return _pref.sub('', device_name) if device_name else device_name
_nums = re.compile('\d+')
@@ -504,7 +503,7 @@ def get_device_letter(device_name):
letter = strip_prefix(device_name)
# NOTE(vish): delete numbers in case we have something like
# /dev/sda1
- return _nums.sub('', letter)
+ return _nums.sub('', letter) if device_name else device_name
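The effect of the new guards, condensed into a self-contained sketch (strip_dev inlined; names mirror the helpers in this file):

    import re

    _pref = re.compile('^((x?v|s|h)d)')
    _nums = re.compile(r'\d+')

    def get_device_letter(device_name):
        if not device_name:
            return device_name  # '' or None passes through, no TypeError
        name = device_name[5:] if device_name.startswith('/dev/') \
            else device_name
        letter = _pref.sub('', name)   # strip xvd/vd/sd/hd prefix
        return _nums.sub('', letter)   # drop partition digits

    print(get_device_letter('/dev/sda1'))  # -> 'a'
    print(get_device_letter(''))           # -> ''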
def instance_block_mapping(instance, bdms):
diff --git a/nova/cache_utils.py b/nova/cache_utils.py
new file mode 100644
index 0000000000..864e7b6d41
--- /dev/null
+++ b/nova/cache_utils.py
@@ -0,0 +1,174 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Super simple fake memcache client."""
+
+import copy
+
+from oslo_cache import core as cache
+from oslo_config import cfg
+
+from nova.i18n import _
+
+
+# NOTE(dims): There are many copies of memcache_opts with memcached_servers
+# in various projects as this used to be in a copy of memory_cache.py
+# Since we are making a change in just our copy, oslo-config-generator fails
+# with cfg.DuplicateOptError unless we override the comparison check
+class _DeprecatedListOpt(cfg.ListOpt):
+ def __ne__(self, another):
+ self_dict = copy.deepcopy(vars(self))
+ another_dict = copy.deepcopy(vars(another))
+ self_dict.pop('help')
+ self_dict.pop('deprecated_for_removal')
+ another_dict.pop('help')
+ another_dict.pop('deprecated_for_removal')
+ return self_dict != another_dict
+
+
+memcache_opts = [
+ _DeprecatedListOpt('memcached_servers',
+ help='DEPRECATED: Memcached servers or None for in '
+ 'process cache. "memcached_servers" opt is '
+ 'deprecated in Mitaka. In Newton release '
+ 'oslo.cache config options should be used as '
+ 'this option will be removed. Please add a '
+ '[cache] group in your nova.conf file and '
+ 'add "enable" and "memcache_servers" option in '
+ 'this section.',
+ deprecated_for_removal=True),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(memcache_opts)
+
+WEEK = 604800
+
+
+def list_opts():
+ """Entry point for oslo-config-generator."""
+ return [(None, copy.deepcopy(memcache_opts))]
+
+
+def get_memcached_client(expiration_time=0):
+ """Used ONLY when memcached is explicitly needed."""
+ # If the operator uses the old style [DEFAULT]/memcached_servers
+ # then we just respect that setting
+ if CONF.memcached_servers:
+ return CacheClient(
+ _get_custom_cache_region(expiration_time=expiration_time,
+ backend='dogpile.cache.memcached',
+ url=CONF.memcached_servers))
+ # If the operator uses the new style [cache]/memcache_servers
+ # and has the [cache]/enabled flag on, then we let oslo_cache
+ # configure the region from the configuration settings
+ elif CONF.cache.enabled and CONF.cache.memcache_servers:
+ return CacheClient(
+ _get_default_cache_region(expiration_time=expiration_time))
+ raise RuntimeError(_('memcached_servers not defined'))
+
+
+def get_client(expiration_time=0):
+ """Used to get a caching client."""
+ # If the operator still uses the old style [DEFAULT]/memcached_servers
+ # then we just respect that setting
+ if CONF.memcached_servers:
+ return CacheClient(
+ _get_custom_cache_region(expiration_time=expiration_time,
+ backend='dogpile.cache.memcached',
+ url=CONF.memcached_servers))
+ # If the operator has the [cache]/enabled flag on, then we let
+ # oslo_cache configure the region from the configuration settings.
+ elif CONF.cache.enabled:
+ return CacheClient(
+ _get_default_cache_region(expiration_time=expiration_time))
+ # If the [cache]/enabled flag is off and [DEFAULT]/memcached_servers
+ # is absent, we use the dictionary backend
+ return CacheClient(
+ _get_custom_cache_region(expiration_time=expiration_time,
+ backend='oslo_cache.dict'))
+
+
+def _get_default_cache_region(expiration_time):
+ region = cache.create_region()
+ if expiration_time != 0:
+ CONF.cache.expiration_time = expiration_time
+ cache.configure_cache_region(CONF, region)
+ return region
+
+
+def _get_custom_cache_region(expiration_time=WEEK,
+ backend=None,
+ url=None):
+ """Create instance of oslo_cache client.
+
+ For backends you can pass specific parameters by kwargs.
+ For 'dogpile.cache.memcached' backend 'url' parameter must be specified.
+
+ :param backend: backend name
+ :param expiration_time: interval in seconds to indicate maximum
+ time-to-live value for each key
+ :param url: memcached url(s)
+ """
+
+ region = cache.create_region()
+ region_params = {}
+ if expiration_time != 0:
+ region_params['expiration_time'] = expiration_time
+
+ if backend == 'oslo_cache.dict':
+ region_params['arguments'] = {'expiration_time': expiration_time}
+ elif backend == 'dogpile.cache.memcached':
+ region_params['arguments'] = {'url': url}
+ else:
+ raise RuntimeError(_('old style configuration can use '
+ 'only dictionary or memcached backends'))
+
+ region.configure(backend, **region_params)
+ return region
+
+
+class CacheClient(object):
+ """Replicates a tiny subset of memcached client interface."""
+
+ def __init__(self, region):
+ self.region = region
+
+ def get(self, key):
+ value = self.region.get(key)
+ if value == cache.NO_VALUE:
+ return None
+ return value
+
+ def get_or_create(self, key, creator):
+ return self.region.get_or_create(key, creator)
+
+ def set(self, key, value):
+ return self.region.set(key, value)
+
+ def add(self, key, value):
+ return self.region.get_or_create(key, lambda: value)
+
+ def delete(self, key):
+ return self.region.delete(key)
+
+ def get_multi(self, keys):
+ values = self.region.get_multi(keys)
+ return [None if value is cache.NO_VALUE else value for value in
+ values]
+
+ def delete_multi(self, keys):
+ return self.region.delete_multi(keys)
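A minimal usage sketch, assuming nova's CONF has been parsed and neither memcached style is configured, so get_client() falls back to the in-process dict backend:

    client = get_client(expiration_time=3600)
    client.set('host1-az', 'nova')           # honors the region's TTL
    assert client.get('host1-az') == 'nova'
    client.delete('host1-az')
    assert client.get('host1-az') is None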
diff --git a/nova/cells/manager.py b/nova/cells/manager.py
index a257e1c5b2..d335e549b5 100644
--- a/nova/cells/manager.py
+++ b/nova/cells/manager.py
@@ -19,7 +19,6 @@ Cells Service Manager
import datetime
import time
-from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import periodic_task
@@ -31,6 +30,7 @@ from six.moves import range
from nova.cells import messaging
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
+import nova.conf
from nova import context
from nova import exception
from nova.i18n import _LW
@@ -39,23 +39,8 @@ from nova import objects
from nova.objects import base as base_obj
from nova.objects import instance as instance_obj
-cell_manager_opts = [
- cfg.StrOpt('driver',
- default='nova.cells.rpc_driver.CellsRPCDriver',
- help='Cells communication driver to use'),
- cfg.IntOpt("instance_updated_at_threshold",
- default=3600,
- help="Number of seconds after an instance was updated "
- "or deleted to continue to update cells"),
- cfg.IntOpt("instance_update_num_instances",
- default=1,
- help="Number of instances to update per periodic task run")
-]
-
-
-CONF = cfg.CONF
-CONF.import_opt('name', 'nova.cells.opts', group='cells')
-CONF.register_opts(cell_manager_opts, group='cells')
+
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index 056454b25e..949e9401a7 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -31,7 +31,6 @@ import sys
import traceback
from eventlet import queue
-from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
@@ -48,6 +47,7 @@ from nova import compute
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import vm_states
+import nova.conf
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.db import base
@@ -58,19 +58,7 @@ from nova.objects import base as objects_base
from nova import rpc
from nova import utils
-
-cell_messaging_opts = [
- cfg.IntOpt('max_hop_count',
- default=10,
- help='Maximum number of hops for cells routing.'),
- cfg.StrOpt('scheduler',
- default='nova.cells.scheduler.CellsScheduler',
- help='Cells scheduler to use')]
-
-CONF = cfg.CONF
-CONF.import_opt('name', 'nova.cells.opts', group='cells')
-CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
-CONF.register_opts(cell_messaging_opts, group='cells')
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
diff --git a/nova/cells/opts.py b/nova/cells/opts.py
index 3a0ace4fb3..027524e8c8 100644
--- a/nova/cells/opts.py
+++ b/nova/cells/opts.py
@@ -15,57 +15,9 @@
"""
Global cells config options
"""
+import nova.conf
-import itertools
-
-from oslo_config import cfg
-from oslo_utils import importutils
-
-
-cells_opts = [
- cfg.BoolOpt('enable',
- default=False,
- help='Enable cell functionality'),
- cfg.StrOpt('topic',
- default='cells',
- help='The topic cells nodes listen on'),
- cfg.StrOpt('manager',
- default='nova.cells.manager.CellsManager',
- help='Manager for cells'),
- cfg.StrOpt('name',
- default='nova',
- help='Name of this cell'),
- cfg.ListOpt('capabilities',
- default=['hypervisor=xenserver;kvm', 'os=linux;windows'],
- help='Key/Multi-value list with the capabilities of the cell'),
- cfg.IntOpt('call_timeout',
- default=60,
- help='Seconds to wait for response from a call to a cell.'),
- cfg.FloatOpt('reserve_percent',
- default=10.0,
- help='Percentage of cell capacity to hold in reserve. '
- 'Affects both memory and disk utilization'),
- cfg.StrOpt('cell_type',
- default='compute',
- choices=('api', 'compute'),
- help='Type of cell'),
- cfg.IntOpt("mute_child_interval",
- default=300,
- help='Number of seconds after which a lack of capability and '
- 'capacity updates signals the child cell is to be '
- 'treated as a mute.'),
- cfg.IntOpt('bandwidth_update_interval',
- default=600,
- help='Seconds between bandwidth updates for cells.'),
- cfg.IntOpt('instance_update_sync_database_limit',
- default=100,
- help='Number of instances to pull from the database at one '
- 'time for a sync. If there are more instances to update '
- 'the results will be paged through'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(cells_opts, group='cells')
+CONF = nova.conf.CONF
def get_cell_type():
@@ -77,32 +29,4 @@ def get_cell_type():
def list_opts():
- return [
- ('cells',
- itertools.chain(
- cells_opts,
- importutils.import_module(
- "nova.cells.manager").cell_manager_opts,
- importutils.import_module(
- "nova.cells.messaging").cell_messaging_opts,
- importutils.import_module(
- "nova.cells.rpc_driver").cell_rpc_driver_opts,
- importutils.import_module(
- "nova.cells.scheduler").cell_scheduler_opts,
- importutils.import_module(
- "nova.cells.state").cell_state_manager_opts,
- importutils.import_module(
- "nova.cells.weights.mute_child").mute_weigher_opts,
- importutils.import_module(
- "nova.cells.weights.ram_by_instance_type").ram_weigher_opts,
- importutils.import_module(
- "nova.cells.weights.weight_offset").weigher_opts
- )),
- ('upgrade_levels',
- itertools.chain(
- [importutils.import_module(
- "nova.cells.rpc_driver").rpcapi_cap_opt],
- [importutils.import_module(
- "nova.cells.rpcapi").rpcapi_cap_opt],
- )),
- ]
+ return []
diff --git a/nova/cells/rpc_driver.py b/nova/cells/rpc_driver.py
index 62209a30fb..898f135ca9 100644
--- a/nova/cells/rpc_driver.py
+++ b/nova/cells/rpc_driver.py
@@ -17,26 +17,14 @@
"""
Cells RPC Communication Driver
"""
-from oslo_config import cfg
import oslo_messaging as messaging
from nova.cells import driver
+import nova.conf
from nova import rpc
-cell_rpc_driver_opts = [
- cfg.StrOpt('rpc_driver_queue_base',
- default='cells.intercell',
- help="Base queue name to use when communicating between "
- "cells. Various topics by message type will be "
- "appended to this.")]
-CONF = cfg.CONF
-CONF.register_opts(cell_rpc_driver_opts, group='cells')
-CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
-
-rpcapi_cap_opt = cfg.StrOpt('intercell',
- help='Set a version cap for messages sent between cells services')
-CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
+CONF = nova.conf.CONF
class CellsRPCDriver(driver.BaseCellsDriver):
@@ -133,7 +121,7 @@ class InterCellRPCAPI(object):
"""
transport_url = next_hop.db_info['transport_url']
if transport_url not in self.transports:
- transport = messaging.get_transport(cfg.CONF, transport_url,
+ transport = messaging.get_transport(nova.conf.CONF, transport_url,
rpc.TRANSPORT_ALIASES)
self.transports[transport_url] = transport
else:
diff --git a/nova/cells/rpcapi.py b/nova/cells/rpcapi.py
index ae27aacc93..d6d64df3ca 100644
--- a/nova/cells/rpcapi.py
+++ b/nova/cells/rpcapi.py
@@ -23,11 +23,11 @@ services. That communication is handled by the cells driver via the
messaging module.
"""
-from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
+import nova.conf
from nova import exception
from nova.i18n import _LE
from nova import objects
@@ -35,13 +35,8 @@ from nova.objects import base as objects_base
from nova import rpc
LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-CONF.import_opt('enable', 'nova.cells.opts', group='cells')
-CONF.import_opt('topic', 'nova.cells.opts', group='cells')
-rpcapi_cap_opt = cfg.StrOpt('cells',
- help='Set a version cap for messages sent to local cells services')
-CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
+CONF = nova.conf.CONF
class CellsAPI(object):
diff --git a/nova/cells/scheduler.py b/nova/cells/scheduler.py
index f625cd7a89..f218df500f 100644
--- a/nova/cells/scheduler.py
+++ b/nova/cells/scheduler.py
@@ -19,7 +19,6 @@ Cells Scheduler
import copy
import time
-from oslo_config import cfg
from oslo_log import log as logging
from six.moves import range
@@ -29,6 +28,7 @@ from nova import compute
from nova.compute import instance_actions
from nova.compute import vm_states
from nova import conductor
+import nova.conf
from nova.db import base
from nova import exception
from nova.i18n import _LE, _LI
@@ -37,30 +37,9 @@ from nova.objects import base as obj_base
from nova.scheduler import utils as scheduler_utils
from nova import utils
-cell_scheduler_opts = [
- cfg.ListOpt('scheduler_filter_classes',
- default=['nova.cells.filters.all_filters'],
- help='Filter classes the cells scheduler should use. '
- 'An entry of "nova.cells.filters.all_filters" '
- 'maps to all cells filters included with nova.'),
- cfg.ListOpt('scheduler_weight_classes',
- default=['nova.cells.weights.all_weighers'],
- help='Weigher classes the cells scheduler should use. '
- 'An entry of "nova.cells.weights.all_weighers" '
- 'maps to all cell weighers included with nova.'),
- cfg.IntOpt('scheduler_retries',
- default=10,
- help='How many retries when no cells are available.'),
- cfg.IntOpt('scheduler_retry_delay',
- default=2,
- help='How often to retry in seconds when no cells are '
- 'available.')
-]
-
LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-CONF.register_opts(cell_scheduler_opts, group='cells')
+CONF = nova.conf.CONF
class CellsScheduler(base.Base):
diff --git a/nova/cells/state.py b/nova/cells/state.py
index 0b77a0bce3..a61989fed9 100644
--- a/nova/cells/state.py
+++ b/nova/cells/state.py
@@ -31,33 +31,20 @@ from oslo_utils import units
import six
from nova.cells import rpc_driver
+import nova.conf
from nova import context
from nova.db import base
from nova import exception
from nova.i18n import _LE
from nova import objects
from nova import rpc
+from nova import servicegroup
from nova import utils
-cell_state_manager_opts = [
- cfg.IntOpt('db_check_interval',
- default=60,
- help='Interval, in seconds, for getting fresh cell '
- 'information from the database.'),
- cfg.StrOpt('cells_config',
- help='Configuration file from which to read cells '
- 'configuration. If given, overrides reading cells '
- 'from the database.'),
-]
-
LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-CONF.import_opt('name', 'nova.cells.opts', group='cells')
-CONF.import_opt('reserve_percent', 'nova.cells.opts', group='cells')
-CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells')
-CONF.register_opts(cell_state_manager_opts, group='cells')
+CONF = nova.conf.CONF
class CellState(object):
@@ -167,6 +154,7 @@ class CellStateManager(base.Base):
self.parent_cells = {}
self.child_cells = {}
self.last_cell_db_check = datetime.datetime.min
+ self.servicegroup_api = servicegroup.API()
attempts = 0
while True:
@@ -277,6 +265,15 @@ class CellStateManager(base.Base):
if not service or service['disabled']:
continue
+ # NOTE: This works because it is only used for computes found
+ # in the cell this is run in. It cannot be used to check on
+ # computes in a child cell from the api cell. If this is run
+ # in the api cell, objects.ComputeNodeList.get_all() above will
+ # return an empty list.
+ alive = self.servicegroup_api.service_is_up(service)
+ if not alive:
+ continue
+
chost = compute_hosts[host]
chost['free_ram_mb'] += compute['free_ram_mb']
free_disk = compute['free_disk_gb'] * 1024
@@ -347,12 +344,12 @@ class CellStateManager(base.Base):
@sync_before
def get_child_cells(self):
"""Return list of child cell_infos."""
- return self.child_cells.values()
+ return list(self.child_cells.values())
@sync_before
def get_parent_cells(self):
"""Return list of parent cell_infos."""
- return self.parent_cells.values()
+ return list(self.parent_cells.values())
@sync_before
def get_parent_cell(self, cell_name):
diff --git a/nova/cells/utils.py b/nova/cells/utils.py
index 13f5055341..e196470e13 100644
--- a/nova/cells/utils.py
+++ b/nova/cells/utils.py
@@ -19,9 +19,9 @@ Cells Utility Methods
import random
import sys
-from oslo_config import cfg
import six
+import nova.conf
from nova import objects
from nova.objects import base as obj_base
@@ -36,9 +36,7 @@ BLOCK_SYNC_FLAG = '!!'
# Separator used between cell name and item
_CELL_ITEM_SEP = '@'
-CONF = cfg.CONF
-CONF.import_opt('instance_update_sync_database_limit', 'nova.cells.opts',
- group='cells')
+CONF = nova.conf.CONF
class ProxyObjectSerializer(obj_base.NovaObjectSerializer):
@@ -105,10 +103,10 @@ class _CellProxy(object):
else:
yield name, getattr(self._obj, name)
- if six.PY3:
- items = _iteritems
- else:
+ if six.PY2:
iteritems = _iteritems
+ else:
+ items = _iteritems
def __getattr__(self, key):
return getattr(self._obj, key)
diff --git a/nova/cells/weights/mute_child.py b/nova/cells/weights/mute_child.py
index f004251f10..eaf62577f8 100644
--- a/nova/cells/weights/mute_child.py
+++ b/nova/cells/weights/mute_child.py
@@ -18,25 +18,16 @@ If a child cell hasn't sent capacity or capability updates in a while,
downgrade its likelihood of being chosen for scheduling requests.
"""
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from nova.cells import weights
+import nova.conf
from nova.i18n import _LW
LOG = logging.getLogger(__name__)
-mute_weigher_opts = [
- cfg.FloatOpt('mute_weight_multiplier',
- default=-10000.0,
- help='Multiplier used to weigh mute children. (The value '
- 'should be negative.)'),
-]
-
-CONF = cfg.CONF
-CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells')
-CONF.register_opts(mute_weigher_opts, group='cells')
+CONF = nova.conf.CONF
class MuteChildWeigher(weights.BaseCellWeigher):
diff --git a/nova/cells/weights/ram_by_instance_type.py b/nova/cells/weights/ram_by_instance_type.py
index fb54edefb9..90052be0e6 100644
--- a/nova/cells/weights/ram_by_instance_type.py
+++ b/nova/cells/weights/ram_by_instance_type.py
@@ -16,19 +16,12 @@
"""
Weigh cells by memory needed in a way that spreads instances.
"""
-from oslo_config import cfg
from nova.cells import weights
+import nova.conf
-ram_weigher_opts = [
- cfg.FloatOpt('ram_weight_multiplier',
- default=10.0,
- help='Multiplier used for weighing ram. Negative '
- 'numbers mean to stack vs spread.'),
-]
-CONF = cfg.CONF
-CONF.register_opts(ram_weigher_opts, group='cells')
+CONF = nova.conf.CONF
class RamByInstanceTypeWeigher(weights.BaseCellWeigher):
diff --git a/nova/cells/weights/weight_offset.py b/nova/cells/weights/weight_offset.py
index 01ff35ed1e..adbb0c5730 100644
--- a/nova/cells/weights/weight_offset.py
+++ b/nova/cells/weights/weight_offset.py
@@ -18,18 +18,11 @@ Weigh cells by their weight_offset in the DB. Cells with higher
weight_offsets in the DB will be preferred.
"""
-from oslo_config import cfg
-
from nova.cells import weights
+import nova.conf
-weigher_opts = [
- cfg.FloatOpt('offset_weight_multiplier',
- default=1.0,
- help='Multiplier used to weigh offset weigher.'),
-]
-CONF = cfg.CONF
-CONF.register_opts(weigher_opts, group='cells')
+CONF = nova.conf.CONF
class WeightOffsetWeigher(weights.BaseCellWeigher):
diff --git a/nova/cert/rpcapi.py b/nova/cert/rpcapi.py
index 2111bd5c1c..537b715d2e 100644
--- a/nova/cert/rpcapi.py
+++ b/nova/cert/rpcapi.py
@@ -16,23 +16,12 @@
Client side of the cert manager RPC API.
"""
-from oslo_config import cfg
import oslo_messaging as messaging
+import nova.conf
from nova import rpc
-rpcapi_opts = [
- cfg.StrOpt('cert_topic',
- default='cert',
- help='The topic cert nodes listen on'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(rpcapi_opts)
-
-rpcapi_cap_opt = cfg.StrOpt('cert',
- help='Set a version cap for messages sent to cert services')
-CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
+CONF = nova.conf.CONF
class CertAPI(object):
diff --git a/nova/cmd/all.py b/nova/cmd/all.py
index 0764698e86..8ac5f393b9 100644
--- a/nova/cmd/all.py
+++ b/nova/cmd/all.py
@@ -26,9 +26,9 @@ continue attempting to launch the rest of the services.
import sys
-from oslo_config import cfg
from oslo_log import log as logging
+import nova.conf
from nova import config
from nova.i18n import _LE
from nova import objects
@@ -37,9 +37,7 @@ from nova import utils
from nova.vnc import xvp_proxy
-CONF = cfg.CONF
-CONF.import_opt('manager', 'nova.conductor.api', group='conductor')
-CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
+CONF = nova.conf.CONF
CONF.import_opt('enabled_apis', 'nova.service')
CONF.import_opt('enabled_ssl_apis', 'nova.service')
diff --git a/nova/cmd/api_metadata.py b/nova/cmd/api_metadata.py
index ae0d36447b..dabc4cd555 100644
--- a/nova/cmd/api_metadata.py
+++ b/nova/cmd/api_metadata.py
@@ -18,11 +18,11 @@
import sys
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from nova.conductor import rpcapi as conductor_rpcapi
+import nova.conf
from nova import config
from nova import objects
from nova.objects import base as objects_base
@@ -31,9 +31,8 @@ from nova import utils
from nova import version
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.import_opt('enabled_ssl_apis', 'nova.service')
-CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
def main():
diff --git a/nova/cmd/cert.py b/nova/cmd/cert.py
index e07df8d0fd..9e199d7797 100644
--- a/nova/cmd/cert.py
+++ b/nova/cmd/cert.py
@@ -16,18 +16,17 @@
import sys
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
+import nova.conf
from nova import config
from nova import objects
from nova import service
from nova import utils
from nova import version
-CONF = cfg.CONF
-CONF.import_opt('cert_topic', 'nova.cert.rpcapi')
+CONF = nova.conf.CONF
def main():
diff --git a/nova/cmd/compute.py b/nova/cmd/compute.py
index 71434a225d..8dc627cb39 100644
--- a/nova/cmd/compute.py
+++ b/nova/cmd/compute.py
@@ -19,11 +19,11 @@
import sys
import traceback
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from nova.conductor import rpcapi as conductor_rpcapi
+import nova.conf
from nova import config
import nova.db.api
from nova import exception
@@ -34,9 +34,8 @@ from nova import service
from nova import utils
from nova import version
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
-CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
LOG = logging.getLogger('nova.compute')
diff --git a/nova/cmd/conductor.py b/nova/cmd/conductor.py
index f85cb1fb10..6dc3500a67 100644
--- a/nova/cmd/conductor.py
+++ b/nova/cmd/conductor.py
@@ -17,18 +17,17 @@
import sys
from oslo_concurrency import processutils
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
+import nova.conf
from nova import config
from nova import objects
from nova import service
from nova import utils
from nova import version
-CONF = cfg.CONF
-CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
+CONF = nova.conf.CONF
def main():
diff --git a/nova/cmd/dhcpbridge.py b/nova/cmd/dhcpbridge.py
index e8098f456a..69fb949e62 100644
--- a/nova/cmd/dhcpbridge.py
+++ b/nova/cmd/dhcpbridge.py
@@ -30,6 +30,7 @@ from oslo_serialization import jsonutils
from oslo_utils import importutils
from nova.conductor import rpcapi as conductor_rpcapi
+import nova.conf
from nova import config
from nova import context
import nova.db.api
@@ -40,10 +41,9 @@ from nova import objects
from nova.objects import base as objects_base
from nova import rpc
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('network_manager', 'nova.service')
-CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
LOG = logging.getLogger(__name__)
@@ -120,6 +120,14 @@ def main():
logging.setup(CONF, "nova")
global LOG
LOG = logging.getLogger('nova.dhcpbridge')
+
+ if CONF.action.name == 'old':
+ # NOTE(sdague): old is the most frequent message sent, and
+ # it's a noop. We should just exit immediately otherwise we
+ # can stack up a bunch of requests in dnsmasq. A SIGHUP seems
+ # to dump this list, so actions queued up get lost.
+ return
+
objects.register_all()
if not CONF.conductor.use_local:
@@ -130,7 +138,7 @@ def main():
LOG.warning(_LW('Conductor local mode is deprecated and will '
'be removed in a subsequent release'))
- if CONF.action.name in ['add', 'del', 'old']:
+ if CONF.action.name in ['add', 'del']:
LOG.debug("Called '%(action)s' for mac '%(mac)s' with IP '%(ip)s'",
{"action": CONF.action.name,
"mac": CONF.action.mac,
diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py
index f2d7cf205b..6339d15bdd 100644
--- a/nova/cmd/manage.py
+++ b/nova/cmd/manage.py
@@ -234,7 +234,7 @@ def _db_error(caught_exception):
print(_("The above error may show that the database has not "
"been created.\nPlease create a database using "
"'nova-manage db sync' before running this command."))
- exit(1)
+ sys.exit(1)
class ProjectCommands(object):
@@ -923,6 +923,8 @@ class HostCommands(object):
class DbCommands(object):
"""Class for managing the main database."""
+ online_migrations = ()
+
def __init__(self):
pass
@@ -985,6 +987,37 @@ class DbCommands(object):
print(_('There were no records found where '
'instance_uuid was NULL.'))
+ @args('--max-count', metavar='<number>', dest='max_count',
+ help='Maximum number of objects to consider')
+ def online_data_migrations(self, max_count=None):
+ if max_count is not None:
+ max_count = int(max_count)
+ unlimited = False
+ if max_count < 0:
+ print(_('Must supply a non-negative value for max_count'))
+ return 1
+ else:
+ unlimited = True
+
+ ran = 0
+ for migration_meth in self.online_migrations:
+ count = max_count - ran if not unlimited else None
+ try:
+ found, done = migration_meth(count)
+ except Exception:
+ print("Error attempting to run %(meth)s" % migration_meth)
+ found = done = 0
+
+ if found:
+ print(_('%(total)i rows matched query %(meth)s, %(done)i '
+ 'migrated') % {'total': found,
+ 'meth': migration_meth.__name__,
+ 'done': done})
+ if max_count is not None:
+ ran += done
+ if ran >= max_count:
+ break
+
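online_migrations is an empty tuple here; a hypothetical entry matching the calling convention above would take a max count (None meaning unlimited) and return the rows that matched and the rows actually migrated:

    _ROWS = [{'migrated': False} for _ in range(25)]  # stand-in table

    def _migrate_example_rows(count):
        matched = [r for r in _ROWS if not r['migrated']]
        batch = matched if count is None else matched[:count]
        for row in batch:
            row['migrated'] = True
        return len(matched), len(batch)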
class ApiDbCommands(object):
"""Class for managing the api database."""
@@ -1277,7 +1310,7 @@ class CellV2Commands(object):
if cell_uuid is None:
raise Exception(_("cell_uuid must be set"))
else:
- # Validate the the cell exists
+ # Validate the cell exists
cell_mapping = objects.CellMapping.get_by_uuid(ctxt, cell_uuid)
filters = {}
instances = objects.InstanceList.get_by_filters(
@@ -1399,7 +1432,7 @@ def main():
print(_("Could not read %s. Re-running with sudo") % cfgfile)
try:
os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
- except Exception:
+ except OSError:
print(_('sudo failed, continuing as if nothing happened'))
print(_('Please re-run nova-manage as root.'))
diff --git a/nova/cmd/network.py b/nova/cmd/network.py
index c559758931..1045cc4bbe 100644
--- a/nova/cmd/network.py
+++ b/nova/cmd/network.py
@@ -19,11 +19,11 @@
import sys
import traceback
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from nova.conductor import rpcapi as conductor_rpcapi
+import nova.conf
from nova import config
import nova.db.api
from nova import exception
@@ -34,9 +34,8 @@ from nova import service
from nova import utils
from nova import version
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.import_opt('network_topic', 'nova.network.rpcapi')
-CONF.import_opt('use_local', 'nova.conductor.api', group='conductor')
LOG = logging.getLogger('nova.network')
diff --git a/nova/cmd/novncproxy.py b/nova/cmd/novncproxy.py
index acd56925fa..30f7308c45 100644
--- a/nova/cmd/novncproxy.py
+++ b/nova/cmd/novncproxy.py
@@ -20,25 +20,13 @@ noVNC consoles. Leverages websockify.py by Joel Martin
import sys
-from oslo_config import cfg
from nova.cmd import baseproxy
+import nova.conf
from nova import config
-opts = [
- cfg.StrOpt('novncproxy_host',
- default='0.0.0.0',
- help='Host on which to listen for incoming requests'),
- cfg.IntOpt('novncproxy_port',
- default=6080,
- min=1,
- max=65535,
- help='Port on which to listen for incoming requests'),
- ]
-
-CONF = cfg.CONF
-CONF.register_cli_opts(opts)
+CONF = nova.conf.CONF
def main():
@@ -47,5 +35,5 @@ def main():
config.parse_args(sys.argv)
baseproxy.proxy(
- host=CONF.novncproxy_host,
- port=CONF.novncproxy_port)
+ host=CONF.vnc.novncproxy_host,
+ port=CONF.vnc.novncproxy_port)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index a8be00a153..e0805b9258 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -67,6 +67,7 @@ from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
+from nova.objects import fields as fields_obj
from nova.objects import keypair as keypair_obj
from nova.objects import quotas as quotas_obj
from nova.objects import security_group as security_group_obj
@@ -74,6 +75,7 @@ from nova.pci import request as pci_request
import nova.policy
from nova import rpc
from nova.scheduler import client as scheduler_client
+from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
from nova.virt import hardware
@@ -86,10 +88,8 @@ wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
CONF = nova.conf.CONF
-
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
-CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
MAX_USERDATA_SIZE = 65535
RO_SECURITY_GROUPS = ['default']
@@ -675,8 +675,8 @@ class API(base.Base):
image_defined_bdms = block_device.from_legacy_mapping(
image_defined_bdms, None, root_device_name)
else:
- image_defined_bdms = map(block_device.BlockDeviceDict,
- image_defined_bdms)
+ image_defined_bdms = list(map(block_device.BlockDeviceDict,
+ image_defined_bdms))
if image_mapping:
image_mapping = self._prepare_image_mapping(instance_type,
@@ -877,7 +877,7 @@ class API(base.Base):
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
- 'display_description': display_description or '',
+ 'display_description': display_description,
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
@@ -901,20 +901,10 @@ class API(base.Base):
# by the network quotas
return base_options, max_network_count
- def _build_filter_properties(self, context, scheduler_hints, forced_host,
- forced_node, instance_type):
- filter_properties = dict(scheduler_hints=scheduler_hints)
- filter_properties['instance_type'] = instance_type
- if forced_host:
- filter_properties['force_hosts'] = [forced_host]
- if forced_node:
- filter_properties['force_nodes'] = [forced_node]
- return filter_properties
-
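The helper is not dropped outright: later in this diff _create_instance calls scheduler_utils.build_filter_properties instead. A sketch of the equivalent logic, inferred from the removed code above rather than quoted from scheduler_utils:

    def build_filter_properties(scheduler_hints, forced_host,
                                forced_node, instance_type):
        filter_properties = dict(scheduler_hints=scheduler_hints)
        filter_properties['instance_type'] = instance_type
        if forced_host:
            filter_properties['force_hosts'] = [forced_host]
        if forced_node:
            filter_properties['force_nodes'] = [forced_node]
        return filter_properties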
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping, shutdown_terminate,
- instance_group, check_server_group_quota):
+ instance_group, check_server_group_quota, filter_properties):
# Reserve quotas
num_instances, quotas = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
@@ -922,7 +912,18 @@ class API(base.Base):
instances = []
try:
for i in range(num_instances):
+ # Create a uuid for the instance so we can store the
+ # RequestSpec before the instance is created.
+ instance_uuid = str(uuid.uuid4())
+ # Store the RequestSpec that will be used for scheduling.
+ req_spec = objects.RequestSpec.from_components(context,
+ instance_uuid, boot_meta, instance_type,
+ base_options['numa_topology'],
+ base_options['pci_requests'], filter_properties,
+ instance_group, base_options['availability_zone'])
+ req_spec.create()
instance = objects.Instance(context=context)
+ instance.uuid = instance_uuid
instance.update(base_options)
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, instance,
@@ -1018,15 +1019,18 @@ class API(base.Base):
return {}
@staticmethod
- def _get_requested_instance_group(context, scheduler_hints,
+ def _get_requested_instance_group(context, filter_properties,
check_quota):
- if not scheduler_hints:
+ if (not filter_properties or
+ not filter_properties.get('scheduler_hints')):
return
- group_hint = scheduler_hints.get('group')
+ group_hint = filter_properties.get('scheduler_hints').get('group')
if not group_hint:
return
+ # TODO(gibi): We need to remove the following validation code when
+ # removing legacy v2 code.
if not uuidutils.is_uuid_like(group_hint):
msg = _('Server group scheduler hint must be a UUID.')
raise exception.InvalidInput(reason=msg)
@@ -1038,13 +1042,11 @@ class API(base.Base):
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
- availability_zone, forced_host, forced_node, user_data,
- metadata, injected_files, admin_password,
- access_ip_v4, access_ip_v6,
+ availability_zone, user_data, metadata, injected_files,
+ admin_password, access_ip_v4, access_ip_v6,
requested_networks, config_drive,
- block_device_mapping, auto_disk_config,
- reservation_id=None, scheduler_hints=None,
- legacy_bdm=True, shutdown_terminate=False,
+ block_device_mapping, auto_disk_config, filter_properties,
+ reservation_id=None, legacy_bdm=True, shutdown_terminate=False,
check_server_group_quota=False):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
@@ -1058,8 +1060,6 @@ class API(base.Base):
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
- if not instance_type:
- instance_type = flavors.get_default_flavor()
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
@@ -1085,10 +1085,10 @@ class API(base.Base):
if max_net_count == 0:
raise exception.PortLimitExceeded()
elif max_net_count < max_count:
- LOG.debug("max count reduced from %(max_count)d to "
- "%(max_net_count)d due to network port quota",
- {'max_count': max_count,
- 'max_net_count': max_net_count})
+ LOG.info(_LI("max count reduced from %(max_count)d to "
+ "%(max_net_count)d due to network port quota"),
+ {'max_count': max_count,
+ 'max_net_count': max_net_count})
max_count = max_net_count
block_device_mapping = self._check_and_transform_bdm(context,
@@ -1102,16 +1102,12 @@ class API(base.Base):
block_device_mapping.root_bdm())
instance_group = self._get_requested_instance_group(context,
- scheduler_hints, check_server_group_quota)
+ filter_properties, check_server_group_quota)
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
block_device_mapping, shutdown_terminate,
- instance_group, check_server_group_quota)
-
- filter_properties = self._build_filter_properties(context,
- scheduler_hints, forced_host,
- forced_node, instance_type)
+ instance_group, check_server_group_quota, filter_properties)
for instance in instances:
self._record_action_start(context, instance,
@@ -1198,7 +1194,8 @@ class API(base.Base):
bdm.instance_uuid = instance_uuid
bdm.update_or_create()
- def _validate_bdm(self, context, instance, instance_type, all_mappings):
+ def _validate_bdm(self, context, instance, instance_type,
+ block_device_mappings):
def _subsequent_list(l):
# Each device which is capable of being used as boot device should
# be given a unique boot index, starting from 0 in ascending order.
@@ -1208,17 +1205,18 @@ class API(base.Base):
# Setting a negative value or None indicates that the device should not
# be used for booting.
boot_indexes = sorted([bdm.boot_index
- for bdm in all_mappings
+ for bdm in block_device_mappings
if bdm.boot_index is not None
and bdm.boot_index >= 0])
if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
# Convert the BlockDeviceMappingList to a list for repr details.
LOG.debug('Invalid block device mapping boot sequence for '
- 'instance: %s', list(all_mappings), instance=instance)
+ 'instance: %s', list(block_device_mappings),
+ instance=instance)
raise exception.InvalidBDMBootSequence()
- for bdm in all_mappings:
+ for bdm in block_device_mappings:
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
@@ -1265,13 +1263,13 @@ class API(base.Base):
"size"))
ephemeral_size = sum(bdm.volume_size or 0
- for bdm in all_mappings
+ for bdm in block_device_mappings
if block_device.new_format_is_ephemeral(bdm))
if ephemeral_size > instance_type['ephemeral_gb']:
raise exception.InvalidBDMEphemeralSize()
# There should be only one swap
- swap_list = block_device.get_bdm_swap_list(all_mappings)
+ swap_list = block_device.get_bdm_swap_list(block_device_mappings)
if len(swap_list) > 1:
msg = _("More than one swap drive requested.")
raise exception.InvalidBDMFormat(details=msg)
@@ -1283,7 +1281,7 @@ class API(base.Base):
max_local = CONF.max_local_block_devices
if max_local >= 0:
- num_local = len([bdm for bdm in all_mappings
+ num_local = len([bdm for bdm in block_device_mappings
if bdm.destination_type == 'local'])
if num_local > max_local:
raise exception.InvalidBDMLocalsLimit()
@@ -1323,11 +1321,6 @@ class API(base.Base):
index, security_groups, instance_type):
"""Build the beginning of a new instance."""
- if not instance.obj_attr_is_set('uuid'):
- # Generate the instance_uuid here so we can use it
- # for additional setup before creating the DB entry.
- instance.uuid = str(uuid.uuid4())
-
instance.launch_index = index
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SCHEDULING
@@ -1360,8 +1353,9 @@ class API(base.Base):
instance.system_metadata.update(system_meta)
- self.security_group_api.populate_security_groups(instance,
- security_groups)
+ pop_sec_groups = self.security_group_api.populate_security_groups
+ instance.security_groups = pop_sec_groups(security_groups)
+
return instance
# NOTE(bcwaldon): No policy check since this is only used by scheduler and
@@ -1484,19 +1478,21 @@ class API(base.Base):
msg = _('The requested availability zone is not available')
raise exception.InvalidRequest(msg)
+ filter_properties = scheduler_utils.build_filter_properties(
+ scheduler_hints, forced_host, forced_node, instance_type)
+
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_group,
- availability_zone, forced_host, forced_node,
- user_data, metadata,
+ availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
- scheduler_hints=scheduler_hints,
+ filter_properties=filter_properties,
legacy_bdm=legacy_bdm,
shutdown_terminate=shutdown_terminate,
check_server_group_quota=check_server_group_quota)
@@ -1719,6 +1715,33 @@ class API(base.Base):
ram=-instance_memory_mb)
return quotas
+ def _local_cleanup_bdm_volumes(self, bdms, instance, context):
+ """The method deletes the bdm records and, if a bdm is a volume, call
+ the terminate connection and the detach volume via the Volume API.
+ Note that at this point we do not have the information about the
+ correct connector so we pass a fake one.
+ """
+ elevated = context.elevated()
+ for bdm in bdms:
+ if bdm.is_volume:
+ # NOTE(vish): We don't have access to correct volume
+ # connector info, so just pass a fake
+ # connector. This can be improved when we
+ # expose get_volume_connector to rpc.
+ connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
+ try:
+ self.volume_api.terminate_connection(context,
+ bdm.volume_id,
+ connector)
+ self.volume_api.detach(elevated, bdm.volume_id,
+ instance.uuid)
+ if bdm.delete_on_termination:
+ self.volume_api.delete(context, bdm.volume_id)
+ except Exception as exc:
+ err_str = _LW("Ignoring volume cleanup failure due to %s")
+ LOG.warn(err_str % exc, instance=instance)
+ bdm.destroy()
+
def _local_delete(self, context, instance, bdms, delete_type, cb):
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
LOG.info(_LI("instance is in SHELVED_OFFLOADED state, cleanup"
@@ -1752,24 +1775,7 @@ class API(base.Base):
instance.host = orig_host
# cleanup volumes
- for bdm in bdms:
- if bdm.is_volume:
- # NOTE(vish): We don't have access to correct volume
- # connector info, so just pass a fake
- # connector. This can be improved when we
- # expose get_volume_connector to rpc.
- connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
- try:
- self.volume_api.terminate_connection(context,
- bdm.volume_id,
- connector)
- self.volume_api.detach(elevated, bdm.volume_id)
- if bdm.delete_on_termination:
- self.volume_api.delete(context, bdm.volume_id)
- except Exception as exc:
- err_str = _LW("Ignoring volume cleanup failure due to %s")
- LOG.warn(err_str % exc, instance=instance)
- bdm.destroy()
+ self._local_cleanup_bdm_volumes(bdms, instance, context)
cb(context, instance, bdms, local=True)
sys_meta = instance.system_metadata
instance.destroy()
@@ -1944,10 +1950,6 @@ class API(base.Base):
instance_uuid=instance_id)
instance = objects.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
- elif strutils.is_int_like(instance_id):
- LOG.debug("Fetching instance by numeric id %s", instance_id)
- instance = objects.Instance.get_by_id(
- context, instance_id, expected_attrs=expected_attrs)
else:
LOG.debug("Failed to fetch instance by id %s", instance_id)
raise exception.InstanceNotFound(instance_id=instance_id)
@@ -2044,7 +2046,10 @@ class API(base.Base):
# We already know we can't match the filter, so
# return an empty list
except ValueError:
- return []
+ if want_objects:
+ return objects.InstanceList()
+ else:
+ return []
# IP address filtering cannot be applied at the DB layer, remove any DB
# limit so that it can be applied after the IP filter.
@@ -2264,8 +2269,8 @@ class API(base.Base):
except (exception.InstanceQuiesceNotSupported,
exception.QemuGuestAgentNotEnabled,
exception.NovaException, NotImplementedError) as err:
- if strutils.bool_from_string(properties.get(
- 'os_require_quiesce')):
+ if strutils.bool_from_string(instance.system_metadata.get(
+ 'image_os_require_quiesce')):
raise
else:
LOG.info(_LI('Skipping quiescing instance: '
@@ -2678,7 +2683,15 @@ class API(base.Base):
self._record_action_start(context, instance, instance_actions.UNSHELVE)
- self.compute_task_api.unshelve_instance(context, instance)
+ try:
+ request_spec = objects.RequestSpec.get_by_instance_uuid(
+ context, instance.uuid)
+ except exception.RequestSpecNotFound:
+ # Some old instances can still have no RequestSpec object attached
+ # to them, so we need to support the old way
+ request_spec = None
+ self.compute_task_api.unshelve_instance(context, instance,
+ request_spec)
@wrap_check_policy
@check_instance_lock
@@ -2966,6 +2979,38 @@ class API(base.Base):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
+ def _create_volume_bdm(self, context, instance, device, volume_id,
+ disk_bus, device_type, is_local_creation=False):
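+ """Create the volume BDM, either locally or by reserving a
+ device name on the compute host."""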
+ if is_local_creation:
+ # When the creation is done locally we can't specify the device
+ # name, as we have no way to check that the specified name is
+ # a valid one.
+ # We defer setting that value until the actual attach happens
+ # on the compute manager.
+ volume_bdm = objects.BlockDeviceMapping(
+ context=context,
+ source_type='volume', destination_type='volume',
+ instance_uuid=instance.uuid, boot_index=None,
+ volume_id=volume_id or 'reserved',
+ device_name=None, guest_format=None,
+ disk_bus=disk_bus, device_type=device_type)
+ volume_bdm.create()
+ else:
+ # NOTE(vish): This is done on the compute host because we want
+ # to avoid a race where two devices are requested at
+ # the same time. When db access is removed from
+ # compute, the bdm will be created here and we will
+ # have to make sure that they are assigned atomically.
+ volume_bdm = self.compute_rpcapi.reserve_block_device_name(
+ context, instance, device, volume_id, disk_bus=disk_bus,
+ device_type=device_type)
+ return volume_bdm
+
+ def _check_attach_and_reserve_volume(self, context, volume_id, instance):
+ volume = self.volume_api.get(context, volume_id)
+ self.volume_api.check_attach(context, volume, instance=instance)
+ self.volume_api.reserve_volume(context, volume_id)
+
def _attach_volume(self, context, instance, volume_id, device,
disk_bus, device_type):
"""Attach an existing volume to an existing instance.
@@ -2973,18 +3018,11 @@ class API(base.Base):
This method is separated to make it possible for cells version
to override it.
"""
- # NOTE(vish): This is done on the compute host because we want
- # to avoid a race where two devices are requested at
- # the same time. When db access is removed from
- # compute, the bdm will be created here and we will
- # have to make sure that they are assigned atomically.
- volume_bdm = self.compute_rpcapi.reserve_block_device_name(
+ volume_bdm = self._create_volume_bdm(
context, instance, device, volume_id, disk_bus=disk_bus,
device_type=device_type)
try:
- volume = self.volume_api.get(context, volume_id)
- self.volume_api.check_attach(context, volume, instance=instance)
- self.volume_api.reserve_volume(context, volume_id)
+ self._check_attach_and_reserve_volume(context, volume_id, instance)
self.compute_rpcapi.attach_volume(context, instance, volume_bdm)
except Exception:
with excutils.save_and_reraise_exception():
@@ -2992,11 +3030,40 @@ class API(base.Base):
return volume_bdm.device_name
+ def _attach_volume_shelved_offloaded(self, context, instance, volume_id,
+ device, disk_bus, device_type):
+ """Attach an existing volume to an instance in shelved offloaded state.
+
+ Attaching a volume to an instance in shelved offloaded state requires
+ the regular checks to see whether we can attach and reserve the
+ volume, followed by a call to the attach method on the volume API
+ to mark the volume as 'in-use'.
+ At this stage the instance is not managed by a compute manager,
+ so the actual attachment will be performed once the instance
+ is unshelved.
+ """
+
+ volume_bdm = self._create_volume_bdm(
+ context, instance, device, volume_id, disk_bus=disk_bus,
+ device_type=device_type, is_local_creation=True)
+ try:
+ self._check_attach_and_reserve_volume(context, volume_id, instance)
+ self.volume_api.attach(context,
+ volume_id,
+ instance.uuid,
+ device)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ volume_bdm.destroy()
+
+ return volume_bdm.device_name
+
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
- vm_states.SOFT_DELETED])
+ vm_states.SOFT_DELETED, vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED])
def attach_volume(self, context, instance, volume_id, device=None,
disk_bus=None, device_type=None):
"""Attach an existing volume to an existing instance."""
@@ -3006,35 +3073,63 @@ class API(base.Base):
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
+
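+ # A shelved-offloaded instance has no compute host, so the
+ # attachment is only recorded locally here and completed when
+ # the instance is unshelved.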
+ is_shelved_offloaded = instance.vm_state == vm_states.SHELVED_OFFLOADED
+ if is_shelved_offloaded:
+ return self._attach_volume_shelved_offloaded(context,
+ instance,
+ volume_id,
+ device,
+ disk_bus,
+ device_type)
+
return self._attach_volume(context, instance, volume_id, device,
disk_bus, device_type)
+ def _check_and_begin_detach(self, context, volume, instance):
+ self.volume_api.check_detach(context, volume, instance=instance)
+ self.volume_api.begin_detaching(context, volume['id'])
+
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance.
This method is separated to make it easier for cells version
to override.
"""
- self.volume_api.check_detach(context, volume)
- self.volume_api.begin_detaching(context, volume['id'])
+ self._check_and_begin_detach(context, volume, instance)
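+ # Pass this instance's attachment id through, when the volume
+ # reports one, so compute detaches the correct attachment.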
+ attachments = volume.get('attachments', {})
+ attachment_id = None
+ if attachments and instance.uuid in attachments:
+ attachment_id = attachments[instance.uuid]['attachment_id']
self.compute_rpcapi.detach_volume(context, instance=instance,
- volume_id=volume['id'])
+ volume_id=volume['id'], attachment_id=attachment_id)
+
+ def _detach_volume_shelved_offloaded(self, context, instance, volume):
+ """Detach a volume from an instance in shelved offloaded state.
+
+ If the instance is shelved offloaded we just need to clean up the
+ volume by calling the volume api detach, the volume api
+ terminate_connection, and then deleting the bdm record.
+ If the volume has the delete_on_termination option set then we call
+ the volume api delete as well.
+ """
+ self._check_and_begin_detach(context, volume, instance)
+ bdms = [objects.BlockDeviceMapping.get_by_volume_id(
+ context, volume['id'], instance.uuid)]
+ self._local_cleanup_bdm_volumes(bdms, instance, context)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
- vm_states.SOFT_DELETED])
+ vm_states.SOFT_DELETED, vm_states.SHELVED,
+ vm_states.SHELVED_OFFLOADED])
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
- if volume['attach_status'] == 'detached':
- msg = _("Volume must be attached in order to detach.")
- raise exception.InvalidVolume(reason=msg)
- # The caller likely got the instance from volume['instance_uuid']
- # in the first place, but let's sanity check.
- if volume['instance_uuid'] != instance.uuid:
- raise exception.VolumeUnattached(volume_id=volume['id'])
- self._detach_volume(context, instance, volume)
+ if instance.vm_state == vm_states.SHELVED_OFFLOADED:
+ self._detach_volume_shelved_offloaded(context, instance, volume)
+ else:
+ self._detach_volume(context, instance, volume)
@wrap_check_policy
@check_instance_lock
@@ -3045,9 +3140,9 @@ class API(base.Base):
"""Swap volume attached to an instance."""
if old_volume['attach_status'] == 'detached':
raise exception.VolumeUnattached(volume_id=old_volume['id'])
- # The caller likely got the instance from volume['instance_uuid']
+ # The caller likely got the instance from volume['attachments']
# in the first place, but let's sanity check.
- if old_volume['instance_uuid'] != instance.uuid:
+ if not old_volume.get('attachments', {}).get(instance.uuid):
msg = _("Old volume is attached to a different instance.")
raise exception.InvalidVolume(reason=msg)
if new_volume['attach_status'] == 'attached':
@@ -3192,10 +3287,46 @@ class API(base.Base):
self._record_action_start(context, instance,
instance_actions.LIVE_MIGRATION)
-
+ try:
+ request_spec = objects.RequestSpec.get_by_instance_uuid(
+ context, instance.uuid)
+ except exception.RequestSpecNotFound:
+ # Some old instances can still have no RequestSpec object attached
+ # to them, so we need to support the old way
+ request_spec = None
self.compute_task_api.live_migrate_instance(context, instance,
host_name, block_migration=block_migration,
- disk_over_commit=disk_over_commit)
+ disk_over_commit=disk_over_commit,
+ request_spec=request_spec)
+
+ @check_instance_lock
+ @check_instance_cell
+ @check_instance_state(vm_state=[vm_states.ACTIVE],
+ task_state=[task_states.MIGRATING])
+ def live_migrate_force_complete(self, context, instance, migration_id):
+ """Force live migration to complete.
+
+ :param context: Security context
+ :param instance: The instance that is being migrated
+ :param migration_id: ID of ongoing migration
+
+ """
+ LOG.debug("Going to try to force live migration to complete",
+ instance=instance)
+
+ # NOTE(pkoniszewski): Get the migration object to check whether
+ # there is an ongoing live migration for the particular instance.
+ # Also pass the migration id to the compute to double-check and
+ # avoid a possible race condition.
+ migration = objects.Migration.get_by_id_and_instance(
+ context, migration_id, instance.uuid)
+ if migration.status != 'running':
+ raise exception.InvalidMigrationState(migration_id=migration_id,
+ instance_uuid=instance.uuid,
+ state=migration.status,
+ method='force complete')
+
+ self.compute_rpcapi.live_migration_force_complete(
+ context, instance, migration.id)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
@@ -3239,6 +3370,13 @@ class API(base.Base):
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "evacuate")
+ try:
+ request_spec = objects.RequestSpec.get_by_instance_uuid(
+ context, instance.uuid)
+ except exception.RequestSpecNotFound:
+ # Some old instances can still have no RequestSpec object attached
+ # to them, so we need to support the old way
+ request_spec = None
return self.compute_task_api.rebuild_instance(context,
instance=instance,
new_pass=admin_password,
@@ -3249,7 +3387,9 @@ class API(base.Base):
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
- host=host)
+ host=host,
+ request_spec=request_spec,
+ )
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
@@ -3304,6 +3444,40 @@ class API(base.Base):
self.compute_rpcapi.external_instance_event(
context, instances_by_host[host], events_by_host[host])
+ def get_instance_host_status(self, instance):
+ if instance.host:
+ try:
+ service = [service for service in instance.services if
+ service.binary == 'nova-compute'][0]
+ if service.forced_down:
+ host_status = fields_obj.HostStatus.DOWN
+ elif service.disabled:
+ host_status = fields_obj.HostStatus.MAINTENANCE
+ else:
+ alive = self.servicegroup_api.service_is_up(service)
+ host_status = ((alive and fields_obj.HostStatus.UP) or
+ fields_obj.HostStatus.UNKNOWN)
+ except IndexError:
+ host_status = fields_obj.HostStatus.NONE
+ else:
+ host_status = fields_obj.HostStatus.NONE
+ return host_status
+
+ def get_instances_host_statuses(self, instance_list):
+ host_status_dict = dict()
+ host_statuses = dict()
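+ # Cache the status per host so each host's service is only
+ # looked up once, no matter how many instances share it.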
+ for instance in instance_list:
+ if instance.host:
+ if instance.host not in host_status_dict:
+ host_status = self.get_instance_host_status(instance)
+ host_status_dict[instance.host] = host_status
+ else:
+ host_status = host_status_dict[instance.host]
+ else:
+ host_status = fields_obj.HostStatus.NONE
+ host_statuses[instance.uuid] = host_status
+ return host_statuses
+
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
@@ -4117,9 +4291,8 @@ class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
groups = objects.SecurityGroupList.get_by_instance(context, instance)
return [{'name': group.name} for group in groups]
- def populate_security_groups(self, instance, security_groups):
+ def populate_security_groups(self, security_groups):
if not security_groups:
- # Make sure it's an empty list and not None
- security_groups = []
- instance.security_groups = security_group_obj.make_secgroup_list(
- security_groups)
+ # Make sure it's an empty SecurityGroupList and not None
+ return objects.SecurityGroupList()
+ return security_group_obj.make_secgroup_list(security_groups)
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index 7381a14f8b..50ad4826bc 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -429,7 +429,7 @@ class ComputeCellsAPI(compute_api.API):
@check_instance_cell
def _detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
- self.volume_api.check_detach(context, volume)
+ self.volume_api.check_detach(context, volume, instance=instance)
self._cast_to_cells(context, instance, 'detach_volume',
volume)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 831fe5b29e..94996b1057 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -40,7 +40,7 @@ import eventlet.event
from eventlet import greenthread
import eventlet.semaphore
import eventlet.timeout
-from keystoneclient import exceptions as keystone_exception
+from keystoneauth1 import exceptions as keystone_exception
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
@@ -344,8 +344,12 @@ def reverts_task_state(function):
# have utils.expects_func_args('instance') decorating this
# method.
instance = keyed_args['instance']
+ original_task_state = instance.task_state
try:
self._instance_update(context, instance, task_state=None)
+ LOG.info(_LI("Successfully reverted task state from %s on "
+ "failure for instance."), original_task_state,
+ instance=instance)
except exception.InstanceNotFound:
# We might delete an instance that failed to build shortly
# after it errored out this is an expected case and we
@@ -602,9 +606,6 @@ class ComputeVirtAPI(virtapi.VirtAPI):
super(ComputeVirtAPI, self).__init__()
self._compute = compute
- def provider_fw_rule_get_all(self, context):
- return self._compute.conductor_api.provider_fw_rule_get_all(context)
-
def _default_error_callback(self, event_name, instance):
raise exception.NovaException(_('Instance event failed'))
@@ -674,7 +675,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
- target = messaging.Target(version='4.6')
+ target = messaging.Target(version='4.9')
# How long to wait in seconds before re-issuing a shutdown
# signal to an instance during power off. The overall
@@ -1392,11 +1393,6 @@ class ComputeManager(manager.Manager):
return _sync_refresh()
- @wrap_exception()
- def refresh_provider_fw_rules(self, context):
- """This call passes straight through to the virtualization driver."""
- return self.driver.refresh_provider_fw_rules()
-
def _await_block_device_map_created(self, context, vol_id):
# TODO(yamahata): creating volume simultaneously
# reduces creation time?
@@ -1734,10 +1730,20 @@ class ComputeManager(manager.Manager):
'swap': swap,
'block_device_mapping': mapping})
+ def _check_dev_name(self, bdms, instance):
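+ """Assign a driver-chosen default device name to BDMs missing one."""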
+ bdms_no_device_name = [x for x in bdms if x.device_name is None]
+ for bdm in bdms_no_device_name:
+ device_name = self._get_device_name_for_instance(instance,
+ bdms,
+ bdm)
+ values = {'device_name': device_name}
+ bdm.update(values)
+
def _prep_block_device(self, context, instance, bdms,
do_check_attach=True):
"""Set up the block device for an instance with error logging."""
try:
+ self._check_dev_name(bdms, instance)
block_device_info = driver.get_block_device_info(instance, bdms)
mapping = driver.block_device_info_get_mapping(block_device_info)
driver_block_device.attach_block_devices(
@@ -2010,8 +2016,9 @@ class ComputeManager(manager.Manager):
# the host is set on the instance.
self._validate_instance_group_policy(context, instance,
filter_properties)
+ image_meta = objects.ImageMeta.from_dict(image)
with self._build_resources(context, instance,
- requested_networks, security_groups, image,
+ requested_networks, security_groups, image_meta,
block_device_mapping) as resources:
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SPAWNING
@@ -2025,7 +2032,7 @@ class ComputeManager(manager.Manager):
LOG.debug('Start spawning the instance on the hypervisor.',
instance=instance)
with timeutils.StopWatch() as timer:
- self.driver.spawn(context, instance, image,
+ self.driver.spawn(context, instance, image_meta,
injected_files, admin_password,
network_info=network_info,
block_device_info=block_device_info)
@@ -2118,7 +2125,7 @@ class ComputeManager(manager.Manager):
@contextlib.contextmanager
def _build_resources(self, context, instance, requested_networks,
- security_groups, image, block_device_mapping):
+ security_groups, image_meta, block_device_mapping):
resources = {}
network_info = None
try:
@@ -2145,7 +2152,7 @@ class ComputeManager(manager.Manager):
try:
# Verify that all the BDMs have a device_name set and assign a
# default to the ones missing it with the help of the driver.
- self._default_block_device_names(context, instance, image,
+ self._default_block_device_names(context, instance, image_meta,
block_device_mapping)
LOG.debug('Start building block device mappings for instance.',
@@ -2275,9 +2282,8 @@ class ComputeManager(manager.Manager):
trying to teardown networking
"""
context = context.elevated()
- LOG.info(_LI('%(action_str)s instance') %
- {'action_str': 'Terminating'},
- context=context, instance=instance)
+ LOG.info(_LI('Terminating instance'),
+ context=context, instance=instance)
if notify:
self._notify_about_instance_usage(context, instance,
@@ -2325,7 +2331,7 @@ class ComputeManager(manager.Manager):
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
- self.volume_api.detach(context, bdm.volume_id)
+ self.volume_api.detach(context, bdm.volume_id, instance.uuid)
except exception.DiskNotFound as exc:
LOG.debug('Ignoring DiskNotFound: %s', exc,
instance=instance)
@@ -2780,6 +2786,13 @@ class ComputeManager(manager.Manager):
'rebuild.error', fault=e)
raise exception.BuildAbortException(
instance_uuid=instance.uuid, reason=e.format_message())
+ except (exception.InstanceNotFound,
+ exception.UnexpectedDeletingTaskStateError) as e:
+ LOG.debug('Instance was deleted while rebuilding',
+ instance=instance)
+ self._set_migration_status(migration, 'failed')
+ self._notify_about_instance_usage(context, instance,
+ 'rebuild.error', fault=e)
except Exception as e:
self._set_migration_status(migration, 'failed')
self._notify_about_instance_usage(context, instance,
@@ -2806,6 +2819,13 @@ class ComputeManager(manager.Manager):
with claim_context:
self._do_rebuild_instance(*args, **kwargs)
+ @staticmethod
+ def _get_image_name(image_meta):
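+ """Return the image name, or '' if the ImageMeta has no name set."""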
+ if image_meta.obj_attr_is_set("name"):
+ return image_meta.name
+ else:
+ return ''
+
def _do_rebuild_instance(self, context, instance, orig_image_ref,
image_ref, injected_files, new_pass,
orig_sys_metadata, bdms, recreate,
@@ -2841,9 +2861,10 @@ class ComputeManager(manager.Manager):
" '%s'"), str(image_ref))
if image_ref:
- image_meta = self.image_api.get(context, image_ref)
+ image_meta = objects.ImageMeta.from_image_ref(
+ context, self.image_api, image_ref)
else:
- image_meta = {}
+ image_meta = objects.ImageMeta.from_dict({})
# This instance.exists message should contain the original
# image_ref, not the new one. Since the DB has been updated
@@ -2857,7 +2878,7 @@ class ComputeManager(manager.Manager):
extra_usage_info=extra_usage_info)
# This message should contain the new image_ref
- extra_usage_info = {'image_name': image_meta.get('name', '')}
+ extra_usage_info = {'image_name': self._get_image_name(image_meta)}
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)
@@ -3316,10 +3337,8 @@ class ComputeManager(manager.Manager):
instance=instance)
rescue_image_ref = instance.image_ref
- image_meta = self.image_api.get(context, rescue_image_ref)
- # NOTE(belliott) bug #1227350 - xenapi needs the actual image id
- image_meta['id'] = rescue_image_ref
- return image_meta
+ return objects.ImageMeta.from_image_ref(
+ context, self.image_api, rescue_image_ref)
@wrap_exception()
@reverts_task_state
@@ -3339,7 +3358,7 @@ class ComputeManager(manager.Manager):
rescue_image_ref)
extra_usage_info = {'rescue_image_name':
- rescue_image_meta.get('name', '')}
+ self._get_image_name(rescue_image_meta)}
self._notify_about_instance_usage(context, instance,
"rescue.start", extra_usage_info=extra_usage_info,
network_info=network_info)
@@ -3896,7 +3915,7 @@ class ComputeManager(manager.Manager):
instance.flavor = instance_type
def _finish_resize(self, context, instance, migration, disk_info,
- image):
+ image_meta):
resize_instance = False
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
@@ -3946,7 +3965,7 @@ class ComputeManager(manager.Manager):
self.driver.finish_migration(context, migration, instance,
disk_info,
network_info,
- image, resize_instance,
+ image_meta, resize_instance,
block_device_info, power_on)
except Exception:
with excutils.save_and_reraise_exception():
@@ -3985,8 +4004,9 @@ class ComputeManager(manager.Manager):
reservations,
instance=instance)
try:
+ image_meta = objects.ImageMeta.from_dict(image)
self._finish_resize(context, instance, migration,
- disk_info, image)
+ disk_info, image_meta)
quotas.commit()
except Exception:
LOG.exception(_LE('Setting instance vm_state to ERROR'),
@@ -4274,6 +4294,9 @@ class ComputeManager(manager.Manager):
instance.task_state = None
instance.save(expected_task_state=[task_states.SHELVING,
task_states.SHELVING_OFFLOADING])
+ # NOTE(ndipanov): This frees the resources with the resource_tracker
+ self._update_resource_tracker(context, instance)
+
self._delete_scheduler_instance_info(context, instance.uuid)
self._notify_about_instance_usage(context, instance,
'shelve_offload.end')
@@ -4335,13 +4358,14 @@ class ComputeManager(manager.Manager):
rt = self._get_resource_tracker(node)
limits = filter_properties.get('limits', {})
+ shelved_image_ref = instance.image_ref
if image:
- shelved_image_ref = instance.image_ref
instance.image_ref = image['id']
- image_meta = image
+ image_meta = objects.ImageMeta.from_dict(image)
else:
- image_meta = utils.get_image_from_system_metadata(
- instance.system_metadata)
+ image_meta = objects.ImageMeta.from_dict(
+ utils.get_image_from_system_metadata(
+ instance.system_metadata))
self.network_api.setup_instance_network_on_host(context, instance,
self.host)
@@ -4728,7 +4752,8 @@ class ComputeManager(manager.Manager):
context=context, instance=instance)
self.volume_api.roll_detaching(context, volume_id)
- def _detach_volume(self, context, volume_id, instance, destroy_bdm=True):
+ def _detach_volume(self, context, volume_id, instance, destroy_bdm=True,
+ attachment_id=None):
"""Detach a volume from an instance.
:param context: security context
@@ -4781,14 +4806,16 @@ class ComputeManager(manager.Manager):
info = dict(volume_id=volume_id)
self._notify_about_instance_usage(
context, instance, "volume.detach", extra_usage_info=info)
- self.volume_api.detach(context.elevated(), volume_id)
+ self.volume_api.detach(context.elevated(), volume_id, instance.uuid,
+ attachment_id)
@wrap_exception()
@wrap_instance_fault
- def detach_volume(self, context, volume_id, instance):
+ def detach_volume(self, context, volume_id, instance, attachment_id=None):
"""Detach a volume from an instance."""
- self._detach_volume(context, volume_id, instance)
+ self._detach_volume(context, volume_id, instance,
+ attachment_id=attachment_id)
def _init_volume_connection(self, context, new_volume_id,
old_volume_id, connector, instance, bdm):
@@ -4941,8 +4968,7 @@ class ComputeManager(manager.Manager):
'ports'), {'ports': len(network_info)})
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
- image_meta = utils.get_image_from_system_metadata(
- instance.system_metadata)
+ image_meta = objects.ImageMeta.from_instance(instance)
try:
self.driver.attach_interface(instance, image_meta, network_info[0])
@@ -5043,9 +5069,7 @@ class ComputeManager(manager.Manager):
dest_check_data = self.driver.check_can_live_migrate_destination(ctxt,
instance, src_compute_info, dst_compute_info,
block_migration, disk_over_commit)
- if isinstance(dest_check_data, migrate_data_obj.LiveMigrateData):
- dest_check_data = dest_check_data.to_legacy_dict()
- migrate_data = {}
+ LOG.debug('destination check data is %s', dest_check_data)
try:
migrate_data = self.compute_rpcapi.\
check_can_live_migrate_source(ctxt, instance,
@@ -5053,8 +5077,6 @@ class ComputeManager(manager.Manager):
finally:
self.driver.check_can_live_migrate_destination_cleanup(ctxt,
dest_check_data)
- if 'migrate_data' in dest_check_data:
- migrate_data.update(dest_check_data['migrate_data'])
return migrate_data
@wrap_exception()
@@ -5073,14 +5095,21 @@ class ComputeManager(manager.Manager):
"""
is_volume_backed = self.compute_api.is_volume_backed_instance(ctxt,
instance)
- dest_check_data['is_volume_backed'] = is_volume_backed
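+ # Older RPC callers still send a legacy dict; detect that case,
+ # convert to the matching LiveMigrateData object, and convert
+ # the result back to a dict below before returning.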
+ got_migrate_data_object = isinstance(dest_check_data,
+ migrate_data_obj.LiveMigrateData)
+ if not got_migrate_data_object:
+ dest_check_data = \
+ migrate_data_obj.LiveMigrateData.detect_implementation(
+ dest_check_data)
+ dest_check_data.is_volume_backed = is_volume_backed
block_device_info = self._get_instance_block_device_info(
ctxt, instance, refresh_conn_info=True)
result = self.driver.check_can_live_migrate_source(ctxt, instance,
dest_check_data,
block_device_info)
- if isinstance(result, migrate_data_obj.LiveMigrateData):
+ if not got_migrate_data_object:
result = result.to_legacy_dict()
+ LOG.debug('source check data is %s', result)
return result
@wrap_exception()
@@ -5098,6 +5127,13 @@ class ComputeManager(manager.Manager):
storage.
"""
+ LOG.debug('pre_live_migration data is %s', migrate_data)
+ got_migrate_data_object = isinstance(migrate_data,
+ migrate_data_obj.LiveMigrateData)
+ if not got_migrate_data_object:
+ migrate_data = \
+ migrate_data_obj.LiveMigrateData.detect_implementation(
+ migrate_data)
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
@@ -5106,18 +5142,13 @@ class ComputeManager(manager.Manager):
context, instance, "live_migration.pre.start",
network_info=network_info)
- pre_live_migration_data = self.driver.pre_live_migration(context,
+ migrate_data = self.driver.pre_live_migration(context,
instance,
block_device_info,
network_info,
disk,
migrate_data)
- if isinstance(pre_live_migration_data,
- migrate_data_obj.LiveMigrateData):
- pre_live_migration_data = pre_live_migration_data.to_legacy_dict(
- pre_migration_result=True)
- pre_live_migration_data = pre_live_migration_data[
- 'pre_live_migration_result']
+ LOG.debug('driver pre_live_migration data is %s', migrate_data)
# NOTE(tr3buchet): setup networks on destination host
self.network_api.setup_networks_on_host(context, instance,
@@ -5136,22 +5167,12 @@ class ComputeManager(manager.Manager):
context, instance, "live_migration.pre.end",
network_info=network_info)
- return pre_live_migration_data
-
- def _get_migrate_data_obj(self):
- # FIXME(danms): A couple patches from now, we'll be able to
- # avoid this failure _if_ we get a new-style call with the
- # object.
- if CONF.compute_driver.startswith('libvirt'):
- return objects.LibvirtLiveMigrateData()
- elif CONF.compute_driver.startswith('xenapi'):
- return objects.XenapiLiveMigrateData()
- else:
- LOG.error(_('Older RPC caller and unsupported virt driver in '
- 'use. Unable to handle this!'))
- raise exception.MigrationError(
- _('Unknown compute driver while providing compatibility '
- 'with older RPC formats'))
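+ # When the caller sent a legacy dict, convert the object back to
+ # the legacy pre_live_migration result format.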
+ if not got_migrate_data_object and migrate_data:
+ migrate_data = migrate_data.to_legacy_dict(
+ pre_migration_result=True)
+ migrate_data = migrate_data['pre_live_migration_result']
+ LOG.debug('pre_live_migration result data is %s', migrate_data)
+ return migrate_data
def _do_live_migration(self, context, dest, instance, block_migration,
migration, migrate_data):
@@ -5161,8 +5182,13 @@ class ComputeManager(manager.Manager):
# reporting
self._set_migration_status(migration, 'preparing')
- # Create a local copy since we'll be modifying the dictionary
- migrate_data = dict(migrate_data or {})
+ got_migrate_data_object = isinstance(migrate_data,
+ migrate_data_obj.LiveMigrateData)
+ if not got_migrate_data_object:
+ migrate_data = \
+ migrate_data_obj.LiveMigrateData.detect_implementation(
+ migrate_data)
+
try:
if block_migration:
block_device_info = self._get_instance_block_device_info(
@@ -5172,12 +5198,9 @@ class ComputeManager(manager.Manager):
else:
disk = None
- pre_migration_data = self.compute_rpcapi.pre_live_migration(
+ migrate_data = self.compute_rpcapi.pre_live_migration(
context, instance,
block_migration, disk, dest, migrate_data)
- migrate_data['pre_live_migration_result'] = pre_migration_data
- migrate_data_obj = self._get_migrate_data_obj()
- migrate_data_obj.from_legacy_dict(migrate_data)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Pre live migration failed at %s'),
@@ -5188,12 +5211,14 @@ class ComputeManager(manager.Manager):
self._set_migration_status(migration, 'running')
- migrate_data_obj.migration = migration
+ if migrate_data:
+ migrate_data.migration = migration
+ LOG.debug('live_migration data is %s', migrate_data)
try:
self.driver.live_migration(context, instance, dest,
self._post_live_migration,
self._rollback_live_migration,
- block_migration, migrate_data_obj)
+ block_migration, migrate_data)
except Exception:
# Executing live migration
# live_migration might raises exceptions, but
@@ -5231,6 +5256,29 @@ class ComputeManager(manager.Manager):
block_migration, migration,
migrate_data)
+ @wrap_exception()
+ @wrap_instance_fault
+ def live_migration_force_complete(self, context, instance, migration_id):
+ """Force live migration to complete.
+
+ :param context: Security context
+ :param instance: The instance that is being migrated
+ :param migration_id: ID of ongoing migration
+
+ """
+ migration = objects.Migration.get_by_id(context, migration_id)
+ if migration.status != 'running':
+ raise exception.InvalidMigrationState(migration_id=migration_id,
+ instance_uuid=instance.uuid,
+ state=migration.status,
+ method='force complete')
+
+ self._notify_about_instance_usage(
+ context, instance, 'live.migration.force.complete.start')
+ self.driver.live_migration_force_complete(instance)
+ self._notify_about_instance_usage(
+ context, instance, 'live.migration.force.complete.end')
+
def _live_migration_cleanup_flags(self, block_migration, migrate_data):
"""Determine whether disks or instance path need to be cleaned up after
live migration (at source on success, at destination on rollback)
@@ -5250,7 +5298,7 @@ class ComputeManager(manager.Manager):
# block storage or instance path were shared
is_shared_block_storage = not block_migration
is_shared_instance_path = not block_migration
- if isinstance(migrate_data, objects.LibvirtLiveMigrateData):
+ if isinstance(migrate_data, migrate_data_obj.LibvirtLiveMigrateData):
is_shared_block_storage = migrate_data.is_shared_block_storage
is_shared_instance_path = migrate_data.is_shared_instance_path
@@ -5423,24 +5471,32 @@ class ComputeManager(manager.Manager):
block_device_info = self._get_instance_block_device_info(context,
instance)
- self.driver.post_live_migration_at_destination(context, instance,
- network_info,
- block_migration, block_device_info)
- # Restore instance state
- current_power_state = self._get_power_state(context, instance)
- node_name = None
- prev_host = instance.host
try:
- compute_node = self._get_compute_info(context, self.host)
- node_name = compute_node.hypervisor_hostname
- except exception.ComputeHostNotFound:
- LOG.exception(_LE('Failed to get compute_info for %s'), self.host)
+ self.driver.post_live_migration_at_destination(
+ context, instance, network_info, block_migration,
+ block_device_info)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ instance.vm_state = vm_states.ERROR
+ LOG.error(_LE('Unexpected error during post live migration at '
+ 'destination host.'), instance=instance)
finally:
- instance.host = self.host
- instance.power_state = current_power_state
- instance.task_state = None
- instance.node = node_name
- instance.save(expected_task_state=task_states.MIGRATING)
+ # Restore instance state and update host
+ current_power_state = self._get_power_state(context, instance)
+ node_name = None
+ prev_host = instance.host
+ try:
+ compute_node = self._get_compute_info(context, self.host)
+ node_name = compute_node.hypervisor_hostname
+ except exception.ComputeHostNotFound:
+ LOG.exception(_LE('Failed to get compute_info for %s'),
+ self.host)
+ finally:
+ instance.host = self.host
+ instance.power_state = current_power_state
+ instance.task_state = None
+ instance.node = node_name
+ instance.save(expected_task_state=task_states.MIGRATING)
# NOTE(tr3buchet): tear down networks on source host
self.network_api.setup_networks_on_host(context, instance,
@@ -5470,11 +5526,13 @@ class ComputeManager(manager.Manager):
instance.task_state = None
instance.save(expected_task_state=[task_states.MIGRATING])
- # NOTE(danms): Pop out the migration object so we don't pass
- # it over RPC unintentionally below
if isinstance(migrate_data, dict):
migration = migrate_data.pop('migration', None)
- elif isinstance(migrate_data, migrate_data_obj.LiveMigrateData):
+ migrate_data = \
+ migrate_data_obj.LiveMigrateData.detect_implementation(
+ migrate_data)
+ elif (isinstance(migrate_data, migrate_data_obj.LiveMigrateData) and
+ migrate_data.obj_attr_is_set('migration')):
migration = migrate_data.migration
else:
migration = None
@@ -5487,7 +5545,7 @@ class ComputeManager(manager.Manager):
for bdm in bdms:
if bdm.is_volume:
self.compute_rpcapi.remove_volume_connection(
- context, instance, bdm.volume_id, dest)
+ context, bdm.volume_id, instance, dest)
self._notify_about_instance_usage(context, instance,
"live_migration._rollback.start")
@@ -5496,8 +5554,6 @@ class ComputeManager(manager.Manager):
block_migration, migrate_data)
if do_cleanup:
- if isinstance(migrate_data, migrate_data_obj.LiveMigrateData):
- migrate_data = migrate_data.to_legacy_dict()
self.compute_rpcapi.rollback_live_migration_at_destination(
context, instance, dest, destroy_disks=destroy_disks,
migrate_data=migrate_data)
@@ -5538,6 +5594,10 @@ class ComputeManager(manager.Manager):
# from remote volumes if necessary
block_device_info = self._get_instance_block_device_info(context,
instance)
+ if isinstance(migrate_data, dict):
+ migrate_data = \
+ migrate_data_obj.LiveMigrateData.detect_implementation(
+ migrate_data)
self.driver.rollback_live_migration_at_destination(
context, instance, network_info, block_device_info,
destroy_disks=destroy_disks, migrate_data=migrate_data)
@@ -5601,7 +5661,8 @@ class ComputeManager(manager.Manager):
try:
inst = objects.Instance.get_by_uuid(
context, instance_uuids.pop(0),
- expected_attrs=['system_metadata', 'info_cache'],
+ expected_attrs=['system_metadata', 'info_cache',
+ 'flavor'],
use_slave=True)
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
@@ -5809,7 +5870,8 @@ class ComputeManager(manager.Manager):
instances = objects.InstanceList.get_active_by_window_joined(
context, begin, end, host=self.host,
- expected_attrs=['system_metadata', 'info_cache', 'metadata'],
+ expected_attrs=['system_metadata', 'info_cache', 'metadata',
+ 'flavor'],
use_slave=True)
num_instances = len(instances)
errors = 0
@@ -6294,10 +6356,9 @@ class ComputeManager(manager.Manager):
LOG.info(_LI("Compute node '%s' not found in "
"update_available_resource."), nodename)
continue
- except Exception as e:
- LOG.error(_LE("Error updating resources for node "
- "%(node)s: %(e)s"),
- {'node': nodename, 'e': e})
+ except Exception:
+ LOG.exception(_LE("Error updating resources for node "
+ "%(node)s."), {'node': nodename})
new_resource_tracker_dict[nodename] = rt
# NOTE(comstud): Replace the RT cache before looping through
@@ -6651,8 +6712,7 @@ class ComputeManager(manager.Manager):
def quiesce_instance(self, context, instance):
"""Quiesce an instance on this host."""
context = context.elevated()
- image_meta = utils.get_image_from_system_metadata(
- instance.system_metadata)
+ image_meta = objects.ImageMeta.from_instance(instance)
self.driver.quiesce(context, instance, image_meta)
def _wait_for_snapshots_completion(self, context, mapping):
@@ -6687,6 +6747,5 @@ class ComputeManager(manager.Manager):
LOG.exception(_LE("Exception while waiting completion of "
"volume snapshots: %s"),
error, instance=instance)
- image_meta = utils.get_image_from_system_metadata(
- instance.system_metadata)
+ image_meta = objects.ImageMeta.from_instance(instance)
self.driver.unquiesce(context, instance, image_meta)
diff --git a/nova/compute/monitors/cpu/virt_driver.py b/nova/compute/monitors/cpu/virt_driver.py
index 267a14c4cf..ef7a01b76a 100644
--- a/nova/compute/monitors/cpu/virt_driver.py
+++ b/nova/compute/monitors/cpu/virt_driver.py
@@ -17,16 +17,15 @@
CPU monitor based on virt driver to retrieve CPU information
"""
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from nova.compute.monitors import base
+import nova.conf
from nova import exception
from nova.i18n import _LE
-CONF = cfg.CONF
-CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 2b26ea0b40..ebe8aa0f51 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -31,7 +31,7 @@ from nova.compute import resources as ext_resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import exception
-from nova.i18n import _, _LI, _LW
+from nova.i18n import _, _LE, _LI, _LW
from nova import objects
from nova.objects import base as obj_base
from nova.objects import migration as migration_obj
@@ -81,6 +81,21 @@ allocation_ratio_opts = [
'NOTE: This can be set per-compute, or if set to 0.0, the value '
'set on the scheduler node(s) will be used '
'and defaulted to 1.5'),
+ cfg.FloatOpt('disk_allocation_ratio',
+ default=0.0,
+ help='This is the virtual disk to physical disk allocation ratio used '
+ 'by the disk_filter.py script to determine if a host has '
+ 'sufficient disk space to fit a requested instance. A ratio '
+ 'greater than 1.0 will result in over-subscription of the '
+ 'available physical disk, which can be useful for more '
+ 'efficiently packing instances created with images that do not '
+ 'use the entire virtual disk, such as sparse or compressed '
+ 'images. It can be set to a value between 0.0 and 1.0 in order '
+ 'to preserve a percentage of the disk for uses other than '
+ 'instances. '
+ 'NOTE: This can be set per-compute, or if set to 0.0, the value '
+ 'set on the scheduler node(s) will be used '
+ 'and defaulted to 1.0'),
]
@@ -114,6 +129,9 @@ def _instance_in_resize_state(instance):
return False
+_REMOVED_STATES = (vm_states.DELETED, vm_states.SHELVED_OFFLOADED)
+
+
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
@@ -136,6 +154,7 @@ class ResourceTracker(object):
self.scheduler_client = scheduler_client.SchedulerClient()
self.ram_allocation_ratio = CONF.ram_allocation_ratio
self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
+ self.disk_allocation_ratio = CONF.disk_allocation_ratio
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance_ref, limits=None):
@@ -430,6 +449,7 @@ class ResourceTracker(object):
# update the allocation ratios for the related ComputeNode object
self.compute_node.ram_allocation_ratio = self.ram_allocation_ratio
self.compute_node.cpu_allocation_ratio = self.cpu_allocation_ratio
+ self.compute_node.disk_allocation_ratio = self.disk_allocation_ratio
# now copy rest to compute_node
self.compute_node.update_from_virt_driver(resources)
@@ -491,6 +511,20 @@ class ResourceTracker(object):
self._update_available_resource(context, resources)
+ def _pair_instances_to_migrations(self, migrations, instances):
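+ """Attach each migration's Instance object up front so it does
+ not have to be lazy-loaded from the database later."""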
+ instance_by_uuid = {inst.uuid: inst for inst in instances}
+ for migration in migrations:
+ try:
+ migration.instance = instance_by_uuid[migration.instance_uuid]
+ except KeyError:
+ # NOTE(danms): If this happens, we don't set it here, and
+ # let the code either fail or lazy-load the instance later
+ # which is what happened before we added this optimization.
+ # This _should_ not be possible, of course.
+ LOG.error(_LE('Migration for instance %(uuid)s refers to '
+ 'another host\'s instance!'),
+ {'uuid': migration.instance_uuid})
+
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def _update_available_resource(self, context, resources):
@@ -516,7 +550,8 @@ class ResourceTracker(object):
instances = objects.InstanceList.get_by_host_and_node(
context, self.host, self.nodename,
expected_attrs=['system_metadata',
- 'numa_topology'])
+ 'numa_topology',
+ 'flavor', 'migration_context'])
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(context, instances)
@@ -525,6 +560,7 @@ class ResourceTracker(object):
migrations = objects.MigrationList.get_in_progress_by_host_and_node(
context, self.host, self.nodename)
+ self._pair_instances_to_migrations(migrations, instances)
self._update_usage_from_migrations(context, migrations)
# Detect and account for orphaned instances that may exist on the
@@ -804,8 +840,10 @@ class ResourceTracker(object):
# filter to most recently updated migration for each instance:
other_migration = filtered.get(uuid, None)
- if (not other_migration or
- migration.updated_at >= other_migration.updated_at):
+ # NOTE(claudiub): In Python 3, comparing None with a datetime
+ # raises TypeError, so only compare when both timestamps are set.
+ if (not other_migration or (
+ migration.updated_at and other_migration.updated_at and
+ migration.updated_at >= other_migration.updated_at)):
filtered[uuid] = migration
for migration in filtered.values():
@@ -823,20 +861,20 @@ class ResourceTracker(object):
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
- is_deleted_instance = instance['vm_state'] == vm_states.DELETED
+ is_removed_instance = instance['vm_state'] in _REMOVED_STATES
if is_new_instance:
self.tracked_instances[uuid] = obj_base.obj_to_primitive(instance)
sign = 1
- if is_deleted_instance:
+ if is_removed_instance:
self.tracked_instances.pop(uuid)
sign = -1
self.stats.update_stats_for_instance(instance)
# if it's a new or deleted instance:
- if is_new_instance or is_deleted_instance:
+ if is_new_instance or is_removed_instance:
if self.pci_tracker:
self.pci_tracker.update_pci_for_instance(context,
instance,
@@ -875,7 +913,7 @@ class ResourceTracker(object):
self.driver)
for instance in instances:
- if instance.vm_state != vm_states.DELETED:
+ if instance.vm_state not in _REMOVED_STATES:
self._update_usage_from_instance(context, instance)
def _find_orphaned_instances(self):
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 0260fc9b3d..c9b33c6b3a 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -25,6 +25,7 @@ from nova import exception
from nova.i18n import _, _LI, _LE
from nova import objects
from nova.objects import base as objects_base
+from nova.objects import migrate_data as migrate_data_obj
from nova.objects import service as service_obj
from nova import rpc
@@ -319,6 +320,12 @@ class ComputeAPI(object):
* ... - Remove refresh_security_group_members()
* ... - Remove refresh_security_group_rules()
* 4.6 - Add trigger_crash_dump()
+ * 4.7 - Add attachment_id argument to detach_volume()
+ * 4.8 - Send migrate_data in object format for live_migration,
+ rollback_live_migration_at_destination, and
+ pre_live_migration.
+ * ... - Remove refresh_provider_fw_rules()
+ * 4.9 - Add live_migration_force_complete()
'''
VERSION_ALIASES = {
@@ -429,18 +436,37 @@ class ComputeAPI(object):
block_migration, disk_over_commit):
version = '4.0'
cctxt = self.client.prepare(server=destination, version=version)
- return cctxt.call(ctxt, 'check_can_live_migrate_destination',
- instance=instance,
- block_migration=block_migration,
- disk_over_commit=disk_over_commit)
+ result = cctxt.call(ctxt, 'check_can_live_migrate_destination',
+ instance=instance,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
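+ # Newer computes return a LiveMigrateData object; older ones
+ # return a legacy dict, which is wrapped in the matching object.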
+ if isinstance(result, migrate_data_obj.LiveMigrateData):
+ return result
+ elif result:
+ return migrate_data_obj.LiveMigrateData.detect_implementation(
+ result)
+ else:
+ return result
def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
- version = '4.0'
+ dest_check_data_obj = dest_check_data
+ version = '4.8'
+ if not self.client.can_send_version(version):
+ version = '4.0'
+ if dest_check_data:
+ dest_check_data = dest_check_data.to_legacy_dict()
source = _compute_host(None, instance)
cctxt = self.client.prepare(server=source, version=version)
- return cctxt.call(ctxt, 'check_can_live_migrate_source',
- instance=instance,
- dest_check_data=dest_check_data)
+ result = cctxt.call(ctxt, 'check_can_live_migrate_source',
+ instance=instance,
+ dest_check_data=dest_check_data)
+ if isinstance(result, migrate_data_obj.LiveMigrateData):
+ return result
+ elif dest_check_data_obj and result:
+ dest_check_data_obj.from_legacy_dict(result)
+ return dest_check_data_obj
+ else:
+ return result
def check_instance_shared_storage(self, ctxt, instance, data, host=None):
version = '4.0'
@@ -467,12 +493,16 @@ class ComputeAPI(object):
cctxt.cast(ctxt, 'detach_interface',
instance=instance, port_id=port_id)
- def detach_volume(self, ctxt, instance, volume_id):
- version = '4.0'
+ def detach_volume(self, ctxt, instance, volume_id, attachment_id=None):
+ extra = {'attachment_id': attachment_id}
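+ # Computes older than 4.7 do not accept attachment_id, so drop
+ # it when falling back to version 4.0.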
+ version = '4.7'
+ if not self.client.can_send_version(version):
+ version = '4.0'
+ extra.pop('attachment_id')
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
cctxt.cast(ctxt, 'detach_volume',
- instance=instance, volume_id=volume_id)
+ instance=instance, volume_id=volume_id, **extra)
def finish_resize(self, ctxt, instance, migration, image, disk_info,
host, reservations=None):
@@ -594,7 +624,12 @@ class ComputeAPI(object):
def live_migration(self, ctxt, instance, dest, block_migration, host,
migration, migrate_data=None):
args = {'migration': migration}
- version = '4.2'
+ version = '4.8'
+ if not self.client.can_send_version(version):
+ version = '4.2'
+ if migrate_data:
+ migrate_data = migrate_data.to_legacy_dict(
+ pre_migration_result=True)
if not self.client.can_send_version(version):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
@@ -602,6 +637,13 @@ class ComputeAPI(object):
dest=dest, block_migration=block_migration,
migrate_data=migrate_data, **args)
+ def live_migration_force_complete(self, ctxt, instance, migration_id):
+ version = '4.9'
+ cctxt = self.client.prepare(server=_compute_host(None, instance),
+ version=version)
+ cctxt.cast(ctxt, 'live_migration_force_complete', instance=instance,
+ migration_id=migration_id)
+
def pause_instance(self, ctxt, instance):
version = '4.0'
cctxt = self.client.prepare(server=_compute_host(None, instance),
@@ -617,12 +659,24 @@ class ComputeAPI(object):
def pre_live_migration(self, ctxt, instance, block_migration, disk,
host, migrate_data=None):
- version = '4.0'
+ migrate_data_orig = migrate_data
+ version = '4.8'
+ if not self.client.can_send_version(version):
+ version = '4.0'
+ if migrate_data:
+ migrate_data = migrate_data.to_legacy_dict()
cctxt = self.client.prepare(server=host, version=version)
- return cctxt.call(ctxt, 'pre_live_migration',
- instance=instance,
- block_migration=block_migration,
- disk=disk, migrate_data=migrate_data)
+ result = cctxt.call(ctxt, 'pre_live_migration',
+ instance=instance,
+ block_migration=block_migration,
+ disk=disk, migrate_data=migrate_data)
+ if isinstance(result, migrate_data_obj.LiveMigrateData):
+ return result
+ elif migrate_data_orig and result:
+ migrate_data_orig.from_legacy_dict(result)
+ return migrate_data_orig
+ else:
+ return result
def prep_resize(self, ctxt, image, instance, instance_type, host,
reservations=None, request_spec=None,
@@ -682,11 +736,6 @@ class ComputeAPI(object):
recreate=recreate, on_shared_storage=on_shared_storage,
**extra)
- def refresh_provider_fw_rules(self, ctxt, host):
- version = '4.0'
- cctxt = self.client.prepare(server=host, version=version)
- cctxt.cast(ctxt, 'refresh_provider_fw_rules')
-
def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
'''Remove aggregate host.
@@ -710,7 +759,7 @@ class ComputeAPI(object):
cctxt.cast(ctxt, 'remove_fixed_ip_from_instance',
instance=instance, address=address)
- def remove_volume_connection(self, ctxt, instance, volume_id, host):
+ def remove_volume_connection(self, ctxt, volume_id, instance, host):
version = '4.0'
cctxt = self.client.prepare(server=host, version=version)
return cctxt.call(ctxt, 'remove_volume_connection',
@@ -768,7 +817,11 @@ class ComputeAPI(object):
def rollback_live_migration_at_destination(self, ctxt, instance, host,
destroy_disks=True,
migrate_data=None):
- version = '4.0'
+ version = '4.8'
+ if not self.client.can_send_version(version):
+ version = '4.0'
+ if migrate_data:
+ migrate_data = migrate_data.to_legacy_dict()
extra = {'destroy_disks': destroy_disks,
'migrate_data': migrate_data,
}
@@ -810,11 +863,7 @@ class ComputeAPI(object):
cctxt = self.client.prepare(server=_compute_host(None, instance),
version=version)
- volume_bdm = cctxt.call(ctxt, 'reserve_block_device_name', **kw)
- if not isinstance(volume_bdm, objects.BlockDeviceMapping):
- volume_bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
- ctxt, volume_id, instance.uuid)
- return volume_bdm
+ return cctxt.call(ctxt, 'reserve_block_device_name', **kw)
def backup_instance(self, ctxt, instance, image_id, backup_type,
rotation):
diff --git a/nova/compute/stats.py b/nova/compute/stats.py
index 5b6fe7be12..5d078e5d91 100644
--- a/nova/compute/stats.py
+++ b/nova/compute/stats.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_states
from nova.i18n import _
@@ -105,7 +106,7 @@ class Stats(dict):
(vm_state, task_state, os_type, project_id) = \
self._extract_state_from_instance(instance)
- if vm_state == vm_states.DELETED:
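+ # SHELVED_OFFLOADED instances no longer consume resources, so
+ # they stop counting toward num_instances as well.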
+ if vm_state in resource_tracker._REMOVED_STATES:
self._decrement("num_instances")
self.states.pop(uuid)
diff --git a/nova/conductor/__init__.py b/nova/conductor/__init__.py
index 04a9609c0e..395a95ba7f 100644
--- a/nova/conductor/__init__.py
+++ b/nova/conductor/__init__.py
@@ -12,14 +12,16 @@
# License for the specific language governing permissions and limitations
# under the License.
-import oslo_config.cfg
from nova.conductor import api as conductor_api
+import nova.conf
+
+CONF = nova.conf.CONF
def API(*args, **kwargs):
use_local = kwargs.pop('use_local', False)
- if oslo_config.cfg.CONF.conductor.use_local or use_local:
+ if CONF.conductor.use_local or use_local:
api = conductor_api.LocalAPI
else:
api = conductor_api.API
@@ -28,7 +30,7 @@ def API(*args, **kwargs):
def ComputeTaskAPI(*args, **kwargs):
use_local = kwargs.pop('use_local', False)
- if oslo_config.cfg.CONF.conductor.use_local or use_local:
+ if CONF.conductor.use_local or use_local:
api = conductor_api.LocalComputeTaskAPI
else:
api = conductor_api.ComputeTaskAPI
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index d993b4bfba..627d97b84c 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -14,7 +14,6 @@
"""Handles all requests to the conductor service."""
-from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_versionedobjects import base as ovo_base
@@ -22,33 +21,11 @@ from oslo_versionedobjects import base as ovo_base
from nova import baserpc
from nova.conductor import manager
from nova.conductor import rpcapi
+import nova.conf
from nova.i18n import _LI, _LW
from nova import utils
-conductor_opts = [
- cfg.BoolOpt('use_local',
- default=False,
- help='DEPRECATED: Perform nova-conductor operations locally. '
- 'This legacy mode was introduced to bridge a gap during '
- 'the transition to the conductor service. It no longer '
- 'represents a reasonable alternative for deployers. '
- 'Removal may be as early as 14.0',
- deprecated_for_removal=True),
- cfg.StrOpt('topic',
- default='conductor',
- help='The topic on which conductor nodes listen'),
- cfg.StrOpt('manager',
- default='nova.conductor.manager.ConductorManager',
- help='Full class name for the Manager for conductor'),
- cfg.IntOpt('workers',
- help='Number of workers for OpenStack Conductor service. '
- 'The default will be the number of CPUs available.')
-]
-conductor_group = cfg.OptGroup(name='conductor',
- title='Conductor Options')
-CONF = cfg.CONF
-CONF.register_group(conductor_group)
-CONF.register_opts(conductor_opts, conductor_group)
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -67,9 +44,6 @@ class LocalAPI(object):
# nothing to wait for in the local case.
pass
- def provider_fw_rule_get_all(self, context):
- return self._manager.provider_fw_rule_get_all(context)
-
def object_backport(self, context, objinst, target_version):
# NOTE(hanlind): This shouldn't be called anymore but leaving it for
# now just in case. Collect the object version manifest and redirect
@@ -101,11 +75,12 @@ class LocalComputeTaskAPI(object):
reservations=reservations, clean_shutdown=clean_shutdown)
def live_migrate_instance(self, context, instance, host_name,
- block_migration, disk_over_commit):
+ block_migration, disk_over_commit,
+ request_spec=None):
scheduler_hint = {'host': host_name}
self._manager.migrate_server(
context, instance, scheduler_hint, True, False, None,
- block_migration, disk_over_commit, None)
+ block_migration, disk_over_commit, None, request_spec=request_spec)
def build_instances(self, context, instances, image,
filter_properties, admin_password, injected_files,
@@ -120,14 +95,15 @@ class LocalComputeTaskAPI(object):
block_device_mapping=block_device_mapping,
legacy_bdm=legacy_bdm)
- def unshelve_instance(self, context, instance):
+ def unshelve_instance(self, context, instance, request_spec=None):
utils.spawn_n(self._manager.unshelve_instance, context,
- instance=instance)
+ instance=instance, request_spec=request_spec)
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
- preserve_ephemeral=False, host=None, kwargs=None):
+ preserve_ephemeral=False, host=None,
+ request_spec=None, kwargs=None):
# kwargs unused but required for cell compatibility.
utils.spawn_n(self._manager.rebuild_instance, context,
instance=instance,
@@ -140,7 +116,8 @@ class LocalComputeTaskAPI(object):
recreate=recreate,
on_shared_storage=on_shared_storage,
host=host,
- preserve_ephemeral=preserve_ephemeral)
+ preserve_ephemeral=preserve_ephemeral,
+ request_spec=request_spec)
class API(LocalAPI):
@@ -209,11 +186,12 @@ class ComputeTaskAPI(object):
reservations=reservations, clean_shutdown=clean_shutdown)
def live_migrate_instance(self, context, instance, host_name,
- block_migration, disk_over_commit):
+ block_migration, disk_over_commit,
+ request_spec=None):
scheduler_hint = {'host': host_name}
self.conductor_compute_rpcapi.migrate_server(
context, instance, scheduler_hint, True, False, None,
- block_migration, disk_over_commit, None)
+ block_migration, disk_over_commit, None, request_spec=request_spec)
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
@@ -227,14 +205,15 @@ class ComputeTaskAPI(object):
block_device_mapping=block_device_mapping,
legacy_bdm=legacy_bdm)
- def unshelve_instance(self, context, instance):
+ def unshelve_instance(self, context, instance, request_spec=None):
self.conductor_compute_rpcapi.unshelve_instance(context,
- instance=instance)
+ instance=instance, request_spec=request_spec)
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate=False, on_shared_storage=False,
- preserve_ephemeral=False, host=None, kwargs=None):
+ preserve_ephemeral=False, host=None,
+ request_spec=None, kwargs=None):
# kwargs unused but required for cell compatibility
self.conductor_compute_rpcapi.rebuild_instance(context,
instance=instance,
@@ -247,4 +226,5 @@ class ComputeTaskAPI(object):
recreate=recreate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
- host=host)
+ host=host,
+ request_spec=request_spec)
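
Throughout this change the new request_spec kwarg defaults to None, so older callers keep working unchanged. A hedged sketch of how a caller holding a persisted RequestSpec might use the new signature (the helper below and its variable names are illustrative, not part of this change):

    # Illustrative caller sketch, not part of this change: fetch the
    # RequestSpec persisted at boot and hand it to the new signature.
    from nova import exception
    from nova import objects

    def live_migrate_with_spec(task_api, context, instance, dest_host):
        try:
            spec = objects.RequestSpec.get_by_instance_uuid(
                context, instance.uuid)
        except exception.RequestSpecNotFound:
            # Older instances may predate persisted specs; the conductor
            # rebuilds a legacy spec when it receives None.
            spec = None
        task_api.live_migrate_instance(
            context, instance, dest_host,
            block_migration=False, disk_over_commit=False,
            request_spec=spec)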
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index b06dd3d3cd..68a65c0b0c 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -19,7 +19,6 @@ import copy
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
-from oslo_serialization import jsonutils
from oslo_utils import excutils
import six
@@ -68,9 +67,10 @@ class ConductorManager(manager.Manager):
self.compute_task_mgr = ComputeTaskManager()
self.additional_endpoints.append(self.compute_task_mgr)
+ # NOTE(hanlind): This can be removed in version 4.0 of the RPC API
def provider_fw_rule_get_all(self, context):
- rules = self.db.provider_fw_rule_get_all(context)
- return jsonutils.to_primitive(rules)
+ # NOTE(hanlind): Simulate an empty db result for compat reasons.
+ return []
def _object_dispatch(self, target, method, args, kwargs):
"""Dispatch a call to an object method.
@@ -144,7 +144,7 @@ class ComputeTaskManager(base.Base):
may involve coordinating activities on multiple compute nodes.
"""
- target = messaging.Target(namespace='compute_task', version='1.11')
+ target = messaging.Target(namespace='compute_task', version='1.14')
def __init__(self):
super(ComputeTaskManager, self).__init__()
@@ -175,7 +175,7 @@ class ComputeTaskManager(base.Base):
exception.UnsupportedPolicyException)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None,
- clean_shutdown=True):
+ clean_shutdown=True, request_spec=None):
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
@@ -191,7 +191,7 @@ class ComputeTaskManager(base.Base):
flavor = objects.Flavor.get_by_id(context, flavor['id'])
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
- block_migration, disk_over_commit)
+ block_migration, disk_over_commit, request_spec)
elif not live and not rebuild and flavor:
instance_uuid = instance.uuid
with compute_utils.EventReporter(context, 'cold_migrate',
@@ -250,15 +250,29 @@ class ComputeTaskManager(base.Base):
ex, request_spec):
scheduler_utils.set_vm_state_and_notify(
context, instance_uuid, 'compute_task', method, updates,
- ex, request_spec, self.db)
+ ex, request_spec)
def _cleanup_allocated_networks(
self, context, instance, requested_networks):
- self.network_api.deallocate_for_instance(
- context, instance, requested_networks=requested_networks)
+ try:
+ self.network_api.deallocate_for_instance(
+ context, instance, requested_networks=requested_networks)
+ except Exception:
+ msg = _LE('Failed to deallocate networks')
+ LOG.exception(msg, instance=instance)
+ return
+
+ instance.system_metadata['network_allocated'] = 'False'
+ try:
+ instance.save()
+ except exception.InstanceNotFound:
+ # NOTE: It's possible that we're cleaning up the networks
+ # because the instance was deleted. If that's the case then this
+ # exception will be raised by instance.save()
+ pass
def _live_migrate(self, context, instance, scheduler_hint,
- block_migration, disk_over_commit):
+ block_migration, disk_over_commit, request_spec):
destination = scheduler_hint.get("host")
def _set_vm_state(context, instance, ex, vm_state=None,
@@ -272,7 +286,7 @@ class ComputeTaskManager(base.Base):
dict(vm_state=vm_state,
task_state=task_state,
expected_task_state=task_states.MIGRATING,),
- ex, request_spec, self.db)
+ ex, request_spec)
migration = objects.Migration(context=context.elevated())
migration.dest_compute = destination
@@ -290,7 +304,7 @@ class ComputeTaskManager(base.Base):
task = self._build_live_migrate_task(context, instance, destination,
block_migration, disk_over_commit,
- migration)
+ migration, request_spec)
try:
task.execute()
except (exception.NoValidHost,
@@ -304,7 +318,8 @@ class ComputeTaskManager(base.Base):
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
- exception.LiveMigrationWithOldNovaNotSafe) as ex:
+ exception.LiveMigrationWithOldNovaNotSafe,
+ exception.MigrationSchedulerRPCError) as ex:
with excutils.save_and_reraise_exception():
# TODO(johngarbutt) - eventually need instance actions here
_set_vm_state(context, instance, ex, instance.vm_state)
@@ -322,13 +337,15 @@ class ComputeTaskManager(base.Base):
raise exception.MigrationError(reason=six.text_type(ex))
def _build_live_migrate_task(self, context, instance, destination,
- block_migration, disk_over_commit, migration):
+ block_migration, disk_over_commit, migration,
+ request_spec=None):
return live_migrate.LiveMigrationTask(context, instance,
destination, block_migration,
disk_over_commit, migration,
self.compute_rpcapi,
self.servicegroup_api,
- self.scheduler_client)
+ self.scheduler_client,
+ request_spec)
def _build_cold_migrate_task(self, context, instance, flavor,
filter_properties, request_spec, reservations,
@@ -416,7 +433,7 @@ class ComputeTaskManager(base.Base):
hosts = self.scheduler_client.select_destinations(context, spec_obj)
return hosts
- def unshelve_instance(self, context, instance):
+ def unshelve_instance(self, context, instance, request_spec=None):
sys_meta = instance.system_metadata
def safe_image_show(ctx, image_id):
@@ -454,11 +471,24 @@ class ComputeTaskManager(base.Base):
try:
with compute_utils.EventReporter(context, 'schedule_instances',
instance.uuid):
- filter_properties = {}
+ if not request_spec:
+ # NOTE(sbauza): We were unable to find an original
+ # RequestSpec object - probably because the instance is
+ # old. We need to mock that the old way
+ filter_properties = {}
+ request_spec = scheduler_utils.build_request_spec(
+ context, image, [instance])
+ else:
+ # TODO(sbauza): Provide directly the RequestSpec object
+ # when _schedule_instances(),
+ # populate_filter_properties and populate_retry()
+ # accept it
+ filter_properties = request_spec.\
+ to_legacy_filter_properties_dict()
+ request_spec = request_spec.\
+ to_legacy_request_spec_dict()
scheduler_utils.populate_retry(filter_properties,
instance.uuid)
- request_spec = scheduler_utils.build_request_spec(
- context, image, [instance])
hosts = self._schedule_instances(
context, request_spec, filter_properties)
host_state = hosts[0]
@@ -491,18 +521,32 @@ class ComputeTaskManager(base.Base):
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
- preserve_ephemeral=False, host=None):
+ preserve_ephemeral=False, host=None,
+ request_spec=None):
with compute_utils.EventReporter(context, 'rebuild_server',
instance.uuid):
node = limits = None
if not host:
- # NOTE(lcostantino): Retrieve scheduler filters for the
- # instance when the feature is available
- filter_properties = {'ignore_hosts': [instance.host]}
- try:
+ if not request_spec:
+ # NOTE(sbauza): We were unable to find an original
+ # RequestSpec object - probably because the instance is old
+ # We need to mock that the old way
+ filter_properties = {'ignore_hosts': [instance.host]}
request_spec = scheduler_utils.build_request_spec(
context, image_ref, [instance])
+ else:
+ # NOTE(sbauza): Augment the RequestSpec object by excluding
+ # the source host for avoiding the scheduler to pick it
+ request_spec.ignore_hosts = request_spec.ignore_hosts or []
+ request_spec.ignore_hosts.append(instance.host)
+ # TODO(sbauza): Provide directly the RequestSpec object
+ # when _schedule_instances() and _set_vm_state_and_notify()
+ # accept it
+ filter_properties = request_spec.\
+ to_legacy_filter_properties_dict()
+ request_spec = request_spec.to_legacy_request_spec_dict()
+ try:
hosts = self._schedule_instances(
context, request_spec, filter_properties)
host_dict = hosts.pop(0)
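
unshelve_instance() and rebuild_instance() above share the same compatibility shape: accept a RequestSpec if one exists, otherwise rebuild the legacy dicts the old way. A condensed sketch of that pattern (the helper name is illustrative and does not exist in the tree):

    # Condensed sketch of the compat pattern above.
    from nova.scheduler import utils as scheduler_utils

    def _legacy_scheduling_inputs(context, image, instance, request_spec):
        if not request_spec:
            # No persisted spec (old instance): build it the old way.
            filter_properties = {}
            request_spec = scheduler_utils.build_request_spec(
                context, image, [instance])
        else:
            # Flatten the object until _schedule_instances() and
            # friends accept a RequestSpec directly.
            filter_properties = (
                request_spec.to_legacy_filter_properties_dict())
            request_spec = request_spec.to_legacy_request_spec_dict()
        return request_spec, filter_properties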
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 6dd32f3ac6..0a300ce6f4 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -20,10 +20,11 @@ import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_versionedobjects import base as ovo_base
+import nova.conf
from nova.objects import base as objects_base
from nova import rpc
-CONF = cfg.CONF
+CONF = nova.conf.CONF
rpcapi_cap_opt = cfg.StrOpt('conductor',
help='Set a version cap for messages sent to conductor services')
@@ -195,6 +196,8 @@ class ConductorAPI(object):
... Liberty supports message version 3.0. So, any changes to
existing methods in 3.x after that point should be done such
that they can handle the version_cap being set to 3.0.
+
+ * Remove provider_fw_rule_get_all()
"""
VERSION_ALIASES = {
@@ -216,10 +219,6 @@ class ConductorAPI(object):
version_cap=version_cap,
serializer=serializer)
- def provider_fw_rule_get_all(self, context):
- cctxt = self.client.prepare()
- return cctxt.call(context, 'provider_fw_rule_get_all')
-
# TODO(hanlind): This method can be removed once oslo.versionedobjects
# has been converted to use version_manifests in remotable_classmethod
# operations, which will use the new class action handler.
@@ -268,7 +267,9 @@ class ComputeTaskAPI(object):
1.9 - Converted requested_networks to NetworkRequestList object
1.10 - Made migrate_server() and build_instances() send flavor objects
1.11 - Added clean_shutdown to migrate_server()
-
+ 1.12 - Added request_spec to rebuild_instance()
+ 1.13 - Added request_spec to migrate_server()
+ 1.14 - Added request_spec to unshelve_instance()
"""
def __init__(self):
@@ -281,14 +282,19 @@ class ComputeTaskAPI(object):
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit,
- reservations=None, clean_shutdown=True):
+ reservations=None, clean_shutdown=True, request_spec=None):
kw = {'instance': instance, 'scheduler_hint': scheduler_hint,
'live': live, 'rebuild': rebuild, 'flavor': flavor,
'block_migration': block_migration,
'disk_over_commit': disk_over_commit,
'reservations': reservations,
- 'clean_shutdown': clean_shutdown}
- version = '1.11'
+ 'clean_shutdown': clean_shutdown,
+ 'request_spec': request_spec,
+ }
+ version = '1.13'
+ if not self.client.can_send_version(version):
+ del kw['request_spec']
+ version = '1.11'
if not self.client.can_send_version(version):
del kw['clean_shutdown']
version = '1.10'
@@ -332,20 +338,37 @@ class ComputeTaskAPI(object):
cctxt = self.client.prepare(version=version)
cctxt.cast(context, 'build_instances', **kw)
- def unshelve_instance(self, context, instance):
- cctxt = self.client.prepare(version='1.3')
- cctxt.cast(context, 'unshelve_instance', instance=instance)
+ def unshelve_instance(self, context, instance, request_spec=None):
+ version = '1.14'
+ kw = {'instance': instance,
+ 'request_spec': request_spec
+ }
+ if not self.client.can_send_version(version):
+ version = '1.3'
+ del kw['request_spec']
+ cctxt = self.client.prepare(version=version)
+ cctxt.cast(context, 'unshelve_instance', **kw)
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
recreate=False, on_shared_storage=False, host=None,
- preserve_ephemeral=False, kwargs=None):
- cctxt = self.client.prepare(version='1.8')
- cctxt.cast(ctxt, 'rebuild_instance',
- instance=instance, new_pass=new_pass,
- injected_files=injected_files, image_ref=image_ref,
- orig_image_ref=orig_image_ref,
- orig_sys_metadata=orig_sys_metadata, bdms=bdms,
- recreate=recreate, on_shared_storage=on_shared_storage,
- preserve_ephemeral=preserve_ephemeral,
- host=host)
+ preserve_ephemeral=False, request_spec=None, kwargs=None):
+ version = '1.12'
+ kw = {'instance': instance,
+ 'new_pass': new_pass,
+ 'injected_files': injected_files,
+ 'image_ref': image_ref,
+ 'orig_image_ref': orig_image_ref,
+ 'orig_sys_metadata': orig_sys_metadata,
+ 'bdms': bdms,
+ 'recreate': recreate,
+ 'on_shared_storage': on_shared_storage,
+ 'preserve_ephemeral': preserve_ephemeral,
+ 'host': host,
+ 'request_spec': request_spec,
+ }
+ if not self.client.can_send_version(version):
+ version = '1.8'
+ del kw['request_spec']
+ cctxt = self.client.prepare(version=version)
+ cctxt.cast(ctxt, 'rebuild_instance', **kw)
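
The client methods above all negotiate downward in single steps: try the newest version, and for each cap that cannot be satisfied, drop the kwargs that version introduced and retry one version older. A generic sketch of the idiom (function and variable names are illustrative):

    # Generic sketch of the negotiation idiom; names are illustrative.
    def _cast_with_fallback(client, context, method, kw, ladder):
        # 'ladder' lists (version, kwargs_introduced_at_that_version),
        # newest first, e.g.:
        #   [('1.13', ['request_spec']),
        #    ('1.11', ['clean_shutdown']),
        #    ('1.10', [])]
        for version, added in ladder:
            if client.can_send_version(version):
                cctxt = client.prepare(version=version)
                return cctxt.cast(context, method, **kw)
            # Peer is older: strip what this version added and step down.
            for name in added:
                kw.pop(name, None)
        raise RuntimeError('no mutually supported RPC version')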
diff --git a/nova/conductor/tasks/live_migrate.py b/nova/conductor/tasks/live_migrate.py
index 1974fc2dd6..93560c2347 100644
--- a/nova/conductor/tasks/live_migrate.py
+++ b/nova/conductor/tasks/live_migrate.py
@@ -13,6 +13,7 @@
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
+import six
from nova.compute import power_state
from nova.conductor.tasks import base
@@ -37,7 +38,7 @@ CONF.register_opt(migrate_opt)
class LiveMigrationTask(base.TaskBase):
def __init__(self, context, instance, destination,
block_migration, disk_over_commit, migration, compute_rpcapi,
- servicegroup_api, scheduler_client):
+ servicegroup_api, scheduler_client, request_spec=None):
super(LiveMigrationTask, self).__init__(context, instance)
self.destination = destination
self.block_migration = block_migration
@@ -49,6 +50,7 @@ class LiveMigrationTask(base.TaskBase):
self.compute_rpcapi = compute_rpcapi
self.servicegroup_api = servicegroup_api
self.scheduler_client = scheduler_client
+ self.request_spec = request_spec
def _execute(self):
self._check_instance_is_active()
@@ -164,21 +166,40 @@ class LiveMigrationTask(base.TaskBase):
attempted_hosts = [self.source]
image = utils.get_image_from_system_metadata(
self.instance.system_metadata)
- request_spec = scheduler_utils.build_request_spec(self.context, image,
- [self.instance])
+ filter_properties = {'ignore_hosts': attempted_hosts}
+ # TODO(sbauza): Remove that once setup_instance_group() accepts a
+ # RequestSpec object
+ request_spec = {'instance_properties': {'uuid': self.instance.uuid}}
+ scheduler_utils.setup_instance_group(self.context, request_spec,
+ filter_properties)
+ if not self.request_spec:
+ # NOTE(sbauza): We were unable to find an original RequestSpec
+ # object - probably because the instance is old.
+ # We need to mock that the old way
+ request_spec = objects.RequestSpec.from_components(
+ self.context, self.instance.uuid, image,
+ self.instance.flavor, self.instance.numa_topology,
+ self.instance.pci_requests,
+ filter_properties, None, self.instance.availability_zone
+ )
+ else:
+ request_spec = self.request_spec
host = None
while host is None:
self._check_not_over_max_retries(attempted_hosts)
- filter_properties = {'ignore_hosts': attempted_hosts}
- scheduler_utils.setup_instance_group(self.context, request_spec,
- filter_properties)
- # TODO(sbauza): Hydrate here the object until we modify the
- # scheduler.utils methods to directly use the RequestSpec object
- spec_obj = objects.RequestSpec.from_primitives(
- self.context, request_spec, filter_properties)
- host = self.scheduler_client.select_destinations(self.context,
- spec_obj)[0]['host']
+ request_spec.ignore_hosts = attempted_hosts
+ try:
+ host = self.scheduler_client.select_destinations(self.context,
+ request_spec)[0]['host']
+ except messaging.RemoteError as ex:
+ # TODO(ShaoHe Feng) There maybe multi-scheduler, and the
+ # scheduling algorithm is R-R, we can let other scheduler try.
+ # Note(ShaoHe Feng) There are types of RemoteError, such as
+ # NoSuchMethod, UnsupportedVersion, we can distinguish it by
+ # ex.exc_type.
+ raise exception.MigrationSchedulerRPCError(
+ reason=six.text_type(ex))
try:
self._check_compatible_with_source_hypervisor(host)
self._call_livem_checks_on_host(host)
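
The RemoteError handling above follows a small, reusable idiom: catch the transport-level error from the scheduler RPC call and re-raise it as a nova exception with the original text preserved. An isolated sketch (the function name is illustrative):

    # Isolated sketch of the translation idiom used above.
    import oslo_messaging as messaging
    import six

    from nova import exception

    def _select_host(scheduler_client, context, request_spec):
        try:
            dests = scheduler_client.select_destinations(
                context, request_spec)
            return dests[0]['host']
        except messaging.RemoteError as ex:
            # Surface a nova exception instead of a transport error.
            raise exception.MigrationSchedulerRPCError(
                reason=six.text_type(ex))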
diff --git a/nova/conf/__init__.py b/nova/conf/__init__.py
index c9b8a7cd80..3b66a39651 100644
--- a/nova/conf/__init__.py
+++ b/nova/conf/__init__.py
@@ -21,15 +21,16 @@ from oslo_config import cfg
# from nova.conf import api
# from nova.conf import api_database
-# from nova.conf import availability_zone
+from nova.conf import availability_zone
# from nova.conf import aws
# from nova.conf import barbican
# from nova.conf import base
-# from nova.conf import cells
+from nova.conf import cells
+from nova.conf import cert
# from nova.conf import cinder
# from nova.conf import cloudpipe
from nova.conf import compute
-# from nova.conf import conductor
+from nova.conf import conductor
# from nova.conf import configdrive
# from nova.conf import console
# from nova.conf import cors
@@ -57,6 +58,7 @@ from nova.conf import ironic
# from nova.conf import neutron
# from nova.conf import notification
# from nova.conf import osapi_v21
+from nova.conf import pci
# from nova.conf import rdp
from nova.conf import scheduler
# from nova.conf import security
@@ -67,10 +69,10 @@ from nova.conf import serial_console
# from nova.conf import upgrade_levels
from nova.conf import virt
# from nova.conf import vmware
-# from nova.conf import vnc
+from nova.conf import vnc
# from nova.conf import volume
# from nova.conf import workarounds
-# from nova.conf import wsgi
+from nova.conf import wsgi
# from nova.conf import xenserver
# from nova.conf import xvp
# from nova.conf import zookeeper
@@ -79,15 +81,16 @@ CONF = cfg.CONF
# api.register_opts(CONF)
# api_database.register_opts(CONF)
-# availability_zone.register_opts(CONF)
+availability_zone.register_opts(CONF)
# aws.register_opts(CONF)
# barbican.register_opts(CONF)
# base.register_opts(CONF)
-# cells.register_opts(CONF)
+cells.register_opts(CONF)
+cert.register_opts(CONF)
# cinder.register_opts(CONF)
# cloudpipe.register_opts(CONF)
compute.register_opts(CONF)
-# conductor.register_opts(CONF)
+conductor.register_opts(CONF)
# configdrive.register_opts(CONF)
# console.register_opts(CONF)
# cors.register_opts(CONF)
@@ -115,6 +118,7 @@ ironic.register_opts(CONF)
# neutron.register_opts(CONF)
# notification.register_opts(CONF)
# osapi_v21.register_opts(CONF)
+pci.register_opts(CONF)
# rdp.register_opts(CONF)
scheduler.register_opts(CONF)
# security.register_opts(CONF)
@@ -125,10 +129,10 @@ serial_console.register_opts(CONF)
# upgrade_levels.register_opts(CONF)
virt.register_opts(CONF)
# vmware.register_opts(CONF)
-# vnc.register_opts(CONF)
+vnc.register_opts(CONF)
# volume.register_opts(CONF)
# workarounds.register_opts(CONF)
-# wsgi.register_opts(CONF)
+wsgi.register_opts(CONF)
# xenserver.register_opts(CONF)
# xvp.register_opts(CONF)
# zookeeper.register_opts(CONF)
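
Once a module flips here from commented-out to imported-and-registered, any consumer reads its options through the package-level CONF. A minimal consumer sketch:

    # Minimal consumer sketch: options registered above are reachable
    # through the package-level CONF.
    import nova.conf

    CONF = nova.conf.CONF

    def conductor_topic():
        # [conductor]/topic was registered via nova.conf.conductor.
        return CONF.conductor.topic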
diff --git a/nova/conf/availability_zone.py b/nova/conf/availability_zone.py
new file mode 100644
index 0000000000..3cbaaa7164
--- /dev/null
+++ b/nova/conf/availability_zone.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2013 Intel, Inc.
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+internal_service_availability_zone = cfg.StrOpt(
+ 'internal_service_availability_zone',
+ default='internal',
+ help='The availability_zone to show internal services under')
+
+default_availability_zone = cfg.StrOpt(
+ 'default_availability_zone',
+ default='nova',
+ help='Default compute node availability_zone')
+
+ALL_OPTS = [internal_service_availability_zone,
+ default_availability_zone]
+
+
+def register_opts(conf):
+ conf.register_opts(ALL_OPTS)
+
+
+def list_opts():
+ return {'DEFAULT': ALL_OPTS}
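
Every nova.conf module follows this two-function contract: register_opts() attaches the options to a ConfigOpts instance, and list_opts() feeds the sample-config generator. A sketch exercising the module in isolation, as a unit test might:

    # Sketch: exercising the contract in isolation, as a test might.
    from oslo_config import cfg

    from nova.conf import availability_zone

    conf = cfg.ConfigOpts()
    availability_zone.register_opts(conf)
    conf([])  # parse an empty command line so defaults are readable
    assert conf.default_availability_zone == 'nova'
    assert 'DEFAULT' in availability_zone.list_opts()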
diff --git a/nova/conf/cells.py b/nova/conf/cells.py
new file mode 100644
index 0000000000..ca1ecd62db
--- /dev/null
+++ b/nova/conf/cells.py
@@ -0,0 +1,806 @@
+# Copyright 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import itertools
+
+from oslo_config import cfg
+
+
+cells_opts = [
+ cfg.BoolOpt('enable',
+ default=False,
+ help="""
+Enable cell functionality
+
+When this functionality is enabled, it lets you scale an OpenStack
+Compute cloud in a more distributed fashion without having to use
+complicated technologies like database and message queue clustering.
+Cells are configured as a tree. The top-level cell should have a host
+that runs a nova-api service, but no nova-compute services. Each
+child cell should run all of the typical nova-* services in a regular
+Compute cloud except for nova-api. You can think of cells as a normal
+Compute deployment in that each cell has its own database server and
+message queue broker.
+
+Possible values:
+
+* True: Enables the feature
+* False: Disables the feature
+
+Services which consume this:
+
+* nova-api
+* nova-cells
+* nova-compute
+
+Related options:
+
+* name: A unique cell name must be given when this functionality
+ is enabled.
+* cell_type: Cell type should be defined for all cells.
+"""),
+ cfg.StrOpt('topic',
+ default='cells',
+ help="""
+Topic
+
+This is the message queue topic that cells nodes listen on. It is
+used when the cells service is started up to configure the queue,
+and whenever an RPC call to the cells service is made.
+
+Possible values:
+
+* cells: This is the recommended and the default value.
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+ cfg.StrOpt('manager',
+ default='nova.cells.manager.CellsManager',
+ help="""
+Manager for cells
+
+The nova-cells manager class. This class defines RPC methods that
+the local cell may call. This class is NOT used for messages coming
+from other cells. That communication is driver-specific.
+
+Communication to other cells happens via the nova.cells.messaging module.
+The MessageRunner from that module will handle routing the message to
+the correct cell via the communication driver. Most methods below
+create 'targeted' (where we want to route a message to a specific cell)
+or 'broadcast' (where we want a message to go to multiple cells)
+messages.
+
+Scheduling requests get passed to the scheduler class.
+
+Possible values:
+
+* 'nova.cells.manager.CellsManager' is the only possible value for
+ this option as of the Mitaka release
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+ cfg.StrOpt('name',
+ default='nova',
+ help="""
+Name of the current cell
+
+This value must be unique for each cell. The name of a cell is used
+as its id; leaving this option unset or setting the same name for
+two or more cells may cause unexpected behaviour.
+
+Possible values:
+
+* Unique name string
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* enabled: This option is meaningful only when cells service
+ is enabled
+"""),
+ cfg.ListOpt('capabilities',
+ default=['hypervisor=xenserver;kvm', 'os=linux;windows'],
+ help="""
+Cell capabilities
+
+List of arbitrary key=value pairs defining capabilities of the
+current cell to be sent to the parent cells. These capabilities
+are intended to be used in cells scheduler filters/weighers.
+
+Possible values:
+
+* key=value pairs list, for example:
+ ``hypervisor=xenserver;kvm,os=linux;windows``
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+ cfg.IntOpt('call_timeout',
+ default=60,
+ help="""
+Call timeout
+
+The cell messaging module waits for responses to be put into the
+eventlet queue. This option defines the number of seconds to wait
+for a response from a call to a cell.
+
+Possible values:
+
+* Time in seconds.
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+ cfg.FloatOpt('reserve_percent',
+ default=10.0,
+ help="""
+Reserve percentage
+
+Percentage of cell capacity to hold in reserve, so the minimum
+amount of free resource is considered to be:
+ min_free = total * (reserve_percent / 100.0)
+This option affects both memory and disk utilization.
+The primary purpose of this reserve is to ensure some space is
+available for users who want to resize their instance to be larger.
+Note that currently once the capacity expands into this reserve
+space this option is ignored.
+
+Possible values:
+
+* Float percentage value
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+ cfg.StrOpt('cell_type',
+ default='compute',
+ choices=('api', 'compute'),
+ help="""
+Type of cell
+
+When cells feature is enabled the hosts in the OpenStack Compute
+cloud are partitioned into groups. Cells are configured as a tree.
+The top-level cell's cell_type must be set to ``api``. All other
+cells are defined as a ``compute cell`` by default.
+
+Possible values:
+
+* api: Cell type of top-level cell.
+* compute: Cell type of all child cells. (Default)
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* compute_api_class: This option must be set to cells api driver
+ for the top-level cell (nova.compute.cells_api.ComputeCellsAPI)
+* quota_driver: Disable quota checking for the child cells.
+ (nova.quota.NoopQuotaDriver)
+"""),
+ cfg.IntOpt("mute_child_interval",
+ default=300,
+ help="""
+Mute child interval
+
+Number of seconds after which a child cell that has not sent a
+capability or capacity update is treated as a mute cell. A mute
+child cell is then weighed so as to strongly recommend that it be
+skipped.
+
+Possible values:
+
+* Time in seconds.
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+ cfg.IntOpt('bandwidth_update_interval',
+ default=600,
+ help="""
+Bandwidth update interval
+
+Seconds between bandwidth usage cache updates for cells.
+
+Possible values:
+
+* Time in seconds.
+
+Services which consume this:
+
+* nova-compute
+
+Related options:
+
+* None
+"""),
+ cfg.IntOpt('instance_update_sync_database_limit',
+ default=100,
+ help="""
+Instance update sync database limit
+
+Number of instances to pull from the database at one time for
+a sync. If there are more instances to update, the results will
+be paged through.
+
+Possible values:
+
+* Number of instances.
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+]
+
+mute_weigher_opts = [
+ cfg.FloatOpt('mute_weight_multiplier',
+ default=-10000.0,
+ help="""
+Mute weight multiplier
+
+Multiplier used to weigh mute children. Mute child cells are
+recommended to be skipped, so their weight is multiplied by this
+negative value.
+
+Possible values:
+
+* Negative numeric number
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+]
+
+ram_weigher_opts = [
+ cfg.FloatOpt('ram_weight_multiplier',
+ default=10.0,
+ help="""
+Ram weight multiplier
+
+Multiplier used for weighing ram. Negative numbers indicate that
+Compute should stack VMs on one host instead of spreading out new
+VMs to more hosts in the cell.
+
+Possible values:
+
+* Numeric multiplier
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+]
+
+weigher_opts = [
+ cfg.FloatOpt('offset_weight_multiplier',
+ default=1.0,
+ help="""
+Offset weight multiplier
+
+Multiplier used by the offset weigher. Cells with higher
+weight_offsets in the DB will be preferred. The weight_offset
+is a property of a cell stored in the database. It can be used
+by a deployer to have scheduling decisions favor or disfavor
+cells based on the setting.
+
+Possible values:
+
+* Numeric multiplier
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+]
+
+cell_manager_opts = [
+ cfg.StrOpt('driver',
+ default='nova.cells.rpc_driver.CellsRPCDriver',
+ help="""
+Cells communication driver
+
+Driver for cell<->cell communication via RPC. This is used to
+setup the RPC consumers as well as to send a message to another cell.
+'nova.cells.rpc_driver.CellsRPCDriver' starts up 2 separate servers
+for handling inter-cell communication via RPC.
+
+Possible values:
+
+* 'nova.cells.rpc_driver.CellsRPCDriver' is the default driver
+* Otherwise it should be the full Python path to the class to be used
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+ cfg.IntOpt("instance_updated_at_threshold",
+ default=3600,
+ help="""
+Instance updated at threshold
+
+Number of seconds after an instance was updated or deleted to
+continue to update cells. This option lets the cells manager only
+attempt to sync instances that have been updated recently.
+For example, a threshold of 3600 means to only update instances
+that have been modified in the last hour.
+
+Possible values:
+
+* Threshold in seconds
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* This value is used with the ``instance_update_num_instances``
+ value in a periodic task run.
+"""),
+ cfg.IntOpt("instance_update_num_instances",
+ default=1,
+ help="""
+Instance update num instances
+
+On every run of the periodic task, the nova cells manager will attempt
+to sync instance_update_num_instances instances. When the
+manager gets the list of instances, it shuffles them so that multiple
+nova-cells services do not attempt to sync the same instances in
+lockstep.
+
+Possible values:
+
+* Positive integer number
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* This value is used with the ``instance_updated_at_threshold``
+ value in a periodic task run.
+""")
+]
+
+cell_messaging_opts = [
+ cfg.IntOpt('max_hop_count',
+ default=10,
+ help="""
+Maximum hop count
+
+When processing a targeted message, if the local cell is not the
+target, a route is defined between neighbouring cells, and the
+message is processed along the whole routing path. This option
+defines the maximum number of hops allowed to reach the target.
+
+Possible values:
+
+* Positive integer value
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+ cfg.StrOpt('scheduler',
+ default='nova.cells.scheduler.CellsScheduler',
+ help="""
+Cells scheduler
+
+The class of the driver used by the cells scheduler. This should be
+the full Python path to the class to be used. If nothing is specified
+in this option, the CellsScheduler is used.
+
+
+Possible values:
+
+* 'nova.cells.scheduler.CellsScheduler' is the default option
+* Otherwise it should be the full Python path to the class to be used
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+""")
+]
+
+cell_rpc_driver_opts = [
+ cfg.StrOpt('rpc_driver_queue_base',
+ default='cells.intercell',
+ help="""
+RPC driver queue base
+
+When sending a message to another cell by JSON-ifying the message
+and making an RPC cast to 'process_message', a base queue is used.
+This option defines the base queue name to be used when communicating
+between cells. Various topics by message type will be appended to this.
+
+Possible values:
+
+* The base queue name to be used when communicating between cells.
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+""")
+]
+
+cell_scheduler_opts = [
+ cfg.ListOpt('scheduler_filter_classes',
+ default=['nova.cells.filters.all_filters'],
+ help="""
+Scheduler filter classes
+
+Filter classes the cells scheduler should use. An entry of
+"nova.cells.filters.all_filters" maps to all cells filters
+included with nova. As of the Mitaka release the following
+filter classes are available:
+
+Different cell filter: A scheduler hint of 'different_cell'
+with a value of a full cell name may be specified to route
+a build away from a particular cell.
+
+Image properties filter: Image metadata named
+'hypervisor_version_requires' with a version specification
+may be specified to ensure the build goes to a cell which
+has hypervisors of the required version. If either the version
+requirement on the image or the hypervisor capability of the
+cell is not present, this filter returns without filtering out
+the cells.
+
+Target cell filter: A scheduler hint of 'target_cell' with a
+value of a full cell name may be specified to route a build to
+a particular cell. No error handling is done as there's no way
+to know whether the full path is valid.
+
+As an admin user, you can also add a filter that directs builds
+to a particular cell.
+
+
+Possible values:
+
+* 'nova.cells.filters.all_filters' is the default option
+* Otherwise it should be the full Python path to the class to be used
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+ cfg.ListOpt('scheduler_weight_classes',
+ default=['nova.cells.weights.all_weighers'],
+ help="""
+Scheduler weight classes
+
+Weigher classes the cells scheduler should use. An entry of
+"nova.cells.weights.all_weighers" maps to all cell weighers
+included with nova. As of the Mitaka release the following
+weight classes are available:
+
+mute_child: Downgrades the likelihood of child cells being
+chosen for scheduling requests, which haven't sent capacity
+or capability updates in a while. Options include
+mute_weight_multiplier (multiplier for mute children; value
+should be negative).
+
+ram_by_instance_type: Select cells with the most RAM capacity
+for the instance type being requested. Because higher weights
+win, Compute returns the number of available units for the
+instance type requested. The ram_weight_multiplier option defaults
+to 10.0 that adds to the weight by a factor of 10. Use a negative
+number to stack VMs on one host instead of spreading out new VMs
+to more hosts in the cell.
+
+weight_offset: Allows modifying the database to weight a particular
+cell. The highest weight will be the first cell to be scheduled for
+launching an instance. When the weight_offset of a cell is set to 0,
+it is unlikely to be picked but it could be picked if other cells
+have a lower weight, like if they're full. And when the weight_offset
+is set to a very high value (for example, '999999999999999'), it is
+likely to be picked if no other cell has a higher weight.
+
+Possible values:
+
+* 'nova.cells.weights.all_weighers' is the default option
+* Otherwise it should be the full Python path to the class to be used
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+ cfg.IntOpt('scheduler_retries',
+ default=10,
+ help="""
+Scheduler retries
+
+How many retries when no cells are available. Specifies how many
+times the scheduler tries to launch a new instance when no cells
+are available.
+
+Possible values:
+
+* Positive integer value
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* This value is used with the ``scheduler_retry_delay`` value
+ while retrying to find a suitable cell.
+"""),
+ cfg.IntOpt('scheduler_retry_delay',
+ default=2,
+ help="""
+Scheduler retry delay
+
+Specifies the delay (in seconds) between scheduling retries when no
+cell can be found to place the new instance on. When the instance
+could not be scheduled to a cell after ``scheduler_retries`` in
+combination with ``scheduler_retry_delay``, then the scheduling
+of the instance failed.
+
+Possible values:
+
+* Time in seconds.
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* This value is used with the ``scheduler_retries`` value
+ while retrying to find a suitable cell.
+""")
+]
+
+cell_state_manager_opts = [
+ cfg.IntOpt('db_check_interval',
+ default=60,
+ help="""
+DB check interval
+
+The cell state manager updates the status of all cells from the DB
+only after this interval has passed; otherwise cached status is
+used. If this value is 0 or negative, cell status is updated from
+the DB whenever a state is needed.
+
+Possible values:
+
+* Interval time, in seconds.
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+"""),
+ cfg.StrOpt('cells_config',
+ help="""
+Optional cells configuration
+
+Configuration file from which to read cells configuration. If given,
+overrides reading cells from the database.
+
+Cells store all inter-cell communication data, including user names
+and passwords, in the database. Because the cells data is not updated
+very frequently, use this option to specify a JSON file to store
+cells data. With this configuration, the database is no longer
+consulted when reloading the cells data. The file must have columns
+present in the Cell model (excluding common database fields and the
+id column). You must specify the queue connection information through
+a transport_url field, instead of username, password, and so on.
+
+The transport_url has the following form:
+rabbit://USERNAME:PASSWORD@HOSTNAME:PORT/VIRTUAL_HOST
+
+Possible values:
+
+The scheme can be either qpid or rabbit; the following sample shows
+this optional configuration:
+
+ {
+ "parent": {
+ "name": "parent",
+ "api_url": "http://api.example.com:8774",
+ "transport_url": "rabbit://rabbit.example.com",
+ "weight_offset": 0.0,
+ "weight_scale": 1.0,
+ "is_parent": true
+ },
+ "cell1": {
+ "name": "cell1",
+ "api_url": "http://api.example.com:8774",
+ "transport_url": "rabbit://rabbit1.example.com",
+ "weight_offset": 0.0,
+ "weight_scale": 1.0,
+ "is_parent": false
+ },
+ "cell2": {
+ "name": "cell2",
+ "api_url": "http://api.example.com:8774",
+ "transport_url": "rabbit://rabbit2.example.com",
+ "weight_offset": 0.0,
+ "weight_scale": 1.0,
+ "is_parent": false
+ }
+ }
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+""")
+]
+
+
+rpcapi_cap_intercell_opt = cfg.StrOpt('intercell',
+ help="""
+Intercell version
+
+Intercell RPC API is the client side of the Cell<->Cell RPC API.
+Use this option to set a version cap for messages sent between
+cells services.
+
+Possible values:
+
+* None: This is the default value.
+* grizzly: message version 1.0.
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+""")
+
+
+rpcapi_cap_cells_opt = cfg.StrOpt('cells',
+ help="""
+Cells version
+
+Cells client-side RPC API version. Use this option to set a version
+cap for messages sent to local cells services.
+
+Possible values:
+
+* None: This is the default value.
+* grizzly: message version 1.6.
+* havana: message version 1.24.
+* icehouse: message version 1.27.
+* juno: message version 1.29.
+* kilo: message version 1.34.
+* liberty: message version 1.37.
+
+Services which consume this:
+
+* nova-cells
+
+Related options:
+
+* None
+""")
+
+
+ALL_CELLS_OPTS = list(itertools.chain(
+ cells_opts,
+ mute_weigher_opts,
+ ram_weigher_opts,
+ weigher_opts,
+ cell_manager_opts,
+ cell_messaging_opts,
+ cell_rpc_driver_opts,
+ cell_scheduler_opts,
+ cell_state_manager_opts
+ ))
+
+ALL_RPCAPI_CAP_OPTS = [rpcapi_cap_intercell_opt,
+ rpcapi_cap_cells_opt]
+
+
+def register_opts(conf):
+ conf.register_opts(ALL_CELLS_OPTS, group="cells")
+ conf.register_opts(ALL_RPCAPI_CAP_OPTS, group="upgrade_levels")
+
+
+def list_opts():
+ return {
+ 'cells': ALL_CELLS_OPTS,
+ 'upgrade_levels': ALL_RPCAPI_CAP_OPTS,
+ }
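
All of the options above land in the [cells] group, with the two version caps under [upgrade_levels]. A short sketch of reading them once registered:

    # Sketch: reading the registered cells options.
    import nova.conf

    CONF = nova.conf.CONF

    def cells_call_timeout():
        if not CONF.cells.enable:
            return None
        # [upgrade_levels]/cells caps the RPC version (None by default).
        _cap = CONF.upgrade_levels.cells
        return CONF.cells.call_timeout  # default: 60 seconds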
diff --git a/nova/conf/cert.py b/nova/conf/cert.py
new file mode 100644
index 0000000000..51bc42d219
--- /dev/null
+++ b/nova/conf/cert.py
@@ -0,0 +1,66 @@
+# Copyright 2016 IBM Corp.
+# Copyright 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+cert_topic_opt = cfg.StrOpt("cert_topic",
+ default="cert",
+ help="""
+Determines the RPC topic that the cert nodes listen on. The default is 'cert',
+and for most deployments there is no need to ever change it.
+
+Possible values:
+
+Any string.
+
+* Services which consume this:
+
+ ``nova-cert``
+
+* Related options:
+
+ None
+""")
+
+rpcapi_cap_opt = cfg.StrOpt("cert",
+ help="""
+Specifies the maximum version for messages sent to cert services. This should
+be the minimum value that is supported by all of the deployed cert services.
+
+Possible values:
+
+Any valid OpenStack release name, in lower case, such as 'mitaka' or 'liberty'.
+Alternatively, it can be any string representing a version number in the format
+'N.N'; for example, possible values might be '1.12' or '2.0'.
+
+* Services which consume this:
+
+ ``nova-cert``
+
+* Related options:
+
+ None
+""")
+
+
+def register_opts(conf):
+ conf.register_opts([cert_topic_opt])
+ conf.register_opt(rpcapi_cap_opt, "upgrade_levels")
+
+
+def list_opts():
+ return {"DEFAULT": [cert_topic_opt],
+ "upgrade_levels": [rpcapi_cap_opt]}
diff --git a/nova/conf/conductor.py b/nova/conf/conductor.py
new file mode 100644
index 0000000000..70a3b8335d
--- /dev/null
+++ b/nova/conf/conductor.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+conductor_group = cfg.OptGroup(
+ 'conductor',
+ title='Conductor Options')
+
+use_local = cfg.BoolOpt(
+ 'use_local',
+ default=False,
+ help='DEPRECATED: Perform nova-conductor operations locally. '
+ 'This legacy mode was introduced to bridge a gap during '
+ 'the transition to the conductor service. It no longer '
+ 'represents a reasonable alternative for deployers. '
+ 'Removal may be as early as 14.0',
+ deprecated_for_removal=True)
+
+topic = cfg.StrOpt(
+ 'topic',
+ default='conductor',
+ help='The topic on which conductor nodes listen')
+
+manager = cfg.StrOpt(
+ 'manager',
+ default='nova.conductor.manager.ConductorManager',
+ help='Full class name for the Manager for conductor')
+
+workers = cfg.IntOpt(
+ 'workers',
+ help='Number of workers for OpenStack Conductor service. '
+ 'The default will be the number of CPUs available.')
+
+ALL_OPTS = [
+ use_local,
+ topic,
+ manager,
+ workers]
+
+
+def register_opts(conf):
+ conf.register_group(conductor_group)
+ conf.register_opts(ALL_OPTS, group=conductor_group)
+
+
+def list_opts():
+ return {conductor_group: ALL_OPTS}
diff --git a/nova/conf/ironic.py b/nova/conf/ironic.py
index f3cabffb93..781dd8649d 100644
--- a/nova/conf/ironic.py
+++ b/nova/conf/ironic.py
@@ -22,7 +22,9 @@ ironic_group = cfg.OptGroup(
api_version = cfg.IntOpt(
'api_version',
default=1,
- help='Version of Ironic API service endpoint.')
+ deprecated_for_removal=True,
+ help='Version of Ironic API service endpoint. '
+ 'DEPRECATED: Setting the API version is not possible anymore.')
api_endpoint = cfg.StrOpt(
'api_endpoint',
diff --git a/nova/conf/opts.py b/nova/conf/opts.py
index 6de56058e0..1bb5dcc0dc 100644
--- a/nova/conf/opts.py
+++ b/nova/conf/opts.py
@@ -17,10 +17,11 @@
This is the single point of entry to generate the sample configuration
file for Nova. It collects all the necessary info from the other modules
in this package. It is assumed that:
+
* every other module in this package has a 'list_opts' function which
return a dict where
- * the keys are strings which are the group names
- * the value of each key is a list of config options for that group
+ * the keys are strings which are the group names
+ * the value of each key is a list of config options for that group
* the nova.conf package doesn't have further packages with config options
* this module is only used in the context of sample file generation
"""
@@ -64,7 +65,7 @@ def _import_modules(module_names):
if not hasattr(mod, LIST_OPTS_FUNC_NAME):
msg = "The module 'nova.conf.%s' should have a '%s' "\
"function which returns the config options." % \
- (LIST_OPTS_FUNC_NAME, modname)
+ (modname, LIST_OPTS_FUNC_NAME)
raise Exception(msg)
else:
imported_modules.append(mod)
diff --git a/nova/conf/pci.py b/nova/conf/pci.py
new file mode 100644
index 0000000000..4505d44e04
--- /dev/null
+++ b/nova/conf/pci.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2013 Intel, Inc.
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+pci_alias_opt = cfg.MultiStrOpt(
+ 'pci_alias',
+ default=[],
+ help="""
+An alias for a PCI passthrough device requirement.
+
+This allows users to specify the alias in the extra_spec for a flavor, without
+needing to repeat all the PCI property requirements.
+
+Possible Values:
+
+* A list of JSON values which describe the aliases. For example:
+
+ pci_alias = {
+ "name": "QuickAssist",
+ "product_id": "0443",
+ "vendor_id": "8086",
+ "device_type": "type-PCI"
+ }
+
+ defines an alias for the Intel QuickAssist card (multi-valued). Valid
+ key values are:
+
+ * "name"
+ * "product_id"
+ * "vendor_id"
+ * "device_type"
+
+Services which consume this:
+
+* nova-compute
+
+Related options:
+
+* None""")
+
+pci_passthrough_whitelist_opt = cfg.MultiStrOpt(
+ 'pci_passthrough_whitelist',
+ default=[],
+ help="""
+White list of PCI devices available to VMs.
+
+Possible values:
+
+* A JSON dictionary which describes a whitelisted PCI device. It should take
+ the following format:
+
+ ["device_id": "<id>",] ["product_id": "<id>",]
+ ["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" |
+ "devname": "PCI Device Name",]
+ {"tag": "<tag_value>",}
+
+ where '[' indicates zero or one occurrences, '{' indicates zero or multiple
+ occurrences, and '|' indicates mutually exclusive options. Note that any missing
+ fields are automatically wildcarded. Valid examples are:
+
+ pci_passthrough_whitelist = {"devname":"eth0",
+ "physical_network":"physnet"}
+ pci_passthrough_whitelist = {"address":"*:0a:00.*"}
+ pci_passthrough_whitelist = {"address":":0a:00.",
+ "physical_network":"physnet1"}
+ pci_passthrough_whitelist = {"vendor_id":"1137",
+ "product_id":"0071"}
+ pci_passthrough_whitelist = {"vendor_id":"1137",
+ "product_id":"0071",
+ "address": "0000:0a:00.1",
+ "physical_network":"physnet1"}
+
+ The following are invalid, as they specify mutually exclusive options:
+
+ pci_passthrough_whitelist = {"devname":"eth0",
+ "physical_network":"physnet",
+ "address":"*:0a:00.*"}
+
+* A JSON list of JSON dictionaries corresponding to the above format. For
+ example:
+
+ pci_passthrough_whitelist = [{"product_id":"0001", "vendor_id":"8086"},
+ {"product_id":"0002", "vendor_id":"8086"}]
+
+Services which consume this:
+
+* nova-compute
+
+Related options:
+
+* None""")
+
+ALL_OPTS = [pci_alias_opt,
+ pci_passthrough_whitelist_opt]
+
+
+def register_opts(conf):
+ conf.register_opts(ALL_OPTS)
+
+
+def list_opts():
+ # TODO(sfinucan): This should be moved into the PCI group and
+ # oslo_config.cfg.OptGroup used
+ return {'DEFAULT': ALL_OPTS}
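
As the pci_alias help above notes, flavors consume an alias through their extra specs. A hedged example using nova's 'pci_passthrough:alias' extra-spec convention with the QuickAssist alias from the sample; the count of 1 requests one device per instance:

    # Example flavor extra specs requesting one QuickAssist device per
    # instance; 'QuickAssist' matches the alias sample in the help above.
    quickassist_extra_specs = {
        'pci_passthrough:alias': 'QuickAssist:1',
    }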
diff --git a/nova/conf/scheduler.py b/nova/conf/scheduler.py
index cf05819f52..3fc2d1eea5 100644
--- a/nova/conf/scheduler.py
+++ b/nova/conf/scheduler.py
@@ -13,19 +13,40 @@
# License for the specific language governing permissions and limitations
# under the License.
-import itertools
-
from oslo_config import cfg
+DEFAULT_GROUP_NAME = "DEFAULT"
+# The scheduler has options in several groups
+METRICS_GROUP_NAME = "metrics"
+TRUSTED_GROUP_NAME = "trusted_computing"
+UPGRADE_GROUP_NAME = "upgrade_levels"
+
host_subset_size_opt = cfg.IntOpt("scheduler_host_subset_size",
default=1,
- help="New instances will be scheduled on a host chosen randomly from "
- "a subset of the N best hosts. This property defines the subset "
- "size that a host is chosen from. A value of 1 chooses the first "
- "host returned by the weighing functions. This value must be at "
- "least 1. Any value less than 1 will be ignored, and 1 will be "
- "used instead")
+ help="""
+New instances will be scheduled on a host chosen randomly from a subset of the
+N best hosts, where N is the value set by this option. Valid values are 1 or
+greater. Any value less than one will be treated as 1.
+
+Setting this to a value greater than 1 will reduce the chance that multiple
+scheduler processes handling similar requests will select the same host,
+creating a potential race condition. By selecting a host randomly from the N
+hosts that best fit the request, the chance of a conflict is reduced. However,
+the higher you set this value, the less optimal the chosen host may be for a
+given request.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
bm_default_filter_opt = cfg.ListOpt("baremetal_scheduler_default_filters",
default=[
@@ -38,20 +59,69 @@ bm_default_filter_opt = cfg.ListOpt("baremetal_scheduler_default_filters",
"ExactDiskFilter",
"ExactCoreFilter",
],
- help="Which filter class names to use for filtering baremetal hosts "
- "when not specified in the request.")
+ help="""
+This option specifies the filters used for filtering baremetal hosts. The value
+should be a list of strings, with each string being the name of a filter class
+to be used. When used, they will be applied in order, so place your most
+restrictive filters first to make the filtering process more efficient.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ If the 'scheduler_use_baremetal_filters' option is False, this option has
+ no effect.
+""")
use_bm_filters_opt = cfg.BoolOpt("scheduler_use_baremetal_filters",
default=False,
- help="Flag to decide whether to use "
- "baremetal_scheduler_default_filters or not.")
+ help="""
+Set this to True to tell the nova scheduler that it should use the filters
+specified in the 'baremetal_scheduler_default_filters' option. If you are not
+scheduling baremetal nodes, leave this at the default setting of False.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ If this option is set to True, then the filters specified in the
+ 'baremetal_scheduler_default_filters' are used instead of the filters
+ specified in 'scheduler_default_filters'.
+""")
host_mgr_avail_filt_opt = cfg.MultiStrOpt("scheduler_available_filters",
default=["nova.scheduler.filters.all_filters"],
- help="Filter classes available to the scheduler which may be "
- "specified more than once. An entry of "
- "'nova.scheduler.filters.all_filters' maps to all filters "
- "included with nova.")
+ help="""
+This is an unordered list of the filter classes the Nova scheduler may apply.
+Only the filters specified in the 'scheduler_default_filters' option will be
+used, but any filter appearing in that option must also be included in this
+list.
+
+By default, this is set to all filters that are included with Nova. If you wish
+to change this, replace this with a list of strings, where each element is the
+path to a filter.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ scheduler_default_filters
+""")
host_mgr_default_filt_opt = cfg.ListOpt("scheduler_default_filters",
default=[
@@ -65,149 +135,765 @@ host_mgr_default_filt_opt = cfg.ListOpt("scheduler_default_filters",
"ServerGroupAntiAffinityFilter",
"ServerGroupAffinityFilter",
],
- help="Which filter class names to use for filtering hosts when not "
- "specified in the request.")
+ help="""
+This option is the list of filter class names that will be used for filtering
+hosts. The use of 'default' in the name of this option implies that other
+filters may sometimes be used, but that is not the case. These filters will be
+applied in the order they are listed, so place your most restrictive filters
+first to make the filtering process more efficient.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ All of the filters in this option *must* be present in the
+ 'scheduler_available_filters' option, or a SchedulerHostFilterNotFound
+ exception will be raised.
+""")
host_mgr_sched_wgt_cls_opt = cfg.ListOpt("scheduler_weight_classes",
default=["nova.scheduler.weights.all_weighers"],
- help="Which weight class names to use for weighing hosts")
+ help="""
+This is a list of weigher class names. Only hosts which pass the filters are
+weighed. The weight for any host starts at 0, and the weighers order these
+hosts by adding to or subtracting from the weight assigned by the previous
+weigher. Weights may become negative.
+
+An instance will be scheduled to one of the N most-weighted hosts, where N is
+'scheduler_host_subset_size'.
+
+By default, this is set to all weighers that are included with Nova. If you
+wish to change this, replace this with a list of strings, where each element is
+the path to a weigher.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
host_mgr_tracks_inst_chg_opt = cfg.BoolOpt("scheduler_tracks_instance_changes",
default=True,
- help="Determines if the Scheduler tracks changes to instances to help "
- "with its filtering decisions.")
+ help="""
+The scheduler may need information about the instances on a host in order to
+evaluate its filters and weighers. The most common need for this information is
+for the (anti-)affinity filters, which need to choose a host based on the
+instances already running on a host.
+
+If the configured filters and weighers do not need this information, disabling
+this option will improve performance. It may also be disabled when the tracking
+overhead proves too heavy, although this will cause classes requiring host
+usage data to query the database on each request instead.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
rpc_sched_topic_opt = cfg.StrOpt("scheduler_topic",
default="scheduler",
- help="The topic scheduler nodes listen on")
+ help="""
+This is the message queue topic that the scheduler 'listens' on. It is used
+when the scheduler service is started up to configure the queue, and whenever
+an RPC call to the scheduler is made. There is almost never any reason to
+change this value.
+
-# This option specifies an option group, so register separately
-rpcapi_cap_opt = cfg.StrOpt("scheduler",
- help="Set a version cap for messages sent to scheduler services")
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
scheduler_json_config_location_opt = cfg.StrOpt(
"scheduler_json_config_location",
default="",
- help="Absolute path to scheduler configuration JSON file.")
+ help="""
+The absolute path to the scheduler configuration JSON file, if any. This file
+location is monitored by the scheduler for changes and reloads it if needed. It
+is converted from JSON to a Python data structure, and passed into the
+filtering and weighing functions of the scheduler, which can use it for dynamic
+configuration.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
sched_driver_host_mgr_opt = cfg.StrOpt("scheduler_host_manager",
- default="nova.scheduler.host_manager.HostManager",
- help="The scheduler host manager class to use")
+ default="host_manager",
+ help="""
+The scheduler host manager to use, which manages the in-memory picture of the
+hosts that the scheduler uses.
+
+The option value should be chosen from one of the entrypoints under the
+'nova.scheduler.host_manager' namespace in 'setup.cfg'. For example,
+'host_manager' is the default setting. Aside from the default, the only other
+option as of the Mitaka release is 'ironic_host_manager', which should be used
+if you're using Ironic to provision bare-metal instances.
+
+This option also supports a full class path, for example
+"nova.scheduler.host_manager.HostManager", but note that this support is
+deprecated and will be dropped in the N release.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
driver_opt = cfg.StrOpt("scheduler_driver",
- default="nova.scheduler.filter_scheduler.FilterScheduler",
- help="Default driver to use for the scheduler")
+ default="filter_scheduler",
+ help="""
+The class of the driver used by the scheduler. This should be chosen from one
+of the entrypoints under the 'nova.scheduler.driver' namespace in 'setup.cfg'.
+If nothing is specified in this option, 'filter_scheduler' is used.
+
+This option also supports a deprecated full Python path to the class, for
+example "nova.scheduler.filter_scheduler.FilterScheduler", but note that this
+support will be dropped in the N release.
+
+Other options are:
+
+ * 'caching_scheduler' which aggressively caches the system state for better
+ individual scheduler performance at the risk of more retries when running
+ multiple schedulers.
+
+ * 'chance_scheduler' which simply picks a host at random.
+
+ * 'fake_scheduler' which is used for testing.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
driver_period_opt = cfg.IntOpt("scheduler_driver_task_period",
default=60,
- help="How often (in seconds) to run periodic tasks in the scheduler "
- "driver of your choice. Please note this is likely to interact "
- "with the value of service_down_time, but exactly how they "
- "interact will depend on your choice of scheduler driver.")
+ help="""
+This value controls how often (in seconds) to run periodic tasks in the
+scheduler. The specific tasks that are run for each period are determined by
+the particular scheduler being used.
+
-disk_allocation_ratio_opt = cfg.FloatOpt("disk_allocation_ratio",
- default=1.0,
- help="Virtual disk to physical disk allocation ratio")
+If this is larger than the nova-service 'service_down_time' setting, Nova may
+report the scheduler service as down. This is because the scheduler driver is
+responsible for sending a heartbeat, and it will only do that as often as this
+option allows. As each scheduler can behave a little differently from the
+others, be sure to test this with your selected scheduler.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ ``nova-service service_down_time``
+""")
isolated_img_opt = cfg.ListOpt("isolated_images",
default=[],
- help="Images to run on isolated host")
+ help="""
+If there is a need to restrict some images to only run on certain designated
+hosts, list those image UUIDs here.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ scheduler/isolated_hosts
+ scheduler/restrict_isolated_hosts_to_isolated_images
+""")
isolated_host_opt = cfg.ListOpt("isolated_hosts",
default=[],
- help="Host reserved for specific images")
+ help="""
+If there is a need to restrict some images to only run on certain designated
+hosts, list those host names here.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ scheduler/isolated_images
+ scheduler/restrict_isolated_hosts_to_isolated_images
+""")
restrict_iso_host_img_opt = cfg.BoolOpt(
"restrict_isolated_hosts_to_isolated_images",
default=True,
- help="Whether to force isolated hosts to run only isolated images")
+ help="""
+This setting determines if the scheduler's isolated_hosts filter will allow
+non-isolated images on a host designated as an isolated host. When set to True
+(the default), non-isolated images will not be allowed to be built on isolated
+hosts. When False, non-isolated images can be built on both isolated and
+non-isolated hosts alike.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'IsolatedHostsFilter' filter is enabled. Even
+then, this option doesn't affect the behavior of requests for isolated images,
+which will *always* be restricted to isolated hosts.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ scheduler/isolated_images
+ scheduler/isolated_hosts
+""")
+
+# This option specifies an option group, so register separately
+rpcapi_cap_opt = cfg.StrOpt("scheduler",
+ help="""
+Sets a version cap (limit) for messages sent to scheduler services. If
+multiple scheduler services are running and are not upgraded together, set
+this to the lowest deployed version to guarantee that other services never
+send messages that any of your running schedulers cannot understand.
+
+This is rarely needed in practice as most deployments run a single scheduler.
+It exists mainly for design compatibility with the other services, such as
+compute, which are routinely upgraded in a rolling fashion.
+
+* Services that use this:
+
+ ``nova-compute, nova-conductor``
+
+* Related options:
+
+ None
+""")
# These opts are registered as a separate OptGroup
trusted_opts = [
cfg.StrOpt("attestation_server",
- help="Attestation server HTTP"),
+ help="""
+The host to use as the attestation server.
+
+Cloud computing pools can involve thousands of compute nodes located at
+different geographical locations, making it difficult for cloud providers to
+identify a node's trustworthiness. When using the Trusted filter, users can
+request that their VMs only be placed on nodes that have been verified by the
+attestation server specified in this option.
+
+The value is a string, and can be either an IP address or FQDN.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'TrustedFilter' filter is enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ attestation_server_ca_file
+ attestation_port
+ attestation_api_url
+ attestation_auth_blob
+ attestation_auth_timeout
+ attestation_insecure_ssl
+"""),
cfg.StrOpt("attestation_server_ca_file",
- help="Attestation server Cert file for Identity verification"),
+ help="""
+The absolute path to the certificate to use for authentication when connecting
+to the attestation server. See the `attestation_server` help text for more
+information about host verification.
+
+The value is a string, and must point to a file that is readable by the
+scheduler.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'TrustedFilter' filter is enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ attestation_server
+ attestation_port
+ attestation_api_url
+ attestation_auth_blob
+ attestation_auth_timeout
+ attestation_insecure_ssl
+"""),
cfg.StrOpt("attestation_port",
default="8443",
- help="Attestation server port"),
+ help="""
+The port to use when connecting to the attestation server. See the
+`attestation_server` help text for more information about host verification.
+
+The value is a string, not an integer, but must contain only digits.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'TrustedFilter' filter is enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ attestation_server
+ attestation_server_ca_file
+ attestation_api_url
+ attestation_auth_blob
+ attestation_auth_timeout
+ attestation_insecure_ssl
+"""),
cfg.StrOpt("attestation_api_url",
default="/OpenAttestationWebServices/V1.0",
- help="Attestation web API URL"),
+ help="""
+The URL on the attestation server to use. See the `attestation_server` help
+text for more information about host verification.
+
+This value must be just the path portion of the full URL, as it will be joined
+to the host specified in the attestation_server option.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'TrustedFilter' filter is enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ attestation_server
+ attestation_server_ca_file
+ attestation_port
+ attestation_auth_blob
+ attestation_auth_timeout
+ attestation_insecure_ssl
+"""),
cfg.StrOpt("attestation_auth_blob",
- help="Attestation authorization blob - must change"),
+ help="""
+Attestation servers require a specific blob that is used to authenticate. The
+content and format of the blob are determined by the particular attestation
+server being used. There is no default value; you must supply the value as
+specified by your attestation service. See the `attestation_server` help text
+for more information about host verification.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'TrustedFilter' filter is enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ attestation_server
+ attestation_server_ca_file
+ attestation_port
+ attestation_api_url
+ attestation_auth_timeout
+ attestation_insecure_ssl
+"""),
cfg.IntOpt("attestation_auth_timeout",
default=60,
- help="Attestation status cache valid period length"),
+ help="""
+This value controls how long a successful attestation is cached. Once this
+period has elapsed, a new attestation request will be made. See the
+`attestation_server` help text for more information about host verification.
+
+The value is in seconds. Any positive integer enables caching; setting this to
+zero or a negative value will result in a call to the attestation server for
+every request, which may impact performance.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'TrustedFilter' filter is enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ attestation_server
+ attestation_server_ca_file
+ attestation_port
+ attestation_api_url
+ attestation_auth_blob
+ attestation_insecure_ssl
+"""),
cfg.BoolOpt("attestation_insecure_ssl",
default=False,
- help="Disable SSL cert verification for Attestation service")
+ help="""
+When set to True, SSL certificate verification is skipped when connecting to
+the attestation service.
+information about host verification.
+
+Valid values are True or False. The default is False.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'TrustedFilter' filter is enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ attestation_server
+ attestation_server_ca_file
+ attestation_port
+ attestation_api_url
+ attestation_auth_blob
+ attestation_auth_timeout
+"""),
]
max_io_ops_per_host_opt = cfg.IntOpt("max_io_ops_per_host",
default=8,
- help="Tells filters to ignore hosts that have this many or more "
- "instances currently in build, resize, snapshot, migrate, rescue "
- "or unshelve task states")
+ help="""
+This setting caps the number of instances on a host that can be actively
+performing IO (in a build, resize, snapshot, migrate, rescue, or unshelve task
+state) before that host becomes ineligible to build new instances.
+
+Valid values are positive integers: 1 or greater.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'io_ops_filter' filter is enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
agg_img_prop_iso_namespace_opt = cfg.StrOpt(
"aggregate_image_properties_isolation_namespace",
- help="Force the filter to consider only keys matching the given "
- "namespace.")
+ help="""
+Images and hosts can be configured so that certain images can only be scheduled
+to hosts in a particular aggregate. This is done with metadata keys set on the
+host aggregate that begin with the value of this option. If a host belongs to
+an aggregate with such a metadata key, the image in the request spec must have
+a matching value in its properties for the scheduler to consider the host
+acceptable.
+
+Valid values are strings.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'aggregate_image_properties_isolation' filter is
+enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ aggregate_image_properties_isolation_separator
+""")
agg_img_prop_iso_separator_opt = cfg.StrOpt(
"aggregate_image_properties_isolation_separator",
default=".",
- help="The separator used between the namespace and keys")
+ help="""
+When using the aggregate_image_properties_isolation filter, the relevant
+metadata keys are prefixed with the namespace defined in the
+aggregate_image_properties_isolation_namespace configuration option plus a
+separator. This option defines the separator to be used. It defaults to a
+period ('.').
+
+Valid values are strings.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'aggregate_image_properties_isolation' filter is
+enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ aggregate_image_properties_isolation_namespace
+""")
max_instances_per_host_opt = cfg.IntOpt("max_instances_per_host",
default=50,
- help="Ignore hosts that have too many instances")
+ help="""
+If you need to limit the number of instances on any given host, set this option
+to the maximum number of instances you want to allow. The num_instances_filter
+will reject any host that has at least as many instances as this option's
+value.
+
+Valid values are positive integers; setting it to zero will cause all hosts to
+be rejected if the num_instances_filter is active.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'num_instances_filter' filter is enabled.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
ram_weight_mult_opt = cfg.FloatOpt("ram_weight_multiplier",
default=1.0,
- help="Multiplier used for weighing ram. Negative numbers mean to "
- "stack vs spread.")
+ help="""
+This option determines how hosts with more or less available RAM are weighed. A
+positive value will result in the scheduler preferring hosts with more
+available RAM, and a negative number will result in the scheduler preferring
+hosts with less available RAM. Another way to look at it is that positive
+values for this option will tend to spread instances across many hosts, while
+negative values will tend to fill up (stack) hosts as much as possible before
+scheduling to a less-used host. The absolute value, whether positive or
+negative, controls how strong the RAM weigher is relative to other weighers.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'ram' weigher is enabled.
+
+Valid values are numeric, either integer or float.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
+
+disk_weight_mult_opt = cfg.FloatOpt("disk_weight_multiplier",
+ default=1.0,
+ help="Multiplier used for weighing free disk space. Negative "
+ "numbers mean to stack vs spread.")
io_ops_weight_mult_opt = cfg.FloatOpt("io_ops_weight_multiplier",
default=-1.0,
- help="Multiplier used for weighing host io ops. Negative numbers mean "
- "a preference to choose light workload compute hosts.")
+ help="""
+This option determines how hosts with differing workloads are weighed. Negative
+values, such as the default, will result in the scheduler preferring hosts with
+lighter workloads whereas positive values will prefer hosts with heavier
+workloads. Another way to look at it is that positive values for this option
+will tend to schedule instances onto hosts that are already busy, while
+negative values will tend to distribute the workload across more hosts. The
+absolute value, whether positive or negative, controls how strong the io_ops
+weigher is relative to other weighers.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect. Also note that this setting
+only affects scheduling if the 'io_ops' weigher is enabled.
+
+Valid values are numeric, either integer or float.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
# These opts are registered as a separate OptGroup
metrics_weight_opts = [
cfg.FloatOpt("weight_multiplier",
default=1.0,
- help="Multiplier used for weighing metrics."),
+ help="""
+When using metrics to weight the suitability of a host, you can use this option
+to change how the calculated weight influences the weight assigned to a host as
+follows:
+
+ * Greater than 1.0: increases the effect of the metric on overall weight.
+
+ * Equal to 1.0: No change to the calculated weight.
+
+ * Less than 1.0, greater than 0: reduces the effect of the metric on
+ overall weight.
+
+ * 0: The metric value is ignored, and the value of the
+ 'weight_of_unavailable' option is returned instead.
+
+ * Greater than -1.0, less than 0: the effect is reduced and reversed.
+
+ * -1.0: the effect is reversed.
+
+ * Less than -1.0: the effect is increased proportionally and reversed.
+
+Valid values are numeric, either integer or float.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ weight_of_unavailable
+"""),
cfg.ListOpt("weight_setting",
default=[],
- help="How the metrics are going to be weighed. This should be in "
- "the form of '<name1>=<ratio1>, <name2>=<ratio2>, ...', "
- "where <nameX> is one of the metrics to be weighed, and "
- "<ratioX> is the corresponding ratio. So for "
- "'name1=1.0, name2=-1.0' The final weight would be "
- "name1.value * 1.0 + name2.value * -1.0."),
+ help="""
+This setting specifies the metrics to be weighed and the relative ratios for
+each metric. This should be a single string value, consisting of a series of
+one or more 'name=ratio' pairs, separated by commas, where 'name' is the name
+of the metric to be weighed, and 'ratio' is the relative weight for that
+metric.
+
+Note that if the ratio is set to 0, the metric value is ignored, and instead
+the weight will be set to the value of the 'weight_of_unavailable' option.
+
+As an example, let's consider the case where this option is set to:
+
+ ``name1=1.0, name2=-1.3``
+
+The final weight will be:
+
+ ``(name1.value * 1.0) + (name2.value * -1.3)``
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ weight_of_unavailable
+"""),
cfg.BoolOpt("required",
default=True,
- help="How to treat the unavailable metrics. When a metric is NOT "
- "available for a host, if it is set to be True, it would "
- "raise an exception, so it is recommended to use the "
- "scheduler filter MetricFilter to filter out those hosts. If "
- "it is set to be False, the unavailable metric would be "
- "treated as a negative factor in weighing process, the "
- "returned value would be set by the option "
- "weight_of_unavailable."),
+ help="""
+This setting determines how any unavailable metrics are treated. If this option
+is set to True, any host for which a metric is unavailable will cause an
+exception to be raised, so it is recommended to also use the MetricFilter to
+filter out those hosts before weighing.
+
+When this option is False, any metric being unavailable for a host will set the
+host weight to 'weight_of_unavailable'.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ weight_of_unavailable
+"""),
cfg.FloatOpt("weight_of_unavailable",
default=float(-10000.0),
- help="The final weight value to be returned if required is set to "
- "False and any one of the metrics set by weight_setting is "
- "unavailable."),
+ help="""
+When any of the following conditions are met, this value will be used in place
+of any actual metric value:
+
+ * One of the metrics named in 'weight_setting' is not available for a host,
+ and the value of 'required' is False.
+
+ * The ratio specified for a metric in 'weight_setting' is 0.
+
+ * The 'weight_multiplier' option is set to 0.
+
+This option is only used by the FilterScheduler and its subclasses; if you use
+a different scheduler, this option has no effect.
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ weight_setting
+ required
+ weight_multiplier
+"""),
]
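Putting the metrics options above together, the weight computed for a host
works out roughly as follows (a sketch assuming a plain dict of host metric
values; not the actual MetricsWeigher code):

    def metrics_weight(host_metrics, weight_setting, required=True,
                       weight_of_unavailable=-10000.0):
        total = 0.0
        for pair in weight_setting:  # e.g. ['name1=1.0', 'name2=-1.3']
            name, ratio = (part.strip() for part in pair.split('='))
            if float(ratio) == 0.0:
                # A zero ratio short-circuits to weight_of_unavailable.
                return weight_of_unavailable
            if name not in host_metrics:
                if required:
                    raise RuntimeError('metric %s unavailable' % name)
                return weight_of_unavailable
            total += host_metrics[name] * float(ratio)
        # The result is then scaled by the 'weight_multiplier' option.
        return total

For example, with weight_setting ['name1=1.0', 'name2=-1.3'] and metric values
name1=2.0 and name2=1.0, this yields (2.0 * 1.0) + (1.0 * -1.3) = 0.7.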
scheduler_max_att_opt = cfg.IntOpt("scheduler_max_attempts",
- default=3,
- min=1,
- help="Maximum number of attempts to schedule an instance")
+ default=3,
+ help="""
+This is the maximum number of attempts that will be made to schedule an
+instance before it is assumed that the failures aren't due to normal occasional
+race conflicts, but rather some other problem. When this is reached a
+MaxRetriesExceeded exception is raised, and the instance is set to an error
+state.
+
+Valid values are positive integers (1 or greater).
+
+* Services that use this:
+
+ ``nova-scheduler``
+
+* Related options:
+
+ None
+""")
soft_affinity_weight_opt = cfg.FloatOpt('soft_affinity_weight_multiplier',
default=1.0,
@@ -227,7 +913,7 @@ soft_anti_affinity_weight_opt = cfg.FloatOpt(
'the opposite, which is soft-affinity.')
-SIMPLE_OPTS = [host_subset_size_opt,
+default_opts = [host_subset_size_opt,
bm_default_filter_opt,
use_bm_filters_opt,
host_mgr_avail_filt_opt,
@@ -239,7 +925,6 @@ SIMPLE_OPTS = [host_subset_size_opt,
driver_opt,
driver_period_opt,
scheduler_json_config_location_opt,
- disk_allocation_ratio_opt,
isolated_img_opt,
isolated_host_opt,
restrict_iso_host_img_opt,
@@ -248,33 +933,27 @@ SIMPLE_OPTS = [host_subset_size_opt,
agg_img_prop_iso_separator_opt,
max_instances_per_host_opt,
ram_weight_mult_opt,
+ disk_weight_mult_opt,
io_ops_weight_mult_opt,
scheduler_max_att_opt,
soft_affinity_weight_opt,
soft_anti_affinity_weight_opt,
]
-ALL_OPTS = itertools.chain(
- SIMPLE_OPTS,
- [rpcapi_cap_opt],
- trusted_opts,
- metrics_weight_opts,
- )
-
def register_opts(conf):
- conf.register_opts(SIMPLE_OPTS)
- conf.register_opt(rpcapi_cap_opt, "upgrade_levels")
- trust_group = cfg.OptGroup(name="trusted_computing",
+ conf.register_opts(default_opts)
+ conf.register_opt(rpcapi_cap_opt, UPGRADE_GROUP_NAME)
+ trust_group = cfg.OptGroup(name=TRUSTED_GROUP_NAME,
title="Trust parameters")
conf.register_group(trust_group)
conf.register_opts(trusted_opts, group=trust_group)
- conf.register_opts(metrics_weight_opts, group="metrics")
+ conf.register_opts(metrics_weight_opts, group=METRICS_GROUP_NAME)
def list_opts():
- return {"DEFAULT": SIMPLE_OPTS,
- "upgrade_levels": [rpcapi_cap_opt],
- "trusted_computing": trusted_opts,
- "metrics": metrics_weight_opts,
+ return {DEFAULT_GROUP_NAME: default_opts,
+ UPGRADE_GROUP_NAME: [rpcapi_cap_opt],
+ TRUSTED_GROUP_NAME: trusted_opts,
+ METRICS_GROUP_NAME: metrics_weight_opts,
}
diff --git a/nova/conf/serial_console.py b/nova/conf/serial_console.py
index f0d7b1cf70..1ed793009f 100644
--- a/nova/conf/serial_console.py
+++ b/nova/conf/serial_console.py
@@ -48,7 +48,7 @@ Interdependencies to other options:
port_range_opt = cfg.StrOpt('port_range',
default=DEFAULT_PORT_RANGE,
- # TODO(markus_z): regex="\d+:\d+", As soon as we have oslo.config 2.7
+ regex="\d+:\d+",
help="""
A range of TCP ports a guest can use for its backend.
diff --git a/nova/conf/virt.py b/nova/conf/virt.py
index 6911c9e015..91f92ef5ad 100644
--- a/nova/conf/virt.py
+++ b/nova/conf/virt.py
@@ -14,6 +14,8 @@
from oslo_config import cfg
+from nova import paths
+
vcpu_pin_set = cfg.StrOpt(
'vcpu_pin_set',
help="""Defines which physical CPUs (pCPUs) can be used by instance
@@ -30,15 +32,291 @@ Possible values:
Services which consume this:
-* nova-scheduler
+* ``nova-scheduler``
+* ``nova-compute``
+
+Interdependencies to other options:
+
+* None
+""")
+
+compute_driver = cfg.StrOpt(
+ 'compute_driver',
+ help="""Defines which driver to use for controlling virtualization.
+
+Possible values:
+
+* ``libvirt.LibvirtDriver``
+* ``xenapi.XenAPIDriver``
+* ``fake.FakeDriver``
+* ``ironic.IronicDriver``
+* ``vmwareapi.VMwareVCDriver``
+* ``hyperv.HyperVDriver``
+
+Services which consume this:
+
+* ``nova-compute``
+
+Interdependencies to other options:
+
+* None
+""")
+
+default_ephemeral_format = cfg.StrOpt(
+ 'default_ephemeral_format',
+ help="""The default format an ephemeral_volume will be formatted
+with on creation.
+
+Possible values:
+
+* ``ext2``
+* ``ext3``
+* ``ext4``
+* ``xfs``
+* ``ntfs`` (only for Windows guests)
+
+Services which consume this:
+
+* ``nova-compute``
+
+Interdependencies to other options:
+
+* None
+""")
+
+preallocate_images = cfg.StrOpt(
+ 'preallocate_images',
+ default='none',
+ choices=('none', 'space'),
+ help="""The image preallocation mode to use. Image preallocation allows
+storage for instance images to be allocated up front when the instance is
+initially provisioned. This ensures immediate feedback is given if enough
+space isn't available. In addition, it should significantly improve
+performance on writes to new blocks and may even improve I/O performance to
+prewritten blocks due to reduced fragmentation.
+
+Possible values:
+
+* "none" => no storage provisioning is done up front
+* "space" => storage is fully allocated at instance start
+
+Services which consume this:
+
+* ``nova-compute``
+
+Interdependencies to other options:
+
+* None
+""")
+
+use_cow_images = cfg.BoolOpt(
+ 'use_cow_images',
+ default=True,
+ help="""Enable use of copy-on-write (cow) images.
+
+QEMU/KVM allow the use of qcow2 as backing files. If this option is
+disabled, backing files will not be used.
+
+Possible values:
+
+* True: Enable use of cow images
+* False: Disable use of cow images
+
+Services which consume this:
+
+* ``nova-compute``
+
+Interdependencies to other options:
+
+* None
+""")
+
+vif_plugging_is_fatal = cfg.BoolOpt(
+ 'vif_plugging_is_fatal',
+ default=True,
+ help="""Determine if instance should boot or fail on VIF plugging timeout.
+
+Nova sends a port update to Neutron after an instance has been scheduled,
+providing Neutron with the necessary information to finish setup of the port.
+Once completed, Neutron notifies Nova that it has finished setting up the
+port, at which point Nova resumes the boot of the instance since network
+connectivity is now supposed to be present. A timeout will occur if the reply
+is not received after a given interval.
+
+This option determines what Nova does when the VIF plugging timeout event
+happens. When enabled, the instance will error out. When disabled, the
+instance will continue to boot on the assumption that the port is ready.
+
+Possible values:
+
+* True: Instances should fail after VIF plugging timeout
+* False: Instances should continue booting after VIF plugging timeout
+
+Services which consume this:
+
+* ``nova-compute``
+
+Interdependencies to other options:
+
+* None
+""")
+
+vif_plugging_timeout = cfg.IntOpt(
+ 'vif_plugging_timeout',
+ default=300,
+ help="""Timeout for Neutron VIF plugging event message arrival.
+
+Number of seconds to wait for Neutron vif plugging events to
+arrive before continuing or failing (see 'vif_plugging_is_fatal'). If this is
+set to zero and 'vif_plugging_is_fatal' is False, events should not be
+expected to arrive at all.
+
+Possible values:
+
+* A time interval in seconds
+
+Services which consume this:
+
+* ``nova-compute``
+
+Interdependencies to other options:
+
+* None
+""")
+
+firewall_driver = cfg.StrOpt(
+ 'firewall_driver',
+ help="""Firewall driver to use with ``nova-network`` service.
+
+This option only applies when using the ``nova-network`` service. When using
+other networking services, such as Neutron, this should be set to the
+``NoopFirewallDriver``.
+
+If unset (the default), this will default to the hypervisor-specified
+default driver.
+
+Possible values:
+
+* nova.virt.firewall.IptablesFirewallDriver
+* nova.virt.firewall.NoopFirewallDriver
+* nova.virt.libvirt.firewall.IptablesFirewallDriver
+* [...]
+
+Services which consume this:
+
+* nova-network
+
+Interdependencies to other options:
+
+* ``network_api_class``: This must be set to ``nova.network.api.API`` to
+ enable ``nova-network`` networking
+* ``security_group_api``: This must be set to ``nova`` to enable
+ ``nova-network`` networking
+""")
+
+allow_same_net_traffic = cfg.BoolOpt(
+ 'allow_same_net_traffic',
+ default=True,
+ help="""Determine whether to allow network traffic from same network.
+
+When set to true, hosts on the same subnet are not filtered and are allowed
+to pass all types of traffic between them. On a flat network, this allows
+all instances from all projects unfiltered communication. With VLAN
+networking, this allows access between instances within the same project.
+
+This option only applies when using the ``nova-network`` service. When using
+another networking services, such as Neutron, security groups or other
+approaches should be used.
+
+Possible values:
+
+* True: Network traffic should be allowed to pass between all instances on the
+ same network, regardless of their tenant and security policies
+* False: Network traffic should not be allowed to pass between instances unless
+ it is unblocked in a security group
+
+Services which consume this:
+
+* nova-network
+
+Interdependencies to other options:
+
+* ``network_api_class``: This must be set to ``nova.network.api.API`` to
+ enable ``nova-network`` networking
+* ``security_group_api``: This must be set to ``nova`` to enable
+ ``nova-network`` networking
+* ``firewall_driver``: This must be set to
+ ``nova.virt.libvirt.firewall.IptablesFirewallDriver`` to ensure the
+ libvirt firewall driver is enabled.
+""")
+
+force_raw_images = cfg.BoolOpt(
+ 'force_raw_images',
+ default=True,
+ help="""Force conversion of backing images to raw format.
+
+Possible values:
+
+* True: Backing image files will be converted to raw image format
+* False: Backing image files will not be converted
+
+Services which consume this:
+
* nova-compute
-Related options:
+Interdependencies to other options:
+
+* ``compute_driver``: Only the libvirt driver uses this option.
+""")
+
+injected_network_template = cfg.StrOpt(
+ 'injected_network_template',
+ default=paths.basedir_def('nova/virt/interfaces.template'),
+ help='Template file for injected network')
+
+# NOTE(yamahata): ListOpt won't work because the command may include a comma.
+# For example:
+#
+# mkfs.ext4 -O dir_index,extent -E stride=8,stripe-width=16
+# --label %(fs_label)s %(target)s
+#
+# list arguments are comma separated and there is no way to escape such
+# commas.
+virt_mkfs = cfg.MultiStrOpt(
+ 'virt_mkfs',
+ default=[],
+ help='Name of the mkfs commands for ephemeral device. '
+ 'The format is <os_type>=<mkfs command>')
-* None""")
+resize_fs_using_block_device = cfg.BoolOpt(
+ 'resize_fs_using_block_device',
+ default=False,
+ help='Attempt to resize the filesystem by accessing the '
+ 'image over a block device. This is done by the host '
+ 'and may not be necessary if the image contains a recent '
+ 'version of cloud-init. Possible mechanisms require '
+ 'the nbd driver (for qcow and raw), or loop (for raw).')
+timeout_nbd = cfg.IntOpt(
+ 'timeout_nbd',
+ default=10,
+ help='Amount of time, in seconds, to wait for NBD '
+ 'device start up.')
-ALL_OPTS = [vcpu_pin_set]
+ALL_OPTS = [vcpu_pin_set,
+ compute_driver,
+ default_ephemeral_format,
+ preallocate_images,
+ use_cow_images,
+ vif_plugging_is_fatal,
+ vif_plugging_timeout,
+ firewall_driver,
+ allow_same_net_traffic,
+ force_raw_images,
+ injected_network_template,
+ virt_mkfs,
+ resize_fs_using_block_device,
+ timeout_nbd]
def register_opts(conf):
@@ -47,4 +325,4 @@ def register_opts(conf):
def list_opts():
# TODO(sfinucan): This should be moved to a virt or hardware group
- return ('DEFAULT', ALL_OPTS)
+ return {'DEFAULT': ALL_OPTS}
diff --git a/nova/conf/vnc.py b/nova/conf/vnc.py
new file mode 100644
index 0000000000..b30d8849b6
--- /dev/null
+++ b/nova/conf/vnc.py
@@ -0,0 +1,335 @@
+# Copyright (c) 2010 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+vnc_group = cfg.OptGroup(
+ 'vnc',
+ title='VNC options',
+ help="""
+Virtual Network Computing (VNC) can be used to provide remote desktop
+console access to instances for tenants and/or administrators.""")
+
+enabled = cfg.BoolOpt(
+ 'enabled',
+ default=True,
+ deprecated_group='DEFAULT',
+ deprecated_name='vnc_enabled',
+ help="""Enable VNC related features.
+
+Guests will get created with graphical devices to support this. Clients
+(for example Horizon) can then establish a VNC connection to the guest.
+
+Possible values:
+
+* True: Enables the feature
+* False: Disables the feature
+
+Services which consume this:
+
+* ``nova-compute``
+
+Related options:
+
+* None
+""")
+
+keymap = cfg.StrOpt(
+ 'keymap',
+ default='en-us',
+ deprecated_group='DEFAULT',
+ deprecated_name='vnc_keymap',
+ help="""Keymap for VNC.
+
+The keyboard mapping (keymap) determines which keyboard layout a VNC
+session should use by default.
+
+Possible values:
+
+* A keyboard layout which is supported by the underlying hypervisor on
+ this node. This is usually an 'IETF language tag' (for example
+ 'en-us'). If you use QEMU as hypervisor, you should find the list
+ of supported keyboard layouts at ``/usr/share/qemu/keymaps``.
+
+Services which consume this:
+
+* ``nova-compute``
+
+Related options:
+
+* None
+""")
+
+# TODO(sfinucan): This should be an IPOpt
+vncserver_listen = cfg.StrOpt(
+ 'vncserver_listen',
+ default='127.0.0.1',
+ deprecated_group='DEFAULT',
+ help="""
+The IP address on which an instance should listen for incoming VNC
+connection requests on this node.
+
+Possible values:
+
+* An IP address
+
+Services which consume this:
+
+* ``nova-compute``
+
+Related options:
+
+* None
+""")
+
+# TODO(sfinucan): This should be an IPOpt
+vncserver_proxyclient_address = cfg.StrOpt(
+ 'vncserver_proxyclient_address',
+ default='127.0.0.1',
+ deprecated_group='DEFAULT',
+ help="""
+Private, internal address of VNC console proxy.
+
+The VNC proxy is an OpenStack component that enables compute service
+users to access their instances through VNC clients.
+
+This option sets the private address to which proxy clients, such as
+``nova-xvpvncproxy``, should connect.
+
+Possible values:
+
+* An IP address
+
+Services which consume this:
+
+* ``nova-compute``
+
+Related options:
+
+* None
+""")
+
+# TODO(sfinucan): This should be an IPOpt
+novncproxy_host = cfg.StrOpt(
+ 'novncproxy_host',
+ default='0.0.0.0',
+ deprecated_group='DEFAULT',
+ help="""
+IP address that the noVNC console proxy should bind to.
+
+The VNC proxy is an OpenStack component that enables compute service
+users to access their instances through VNC clients. noVNC provides
+VNC support through a websocket-based client.
+
+This option sets the private address to which the noVNC console proxy
+service should bind.
+
+Possible values:
+
+* An IP address
+
+Services which consume this:
+
+* ``nova-compute``
+
+Related options:
+
+* novncproxy_port
+* novncproxy_base_url
+""")
+
+# TODO(sfinucan): This should be a PortOpt
+novncproxy_port = cfg.IntOpt(
+ 'novncproxy_port',
+ default=6080,
+ min=1,
+ max=65535,
+ deprecated_group='DEFAULT',
+ help="""
+Port that the noVNC console proxy should bind to.
+
+The VNC proxy is an OpenStack component that enables compute service
+users to access their instances through VNC clients. noVNC provides
+VNC support through a websocket-based client.
+
+This option sets the private port to which the noVNC console proxy
+service should bind.
+
+Possible values:
+
+* A port number
+
+Services which consume this:
+
+* ``nova-compute``
+
+Related options:
+
+* novncproxy_host
+* novncproxy_base_url
+""")
+
+novncproxy_base_url = cfg.StrOpt(
+ 'novncproxy_base_url',
+ default='http://127.0.0.1:6080/vnc_auto.html',
+ deprecated_group='DEFAULT',
+ help="""
+Public address of noVNC VNC console proxy.
+
+The VNC proxy is an OpenStack component that enables compute service
+users to access their instances through VNC clients. noVNC provides
+VNC support through a websocket-based client.
+
+This option sets the public base URL to which client systems will
+connect. noVNC clients can use this address to connect to the noVNC
+instance and, by extension, the VNC sessions.
+
+Possible values:
+
+* A URL
+
+Services which consume this:
+
+* ``nova-compute``
+
+Related options:
+
+* novncproxy_host
+* novncproxy_port
+""")
+
+# TODO(sfinucan): This should be an IPOpt
+xvpvncproxy_host = cfg.StrOpt(
+ 'xvpvncproxy_host',
+ default='0.0.0.0',
+ deprecated_group='DEFAULT',
+ help="""
+IP address that the XVP VNC console proxy should bind to.
+
+The VNC proxy is an OpenStack component that enables compute service
+users to access their instances through VNC clients. Xen provides
+the Xenserver VNC Proxy, or XVP, as an alternative to the
+websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
+XVP clients are Java-based.
+
+This option sets the private address to which the XVP VNC console proxy
+service should bind.
+
+Possible values:
+
+* An IP address
+
+Services which consume this:
+
+* ``nova-compute``
+
+Related options:
+
+* xvpvncproxy_port
+* xvpvncproxy_base_url
+""")
+
+# TODO(sfinucan): This should be a PortOpt
+xvpvncproxy_port = cfg.IntOpt(
+ 'xvpvncproxy_port',
+ default=6081,
+ min=1,
+ max=65535,
+ deprecated_group='DEFAULT',
+ help="""
+Port that the XVP VNC console proxy should bind to.
+
+The VNC proxy is an OpenStack component that enables compute service
+users to access their instances through VNC clients. Xen provides
+the Xenserver VNC Proxy, or XVP, as an alternative to the
+websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
+XVP clients are Java-based.
+
+This option sets the private port to which the XVP VNC console proxy
+service should bind.
+
+Possible values:
+
+* A port number
+
+Services which consume this:
+
+* ``nova-compute``
+
+Related options:
+
+* xvpvncproxy_host
+* xvpvncproxy_base_url
+""")
+
+xvpvncproxy_base_url = cfg.StrOpt(
+ 'xvpvncproxy_base_url',
+ default='http://127.0.0.1:6081/console',
+ deprecated_group='DEFAULT',
+ help="""
+Public address of XVP VNC console proxy.
+
+The VNC proxy is an OpenStack component that enables compute service
+users to access their instances through VNC clients. Xen provides
+the Xenserver VNC Proxy, or XVP, as an alternative to the
+websocket-based noVNC proxy used by Libvirt. In contrast to noVNC,
+XVP clients are Java-based.
+
+This option sets the public base URL to which client systems will
+connect. XVP clients can use this address to connect to the XVP
+instance and, by extension, the VNC sessions.
+
+Possible values:
+
+* A URL
+
+Services which consume this:
+
+* ``nova-compute``
+
+Related options:
+
+* xvpvncproxy_host
+* xvpvncproxy_port
+""")
+
+ALL_OPTS = [
+ enabled,
+ keymap,
+ vncserver_listen,
+ vncserver_proxyclient_address,
+ novncproxy_host,
+ novncproxy_port,
+ novncproxy_base_url,
+ xvpvncproxy_host,
+ xvpvncproxy_port,
+ xvpvncproxy_base_url]
+
+CLI_OPTS = [
+ novncproxy_host,
+ novncproxy_port]
+
+
+def register_opts(conf):
+ conf.register_group(vnc_group)
+ conf.register_opts(ALL_OPTS, group=vnc_group)
+
+
+def register_cli_opts(conf):
+ conf.register_cli_opts(CLI_OPTS, group=vnc_group)
+
+
+def list_opts():
+ return {vnc_group: ALL_OPTS}
diff --git a/nova/conf/wsgi.py b/nova/conf/wsgi.py
new file mode 100644
index 0000000000..791264c75b
--- /dev/null
+++ b/nova/conf/wsgi.py
@@ -0,0 +1,92 @@
+# Copyright 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+api_paste_config_opt = cfg.StrOpt('api_paste_config',
+ default="api-paste.ini",
+ help='File name for the paste.deploy config for nova-api')
+
+wsgi_log_format_opt = cfg.StrOpt('wsgi_log_format',
+ default='%(client_ip)s "%(request_line)s" status: %(status_code)s'
+ ' len: %(body_length)s time: %(wall_seconds).7f',
+ help='A python format string that is used as the template to '
+ 'generate log lines. The following values can be formatted '
+ 'into it: client_ip, date_time, request_line, status_code, '
+ 'body_length, wall_seconds.')
+
+secure_proxy_ssl_header_opt = cfg.StrOpt('secure_proxy_ssl_header',
+ help='The HTTP header used to determine the scheme for the '
+ 'original request, even if it was removed by an SSL '
+ 'terminating proxy. Typical value is '
+ '"HTTP_X_FORWARDED_PROTO".')
+
+ssl_ca_file_opt = cfg.StrOpt('ssl_ca_file',
+ help="CA certificate file to use to verify "
+ "connecting clients")
+
+ssl_cert_file_opt = cfg.StrOpt('ssl_cert_file',
+ help="SSL certificate of API server")
+
+ssl_key_file_opt = cfg.StrOpt('ssl_key_file',
+ help="SSL private key of API server")
+
+tcp_keepidle_opt = cfg.IntOpt('tcp_keepidle',
+ default=600,
+ help="Sets the value of TCP_KEEPIDLE in seconds for each "
+ "server socket. Not supported on OS X.")
+
+wsgi_default_pool_size_opt = cfg.IntOpt('wsgi_default_pool_size',
+ default=1000,
+ help="Size of the pool of greenthreads used by wsgi")
+
+max_header_line_opt = cfg.IntOpt('max_header_line',
+ default=16384,
+ help="Maximum line size of message headers to be accepted. "
+ "max_header_line may need to be increased when using "
+ "large tokens (typically those generated by the "
+ "Keystone v3 API with big service catalogs).")
+
+wsgi_keep_alive_opt = cfg.BoolOpt('wsgi_keep_alive',
+ default=True,
+ help="If False, closes the client socket connection "
+ "explicitly.")
+
+client_socket_timeout_opt = cfg.IntOpt('client_socket_timeout', default=900,
+ help="Timeout for client connections' socket operations. "
+ "If an incoming connection is idle for this number of "
+ "seconds it will be closed. A value of '0' means "
+ "wait forever.")
+
+ALL_OPTS = [api_paste_config_opt,
+ wsgi_log_format_opt,
+ secure_proxy_ssl_header_opt,
+ ssl_ca_file_opt,
+ ssl_cert_file_opt,
+ ssl_key_file_opt,
+ tcp_keepidle_opt,
+ wsgi_default_pool_size_opt,
+ max_header_line_opt,
+ wsgi_keep_alive_opt,
+ client_socket_timeout_opt
+ ]
+
+
+def register_opts(conf):
+ conf.register_opts(ALL_OPTS)
+
+
+def list_opts():
+ return {"DEFAULT": ALL_OPTS}
diff --git a/nova/config.py b/nova/config.py
index b3c9902efb..b9c025915a 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_cache import core as cache
from oslo_config import cfg
from oslo_db import options
from oslo_log import log
@@ -47,18 +48,22 @@ _DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d '
'%(message)s')
-def parse_args(argv, default_config_files=None, configure_db=True):
+def parse_args(argv, default_config_files=None, configure_db=True,
+ init_rpc=True):
log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS)
log.register_options(CONF)
options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION,
sqlite_db='nova.sqlite')
rpc.set_defaults(control_exchange='nova')
+ cache.configure(CONF)
debugger.register_cli_opts()
CONF(argv[1:],
project='nova',
version=version.version_string(),
default_config_files=default_config_files)
- rpc.init(CONF)
+
+ if init_rpc:
+ rpc.init(CONF)
if configure_db:
sqlalchemy_api.configure(CONF)
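With the new keyword argument, a caller that needs configuration but no
messaging can now skip RPC setup. A minimal usage sketch (the kind of utility
process shown here is hypothetical):

    import sys

    from nova import config

    # Parse nova options but leave the RPC layer uninitialised, e.g. for
    # a process that only reads configuration and the database.
    config.parse_args(sys.argv, configure_db=True, init_rpc=False)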
diff --git a/nova/console/websocketproxy.py b/nova/console/websocketproxy.py
index 6b57380f38..7dc68e16f8 100644
--- a/nova/console/websocketproxy.py
+++ b/nova/console/websocketproxy.py
@@ -18,19 +18,19 @@ Websocket proxy that is compatible with OpenStack Nova.
Leverages websockify.py by Joel Martin
'''
-from six.moves import http_cookies as Cookie
-import six.moves.urllib.parse as urlparse
import socket
import sys
+from oslo_config import cfg
from oslo_log import log as logging
+from six.moves import http_cookies as Cookie
+import six.moves.urllib.parse as urlparse
import websockify
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova import exception
from nova.i18n import _
-from oslo_config import cfg
LOG = logging.getLogger(__name__)
diff --git a/nova/consoleauth/manager.py b/nova/consoleauth/manager.py
index a6ddde3af1..87ae985706 100644
--- a/nova/consoleauth/manager.py
+++ b/nova/consoleauth/manager.py
@@ -23,12 +23,12 @@ from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
+from nova import cache_utils
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import rpcapi as compute_rpcapi
from nova.i18n import _LI, _LW
from nova import manager
from nova import objects
-from nova.openstack.common import memorycache
LOG = logging.getLogger(__name__)
@@ -52,17 +52,30 @@ class ConsoleAuthManager(manager.Manager):
def __init__(self, scheduler_driver=None, *args, **kwargs):
super(ConsoleAuthManager, self).__init__(service_name='consoleauth',
*args, **kwargs)
- self.mc = memorycache.get_client()
+ self._mc = None
+ self._mc_instance = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
+ @property
+ def mc(self):
+ if self._mc is None:
+ self._mc = cache_utils.get_client(CONF.console_token_ttl)
+ return self._mc
+
+ @property
+ def mc_instance(self):
+ if self._mc_instance is None:
+ self._mc_instance = cache_utils.get_client()
+ return self._mc_instance
+
def reset(self):
LOG.info(_LI('Reloading compute RPC API'))
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def _get_tokens_for_instance(self, instance_uuid):
- tokens_str = self.mc.get(instance_uuid.encode('UTF-8'))
+ tokens_str = self.mc_instance.get(instance_uuid.encode('UTF-8'))
if not tokens_str:
tokens = []
else:
@@ -86,17 +99,19 @@ class ConsoleAuthManager(manager.Manager):
# We need to log the warning message if the token is not cached
# successfully, because the failure will cause the console for
# instance to not be usable.
- if not self.mc.set(token.encode('UTF-8'),
- data, CONF.console_token_ttl):
+ if not self.mc.set(token.encode('UTF-8'), data):
LOG.warning(_LW("Token: %(token)s failed to save into memcached."),
{'token': token})
tokens = self._get_tokens_for_instance(instance_uuid)
# Remove the expired tokens from cache.
- tokens = [tok for tok in tokens if self.mc.get(tok.encode('UTF-8'))]
+ token_values = self.mc.get_multi(
+ [tok.encode('UTF-8') for tok in tokens])
+ tokens = [name for name, value in zip(tokens, token_values)
+ if value is not None]
tokens.append(token)
- if not self.mc.set(instance_uuid.encode('UTF-8'),
+ if not self.mc_instance.set(instance_uuid.encode('UTF-8'),
jsonutils.dumps(tokens)):
LOG.warning(_LW("Instance: %(instance_uuid)s failed to save "
"into memcached"),
@@ -136,6 +151,6 @@ class ConsoleAuthManager(manager.Manager):
def delete_tokens_for_instance(self, context, instance_uuid):
tokens = self._get_tokens_for_instance(instance_uuid)
- for token in tokens:
- self.mc.delete(token.encode('UTF-8'))
- self.mc.delete(instance_uuid.encode('UTF-8'))
+ self.mc.delete_multi(
+ [tok.encode('UTF-8') for tok in tokens])
+ self.mc_instance.delete(instance_uuid.encode('UTF-8'))
diff --git a/nova/context.py b/nova/context.py
index f37952f1ba..808f5dd3df 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -17,10 +17,11 @@
"""RequestContext: context for requests that persist through all of nova."""
+from contextlib import contextmanager
import copy
-from keystoneclient import auth
-from keystoneclient import service_catalog
+from keystoneauth1.access import service_catalog as ksa_service_catalog
+from keystoneauth1 import plugin
from oslo_context import context
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
@@ -35,8 +36,8 @@ from nova import utils
LOG = logging.getLogger(__name__)
-class _ContextAuthPlugin(auth.BaseAuthPlugin):
- """A keystoneclient auth plugin that uses the values from the Context.
+class _ContextAuthPlugin(plugin.BaseAuthPlugin):
+ """A keystoneauth auth plugin that uses the values from the Context.
Ideally we would use the plugin provided by auth_token middleware however
this plugin isn't serialized yet so we construct one from the serialized
@@ -47,8 +48,7 @@ class _ContextAuthPlugin(auth.BaseAuthPlugin):
super(_ContextAuthPlugin, self).__init__()
self.auth_token = auth_token
- sc = {'serviceCatalog': sc}
- self.service_catalog = service_catalog.ServiceCatalogV2(sc)
+ self.service_catalog = ksa_service_catalog.ServiceCatalogV2(sc)
def get_token(self, *args, **kwargs):
return self.auth_token
@@ -57,7 +57,7 @@ class _ContextAuthPlugin(auth.BaseAuthPlugin):
region_name=None, service_name=None, **kwargs):
return self.service_catalog.url_for(service_type=service_type,
service_name=service_name,
- endpoint_type=interface,
+ interface=interface,
region_name=region_name)
@@ -142,6 +142,12 @@ class RequestContext(context.RequestContext):
self.user_name = user_name
self.project_name = project_name
self.is_admin = is_admin
+
+ # NOTE(dheeraj): The following attribute is used by cellsv2 to store
+ # connection information for connecting to the target cell.
+ # It is only manipulated using the target_cell contextmanager
+ # provided by this module
+ self.db_connection = None
self.user_auth_plugin = user_auth_plugin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
@@ -273,3 +279,23 @@ def authorize_quota_class_context(context, class_name):
raise exception.Forbidden()
elif context.quota_class != class_name:
raise exception.Forbidden()
+
+
+@contextmanager
+def target_cell(context, cell_mapping):
+ """Adds database connection information to the context for communicating
+ with the given target cell.
+
+ :param context: The RequestContext to add database connection information
+ :param cell_mapping: A objects.CellMapping object
+ """
+ original_db_connection = context.db_connection
+ # avoid circular import
+ from nova import db
+ connection_string = cell_mapping.database_connection
+ context.db_connection = db.create_context_manager(connection_string)
+
+ try:
+ yield context
+ finally:
+ context.db_connection = original_db_connection
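A minimal usage sketch of the new context manager (assuming `ctxt` is a
RequestContext and `cell_mapping` an objects.CellMapping, as the docstring
states; the lookup call inside the block is hypothetical):

    from nova import context as nova_context

    # Inside the block, database calls made with this context are routed
    # to the target cell; the original connection is restored on exit,
    # even if the body raises.
    with nova_context.target_cell(ctxt, cell_mapping) as cctxt:
        lookup_instance_in_cell(cctxt, instance_uuid)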
diff --git a/nova/crypto.py b/nova/crypto.py
index db7816233e..7e07fb4763 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -26,6 +26,7 @@ import base64
import binascii
import os
+from Crypto.PublicKey import RSA
from cryptography import exceptions
from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import padding
@@ -161,8 +162,27 @@ def generate_x509_fingerprint(pem_key):
'Error message: %s') % ex)
+def generate_key(bits):
+ """Generate a paramiko RSAKey"""
+ # NOTE(dims): pycryptodome has changed the signature of the RSA.generate
+ # call. Specifically, progress_func has been dropped. paramiko still uses
+ # pycrypto. However some projects like latest pysaml2 have switched from
+ # pycrypto to pycryptodome as pycrypto seems to have been abandoned.
+ # paramiko project has started transition to pycryptodome as well but
+ # there is no release yet with that support. So at the moment depending on
+ # which version of pysaml2 is installed, Nova is likely to break. So we
+ # call "RSA.generate(bits)" which works on both pycrypto and pycryptodome
+ # and then wrap it into a paramiko.RSAKey
+ rsa = RSA.generate(bits)
+ key = paramiko.RSAKey(vals=(rsa.e, rsa.n))
+ key.d = rsa.d
+ key.p = rsa.p
+ key.q = rsa.q
+ return key
+
+
def generate_key_pair(bits=2048):
- key = paramiko.RSAKey.generate(bits)
+ key = generate_key(bits)
keyout = six.StringIO()
key.write_private_key(keyout)
private_key = keyout.getvalue()
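Because the wrapper returns a regular paramiko.RSAKey, existing callers are
unaffected by which RSA backend is installed. A small sketch:

    key = generate_key(2048)
    # The wrapped key supports the usual paramiko.PKey interface.
    assert key.get_bits() == 2048
    fingerprint = key.get_fingerprint()  # raw MD5 digest bytes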
diff --git a/nova/db/api.py b/nova/db/api.py
index 210af0e5ec..39b04c731f 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -86,6 +86,11 @@ def not_equal(*values):
return IMPL.not_equal(*values)
+def create_context_manager(connection):
+ """Return a context manager for a cell database connection."""
+ return IMPL.create_context_manager(connection=connection)
+
+
###################
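A hedged sketch of how a caller could use the new helper directly with oslo.db's enginefacade transaction scopes; the connection URL and the query are illustrative only, not part of this change:

    from nova import db
    from nova.db.sqlalchemy import models

    # ctxt: an existing nova RequestContext; the URL below is made up.
    ctxt_mgr = db.create_context_manager(
        'mysql+pymysql://nova:secret@cell1-db/nova')
    with ctxt_mgr.reader.using(ctxt) as session:
        services = session.query(models.Service).all()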
@@ -106,16 +111,14 @@ def service_destroy(context, service_id):
return IMPL.service_destroy(context, service_id)
-def service_get(context, service_id, use_slave=False):
+def service_get(context, service_id):
"""Get a service or raise if it does not exist."""
- return IMPL.service_get(context, service_id,
- use_slave=use_slave)
+ return IMPL.service_get(context, service_id)
-def service_get_minimum_version(context, binary, use_slave=False):
+def service_get_minimum_version(context, binary):
"""Get the minimum service version in the database."""
- return IMPL.service_get_minimum_version(context, binary,
- use_slave=use_slave)
+ return IMPL.service_get_minimum_version(context, binary)
def service_get_by_host_and_topic(context, host, topic):
@@ -148,13 +151,12 @@ def service_get_all_by_host(context, host):
return IMPL.service_get_all_by_host(context, host)
-def service_get_by_compute_host(context, host, use_slave=False):
+def service_get_by_compute_host(context, host):
"""Get the service entry for a given compute host.
Returns the service entry joined with the compute_node entry.
"""
- return IMPL.service_get_by_compute_host(context, host,
- use_slave=use_slave)
+ return IMPL.service_get_by_compute_host(context, host)
def service_create(context, values):
@@ -227,7 +229,7 @@ def compute_node_get_all(context):
return IMPL.compute_node_get_all(context)
-def compute_node_get_all_by_host(context, host, use_slave=False):
+def compute_node_get_all_by_host(context, host):
"""Get compute nodes by host name
:param context: The security context (admin)
@@ -235,7 +237,7 @@ def compute_node_get_all_by_host(context, host, use_slave=False):
:returns: List of dictionaries each containing compute node properties
"""
- return IMPL.compute_node_get_all_by_host(context, host, use_slave)
+ return IMPL.compute_node_get_all_by_host(context, host)
def compute_node_search_by_hypervisor(context, hypervisor_match):
@@ -475,6 +477,13 @@ def migration_get(context, migration_id):
return IMPL.migration_get(context, migration_id)
+def migration_get_by_id_and_instance(context, migration_id, instance_uuid):
+ """Finds a migration by the migration id and the instance uuid."""
+ return IMPL.migration_get_by_id_and_instance(context,
+ migration_id,
+ instance_uuid)
+
+
def migration_get_by_instance_and_status(context, instance_uuid, status):
"""Finds a migration by the instance uuid its migrating."""
return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
@@ -620,10 +629,9 @@ def virtual_interface_get_by_uuid(context, vif_uuid):
return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
-def virtual_interface_get_by_instance(context, instance_id, use_slave=False):
+def virtual_interface_get_by_instance(context, instance_id):
"""Gets all virtual_interfaces for instance."""
- return IMPL.virtual_interface_get_by_instance(context, instance_id,
- use_slave=use_slave)
+ return IMPL.virtual_interface_get_by_instance(context, instance_id)
def virtual_interface_get_by_instance_and_network(context, instance_id,
@@ -657,10 +665,9 @@ def instance_destroy(context, instance_uuid, constraint=None):
return IMPL.instance_destroy(context, instance_uuid, constraint)
-def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
+def instance_get_by_uuid(context, uuid, columns_to_join=None):
"""Get an instance or raise if it does not exist."""
- return IMPL.instance_get_by_uuid(context, uuid,
- columns_to_join, use_slave=use_slave)
+ return IMPL.instance_get_by_uuid(context, uuid, columns_to_join)
def instance_get(context, instance_id, columns_to_join=None):
@@ -676,7 +683,7 @@ def instance_get_all(context, columns_to_join=None):
def instance_get_all_by_filters(context, filters, sort_key='created_at',
sort_dir='desc', limit=None, marker=None,
- columns_to_join=None, use_slave=False):
+ columns_to_join=None):
"""Get all instances that match all filters."""
# Note: This function exists for backwards compatibility since calls to
# the instance layer coming in over RPC may specify the single sort
@@ -685,27 +692,24 @@ def instance_get_all_by_filters(context, filters, sort_key='created_at',
return IMPL.instance_get_all_by_filters(context, filters, sort_key,
sort_dir, limit=limit,
marker=marker,
- columns_to_join=columns_to_join,
- use_slave=use_slave)
+ columns_to_join=columns_to_join)
def instance_get_all_by_filters_sort(context, filters, limit=None,
marker=None, columns_to_join=None,
- use_slave=False, sort_keys=None,
- sort_dirs=None):
+ sort_keys=None, sort_dirs=None):
"""Get all instances that match all filters sorted by multiple keys.
sort_keys and sort_dirs must each be a list of strings.
"""
return IMPL.instance_get_all_by_filters_sort(
context, filters, limit=limit, marker=marker,
- columns_to_join=columns_to_join, use_slave=use_slave,
- sort_keys=sort_keys, sort_dirs=sort_dirs)
+ columns_to_join=columns_to_join, sort_keys=sort_keys,
+ sort_dirs=sort_dirs)
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None,
- use_slave=False,
columns_to_join=None):
"""Get instances and joins active during a certain time window.
@@ -714,16 +718,12 @@ def instance_get_active_by_window_joined(context, begin, end=None,
"""
return IMPL.instance_get_active_by_window_joined(context, begin, end,
project_id, host,
- use_slave=use_slave,
columns_to_join=columns_to_join)
-def instance_get_all_by_host(context, host,
- columns_to_join=None, use_slave=False):
+def instance_get_all_by_host(context, host, columns_to_join=None):
"""Get all instances belonging to a host."""
- return IMPL.instance_get_all_by_host(context, host,
- columns_to_join,
- use_slave=use_slave)
+ return IMPL.instance_get_all_by_host(context, host, columns_to_join)
def instance_get_all_by_host_and_node(context, host, node,
@@ -1209,20 +1209,16 @@ def block_device_mapping_update_or_create(context, values, legacy=True):
return IMPL.block_device_mapping_update_or_create(context, values, legacy)
-def block_device_mapping_get_all_by_instance_uuids(context, instance_uuids,
- use_slave=False):
+def block_device_mapping_get_all_by_instance_uuids(context, instance_uuids):
"""Get all block device mapping belonging to a list of instances."""
return IMPL.block_device_mapping_get_all_by_instance_uuids(context,
- instance_uuids,
- use_slave)
+ instance_uuids)
-def block_device_mapping_get_all_by_instance(context, instance_uuid,
- use_slave=False):
+def block_device_mapping_get_all_by_instance(context, instance_uuid):
"""Get all block device mapping belonging to an instance."""
return IMPL.block_device_mapping_get_all_by_instance(context,
- instance_uuid,
- use_slave)
+ instance_uuid)
def block_device_mapping_get_all_by_volume_id(context, volume_id,
@@ -1661,16 +1657,14 @@ def agent_build_update(context, agent_build_id, values):
####################
-def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
+def bw_usage_get(context, uuid, start_period, mac):
"""Return bw usage for instance and mac in a given audit period."""
- return IMPL.bw_usage_get(context, uuid, start_period, mac,
- use_slave=use_slave)
+ return IMPL.bw_usage_get(context, uuid, start_period, mac)
-def bw_usage_get_by_uuids(context, uuids, start_period, use_slave=False):
+def bw_usage_get_by_uuids(context, uuids, start_period):
"""Return bw usages for instance(s) in a given audit period."""
- return IMPL.bw_usage_get_by_uuids(context, uuids, start_period,
- use_slave=use_slave)
+ return IMPL.bw_usage_get_by_uuids(context, uuids, start_period)
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index fd93675ee9..f1b27ff288 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -135,9 +135,9 @@ main_context_manager = enginefacade.transaction_context()
api_context_manager = enginefacade.transaction_context()
-def _get_db_conf(conf_group):
+def _get_db_conf(conf_group, connection=None):
kw = dict(
- connection=conf_group.connection,
+ connection=connection or conf_group.connection,
slave_connection=conf_group.slave_connection,
sqlite_fk=False,
__autocommit=True,
@@ -155,27 +155,49 @@ def _get_db_conf(conf_group):
return kw
+def _context_manager_from_context(context):
+ if context:
+ try:
+ return context.db_connection
+ except AttributeError:
+ pass
+
+
def configure(conf):
main_context_manager.configure(**_get_db_conf(conf.database))
api_context_manager.configure(**_get_db_conf(conf.api_database))
-def get_engine(use_slave=False):
- return main_context_manager.get_legacy_facade().get_engine(
- use_slave=use_slave)
+def create_context_manager(connection=None):
+ """Create a database context manager object.
+ :param connection: The database connection string
+ """
+ ctxt_mgr = enginefacade.transaction_context()
+ ctxt_mgr.configure(**_get_db_conf(CONF.database, connection=connection))
+ return ctxt_mgr
-def get_api_engine():
- return api_context_manager.get_legacy_facade().get_engine()
+def get_context_manager(context):
+ """Get a database context manager object.
-def get_session(use_slave=False, **kwargs):
- return main_context_manager.get_legacy_facade().get_session(
- use_slave=use_slave, **kwargs)
+ :param context: The request context that can contain a context manager
+ """
+ return _context_manager_from_context(context) or main_context_manager
-def get_api_session(**kwargs):
- return api_context_manager.get_legacy_facade().get_session(**kwargs)
+def get_engine(use_slave=False, context=None):
+ """Get a database engine object.
+
+ :param use_slave: Whether to use the slave connection
+ :param context: The request context that can contain a context manager
+ """
+ ctxt_mgr = _context_manager_from_context(context) or main_context_manager
+ return ctxt_mgr.get_legacy_facade().get_engine(use_slave=use_slave)
+
+
+def get_api_engine():
+ return api_context_manager.get_legacy_facade().get_engine()
_SHADOW_TABLE_PREFIX = 'shadow_'
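Taken together with target_cell() in nova/context.py, the lookup order is: a context manager carried on the request context wins, otherwise the global main_context_manager is used. Roughly, where ctxt and cell_mapping are assumptions:

    from nova import context as nova_context
    from nova.db.sqlalchemy import api as db_api

    # Outside target_cell the global manager is returned.
    assert db_api.get_context_manager(ctxt) is db_api.main_context_manager

    with nova_context.target_cell(ctxt, cell_mapping):
        # Inside, the per-cell manager stored on the context wins.
        assert db_api.get_context_manager(ctxt) is ctxt.db_connection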
@@ -264,8 +286,6 @@ def select_db_reader_mode(f):
def model_query(context, model,
args=None,
- session=None,
- use_slave=False,
read_deleted=None,
project_only=False):
"""Query helper that accounts for context's `read_deleted` field.
@@ -273,9 +293,6 @@ def model_query(context, model,
:param context: NovaContext of the query.
:param model: Model to query. Must be a subclass of ModelBase.
:param args: Arguments to query. If None - model is used.
- :param session: If present, the session to use.
- :param use_slave: If true, use a slave connection to the DB if creating a
- session.
:param read_deleted: If not None, overrides context's read_deleted field.
Permitted values are 'no', which does not return
deleted values; 'only', which only returns deleted
@@ -286,14 +303,6 @@ def model_query(context, model,
'allow_none', restriction includes project_id = None.
"""
- if hasattr(context, 'session'):
- session = context.session
-
- if session is None:
- if CONF.database.slave_connection == '':
- use_slave = False
- session = get_session(use_slave=use_slave)
-
if read_deleted is None:
read_deleted = context.read_deleted
@@ -308,7 +317,8 @@ def model_query(context, model,
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
- query = sqlalchemyutils.model_query(model, session, args, **query_kwargs)
+ query = sqlalchemyutils.model_query(
+ model, context.session, args, **query_kwargs)
# We can't use oslo.db model_query's project_id here, as it doesn't allow
# us to return both our projects and unowned projects.
@@ -342,30 +352,28 @@ def convert_objects_related_datetimes(values, *datetime_keys):
return values
-def _sync_instances(context, project_id, user_id, session):
+def _sync_instances(context, project_id, user_id):
return dict(zip(('instances', 'cores', 'ram'),
- _instance_data_get_for_user(
- context, project_id, user_id, session)))
+ _instance_data_get_for_user(context, project_id, user_id)))
-def _sync_floating_ips(context, project_id, user_id, session):
+def _sync_floating_ips(context, project_id, user_id):
return dict(floating_ips=_floating_ip_count_by_project(
- context, project_id, session))
+ context, project_id))
-def _sync_fixed_ips(context, project_id, user_id, session):
- return dict(fixed_ips=_fixed_ip_count_by_project(
- context, project_id, session))
+def _sync_fixed_ips(context, project_id, user_id):
+ return dict(fixed_ips=_fixed_ip_count_by_project(context, project_id))
-def _sync_security_groups(context, project_id, user_id, session):
+def _sync_security_groups(context, project_id, user_id):
return dict(security_groups=_security_group_count_by_project_and_user(
- context, project_id, user_id, session))
+ context, project_id, user_id))
-def _sync_server_groups(context, project_id, user_id, session):
+def _sync_server_groups(context, project_id, user_id):
return dict(server_groups=_instance_group_count_by_project_and_user(
- context, project_id, user_id, session))
+ context, project_id, user_id))
QUOTA_SYNC_FUNCTIONS = {
'_sync_instances': _sync_instances,
@@ -425,28 +433,25 @@ class InequalityCondition(object):
###################
+@main_context_manager.writer
def service_destroy(context, service_id):
- session = get_session()
- with session.begin():
- service = _service_get(context, service_id)
+ service = service_get(context, service_id)
- model_query(context, models.Service, session=session).\
- filter_by(id=service_id).\
- soft_delete(synchronize_session=False)
+ model_query(context, models.Service).\
+ filter_by(id=service_id).\
+ soft_delete(synchronize_session=False)
- # TODO(sbauza): Remove the service_id filter in a later release
- # once we are sure that all compute nodes report the host field
- model_query(context, models.ComputeNode, session=session).\
- filter(or_(models.ComputeNode.service_id == service_id,
- models.ComputeNode.host == service['host'])).\
- soft_delete(synchronize_session=False)
+ # TODO(sbauza): Remove the service_id filter in a later release
+ # once we are sure that all compute nodes report the host field
+ model_query(context, models.ComputeNode).\
+ filter(or_(models.ComputeNode.service_id == service_id,
+ models.ComputeNode.host == service['host'])).\
+ soft_delete(synchronize_session=False)
-def _service_get(context, service_id, session=None,
- use_slave=False):
- query = model_query(context, models.Service, session=session,
- use_slave=use_slave).\
- filter_by(id=service_id)
+@main_context_manager.reader
+def service_get(context, service_id):
+ query = model_query(context, models.Service).filter_by(id=service_id)
result = query.first()
if not result:
@@ -455,22 +460,17 @@ def _service_get(context, service_id, session=None,
return result
-def service_get(context, service_id, use_slave=False):
- return _service_get(context, service_id,
- use_slave=use_slave)
-
-
-def service_get_minimum_version(context, binary, use_slave=False):
- session = get_session(use_slave=use_slave)
- with session.begin():
- min_version = session.query(
- func.min(models.Service.version)).\
- filter(models.Service.binary == binary).\
- filter(models.Service.forced_down == false()).\
- scalar()
+@main_context_manager.reader.allow_async
+def service_get_minimum_version(context, binary):
+ min_version = context.session.query(
+ func.min(models.Service.version)).\
+ filter(models.Service.binary == binary).\
+ filter(models.Service.forced_down == false()).\
+ scalar()
return min_version
+@main_context_manager.reader
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
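The decorators used throughout the rest of this file are oslo.db enginefacade scopes: @main_context_manager.reader and .writer open a transaction around the call and attach the session to the request context (which is why model_query can now rely on context.session), and .allow_async additionally lets the read run against the slave engine when one is configured, replacing the old per-call use_slave flags. A schematic example with a made-up model name:

    @main_context_manager.reader
    def widget_get(context, widget_id):
        # enginefacade has begun a read transaction and set context.session
        return model_query(context, models.Widget).\
            filter_by(id=widget_id).first()

    @main_context_manager.writer
    def widget_create(context, values):
        ref = models.Widget()
        ref.update(values)
        ref.save(context.session)  # persists within the managed transaction
        return ref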
@@ -480,6 +480,7 @@ def service_get_all(context, disabled=None):
return query.all()
+@main_context_manager.reader
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
@@ -487,6 +488,7 @@ def service_get_all_by_topic(context, topic):
all()
+@main_context_manager.reader
def service_get_by_host_and_topic(context, host, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
@@ -495,6 +497,7 @@ def service_get_by_host_and_topic(context, host, topic):
first()
+@main_context_manager.reader
def service_get_all_by_binary(context, binary):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
@@ -502,6 +505,7 @@ def service_get_all_by_binary(context, binary):
all()
+@main_context_manager.reader
def service_get_by_host_and_binary(context, host, binary):
result = model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
@@ -514,15 +518,16 @@ def service_get_by_host_and_binary(context, host, binary):
return result
+@main_context_manager.reader
def service_get_all_by_host(context, host):
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
-def service_get_by_compute_host(context, host, use_slave=False):
- result = model_query(context, models.Service, read_deleted="no",
- use_slave=use_slave).\
+@main_context_manager.reader.allow_async
+def service_get_by_compute_host(context, host):
+ result = model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
filter_by(binary='nova-compute').\
first()
@@ -533,13 +538,14 @@ def service_get_by_compute_host(context, host, use_slave=False):
return result
+@main_context_manager.writer
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
try:
- service_ref.save()
+ service_ref.save(context.session)
except db_exc.DBDuplicateEntry as e:
if 'binary' in e.columns:
raise exception.ServiceBinaryExists(host=values.get('host'),
@@ -550,29 +556,25 @@ def service_create(context, values):
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def service_update(context, service_id, values):
- session = get_session()
- with session.begin():
- service_ref = _service_get(context, service_id, session=session)
- # Only servicegroup.drivers.db.DbDriver._report_state() updates
- # 'report_count', so if that value changes then store the timestamp
- # as the last time we got a state report.
- if 'report_count' in values:
- if values['report_count'] > service_ref.report_count:
- service_ref.last_seen_up = timeutils.utcnow()
- service_ref.update(values)
+ service_ref = service_get(context, service_id)
+ # Only servicegroup.drivers.db.DbDriver._report_state() updates
+ # 'report_count', so if that value changes then store the timestamp
+ # as the last time we got a state report.
+ if 'report_count' in values:
+ if values['report_count'] > service_ref.report_count:
+ service_ref.last_seen_up = timeutils.utcnow()
+ service_ref.update(values)
return service_ref
###################
+@main_context_manager.reader
def compute_node_get(context, compute_id):
- return _compute_node_get(context, compute_id)
-
-
-def _compute_node_get(context, compute_id, session=None):
- result = model_query(context, models.ComputeNode, session=session).\
+ result = model_query(context, models.ComputeNode).\
filter_by(id=compute_id).\
first()
@@ -582,6 +584,7 @@ def _compute_node_get(context, compute_id, session=None):
return result
+@main_context_manager.reader
def compute_nodes_get_by_service_id(context, service_id):
result = model_query(context, models.ComputeNode, read_deleted='no').\
filter_by(service_id=service_id).\
@@ -593,6 +596,7 @@ def compute_nodes_get_by_service_id(context, service_id):
return result
+@main_context_manager.reader
def compute_node_get_by_host_and_nodename(context, host, nodename):
result = model_query(context, models.ComputeNode, read_deleted='no').\
filter_by(host=host, hypervisor_hostname=nodename).\
@@ -604,9 +608,9 @@ def compute_node_get_by_host_and_nodename(context, host, nodename):
return result
-def compute_node_get_all_by_host(context, host, use_slave=False):
- result = model_query(context, models.ComputeNode, read_deleted='no',
- use_slave=use_slave).\
+@main_context_manager.reader.allow_async
+def compute_node_get_all_by_host(context, host):
+ result = model_query(context, models.ComputeNode, read_deleted='no').\
filter_by(host=host).\
all()
@@ -616,10 +620,12 @@ def compute_node_get_all_by_host(context, host, use_slave=False):
return result
+@main_context_manager.reader
def compute_node_get_all(context):
return model_query(context, models.ComputeNode, read_deleted='no').all()
+@main_context_manager.reader
def compute_node_search_by_hypervisor(context, hypervisor_match):
field = models.ComputeNode.hypervisor_hostname
return model_query(context, models.ComputeNode).\
@@ -627,6 +633,7 @@ def compute_node_search_by_hypervisor(context, hypervisor_match):
all()
+@main_context_manager.writer
def compute_node_create(context, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data.
@@ -635,40 +642,39 @@ def compute_node_create(context, values):
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
- compute_node_ref.save()
+ compute_node_ref.save(context.session)
return compute_node_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def compute_node_update(context, compute_id, values):
"""Updates the ComputeNode record with the most recent data."""
- session = get_session()
- with session.begin():
- compute_ref = _compute_node_get(context, compute_id, session=session)
- # Always update this, even if there's going to be no other
- # changes in data. This ensures that we invalidate the
- # scheduler cache of compute node data in case of races.
- values['updated_at'] = timeutils.utcnow()
- convert_objects_related_datetimes(values)
- compute_ref.update(values)
+ compute_ref = compute_node_get(context, compute_id)
+ # Always update this, even if there's going to be no other
+ # changes in data. This ensures that we invalidate the
+ # scheduler cache of compute node data in case of races.
+ values['updated_at'] = timeutils.utcnow()
+ convert_objects_related_datetimes(values)
+ compute_ref.update(values)
return compute_ref
+@main_context_manager.writer
def compute_node_delete(context, compute_id):
"""Delete a ComputeNode record."""
- session = get_session()
- with session.begin():
- result = model_query(context, models.ComputeNode, session=session).\
- filter_by(id=compute_id).\
- soft_delete(synchronize_session=False)
+ result = model_query(context, models.ComputeNode).\
+ filter_by(id=compute_id).\
+ soft_delete(synchronize_session=False)
- if not result:
- raise exception.ComputeHostNotFound(host=compute_id)
+ if not result:
+ raise exception.ComputeHostNotFound(host=compute_id)
+@main_context_manager.reader
def compute_node_statistics(context):
"""Compute statistics over all compute nodes."""
@@ -743,6 +749,7 @@ def certificate_get_all_by_user_and_project(context, user_id, project_id):
@require_context
+@main_context_manager.reader
def floating_ip_get(context, id):
try:
result = model_query(context, models.FloatingIp, project_only=True).\
@@ -760,6 +767,7 @@ def floating_ip_get(context, id):
@require_context
+@main_context_manager.reader
def floating_ip_get_pools(context):
pools = []
for result in model_query(context, models.FloatingIp,
@@ -771,54 +779,50 @@ def floating_ip_get_pools(context):
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
+@main_context_manager.writer
def floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
nova.context.authorize_project_context(context, project_id)
- session = get_session()
- with session.begin():
- floating_ip_ref = model_query(context, models.FloatingIp,
- session=session, read_deleted="no").\
- filter_by(fixed_ip_id=None).\
- filter_by(project_id=None).\
- filter_by(pool=pool).\
- first()
-
- if not floating_ip_ref:
- raise exception.NoMoreFloatingIps()
-
- params = {'project_id': project_id, 'auto_assigned': auto_assigned}
-
- rows_update = model_query(context, models.FloatingIp,
- session=session, read_deleted="no").\
- filter_by(id=floating_ip_ref['id']).\
- filter_by(fixed_ip_id=None).\
- filter_by(project_id=None).\
- filter_by(pool=pool).\
- update(params, synchronize_session='evaluate')
-
- if not rows_update:
- LOG.debug('The row was updated in a concurrent transaction, '
- 'we will fetch another one')
- raise db_exc.RetryRequest(exception.FloatingIpAllocateFailed())
+ floating_ip_ref = model_query(context, models.FloatingIp,
+ read_deleted="no").\
+ filter_by(fixed_ip_id=None).\
+ filter_by(project_id=None).\
+ filter_by(pool=pool).\
+ first()
+
+ if not floating_ip_ref:
+ raise exception.NoMoreFloatingIps()
+
+ params = {'project_id': project_id, 'auto_assigned': auto_assigned}
+
+ rows_update = model_query(context, models.FloatingIp, read_deleted="no").\
+ filter_by(id=floating_ip_ref['id']).\
+ filter_by(fixed_ip_id=None).\
+ filter_by(project_id=None).\
+ filter_by(pool=pool).\
+ update(params, synchronize_session='evaluate')
+
+ if not rows_update:
+ LOG.debug('The row was updated in a concurrent transaction, '
+ 'we will fetch another one')
+ raise db_exc.RetryRequest(exception.FloatingIpAllocateFailed())
return floating_ip_ref['address']
@require_context
+@main_context_manager.writer
def floating_ip_bulk_create(context, ips, want_result=True):
- session = get_session()
- with session.begin():
- try:
- tab = models.FloatingIp().__table__
- session.execute(tab.insert(), ips)
- except db_exc.DBDuplicateEntry as e:
- raise exception.FloatingIpExists(address=e.value)
+ try:
+ tab = models.FloatingIp().__table__
+ context.session.execute(tab.insert(), ips)
+ except db_exc.DBDuplicateEntry as e:
+ raise exception.FloatingIpExists(address=e.value)
- if want_result:
- return model_query(
- context, models.FloatingIp, session=session).filter(
- models.FloatingIp.address.in_(
- [ip['address'] for ip in ips])).all()
+ if want_result:
+ return model_query(context, models.FloatingIp).filter(
+ models.FloatingIp.address.in_(
+ [ip['address'] for ip in ips])).all()
def _ip_range_splitter(ips, block_size=256):
@@ -839,24 +843,23 @@ def _ip_range_splitter(ips, block_size=256):
@require_context
+@main_context_manager.writer
def floating_ip_bulk_destroy(context, ips):
- session = get_session()
- with session.begin():
- project_id_to_quota_count = collections.defaultdict(int)
- for ip_block in _ip_range_splitter(ips):
- # Find any floating IPs that were not auto_assigned and
- # thus need quota released.
- query = model_query(context, models.FloatingIp, session=session).\
- filter(models.FloatingIp.address.in_(ip_block)).\
- filter_by(auto_assigned=False)
- for row in query.all():
- # The count is negative since we release quota by
- # reserving negative quota.
- project_id_to_quota_count[row['project_id']] -= 1
- # Delete the floating IPs.
- model_query(context, models.FloatingIp, session=session).\
- filter(models.FloatingIp.address.in_(ip_block)).\
- soft_delete(synchronize_session='fetch')
+ project_id_to_quota_count = collections.defaultdict(int)
+ for ip_block in _ip_range_splitter(ips):
+ # Find any floating IPs that were not auto_assigned and
+ # thus need quota released.
+ query = model_query(context, models.FloatingIp).\
+ filter(models.FloatingIp.address.in_(ip_block)).\
+ filter_by(auto_assigned=False)
+ for row in query.all():
+ # The count is negative since we release quota by
+ # reserving negative quota.
+ project_id_to_quota_count[row['project_id']] -= 1
+ # Delete the floating IPs.
+ model_query(context, models.FloatingIp).\
+ filter(models.FloatingIp.address.in_(ip_block)).\
+ soft_delete(synchronize_session='fetch')
# Delete the quotas, if needed.
# Quota update happens in a separate transaction, so previous must have
@@ -874,21 +877,21 @@ def floating_ip_bulk_destroy(context, ips):
@require_context
+@main_context_manager.writer
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
try:
- floating_ip_ref.save()
+ floating_ip_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return floating_ip_ref
-def _floating_ip_count_by_project(context, project_id, session=None):
+def _floating_ip_count_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
- return model_query(context, models.FloatingIp, read_deleted="no",
- session=session).\
+ return model_query(context, models.FloatingIp, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
count()
@@ -896,33 +899,33 @@ def _floating_ip_count_by_project(context, project_id, session=None):
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
- session = get_session()
- with session.begin():
- fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
- filter_by(address=fixed_address).\
- options(joinedload('network')).\
- first()
- if not fixed_ip_ref:
- raise exception.FixedIpNotFoundForAddress(address=fixed_address)
- rows = model_query(context, models.FloatingIp, session=session).\
- filter_by(address=floating_address).\
- filter(models.FloatingIp.project_id ==
- context.project_id).\
- filter(or_(models.FloatingIp.fixed_ip_id ==
- fixed_ip_ref['id'],
- models.FloatingIp.fixed_ip_id.is_(None))).\
- update({'fixed_ip_id': fixed_ip_ref['id'], 'host': host})
-
- if not rows:
- raise exception.FloatingIpAssociateFailed(address=floating_address)
+ fixed_ip_ref = model_query(context, models.FixedIp).\
+ filter_by(address=fixed_address).\
+ options(joinedload('network')).\
+ first()
+ if not fixed_ip_ref:
+ raise exception.FixedIpNotFoundForAddress(address=fixed_address)
+ rows = model_query(context, models.FloatingIp).\
+ filter_by(address=floating_address).\
+ filter(models.FloatingIp.project_id ==
+ context.project_id).\
+ filter(or_(models.FloatingIp.fixed_ip_id ==
+ fixed_ip_ref['id'],
+ models.FloatingIp.fixed_ip_id.is_(None))).\
+ update({'fixed_ip_id': fixed_ip_ref['id'], 'host': host})
+
+ if not rows:
+ raise exception.FloatingIpAssociateFailed(address=floating_address)
- return fixed_ip_ref
+ return fixed_ip_ref
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def floating_ip_deallocate(context, address):
return model_query(context, models.FloatingIp).\
filter_by(address=address).\
@@ -935,6 +938,7 @@ def floating_ip_deallocate(context, address):
@require_context
+@main_context_manager.writer
def floating_ip_destroy(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
@@ -942,32 +946,30 @@ def floating_ip_destroy(context, address):
@require_context
+@main_context_manager.writer
def floating_ip_disassociate(context, address):
- session = get_session()
- with session.begin():
- floating_ip_ref = model_query(context,
- models.FloatingIp,
- session=session).\
- filter_by(address=address).\
- first()
- if not floating_ip_ref:
- raise exception.FloatingIpNotFoundForAddress(address=address)
+ floating_ip_ref = model_query(context,
+ models.FloatingIp).\
+ filter_by(address=address).\
+ first()
+ if not floating_ip_ref:
+ raise exception.FloatingIpNotFoundForAddress(address=address)
- fixed_ip_ref = model_query(context, models.FixedIp, session=session).\
- filter_by(id=floating_ip_ref['fixed_ip_id']).\
- options(joinedload('network')).\
- first()
- floating_ip_ref.fixed_ip_id = None
- floating_ip_ref.host = None
+ fixed_ip_ref = model_query(context, models.FixedIp).\
+ filter_by(id=floating_ip_ref['fixed_ip_id']).\
+ options(joinedload('network')).\
+ first()
+ floating_ip_ref.fixed_ip_id = None
+ floating_ip_ref.host = None
return fixed_ip_ref
-def _floating_ip_get_all(context, session=None):
- return model_query(context, models.FloatingIp, read_deleted="no",
- session=session)
+def _floating_ip_get_all(context):
+ return model_query(context, models.FloatingIp, read_deleted="no")
+@main_context_manager.reader
def floating_ip_get_all(context):
floating_ip_refs = _floating_ip_get_all(context).\
options(joinedload('fixed_ip')).\
@@ -977,6 +979,7 @@ def floating_ip_get_all(context):
return floating_ip_refs
+@main_context_manager.reader
def floating_ip_get_all_by_host(context, host):
floating_ip_refs = _floating_ip_get_all(context).\
filter_by(host=host).\
@@ -988,6 +991,7 @@ def floating_ip_get_all_by_host(context, host):
@require_context
+@main_context_manager.reader
def floating_ip_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
@@ -999,17 +1003,18 @@ def floating_ip_get_all_by_project(context, project_id):
@require_context
+@main_context_manager.reader
def floating_ip_get_by_address(context, address):
return _floating_ip_get_by_address(context, address)
-def _floating_ip_get_by_address(context, address, session=None):
+def _floating_ip_get_by_address(context, address):
# if address string is empty explicitly set it to None
if not address:
address = None
try:
- result = model_query(context, models.FloatingIp, session=session).\
+ result = model_query(context, models.FloatingIp).\
filter_by(address=address).\
options(joinedload_all('fixed_ip.instance')).\
first()
@@ -1030,6 +1035,7 @@ def _floating_ip_get_by_address(context, address, session=None):
@require_context
+@main_context_manager.reader
def floating_ip_get_by_fixed_address(context, fixed_address):
return model_query(context, models.FloatingIp).\
outerjoin(models.FixedIp,
@@ -1040,6 +1046,7 @@ def floating_ip_get_by_fixed_address(context, fixed_address):
@require_context
+@main_context_manager.reader
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
return model_query(context, models.FloatingIp).\
filter_by(fixed_ip_id=fixed_ip_id).\
@@ -1047,16 +1054,15 @@ def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
@require_context
+@main_context_manager.writer
def floating_ip_update(context, address, values):
- session = get_session()
- with session.begin():
- float_ip_ref = _floating_ip_get_by_address(context, address, session)
- float_ip_ref.update(values)
- try:
- float_ip_ref.save(session=session)
- except db_exc.DBDuplicateEntry:
- raise exception.FloatingIpExists(address=values['address'])
- return float_ip_ref
+ float_ip_ref = _floating_ip_get_by_address(context, address)
+ float_ip_ref.update(values)
+ try:
+ float_ip_ref.save(context.session)
+ except db_exc.DBDuplicateEntry:
+ raise exception.FloatingIpExists(address=values['address'])
+ return float_ip_ref
###################
@@ -1116,6 +1122,7 @@ def dnsdomain_get_all(context):
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
+@main_context_manager.writer
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False, virtual_interface_id=None):
"""Keyword arguments:
@@ -1125,165 +1132,165 @@ def fixed_ip_associate(context, address, instance_uuid, network_id=None,
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
- session = get_session()
- with session.begin():
- network_or_none = or_(models.FixedIp.network_id == network_id,
- models.FixedIp.network_id == null())
- fixed_ip_ref = model_query(context, models.FixedIp, session=session,
- read_deleted="no").\
- filter(network_or_none).\
- filter_by(reserved=reserved).\
- filter_by(address=address).\
- first()
+ network_or_none = or_(models.FixedIp.network_id == network_id,
+ models.FixedIp.network_id == null())
+ fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\
+ filter(network_or_none).\
+ filter_by(reserved=reserved).\
+ filter_by(address=address).\
+ first()
- if fixed_ip_ref is None:
- raise exception.FixedIpNotFoundForNetwork(address=address,
- network_uuid=network_id)
- if fixed_ip_ref.instance_uuid:
- raise exception.FixedIpAlreadyInUse(address=address,
- instance_uuid=instance_uuid)
-
- params = {'instance_uuid': instance_uuid,
- 'allocated': virtual_interface_id is not None}
- if not fixed_ip_ref.network_id:
- params['network_id'] = network_id
- if virtual_interface_id:
- params['virtual_interface_id'] = virtual_interface_id
-
- rows_updated = model_query(context, models.FixedIp, session=session,
- read_deleted="no").\
- filter_by(id=fixed_ip_ref.id).\
- filter(network_or_none).\
- filter_by(reserved=reserved).\
- filter_by(address=address).\
- update(params, synchronize_session='evaluate')
-
- if not rows_updated:
- LOG.debug('The row was updated in a concurrent transaction, '
- 'we will fetch another row')
- raise db_exc.RetryRequest(
- exception.FixedIpAssociateFailed(net=network_id))
+ if fixed_ip_ref is None:
+ raise exception.FixedIpNotFoundForNetwork(address=address,
+ network_uuid=network_id)
+ if fixed_ip_ref.instance_uuid:
+ raise exception.FixedIpAlreadyInUse(address=address,
+ instance_uuid=instance_uuid)
+
+ params = {'instance_uuid': instance_uuid,
+ 'allocated': virtual_interface_id is not None}
+ if not fixed_ip_ref.network_id:
+ params['network_id'] = network_id
+ if virtual_interface_id:
+ params['virtual_interface_id'] = virtual_interface_id
+
+ rows_updated = model_query(context, models.FixedIp, read_deleted="no").\
+ filter_by(id=fixed_ip_ref.id).\
+ filter(network_or_none).\
+ filter_by(reserved=reserved).\
+ filter_by(address=address).\
+ update(params, synchronize_session='evaluate')
+
+ if not rows_updated:
+ LOG.debug('The row was updated in a concurrent transaction, '
+ 'we will fetch another row')
+ raise db_exc.RetryRequest(
+ exception.FixedIpAssociateFailed(net=network_id))
return fixed_ip_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
+@main_context_manager.writer
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None, virtual_interface_id=None):
+ """allocate a fixed ip out of a fixed ip network pool.
+
+ This allocates an unallocated fixed ip out of a specified
+ network. We sort by updated_at to hand out the oldest address in
+ the list.
+
+ """
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
- session = get_session()
- with session.begin():
- network_or_none = or_(models.FixedIp.network_id == network_id,
- models.FixedIp.network_id == null())
- fixed_ip_ref = model_query(context, models.FixedIp, session=session,
- read_deleted="no").\
- filter(network_or_none).\
- filter_by(reserved=False).\
- filter_by(instance_uuid=None).\
- filter_by(host=None).\
- filter_by(leased=False).\
- first()
+ network_or_none = or_(models.FixedIp.network_id == network_id,
+ models.FixedIp.network_id == null())
+ fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\
+ filter(network_or_none).\
+ filter_by(reserved=False).\
+ filter_by(instance_uuid=None).\
+ filter_by(host=None).\
+ filter_by(leased=False).\
+ order_by(asc(models.FixedIp.updated_at)).\
+ first()
- if not fixed_ip_ref:
- raise exception.NoMoreFixedIps(net=network_id)
+ if not fixed_ip_ref:
+ raise exception.NoMoreFixedIps(net=network_id)
- params = {'allocated': virtual_interface_id is not None}
- if fixed_ip_ref['network_id'] is None:
- params['network_id'] = network_id
- if instance_uuid:
- params['instance_uuid'] = instance_uuid
- if host:
- params['host'] = host
- if virtual_interface_id:
- params['virtual_interface_id'] = virtual_interface_id
-
- rows_updated = model_query(context, models.FixedIp, session=session,
- read_deleted="no").\
- filter_by(id=fixed_ip_ref['id']).\
- filter_by(network_id=fixed_ip_ref['network_id']).\
- filter_by(reserved=False).\
- filter_by(instance_uuid=None).\
- filter_by(host=None).\
- filter_by(leased=False).\
- filter_by(address=fixed_ip_ref['address']).\
- update(params, synchronize_session='evaluate')
-
- if not rows_updated:
- LOG.debug('The row was updated in a concurrent transaction, '
- 'we will fetch another row')
- raise db_exc.RetryRequest(
- exception.FixedIpAssociateFailed(net=network_id))
+ params = {'allocated': virtual_interface_id is not None}
+ if fixed_ip_ref['network_id'] is None:
+ params['network_id'] = network_id
+ if instance_uuid:
+ params['instance_uuid'] = instance_uuid
+ if host:
+ params['host'] = host
+ if virtual_interface_id:
+ params['virtual_interface_id'] = virtual_interface_id
+
+ rows_updated = model_query(context, models.FixedIp, read_deleted="no").\
+ filter_by(id=fixed_ip_ref['id']).\
+ filter_by(network_id=fixed_ip_ref['network_id']).\
+ filter_by(reserved=False).\
+ filter_by(instance_uuid=None).\
+ filter_by(host=None).\
+ filter_by(leased=False).\
+ filter_by(address=fixed_ip_ref['address']).\
+ update(params, synchronize_session='evaluate')
+
+ if not rows_updated:
+ LOG.debug('The row was updated in a concurrent transaction, '
+ 'we will fetch another row')
+ raise db_exc.RetryRequest(
+ exception.FixedIpAssociateFailed(net=network_id))
return fixed_ip_ref
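Both allocate functions above follow the same optimistic pattern: read a candidate row without locking, then issue a compare-and-swap style UPDATE whose filter re-checks everything just read; zero rows updated means a concurrent transaction won the race, and raising db_exc.RetryRequest makes wrap_db_retry(retry_on_request=True) re-run the whole function. A schematic reduction, where claim_free_row and its filters are illustrative only:

    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
                               retry_on_request=True)
    @main_context_manager.writer
    def claim_free_row(context):
        # 1. Pick a candidate without locking it.
        row = model_query(context, models.FixedIp, read_deleted="no").\
            filter_by(instance_uuid=None).first()
        if not row:
            raise exception.NoMoreFixedIps(net=None)
        # 2. Compare-and-swap: the filter re-checks what step 1 read.
        updated = model_query(context, models.FixedIp, read_deleted="no").\
            filter_by(id=row.id, instance_uuid=None).\
            update({'allocated': True}, synchronize_session='evaluate')
        # 3. Zero rows means we lost the race; ask wrap_db_retry to retry.
        if not updated:
            raise db_exc.RetryRequest(
                exception.FixedIpAssociateFailed(net=None))
        return row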
@require_context
+@main_context_manager.writer
def fixed_ip_create(context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
try:
- fixed_ip_ref.save()
+ fixed_ip_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=values['address'])
return fixed_ip_ref
@require_context
+@main_context_manager.writer
def fixed_ip_bulk_create(context, ips):
- engine = get_engine()
- with engine.begin() as conn:
- try:
- tab = models.FixedIp.__table__
- conn.execute(tab.insert(), ips)
- except db_exc.DBDuplicateEntry as e:
- raise exception.FixedIpExists(address=e.value)
+ try:
+ tab = models.FixedIp.__table__
+ context.session.execute(tab.insert(), ips)
+ except db_exc.DBDuplicateEntry as e:
+ raise exception.FixedIpExists(address=e.value)
@require_context
+@main_context_manager.writer
def fixed_ip_disassociate(context, address):
- session = get_session()
- with session.begin():
- _fixed_ip_get_by_address(context, address, session=session).\
- update({'instance_uuid': None,
- 'virtual_interface_id': None})
+ _fixed_ip_get_by_address(context, address).update(
+ {'instance_uuid': None,
+ 'virtual_interface_id': None})
+@main_context_manager.writer
def fixed_ip_disassociate_all_by_timeout(context, host, time):
- session = get_session()
# NOTE(vish): only update fixed ips that "belong" to this
# host; i.e. the network host or the instance
# host matches. Two queries necessary because
# join with update doesn't work.
- with session.begin():
- host_filter = or_(and_(models.Instance.host == host,
- models.Network.multi_host == true()),
- models.Network.host == host)
- result = model_query(context, models.FixedIp, (models.FixedIp.id,),
- read_deleted="no", session=session).\
- filter(models.FixedIp.allocated == false()).\
- filter(models.FixedIp.updated_at < time).\
- join((models.Network,
- models.Network.id == models.FixedIp.network_id)).\
- join((models.Instance,
- models.Instance.uuid == models.FixedIp.instance_uuid)).\
- filter(host_filter).\
- all()
- fixed_ip_ids = [fip[0] for fip in result]
- if not fixed_ip_ids:
- return 0
- result = model_query(context, models.FixedIp, session=session).\
- filter(models.FixedIp.id.in_(fixed_ip_ids)).\
- update({'instance_uuid': None,
- 'leased': False,
- 'updated_at': timeutils.utcnow()},
- synchronize_session='fetch')
- return result
+ host_filter = or_(and_(models.Instance.host == host,
+ models.Network.multi_host == true()),
+ models.Network.host == host)
+ result = model_query(context, models.FixedIp, (models.FixedIp.id,),
+ read_deleted="no").\
+ filter(models.FixedIp.allocated == false()).\
+ filter(models.FixedIp.updated_at < time).\
+ join((models.Network,
+ models.Network.id == models.FixedIp.network_id)).\
+ join((models.Instance,
+ models.Instance.uuid == models.FixedIp.instance_uuid)).\
+ filter(host_filter).\
+ all()
+ fixed_ip_ids = [fip[0] for fip in result]
+ if not fixed_ip_ids:
+ return 0
+ result = model_query(context, models.FixedIp).\
+ filter(models.FixedIp.id.in_(fixed_ip_ids)).\
+ update({'instance_uuid': None,
+ 'leased': False,
+ 'updated_at': timeutils.utcnow()},
+ synchronize_session='fetch')
+ return result
@require_context
+@main_context_manager.reader
def fixed_ip_get(context, id, get_network=False):
query = model_query(context, models.FixedIp).filter_by(id=id)
if get_network:
@@ -1303,6 +1310,7 @@ def fixed_ip_get(context, id, get_network=False):
return result
+@main_context_manager.reader
def fixed_ip_get_all(context):
result = model_query(context, models.FixedIp, read_deleted="yes").all()
if not result:
@@ -1312,47 +1320,42 @@ def fixed_ip_get_all(context):
@require_context
+@main_context_manager.reader
def fixed_ip_get_by_address(context, address, columns_to_join=None):
return _fixed_ip_get_by_address(context, address,
columns_to_join=columns_to_join)
-def _fixed_ip_get_by_address(context, address, session=None,
- columns_to_join=None):
- if session is None:
- session = get_session()
+def _fixed_ip_get_by_address(context, address, columns_to_join=None):
if columns_to_join is None:
columns_to_join = []
- with session.begin(subtransactions=True):
- try:
- result = model_query(context, models.FixedIp, session=session)
- for column in columns_to_join:
- result = result.options(joinedload_all(column))
- result = result.filter_by(address=address).first()
- if not result:
- raise exception.FixedIpNotFoundForAddress(address=address)
- except db_exc.DBError:
- msg = _("Invalid fixed IP Address %s in request") % address
- LOG.warning(msg)
- raise exception.FixedIpInvalid(msg)
-
- # NOTE(sirp): shouldn't we just use project_only here to restrict the
- # results?
- if (nova.context.is_user_context(context) and
- result['instance_uuid'] is not None):
- instance = _instance_get_by_uuid(
- context.elevated(read_deleted='yes'),
- result['instance_uuid'],
- session
- )
- nova.context.authorize_project_context(context,
- instance.project_id)
+ try:
+ result = model_query(context, models.FixedIp)
+ for column in columns_to_join:
+ result = result.options(joinedload_all(column))
+ result = result.filter_by(address=address).first()
+ if not result:
+ raise exception.FixedIpNotFoundForAddress(address=address)
+ except db_exc.DBError:
+ msg = _("Invalid fixed IP Address %s in request") % address
+ LOG.warning(msg)
+ raise exception.FixedIpInvalid(msg)
+ # NOTE(sirp): shouldn't we just use project_only here to restrict the
+ # results?
+ if (nova.context.is_user_context(context) and
+ result['instance_uuid'] is not None):
+ instance = _instance_get_by_uuid(
+ context.elevated(read_deleted='yes'),
+ result['instance_uuid'])
+ nova.context.authorize_project_context(context,
+ instance.project_id)
return result
@require_context
+@main_context_manager.reader
def fixed_ip_get_by_floating_address(context, floating_address):
return model_query(context, models.FixedIp).\
join(models.FloatingIp,
@@ -1364,6 +1367,7 @@ def fixed_ip_get_by_floating_address(context, floating_address):
@require_context
+@main_context_manager.reader
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
@@ -1387,20 +1391,19 @@ def fixed_ip_get_by_instance(context, instance_uuid):
return result
+@main_context_manager.reader
def fixed_ip_get_by_host(context, host):
- session = get_session()
- with session.begin():
- instance_uuids = _instance_get_all_uuids_by_host(context, host,
- session=session)
- if not instance_uuids:
- return []
+ instance_uuids = _instance_get_all_uuids_by_host(context, host)
+ if not instance_uuids:
+ return []
- return model_query(context, models.FixedIp, session=session).\
- filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
- all()
+ return model_query(context, models.FixedIp).\
+ filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
+ all()
@require_context
+@main_context_manager.reader
def fixed_ip_get_by_network_host(context, network_id, host):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
@@ -1414,6 +1417,7 @@ def fixed_ip_get_by_network_host(context, network_id, host):
@require_context
+@main_context_manager.reader
def fixed_ips_by_virtual_interface(context, vif_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(virtual_interface_id=vif_id).\
@@ -1425,17 +1429,15 @@ def fixed_ips_by_virtual_interface(context, vif_id):
@require_context
+@main_context_manager.writer
def fixed_ip_update(context, address, values):
- session = get_session()
- with session.begin():
- _fixed_ip_get_by_address(context, address, session=session).\
- update(values)
+ _fixed_ip_get_by_address(context, address).update(values)
-def _fixed_ip_count_by_project(context, project_id, session=None):
+def _fixed_ip_count_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.FixedIp, (models.FixedIp.id,),
- read_deleted="no", session=session).\
+ read_deleted="no").\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(models.Instance.project_id == project_id).\
@@ -1446,6 +1448,7 @@ def _fixed_ip_count_by_project(context, project_id, session=None):
@require_context
+@main_context_manager.writer
def virtual_interface_create(context, values):
"""Create a new virtual interface record in the database.
@@ -1454,19 +1457,19 @@ def virtual_interface_create(context, values):
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
- vif_ref.save()
+ vif_ref.save(context.session)
except db_exc.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
-def _virtual_interface_query(context, session=None, use_slave=False):
- return model_query(context, models.VirtualInterface, session=session,
- read_deleted="no", use_slave=use_slave)
+def _virtual_interface_query(context):
+ return model_query(context, models.VirtualInterface, read_deleted="no")
@require_context
+@main_context_manager.reader
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
@@ -1479,6 +1482,7 @@ def virtual_interface_get(context, vif_id):
@require_context
+@main_context_manager.reader
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table.
@@ -1496,6 +1500,7 @@ def virtual_interface_get_by_address(context, address):
@require_context
+@main_context_manager.reader
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table.
@@ -1509,12 +1514,13 @@ def virtual_interface_get_by_uuid(context, vif_uuid):
@require_context
@require_instance_exists_using_uuid
-def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False):
+@main_context_manager.reader.allow_async
+def virtual_interface_get_by_instance(context, instance_uuid):
"""Gets all virtual interfaces for instance.
:param instance_uuid: uuid of the instance to retrieve vifs for
"""
- vif_refs = _virtual_interface_query(context, use_slave=use_slave).\
+ vif_refs = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
order_by(asc("created_at"), asc("id")).\
all()
@@ -1522,6 +1528,7 @@ def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False):
@require_context
+@main_context_manager.reader
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
network_id):
"""Gets virtual interface for instance that's associated with network."""
@@ -1533,6 +1540,7 @@ def virtual_interface_get_by_instance_and_network(context, instance_uuid,
@require_context
+@main_context_manager.writer
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records that are associated
with the instance given by instance_id.
@@ -1545,6 +1553,7 @@ def virtual_interface_delete_by_instance(context, instance_uuid):
@require_context
+@main_context_manager.reader
def virtual_interface_get_all(context):
"""Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
@@ -1565,13 +1574,12 @@ def _metadata_refs(metadata_dict, meta_class):
return metadata_refs
-def _validate_unique_server_name(context, session, name):
+def _validate_unique_server_name(context, name):
if not CONF.osapi_compute_unique_server_name_scope:
return
lowername = name.lower()
- base_query = model_query(context, models.Instance, session=session,
- read_deleted='no').\
+ base_query = model_query(context, models.Instance, read_deleted='no').\
filter(func.lower(models.Instance.hostname) == lowername)
if CONF.osapi_compute_unique_server_name_scope == 'project':
@@ -1609,14 +1617,15 @@ def _handle_objects_related_type_conversions(values):
convert_objects_related_datetimes(values, *datetime_keys)
-def _check_instance_exists_in_project(context, session, instance_uuid):
- if not model_query(context, models.Instance, session=session,
- read_deleted="no", project_only=True).filter_by(
+def _check_instance_exists_in_project(context, instance_uuid):
+ if not model_query(context, models.Instance, read_deleted="no",
+ project_only=True).filter_by(
uuid=instance_uuid).first():
raise exception.InstanceNotFound(instance_id=instance_uuid)
@require_context
+@main_context_manager.writer
def instance_create(context, values):
"""Create a new Instance record in the database.
@@ -1624,10 +1633,6 @@ def instance_create(context, values):
values - dict containing column values.
"""
- # NOTE(rpodolyaka): create the default security group, if it doesn't exist.
- # This must be done in a separate transaction, so that this one is not
- # aborted in case a concurrent one succeeds first and the unique constraint
- # for security group names is violated by a concurrent INSERT
security_group_ensure_default(context)
values = values.copy()
@@ -1655,25 +1660,22 @@ def instance_create(context, values):
instance_ref['extra'].update(values.pop('extra', {}))
instance_ref.update(values)
- def _get_sec_group_models(session, security_groups):
+ def _get_sec_group_models(security_groups):
models = []
- default_group = _security_group_ensure_default(context, session)
+ default_group = _security_group_ensure_default(context)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
security_groups = [x for x in security_groups if x != 'default']
if security_groups:
models.extend(_security_group_get_by_names(context,
- session, context.project_id, security_groups))
+ context.project_id, security_groups))
return models
- session = get_session()
- with session.begin():
- if 'hostname' in values:
- _validate_unique_server_name(context, session, values['hostname'])
- instance_ref.security_groups = _get_sec_group_models(session,
- security_groups)
- session.add(instance_ref)
+ if 'hostname' in values:
+ _validate_unique_server_name(context, values['hostname'])
+ instance_ref.security_groups = _get_sec_group_models(security_groups)
+ context.session.add(instance_ref)
# create the instance uuid to ec2_id mapping entry for instance
ec2_instance_create(context, instance_ref['uuid'])
@@ -1681,14 +1683,12 @@ def instance_create(context, values):
return instance_ref
-def _instance_data_get_for_user(context, project_id, user_id, session=None):
- result = model_query(context,
- models.Instance, (
- func.count(models.Instance.id),
- func.sum(models.Instance.vcpus),
- func.sum(models.Instance.memory_mb),
- ), session=session).\
- filter_by(project_id=project_id)
+def _instance_data_get_for_user(context, project_id, user_id):
+ result = model_query(context, models.Instance, (
+ func.count(models.Instance.id),
+ func.sum(models.Instance.vcpus),
+ func.sum(models.Instance.memory_mb))).\
+ filter_by(project_id=project_id)
if user_id:
result = result.filter_by(user_id=user_id).first()
else:
@@ -1699,59 +1699,55 @@ def _instance_data_get_for_user(context, project_id, user_id, session=None):
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def instance_destroy(context, instance_uuid, constraint=None):
- session = get_session()
- with session.begin():
- if uuidutils.is_uuid_like(instance_uuid):
- instance_ref = _instance_get_by_uuid(context, instance_uuid,
- session=session)
- else:
- raise exception.InvalidUUID(instance_uuid)
-
- query = model_query(context, models.Instance, session=session).\
- filter_by(uuid=instance_uuid)
- if constraint is not None:
- query = constraint.apply(models.Instance, query)
- count = query.soft_delete()
- if count == 0:
- raise exception.ConstraintNotMet()
- model_query(context, models.SecurityGroupInstanceAssociation,
- session=session).\
- filter_by(instance_uuid=instance_uuid).\
- soft_delete()
- model_query(context, models.InstanceInfoCache, session=session).\
- filter_by(instance_uuid=instance_uuid).\
- soft_delete()
- model_query(context, models.InstanceMetadata, session=session).\
- filter_by(instance_uuid=instance_uuid).\
- soft_delete()
- model_query(context, models.InstanceFault, session=session).\
- filter_by(instance_uuid=instance_uuid).\
- soft_delete()
- model_query(context, models.InstanceExtra, session=session).\
- filter_by(instance_uuid=instance_uuid).\
- soft_delete()
- model_query(context, models.InstanceSystemMetadata, session=session).\
- filter_by(instance_uuid=instance_uuid).\
- soft_delete()
- # NOTE(snikitin): We can't use model_query here, because there is no
- # column 'deleted' in 'tags' table.
- session.query(models.Tag).filter_by(resource_id=instance_uuid).delete()
+ if uuidutils.is_uuid_like(instance_uuid):
+ instance_ref = _instance_get_by_uuid(context, instance_uuid)
+ else:
+ raise exception.InvalidUUID(instance_uuid)
+
+ query = model_query(context, models.Instance).\
+ filter_by(uuid=instance_uuid)
+ if constraint is not None:
+ query = constraint.apply(models.Instance, query)
+ count = query.soft_delete()
+ if count == 0:
+ raise exception.ConstraintNotMet()
+ model_query(context, models.SecurityGroupInstanceAssociation).\
+ filter_by(instance_uuid=instance_uuid).\
+ soft_delete()
+ model_query(context, models.InstanceInfoCache).\
+ filter_by(instance_uuid=instance_uuid).\
+ soft_delete()
+ model_query(context, models.InstanceMetadata).\
+ filter_by(instance_uuid=instance_uuid).\
+ soft_delete()
+ model_query(context, models.InstanceFault).\
+ filter_by(instance_uuid=instance_uuid).\
+ soft_delete()
+ model_query(context, models.InstanceExtra).\
+ filter_by(instance_uuid=instance_uuid).\
+ soft_delete()
+ model_query(context, models.InstanceSystemMetadata).\
+ filter_by(instance_uuid=instance_uuid).\
+ soft_delete()
+ # NOTE(snikitin): We can't use model_query here, because there is no
+    # column 'deleted' in the 'tags' table.
+ context.session.query(models.Tag).filter_by(
+ resource_id=instance_uuid).delete()
return instance_ref
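A note on the two delete styles above: `soft_delete()` is the oslo.db query helper that flags rows as deleted rather than removing them (roughly UPDATE ... SET deleted = id, deleted_at = NOW(), per oslo.db's SoftDeleteMixin columns), while the `tags` table lacks those columns and so gets a real DELETE. The distinction, as a sketch:

    # soft delete: rows are kept, marked deleted
    model_query(context, models.InstanceMetadata).\
        filter_by(instance_uuid=instance_uuid).\
        soft_delete()

    # hard delete: rows are removed (no 'deleted' column on tags)
    context.session.query(models.Tag).\
        filter_by(resource_id=instance_uuid).delete()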
@require_context
-def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False):
+@main_context_manager.reader.allow_async
+def instance_get_by_uuid(context, uuid, columns_to_join=None):
return _instance_get_by_uuid(context, uuid,
- columns_to_join=columns_to_join, use_slave=use_slave)
+ columns_to_join=columns_to_join)
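`reader.allow_async` is what retires the `use_slave=` plumbing: rather than threading a flag down to `get_session(use_slave=...)`, the function declares itself safe to run inside an asynchronous (slave) reader transaction if the caller started one; with no slave connection configured it behaves like a plain `@reader`. A sketch of the decorator side only, mirroring the function above:

    @main_context_manager.reader.allow_async
    def instance_get_by_uuid(context, uuid, columns_to_join=None):
        # joins whatever reader transaction is active on the context;
        # an async (slave) transaction is permitted here, which a
        # plain @reader decorator would reject
        return _instance_get_by_uuid(context, uuid,
                                     columns_to_join=columns_to_join)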
-def _instance_get_by_uuid(context, uuid, session=None,
- columns_to_join=None, use_slave=False):
- result = _build_instance_get(context, session=session,
- columns_to_join=columns_to_join,
- use_slave=use_slave).\
+def _instance_get_by_uuid(context, uuid, columns_to_join=None):
+ result = _build_instance_get(context, columns_to_join=columns_to_join).\
filter_by(uuid=uuid).\
first()
@@ -1762,6 +1758,7 @@ def _instance_get_by_uuid(context, uuid, session=None,
@require_context
+@main_context_manager.reader
def instance_get(context, instance_id, columns_to_join=None):
try:
result = _build_instance_get(context, columns_to_join=columns_to_join
@@ -1779,10 +1776,8 @@ def instance_get(context, instance_id, columns_to_join=None):
raise exception.InvalidID(id=instance_id)
-def _build_instance_get(context, session=None,
- columns_to_join=None, use_slave=False):
- query = model_query(context, models.Instance, session=session,
- project_only=True, use_slave=use_slave).\
+def _build_instance_get(context, columns_to_join=None):
+ query = model_query(context, models.Instance, project_only=True).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
if columns_to_join is None:
@@ -1802,8 +1797,7 @@ def _build_instance_get(context, session=None,
return query
-def _instances_fill_metadata(context, instances,
- manual_joins=None, use_slave=False):
+def _instances_fill_metadata(context, instances, manual_joins=None):
"""Selectively fill instances with manually-joined metadata. Note that
instance will be converted to a dict.
@@ -1820,14 +1814,12 @@ def _instances_fill_metadata(context, instances,
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
- for row in _instance_metadata_get_multi(context, uuids,
- use_slave=use_slave):
+ for row in _instance_metadata_get_multi(context, uuids):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
- for row in _instance_system_metadata_get_multi(context, uuids,
- use_slave=use_slave):
+ for row in _instance_system_metadata_get_multi(context, uuids):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
@@ -1871,6 +1863,7 @@ def _manual_join_columns(columns_to_join):
@require_context
+@main_context_manager.reader
def instance_get_all(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
@@ -1892,9 +1885,9 @@ def instance_get_all(context, columns_to_join=None):
@require_context
+@main_context_manager.reader.allow_async
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
- limit=None, marker=None, columns_to_join=None,
- use_slave=False):
+ limit=None, marker=None, columns_to_join=None):
"""Return instances matching all filters sorted by the primary key.
See instance_get_all_by_filters_sort for more information.
@@ -1904,16 +1897,16 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
return instance_get_all_by_filters_sort(context, filters, limit=limit,
marker=marker,
columns_to_join=columns_to_join,
- use_slave=use_slave,
sort_keys=[sort_key],
sort_dirs=[sort_dir])
@require_context
+@main_context_manager.reader.allow_async
def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
- columns_to_join=None, use_slave=False,
- sort_keys=None, sort_dirs=None):
- """Return instances that match all filters sorted the the given keys.
+ columns_to_join=None, sort_keys=None,
+ sort_dirs=None):
+ """Return instances that match all filters sorted by the given keys.
Deleted instances will be returned by default, unless there's a filter that
says otherwise.
@@ -1973,11 +1966,6 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
sort_dirs,
default_dir='desc')
- if CONF.database.slave_connection == '':
- use_slave = False
-
- session = get_session(use_slave=use_slave)
-
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
@@ -1985,7 +1973,7 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
- query_prefix = session.query(models.Instance)
+ query_prefix = context.session.query(models.Instance)
for column in columns_to_join_new:
if 'extra.' in column:
query_prefix = query_prefix.options(undefer(column))
@@ -2084,8 +2072,7 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
if marker is not None:
try:
marker = _instance_get_by_uuid(
- context.elevated(read_deleted='yes'), marker,
- session=session)
+ context.elevated(read_deleted='yes'), marker)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
try:
@@ -2146,15 +2133,13 @@ def _tag_instance_filter(context, query, filters):
or_query = subq if or_query is None else or_(or_query, subq)
elif filter_name.startswith('tag:'):
- subq = model_query(context, model_metadata, (model_uuid,),
- session=query.session).\
+ subq = model_query(context, model_metadata, (model_uuid,)).\
filter_by(key=tag_name).\
filter(model_metadata.value.in_(tag_val))
query = query.filter(model.uuid.in_(subq))
if or_query is not None:
- subq = model_query(context, model_metadata, (model_uuid,),
- session=query.session).\
+ subq = model_query(context, model_metadata, (model_uuid,)).\
filter(or_query)
query = query.filter(model.uuid.in_(subq))
@@ -2252,8 +2237,8 @@ def _exact_instance_filter(query, filters, legal_keys):
# Apply simple exact matches
if filter_dict:
- query = query.filter_by(**filter_dict)
-
+ query = query.filter(*[getattr(models.Instance, k) == v
+ for k, v in filter_dict.items()])
return query
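The move away from `filter_by(**filter_dict)` is more than style: SQLAlchemy's `filter_by` applies its keyword criteria to the last entity joined into the query (or the primary entity when there are no joins), so on a query that has accumulated joins a key like `host` could bind to the wrong table. Qualifying each attribute pins the criteria to `models.Instance`. An illustration with hypothetical models Parent and Child that both have a `name` column:

    q = session.query(Parent).join(Child)
    q.filter_by(name='x')            # binds to Child.name, the last join
    q.filter(Parent.name == 'x')     # always binds to Parent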
@@ -2327,13 +2312,12 @@ def process_sort_params(sort_keys, sort_dirs,
@require_context
+@main_context_manager.reader.allow_async
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None,
- use_slave=False,
columns_to_join=None):
"""Return instances and joins that were active during window."""
- session = get_session(use_slave=use_slave)
- query = session.query(models.Instance)
+ query = context.session.query(models.Instance)
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
@@ -2360,15 +2344,13 @@ def instance_get_active_by_window_joined(context, begin, end=None,
return _instances_fill_metadata(context, query.all(), manual_joins)
-def _instance_get_all_query(context, project_only=False,
- joins=None, use_slave=False):
+def _instance_get_all_query(context, project_only=False, joins=None):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(context,
models.Instance,
- project_only=project_only,
- use_slave=use_slave)
+ project_only=project_only)
for column in joins:
if 'extra.' in column:
query = query.options(undefer(column))
@@ -2377,31 +2359,28 @@ def _instance_get_all_query(context, project_only=False,
return query
-def instance_get_all_by_host(context, host,
- columns_to_join=None,
- use_slave=False):
+@main_context_manager.reader.allow_async
+def instance_get_all_by_host(context, host, columns_to_join=None):
return _instances_fill_metadata(context,
- _instance_get_all_query(context,
- use_slave=use_slave).filter_by(host=host).all(),
- manual_joins=columns_to_join,
- use_slave=use_slave)
+ _instance_get_all_query(context).filter_by(host=host).all(),
+ manual_joins=columns_to_join)
-def _instance_get_all_uuids_by_host(context, host, session=None):
+def _instance_get_all_uuids_by_host(context, host):
"""Return a list of the instance uuids on a given host.
- Returns a list of UUIDs, not Instance model objects. This internal version
- allows you to specify a session object as a kwarg.
+ Returns a list of UUIDs, not Instance model objects.
"""
uuids = []
for tuple in model_query(context, models.Instance, (models.Instance.uuid,),
- read_deleted="no", session=session).\
+ read_deleted="no").\
filter_by(host=host).\
all():
uuids.append(tuple[0])
return uuids
+@main_context_manager.reader
def instance_get_all_by_host_and_node(context, host, node,
columns_to_join=None):
if columns_to_join is None:
@@ -2417,12 +2396,14 @@ def instance_get_all_by_host_and_node(context, host, node,
filter_by(node=node).all(), manual_joins=manual_joins)
+@main_context_manager.reader
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).
filter(models.Instance.instance_type_id != type_id).all())
+@main_context_manager.reader
def instance_get_all_by_grantee_security_groups(context, group_ids):
if not group_ids:
return []
@@ -2435,6 +2416,7 @@ def instance_get_all_by_grantee_security_groups(context, group_ids):
@require_context
+@main_context_manager.reader
def instance_floating_address_get_all(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
@@ -2449,6 +2431,7 @@ def instance_floating_address_get_all(context, instance_uuid):
# NOTE(hanlind): This method can be removed once the conductor RPC API moves to v2.0.
+@main_context_manager.reader
def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
@@ -2475,15 +2458,14 @@ def _retry_instance_update():
@require_context
@_retry_instance_update()
+@main_context_manager.writer
def instance_update(context, instance_uuid, values, expected=None):
- session = get_session()
- with session.begin():
- return _instance_update(context, session, instance_uuid,
- values, expected)
+ return _instance_update(context, instance_uuid, values, expected)
@require_context
@_retry_instance_update()
+@main_context_manager.writer
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None, expected=None):
"""Set the given properties on an instance and update it. Return
@@ -2502,21 +2484,17 @@ def instance_update_and_get_original(context, instance_uuid, values,
Raises NotFound if instance does not exist.
"""
- session = get_session()
- with session.begin():
- instance_ref = _instance_get_by_uuid(context, instance_uuid,
- columns_to_join=columns_to_join,
- session=session)
- return (copy.copy(instance_ref),
- _instance_update(context, session, instance_uuid, values,
- expected, original=instance_ref))
+ instance_ref = _instance_get_by_uuid(context, instance_uuid,
+ columns_to_join=columns_to_join)
+ return (copy.copy(instance_ref), _instance_update(
+ context, instance_uuid, values, expected, original=instance_ref))
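A usage sketch of the tuple this returns, with illustrative argument values: the first element is a copy taken before the UPDATE, which callers use to compare old and new state:

    old_ref, new_ref = instance_update_and_get_original(
        ctxt, instance_uuid, {'vm_state': 'active'},
        expected={'task_state': ['spawning']})
    # old_ref still shows the pre-update values; new_ref is refreshed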
# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type, model,
- metadata, session):
+ metadata):
metadata = dict(metadata)
to_delete = []
for keyvalue in instance[metadata_type]:
@@ -2532,22 +2510,21 @@ def _instance_metadata_update_in_place(context, instance, metadata_type, model,
# allow reading deleted regular metadata anywhere.
if metadata_type == 'system_metadata':
for condemned in to_delete:
- session.delete(condemned)
+ context.session.delete(condemned)
instance[metadata_type].remove(condemned)
else:
for condemned in to_delete:
- condemned.soft_delete(session=session)
+ condemned.soft_delete(context.session)
for key, value in metadata.items():
newitem = model()
newitem.update({'key': key, 'value': value,
'instance_uuid': instance['uuid']})
- session.add(newitem)
+ context.session.add(newitem)
instance[metadata_type].append(newitem)
-def _instance_update(context, session, instance_uuid, values, expected,
- original=None):
+def _instance_update(context, instance_uuid, values, expected, original=None):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
@@ -2581,12 +2558,12 @@ def _instance_update(context, session, instance_uuid, values, expected,
# osapi_compute_unique_server_name_scope is small, and a robust fix
# will be complex. This is intentionally left as is for the moment.
if 'hostname' in values:
- _validate_unique_server_name(context, session, values['hostname'])
+ _validate_unique_server_name(context, values['hostname'])
compare = models.Instance(uuid=instance_uuid, **expected)
try:
instance_ref = model_query(context, models.Instance,
- project_only=True, session=session).\
+ project_only=True).\
update_on_match(compare, 'uuid', values)
except update_match.NoRowsMatched:
# Update failed. Try to find why and raise a specific error.
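`update_on_match` is oslo.db's compare-and-swap helper (from `oslo_db.sqlalchemy.update_match`): the UPDATE is only applied if the row still matches the `compare` specimen, making the expected-state check and the write atomic without explicit locking. The shape of the call, as used above (the task_state value is illustrative):

    compare = models.Instance(uuid=instance_uuid,
                              task_state='spawning')
    try:
        instance_ref = model_query(context, models.Instance,
                                   project_only=True).\
            update_on_match(compare, 'uuid', values)
    except update_match.NoRowsMatched:
        # either the row is gone or a column in 'compare' changed
        # underneath us; the code below works out which
        ...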
@@ -2608,8 +2585,7 @@ def _instance_update(context, session, instance_uuid, values, expected,
# is no point refreshing it. If we have not previously read the
# instance, we can fetch it here and we will get fresh data.
if original is None:
- original = _instance_get_by_uuid(context, instance_uuid,
- session=session)
+ original = _instance_get_by_uuid(context, instance_uuid)
conflicts_expected = {}
conflicts_actual = {}
@@ -2653,26 +2629,28 @@ def _instance_update(context, session, instance_uuid, values, expected,
_instance_metadata_update_in_place(context, instance_ref,
'metadata',
models.InstanceMetadata,
- metadata, session)
+ metadata)
if system_metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'system_metadata',
models.InstanceSystemMetadata,
- system_metadata, session)
+ system_metadata)
return instance_ref
+@main_context_manager.writer
def instance_add_security_group(context, instance_uuid, security_group_id):
"""Associate the given security group with the given instance."""
sec_group_ref = models.SecurityGroupInstanceAssociation()
sec_group_ref.update({'instance_uuid': instance_uuid,
'security_group_id': security_group_id})
- sec_group_ref.save()
+ sec_group_ref.save(context.session)
@require_context
+@main_context_manager.writer
def instance_remove_security_group(context, instance_uuid, security_group_id):
"""Disassociate the given security group from the given instance."""
model_query(context, models.SecurityGroupInstanceAssociation).\
@@ -2788,7 +2766,7 @@ def key_pair_create(context, values):
try:
key_pair_ref = models.KeyPair()
key_pair_ref.update(values)
- key_pair_ref.save(session=context.session)
+ key_pair_ref.save(context.session)
return key_pair_ref
except db_exc.DBDuplicateEntry:
raise exception.KeyPairExists(key_name=values['name'])
@@ -2837,7 +2815,7 @@ def key_pair_count_by_user(context, user_id):
###################
-
+@main_context_manager.writer
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a project with a network.
@@ -2853,37 +2831,33 @@ def network_associate(context, project_id, network_id=None, force=False):
force should only be used as a direct consequence of a user request;
automated requests should never use force
"""
- session = get_session()
- with session.begin():
-
- def network_query(project_filter, id=None):
- filter_kwargs = {'project_id': project_filter}
- if id is not None:
- filter_kwargs['id'] = id
- return model_query(context, models.Network, session=session,
- read_deleted="no").\
- filter_by(**filter_kwargs).\
- with_lockmode('update').\
- first()
+ def network_query(project_filter, id=None):
+ filter_kwargs = {'project_id': project_filter}
+ if id is not None:
+ filter_kwargs['id'] = id
+ return model_query(context, models.Network, read_deleted="no").\
+ filter_by(**filter_kwargs).\
+ with_lockmode('update').\
+ first()
- if not force:
- # find out if project has a network
- network_ref = network_query(project_id)
+ if not force:
+ # find out if project has a network
+ network_ref = network_query(project_id)
- if force or not network_ref:
- # in force mode or project doesn't have a network so associate
- # with a new network
+ if force or not network_ref:
+ # in force mode or project doesn't have a network so associate
+ # with a new network
- # get new network
- network_ref = network_query(None, network_id)
- if not network_ref:
- raise exception.NoMoreNetworks()
+ # get new network
+ network_ref = network_query(None, network_id)
+ if not network_ref:
+ raise exception.NoMoreNetworks()
- # associate with network
- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
- # then this has concurrency issues
- network_ref['project_id'] = project_id
- session.add(network_ref)
+ # associate with network
+ # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
+ # then this has concurrency issues
+ network_ref['project_id'] = project_id
+ context.session.add(network_ref)
return network_ref
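The locking read in `network_query` is the crux here: `with_lockmode('update')` is the legacy SQLAlchemy spelling of SELECT ... FOR UPDATE (later SQLAlchemy spells it `with_for_update()`), which holds the matched row until the surrounding writer transaction commits. That is also why the NOTE above warns about SQLite, which ignores FOR UPDATE and loses the mutual exclusion. Equivalent modern spelling, as a sketch:

    network = context.session.query(models.Network).\
        filter_by(project_id=None).\
        with_for_update().\
        first()
    # the row stays locked until the @writer transaction ends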
@@ -2892,45 +2866,44 @@ def _network_ips_query(context, network_id):
filter_by(network_id=network_id)
+@main_context_manager.reader
def network_count_reserved_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(reserved=True).\
count()
+@main_context_manager.writer
def network_create_safe(context, values):
network_ref = models.Network()
network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
try:
- network_ref.save()
+ network_ref.save(context.session)
return network_ref
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
+@main_context_manager.writer
def network_delete_safe(context, network_id):
- session = get_session()
- with session.begin():
- result = model_query(context, models.FixedIp, session=session,
- read_deleted="no").\
- filter_by(network_id=network_id).\
- filter_by(allocated=True).\
- count()
- if result != 0:
- raise exception.NetworkInUse(network_id=network_id)
- network_ref = _network_get(context, network_id=network_id,
- session=session)
-
- model_query(context, models.FixedIp, session=session,
- read_deleted="no").\
- filter_by(network_id=network_id).\
- soft_delete()
+ result = model_query(context, models.FixedIp, read_deleted="no").\
+ filter_by(network_id=network_id).\
+ filter_by(allocated=True).\
+ count()
+ if result != 0:
+ raise exception.NetworkInUse(network_id=network_id)
+ network_ref = _network_get(context, network_id=network_id)
+
+ model_query(context, models.FixedIp, read_deleted="no").\
+ filter_by(network_id=network_id).\
+ soft_delete()
- session.delete(network_ref)
+ context.session.delete(network_ref)
+@main_context_manager.writer
def network_disassociate(context, network_id, disassociate_host,
disassociate_project):
net_update = {}
@@ -2941,9 +2914,8 @@ def network_disassociate(context, network_id, disassociate_host,
network_update(context, network_id, net_update)
-def _network_get(context, network_id, session=None, project_only='allow_none'):
- result = model_query(context, models.Network, session=session,
- project_only=project_only).\
+def _network_get(context, network_id, project_only='allow_none'):
+ result = model_query(context, models.Network, project_only=project_only).\
filter_by(id=network_id).\
first()
@@ -2954,11 +2926,13 @@ def _network_get(context, network_id, session=None, project_only='allow_none'):
@require_context
+@main_context_manager.reader
def network_get(context, network_id, project_only='allow_none'):
return _network_get(context, network_id, project_only=project_only)
@require_context
+@main_context_manager.reader
def network_get_all(context, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).all()
@@ -2970,6 +2944,7 @@ def network_get_all(context, project_only):
@require_context
+@main_context_manager.reader
def network_get_all_by_uuids(context, network_uuids, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).\
@@ -2994,7 +2969,7 @@ def network_get_all_by_uuids(context, network_uuids, project_only):
return result
-def _get_associated_fixed_ips_query(network_id, host=None):
+def _get_associated_fixed_ips_query(context, network_id, host=None):
# NOTE(vish): The ugly joins here are to solve a performance issue and
# should be removed once we can add and remove leases
# without regenerating the whole list
@@ -3003,42 +2978,44 @@ def _get_associated_fixed_ips_query(network_id, host=None):
models.VirtualInterface.deleted == 0)
inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
models.Instance.deleted == 0)
- session = get_session()
# NOTE(vish): This subquery left joins the minimum interface id for each
# instance. If the join succeeds (i.e. the 11th column is not
# null), then the fixed ip is on the first interface.
- subq = session.query(func.min(models.VirtualInterface.id).label("id"),
- models.VirtualInterface.instance_uuid).\
- group_by(models.VirtualInterface.instance_uuid).subquery()
+ subq = context.session.query(
+ func.min(models.VirtualInterface.id).label("id"),
+ models.VirtualInterface.instance_uuid).\
+ group_by(models.VirtualInterface.instance_uuid).subquery()
subq_and = and_(subq.c.id == models.FixedIp.virtual_interface_id,
subq.c.instance_uuid == models.VirtualInterface.instance_uuid)
- query = session.query(models.FixedIp.address,
- models.FixedIp.instance_uuid,
- models.FixedIp.network_id,
- models.FixedIp.virtual_interface_id,
- models.VirtualInterface.address,
- models.Instance.hostname,
- models.Instance.updated_at,
- models.Instance.created_at,
- models.FixedIp.allocated,
- models.FixedIp.leased,
- subq.c.id).\
- filter(models.FixedIp.deleted == 0).\
- filter(models.FixedIp.network_id == network_id).\
- join((models.VirtualInterface, vif_and)).\
- join((models.Instance, inst_and)).\
- outerjoin((subq, subq_and)).\
- filter(models.FixedIp.instance_uuid != null()).\
- filter(models.FixedIp.virtual_interface_id != null())
+ query = context.session.query(
+ models.FixedIp.address,
+ models.FixedIp.instance_uuid,
+ models.FixedIp.network_id,
+ models.FixedIp.virtual_interface_id,
+ models.VirtualInterface.address,
+ models.Instance.hostname,
+ models.Instance.updated_at,
+ models.Instance.created_at,
+ models.FixedIp.allocated,
+ models.FixedIp.leased,
+ subq.c.id).\
+ filter(models.FixedIp.deleted == 0).\
+ filter(models.FixedIp.network_id == network_id).\
+ join((models.VirtualInterface, vif_and)).\
+ join((models.Instance, inst_and)).\
+ outerjoin((subq, subq_and)).\
+ filter(models.FixedIp.instance_uuid != null()).\
+ filter(models.FixedIp.virtual_interface_id != null())
if host:
query = query.filter(models.Instance.host == host)
return query
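The subquery above left joins, per instance, the minimum virtual-interface id; when the outer join matches (the final selected column is non-NULL), the fixed IP is known to live on the instance's first interface. Roughly the SQL being generated, sketched in comments with abbreviated names:

    # SELECT fip.address, ..., subq.id
    # FROM fixed_ips fip
    #   JOIN virtual_interfaces vif
    #     ON vif.id = fip.virtual_interface_id AND vif.deleted = 0
    #   JOIN instances i
    #     ON i.uuid = fip.instance_uuid AND i.deleted = 0
    #   LEFT OUTER JOIN (SELECT MIN(id) AS id, instance_uuid
    #                    FROM virtual_interfaces
    #                    GROUP BY instance_uuid) subq
    #     ON subq.id = fip.virtual_interface_id
    #    AND subq.instance_uuid = vif.instance_uuid
    # WHERE fip.deleted = 0 AND fip.network_id = :network_id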
+@main_context_manager.reader
def network_get_associated_fixed_ips(context, network_id, host=None):
# FIXME(sirp): since this returns fixed_ips, this would be better named
# fixed_ip_get_all_by_network.
- query = _get_associated_fixed_ips_query(network_id, host)
+ query = _get_associated_fixed_ips_query(context, network_id, host)
result = query.all()
data = []
for datum in result:
@@ -3060,16 +3037,17 @@ def network_get_associated_fixed_ips(context, network_id, host=None):
return data
+@main_context_manager.reader
def network_in_use_on_host(context, network_id, host):
- query = _get_associated_fixed_ips_query(network_id, host)
+ query = _get_associated_fixed_ips_query(context, network_id, host)
return query.count() > 0
-def _network_get_query(context, session=None):
- return model_query(context, models.Network, session=session,
- read_deleted="no")
+def _network_get_query(context):
+ return model_query(context, models.Network, read_deleted="no")
+@main_context_manager.reader
def network_get_by_uuid(context, uuid):
result = _network_get_query(context).filter_by(uuid=uuid).first()
@@ -3079,6 +3057,7 @@ def network_get_by_uuid(context, uuid):
return result
+@main_context_manager.reader
def network_get_by_cidr(context, cidr):
result = _network_get_query(context).\
filter(or_(models.Network.cidr == cidr,
@@ -3091,14 +3070,13 @@ def network_get_by_cidr(context, cidr):
return result
+@main_context_manager.reader
def network_get_all_by_host(context, host):
- session = get_session()
fixed_host_filter = or_(models.FixedIp.host == host,
and_(models.FixedIp.instance_uuid != null(),
models.Instance.host == host))
fixed_ip_query = model_query(context, models.FixedIp,
- (models.FixedIp.network_id,),
- session=session).\
+ (models.FixedIp.network_id,)).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.FixedIp.instance_uuid)).\
@@ -3108,13 +3086,12 @@ def network_get_all_by_host(context, host):
# or that have an instance with host set
host_filter = or_(models.Network.host == host,
models.Network.id.in_(fixed_ip_query.subquery()))
- return _network_get_query(context, session=session).\
- filter(host_filter).\
- all()
+ return _network_get_query(context).filter(host_filter).all()
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
+@main_context_manager.writer
def network_set_host(context, network_id, host_id):
network_ref = _network_get_query(context).\
filter_by(id=network_id).\
@@ -3139,22 +3116,22 @@ def network_set_host(context, network_id, host_id):
@require_context
+@main_context_manager.writer
def network_update(context, network_id, values):
- session = get_session()
- with session.begin():
- network_ref = _network_get(context, network_id, session=session)
- network_ref.update(values)
- try:
- network_ref.save(session=session)
- except db_exc.DBDuplicateEntry:
- raise exception.DuplicateVlan(vlan=values['vlan'])
- return network_ref
+ network_ref = _network_get(context, network_id)
+ network_ref.update(values)
+ try:
+ network_ref.save(context.session)
+ except db_exc.DBDuplicateEntry:
+ raise exception.DuplicateVlan(vlan=values['vlan'])
+ return network_ref
###################
@require_context
+@main_context_manager.reader
def quota_get(context, project_id, resource, user_id=None):
model = models.ProjectUserQuota if user_id else models.Quota
query = model_query(context, model).\
@@ -3175,6 +3152,7 @@ def quota_get(context, project_id, resource, user_id=None):
@require_context
+@main_context_manager.reader
def quota_get_all_by_project_and_user(context, project_id, user_id):
user_quotas = model_query(context, models.ProjectUserQuota,
(models.ProjectUserQuota.resource,
@@ -3191,6 +3169,7 @@ def quota_get_all_by_project_and_user(context, project_id, user_id):
@require_context
+@main_context_manager.reader
def quota_get_all_by_project(context, project_id):
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
@@ -3204,6 +3183,7 @@ def quota_get_all_by_project(context, project_id):
@require_context
+@main_context_manager.reader
def quota_get_all(context, project_id):
result = model_query(context, models.ProjectUserQuota).\
filter_by(project_id=project_id).\
@@ -3212,6 +3192,7 @@ def quota_get_all(context, project_id):
return result
+@main_context_manager.writer
def quota_create(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
@@ -3221,12 +3202,13 @@ def quota_create(context, project_id, resource, limit, user_id=None):
quota_ref.resource = resource
quota_ref.hard_limit = limit
try:
- quota_ref.save()
+ quota_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.QuotaExists(project_id=project_id, resource=resource)
return quota_ref
+@main_context_manager.writer
def quota_update(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
model = models.ProjectUserQuota if per_user else models.Quota
@@ -3249,6 +3231,7 @@ def quota_update(context, project_id, resource, limit, user_id=None):
@require_context
+@main_context_manager.reader
def quota_class_get(context, class_name, resource):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
@@ -3261,6 +3244,7 @@ def quota_class_get(context, class_name, resource):
return result
+@main_context_manager.reader
def quota_class_get_default(context):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).\
@@ -3274,6 +3258,7 @@ def quota_class_get_default(context):
@require_context
+@main_context_manager.reader
def quota_class_get_all_by_name(context, class_name):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
@@ -3286,15 +3271,17 @@ def quota_class_get_all_by_name(context, class_name):
return result
+@main_context_manager.writer
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
- quota_class_ref.save()
+ quota_class_ref.save(context.session)
return quota_class_ref
+@main_context_manager.writer
def quota_class_update(context, class_name, resource, limit):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
@@ -3309,6 +3296,7 @@ def quota_class_update(context, class_name, resource, limit):
@require_context
+@main_context_manager.reader
def quota_usage_get(context, project_id, resource, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
@@ -3349,17 +3337,19 @@ def _quota_usage_get_all(context, project_id, user_id=None):
@require_context
+@main_context_manager.reader
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
return _quota_usage_get_all(context, project_id, user_id=user_id)
@require_context
+@main_context_manager.reader
def quota_usage_get_all_by_project(context, project_id):
return _quota_usage_get_all(context, project_id)
def _quota_usage_create(project_id, user_id, resource, in_use,
- reserved, until_refresh, session=None):
+ reserved, until_refresh, session):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
@@ -3370,11 +3360,12 @@ def _quota_usage_create(project_id, user_id, resource, in_use,
# updated_at is needed for judgement of max_age
quota_usage_ref.updated_at = timeutils.utcnow()
- quota_usage_ref.save(session=session)
+ quota_usage_ref.save(session)
return quota_usage_ref
+@main_context_manager.writer
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
updates = {}
@@ -3397,7 +3388,7 @@ def quota_usage_update(context, project_id, user_id, resource, **kwargs):
def _reservation_create(uuid, usage, project_id, user_id, resource,
- delta, expire, session=None):
+ delta, expire, session):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
@@ -3406,7 +3397,7 @@ def _reservation_create(uuid, usage, project_id, user_id, resource,
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
- reservation_ref.save(session=session)
+ reservation_ref.save(session)
return reservation_ref
@@ -3418,11 +3409,9 @@ def _reservation_create(uuid, usage, project_id, user_id, resource,
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
-def _get_project_user_quota_usages(context, session, project_id,
- user_id):
+def _get_project_user_quota_usages(context, project_id, user_id):
rows = model_query(context, models.QuotaUsage,
- read_deleted="no",
- session=session).\
+ read_deleted="no").\
filter_by(project_id=project_id).\
order_by(models.QuotaUsage.id.asc()).\
with_lockmode('update').\
@@ -3463,8 +3452,7 @@ def _create_quota_usage_if_missing(user_usages, resource, until_refresh,
if resource in PER_PROJECT_QUOTAS:
user_id_to_use = None
new_usage = _quota_usage_create(project_id, user_id_to_use, resource,
- 0, 0, until_refresh or None,
- session=session)
+ 0, 0, until_refresh or None, session)
user_usages[resource] = new_usage
return new_usage is not None
@@ -3566,118 +3554,117 @@ def _calculate_overquota(project_quotas, user_quotas, deltas,
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
elevated = context.elevated()
- session = get_session()
- with session.begin():
-
- if project_id is None:
- project_id = context.project_id
- if user_id is None:
- user_id = context.user_id
-
- # Get the current usages
- project_usages, user_usages = _get_project_user_quota_usages(
- context, session, project_id, user_id)
-
- # Handle usage refresh
- work = set(deltas.keys())
- while work:
- resource = work.pop()
-
- # Do we need to refresh the usage?
- created = _create_quota_usage_if_missing(user_usages, resource,
- until_refresh, project_id,
- user_id, session)
- refresh = created or _is_quota_refresh_needed(
- user_usages[resource], max_age)
-
- # OK, refresh the usage
- if refresh:
- # Grab the sync routine
- sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
-
- updates = sync(elevated, project_id, user_id, session)
- for res, in_use in updates.items():
- # Make sure we have a destination for the usage!
- _create_quota_usage_if_missing(user_usages, res,
- until_refresh, project_id,
- user_id, session)
- _refresh_quota_usages(user_usages[res], until_refresh,
- in_use)
-
- # Because more than one resource may be refreshed
- # by the call to the sync routine, and we don't
- # want to double-sync, we make sure all refreshed
- # resources are dropped from the work set.
- work.discard(res)
-
- # NOTE(Vek): We make the assumption that the sync
- # routine actually refreshes the
- # resources that it is the sync routine
- # for. We don't check, because this is
- # a best-effort mechanism.
-
- # Check for deltas that would go negative
- unders = [res for res, delta in deltas.items()
- if delta < 0 and
- delta + user_usages[res].in_use < 0]
-
- # Now, let's check the quotas
- # NOTE(Vek): We're only concerned about positive increments.
- # If a project has gone over quota, we want them to
- # be able to reduce their usage without any
- # problems.
- for key, value in user_usages.items():
- if key not in project_usages:
- LOG.debug('Copying QuotaUsage for resource "%(key)s" from '
- 'user_usages into project_usages: %(value)s',
- {'key': key, 'value': dict(value)})
- project_usages[key] = value
-
- overs = _calculate_overquota(project_quotas, user_quotas, deltas,
- project_usages, user_usages)
-
- # NOTE(Vek): The quota check needs to be in the transaction,
- # but the transaction doesn't fail just because
- # we're over quota, so the OverQuota raise is
- # outside the transaction. If we did the raise
- # here, our usage updates would be discarded, but
- # they're not invalidated by being over-quota.
-
- # Create the reservations
- if not overs:
- reservations = []
- for res, delta in deltas.items():
- reservation = _reservation_create(
- str(uuid.uuid4()),
- user_usages[res],
- project_id,
- user_id,
- res, delta, expire,
- session=session)
- reservations.append(reservation.uuid)
-
- # Also update the reserved quantity
- # NOTE(Vek): Again, we are only concerned here about
- # positive increments. Here, though, we're
- # worried about the following scenario:
- #
- # 1) User initiates resize down.
- # 2) User allocates a new instance.
- # 3) Resize down fails or is reverted.
- # 4) User is now over quota.
- #
- # To prevent this, we only update the
- # reserved value if the delta is positive.
- if delta > 0:
- user_usages[res].reserved += delta
-
- # Apply updates to the usages table
- for usage_ref in user_usages.values():
- session.add(usage_ref)
+
+ if project_id is None:
+ project_id = context.project_id
+ if user_id is None:
+ user_id = context.user_id
+
+ # Get the current usages
+ project_usages, user_usages = _get_project_user_quota_usages(
+ context, project_id, user_id)
+
+ # Handle usage refresh
+ work = set(deltas.keys())
+ while work:
+ resource = work.pop()
+
+ # Do we need to refresh the usage?
+ created = _create_quota_usage_if_missing(user_usages, resource,
+ until_refresh, project_id,
+ user_id, context.session)
+ refresh = created or _is_quota_refresh_needed(
+ user_usages[resource], max_age)
+
+ # OK, refresh the usage
+ if refresh:
+ # Grab the sync routine
+ sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
+
+ updates = sync(elevated, project_id, user_id)
+ for res, in_use in updates.items():
+ # Make sure we have a destination for the usage!
+ _create_quota_usage_if_missing(user_usages, res,
+ until_refresh, project_id,
+ user_id, context.session)
+ _refresh_quota_usages(user_usages[res], until_refresh,
+ in_use)
+
+ # Because more than one resource may be refreshed
+ # by the call to the sync routine, and we don't
+ # want to double-sync, we make sure all refreshed
+ # resources are dropped from the work set.
+ work.discard(res)
+
+ # NOTE(Vek): We make the assumption that the sync
+ # routine actually refreshes the
+ # resources that it is the sync routine
+ # for. We don't check, because this is
+ # a best-effort mechanism.
+
+ # Check for deltas that would go negative
+ unders = [res for res, delta in deltas.items()
+ if delta < 0 and
+ delta + user_usages[res].in_use < 0]
+
+ # Now, let's check the quotas
+ # NOTE(Vek): We're only concerned about positive increments.
+ # If a project has gone over quota, we want them to
+ # be able to reduce their usage without any
+ # problems.
+ for key, value in user_usages.items():
+ if key not in project_usages:
+ LOG.debug('Copying QuotaUsage for resource "%(key)s" from '
+ 'user_usages into project_usages: %(value)s',
+ {'key': key, 'value': dict(value)})
+ project_usages[key] = value
+
+ overs = _calculate_overquota(project_quotas, user_quotas, deltas,
+ project_usages, user_usages)
+
+ # NOTE(Vek): The quota check needs to be in the transaction,
+ # but the transaction doesn't fail just because
+ # we're over quota, so the OverQuota raise is
+ # outside the transaction. If we did the raise
+ # here, our usage updates would be discarded, but
+ # they're not invalidated by being over-quota.
+
+ # Create the reservations
+ if not overs:
+ reservations = []
+ for res, delta in deltas.items():
+ reservation = _reservation_create(
+ str(uuid.uuid4()),
+ user_usages[res],
+ project_id,
+ user_id,
+ res, delta, expire,
+ context.session)
+ reservations.append(reservation.uuid)
+
+ # Also update the reserved quantity
+ # NOTE(Vek): Again, we are only concerned here about
+ # positive increments. Here, though, we're
+ # worried about the following scenario:
+ #
+ # 1) User initiates resize down.
+ # 2) User allocates a new instance.
+ # 3) Resize down fails or is reverted.
+ # 4) User is now over quota.
+ #
+ # To prevent this, we only update the
+ # reserved value if the delta is positive.
+ if delta > 0:
+ user_usages[res].reserved += delta
+
+ # Apply updates to the usages table
+ for usage_ref in user_usages.values():
+ context.session.add(usage_ref)
if unders:
LOG.warning(_LW("Change will make usage less than 0 for the following "
@@ -3714,111 +3701,95 @@ def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
return reservations
-def _quota_reservations_query(session, context, reservations):
+def _quota_reservations_query(context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
- return model_query(context, models.Reservation,
- read_deleted="no",
- session=session).\
- filter(models.Reservation.uuid.in_(reservations)).\
- with_lockmode('update')
+ return model_query(context, models.Reservation, read_deleted="no").\
+ filter(models.Reservation.uuid.in_(reservations)).\
+ with_lockmode('update')
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def reservation_commit(context, reservations, project_id=None, user_id=None):
- session = get_session()
- with session.begin():
- _project_usages, user_usages = _get_project_user_quota_usages(
- context, session, project_id, user_id)
- reservation_query = _quota_reservations_query(session, context,
- reservations)
- for reservation in reservation_query.all():
- usage = user_usages[reservation.resource]
- if reservation.delta >= 0:
- usage.reserved -= reservation.delta
- usage.in_use += reservation.delta
- reservation_query.soft_delete(synchronize_session=False)
+ _project_usages, user_usages = _get_project_user_quota_usages(
+ context, project_id, user_id)
+ reservation_query = _quota_reservations_query(context, reservations)
+ for reservation in reservation_query.all():
+ usage = user_usages[reservation.resource]
+ if reservation.delta >= 0:
+ usage.reserved -= reservation.delta
+ usage.in_use += reservation.delta
+ reservation_query.soft_delete(synchronize_session=False)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def reservation_rollback(context, reservations, project_id=None, user_id=None):
- session = get_session()
- with session.begin():
- _project_usages, user_usages = _get_project_user_quota_usages(
- context, session, project_id, user_id)
- reservation_query = _quota_reservations_query(session, context,
- reservations)
- for reservation in reservation_query.all():
- usage = user_usages[reservation.resource]
- if reservation.delta >= 0:
- usage.reserved -= reservation.delta
- reservation_query.soft_delete(synchronize_session=False)
+ _project_usages, user_usages = _get_project_user_quota_usages(
+ context, project_id, user_id)
+ reservation_query = _quota_reservations_query(context, reservations)
+ for reservation in reservation_query.all():
+ usage = user_usages[reservation.resource]
+ if reservation.delta >= 0:
+ usage.reserved -= reservation.delta
+ reservation_query.soft_delete(synchronize_session=False)
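Together with `quota_reserve` these form a two-phase flow: reserve bumps `reserved` and returns reservation uuids, after which the caller either commits (moving the delta from reserved into in_use) or rolls back (releasing it); `reservation_expire` below sweeps up reservations whose caller did neither. A usage sketch with illustrative arguments:

    reservations = quota_reserve(
        ctxt, resources, project_quotas, user_quotas,
        deltas={'instances': 1}, expire=expire,   # expire: a datetime
        until_refresh=None, max_age=0)
    try:
        ...  # do the work the reservation covers
        reservation_commit(ctxt, reservations,
                           project_id=ctxt.project_id)
    except Exception:
        reservation_rollback(ctxt, reservations,
                             project_id=ctxt.project_id)
        raise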
+@main_context_manager.writer
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
- session = get_session()
- with session.begin():
- model_query(context, models.ProjectUserQuota, session=session,
- read_deleted="no").\
- filter_by(project_id=project_id).\
- filter_by(user_id=user_id).\
- soft_delete(synchronize_session=False)
+ model_query(context, models.ProjectUserQuota, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ filter_by(user_id=user_id).\
+ soft_delete(synchronize_session=False)
- model_query(context, models.QuotaUsage,
- session=session, read_deleted="no").\
- filter_by(project_id=project_id).\
- filter_by(user_id=user_id).\
- soft_delete(synchronize_session=False)
+ model_query(context, models.QuotaUsage, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ filter_by(user_id=user_id).\
+ soft_delete(synchronize_session=False)
- model_query(context, models.Reservation,
- session=session, read_deleted="no").\
- filter_by(project_id=project_id).\
- filter_by(user_id=user_id).\
- soft_delete(synchronize_session=False)
+ model_query(context, models.Reservation, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ filter_by(user_id=user_id).\
+ soft_delete(synchronize_session=False)
+@main_context_manager.writer
def quota_destroy_all_by_project(context, project_id):
- session = get_session()
- with session.begin():
- model_query(context, models.Quota, session=session,
- read_deleted="no").\
- filter_by(project_id=project_id).\
- soft_delete(synchronize_session=False)
+ model_query(context, models.Quota, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ soft_delete(synchronize_session=False)
- model_query(context, models.ProjectUserQuota, session=session,
- read_deleted="no").\
- filter_by(project_id=project_id).\
- soft_delete(synchronize_session=False)
+ model_query(context, models.ProjectUserQuota, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ soft_delete(synchronize_session=False)
- model_query(context, models.QuotaUsage,
- session=session, read_deleted="no").\
- filter_by(project_id=project_id).\
- soft_delete(synchronize_session=False)
+ model_query(context, models.QuotaUsage, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ soft_delete(synchronize_session=False)
- model_query(context, models.Reservation,
- session=session, read_deleted="no").\
- filter_by(project_id=project_id).\
- soft_delete(synchronize_session=False)
+ model_query(context, models.Reservation, read_deleted="no").\
+ filter_by(project_id=project_id).\
+ soft_delete(synchronize_session=False)
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def reservation_expire(context):
- session = get_session()
- with session.begin():
- current_time = timeutils.utcnow()
- reservation_query = model_query(context, models.Reservation,
- session=session, read_deleted="no").\
- filter(models.Reservation.expire < current_time)
+ current_time = timeutils.utcnow()
+ reservation_query = model_query(
+ context, models.Reservation, read_deleted="no").\
+ filter(models.Reservation.expire < current_time)
- for reservation in reservation_query.join(models.QuotaUsage).all():
- if reservation.delta >= 0:
- reservation.usage.reserved -= reservation.delta
- session.add(reservation.usage)
+ for reservation in reservation_query.join(models.QuotaUsage).all():
+ if reservation.delta >= 0:
+ reservation.usage.reserved -= reservation.delta
+ context.session.add(reservation.usage)
- reservation_query.soft_delete(synchronize_session=False)
+ reservation_query.soft_delete(synchronize_session=False)
###################
@@ -3915,13 +3886,11 @@ def ec2_snapshot_get_by_uuid(context, snapshot_uuid):
###################
-def _block_device_mapping_get_query(context, session=None,
- columns_to_join=None, use_slave=False):
+def _block_device_mapping_get_query(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join = []
- query = model_query(context, models.BlockDeviceMapping,
- session=session, use_slave=use_slave)
+ query = model_query(context, models.BlockDeviceMapping)
for column in columns_to_join:
query = query.options(joinedload(column))
@@ -3949,6 +3918,7 @@ def _from_legacy_values(values, legacy, allow_updates=False):
@require_context
+@main_context_manager.writer
def block_device_mapping_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy)
@@ -3956,11 +3926,12 @@ def block_device_mapping_create(context, values, legacy=True):
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
- bdm_ref.save()
+ bdm_ref.save(context.session)
return bdm_ref
@require_context
+@main_context_manager.writer
def block_device_mapping_update(context, bdm_id, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
@@ -3971,62 +3942,59 @@ def block_device_mapping_update(context, bdm_id, values, legacy=True):
return query.first()
+@main_context_manager.writer
def block_device_mapping_update_or_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
convert_objects_related_datetimes(values)
- session = get_session()
- with session.begin():
- result = None
- # NOTE(xqueralt): Only update a BDM when device_name was provided. We
- # allow empty device names so they will be set later by the manager.
- if values['device_name']:
- query = _block_device_mapping_get_query(context, session=session)
- result = query.filter_by(instance_uuid=values['instance_uuid'],
- device_name=values['device_name']).first()
-
- if result:
- result.update(values)
- else:
- # Either the device_name doesn't exist in the database yet, or no
- # device_name was provided. Both cases mean creating a new BDM.
- result = models.BlockDeviceMapping(**values)
- result.save(session=session)
-
- # NOTE(xqueralt): Prevent from having multiple swap devices for the
- # same instance. This will delete all the existing ones.
- if block_device.new_format_is_swap(values):
- query = _block_device_mapping_get_query(context, session=session)
- query = query.filter_by(instance_uuid=values['instance_uuid'],
- source_type='blank', guest_format='swap')
- query = query.filter(models.BlockDeviceMapping.id != result.id)
- query.soft_delete()
+ result = None
+ # NOTE(xqueralt): Only update a BDM when device_name was provided. We
+ # allow empty device names so they will be set later by the manager.
+ if values['device_name']:
+ query = _block_device_mapping_get_query(context)
+ result = query.filter_by(instance_uuid=values['instance_uuid'],
+ device_name=values['device_name']).first()
- return result
+ if result:
+ result.update(values)
+ else:
+ # Either the device_name doesn't exist in the database yet, or no
+ # device_name was provided. Both cases mean creating a new BDM.
+ result = models.BlockDeviceMapping(**values)
+ result.save(context.session)
+
+    # NOTE(xqueralt): Prevent having multiple swap devices for the
+ # same instance. This will delete all the existing ones.
+ if block_device.new_format_is_swap(values):
+ query = _block_device_mapping_get_query(context)
+ query = query.filter_by(instance_uuid=values['instance_uuid'],
+ source_type='blank', guest_format='swap')
+ query = query.filter(models.BlockDeviceMapping.id != result.id)
+ query.soft_delete()
+
+ return result
@require_context
-def block_device_mapping_get_all_by_instance_uuids(context, instance_uuids,
- use_slave=False):
+@main_context_manager.reader.allow_async
+def block_device_mapping_get_all_by_instance_uuids(context, instance_uuids):
if not instance_uuids:
return []
- return _block_device_mapping_get_query(
- context, use_slave=use_slave
- ).filter(
- models.BlockDeviceMapping.instance_uuid.in_(instance_uuids)
- ).all()
+ return _block_device_mapping_get_query(context).filter(
+ models.BlockDeviceMapping.instance_uuid.in_(instance_uuids)).all()
@require_context
-def block_device_mapping_get_all_by_instance(context, instance_uuid,
- use_slave=False):
- return _block_device_mapping_get_query(context, use_slave=use_slave).\
+@main_context_manager.reader.allow_async
+def block_device_mapping_get_all_by_instance(context, instance_uuid):
+ return _block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
all()
@require_context
+@main_context_manager.reader
def block_device_mapping_get_all_by_volume_id(context, volume_id,
columns_to_join=None):
return _block_device_mapping_get_query(context,
@@ -4036,6 +4004,7 @@ def block_device_mapping_get_all_by_volume_id(context, volume_id,
@require_context
+@main_context_manager.reader
def block_device_mapping_get_by_instance_and_volume_id(context, volume_id,
instance_uuid,
columns_to_join=None):
@@ -4047,6 +4016,7 @@ def block_device_mapping_get_by_instance_and_volume_id(context, volume_id,
@require_context
+@main_context_manager.writer
def block_device_mapping_destroy(context, bdm_id):
_block_device_mapping_get_query(context).\
filter_by(id=bdm_id).\
@@ -4054,6 +4024,7 @@ def block_device_mapping_destroy(context, bdm_id):
@require_context
+@main_context_manager.writer
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
_block_device_mapping_get_query(context).\
@@ -4063,6 +4034,7 @@ def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
@require_context
+@main_context_manager.writer
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
_block_device_mapping_get_query(context).\
@@ -4073,14 +4045,18 @@ def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
###################
-def _security_group_create(context, values, session=None):
+
+@require_context
+@main_context_manager.writer
+def security_group_create(context, values):
security_group_ref = models.SecurityGroup()
# FIXME(devcamcar): Unless I do this, rules fails with lazy load exception
# once save() is called. This will get cleaned up in next orm pass.
security_group_ref.rules
security_group_ref.update(values)
try:
- security_group_ref.save(session=session)
+ with main_context_manager.writer.savepoint.using(context):
+ security_group_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=values['project_id'],
@@ -4088,21 +4064,21 @@ def _security_group_create(context, values, session=None):
return security_group_ref
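The savepoint wrapper is what makes the duplicate-entry handling safe under enginefacade: a failed INSERT would otherwise invalidate the whole writer transaction, even though the except clause recovers. Per enginefacade, `writer.savepoint.using(context)` runs the block in a nested transaction (SAVEPOINT / ROLLBACK TO SAVEPOINT), so only that slice is rolled back. The shape, as used above:

    try:
        with main_context_manager.writer.savepoint.using(context):
            security_group_ref.save(context.session)
    except db_exc.DBDuplicateEntry:
        # only the savepoint was rolled back; the enclosing
        # writer transaction is still usable
        raise exception.SecurityGroupExists(...)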
-def _security_group_get_query(context, session=None, read_deleted=None,
+def _security_group_get_query(context, read_deleted=None,
project_only=False, join_rules=True):
- query = model_query(context, models.SecurityGroup, session=session,
+ query = model_query(context, models.SecurityGroup,
read_deleted=read_deleted, project_only=project_only)
if join_rules:
query = query.options(joinedload_all('rules.grantee_group'))
return query
-def _security_group_get_by_names(context, session, project_id, group_names):
+def _security_group_get_by_names(context, project_id, group_names):
"""Get security group models for a project by a list of names.
Raise SecurityGroupNotFoundForProject for a name not found.
"""
- query = _security_group_get_query(context, session=session,
- read_deleted="no", join_rules=False).\
+ query = _security_group_get_query(context, read_deleted="no",
+ join_rules=False).\
filter_by(project_id=project_id).\
filter(models.SecurityGroup.name.in_(group_names))
sg_models = query.all()
@@ -4118,11 +4094,13 @@ def _security_group_get_by_names(context, session, project_id, group_names):
@require_context
+@main_context_manager.reader
def security_group_get_all(context):
return _security_group_get_query(context).all()
@require_context
+@main_context_manager.reader
def security_group_get(context, security_group_id, columns_to_join=None):
query = _security_group_get_query(context, project_only=True).\
filter_by(id=security_group_id)
@@ -4142,6 +4120,7 @@ def security_group_get(context, security_group_id, columns_to_join=None):
@require_context
+@main_context_manager.reader
def security_group_get_by_name(context, project_id, group_name,
columns_to_join=None):
query = _security_group_get_query(context,
@@ -4164,6 +4143,7 @@ def security_group_get_by_name(context, project_id, group_name,
@require_context
+@main_context_manager.reader
def security_group_get_by_project(context, project_id):
return _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
@@ -4171,6 +4151,7 @@ def security_group_get_by_project(context, project_id):
@require_context
+@main_context_manager.reader
def security_group_get_by_instance(context, instance_uuid):
return _security_group_get_query(context, read_deleted="no").\
join(models.SecurityGroup.instances).\
@@ -4179,56 +4160,49 @@ def security_group_get_by_instance(context, instance_uuid):
@require_context
+@main_context_manager.reader
def security_group_in_use(context, group_id):
- session = get_session()
- with session.begin():
- # Are there any instances that haven't been deleted
- # that include this group?
- inst_assoc = model_query(context,
- models.SecurityGroupInstanceAssociation,
- read_deleted="no", session=session).\
- filter_by(security_group_id=group_id).\
- all()
- for ia in inst_assoc:
- num_instances = model_query(context, models.Instance,
- session=session, read_deleted="no").\
- filter_by(uuid=ia.instance_uuid).\
- count()
- if num_instances:
- return True
+ # Are there any instances that haven't been deleted
+ # that include this group?
+ inst_assoc = model_query(context,
+ models.SecurityGroupInstanceAssociation,
+ read_deleted="no").\
+ filter_by(security_group_id=group_id).\
+ all()
+ for ia in inst_assoc:
+ num_instances = model_query(context, models.Instance,
+ read_deleted="no").\
+ filter_by(uuid=ia.instance_uuid).\
+ count()
+ if num_instances:
+ return True
return False
@require_context
-def security_group_create(context, values):
- return _security_group_create(context, values)
-
-
-@require_context
+@main_context_manager.writer
def security_group_update(context, security_group_id, values,
columns_to_join=None):
- session = get_session()
- with session.begin():
- query = model_query(context, models.SecurityGroup,
- session=session).filter_by(id=security_group_id)
- if columns_to_join:
- for column in columns_to_join:
- query = query.options(joinedload_all(column))
- security_group_ref = query.first()
-
- if not security_group_ref:
- raise exception.SecurityGroupNotFound(
- security_group_id=security_group_id)
- security_group_ref.update(values)
- name = security_group_ref['name']
- project_id = security_group_ref['project_id']
- try:
- security_group_ref.save(session=session)
- except db_exc.DBDuplicateEntry:
- raise exception.SecurityGroupExists(
- project_id=project_id,
- security_group_name=name)
+ query = model_query(context, models.SecurityGroup).filter_by(
+ id=security_group_id)
+ if columns_to_join:
+ for column in columns_to_join:
+ query = query.options(joinedload_all(column))
+ security_group_ref = query.first()
+
+ if not security_group_ref:
+ raise exception.SecurityGroupNotFound(
+ security_group_id=security_group_id)
+ security_group_ref.update(values)
+ name = security_group_ref['name']
+ project_id = security_group_ref['project_id']
+ try:
+ security_group_ref.save(context.session)
+ except db_exc.DBDuplicateEntry:
+ raise exception.SecurityGroupExists(
+ project_id=project_id,
+ security_group_name=name)
return security_group_ref
@@ -4236,7 +4210,13 @@ def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
try:
- return _security_group_ensure_default(context)
+ # NOTE(rpodolyaka): create the default security group, if it doesn't
+ # exist. This must be done in a separate transaction, so that
+ # this one is not aborted in case a concurrent one succeeds first
+ # and the unique constraint for security group names is violated
+ # by a concurrent INSERT
+ with main_context_manager.writer.independent.using(context):
+ return _security_group_ensure_default(context)
except exception.SecurityGroupExists:
# NOTE(rpodolyaka): a concurrent transaction has succeeded first,
# suppress the error and proceed
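The writer.independent.using() block introduced above is the key to the NOTE: 'independent' forces a brand-new transaction on its own connection, with its own COMMIT or ROLLBACK, so the duplicate-key failure described in the comment cannot poison a transaction the caller already holds. A rough sketch of the control flow (illustrative names; assumes oslo.db's enginefacade, with bodies elided):

    from oslo_db import exception as db_exc
    from oslo_db.sqlalchemy import enginefacade

    @enginefacade.transaction_context_provider
    class Context(object):
        pass

    main_context_manager = enginefacade.transaction_context()
    main_context_manager.configure(connection='sqlite://')

    @main_context_manager.writer
    def _ensure_default(context):
        # SELECT the 'default' row here and INSERT it if it is missing;
        # joins the independent transaction opened by the caller below.
        pass

    def ensure_default(context):
        try:
            # 'independent' opens a brand-new transaction on its own
            # connection with its own COMMIT/ROLLBACK, so a duplicate-key
            # failure in here cannot abort a transaction the caller
            # already has open on this context.
            with main_context_manager.writer.independent.using(context):
                return _ensure_default(context)
        except db_exc.DBDuplicateEntry:
            pass  # a concurrent request won the race; look up its row

    ensure_default(Context())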
@@ -4244,83 +4224,67 @@ def security_group_ensure_default(context):
'default')
-def _security_group_ensure_default(context, session=None):
- if session is None:
- session = get_session()
-
- with session.begin(subtransactions=True):
- try:
- default_group = _security_group_get_by_names(context,
- session,
- context.project_id,
- ['default'])[0]
- except exception.NotFound:
- values = {'name': 'default',
- 'description': 'default',
- 'user_id': context.user_id,
- 'project_id': context.project_id}
- default_group = _security_group_create(context, values,
- session=session)
- usage = model_query(context, models.QuotaUsage,
- read_deleted="no", session=session).\
- filter_by(project_id=context.project_id).\
- filter_by(user_id=context.user_id).\
- filter_by(resource='security_groups')
- # Create quota usage for auto created default security group
- if not usage.first():
- _quota_usage_create(context.project_id,
- context.user_id,
- 'security_groups',
- 1, 0,
- CONF.until_refresh,
- session=session)
- else:
- usage.update({'in_use': int(usage.first().in_use) + 1})
-
- default_rules = _security_group_rule_get_default_query(context,
- session=session).all()
- for default_rule in default_rules:
- # This is suboptimal, it should be programmatic to know
- # the values of the default_rule
- rule_values = {'protocol': default_rule.protocol,
- 'from_port': default_rule.from_port,
- 'to_port': default_rule.to_port,
- 'cidr': default_rule.cidr,
- 'parent_group_id': default_group.id,
- }
- _security_group_rule_create(context,
- rule_values,
- session=session)
- return default_group
+@main_context_manager.writer
+def _security_group_ensure_default(context):
+ try:
+ default_group = _security_group_get_by_names(context,
+ context.project_id,
+ ['default'])[0]
+ except exception.NotFound:
+ values = {'name': 'default',
+ 'description': 'default',
+ 'user_id': context.user_id,
+ 'project_id': context.project_id}
+ default_group = security_group_create(context, values)
+ usage = model_query(context, models.QuotaUsage, read_deleted="no").\
+ filter_by(project_id=context.project_id).\
+ filter_by(user_id=context.user_id).\
+ filter_by(resource='security_groups')
+ # Create quota usage for auto created default security group
+ if not usage.first():
+ _quota_usage_create(context.project_id,
+ context.user_id,
+ 'security_groups',
+ 1, 0,
+ CONF.until_refresh,
+ context.session)
+ else:
+ usage.update({'in_use': int(usage.first().in_use) + 1})
+
+ default_rules = _security_group_rule_get_default_query(context).all()
+ for default_rule in default_rules:
+        # This is suboptimal; it should be programmatic to know
+ # the values of the default_rule
+ rule_values = {'protocol': default_rule.protocol,
+ 'from_port': default_rule.from_port,
+ 'to_port': default_rule.to_port,
+ 'cidr': default_rule.cidr,
+ 'parent_group_id': default_group.id,
+ }
+ _security_group_rule_create(context, rule_values)
+ return default_group
@require_context
+@main_context_manager.writer
def security_group_destroy(context, security_group_id):
- session = get_session()
- with session.begin():
- model_query(context, models.SecurityGroup,
- session=session).\
- filter_by(id=security_group_id).\
- soft_delete()
- model_query(context, models.SecurityGroupInstanceAssociation,
- session=session).\
- filter_by(security_group_id=security_group_id).\
- soft_delete()
- model_query(context, models.SecurityGroupIngressRule,
- session=session).\
- filter_by(group_id=security_group_id).\
- soft_delete()
- model_query(context, models.SecurityGroupIngressRule,
- session=session).\
- filter_by(parent_group_id=security_group_id).\
- soft_delete()
+ model_query(context, models.SecurityGroup).\
+ filter_by(id=security_group_id).\
+ soft_delete()
+ model_query(context, models.SecurityGroupInstanceAssociation).\
+ filter_by(security_group_id=security_group_id).\
+ soft_delete()
+ model_query(context, models.SecurityGroupIngressRule).\
+ filter_by(group_id=security_group_id).\
+ soft_delete()
+ model_query(context, models.SecurityGroupIngressRule).\
+ filter_by(parent_group_id=security_group_id).\
+ soft_delete()
-def _security_group_count_by_project_and_user(context, project_id, user_id,
- session=None):
+def _security_group_count_by_project_and_user(context, project_id, user_id):
nova.context.authorize_project_context(context, project_id)
- return model_query(context, models.SecurityGroup, read_deleted="no",
- session=session).\
+ return model_query(context, models.SecurityGroup, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
@@ -4329,19 +4293,19 @@ def _security_group_count_by_project_and_user(context, project_id, user_id,
###################
-def _security_group_rule_create(context, values, session=None):
+def _security_group_rule_create(context, values):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
- security_group_rule_ref.save(session=session)
+ security_group_rule_ref.save(context.session)
return security_group_rule_ref
-def _security_group_rule_get_query(context, session=None):
- return model_query(context, models.SecurityGroupIngressRule,
- session=session)
+def _security_group_rule_get_query(context):
+ return model_query(context, models.SecurityGroupIngressRule)
@require_context
+@main_context_manager.reader
def security_group_rule_get(context, security_group_rule_id):
result = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
@@ -4355,6 +4319,7 @@ def security_group_rule_get(context, security_group_rule_id):
@require_context
+@main_context_manager.reader
def security_group_rule_get_by_security_group(context, security_group_id,
columns_to_join=None):
if columns_to_join is None:
@@ -4368,6 +4333,7 @@ def security_group_rule_get_by_security_group(context, security_group_id,
@require_context
+@main_context_manager.reader
def security_group_rule_get_by_instance(context, instance_uuid):
return (_security_group_rule_get_query(context).
join('parent_group', 'instances').
@@ -4377,11 +4343,13 @@ def security_group_rule_get_by_instance(context, instance_uuid):
@require_context
+@main_context_manager.writer
def security_group_rule_create(context, values):
return _security_group_rule_create(context, values)
@require_context
+@main_context_manager.writer
def security_group_rule_destroy(context, security_group_rule_id):
count = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
@@ -4392,22 +4360,23 @@ def security_group_rule_destroy(context, security_group_rule_id):
@require_context
+@main_context_manager.reader
def security_group_rule_count_by_group(context, security_group_id):
return (model_query(context, models.SecurityGroupIngressRule,
read_deleted="no").
filter_by(parent_group_id=security_group_id).
count())
-#
+
###################
-def _security_group_rule_get_default_query(context, session=None):
- return model_query(context, models.SecurityGroupIngressDefaultRule,
- session=session)
+def _security_group_rule_get_default_query(context):
+ return model_query(context, models.SecurityGroupIngressDefaultRule)
@require_context
+@main_context_manager.reader
def security_group_default_rule_get(context, security_group_rule_default_id):
result = _security_group_rule_get_default_query(context).\
filter_by(id=security_group_rule_default_id).\
@@ -4420,58 +4389,59 @@ def security_group_default_rule_get(context, security_group_rule_default_id):
return result
+@main_context_manager.writer
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
- session = get_session()
- with session.begin():
- count = _security_group_rule_get_default_query(context,
- session=session).\
- filter_by(id=security_group_rule_default_id).\
- soft_delete()
- if count == 0:
- raise exception.SecurityGroupDefaultRuleNotFound(
- rule_id=security_group_rule_default_id)
+ count = _security_group_rule_get_default_query(context).\
+ filter_by(id=security_group_rule_default_id).\
+ soft_delete()
+ if count == 0:
+ raise exception.SecurityGroupDefaultRuleNotFound(
+ rule_id=security_group_rule_default_id)
+@main_context_manager.writer
def security_group_default_rule_create(context, values):
security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
security_group_default_rule_ref.update(values)
- security_group_default_rule_ref.save()
+ security_group_default_rule_ref.save(context.session)
return security_group_default_rule_ref
@require_context
+@main_context_manager.reader
def security_group_default_rule_list(context):
- return _security_group_rule_get_default_query(context).\
- all()
+ return _security_group_rule_get_default_query(context).all()
###################
+@main_context_manager.writer
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
fw_rule_ref.update(rule)
- fw_rule_ref.save()
+ fw_rule_ref.save(context.session)
return fw_rule_ref
+@main_context_manager.reader
def provider_fw_rule_get_all(context):
return model_query(context, models.ProviderFirewallRule).all()
+@main_context_manager.writer
def provider_fw_rule_destroy(context, rule_id):
- session = get_session()
- with session.begin():
- session.query(models.ProviderFirewallRule).\
- filter_by(id=rule_id).\
- soft_delete()
+ context.session.query(models.ProviderFirewallRule).\
+ filter_by(id=rule_id).\
+ soft_delete()
###################
@require_context
+@main_context_manager.writer
def project_get_networks(context, project_id, associate=True):
# NOTE(tr3buchet): as before this function will associate
# a project with a network if it doesn't have one and
@@ -4521,6 +4491,20 @@ def migration_get(context, id):
@main_context_manager.reader
+def migration_get_by_id_and_instance(context, id, instance_uuid):
+ result = model_query(context, models.Migration).\
+ filter_by(id=id).\
+ filter_by(instance_uuid=instance_uuid).\
+ first()
+
+ if not result:
+ raise exception.MigrationNotFoundForInstance(migration_id=id,
+ instance_id=instance_uuid)
+
+ return result
+
+
+@main_context_manager.reader
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
@@ -4556,7 +4540,8 @@ def migration_get_in_progress_by_host_and_node(context, host, node):
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['accepted', 'confirmed',
- 'reverted', 'error'])).\
+ 'reverted', 'error',
+ 'failed'])).\
options(joinedload_all('instance.system_metadata')).\
all()
@@ -4587,11 +4572,12 @@ def migration_get_all_by_filters(context, filters):
##################
+@main_context_manager.writer
def console_pool_create(context, values):
pool = models.ConsolePool()
pool.update(values)
try:
- pool.save()
+ pool.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.ConsolePoolExists(
host=values["host"],
@@ -4601,6 +4587,7 @@ def console_pool_create(context, values):
return pool
+@main_context_manager.reader
def console_pool_get_by_host_type(context, compute_host, host,
console_type):
@@ -4619,6 +4606,7 @@ def console_pool_get_by_host_type(context, compute_host, host,
return result
+@main_context_manager.reader
def console_pool_get_all_by_host_type(context, host, console_type):
return model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
@@ -4627,22 +4615,26 @@ def console_pool_get_all_by_host_type(context, host, console_type):
all()
+##################
+
+
+@main_context_manager.writer
def console_create(context, values):
console = models.Console()
console.update(values)
- console.save()
+ console.save(context.session)
return console
+@main_context_manager.writer
def console_delete(context, console_id):
- session = get_session()
- with session.begin():
- # NOTE(mdragon): consoles are meant to be transient.
- session.query(models.Console).\
- filter_by(id=console_id).\
- delete()
+ # NOTE(mdragon): consoles are meant to be transient.
+ context.session.query(models.Console).\
+ filter_by(id=console_id).\
+ delete()
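This before/after is the recurring mechanical translation in the series: the hand-opened session and its with session.begin() block disappear, and the decorator-owned transaction is reached through context.session. A compact sketch of just that shape (illustrative model, not Nova's; note the hard delete() here, where most of the file uses soft_delete()):

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base
    from oslo_db.sqlalchemy import enginefacade

    @enginefacade.transaction_context_provider
    class Context(object):
        pass

    main_context_manager = enginefacade.transaction_context()
    main_context_manager.configure(connection='sqlite://')

    Base = declarative_base()

    class Console(Base):
        __tablename__ = 'consoles'
        id = sa.Column(sa.Integer, primary_key=True)

    @main_context_manager.writer
    def console_delete(context, console_id):
        # transient rows, so a hard DELETE rather than a soft_delete()
        context.session.query(Console).filter_by(id=console_id).delete()

    Base.metadata.create_all(
        main_context_manager.get_legacy_facade().get_engine())
    console_delete(Context(), 1)  # deleting an absent row is a no-op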
+@main_context_manager.reader
def console_get_by_pool_instance(context, pool_id, instance_uuid):
result = model_query(context, models.Console, read_deleted="yes").\
filter_by(pool_id=pool_id).\
@@ -4657,6 +4649,7 @@ def console_get_by_pool_instance(context, pool_id, instance_uuid):
return result
+@main_context_manager.reader
def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid)
@@ -4666,6 +4659,7 @@ def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
return query.all()
+@main_context_manager.reader
def console_get(context, console_id, instance_uuid=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(id=console_id).\
@@ -4689,6 +4683,7 @@ def console_get(context, console_id, instance_uuid=None):
##################
+@main_context_manager.writer
def flavor_create(context, values, projects=None):
"""Create a new instance type. In order to pass in extra specs,
    the values dict should contain an 'extra_specs' key/value pair:
@@ -4712,21 +4707,19 @@ def flavor_create(context, values, projects=None):
if projects is None:
projects = []
- session = get_session()
- with session.begin():
- try:
- instance_type_ref.save()
- except db_exc.DBDuplicateEntry as e:
- if 'flavorid' in e.columns:
- raise exception.FlavorIdExists(flavor_id=values['flavorid'])
- raise exception.FlavorExists(name=values['name'])
- except Exception as e:
- raise db_exc.DBError(e)
- for project in set(projects):
- access_ref = models.InstanceTypeProjects()
- access_ref.update({"instance_type_id": instance_type_ref.id,
- "project_id": project})
- access_ref.save()
+ try:
+ instance_type_ref.save(context.session)
+ except db_exc.DBDuplicateEntry as e:
+ if 'flavorid' in e.columns:
+ raise exception.FlavorIdExists(flavor_id=values['flavorid'])
+ raise exception.FlavorExists(name=values['name'])
+ except Exception as e:
+ raise db_exc.DBError(e)
+ for project in set(projects):
+ access_ref = models.InstanceTypeProjects()
+ access_ref.update({"instance_type_id": instance_type_ref.id,
+ "project_id": project})
+ access_ref.save(context.session)
return _dict_with_extra_specs(instance_type_ref)
@@ -4750,8 +4743,8 @@ def _dict_with_extra_specs(inst_type_query):
return inst_type_dict
-def _flavor_get_query(context, session=None, read_deleted=None):
- query = model_query(context, models.InstanceTypes, session=session,
+def _flavor_get_query(context, read_deleted=None):
+ query = model_query(context, models.InstanceTypes,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if not context.is_admin:
@@ -4764,6 +4757,7 @@ def _flavor_get_query(context, session=None, read_deleted=None):
@require_context
+@main_context_manager.reader
def flavor_get_all(context, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc', limit=None,
marker=None):
@@ -4821,23 +4815,22 @@ def flavor_get_all(context, inactive=False, filters=None,
return [_dict_with_extra_specs(i) for i in inst_types]
-def _flavor_get_id_from_flavor_query(context, flavor_id, session=None):
+def _flavor_get_id_from_flavor_query(context, flavor_id):
return model_query(context, models.InstanceTypes,
(models.InstanceTypes.id,),
- read_deleted="no", session=session).\
+ read_deleted="no").\
filter_by(flavorid=flavor_id)
-def _flavor_get_id_from_flavor(context, flavor_id, session=None):
- result = _flavor_get_id_from_flavor_query(context, flavor_id,
- session=session).\
- first()
+def _flavor_get_id_from_flavor(context, flavor_id):
+ result = _flavor_get_id_from_flavor_query(context, flavor_id).first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return result[0]
@require_context
+@main_context_manager.reader
def flavor_get(context, id):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
@@ -4849,6 +4842,7 @@ def flavor_get(context, id):
@require_context
+@main_context_manager.reader
def flavor_get_by_name(context, name):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
@@ -4860,6 +4854,7 @@ def flavor_get_by_name(context, name):
@require_context
+@main_context_manager.reader
def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
"""Returns a dict describing specific flavor_id."""
result = _flavor_get_query(context, read_deleted=read_deleted).\
@@ -4872,43 +4867,40 @@ def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
return _dict_with_extra_specs(result)
+@main_context_manager.writer
def flavor_destroy(context, name):
"""Marks specific flavor as deleted."""
- session = get_session()
- with session.begin():
- ref = model_query(context, models.InstanceTypes, session=session,
- read_deleted="no").\
- filter_by(name=name).\
- first()
- if not ref:
- raise exception.FlavorNotFoundByName(flavor_name=name)
+ ref = model_query(context, models.InstanceTypes, read_deleted="no").\
+ filter_by(name=name).\
+ first()
+ if not ref:
+ raise exception.FlavorNotFoundByName(flavor_name=name)
- ref.soft_delete(session=session)
- model_query(context, models.InstanceTypeExtraSpecs,
- session=session, read_deleted="no").\
- filter_by(instance_type_id=ref['id']).\
- soft_delete()
- model_query(context, models.InstanceTypeProjects,
- session=session, read_deleted="no").\
- filter_by(instance_type_id=ref['id']).\
- soft_delete()
+ ref.soft_delete(context.session)
+ model_query(context, models.InstanceTypeExtraSpecs, read_deleted="no").\
+ filter_by(instance_type_id=ref['id']).\
+ soft_delete()
+ model_query(context, models.InstanceTypeProjects, read_deleted="no").\
+ filter_by(instance_type_id=ref['id']).\
+ soft_delete()
-def _flavor_access_query(context, session=None):
- return model_query(context, models.InstanceTypeProjects, session=session,
- read_deleted="no")
+def _flavor_access_query(context):
+ return model_query(context, models.InstanceTypeProjects, read_deleted="no")
+@main_context_manager.reader
def flavor_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access list by flavor id."""
- instance_type_id_subq = \
- _flavor_get_id_from_flavor_query(context, flavor_id)
+ instance_type_id_subq = _flavor_get_id_from_flavor_query(context,
+ flavor_id)
access_refs = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id_subq).\
all()
return access_refs
+@main_context_manager.writer
def flavor_access_add(context, flavor_id, project_id):
"""Add given tenant to the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
@@ -4917,13 +4909,14 @@ def flavor_access_add(context, flavor_id, project_id):
access_ref.update({"instance_type_id": instance_type_id,
"project_id": project_id})
try:
- access_ref.save()
+ access_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.FlavorAccessExists(flavor_id=flavor_id,
project_id=project_id)
return access_ref
+@main_context_manager.writer
def flavor_access_remove(context, flavor_id, project_id):
"""Remove given tenant from the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
@@ -4937,22 +4930,24 @@ def flavor_access_remove(context, flavor_id, project_id):
project_id=project_id)
-def _flavor_extra_specs_get_query(context, flavor_id, session=None):
- instance_type_id_subq = \
- _flavor_get_id_from_flavor_query(context, flavor_id)
+def _flavor_extra_specs_get_query(context, flavor_id):
+ instance_type_id_subq = _flavor_get_id_from_flavor_query(context,
+ flavor_id)
- return model_query(context, models.InstanceTypeExtraSpecs, session=session,
+ return model_query(context, models.InstanceTypeExtraSpecs,
read_deleted="no").\
filter_by(instance_type_id=instance_type_id_subq)
@require_context
+@main_context_manager.reader
def flavor_extra_specs_get(context, flavor_id):
rows = _flavor_extra_specs_get_query(context, flavor_id).all()
return {row['key']: row['value'] for row in rows}
@require_context
+@main_context_manager.writer
def flavor_extra_specs_delete(context, flavor_id, key):
result = _flavor_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
@@ -4964,34 +4959,34 @@ def flavor_extra_specs_delete(context, flavor_id, key):
@require_context
+@main_context_manager.writer
def flavor_extra_specs_update_or_create(context, flavor_id, specs,
max_retries=10):
for attempt in range(max_retries):
try:
- session = get_session()
- with session.begin():
- instance_type_id = _flavor_get_id_from_flavor(context,
- flavor_id, session)
-
- spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
- session=session, read_deleted="no").\
- filter_by(instance_type_id=instance_type_id).\
- filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
- all()
-
- existing_keys = set()
- for spec_ref in spec_refs:
- key = spec_ref["key"]
- existing_keys.add(key)
+ instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
+
+ spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
+ read_deleted="no").\
+ filter_by(instance_type_id=instance_type_id).\
+ filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
+ all()
+
+ existing_keys = set()
+ for spec_ref in spec_refs:
+ key = spec_ref["key"]
+ existing_keys.add(key)
+ with main_context_manager.writer.savepoint.using(context):
spec_ref.update({"value": specs[key]})
- for key, value in specs.items():
- if key in existing_keys:
- continue
- spec_ref = models.InstanceTypeExtraSpecs()
+ for key, value in specs.items():
+ if key in existing_keys:
+ continue
+ spec_ref = models.InstanceTypeExtraSpecs()
+ with main_context_manager.writer.savepoint.using(context):
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type_id})
- session.add(spec_ref)
+ context.session.add(spec_ref)
return specs
except db_exc.DBDuplicateEntry:
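Here the savepoint granularity is per row: each extra-spec UPDATE or INSERT sits in its own SAVEPOINT, so a concurrent writer racing on one key rolls back only that row, and the surrounding try/except can retry the whole attempt on DBDuplicateEntry. The skeleton of that loop (bodies elided, names illustrative):

    from oslo_db import exception as db_exc
    from oslo_db.sqlalchemy import enginefacade

    @enginefacade.transaction_context_provider
    class Context(object):
        pass

    main_context_manager = enginefacade.transaction_context()
    main_context_manager.configure(connection='sqlite://')

    @main_context_manager.writer
    def specs_update_or_create(context, specs, max_retries=10):
        for attempt in range(max_retries):
            try:
                for key, value in specs.items():
                    # one SAVEPOINT per row: a duplicate-key race on one
                    # key leaves the rest of the transaction intact
                    with main_context_manager.writer.savepoint.using(
                            context):
                        pass  # UPDATE the existing row or add() a new one
                return specs
            except db_exc.DBDuplicateEntry:
                if attempt == max_retries - 1:
                    raise

    specs_update_or_create(Context(), {'hw:cpu_policy': 'dedicated'})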
@@ -5010,7 +5005,7 @@ def cell_create(context, values):
cell = models.Cell()
cell.update(values)
try:
- cell.save(session=context.session)
+ cell.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.CellExists(name=values['name'])
return cell
@@ -5050,23 +5045,20 @@ def cell_get_all(context):
########################
# User-provided metadata
-def _instance_metadata_get_multi(context, instance_uuids,
- session=None, use_slave=False):
+def _instance_metadata_get_multi(context, instance_uuids):
if not instance_uuids:
return []
- return model_query(context, models.InstanceMetadata,
- session=session, use_slave=use_slave).\
- filter(
- models.InstanceMetadata.instance_uuid.in_(instance_uuids))
+ return model_query(context, models.InstanceMetadata).filter(
+ models.InstanceMetadata.instance_uuid.in_(instance_uuids))
-def _instance_metadata_get_query(context, instance_uuid, session=None):
- return model_query(context, models.InstanceMetadata, session=session,
- read_deleted="no").\
+def _instance_metadata_get_query(context, instance_uuid):
+ return model_query(context, models.InstanceMetadata, read_deleted="no").\
filter_by(instance_uuid=instance_uuid)
@require_context
+@main_context_manager.reader
def instance_metadata_get(context, instance_uuid):
rows = _instance_metadata_get_query(context, instance_uuid).all()
return {row['key']: row['value'] for row in rows}
@@ -5074,6 +5066,7 @@ def instance_metadata_get(context, instance_uuid):
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def instance_metadata_delete(context, instance_uuid, key):
_instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
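Note the decorator order on these metadata functions: wrap_db_retry is stacked above main_context_manager.writer, so when a deadlock is retried the writer decorator is re-entered and every attempt runs in a fresh transaction. A sketch of the stacking (assumes oslo.db; the body is elided):

    from oslo_db import api as oslo_db_api
    from oslo_db.sqlalchemy import enginefacade

    @enginefacade.transaction_context_provider
    class Context(object):
        pass

    main_context_manager = enginefacade.transaction_context()
    main_context_manager.configure(connection='sqlite://')

    # outermost: retry on DBDeadlock; innermost: transaction scope.
    # Reversing the order would retry inside one doomed transaction.
    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
    @main_context_manager.writer
    def metadata_delete(context, instance_uuid, key):
        pass  # soft-delete the matching row via context.session

    metadata_delete(Context(), 'some-uuid', 'some-key')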
@@ -5082,92 +5075,83 @@ def instance_metadata_delete(context, instance_uuid, key):
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def instance_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
- session = get_session()
- with session.begin(subtransactions=True):
- if delete:
- _instance_metadata_get_query(context, instance_uuid,
- session=session).\
- filter(~models.InstanceMetadata.key.in_(all_keys)).\
- soft_delete(synchronize_session=False)
-
- already_existing_keys = []
- meta_refs = _instance_metadata_get_query(context, instance_uuid,
- session=session).\
- filter(models.InstanceMetadata.key.in_(all_keys)).\
- all()
+ if delete:
+ _instance_metadata_get_query(context, instance_uuid).\
+ filter(~models.InstanceMetadata.key.in_(all_keys)).\
+ soft_delete(synchronize_session=False)
+
+ already_existing_keys = []
+ meta_refs = _instance_metadata_get_query(context, instance_uuid).\
+ filter(models.InstanceMetadata.key.in_(all_keys)).\
+ all()
- for meta_ref in meta_refs:
- already_existing_keys.append(meta_ref.key)
- meta_ref.update({"value": metadata[meta_ref.key]})
+ for meta_ref in meta_refs:
+ already_existing_keys.append(meta_ref.key)
+ meta_ref.update({"value": metadata[meta_ref.key]})
- new_keys = set(all_keys) - set(already_existing_keys)
- for key in new_keys:
- meta_ref = models.InstanceMetadata()
- meta_ref.update({"key": key, "value": metadata[key],
- "instance_uuid": instance_uuid})
- session.add(meta_ref)
+ new_keys = set(all_keys) - set(already_existing_keys)
+ for key in new_keys:
+ meta_ref = models.InstanceMetadata()
+ meta_ref.update({"key": key, "value": metadata[key],
+ "instance_uuid": instance_uuid})
+ context.session.add(meta_ref)
- return metadata
+ return metadata
#######################
# System-owned metadata
-def _instance_system_metadata_get_multi(context, instance_uuids,
- session=None, use_slave=False):
+def _instance_system_metadata_get_multi(context, instance_uuids):
if not instance_uuids:
return []
return model_query(context, models.InstanceSystemMetadata,
- session=session, use_slave=use_slave,
- read_deleted='yes').\
- filter(
- models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
+ read_deleted='yes').filter(
+ models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
-def _instance_system_metadata_get_query(context, instance_uuid, session=None):
- return model_query(context, models.InstanceSystemMetadata,
- session=session).\
+def _instance_system_metadata_get_query(context, instance_uuid):
+ return model_query(context, models.InstanceSystemMetadata).\
filter_by(instance_uuid=instance_uuid)
@require_context
+@main_context_manager.reader
def instance_system_metadata_get(context, instance_uuid):
rows = _instance_system_metadata_get_query(context, instance_uuid).all()
return {row['key']: row['value'] for row in rows}
@require_context
+@main_context_manager.writer
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
- session = get_session()
- with session.begin(subtransactions=True):
- if delete:
- _instance_system_metadata_get_query(context, instance_uuid,
- session=session).\
- filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
- soft_delete(synchronize_session=False)
-
- already_existing_keys = []
- meta_refs = _instance_system_metadata_get_query(context, instance_uuid,
- session=session).\
- filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
- all()
+ if delete:
+ _instance_system_metadata_get_query(context, instance_uuid).\
+ filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
+ soft_delete(synchronize_session=False)
+
+ already_existing_keys = []
+ meta_refs = _instance_system_metadata_get_query(context, instance_uuid).\
+ filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
+ all()
- for meta_ref in meta_refs:
- already_existing_keys.append(meta_ref.key)
- meta_ref.update({"value": metadata[meta_ref.key]})
+ for meta_ref in meta_refs:
+ already_existing_keys.append(meta_ref.key)
+ meta_ref.update({"value": metadata[meta_ref.key]})
- new_keys = set(all_keys) - set(already_existing_keys)
- for key in new_keys:
- meta_ref = models.InstanceSystemMetadata()
- meta_ref.update({"key": key, "value": metadata[key],
- "instance_uuid": instance_uuid})
- session.add(meta_ref)
+ new_keys = set(all_keys) - set(already_existing_keys)
+ for key in new_keys:
+ meta_ref = models.InstanceSystemMetadata()
+ meta_ref.update({"key": key, "value": metadata[key],
+ "instance_uuid": instance_uuid})
+ context.session.add(meta_ref)
- return metadata
+ return metadata
####################
@@ -5225,11 +5209,11 @@ def agent_build_update(context, agent_build_id, values):
####################
@require_context
-def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
+@main_context_manager.reader.allow_async
+def bw_usage_get(context, uuid, start_period, mac):
values = {'start_period': start_period}
values = convert_objects_related_datetimes(values, 'start_period')
- return model_query(context, models.BandwidthUsage, read_deleted="yes",
- use_slave=use_slave).\
+ return model_query(context, models.BandwidthUsage, read_deleted="yes").\
filter_by(start_period=values['start_period']).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
@@ -5237,12 +5221,12 @@ def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
@require_context
-def bw_usage_get_by_uuids(context, uuids, start_period, use_slave=False):
+@main_context_manager.reader.allow_async
+def bw_usage_get_by_uuids(context, uuids, start_period):
values = {'start_period': start_period}
values = convert_objects_related_datetimes(values, 'start_period')
return (
- model_query(context, models.BandwidthUsage, read_deleted="yes",
- use_slave=use_slave).
+ model_query(context, models.BandwidthUsage, read_deleted="yes").
filter(models.BandwidthUsage.uuid.in_(uuids)).
filter_by(start_period=values['start_period']).
all()
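reader.allow_async is what retires the use_slave= keyword threaded through these signatures: the decorated function joins an asynchronous (slave) reader transaction if the caller started one, and otherwise behaves like a plain reader against the master. A minimal sketch (sqlite stands in for both engines; slave_connection would point at a real replica):

    import sqlalchemy as sa
    from oslo_db.sqlalchemy import enginefacade

    @enginefacade.transaction_context_provider
    class Context(object):
        pass

    main_context_manager = enginefacade.transaction_context()
    main_context_manager.configure(connection='sqlite://',
                                   slave_connection=None)

    @main_context_manager.reader.allow_async
    def usage_get(context):
        # joins whatever reader transaction is active: the slave when
        # the caller opened an async one, the master otherwise
        return context.session.execute(sa.text('SELECT 1')).scalar()

    print(usage_get(Context()))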
@@ -5251,59 +5235,58 @@ def bw_usage_get_by_uuids(context, uuids, start_period, use_slave=False):
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@main_context_manager.writer
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None):
- session = get_session()
-
if last_refreshed is None:
last_refreshed = timeutils.utcnow()
# NOTE(comstud): More often than not, we'll be updating records vs
# creating records. Optimize accordingly, trying to update existing
# records. Fall back to creation when no rows are updated.
- with session.begin():
- ts_values = {'last_refreshed': last_refreshed,
- 'start_period': start_period}
- ts_keys = ('start_period', 'last_refreshed')
- ts_values = convert_objects_related_datetimes(ts_values, *ts_keys)
- values = {'last_refreshed': ts_values['last_refreshed'],
- 'last_ctr_in': last_ctr_in,
- 'last_ctr_out': last_ctr_out,
- 'bw_in': bw_in,
- 'bw_out': bw_out}
- bw_usage = model_query(context, models.BandwidthUsage, session=session,
- read_deleted='yes').\
- filter_by(start_period=ts_values['start_period']).\
- filter_by(uuid=uuid).\
- filter_by(mac=mac).first()
-
- if bw_usage:
- bw_usage.update(values)
- return bw_usage
-
- bwusage = models.BandwidthUsage()
- bwusage.start_period = ts_values['start_period']
- bwusage.uuid = uuid
- bwusage.mac = mac
- bwusage.last_refreshed = ts_values['last_refreshed']
- bwusage.bw_in = bw_in
- bwusage.bw_out = bw_out
- bwusage.last_ctr_in = last_ctr_in
- bwusage.last_ctr_out = last_ctr_out
- try:
- bwusage.save(session=session)
- except db_exc.DBDuplicateEntry:
- # NOTE(sirp): Possible race if two greenthreads attempt to create
- # the usage entry at the same time. First one wins.
- pass
- return bwusage
+ ts_values = {'last_refreshed': last_refreshed,
+ 'start_period': start_period}
+ ts_keys = ('start_period', 'last_refreshed')
+ ts_values = convert_objects_related_datetimes(ts_values, *ts_keys)
+ values = {'last_refreshed': ts_values['last_refreshed'],
+ 'last_ctr_in': last_ctr_in,
+ 'last_ctr_out': last_ctr_out,
+ 'bw_in': bw_in,
+ 'bw_out': bw_out}
+ bw_usage = model_query(context, models.BandwidthUsage,
+ read_deleted='yes').\
+ filter_by(start_period=ts_values['start_period']).\
+ filter_by(uuid=uuid).\
+ filter_by(mac=mac).first()
+
+ if bw_usage:
+ bw_usage.update(values)
+ return bw_usage
+
+ bwusage = models.BandwidthUsage()
+ bwusage.start_period = ts_values['start_period']
+ bwusage.uuid = uuid
+ bwusage.mac = mac
+ bwusage.last_refreshed = ts_values['last_refreshed']
+ bwusage.bw_in = bw_in
+ bwusage.bw_out = bw_out
+ bwusage.last_ctr_in = last_ctr_in
+ bwusage.last_ctr_out = last_ctr_out
+ try:
+ bwusage.save(context.session)
+ except db_exc.DBDuplicateEntry:
+ # NOTE(sirp): Possible race if two greenthreads attempt to create
+ # the usage entry at the same time. First one wins.
+ pass
+ return bwusage
####################
@require_context
+@main_context_manager.reader
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
@@ -5311,119 +5294,118 @@ def vol_get_usage_by_time(context, begin):
models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed == null(),
models.VolumeUsage.curr_last_refreshed > begin,
- )).\
- all()
+ )).all()
@require_context
+@main_context_manager.writer
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
update_totals=False):
- session = get_session()
refreshed = timeutils.utcnow()
- with session.begin():
- values = {}
- # NOTE(dricco): We will be mostly updating current usage records vs
- # updating total or creating records. Optimize accordingly.
- if not update_totals:
- values = {'curr_last_refreshed': refreshed,
- 'curr_reads': rd_req,
- 'curr_read_bytes': rd_bytes,
- 'curr_writes': wr_req,
- 'curr_write_bytes': wr_bytes,
- 'instance_uuid': instance_id,
- 'project_id': project_id,
- 'user_id': user_id,
- 'availability_zone': availability_zone}
- else:
- values = {'tot_last_refreshed': refreshed,
- 'tot_reads': models.VolumeUsage.tot_reads + rd_req,
- 'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
- rd_bytes,
- 'tot_writes': models.VolumeUsage.tot_writes + wr_req,
- 'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
- wr_bytes,
- 'curr_reads': 0,
- 'curr_read_bytes': 0,
- 'curr_writes': 0,
- 'curr_write_bytes': 0,
- 'instance_uuid': instance_id,
- 'project_id': project_id,
- 'user_id': user_id,
- 'availability_zone': availability_zone}
-
- current_usage = model_query(context, models.VolumeUsage,
- session=session, read_deleted="yes").\
- filter_by(volume_id=id).\
- first()
- if current_usage:
- if (rd_req < current_usage['curr_reads'] or
- rd_bytes < current_usage['curr_read_bytes'] or
- wr_req < current_usage['curr_writes'] or
- wr_bytes < current_usage['curr_write_bytes']):
- LOG.info(_LI("Volume(%s) has lower stats then what is in "
- "the database. Instance must have been rebooted "
- "or crashed. Updating totals."), id)
- if not update_totals:
- values['tot_reads'] = (models.VolumeUsage.tot_reads +
- current_usage['curr_reads'])
- values['tot_read_bytes'] = (
- models.VolumeUsage.tot_read_bytes +
- current_usage['curr_read_bytes'])
- values['tot_writes'] = (models.VolumeUsage.tot_writes +
- current_usage['curr_writes'])
- values['tot_write_bytes'] = (
- models.VolumeUsage.tot_write_bytes +
- current_usage['curr_write_bytes'])
- else:
- values['tot_reads'] = (models.VolumeUsage.tot_reads +
- current_usage['curr_reads'] +
- rd_req)
- values['tot_read_bytes'] = (
- models.VolumeUsage.tot_read_bytes +
- current_usage['curr_read_bytes'] + rd_bytes)
- values['tot_writes'] = (models.VolumeUsage.tot_writes +
- current_usage['curr_writes'] +
- wr_req)
- values['tot_write_bytes'] = (
- models.VolumeUsage.tot_write_bytes +
- current_usage['curr_write_bytes'] + wr_bytes)
-
- current_usage.update(values)
- current_usage.save(session=session)
- session.refresh(current_usage)
- return current_usage
-
- vol_usage = models.VolumeUsage()
- vol_usage.volume_id = id
- vol_usage.instance_uuid = instance_id
- vol_usage.project_id = project_id
- vol_usage.user_id = user_id
- vol_usage.availability_zone = availability_zone
-
- if not update_totals:
- vol_usage.curr_last_refreshed = refreshed
- vol_usage.curr_reads = rd_req
- vol_usage.curr_read_bytes = rd_bytes
- vol_usage.curr_writes = wr_req
- vol_usage.curr_write_bytes = wr_bytes
- else:
- vol_usage.tot_last_refreshed = refreshed
- vol_usage.tot_reads = rd_req
- vol_usage.tot_read_bytes = rd_bytes
- vol_usage.tot_writes = wr_req
- vol_usage.tot_write_bytes = wr_bytes
+ values = {}
+ # NOTE(dricco): We will be mostly updating current usage records vs
+ # updating total or creating records. Optimize accordingly.
+ if not update_totals:
+ values = {'curr_last_refreshed': refreshed,
+ 'curr_reads': rd_req,
+ 'curr_read_bytes': rd_bytes,
+ 'curr_writes': wr_req,
+ 'curr_write_bytes': wr_bytes,
+ 'instance_uuid': instance_id,
+ 'project_id': project_id,
+ 'user_id': user_id,
+ 'availability_zone': availability_zone}
+ else:
+ values = {'tot_last_refreshed': refreshed,
+ 'tot_reads': models.VolumeUsage.tot_reads + rd_req,
+ 'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
+ rd_bytes,
+ 'tot_writes': models.VolumeUsage.tot_writes + wr_req,
+ 'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
+ wr_bytes,
+ 'curr_reads': 0,
+ 'curr_read_bytes': 0,
+ 'curr_writes': 0,
+ 'curr_write_bytes': 0,
+ 'instance_uuid': instance_id,
+ 'project_id': project_id,
+ 'user_id': user_id,
+ 'availability_zone': availability_zone}
+
+ current_usage = model_query(context, models.VolumeUsage,
+ read_deleted="yes").\
+ filter_by(volume_id=id).\
+ first()
+ if current_usage:
+ if (rd_req < current_usage['curr_reads'] or
+ rd_bytes < current_usage['curr_read_bytes'] or
+ wr_req < current_usage['curr_writes'] or
+ wr_bytes < current_usage['curr_write_bytes']):
+            LOG.info(_LI("Volume(%s) has lower stats than what is in "
+ "the database. Instance must have been rebooted "
+ "or crashed. Updating totals."), id)
+ if not update_totals:
+ values['tot_reads'] = (models.VolumeUsage.tot_reads +
+ current_usage['curr_reads'])
+ values['tot_read_bytes'] = (
+ models.VolumeUsage.tot_read_bytes +
+ current_usage['curr_read_bytes'])
+ values['tot_writes'] = (models.VolumeUsage.tot_writes +
+ current_usage['curr_writes'])
+ values['tot_write_bytes'] = (
+ models.VolumeUsage.tot_write_bytes +
+ current_usage['curr_write_bytes'])
+ else:
+ values['tot_reads'] = (models.VolumeUsage.tot_reads +
+ current_usage['curr_reads'] +
+ rd_req)
+ values['tot_read_bytes'] = (
+ models.VolumeUsage.tot_read_bytes +
+ current_usage['curr_read_bytes'] + rd_bytes)
+ values['tot_writes'] = (models.VolumeUsage.tot_writes +
+ current_usage['curr_writes'] +
+ wr_req)
+ values['tot_write_bytes'] = (
+ models.VolumeUsage.tot_write_bytes +
+ current_usage['curr_write_bytes'] + wr_bytes)
+
+ current_usage.update(values)
+ current_usage.save(context.session)
+ context.session.refresh(current_usage)
+ return current_usage
+
+ vol_usage = models.VolumeUsage()
+ vol_usage.volume_id = id
+ vol_usage.instance_uuid = instance_id
+ vol_usage.project_id = project_id
+ vol_usage.user_id = user_id
+ vol_usage.availability_zone = availability_zone
+
+ if not update_totals:
+ vol_usage.curr_last_refreshed = refreshed
+ vol_usage.curr_reads = rd_req
+ vol_usage.curr_read_bytes = rd_bytes
+ vol_usage.curr_writes = wr_req
+ vol_usage.curr_write_bytes = wr_bytes
+ else:
+ vol_usage.tot_last_refreshed = refreshed
+ vol_usage.tot_reads = rd_req
+ vol_usage.tot_read_bytes = rd_bytes
+ vol_usage.tot_writes = wr_req
+ vol_usage.tot_write_bytes = wr_bytes
- vol_usage.save(session=session)
+ vol_usage.save(context.session)
- return vol_usage
+ return vol_usage
####################
+@main_context_manager.reader
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
result = model_query(context, models.S3Image, read_deleted="yes").\
@@ -5436,6 +5418,7 @@ def s3_image_get(context, image_id):
return result
+@main_context_manager.reader
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
result = model_query(context, models.S3Image, read_deleted="yes").\
@@ -5448,12 +5431,13 @@ def s3_image_get_by_uuid(context, image_uuid):
return result
+@main_context_manager.writer
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
- s3_image_ref.save()
+ s3_image_ref.save(context.session)
except Exception as e:
raise db_exc.DBError(e)
@@ -5464,11 +5448,10 @@ def s3_image_create(context, image_uuid):
def _aggregate_get_query(context, model_class, id_field=None, id=None,
- session=None, read_deleted=None):
+ read_deleted=None):
columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
- query = model_query(context, model_class, session=session,
- read_deleted=read_deleted)
+ query = model_query(context, model_class, read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
@@ -5479,19 +5462,18 @@ def _aggregate_get_query(context, model_class, id_field=None, id=None,
return query
+@main_context_manager.writer
def aggregate_create(context, values, metadata=None):
- session = get_session()
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
- session=session,
read_deleted='no')
aggregate = query.first()
if not aggregate:
aggregate = models.Aggregate()
aggregate.update(values)
- aggregate.save(session=session)
+ aggregate.save(context.session)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this aggregate.
aggregate._hosts = []
@@ -5500,9 +5482,16 @@ def aggregate_create(context, values, metadata=None):
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
- return aggregate_get(context, aggregate.id)
+        # NOTE(pkholkin): the '_metadata' attribute was updated inside
+        # 'aggregate_metadata_add', so it must be expired and re-read
+        # from the db before the aggregate is returned
+ context.session.expire(aggregate, ['_metadata'])
+ aggregate._metadata
+
+ return aggregate
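Returning the in-session aggregate instead of re-running aggregate_get() saves a full joined reload, at the cost of having to expire the '_metadata' relationship that aggregate_metadata_add changed behind the ORM's back; touching the attribute then triggers one fresh SELECT. A sketch of that expire-and-touch idiom (toy models, not Nova's):

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship
    from oslo_db.sqlalchemy import enginefacade

    @enginefacade.transaction_context_provider
    class Context(object):
        pass

    main_context_manager = enginefacade.transaction_context()
    main_context_manager.configure(connection='sqlite://')

    Base = declarative_base()

    class Aggregate(Base):
        __tablename__ = 'aggregates'
        id = sa.Column(sa.Integer, primary_key=True)
        _metadata = relationship('AggregateMetadata')

    class AggregateMetadata(Base):
        __tablename__ = 'aggregate_metadata'
        id = sa.Column(sa.Integer, primary_key=True)
        aggregate_id = sa.Column(sa.Integer,
                                 sa.ForeignKey('aggregates.id'))
        key = sa.Column(sa.String(255))

    @main_context_manager.writer
    def aggregate_create(context, metadata=None):
        aggregate = Aggregate()
        context.session.add(aggregate)
        context.session.flush()
        if metadata:
            context.session.add_all(
                [AggregateMetadata(aggregate_id=aggregate.id, key=k)
                 for k in metadata])
            # rows were added without going through the relationship,
            # so expire it and touch it to force one fresh SELECT
            # before the object leaves the transaction
            context.session.expire(aggregate, ['_metadata'])
            aggregate._metadata
        return aggregate

    Base.metadata.create_all(
        main_context_manager.get_legacy_facade().get_engine())
    assert len(aggregate_create(Context(), {'az': 'nova'})._metadata) == 1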
+@main_context_manager.writer
def aggregate_get(context, aggregate_id):
query = _aggregate_get_query(context,
models.Aggregate,
@@ -5516,6 +5505,7 @@ def aggregate_get(context, aggregate_id):
return aggregate
+@main_context_manager.reader
def aggregate_get_by_host(context, host, key=None):
"""Return rows that match host (mandatory) and metadata key (optional).
@@ -5534,6 +5524,7 @@ def aggregate_get_by_host(context, host, key=None):
return query.all()
+@main_context_manager.reader
def aggregate_metadata_get_by_host(context, host, key=None):
query = model_query(context, models.Aggregate)
query = query.join("_hosts")
@@ -5552,6 +5543,7 @@ def aggregate_metadata_get_by_host(context, host, key=None):
return dict(metadata)
+@main_context_manager.reader
def aggregate_get_by_metadata_key(context, key):
"""Return rows that match metadata key.
@@ -5565,15 +5557,13 @@ def aggregate_get_by_metadata_key(context, key):
return query.all()
+@main_context_manager.writer
def aggregate_update(context, aggregate_id, values):
- session = get_session()
-
if "name" in values:
aggregate_by_name = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
- session=session,
read_deleted='no').first())
if aggregate_by_name and aggregate_by_name.id != aggregate_id:
# there is another aggregate with the new name
@@ -5582,8 +5572,7 @@ def aggregate_update(context, aggregate_id, values):
aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
- aggregate_id,
- session=session).first())
+ aggregate_id).first())
set_delete = True
if aggregate:
@@ -5602,45 +5591,42 @@ def aggregate_update(context, aggregate_id, values):
set_delete=set_delete)
aggregate.update(values)
- aggregate.save(session=session)
+ aggregate.save(context.session)
return aggregate_get(context, aggregate.id)
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
+@main_context_manager.writer
def aggregate_delete(context, aggregate_id):
- session = get_session()
- with session.begin():
- count = _aggregate_get_query(context,
- models.Aggregate,
- models.Aggregate.id,
- aggregate_id,
- session=session).\
- soft_delete()
- if count == 0:
- raise exception.AggregateNotFound(aggregate_id=aggregate_id)
+ count = _aggregate_get_query(context,
+ models.Aggregate,
+ models.Aggregate.id,
+ aggregate_id).\
+ soft_delete()
+ if count == 0:
+ raise exception.AggregateNotFound(aggregate_id=aggregate_id)
- # Delete Metadata
- model_query(context,
- models.AggregateMetadata, session=session).\
- filter_by(aggregate_id=aggregate_id).\
- soft_delete()
+ # Delete Metadata
+ model_query(context, models.AggregateMetadata).\
+ filter_by(aggregate_id=aggregate_id).\
+ soft_delete()
+@main_context_manager.reader
def aggregate_get_all(context):
return _aggregate_get_query(context, models.Aggregate).all()
-def _aggregate_metadata_get_query(context, aggregate_id, session=None,
- read_deleted="yes"):
+def _aggregate_metadata_get_query(context, aggregate_id, read_deleted="yes"):
return model_query(context,
models.AggregateMetadata,
- read_deleted=read_deleted,
- session=session).\
+ read_deleted=read_deleted).\
filter_by(aggregate_id=aggregate_id)
@require_aggregate_exists
+@main_context_manager.reader
def aggregate_metadata_get(context, aggregate_id):
rows = model_query(context,
models.AggregateMetadata).\
@@ -5650,6 +5636,7 @@ def aggregate_metadata_get(context, aggregate_id):
@require_aggregate_exists
+@main_context_manager.writer
def aggregate_metadata_delete(context, aggregate_id, key):
count = _aggregate_get_query(context,
models.AggregateMetadata,
@@ -5663,40 +5650,38 @@ def aggregate_metadata_delete(context, aggregate_id, key):
@require_aggregate_exists
+@main_context_manager.writer
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
max_retries=10):
all_keys = metadata.keys()
for attempt in range(max_retries):
try:
- session = get_session()
- with session.begin():
- query = _aggregate_metadata_get_query(context, aggregate_id,
- read_deleted='no',
- session=session)
- if set_delete:
- query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
- soft_delete(synchronize_session=False)
-
- already_existing_keys = set()
- if all_keys:
- query = query.filter(
- models.AggregateMetadata.key.in_(all_keys))
- for meta_ref in query.all():
- key = meta_ref.key
- meta_ref.update({"value": metadata[key]})
- already_existing_keys.add(key)
-
- new_entries = []
- for key, value in metadata.items():
- if key in already_existing_keys:
- continue
- new_entries.append({"key": key,
- "value": value,
- "aggregate_id": aggregate_id})
- if new_entries:
- session.execute(
- models.AggregateMetadata.__table__.insert(),
- new_entries)
+ query = _aggregate_metadata_get_query(context, aggregate_id,
+ read_deleted='no')
+ if set_delete:
+ query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
+ soft_delete(synchronize_session=False)
+
+ already_existing_keys = set()
+ if all_keys:
+ query = query.filter(
+ models.AggregateMetadata.key.in_(all_keys))
+ for meta_ref in query.all():
+ key = meta_ref.key
+ meta_ref.update({"value": metadata[key]})
+ already_existing_keys.add(key)
+
+ new_entries = []
+ for key, value in metadata.items():
+ if key in already_existing_keys:
+ continue
+ new_entries.append({"key": key,
+ "value": value,
+ "aggregate_id": aggregate_id})
+ if new_entries:
+ context.session.execute(
+ models.AggregateMetadata.__table__.insert(),
+ new_entries)
return metadata
except db_exc.DBDuplicateEntry:
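The other notable pattern in this hunk survives the rewrite: brand-new metadata rows go in through a single Core INSERT against models.AggregateMetadata.__table__, one executemany round trip instead of an ORM flush per key. A sketch of the technique (illustrative table, not Nova's):

    import sqlalchemy as sa
    from sqlalchemy.ext.declarative import declarative_base
    from oslo_db.sqlalchemy import enginefacade

    @enginefacade.transaction_context_provider
    class Context(object):
        pass

    main_context_manager = enginefacade.transaction_context()
    main_context_manager.configure(connection='sqlite://')

    Base = declarative_base()

    class Metadata(Base):
        __tablename__ = 'metadata'
        id = sa.Column(sa.Integer, primary_key=True)
        key = sa.Column(sa.String(255))
        value = sa.Column(sa.String(255))

    @main_context_manager.writer
    def metadata_add(context, metadata):
        new_entries = [{'key': k, 'value': v}
                       for k, v in metadata.items()]
        if new_entries:
            # one multi-row Core INSERT on the ORM session's connection
            context.session.execute(Metadata.__table__.insert(),
                                    new_entries)
        return metadata

    Base.metadata.create_all(
        main_context_manager.get_legacy_facade().get_engine())
    metadata_add(Context(), {'ssd': 'true', 'rack': 'r1'})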
@@ -5713,6 +5698,7 @@ def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
@require_aggregate_exists
+@main_context_manager.reader
def aggregate_host_get_all(context, aggregate_id):
rows = model_query(context,
models.AggregateHost).\
@@ -5722,6 +5708,7 @@ def aggregate_host_get_all(context, aggregate_id):
@require_aggregate_exists
+@main_context_manager.writer
def aggregate_host_delete(context, aggregate_id, host):
count = _aggregate_get_query(context,
models.AggregateHost,
@@ -5735,11 +5722,12 @@ def aggregate_host_delete(context, aggregate_id, host):
@require_aggregate_exists
+@main_context_manager.writer
def aggregate_host_add(context, aggregate_id, host):
host_ref = models.AggregateHost()
host_ref.update({"host": host, "aggregate_id": aggregate_id})
try:
- host_ref.save()
+ host_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.AggregateHostExists(host=host,
aggregate_id=aggregate_id)
@@ -5749,14 +5737,16 @@ def aggregate_host_add(context, aggregate_id, host):
################
+@main_context_manager.writer
def instance_fault_create(context, values):
"""Create a new InstanceFault."""
fault_ref = models.InstanceFault()
fault_ref.update(values)
- fault_ref.save()
+ fault_ref.save(context.session)
return dict(fault_ref)
+@main_context_manager.reader
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
if not instance_uuids:
@@ -5927,6 +5917,7 @@ def action_event_get_by_id(context, action_id, event_id):
@require_context
+@main_context_manager.writer
def ec2_instance_create(context, instance_uuid, id=None):
"""Create ec2 compatible instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
@@ -5934,12 +5925,13 @@ def ec2_instance_create(context, instance_uuid, id=None):
if id is not None:
ec2_instance_ref.update({'id': id})
- ec2_instance_ref.save()
+ ec2_instance_ref.save(context.session)
return ec2_instance_ref
@require_context
+@main_context_manager.reader
def ec2_instance_get_by_uuid(context, instance_uuid):
result = _ec2_instance_get_query(context).\
filter_by(uuid=instance_uuid).\
@@ -5952,6 +5944,7 @@ def ec2_instance_get_by_uuid(context, instance_uuid):
@require_context
+@main_context_manager.reader
def ec2_instance_get_by_id(context, instance_id):
result = _ec2_instance_get_query(context).\
filter_by(id=instance_id).\
@@ -5964,25 +5957,26 @@ def ec2_instance_get_by_id(context, instance_id):
@require_context
+@main_context_manager.reader
def get_instance_uuid_by_ec2_id(context, ec2_id):
result = ec2_instance_get_by_id(context, ec2_id)
return result['uuid']
-def _ec2_instance_get_query(context, session=None):
- return model_query(context,
- models.InstanceIdMapping,
- session=session,
- read_deleted='yes')
+def _ec2_instance_get_query(context):
+ return model_query(context, models.InstanceIdMapping, read_deleted='yes')
+
+
+##################
def _task_log_get_query(context, task_name, period_beginning,
- period_ending, host=None, state=None, session=None):
+ period_ending, host=None, state=None):
values = {'period_beginning': period_beginning,
'period_ending': period_ending}
values = convert_objects_related_datetimes(values, *values.keys())
- query = model_query(context, models.TaskLog, session=session).\
+ query = model_query(context, models.TaskLog).\
filter_by(task_name=task_name).\
filter_by(period_beginning=values['period_beginning']).\
filter_by(period_ending=values['period_ending'])
@@ -5993,18 +5987,21 @@ def _task_log_get_query(context, task_name, period_beginning,
return query
+@main_context_manager.reader
def task_log_get(context, task_name, period_beginning, period_ending, host,
state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).first()
+@main_context_manager.reader
def task_log_get_all(context, task_name, period_beginning, period_ending,
host=None, state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).all()
+@main_context_manager.writer
def task_log_begin_task(context, task_name, period_beginning, period_ending,
host, task_items=None, message=None):
values = {'period_beginning': period_beginning,
@@ -6022,25 +6019,26 @@ def task_log_begin_task(context, task_name, period_beginning, period_ending,
if task_items:
task.task_items = task_items
try:
- task.save()
+ task.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
+@main_context_manager.writer
def task_log_end_task(context, task_name, period_beginning, period_ending,
host, errors, message=None):
values = dict(state="DONE", errors=errors)
if message:
values["message"] = message
- session = get_session()
- with session.begin():
- rows = _task_log_get_query(context, task_name, period_beginning,
- period_ending, host, session=session).\
- update(values)
- if rows == 0:
- # It's not running!
- raise exception.TaskNotRunning(task_name=task_name, host=host)
+ rows = _task_log_get_query(context, task_name, period_beginning,
+ period_ending, host).update(values)
+ if rows == 0:
+ # It's not running!
+ raise exception.TaskNotRunning(task_name=task_name, host=host)
+
+
+##################
def _archive_deleted_rows_for_table(tablename, max_rows):
@@ -6153,11 +6151,10 @@ def archive_deleted_rows(max_rows=None):
def _instance_group_get_query(context, model_class, id_field=None, id=None,
- session=None, read_deleted=None):
+ read_deleted=None):
columns_to_join = {models.InstanceGroup: ['_policies', '_members']}
- query = model_query(context, model_class, session=session,
- read_deleted=read_deleted, project_only=True)
-
+ query = model_query(context, model_class, read_deleted=read_deleted,
+ project_only=True)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
@@ -6167,35 +6164,37 @@ def _instance_group_get_query(context, model_class, id_field=None, id=None,
return query
-def instance_group_create(context, values, policies=None,
- members=None):
+@main_context_manager.writer
+def instance_group_create(context, values, policies=None, members=None):
"""Create a new group."""
uuid = values.get('uuid', None)
if uuid is None:
uuid = uuidutils.generate_uuid()
values['uuid'] = uuid
- session = get_session()
- with session.begin():
- try:
- group = models.InstanceGroup()
- group.update(values)
- group.save(session=session)
- except db_exc.DBDuplicateEntry:
- raise exception.InstanceGroupIdExists(group_uuid=uuid)
- # We don't want these to be lazy loaded later. We know there is
- # nothing here since we just created this instance group.
+ try:
+ group = models.InstanceGroup()
+ group.update(values)
+ group.save(context.session)
+ except db_exc.DBDuplicateEntry:
+ raise exception.InstanceGroupIdExists(group_uuid=uuid)
+
+ # We don't want '_policies' and '_members' attributes to be lazy loaded
+ # later. We know there is nothing here since we just created this
+ # instance group.
+ if policies:
+ _instance_group_policies_add(context, group.id, policies)
+ else:
group._policies = []
+ if members:
+ _instance_group_members_add(context, group.id, members)
+ else:
group._members = []
- if policies:
- _instance_group_policies_add(context, group.id, policies,
- session=session)
- if members:
- _instance_group_members_add(context, group.id, members,
- session=session)
+
return instance_group_get(context, uuid)
+@main_context_manager.reader
def instance_group_get(context, group_uuid):
"""Get a specific group by uuid."""
group = _instance_group_get_query(context,
@@ -6208,92 +6207,82 @@ def instance_group_get(context, group_uuid):
return group
+@main_context_manager.reader
def instance_group_get_by_instance(context, instance_uuid):
- session = get_session()
- with session.begin():
- group_member = model_query(context, models.InstanceGroupMember,
- session=session).\
- filter_by(instance_id=instance_uuid).\
- first()
- if not group_member:
- raise exception.InstanceGroupNotFound(group_uuid='')
- group = _instance_group_get_query(context, models.InstanceGroup,
- models.InstanceGroup.id,
- group_member.group_id,
- session=session).first()
- if not group:
- raise exception.InstanceGroupNotFound(
- group_uuid=group_member.group_id)
- return group
+ group_member = model_query(context, models.InstanceGroupMember).\
+ filter_by(instance_id=instance_uuid).\
+ first()
+ if not group_member:
+ raise exception.InstanceGroupNotFound(group_uuid='')
+ group = _instance_group_get_query(context, models.InstanceGroup,
+ models.InstanceGroup.id,
+ group_member.group_id).first()
+ if not group:
+ raise exception.InstanceGroupNotFound(
+ group_uuid=group_member.group_id)
+ return group
+@main_context_manager.writer
def instance_group_update(context, group_uuid, values):
"""Update the attributes of a group.
If values contains a metadata key, it updates the group metadata
too. Similarly for the policies and members.
"""
- session = get_session()
- with session.begin():
- group = model_query(context,
- models.InstanceGroup,
- session=session).\
- filter_by(uuid=group_uuid).\
- first()
- if not group:
- raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
-
- policies = values.get('policies')
- if policies is not None:
- _instance_group_policies_add(context,
- group.id,
- values.pop('policies'),
- set_delete=True,
- session=session)
- members = values.get('members')
- if members is not None:
- _instance_group_members_add(context,
- group.id,
- values.pop('members'),
- set_delete=True,
- session=session)
+ group = model_query(context, models.InstanceGroup).\
+ filter_by(uuid=group_uuid).\
+ first()
+ if not group:
+ raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
- group.update(values)
+ policies = values.get('policies')
+ if policies is not None:
+ _instance_group_policies_add(context,
+ group.id,
+ values.pop('policies'),
+ set_delete=True)
+ members = values.get('members')
+ if members is not None:
+ _instance_group_members_add(context,
+ group.id,
+ values.pop('members'),
+ set_delete=True)
+
+ group.update(values)
- if policies:
- values['policies'] = policies
- if members:
- values['members'] = members
+ if policies:
+ values['policies'] = policies
+ if members:
+ values['members'] = members
+@main_context_manager.writer
def instance_group_delete(context, group_uuid):
"""Delete a group."""
- session = get_session()
- with session.begin():
- group_id = _instance_group_id(context, group_uuid, session=session)
-
- count = _instance_group_get_query(context,
- models.InstanceGroup,
- models.InstanceGroup.uuid,
- group_uuid,
- session=session).soft_delete()
- if count == 0:
- raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
-
- # Delete policies, metadata and members
- instance_models = [models.InstanceGroupPolicy,
- models.InstanceGroupMember]
- for model in instance_models:
- model_query(context, model, session=session).\
- filter_by(group_id=group_id).\
- soft_delete()
+ group_id = _instance_group_id(context, group_uuid)
+
+ count = _instance_group_get_query(context,
+ models.InstanceGroup,
+ models.InstanceGroup.uuid,
+ group_uuid).soft_delete()
+ if count == 0:
+ raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
+
+ # Delete policies, metadata and members
+ instance_models = [models.InstanceGroupPolicy,
+ models.InstanceGroupMember]
+ for model in instance_models:
+ model_query(context, model).filter_by(group_id=group_id).soft_delete()
+@main_context_manager.reader
def instance_group_get_all(context):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).all()
+@main_context_manager.reader
def instance_group_get_all_by_project_id(context, project_id):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).\
@@ -6301,31 +6290,27 @@ def instance_group_get_all_by_project_id(context, project_id):
all()
-def _instance_group_count_by_project_and_user(context, project_id,
- user_id, session=None):
- return model_query(context, models.InstanceGroup, read_deleted="no",
- session=session).\
+def _instance_group_count_by_project_and_user(context, project_id, user_id):
+ return model_query(context, models.InstanceGroup, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
def _instance_group_model_get_query(context, model_class, group_id,
- session=None, read_deleted='no'):
+ read_deleted='no'):
return model_query(context,
model_class,
- read_deleted=read_deleted,
- session=session).\
+ read_deleted=read_deleted).\
filter_by(group_id=group_id)
-def _instance_group_id(context, group_uuid, session=None):
+def _instance_group_id(context, group_uuid):
"""Returns the group database ID for the group UUID."""
result = model_query(context,
models.InstanceGroup,
- (models.InstanceGroup.id,),
- session=session).\
+ (models.InstanceGroup.id,)).\
filter_by(uuid=group_uuid).\
first()
if not result:
@@ -6333,39 +6318,33 @@ def _instance_group_id(context, group_uuid, session=None):
return result.id
-def _instance_group_members_add(context, id, members, set_delete=False,
- session=None):
- if not session:
- session = get_session()
-
+def _instance_group_members_add(context, id, members, set_delete=False):
all_members = set(members)
- with session.begin(subtransactions=True):
- query = _instance_group_model_get_query(context,
- models.InstanceGroupMember,
- id,
- session=session)
- if set_delete:
- query.filter(~models.InstanceGroupMember.instance_id.in_(
- all_members)).\
- soft_delete(synchronize_session=False)
-
- query = query.filter(
- models.InstanceGroupMember.instance_id.in_(all_members))
- already_existing = set()
- for member_ref in query.all():
- already_existing.add(member_ref.instance_id)
-
- for instance_id in members:
- if instance_id in already_existing:
- continue
- member_ref = models.InstanceGroupMember()
- member_ref.update({'instance_id': instance_id,
- 'group_id': id})
- session.add(member_ref)
+ query = _instance_group_model_get_query(context,
+ models.InstanceGroupMember, id)
+ if set_delete:
+ query.filter(~models.InstanceGroupMember.instance_id.in_(
+ all_members)).\
+ soft_delete(synchronize_session=False)
+
+ query = query.filter(
+ models.InstanceGroupMember.instance_id.in_(all_members))
+ already_existing = set()
+ for member_ref in query.all():
+ already_existing.add(member_ref.instance_id)
+
+ for instance_id in members:
+ if instance_id in already_existing:
+ continue
+ member_ref = models.InstanceGroupMember()
+ member_ref.update({'instance_id': instance_id,
+ 'group_id': id})
+ context.session.add(member_ref)
- return members
+ return members
+@main_context_manager.writer
def instance_group_members_add(context, group_uuid, members,
set_delete=False):
id = _instance_group_id(context, group_uuid)
@@ -6373,6 +6352,7 @@ def instance_group_members_add(context, group_uuid, members,
set_delete=set_delete)
+@main_context_manager.writer
def instance_group_member_delete(context, group_uuid, instance_id):
id = _instance_group_id(context, group_uuid)
count = _instance_group_model_get_query(context,
@@ -6385,6 +6365,7 @@ def instance_group_member_delete(context, group_uuid, instance_id):
instance_id=instance_id)
+@main_context_manager.reader
def instance_group_members_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
instances = model_query(context,
@@ -6394,35 +6375,28 @@ def instance_group_members_get(context, group_uuid):
return [instance[0] for instance in instances]
-def _instance_group_policies_add(context, id, policies, set_delete=False,
- session=None):
- if not session:
- session = get_session()
-
+def _instance_group_policies_add(context, id, policies, set_delete=False):
allpols = set(policies)
- with session.begin(subtransactions=True):
- query = _instance_group_model_get_query(context,
- models.InstanceGroupPolicy,
- id,
- session=session)
- if set_delete:
- query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\
- soft_delete(synchronize_session=False)
-
- query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols))
- already_existing = set()
- for policy_ref in query.all():
- already_existing.add(policy_ref.policy)
-
- for policy in policies:
- if policy in already_existing:
- continue
- policy_ref = models.InstanceGroupPolicy()
- policy_ref.update({'policy': policy,
- 'group_id': id})
- session.add(policy_ref)
+ query = _instance_group_model_get_query(context,
+ models.InstanceGroupPolicy, id)
+ if set_delete:
+ query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\
+ soft_delete(synchronize_session=False)
+
+ query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols))
+ already_existing = set()
+ for policy_ref in query.all():
+ already_existing.add(policy_ref.policy)
+
+ for policy in policies:
+ if policy in already_existing:
+ continue
+ policy_ref = models.InstanceGroupPolicy()
+ policy_ref.update({'policy': policy,
+ 'group_id': id})
+ context.session.add(policy_ref)
- return policies
+ return policies
####################
@@ -6456,6 +6430,7 @@ def pci_device_get_all_by_node(context, node_id):
all()
+@main_context_manager.reader
def pci_device_get_all_by_parent_addr(context, node_id, parent_addr):
return model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
@@ -6506,17 +6481,16 @@ def pci_device_update(context, node_id, address, values):
####################
+@main_context_manager.writer
def instance_tag_add(context, instance_uuid, tag):
- session = get_session()
-
tag_ref = models.Tag()
tag_ref.resource_id = instance_uuid
tag_ref.tag = tag
try:
- with session.begin(subtransactions=True):
- _check_instance_exists_in_project(context, session, instance_uuid)
- session.add(tag_ref)
+ _check_instance_exists_in_project(context, instance_uuid)
+ with main_context_manager.writer.savepoint.using(context):
+ context.session.add(tag_ref)
except db_exc.DBDuplicateEntry:
# NOTE(snikitin): We should ignore duplicate tags
pass
@@ -6524,70 +6498,61 @@ def instance_tag_add(context, instance_uuid, tag):
return tag_ref
+@main_context_manager.writer
def instance_tag_set(context, instance_uuid, tags):
- session = get_session()
+ _check_instance_exists_in_project(context, instance_uuid)
- with session.begin(subtransactions=True):
- _check_instance_exists_in_project(context, session, instance_uuid)
+ existing = context.session.query(models.Tag.tag).filter_by(
+ resource_id=instance_uuid).all()
- existing = session.query(models.Tag.tag).filter_by(
- resource_id=instance_uuid).all()
+ existing = set(row.tag for row in existing)
+ tags = set(tags)
+ to_delete = existing - tags
+ to_add = tags - existing
- existing = set(row.tag for row in existing)
- tags = set(tags)
- to_delete = existing - tags
- to_add = tags - existing
+ if to_delete:
+ context.session.query(models.Tag).filter_by(
+ resource_id=instance_uuid).filter(
+ models.Tag.tag.in_(to_delete)).delete(
+ synchronize_session=False)
- if to_delete:
- session.query(models.Tag).filter_by(
- resource_id=instance_uuid).filter(
- models.Tag.tag.in_(to_delete)).delete(
- synchronize_session=False)
+ if to_add:
+ data = [
+ {'resource_id': instance_uuid, 'tag': tag} for tag in to_add]
+ context.session.execute(models.Tag.__table__.insert(), data)
- if to_add:
- data = [
- {'resource_id': instance_uuid, 'tag': tag} for tag in to_add]
- session.execute(models.Tag.__table__.insert(), data)
-
- return session.query(models.Tag).filter_by(
- resource_id=instance_uuid).all()
+ return context.session.query(models.Tag).filter_by(
+ resource_id=instance_uuid).all()
+@main_context_manager.reader
def instance_tag_get_by_instance_uuid(context, instance_uuid):
- session = get_session()
-
- with session.begin(subtransactions=True):
- _check_instance_exists_in_project(context, session, instance_uuid)
- return session.query(models.Tag).filter_by(
- resource_id=instance_uuid).all()
+ _check_instance_exists_in_project(context, instance_uuid)
+ return context.session.query(models.Tag).filter_by(
+ resource_id=instance_uuid).all()
+@main_context_manager.writer
def instance_tag_delete(context, instance_uuid, tag):
- session = get_session()
-
- with session.begin(subtransactions=True):
- _check_instance_exists_in_project(context, session, instance_uuid)
- result = session.query(models.Tag).filter_by(
- resource_id=instance_uuid, tag=tag).delete()
+ _check_instance_exists_in_project(context, instance_uuid)
+ result = context.session.query(models.Tag).filter_by(
+ resource_id=instance_uuid, tag=tag).delete()
- if not result:
- raise exception.InstanceTagNotFound(instance_id=instance_uuid,
- tag=tag)
+ if not result:
+ raise exception.InstanceTagNotFound(instance_id=instance_uuid,
+ tag=tag)
+@main_context_manager.writer
def instance_tag_delete_all(context, instance_uuid):
- session = get_session()
-
- with session.begin(subtransactions=True):
- _check_instance_exists_in_project(context, session, instance_uuid)
- session.query(models.Tag).filter_by(resource_id=instance_uuid).delete()
+ _check_instance_exists_in_project(context, instance_uuid)
+ context.session.query(models.Tag).filter_by(
+ resource_id=instance_uuid).delete()
+@main_context_manager.reader
def instance_tag_exists(context, instance_uuid, tag):
- session = get_session()
-
- with session.begin(subtransactions=True):
- _check_instance_exists_in_project(context, session, instance_uuid)
- q = session.query(models.Tag).filter_by(
- resource_id=instance_uuid, tag=tag)
- return session.query(q.exists()).scalar()
+ _check_instance_exists_in_project(context, instance_uuid)
+ q = context.session.query(models.Tag).filter_by(
+ resource_id=instance_uuid, tag=tag)
+ return context.session.query(q.exists()).scalar()
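
The thread running through the DB API hunks above is oslo.db's enginefacade: instead of passing an optional session kwarg into every helper, a function is decorated with @main_context_manager.reader or @main_context_manager.writer, and the decorator opens the transaction and exposes it as context.session (savepoint variants such as main_context_manager.writer.savepoint.using(context), used in instance_tag_add, nest inside the outer transaction). A minimal self-contained sketch of the pattern; the RequestContext class and the tags schema below are illustrative stand-ins, not Nova's real code:

    from oslo_db.sqlalchemy import enginefacade
    import sqlalchemy as sa

    main_context_manager = enginefacade.transaction_context()
    main_context_manager.configure(connection='sqlite://')

    @enginefacade.transaction_context_provider
    class RequestContext(object):
        """Stand-in for nova.context.RequestContext; this class decorator
        is what makes context.session available in decorated functions."""

    @main_context_manager.writer
    def schema_create(context):
        # DDL just to make the sketch runnable end to end.
        context.session.execute(sa.text(
            "CREATE TABLE tags (resource_id TEXT, tag TEXT)"))

    @main_context_manager.writer
    def tag_add(context, instance_uuid, tag):
        # The decorator begins a transaction, injects it as context.session,
        # commits on success and rolls back if an exception escapes.
        context.session.execute(
            sa.text("INSERT INTO tags (resource_id, tag) VALUES (:r, :t)"),
            {'r': instance_uuid, 't': tag})

    @main_context_manager.reader
    def tag_list(context, instance_uuid):
        rows = context.session.execute(
            sa.text("SELECT tag FROM tags WHERE resource_id = :r"),
            {'r': instance_uuid})
        return [row[0] for row in rows]

    ctx = RequestContext()
    schema_create(ctx)
    tag_add(ctx, 'uuid-1', 'web')
    assert tag_list(ctx, 'uuid-1') == ['web']
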
diff --git a/nova/db/sqlalchemy/api_migrations/migrate_repo/versions/005_flavors.py b/nova/db/sqlalchemy/api_migrations/migrate_repo/versions/005_flavors.py
new file mode 100644
index 0000000000..cf1a82bcd5
--- /dev/null
+++ b/nova/db/sqlalchemy/api_migrations/migrate_repo/versions/005_flavors.py
@@ -0,0 +1,88 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate.changeset.constraint import ForeignKeyConstraint
+from migrate import UniqueConstraint
+from sqlalchemy import Boolean
+from sqlalchemy import Column
+from sqlalchemy import DateTime
+from sqlalchemy import Float
+from sqlalchemy import Index
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ flavors = Table('flavors', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('name', String(length=255), nullable=False),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('memory_mb', Integer, nullable=False),
+ Column('vcpus', Integer, nullable=False),
+ Column('swap', Integer, nullable=False),
+ Column('vcpu_weight', Integer),
+ Column('flavorid', String(length=255), nullable=False),
+ Column('rxtx_factor', Float),
+ Column('root_gb', Integer),
+ Column('ephemeral_gb', Integer),
+ Column('disabled', Boolean),
+ Column('is_public', Boolean),
+ UniqueConstraint("flavorid", name="uniq_flavors0flavorid"),
+ UniqueConstraint("name", name="uniq_flavors0name"),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
+ )
+ flavors.create(checkfirst=True)
+
+ flavor_extra_specs = Table('flavor_extra_specs', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('flavor_id', Integer, nullable=False),
+ Column('key', String(length=255), nullable=False),
+ Column('value', String(length=255)),
+ UniqueConstraint('flavor_id', 'key',
+ name='uniq_flavor_extra_specs0flavor_id0key'),
+ ForeignKeyConstraint(columns=['flavor_id'], refcolumns=[flavors.c.id]),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
+ )
+
+ # NOTE(mriedem): DB2 creates an index when a unique constraint is created
+ # so trying to add a second index on the flavor_id/key column will fail
+ # with error SQL0605W, so omit the index in the case of DB2.
+ if migrate_engine.name != 'ibm_db_sa':
+ Index('flavor_extra_specs_flavor_id_key_idx',
+ flavor_extra_specs.c.flavor_id,
+ flavor_extra_specs.c.key)
+ flavor_extra_specs.create(checkfirst=True)
+
+ flavor_projects = Table('flavor_projects', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('flavor_id', Integer, nullable=False),
+ Column('project_id', String(length=255), nullable=False),
+ UniqueConstraint('flavor_id', 'project_id',
+ name='uniq_flavor_projects0flavor_id0project_id'),
+ ForeignKeyConstraint(columns=['flavor_id'],
+ refcolumns=[flavors.c.id]),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8'
+ )
+ flavor_projects.create(checkfirst=True)
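
A migration module like this can be smoke-tested against an in-memory SQLite database. The snippet below is only a sketch (it assumes the module above is importable as flavors_migration, a hypothetical name; Nova's real coverage lives in its migration unit tests):

    from sqlalchemy import create_engine, inspect

    import flavors_migration  # hypothetical import of the module above

    engine = create_engine('sqlite://')
    flavors_migration.upgrade(engine)

    inspector = inspect(engine)
    assert {'flavors', 'flavor_extra_specs', 'flavor_projects'} <= set(
        inspector.get_table_names())
    assert 'flavorid' in [c['name'] for c in inspector.get_columns('flavors')]
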
diff --git a/nova/db/sqlalchemy/api_models.py b/nova/db/sqlalchemy/api_models.py
index f470706b80..336eb4ebd9 100644
--- a/nova/db/sqlalchemy/api_models.py
+++ b/nova/db/sqlalchemy/api_models.py
@@ -12,8 +12,10 @@
from oslo_db.sqlalchemy import models
+from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
@@ -84,3 +86,51 @@ class RequestSpec(API_BASE):
id = Column(Integer, primary_key=True)
instance_uuid = Column(String(36), nullable=False)
spec = Column(Text, nullable=False)
+
+
+class Flavors(API_BASE):
+ """Represents possible flavors for instances"""
+ __tablename__ = 'flavors'
+ __table_args__ = (
+ schema.UniqueConstraint("flavorid", name="uniq_flavors0flavorid"),
+ schema.UniqueConstraint("name", name="uniq_flavors0name"))
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(255), nullable=False)
+ memory_mb = Column(Integer, nullable=False)
+ vcpus = Column(Integer, nullable=False)
+ root_gb = Column(Integer)
+ ephemeral_gb = Column(Integer)
+ flavorid = Column(String(255), nullable=False)
+ swap = Column(Integer, nullable=False, default=0)
+ rxtx_factor = Column(Float, default=1)
+ vcpu_weight = Column(Integer)
+ disabled = Column(Boolean, default=False)
+ is_public = Column(Boolean, default=True)
+
+
+class FlavorExtraSpecs(API_BASE):
+ """Represents additional specs as key/value pairs for a flavor"""
+ __tablename__ = 'flavor_extra_specs'
+ __table_args__ = (
+ Index('flavor_extra_specs_flavor_id_key_idx', 'flavor_id', 'key'),
+ schema.UniqueConstraint('flavor_id', 'key',
+ name='uniq_flavor_extra_specs0flavor_id0key'),
+ {'mysql_collate': 'utf8_bin'},
+ )
+
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255), nullable=False)
+ value = Column(String(255))
+ flavor_id = Column(Integer, ForeignKey('flavors.id'), nullable=False)
+
+
+class FlavorProjects(API_BASE):
+ """Represents projects associated with flavors"""
+ __tablename__ = 'flavor_projects'
+ __table_args__ = (schema.UniqueConstraint('flavor_id', 'project_id',
+ name='uniq_flavor_projects0flavor_id0project_id'),)
+
+ id = Column(Integer, primary_key=True)
+ flavor_id = Column(Integer, ForeignKey('flavors.id'), nullable=False)
+ project_id = Column(String(255), nullable=False)
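
These are plain declarative models, so the new API-database tables can be exercised directly. A hypothetical usage sketch (the in-memory engine and flavor values are illustrative; Nova wires the real engine through its API database configuration):

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite://')
    API_BASE.metadata.create_all(engine)  # creates flavors & friends

    session = sessionmaker(bind=engine)()
    flavor = Flavors(name='m1.tiny', flavorid='1', memory_mb=512, vcpus=1)
    session.add(flavor)
    session.flush()  # assigns flavor.id; swap/rxtx_factor get their defaults
    session.add(FlavorExtraSpecs(flavor_id=flavor.id,
                                 key='hw:cpu_policy', value='shared'))
    session.commit()
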
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/314_add_resource_provider_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/314_add_resource_provider_tables.py
new file mode 100644
index 0000000000..1535f58243
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/314_add_resource_provider_tables.py
@@ -0,0 +1,84 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Database migrations for resource-providers."""
+
+from migrate import UniqueConstraint
+from sqlalchemy import Column
+from sqlalchemy import Float
+from sqlalchemy import Index
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ resource_providers = Table(
+ 'resource_providers', meta,
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('uuid', String(36), nullable=False),
+ UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
+ mysql_engine='InnoDB',
+ mysql_charset='latin1'
+ )
+ # NOTE(mriedem): DB2 creates an index when a unique constraint is created
+ # so trying to add a second index on the name column will fail with error
+ # SQL0605W, so omit the index in the case of DB2.
+ if migrate_engine.name != 'ibm_db_sa':
+ Index('resource_providers_uuid_idx', resource_providers.c.uuid)
+
+ inventories = Table(
+ 'inventories', meta,
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('resource_provider_id', Integer, nullable=False),
+ Column('resource_class_id', Integer, nullable=False),
+ Column('total', Integer, nullable=False),
+ Column('reserved', Integer, nullable=False),
+ Column('min_unit', Integer, nullable=False),
+ Column('max_unit', Integer, nullable=False),
+ Column('step_size', Integer, nullable=False),
+ Column('allocation_ratio', Float, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='latin1'
+ )
+ Index('inventories_resource_provider_id_idx',
+ inventories.c.resource_provider_id)
+ Index('inventories_resource_class_id_idx',
+ inventories.c.resource_class_id)
+
+ allocations = Table(
+ 'allocations', meta,
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('resource_provider_id', Integer, nullable=False),
+ Column('consumer_id', String(36), nullable=False),
+ Column('resource_class_id', Integer, nullable=False),
+ Column('used', Integer, nullable=False),
+ mysql_engine='InnoDB',
+ mysql_charset='latin1'
+ )
+ Index('allocations_resource_provider_class_id_idx',
+ allocations.c.resource_provider_id,
+ allocations.c.resource_class_id)
+ Index('allocations_consumer_id_idx', allocations.c.consumer_id)
+ Index('allocations_resource_class_id_idx',
+ allocations.c.resource_class_id)
+
+ for table in [resource_providers, inventories, allocations]:
+ table.create(checkfirst=True)
+
+ for table_name in ('', 'shadow_'):
+ uuid_column = Column('uuid', String(36))
+ compute_nodes = Table('%scompute_nodes' % table_name, meta)
+ compute_nodes.create_column(uuid_column)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/315_add_migration_progresss_detail.py b/nova/db/sqlalchemy/migrate_repo/versions/315_add_migration_progresss_detail.py
new file mode 100644
index 0000000000..3fca95cd6d
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/315_add_migration_progresss_detail.py
@@ -0,0 +1,30 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from sqlalchemy import BigInteger
+from sqlalchemy import Column
+from sqlalchemy import MetaData
+from sqlalchemy import Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData(bind=migrate_engine)
+ migrations = Table('migrations', meta, autoload=True)
+ shadow_migrations = Table('shadow_migrations', meta, autoload=True)
+
+ columns = ['memory_total', 'memory_processed', 'memory_remaining',
+ 'disk_total', 'disk_processed', 'disk_remaining']
+ for column_name in columns:
+ column = Column(column_name, BigInteger, nullable=True)
+ migrations.create_column(column)
+ shadow_migrations.create_column(column.copy())
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/316_add_disk_ratio_for_compute_nodes.py b/nova/db/sqlalchemy/migrate_repo/versions/316_add_disk_ratio_for_compute_nodes.py
new file mode 100644
index 0000000000..39ef93c4f0
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/316_add_disk_ratio_for_compute_nodes.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2016 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from sqlalchemy import Float, Column, MetaData, Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ cn = Table('compute_nodes', meta, autoload=True)
+ shadow_cn = Table('shadow_compute_nodes', meta, autoload=True)
+ cn.create_column(Column('disk_allocation_ratio', Float, nullable=True))
+ shadow_cn.create_column(Column('disk_allocation_ratio', Float))
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/317_add_aggregate_uuid.py b/nova/db/sqlalchemy/migrate_repo/versions/317_add_aggregate_uuid.py
new file mode 100644
index 0000000000..8dc8d6b9a1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/317_add_aggregate_uuid.py
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Database migrations for resource-providers."""
+
+from sqlalchemy import Column
+from sqlalchemy import Index
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ for table_prefix in ('', 'shadow_'):
+ uuid_column = Column('uuid', String(36))
+ aggregates = Table('%saggregates' % table_prefix, meta, autoload=True)
+ if not hasattr(aggregates.c, 'uuid'):
+ aggregates.create_column(uuid_column)
+ if not table_prefix:
+ index = Index('aggregate_uuid_idx', aggregates.c.uuid)
+ index.create()
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 6aa6ba6e17..47364c2651 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -72,14 +72,6 @@ class NovaBase(models.TimestampMixin,
session.expunge(copy)
return copy
- def save(self, session=None):
- from nova.db.sqlalchemy import api
-
- if session is None:
- session = api.get_session()
-
- super(NovaBase, self).save(session=session)
-
class Service(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a running service on a host."""
@@ -131,6 +123,7 @@ class ComputeNode(BASE, NovaBase, models.SoftDeleteMixin):
# This field has to be set non-nullable in a later cycle (probably Lxxx)
# once we are sure that all compute nodes in production report it.
host = Column(String(255), nullable=True)
+ uuid = Column(String(36), nullable=True)
vcpus = Column(Integer, nullable=False)
memory_mb = Column(Integer, nullable=False)
local_gb = Column(Integer, nullable=False)
@@ -166,7 +159,7 @@ class ComputeNode(BASE, NovaBase, models.SoftDeleteMixin):
metrics = Column(Text)
# Note(yongli): json string PCI Stats
- # '{"vendor_id":"8086", "product_id":"1234", "count":3 }'
+ # '[{"vendor_id":"8086", "product_id":"1234", "count":3 }, ...]'
pci_stats = Column(Text)
# extra_resources is a json string containing arbitrary
@@ -183,6 +176,7 @@ class ComputeNode(BASE, NovaBase, models.SoftDeleteMixin):
# allocation ratios provided by the RT
ram_allocation_ratio = Column(Float, nullable=True)
cpu_allocation_ratio = Column(Float, nullable=True)
+ disk_allocation_ratio = Column(Float, nullable=True)
class Certificate(BASE, NovaBase, models.SoftDeleteMixin):
@@ -758,6 +752,12 @@ class Migration(BASE, NovaBase, models.SoftDeleteMixin):
'evacuation'),
nullable=True)
hidden = Column(Boolean, default=False)
+ memory_total = Column(BigInteger, nullable=True)
+ memory_processed = Column(BigInteger, nullable=True)
+ memory_remaining = Column(BigInteger, nullable=True)
+ disk_total = Column(BigInteger, nullable=True)
+ disk_processed = Column(BigInteger, nullable=True)
+ disk_remaining = Column(BigInteger, nullable=True)
instance = orm.relationship("Instance", foreign_keys=instance_uuid,
primaryjoin='and_(Migration.instance_uuid == '
@@ -1103,8 +1103,9 @@ class AggregateMetadata(BASE, NovaBase, models.SoftDeleteMixin):
class Aggregate(BASE, NovaBase, models.SoftDeleteMixin):
"""Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates'
- __table_args__ = ()
+ __table_args__ = (Index('aggregate_uuid_idx', 'uuid'),)
id = Column(Integer, primary_key=True, autoincrement=True)
+ uuid = Column(String(36))
name = Column(String(255))
_hosts = orm.relationship(AggregateHost,
primaryjoin='and_('
@@ -1439,3 +1440,63 @@ class Tag(BASE, models.ModelBase):
'Instance.deleted == 0)',
foreign_keys=resource_id
)
+
+
+class ResourceProvider(BASE, models.ModelBase):
+ """Represents a mapping to a providers of resources."""
+
+ __tablename__ = "resource_providers"
+ __table_args__ = (
+ Index('resource_providers_uuid_idx', 'uuid'),
+ schema.UniqueConstraint('uuid',
+ name='uniq_resource_providers0uuid'),
+ )
+
+ id = Column(Integer, primary_key=True, nullable=False)
+ uuid = Column(String(36), nullable=False)
+
+
+class Inventory(BASE, models.ModelBase):
+ """Represents a quantity of available resource."""
+
+ __tablename__ = "inventories"
+ __table_args__ = (
+ Index('inventories_resource_provider_id_idx',
+ 'resource_provider_id'),
+ Index('inventories_resource_class_id_idx',
+ 'resource_class_id'),
+ )
+
+ id = Column(Integer, primary_key=True, nullable=False)
+ resource_provider_id = Column(Integer, nullable=False)
+ resource_class_id = Column(Integer, nullable=False)
+ total = Column(Integer, nullable=False)
+ reserved = Column(Integer, nullable=False)
+ min_unit = Column(Integer, nullable=False)
+ max_unit = Column(Integer, nullable=False)
+ step_size = Column(Integer, nullable=False)
+ allocation_ratio = Column(Float, nullable=False)
+ resource_provider = orm.relationship(
+ "ResourceProvider",
+ primaryjoin=('and_(Inventory.resource_provider_id == '
+ 'ResourceProvider.id)'),
+ foreign_keys=resource_provider_id)
+
+
+class Allocation(BASE, models.ModelBase):
+ """A use of inventory."""
+
+ __tablename__ = "allocations"
+ __table_args__ = (
+ Index('allocations_resource_provider_class_id_idx',
+ 'resource_provider_id', 'resource_class_id'),
+ Index('allocations_resource_class_id_idx',
+ 'resource_class_id'),
+ Index('allocations_consumer_id_idx', 'consumer_id')
+ )
+
+ id = Column(Integer, primary_key=True, nullable=False)
+ resource_provider_id = Column(Integer, nullable=False)
+ consumer_id = Column(String(36), nullable=False)
+ resource_class_id = Column(Integer, nullable=False)
+ used = Column(Integer, nullable=False)
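
How the Inventory columns are meant to combine (a sketch of the intent, not code from this patch; the placement logic that actually consumes them lands separately): reserved units are carved out of total first, the allocation ratio then scales what is exposed, and min_unit/max_unit/step_size bound any single allocation.

    def usable(inv):
        # Effective capacity for one resource class on one provider.
        return int((inv.total - inv.reserved) * inv.allocation_ratio)

    def request_fits(inv, requested):
        # A single allocation must respect the per-request bounds too.
        return (inv.min_unit <= requested <= inv.max_unit
                and requested % inv.step_size == 0
                and requested <= usable(inv))
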
diff --git a/nova/db/sqlalchemy/utils.py b/nova/db/sqlalchemy/utils.py
index 9a7c2d6e70..7d07709828 100644
--- a/nova/db/sqlalchemy/utils.py
+++ b/nova/db/sqlalchemy/utils.py
@@ -38,7 +38,7 @@ class DeleteFromSelect(UpdateBase):
self.column = column
-# NOTE(guochbo): some verions of MySQL doesn't yet support subquery with
+# NOTE(guochbo): some versions of MySQL don't yet support subqueries with
# 'LIMIT & IN/ALL/ANY/SOME'. We need to work around this with a nested select.
@compiles(DeleteFromSelect)
def visit_delete_from_select(element, compiler, **kw):
diff --git a/nova/exception.py b/nova/exception.py
index 3f2d727161..23c3c6eb7b 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -843,6 +843,12 @@ class PortNotUsable(Invalid):
msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.")
+class PortNotUsableDNS(Invalid):
+ msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s. "
+ "Value %(value)s assigned to dns_name attribute does not "
+ "match instance's hostname %(hostname)s")
+
+
class PortNotFree(Invalid):
msg_fmt = _("No free port available for instance %(instance)s.")
@@ -1122,6 +1128,17 @@ class MigrationNotFoundByStatus(MigrationNotFound):
"with status %(status)s.")
+class MigrationNotFoundForInstance(MigrationNotFound):
+ msg_fmt = _("Migration %(migration_id)s not found for instance "
+ "%(instance_id)s")
+
+
+class InvalidMigrationState(Invalid):
+ msg_fmt = _("Migration %(migration_id)s state of instance "
+ "%(instance_uuid)s is %(state)s. Cannot %(method)s while the "
+ "migration is in this state.")
+
+
class ConsoleLogOutputException(NovaException):
msg_fmt = _("Console log output could not be retrieved for instance "
"%(instance_id)s. Reason: %(reason)s")
@@ -1183,7 +1200,7 @@ class FlavorAccessNotFound(NotFound):
class FlavorExtraSpecUpdateCreateFailed(NovaException):
- msg_fmt = _("Flavor %(id)d extra spec cannot be updated or created "
+ msg_fmt = _("Flavor %(id)s extra spec cannot be updated or created "
"after %(retries)d retries.")
@@ -1304,6 +1321,10 @@ class MigrationPreCheckError(MigrationError):
msg_fmt = _("Migration pre-check error: %(reason)s")
+class MigrationSchedulerRPCError(MigrationError):
+ msg_fmt = _("Migration select destinations error: %(reason)s")
+
+
class MalformedRequestBody(NovaException):
msg_fmt = _("Malformed message body: %(reason)s")
@@ -1586,11 +1607,6 @@ class InstanceRecreateNotSupported(Invalid):
msg_fmt = _('Instance recreate is not supported.')
-class ServiceGroupUnavailable(NovaException):
- msg_fmt = _("The service from servicegroup driver %(driver)s is "
- "temporarily unavailable.")
-
-
class DBNotAllowed(NovaException):
msg_fmt = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
@@ -1736,6 +1752,19 @@ class PciDeviceInvalidStatus(Invalid):
"instead of %(hopestatus)s")
+class PciDeviceVFInvalidStatus(Invalid):
+ msg_fmt = _(
+ "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s "
+ "are free.")
+
+
+class PciDevicePFInvalidStatus(Invalid):
+ msg_fmt = _(
+ "Physical Function %(compute_node_id)s:%(address)s, related to VF"
+ " %(compute_node_id)s:%(vf_address)s is %(status)s "
+ "instead of %(hopestatus)s")
+
+
class PciDeviceInvalidOwner(Invalid):
msg_fmt = _(
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s "
@@ -1828,6 +1857,11 @@ class LiveMigrationWithOldNovaNotSafe(NovaException):
"Upgrade Nova on %(server)s and try again.")
+class LiveMigrationURINotAvailable(NovaException):
+ msg_fmt = _('No live migration URI configured and no default available '
+ 'for "%(virt_type)s" hypervisor virtualization type.')
+
+
class UnshelveException(NovaException):
msg_fmt = _("Error during unshelve instance %(instance_id)s: %(reason)s")
@@ -2034,6 +2068,10 @@ class RequestSpecNotFound(NotFound):
msg_fmt = _("RequestSpec not found for instance %(instance_uuid)s")
+class UEFINotSupported(Invalid):
+ msg_fmt = _("UEFI is not supported")
+
+
class NMINotSupported(Invalid):
msg_fmt = _("Injecting NMI is not supported")
@@ -2050,3 +2088,8 @@ class RealtimeMaskNotFoundOrInvalid(Invalid):
msg_fmt = _("Realtime policy needs vCPU(s) mask configured with at least "
"1 RT vCPU and 1 ordinary vCPU. See hw:cpu_realtime_mask "
"or hw_cpu_realtime_mask")
+
+
+class OsInfoNotFound(NotFound):
+ msg_fmt = _("No configuration information found for operating system "
+ "%(os_name)s")
diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py
index 8d1237299e..6a0ab2fbdc 100644
--- a/nova/hacking/checks.py
+++ b/nova/hacking/checks.py
@@ -101,6 +101,10 @@ http_not_implemented_re = re.compile(r"raise .*HTTPNotImplemented\(")
spawn_re = re.compile(
r".*(eventlet|greenthread)\.(?P<spawn_part>spawn(_n)?)\(.*\)")
contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
+doubled_words_re = re.compile(
+ r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
+
+opt_help_text_min_char_count = 10
class BaseASTChecker(ast.NodeVisitor):
@@ -576,6 +580,109 @@ def check_config_option_in_central_place(logical_line, filename):
yield(0, msg)
+def check_doubled_words(physical_line, filename):
+ """Check for the common doubled-word typos
+
+ N343
+ """
+ msg = ("N343: Doubled word '%(word)s' typo found")
+
+ match = re.search(doubled_words_re, physical_line)
+
+ if match:
+ return (0, msg % {'word': match.group(1)})
+
+
+def check_python3_no_iteritems(logical_line):
+ msg = ("N344: Use six.iteritems() instead of dict.iteritems().")
+
+ if re.search(r".*\.iteritems\(\)", logical_line):
+ yield(0, msg)
+
+
+def check_python3_no_iterkeys(logical_line):
+ msg = ("N345: Use six.iterkeys() instead of dict.iterkeys().")
+
+ if re.search(r".*\.iterkeys\(\)", logical_line):
+ yield(0, msg)
+
+
+def check_python3_no_itervalues(logical_line):
+ msg = ("N346: Use six.itervalues() instead of dict.itervalues().")
+
+ if re.search(r".*\.itervalues\(\)", logical_line):
+ yield(0, msg)
+
+
+def cfg_help_with_enough_text(logical_line, tokens):
+ # TODO(markus_z): The count of 10 chars is the *highest* number I could
+ # use to introduce this new check without breaking the gate. IOW, if I
+ # use a value of 15 for example, the gate checks will fail because we have
+ # a few config options which use fewer chars than 15 to explain their
+ # usage (for example the options "ca_file" and "cert").
+ # As soon as the implementation of bp centralize-config-options is
+ # finished, I want to increase that magic number to a higher (to be
+ # defined) value.
+ # This check is an attempt to programmatically check a part of the review
+ # guidelines http://docs.openstack.org/developer/nova/code-review.html
+
+ msg = ("N347: A config option is a public interface to the cloud admins "
+ "and should be properly documented. A part of that is to provide "
+ "enough help text to describe this option. Use at least %s chars "
+ "for that description. Is is likely that this minimum will be "
+ "increased in the future." % opt_help_text_min_char_count)
+
+ if not cfg_opt_re.match(logical_line):
+ return
+
+ # ignore DeprecatedOpt objects. They get mentioned in the release notes
+ # and don't need a lengthy help text anymore
+ if "DeprecatedOpt" in logical_line:
+ return
+
+ def get_token_value(idx):
+ return tokens[idx][1]
+
+ def get_token_values(start_index, length):
+ values = ""
+ for offset in range(length):
+ values += get_token_value(start_index + offset)
+ return values
+
+ def get_help_token_index():
+ for idx in range(len(tokens)):
+ if get_token_value(idx) == "help":
+ return idx
+ return -1
+
+ def has_help():
+ return get_help_token_index() >= 0
+
+ def get_trimmed_help_text(t):
+ txt = ""
+ # len(["help", "=", "_", "("]) ==> 4
+ if get_token_values(t, 4) == "help=_(":
+ txt = get_token_value(t + 4)
+ # len(["help", "=", "("]) ==> 3
+ elif get_token_values(t, 3) == "help=(":
+ txt = get_token_value(t + 3)
+ # len(["help", "="]) ==> 2
+ else:
+ txt = get_token_value(t + 2)
+ return " ".join(txt.strip('\"\'').split())
+
+ def has_enough_help_text(txt):
+ return len(txt) >= opt_help_text_min_char_count
+
+ if has_help():
+ t = get_help_token_index()
+ txt = get_trimmed_help_text(t)
+ if not has_enough_help_text(txt):
+ yield(0, msg)
+ else:
+ yield(0, msg)
+
+
def factory(register):
register(import_no_db_in_virt)
register(no_db_session_in_public_api)
@@ -605,3 +712,8 @@ def factory(register):
register(check_no_contextlib_nested)
register(check_greenthread_spawns)
register(check_config_option_in_central_place)
+ register(check_doubled_words)
+ register(check_python3_no_iteritems)
+ register(check_python3_no_iterkeys)
+ register(check_python3_no_itervalues)
+ register(cfg_help_with_enough_text)
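
The doubled-word pattern registered above is easy to sanity-check standalone (the regex is copied here rather than imported; note that it is case-sensitive, so a capitalized duplicate such as "Is is" slips through):

    import re

    doubled_words_re = re.compile(
        r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")

    assert doubled_words_re.search("fail if the the gate breaks")
    assert doubled_words_re.search("a number to to be defined")
    assert doubled_words_re.search("Is is likely") is None  # case-sensitive
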
diff --git a/nova/image/download/__init__.py b/nova/image/download/__init__.py
index ee0cd341ec..81c7de485f 100644
--- a/nova/image/download/__init__.py
+++ b/nova/image/download/__init__.py
@@ -13,14 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
from oslo_log import log as logging
import stevedore.driver
import stevedore.extension
from nova.i18n import _LE
-CONF = cfg.CONF
LOG = logging.getLogger(__name__)
diff --git a/nova/image/glance.py b/nova/image/glance.py
index afa5ea321b..87a8b4cf56 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -23,6 +23,7 @@ import random
import sys
import time
+import cryptography
import glanceclient
from glanceclient.common import http
import glanceclient.exc
@@ -40,6 +41,8 @@ import six.moves.urllib.parse as urlparse
from nova import exception
from nova.i18n import _LE, _LI, _LW
import nova.image.download as image_xfers
+from nova import objects
+from nova import signature_utils
glance_opts = [
@@ -81,6 +84,10 @@ should be fully qualified urls of the form
help='A list of url scheme that can be downloaded directly '
'via the direct_url. Currently supported schemes: '
'[file].'),
+ cfg.BoolOpt('verify_glance_signatures',
+ default=False,
+ help='Require Nova to perform signature verification on '
+ 'each image downloaded from Glance.'),
]
LOG = logging.getLogger(__name__)
@@ -366,17 +373,70 @@ class GlanceImageService(object):
except Exception:
_reraise_translated_image_exception(image_id)
+ # Retrieve properties for verification of Glance image signature
+ verifier = None
+ if CONF.glance.verify_glance_signatures:
+ image_meta_dict = self.show(context, image_id,
+ include_locations=False)
+ image_meta = objects.ImageMeta.from_dict(image_meta_dict)
+ img_signature = image_meta.properties.get('img_signature')
+ img_sig_hash_method = image_meta.properties.get(
+ 'img_signature_hash_method'
+ )
+ img_sig_cert_uuid = image_meta.properties.get(
+ 'img_signature_certificate_uuid'
+ )
+ img_sig_key_type = image_meta.properties.get(
+ 'img_signature_key_type'
+ )
+ try:
+ verifier = signature_utils.get_verifier(context,
+ img_sig_cert_uuid,
+ img_sig_hash_method,
+ img_signature,
+ img_sig_key_type)
+ except exception.SignatureVerificationError:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_LE('Image signature verification failed '
+ 'for image: %s'), image_id)
+
close_file = False
if data is None and dst_path:
data = open(dst_path, 'wb')
close_file = True
if data is None:
+
+ # Perform image signature verification
+ if verifier:
+ try:
+ for chunk in image_chunks:
+ verifier.update(chunk)
+ verifier.verify()
+
+ LOG.info(_LI('Image signature verification succeeded '
+ 'for image: %s'), image_id)
+
+ except cryptography.exceptions.InvalidSignature:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_LE('Image signature verification failed '
+ 'for image: %s'), image_id)
return image_chunks
else:
try:
for chunk in image_chunks:
+ if verifier:
+ verifier.update(chunk)
data.write(chunk)
+ if verifier:
+ verifier.verify()
+ LOG.info(_LI('Image signature verification succeeded '
+ 'for image %s'), image_id)
+ except cryptography.exceptions.InvalidSignature:
+ data.truncate(0)
+ with excutils.save_and_reraise_exception():
+ LOG.error(_LE('Image signature verification failed '
+ 'for image: %s'), image_id)
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error writing to %(path)s: %(exception)s"),
diff --git a/nova/keymgr/barbican.py b/nova/keymgr/barbican.py
index 9579ea0623..99bd76eab8 100644
--- a/nova/keymgr/barbican.py
+++ b/nova/keymgr/barbican.py
@@ -22,7 +22,8 @@ import base64
import binascii
from barbicanclient import client as barbican_client
-from keystoneclient import session
+from keystoneauth1 import loading as ks_loading
+from keystoneauth1 import session
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
@@ -52,7 +53,7 @@ BARBICAN_OPT_GROUP = 'barbican'
CONF.register_opts(barbican_opts, group=BARBICAN_OPT_GROUP)
-session.Session.register_conf_options(CONF, BARBICAN_OPT_GROUP)
+ks_loading.register_session_conf_options(CONF, BARBICAN_OPT_GROUP)
LOG = logging.getLogger(__name__)
@@ -89,7 +90,7 @@ class BarbicanKeyManager(key_mgr.KeyManager):
return self._barbican_client
try:
- _SESSION = session.Session.load_from_conf_options(
+ _SESSION = ks_loading.load_session_from_conf_options(
CONF,
BARBICAN_OPT_GROUP)
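
In isolation, the keystoneclient-to-keystoneauth1 move above follows the standard loading pattern (the 'barbican' group name is the only Nova-specific piece; everything else is stock keystoneauth1):

    from keystoneauth1 import loading as ks_loading
    from oslo_config import cfg

    CONF = cfg.CONF

    # Register the session options (cafile, timeout, ...) under [barbican],
    # then build a Session from whatever the deployer configured there.
    ks_loading.register_session_conf_options(CONF, 'barbican')
    session = ks_loading.load_session_from_conf_options(CONF, 'barbican')
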
diff --git a/nova/locale/cs/LC_MESSAGES/nova-log-critical.po b/nova/locale/cs/LC_MESSAGES/nova-log-critical.po
index 2efa240c85..82ae45ba19 100644
--- a/nova/locale/cs/LC_MESSAGES/nova-log-critical.po
+++ b/nova/locale/cs/LC_MESSAGES/nova-log-critical.po
@@ -7,19 +7,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.dev41\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-27 06:30+0000\n"
-"PO-Revision-Date: 2015-08-20 08:42+0000\n"
-"Last-Translator: Zbyněk Schwarz <zbynek.schwarz@gmail.com>\n"
-"Language-Team: Czech\n"
-"Language: cs\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-08-20 08:42+0000\n"
+"Last-Translator: Zbyněk Schwarz <zbynek.schwarz@gmail.com>\n"
+"Language: cs\n"
"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Czech\n"
#, python-format
msgid "Missing core API extensions: %s"
diff --git a/nova/locale/cs/LC_MESSAGES/nova-log-error.po b/nova/locale/cs/LC_MESSAGES/nova-log-error.po
index 63ac9b20f1..bff393a44a 100644
--- a/nova/locale/cs/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/cs/LC_MESSAGES/nova-log-error.po
@@ -8,19 +8,19 @@
# Zbyněk Schwarz <zbynek.schwarz@gmail.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-12-21 12:12+0000\n"
-"Last-Translator: Zbyněk Schwarz <zbynek.schwarz@gmail.com>\n"
-"Language-Team: Czech\n"
-"Language: cs\n"
+"POT-Creation-Date: 2016-02-08 05:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-12-21 12:12+0000\n"
+"Last-Translator: Zbyněk Schwarz <zbynek.schwarz@gmail.com>\n"
+"Language: cs\n"
"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Czech\n"
msgid "\"Look for the VDIs failed"
msgstr "\"Hledání VBD selhalo"
@@ -233,10 +233,6 @@ msgid "Environment variable 'NETWORK_ID' must be set."
msgstr "Proměnná prostředí 'NETWORK_ID' musí být nastavena."
#, python-format
-msgid "Environment: %s"
-msgstr "Prostředí: %s"
-
-#, python-format
msgid ""
"Error contacting glance server '%(server)s' for '%(method)s', %(extra)s."
msgstr ""
@@ -393,10 +389,6 @@ msgstr "Chyba při pokusu o záchranu instance"
msgid "Error trying to reschedule"
msgstr "Chyba při pokusu o znovu naplánování"
-#, python-format
-msgid "Error updating resources for node %(node)s: %(e)s"
-msgstr "Chyba při aktualizaci zdrojů pro uzel %(node)s: %(e)s"
-
msgid "Error waiting for responses from neighbor cells"
msgstr "Chyba pÅ™i Äekání na odpovÄ›Ä od sousedních bunÄ›k"
@@ -763,9 +755,6 @@ msgstr "Čištění připojených VDI selhalo"
msgid "Fatal Exception running %(name)s %(type)s-hook: %(obj)s"
msgstr "Závažná výjimka při spuštění %(name)s %(type)s-hák: %(obj)s"
-msgid "FaultWrapper error"
-msgstr "Chyba ve FaultWrapper"
-
msgid "Guest does not have a console available"
msgstr "Host nemá konzoli k dispozici"
@@ -828,10 +817,6 @@ msgid "Invalid server_string: %s"
msgstr "Neplatný řetězec serveru: %s"
#, python-format
-msgid "Keystone failure: %s"
-msgstr "Selhání Keystone: %s"
-
-#, python-format
msgid "Live Migration failure: %s"
msgstr "Selhání přesunu za běhu: %s"
@@ -1121,10 +1106,6 @@ msgid "Unable to parse rrd of %s"
msgstr "Nelze zpracovat rrd z %s"
#, python-format
-msgid "Unable to preallocate image at path: %(path)s"
-msgstr "Nelze předpřidělit obraz na cestě: %(path)s"
-
-#, python-format
msgid "Unable to retrieve certificate with ID %(id)s: %(e)s"
msgstr "Nelze získat certifikát s ID %(id)s: %(e)s"
@@ -1147,10 +1128,6 @@ msgstr "Nelze aktualizovat hostitele na portu %s"
msgid "Unable to update instance VNIC index for port %s."
msgstr "Nelze aktualizovat index VNIC instance pro port %s."
-#, python-format
-msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
-msgstr "ZjiÅ¡tÄ›no neoÄekávané %(ex_name)s: %(ex_str)s"
-
msgid "Unexpected build failure, not rescheduling build."
msgstr "NeoÄekávané selhání sestavení, nebude znovu naplánováno."
diff --git a/nova/locale/cs/LC_MESSAGES/nova-log-info.po b/nova/locale/cs/LC_MESSAGES/nova-log-info.po
index 27fc4173b1..02646891a1 100644
--- a/nova/locale/cs/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/cs/LC_MESSAGES/nova-log-info.po
@@ -8,19 +8,19 @@
# Zbyněk Schwarz <zbynek.schwarz@gmail.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-12-16 03:45+0000\n"
-"Last-Translator: Zbyněk Schwarz <zbynek.schwarz@gmail.com>\n"
-"Language-Team: Czech\n"
-"Language: cs\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-12-16 03:45+0000\n"
+"Last-Translator: Zbyněk Schwarz <zbynek.schwarz@gmail.com>\n"
+"Language: cs\n"
"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Czech\n"
#, python-format
msgid "%(action_str)s instance"
@@ -68,25 +68,14 @@ msgid "Adding security group %(security_group_id)s to port %(port_id)s"
msgstr ""
"PÅ™idávání bezpeÄnostní skupiny %(security_group_id)s k portu %(port_id)s"
-msgid "Allocate address"
-msgstr "Přidělit adresu"
-
#, python-format
msgid "Allocated network: '%s' for instance"
msgstr "Přidělena síť: '%s' pro instanci"
-#, python-format
-msgid "Associate address %(public_ip)s to instance %(instance_id)s"
-msgstr "Přidružit adresu %(public_ip)s k instanci %(instance_id)s"
-
msgid "Attach interface"
msgstr "Připojit rozhraní"
#, python-format
-msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
-msgstr "Připojit svazek %(volume_id)s k instanci %(instance_id)s na %(device)s"
-
-#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr "Připojit svazek %(volume_id)s k instanci %(server_id)s na %(device)s"
@@ -230,22 +219,10 @@ msgid "Create assisted snapshot from volume %s"
msgstr "Vytvořit snímek s pomocí ze svazku %s"
#, python-format
-msgid "Create key pair %s"
-msgstr "VytvoÅ™it pár klíÄů %s"
-
-#, python-format
msgid "Create snapshot from volume %s"
msgstr "Vytvořit snímek ze svazku %s"
#, python-format
-msgid "Create snapshot of volume %s"
-msgstr "Vytvořit snímek svazku %s"
-
-#, python-format
-msgid "Create volume from snapshot %s"
-msgstr "Vytvořit svazek ze snímku %s"
-
-#, python-format
msgid "Create volume of %s GB"
msgstr "Vytvořit svazek o %s GB"
@@ -272,14 +249,6 @@ msgstr ""
"před %(last)d vteřinami"
#, python-format
-msgid "De-registering image %s"
-msgstr "Zrušení registrace obrazu %s"
-
-#, python-format
-msgid "Delete key pair %s"
-msgstr "Smazat pár klíÄů %s"
-
-#, python-format
msgid "Delete security group %s"
msgstr "Smazat bezpeÄnostní skupinu %s"
@@ -360,10 +329,6 @@ msgid "Disabling host %s."
msgstr "Zakazování hostitele %s."
#, python-format
-msgid "Disassociate address %s"
-msgstr "OdlouÄit adresu %s"
-
-#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr "Odpojování starého VDI %s z výpoÄetního domU"
@@ -533,10 +498,6 @@ msgid "Get console output"
msgstr "Získat výstup konzole"
#, python-format
-msgid "Get console output for instance %s"
-msgstr "Získat výstup konzole pro instanci %s"
-
-#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
@@ -597,10 +558,6 @@ msgstr ""
"%(downloaded)s trvání: %(duration).2f vteřin pro obraz %(image_id)s"
#, python-format
-msgid "Import key %s"
-msgstr "Importovat klÃ­Ä %s"
-
-#, python-format
msgid ""
"Increasing downtime to %(downtime)d ms after %(waittime)d sec elapsed time"
msgstr "Snižování odstávky na %(downtime)d ms po uplynutí %(waittime)d vteřin"
@@ -828,10 +785,6 @@ msgstr ""
msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr "Přepínání hostitele %(host_name)s do režimu údržby %(mode)s."
-#, python-format
-msgid "Reboot instance %r"
-msgstr "Restartovat instanci %r"
-
msgid "Rebooting instance"
msgstr "Restartování instance"
@@ -879,14 +832,6 @@ msgstr ""
msgid "Recovered from being unable to report status."
msgstr "Nemožnost hlásit stav byla opravena."
-#, python-format
-msgid "Registered image %(image_location)s with id %(image_id)s"
-msgstr "Obraz %(image_location)s registrován s id %(image_id)s"
-
-#, python-format
-msgid "Release address %s"
-msgstr "Uvolnit adresu %s"
-
msgid "Reloading compute RPC API"
msgstr "Znovu naÄítání výpoÄtové API RPC"
@@ -1113,11 +1058,6 @@ msgstr ""
"%(ex)s"
#, python-format
-msgid ""
-"Unauthorized request for controller=%(controller)s and action=%(action)s"
-msgstr "NeoprávnÄ›ný požadavek pro ovladaÄ=%(controller)s a Äinnost=%(action)s"
-
-#, python-format
msgid "Unexpected error: %s"
msgstr "NeoÄekávaná chyba: %s"
@@ -1135,10 +1075,6 @@ msgid "Updating from migration %s"
msgstr "Aktualizování z přesunu %s"
#, python-format
-msgid "Updating image %s publicity"
-msgstr "Aktualizace zveřejnění obrazu %s"
-
-#, python-format
msgid "Updating instance to original state: '%s'"
msgstr "Aktualizování instance do původního stavu: '%s'"
diff --git a/nova/locale/cs/LC_MESSAGES/nova-log-warning.po b/nova/locale/cs/LC_MESSAGES/nova-log-warning.po
index 916f5640c2..8d7b414759 100644
--- a/nova/locale/cs/LC_MESSAGES/nova-log-warning.po
+++ b/nova/locale/cs/LC_MESSAGES/nova-log-warning.po
@@ -8,19 +8,19 @@
# Zbyněk Schwarz <zbynek.schwarz@gmail.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-12-18 02:33+0000\n"
-"Last-Translator: Zbyněk Schwarz <zbynek.schwarz@gmail.com>\n"
-"Language-Team: Czech\n"
-"Language: cs\n"
+"POT-Creation-Date: 2016-02-08 05:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-12-18 02:33+0000\n"
+"Last-Translator: Zbyněk Schwarz <zbynek.schwarz@gmail.com>\n"
+"Language: cs\n"
"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Czech\n"
#, python-format
msgid ""
@@ -70,14 +70,6 @@ msgstr ""
"%s odkládací obraz byl použit instanci ale žáfné záložní soubory neexistují!"
#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and "
-"will be locked out for %(lock_mins)d minutes."
-msgstr ""
-"Přístupový klÃ­Ä %(access_key)s %(failures)d krát selhal pÅ™i ověření a bude "
-"zablokován na %(lock_mins)d minut."
-
-#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr "Adresa |%(address)s| není přidělena"
@@ -1133,36 +1125,6 @@ msgstr ""
"zastaralé. Požadovaná minimální verze vCenter bude ve verzi 13.0.0 zvýšena "
"na %(version)s."
-#, python-format
-msgid ""
-"Running Nova with a block_migration_flag config option which does not "
-"contain %(flag)s will cause all block-migrations to be live-migrations "
-"instead. This setting should be on the block_migration_flag."
-msgstr ""
-"Spuštění Nova s volbou nastavení block_migration_flag neobsahující %(flag)s "
-"způsobí, že všechny přenosy bloku budou přenosy za provozu. Toto nastavení "
-"by se mělo používat pouze u block_migration_flag."
-
-#, python-format
-msgid ""
-"Running Nova with a libvirt version less than %(version)s is deprecated. The "
-"required minimum version of libvirt will be raised to %(version)s in the "
-"13.0.0 release."
-msgstr ""
-"Spouštět Nova pomocí libvirt s verzí starší než %(version)s je zastaralé. "
-"Požadovaná minimální verze libvirt bude ve verzi 13.0.0 zvýšena na "
-"%(version)s."
-
-#, python-format
-msgid ""
-"Running Nova with a live_migration_flag config option which contains "
-"%(flag)s will cause all live-migrations to be block-migrations instead. This "
-"setting should only be on the block_migration_flag instead."
-msgstr ""
-"Spuštění Nova s volbou nastavení live_migration_flag obsahující %(flag)s "
-"způsobí, že všechny přenosy za provozu budou přenosy bloku. Toto nastavení "
-"by se mělo používat u block_migration_flag."
-
msgid ""
"Running libvirt-lxc without user namespaces is dangerous. Containers spawned "
"by Nova will be run as the host's root user. It is highly suggested that "
@@ -1271,15 +1233,6 @@ msgstr ""
"Volby nastavení rozšíření jsou zastaralé. V blízké budoucnosti budete muset "
"spouštět celé API."
-msgid ""
-"The in tree EC2 API is deprecated as of Kilo release and may be removed in a "
-"future release. The openstack ec2-api project http://git.openstack.org/cgit/"
-"openstack/ec2-api/ is the target replacement for this functionality."
-msgstr ""
-"EC2 API ve stromÄ› je od verze Kilo oznaÄeno jako zastaralé a v budoucích "
-"verzích mlže být odstraněno. Projekt openstack ec2-api na adrese http://git."
-"openstack.org/cgit/openstack/ec2-api/ má za cíl nahradit tuto funkcionalitu."
-
#, python-format
msgid ""
"The legacy v2 API module already moved into'nova.api.openstack.compute."
@@ -1703,10 +1656,6 @@ msgstr ""
"Existuje mnoho pevných ip adres, bude použita první pevná ip adresa IPv4: %s"
#, python-format
-msgid "multiple fixed_ips exist, using the first: %s"
-msgstr "existuje mnoho pevných ip adres, bude použita první: %s"
-
-#, python-format
msgid ""
"my_ip address (%(my_ip)s) was not found on any of the interfaces: %(ifaces)s"
msgstr ""
diff --git a/nova/locale/cs/LC_MESSAGES/nova.po b/nova/locale/cs/LC_MESSAGES/nova.po
index 5a34a734cf..c9807191b2 100644
--- a/nova/locale/cs/LC_MESSAGES/nova.po
+++ b/nova/locale/cs/LC_MESSAGES/nova.po
@@ -1,22 +1,30 @@
-# Czech translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
+# Translators:
+# David Soukup <daos2@seznam.cz>, 2013
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011
+# Jaroslav Lichtblau <dragonlord@seznam.cz>, 2014
+# Zbyněk Schwarz <zbynek.schwarz@gmail.com>, 2013,2015
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Zbyněk Schwarz <zbynek.schwarz@gmail.com>, 2015. #zanata
+# Zbyněk Schwarz <zbynek.schwarz@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-12-21 12:12+0000\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-01-08 01:32+0000\n"
"Last-Translator: Zbyněk Schwarz <zbynek.schwarz@gmail.com>\n"
"Language: cs\n"
+"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
"Language-Team: Czech\n"
-"Plural-Forms: nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
#, python-format
msgid "%(address)s is not a valid IP address."
@@ -236,9 +244,6 @@ msgstr ""
msgid "API version %(version)s is not supported on this method."
msgstr "API s verzí %(version)s není v této metodě podporován."
-msgid "Access key not provided"
-msgstr "Přístupový klÃ­Ä není zadán"
-
msgid "Access list not available for public flavors."
msgstr "Seznam přístupu není dostupný pro veřejné konfigurace."
@@ -916,10 +921,6 @@ msgstr "Nelze najít binární soubor %(binary)s v hostiteli %(host)s."
msgid "Could not find config at %(path)s"
msgstr "Nelze najít nastavení v %(path)s"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "Nelze najít pár klíÄů: %s"
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr "Nelze najít odkazy datového úložiště, který VM používá."
@@ -953,14 +954,6 @@ msgstr "Nelze nahrát obraz %(image_id)s"
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "Nelze získat IP místního spojení %(interface)s :%(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"NeÅ¡lo zastavit instanci %(instance)s do jedné hodiny. SouÄasný stav vm: "
-"%(vm_state)s, souÄasný stav úkolu: %(task_state)s"
-
msgid "Create networks failed"
msgstr "Vytvoření sítí selhalo"
@@ -1232,21 +1225,6 @@ msgstr "Očekáváno uuid ale obdrženo %(uuid)s."
msgid "Expected object of type: %s"
msgstr "OÄekáván objekt typu: %s"
-msgid "Expecting a list of resources"
-msgstr "OÄekáván seznam zdrojů"
-
-msgid "Expecting a list of tagSets"
-msgstr "OÄekáván seznam sad znaÄek"
-
-msgid "Expecting both key and value to be set"
-msgstr "OÄekáváno zadání jak klíÄe tak i hodnoty"
-
-msgid "Expecting key to be set"
-msgstr "OÄekáváno zadání klíÄe"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "OÄekáváno, že seznam znaÄek bude ve formátu párů klíÄ/hodnota"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "Sloupec %(table)s.%(column)s je ve stínové tabulce navíc"
@@ -1412,10 +1390,6 @@ msgstr "Nelze pozastavit instanci: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "Nelze ukonÄit instanci: %(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "Nelze analyzovat odpovÄ›Ä od keystone: %s"
-
msgid "Failure prepping block device."
msgstr "Chyba při přípravě blokového zařízení."
@@ -1509,14 +1483,6 @@ msgstr ""
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr ""
-"DodateÄná specifikace konfigurace %(id)d nemohla být vytvoÅ™ena, Äi "
-"aktualizována po %(retries)d pokusech."
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
@@ -1625,9 +1591,6 @@ msgstr "Plovoucí IP adresa nenalezeno pro hostitele %(host)s."
msgid "Floating IP pool not found."
msgstr "Zásoba plovoucích IP adres nenalezena."
-msgid "Floating ip is not associated."
-msgstr "Plovoucí ip adresa není přidružena."
-
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
"meta."
@@ -1800,9 +1763,6 @@ msgstr "Popisná data obrazu překračují limit"
msgid "Image model '%(image)s' is not supported"
msgstr "Model obrazu '%(image)s' není podporován"
-msgid "Image must be available"
-msgstr "Obraz musí být dostupný"
-
msgid "Image not found."
msgstr "Obraz nenalezen"
@@ -2058,9 +2018,6 @@ msgstr "Rozhraní %(interface)s nenalezeno."
msgid "Invalid Base 64 data for file %(path)s"
msgstr "Neplatná data Base 64 pro soubor %(path)s"
-msgid "Invalid CIDR"
-msgstr "Neplatný CIDR"
-
msgid "Invalid Connection Info"
msgstr "Neplatné informace o připojení"
@@ -2073,10 +2030,6 @@ msgid "Invalid IP format %s"
msgstr "Neplatný formát IP adresy %s"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "Neplatný IP protokol %(protocol)s"
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "Neplatný protokol IP %(protocol)s."
@@ -2385,20 +2338,9 @@ msgid "Invalid usage_type: %s"
msgstr "Neplatný typ použití: %s"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"Neplatná hodnota '%(ec2_instance_id)s' pro id instance. Instance nemá v "
-"kořeni připojen svazek (%(root)s)"
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "Neplatná hodnota '%s' pro vynucení."
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "Neplatná hodnota pro 'scheduler_max_attempts', musí být >= 1"
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "Neplatná hodnota pro volbu konfiguraÄní jednotky: %(option)s"
@@ -3016,9 +2958,6 @@ msgstr "Žádné tělo žádosti"
msgid "No root disk defined."
msgstr "Nezadán žádný kořenový disk."
-msgid "No rule for the specified parameters."
-msgstr "Pro zadané parametry není žádné pravidlo."
-
msgid "No suitable network for migrate"
msgstr "Žádné vhodné sítě pro přesun"
@@ -3052,10 +2991,6 @@ msgstr "Nelze získat volný port pro %(host)s"
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "Nelze svázat %(host)s:%(port)d, %(error)s"
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "Nemáte oprávnění měnit vlastnosti obrazu %s"
-
msgid "Not an rbd snapshot"
msgstr "Není snímkem rbd"
@@ -3137,9 +3072,6 @@ msgstr ""
msgid "Only host parameter can be specified"
msgstr "Pouze parametry hostitele mohou být zadány"
-msgid "Only instances implemented"
-msgstr "Jsou zavedeny pouze instance"
-
msgid "Only root certificate can be retrieved."
msgstr "Může být získán pouze kořenový certifikát."
@@ -3185,6 +3117,10 @@ msgid "PCI device %(id)s not found"
msgstr "PCI zařízení %(id)s nenalezeno"
#, python-format
+msgid "PCI device request %(requests)s failed"
+msgstr "Žádost zařízení PCI %(requests)s selhala"
+
+#, python-format
msgid ""
"PCS doesn't support images in %s format. You should either set "
"force_raw_images=True in config or upload an image in ploop or raw format."
@@ -3453,6 +3389,10 @@ msgstr ""
msgid "RequestSpec not found for instance %(instance_uuid)s"
msgstr "U instance %(instance_uuid)s nebyla nalezena žádost o specifikaci"
+msgid "Requested CPU control policy not supported by host"
+msgstr ""
+"Požadovaná zásada kontroly procesoru není podporována na tomto hostiteli"
+
#, python-format
msgid "Requested cidr (%(cidr)s) conflicts with existing cidr (%(other)s)"
msgstr ""
@@ -3495,6 +3435,14 @@ msgstr ""
"pro daný poÄet vCPU %(vcpus)d"
#, python-format
+msgid ""
+"Required image properties for signature verification do not exist. Cannot "
+"verify signature. Missing property: %s"
+msgstr ""
+"Požadované vlastnosti obrazu pro ověření podpisu neexistují. Nelze ověřit "
+"podpis. Chybí vlastnost: %s"
+
+#, python-format
msgid "Rescue device does not exist for instance %s"
msgstr "Záchranné zařízení neexistuje v instanci %s"
@@ -3544,14 +3492,6 @@ msgstr ""
"Spuštění Nova s paralelními typy virtualizací vyžaduje libvirt verze %s"
#, python-format
-msgid ""
-"Running Nova with qemu/kvm virt_type on s390/s390x requires libvirt version "
-"%(libvirt_ver)s and qemu version %(qemu_ver)s, or greater"
-msgstr ""
-"Spuštění Nova s typem virtualizace qemu/kvm na s390/s390x vyžaduje libvirt "
-"verze %(libvirt_ver)s a verzí qemu %(qemu_ver)s, nebo novější"
-
-#, python-format
msgid "Running cmd (subprocess): %s"
msgstr "Spouštění příkazu (podproces): %s"
@@ -3736,9 +3676,6 @@ msgstr ""
"Zmenšení souborového systému pomocí resize2fs selhalo, prosím zkontrolujte, "
"zda máte na svém disku dostatek volného místa."
-msgid "Signature not provided"
-msgstr "Podpis není zadán"
-
#, python-format
msgid "Signature verification for the image failed: %(reason)s."
msgstr "Selhalo ověření podpisu pro obraz: %(reason)s."
@@ -4160,9 +4097,6 @@ msgstr ""
"PÅ™i kontrole možnosti pÅ™esunu za provozu na hostitele %s vyprÅ¡el Äasový "
"limit."
-msgid "Timestamp failed validation."
-msgstr "Časové razítko neprošlo ověřením."
-
msgid "To and From ports must be integers"
msgstr "Porty Do a Od musí být celá Äísla"
@@ -4177,18 +4111,12 @@ msgstr ""
"Bude vytvoÅ™eno příliÅ¡ mnoho IP adres. Prosím zvyÅ¡te /%s pro snížení poÄtu "
"vytvořených IP adres."
-msgid "Too many failed authentications."
-msgstr "Příliš mnoho ověření selhalo."
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr "Typ a kód musí být v protokolu ICMP celá Äísla"
msgid "UUID is required to delete Neutron Networks"
msgstr "Pro smazání sítí Neutron je vyžadováno UUID"
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "Nelze přidružit pevnou IP adresu, nezadáno fixed_ips."
-
#, python-format
msgid ""
"Unable to associate floating IP %(address)s to any fixed IPs for instance "
@@ -4511,9 +4439,6 @@ msgstr ""
msgid "Unknown delete_info type %s"
msgstr "Neznámý typ mazání informací %s"
-msgid "Unknown error occurred."
-msgstr "Vyskytla se neznámá chyba."
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "Neznámý typ obrazu=%s"
@@ -4751,12 +4676,6 @@ msgid "Volume resource quota exceeded"
msgstr "PÅ™ekroÄena kvóta zdroje svazku"
#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
-msgstr ""
-"Svazek nastavuje velikost bloku, ale to vyžaduje libvirt verze '%s' nebo "
-"vyšší."
-
-#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
@@ -4905,9 +4824,6 @@ msgstr "mapování blokového zařízení musí být seznam"
msgid "block_device_mapping_v2 must be a list"
msgstr "Mapování blokového zařízení verze 2 musí být seznam"
-msgid "can't build a valid rule"
-msgstr "nelze sestavit platné pravidlo"
-
msgid "cannot delete non-existent key"
msgstr "nelze smazat neexistující klíÄ"
@@ -5029,13 +4945,6 @@ msgstr "obraz"
msgid "image already mounted"
msgstr "obraz již je připojen"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "Obraz z %(instance)s v %(now)s"
-
-msgid "imageLocation is required"
-msgstr "ImageLocation je povinné"
-
msgid "index"
msgstr "rejstřík"
@@ -5122,9 +5031,6 @@ msgstr "zařízení nbd %s se nezobrazilo"
msgid "nbd unavailable: module not loaded"
msgstr "nbd nedostupné: modul nenaÄten"
-msgid "need group_name or group_id"
-msgstr "je třeba zadat název nebo id skupiny"
-
msgid "network"
msgstr "síť"
@@ -5155,15 +5061,9 @@ msgstr ""
msgid "onSharedStorage must be specified."
msgstr "onSharedStorage musí být zadáno."
-msgid "only group \"all\" is supported"
-msgstr "podporována je pouze skupina \"all\""
-
msgid "operation time out"
msgstr "operace vypršela"
-msgid "operation_type must be add or remove"
-msgstr "typ operace musí být add nebo remove"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr "os-získání výstupu konzole poškozeno, nebo chybí z těla požadavku"
@@ -5201,9 +5101,6 @@ msgstr "python knihovny rbd nenalezeny"
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr "Ätení smazaného může být buÄ 'no', 'yes' nebo 'only', ne %r"
-msgid "resource_id and tag are required"
-msgstr "Id zdroje a znaÄka jsou vyžadovány"
-
msgid "rpc_port must be integer"
msgstr "Port rpc musí být celé Äíslo"
@@ -5269,9 +5166,6 @@ msgstr "nepodporovaná pole: %s"
msgid "user"
msgstr "uživatel"
-msgid "user or group not specified"
-msgstr "uživatel nebo skupina nebyly zadány"
-
msgid "uuid"
msgstr "uuid"
diff --git a/nova/locale/de/LC_MESSAGES/nova-log-critical.po b/nova/locale/de/LC_MESSAGES/nova-log-critical.po
index 27b5f858dc..c03dbc0eef 100644
--- a/nova/locale/de/LC_MESSAGES/nova-log-critical.po
+++ b/nova/locale/de/LC_MESSAGES/nova-log-critical.po
@@ -1,18 +1,18 @@
# Andreas Jaeger <jaegerandi@gmail.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.dev147\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-13 06:04+0000\n"
-"PO-Revision-Date: 2015-09-27 09:38+0000\n"
-"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
-"Language-Team: German\n"
-"Language: de\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Generator: Zanata 3.7.1\n"
+"PO-Revision-Date: 2015-09-27 09:38+0000\n"
+"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
+"Language: de\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: German\n"
#, python-format
msgid "Missing core API extensions: %s"
diff --git a/nova/locale/de/LC_MESSAGES/nova-log-error.po b/nova/locale/de/LC_MESSAGES/nova-log-error.po
index f03f21996a..ec1cc9f3bc 100644
--- a/nova/locale/de/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/de/LC_MESSAGES/nova-log-error.po
@@ -10,19 +10,19 @@
# Reik Keutterling <spielkind@gmail.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-10-01 06:49+0000\n"
-"Last-Translator: Frank Kloeker <eumel@arcor.de>\n"
-"Language-Team: German\n"
-"Language: de\n"
+"POT-Creation-Date: 2016-02-08 05:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-10-01 06:49+0000\n"
+"Last-Translator: Frank Kloeker <eumel@arcor.de>\n"
+"Language: de\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: German\n"
msgid "\"Look for the VDIs failed"
msgstr "\"Nachschau der VDIs fehlgeschlagen"
@@ -212,10 +212,6 @@ msgid "Environment variable 'NETWORK_ID' must be set."
msgstr "Umgebungsvariable 'NETWORK_ID' muss gesetzt sein."
#, python-format
-msgid "Environment: %s"
-msgstr "Umgebung: %s"
-
-#, python-format
msgid "Error copying key: %s"
msgstr "Fehler beim Kopieren des Schlüssels: %s"
@@ -358,10 +354,6 @@ msgstr "Fehler beim Versuch des Wiederherstellens der Instanz"
msgid "Error trying to reschedule"
msgstr "Fehler beim Versuch der Neuplanung"
-#, python-format
-msgid "Error updating resources for node %(node)s: %(e)s"
-msgstr "Fehler bei der Aktualisierung der Resourcen für Knoten %(node)s: %(e)s"
-
msgid "Error waiting for responses from neighbor cells"
msgstr "Fehler beim Warten auf Antwort von den Nachbarzellen"
@@ -730,9 +722,6 @@ msgstr "Fehler beim Aufräumen der angehangenen VDIs"
msgid "Fatal Exception running %(name)s %(type)s-hook: %(obj)s"
msgstr "Schwere Ausnahme beim Lauf %(name)s %(type)s-hook: %(obj)s"
-msgid "FaultWrapper error"
-msgstr "Fehler FaultWrapper"
-
msgid "Guest does not have a console available"
msgstr "Gast hat keine Konsole verfügbar"
@@ -783,10 +772,6 @@ msgid "Invalid server_string: %s"
msgstr "Ungültiger server_string: %s"
#, python-format
-msgid "Keystone failure: %s"
-msgstr "Fehler Keystone: %s"
-
-#, python-format
msgid "Live Migration failure: %s"
msgstr "Live-Migration fehlgeschlagen: %s"
@@ -1066,10 +1051,6 @@ msgid "Unable to parse rrd of %s"
msgstr "Zerlegen des RRD von %s fehlgeschlagen"
#, python-format
-msgid "Unable to preallocate image at path: %(path)s"
-msgstr "Vorbelegen eines Abbildes auf Pfad: %(path)s nicht möglich"
-
-#, python-format
msgid "Unable to retrieve storage policy with name %s"
msgstr "Konnte Speicherregeln mit Namen %s nicht beziehen"
@@ -1088,10 +1069,6 @@ msgstr "Hostaktualisierung nicht möglich für Port %s"
msgid "Unable to update instance VNIC index for port %s."
msgstr "Instanzaktualisierung VNIC Index nicht möglich für Port %s."
-#, python-format
-msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
-msgstr "Unerwarteter %(ex_name)s hat %(ex_str)s ausgelöst"
-
msgid "Unexpected build failure, not rescheduling build."
msgstr "Unerwarteter Fehler beim Aufbau, wird nicht neu geplant."
diff --git a/nova/locale/de/LC_MESSAGES/nova-log-info.po b/nova/locale/de/LC_MESSAGES/nova-log-info.po
index 3b67b7ff9b..8174394620 100644
--- a/nova/locale/de/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/de/LC_MESSAGES/nova-log-info.po
@@ -9,19 +9,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-10-03 03:02+0000\n"
-"Last-Translator: Frank Kloeker <eumel@arcor.de>\n"
-"Language-Team: German\n"
-"Language: de\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-10-03 03:02+0000\n"
+"Last-Translator: Frank Kloeker <eumel@arcor.de>\n"
+"Language: de\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: German\n"
#, python-format
msgid "%(action_str)s instance"
@@ -64,25 +64,14 @@ msgstr "Aktive Basisdateien: %s"
msgid "Adding security group %(security_group_id)s to port %(port_id)s"
msgstr "Füge Sicherheitsgruppe %(security_group_id)s zu Port %(port_id)s"
-msgid "Allocate address"
-msgstr "Weise Adresse zu"
-
#, python-format
msgid "Allocated network: '%s' for instance"
msgstr "Netwerkzuweisung: '%s' für Instanz"
-#, python-format
-msgid "Associate address %(public_ip)s to instance %(instance_id)s"
-msgstr "Verbinde Adresse %(public_ip)s mit Instanz %(instance_id)s"
-
msgid "Attach interface"
msgstr "Schnittstelle hinzufügen"
#, python-format
-msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
-msgstr "Hänge Volume %(volume_id)s an Instanz %(instance_id)s via %(device)s"
-
-#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr "Hänge Volume %(volume_id)s an Instanz %(server_id)s via %(device)s"
@@ -197,22 +186,10 @@ msgid "Create assisted snapshot from volume %s"
msgstr "Erstelle Schattenkopievorlage vom Datenträger %s"
#, python-format
-msgid "Create key pair %s"
-msgstr "Schlüsselpaar %s erstellen"
-
-#, python-format
msgid "Create snapshot from volume %s"
msgstr "Momentaufnahme aus Datenträger %s erstellen"
#, python-format
-msgid "Create snapshot of volume %s"
-msgstr "Erstelle Momentaufnahme aus Datenträger %s"
-
-#, python-format
-msgid "Create volume from snapshot %s"
-msgstr "Erzeuge Volume von der Schattenkopie %s"
-
-#, python-format
msgid "Create volume of %s GB"
msgstr "Datenträger mit %s GB erstellen"
@@ -239,14 +216,6 @@ msgstr ""
"Bytes seit %(last)d Sekunden"
#, python-format
-msgid "De-registering image %s"
-msgstr "Melde Abbild %s ab"
-
-#, python-format
-msgid "Delete key pair %s"
-msgstr "Schlüsselpaar %s löschen"
-
-#, python-format
msgid "Delete security group %s"
msgstr "Sicherheitsgruppe %s löschen"
@@ -327,10 +296,6 @@ msgid "Disabling host %s."
msgstr "Host %s wird deaktiviert."
#, python-format
-msgid "Disassociate address %s"
-msgstr "Trenne Adresse %s"
-
-#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr "Trenne schlechte VDI %s von Compute domU"
@@ -496,10 +461,6 @@ msgid "Get console output"
msgstr "Konsolenausgabe holen"
#, python-format
-msgid "Get console output for instance %s"
-msgstr "Konsolenausgabe holen für Instanz %s"
-
-#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
@@ -561,10 +522,6 @@ msgstr ""
"%(downloaded)s Dauer: %(duration).2f Sekunden für Abbild %(image_id)s"
#, python-format
-msgid "Import key %s"
-msgstr "Schlüssel %s importieren"
-
-#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database: "
"assuming it is a worker VM and skip ping migration to a new host"
@@ -780,10 +737,6 @@ msgstr ""
msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr "Setze Host %(host_name)s in Wartungsmodus %(mode)s."
-#, python-format
-msgid "Reboot instance %r"
-msgstr "Instanz %r neu starten"
-
msgid "Rebooting instance"
msgstr "Instanz wird neu gestartet"
@@ -829,14 +782,6 @@ msgstr ""
"Dienstestatus."
#, python-format
-msgid "Registered image %(image_location)s with id %(image_id)s"
-msgstr "Angemeldetes Abbild %(image_location)s mit ID %(image_id)s"
-
-#, python-format
-msgid "Release address %s"
-msgstr "Adresse %s freigeben"
-
-#, python-format
msgid "Removable base files: %s"
msgstr "Entfernbare Basisdateien: %s"
@@ -1041,12 +986,6 @@ msgstr ""
"langsamen Code-Pfad: %(ex)s"
#, python-format
-msgid ""
-"Unauthorized request for controller=%(controller)s and action=%(action)s"
-msgstr ""
-"Unberechtigte Anfrage für Controller=%(controller)s und Aktion=%(action)s"
-
-#, python-format
msgid "Unexpected error: %s"
msgstr "Unerwarteter Fehler: %s"
@@ -1064,10 +1003,6 @@ msgid "Updating from migration %s"
msgstr "Aktualisiere von Migration %s"
#, python-format
-msgid "Updating image %s publicity"
-msgstr "Aktualisiere Abbild %s öffentlich"
-
-#, python-format
msgid "Updating instance to original state: '%s'"
msgstr "Aktualisiere Instanz in den Original-Status '%s'"
diff --git a/nova/locale/de/LC_MESSAGES/nova.po b/nova/locale/de/LC_MESSAGES/nova.po
index 11a46e8269..622cd80d59 100644
--- a/nova/locale/de/LC_MESSAGES/nova.po
+++ b/nova/locale/de/LC_MESSAGES/nova.po
@@ -1,22 +1,34 @@
-# German translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
-msgid ""
-msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+# Translators:
+# Alec Hans <alec.hans@alecsoft.net>, 2013
+# Ettore Atalan <atalanttore@googlemail.com>, 2014
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011
+# iLennart21 <a12s34d56f78@live.com>, 2013
+# Laera Loris <llaera@outlook.com>, 2013
+# matthew wagoner <zxkuqyb@gmail.com>, 2012
+# English translations for nova.
+# Andreas Jaeger <jaegerandi@gmail.com>, 2015. #zanata
+# Frank Kloeker <eumel@arcor.de>, 2015. #zanata
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-09-29 12:26+0000\n"
"Last-Translator: Frank Kloeker <eumel@arcor.de>\n"
"Language: de\n"
"Language-Team: German\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.2.0\n"
+"X-Generator: Zanata 3.7.3\n"
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
@@ -227,9 +239,6 @@ msgstr ""
msgid "API version %(version)s is not supported on this method."
msgstr "API Version %(version)s ist nicht unterstützt für diese Methode."
-msgid "Access key not provided"
-msgstr "Zugriffsschlüssel nicht angegeben"
-
msgid "Access list not available for public flavors."
msgstr "Zugriffsliste ist für öffentliche Versionen nicht verfügbar. "
@@ -904,10 +913,6 @@ msgstr ""
msgid "Could not find config at %(path)s"
msgstr "Konfiguration konnte unter %(path)s nicht gefunden werden"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "Schlüsselpaar(e) konnte(n) nicht gefunden werden: %s"
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
"Die von der VM verwendeten Datenspeicherverweise konnten nicht gefunden "
@@ -945,14 +950,6 @@ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
"Link-Local-IP-Adresse von %(interface)s konnte nicht abgerufen werden: %(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"Instanz %(instance)s konnte nicht innerhalb einer Stunde gestoppt werden. "
-"Aktueller vm_state: %(vm_state)s, aktueller task_state: %(task_state)s"
-
msgid "Create networks failed"
msgstr "Erstellen von Netzen fehlgeschlagen"
@@ -1215,21 +1212,6 @@ msgstr "UUID erwartet, aber %(uuid)s erhalten."
msgid "Expected object of type: %s"
msgstr "Erwartetes Objekt vom Typ: %s"
-msgid "Expecting a list of resources"
-msgstr "Liste von Ressource wird erwartet"
-
-msgid "Expecting a list of tagSets"
-msgstr "Liste von tagSets wird erwartet"
-
-msgid "Expecting both key and value to be set"
-msgstr "Festzulegender Schlüssel und Wert erwartet"
-
-msgid "Expecting key to be set"
-msgstr "Festzulegender Schlüssel wird erwartet"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "tagSet wird als Schlüssel/Wert-Paare erwartet"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "Zusatzspalte %(table)s.%(column)s in Spiegeltabelle"
@@ -1410,10 +1392,6 @@ msgstr "Instanz konnte nicht ausgesetzt werden: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "Instanz konnte nicht beendet werden: %(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "Fehler bei der Analyse der Antwort von Keystone: %s"
-
msgid "Failure prepping block device."
msgstr "Fehler beim Vorbereiten des Block-Gerätes."
@@ -1502,14 +1480,6 @@ msgstr ""
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr ""
-"Zusätzliche Spezifikation für Version %(id)d kann nach %(retries)d "
-"Neuversuchen nicht aktualisiert oder erstellt werden."
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
@@ -1581,9 +1551,6 @@ msgstr "Dynamische IP-Adresse %(address)s Verknüpfung ist fehlgeschlagen."
msgid "Floating IP allocate failed."
msgstr "Allozierung von Floating IP fehlgeschlagen."
-msgid "Floating ip is not associated."
-msgstr "Dynamische IP-Adresse ist nicht zugeordnet."
-
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
"meta."
@@ -1759,9 +1726,6 @@ msgstr "Grenzwert für Imagemetadaten überschritten"
msgid "Image model '%(image)s' is not supported"
msgstr "Abbild-Modell '%(image)s' wird nicht unterstützt"
-msgid "Image must be available"
-msgstr "Abbild muss verfügbar sein"
-
msgid "Image not found."
msgstr "Abbild nicht gefunden."
@@ -2002,9 +1966,6 @@ msgstr "Schnittstelle %(interface)s nicht gefunden."
msgid "Invalid Base 64 data for file %(path)s"
msgstr "Ungültige Basis-64-Daten für Datei %(path)s"
-msgid "Invalid CIDR"
-msgstr "Ungültige CIDR"
-
msgid "Invalid Connection Info"
msgstr "Ungültige Verbindungsinformation"
@@ -2017,10 +1978,6 @@ msgid "Invalid IP format %s"
msgstr "Ungültiges IP-Format %s"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "Ungültiges IP-Protokoll %(protocol)s"
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "Ungültiges IP Protokoll %(protocol)s."
@@ -2314,20 +2271,9 @@ msgid "Invalid usage_type: %s"
msgstr "Ungültiger usage_type: %s"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"Ungültiger Wert '%(ec2_instance_id)s' für 'instanceId'. Instanz hat keinen "
-"Datenträger angehängt im Stammverzeichnis (%(root)s)"
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "Wert '%s' für Zwangsausführung ungültig. "
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "Ungültiger Wert für 'scheduler_max_attempts', muss >= 1 sein"
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "Ungültiger Wert für Konfigurationslaufwerkoption: %(option)s"
@@ -2929,9 +2875,6 @@ msgstr "Kein Anforderungshauptteil"
msgid "No root disk defined."
msgstr "Keine Root-Festplatte bestimmt."
-msgid "No rule for the specified parameters."
-msgstr "Keine Regel für die angegebenen Parameter."
-
msgid "No suitable network for migrate"
msgstr "Kein geeignetes Netzwerk zum Migrieren"
@@ -2965,10 +2908,6 @@ msgstr "Es kann kein freier Port für %(host)s angefordert werden"
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "%(host)s:%(port)d kann nicht gebunden werden, %(error)s"
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "Es ist nicht zulässig, Attribute von Image %s zu ändern "
-
msgid "Not an rbd snapshot"
msgstr "Keine RBD-Momentaufnahme"
@@ -3044,9 +2983,6 @@ msgstr ""
msgid "Only host parameter can be specified"
msgstr "Nur der Hostparameter kann angegeben werden"
-msgid "Only instances implemented"
-msgstr "Nur Instanzen implementiert"
-
msgid "Only root certificate can be retrieved."
msgstr "Nur Stammzertifikat abrufbar."
@@ -3455,14 +3391,6 @@ msgstr ""
"erforderlich"
#, python-format
-msgid ""
-"Running Nova with qemu/kvm virt_type on s390/s390x requires libvirt version "
-"%(libvirt_ver)s and qemu version %(qemu_ver)s, or greater"
-msgstr ""
-"Starten Nova mit qemu/kvm virt_type an s390/s390x erfordert libvirt Version "
-"%(libvirt_ver)s und qemu Version %(qemu_ver)s, oder größer"
-
-#, python-format
msgid "Running cmd (subprocess): %s"
msgstr "Führe Kommando (subprocess) aus: %s"
@@ -3647,9 +3575,6 @@ msgstr ""
"Verkleinern des Dateisystems mit resize2fs ist fehlgeschlagen; überprüfen "
"Sie, ob auf Ihrer Platte noch genügend freier Speicherplatz vorhanden ist."
-msgid "Signature not provided"
-msgstr "Signatur nicht angegeben"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "Momentaufnahme %(snapshot_id)s konnte nicht gefunden werden."
@@ -4055,9 +3980,6 @@ msgstr "Zeitüberschreitung beim Warten auf Antwort von der Zelle"
msgid "Timeout while checking if we can live migrate to host: %s"
msgstr "Zeitüberschreitung bei der Überprüfung der Live-Migration zu Host: %s"
-msgid "Timestamp failed validation."
-msgstr "Fehler bei Überprüfung der Zeitmarke. "
-
msgid "To and From ports must be integers"
msgstr "Eingangs- und Ausgangsports müssen Ganzzahlen sein"
@@ -4072,18 +3994,12 @@ msgstr ""
"Es werden zu viele IP-Adressen generiert. Erhöhen Sie /%s, um die Anzahl der "
"generierten Adressen zu verringern."
-msgid "Too many failed authentications."
-msgstr "Zu viele fehlgeschlagene Authentifizierungen."
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr "Typ und Code müssen für den ICMP-Protokolltyp Ganzzahlen sein"
msgid "UUID is required to delete Neutron Networks"
msgstr "UUID ist zum Löschen von Neutron-Netzen erforderlich"
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "IP-Adresse kann nicht zugeordnet werden, keine 'fixed_ips'."
-
msgid "Unable to authenticate Ironic client."
msgstr "Ironic-Client kann nicht authentifiziert werden. "
@@ -4386,9 +4302,6 @@ msgstr ""
msgid "Unknown delete_info type %s"
msgstr "Unbekannter delete_info-Typ %s"
-msgid "Unknown error occurred."
-msgstr "Unbekannter Fehler aufgetreten."
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "Unbekannter image_type=%s"
@@ -4638,12 +4551,6 @@ msgid "Volume resource quota exceeded"
msgstr "Quotaüberschreitung Datenträgerressource"
#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
-msgstr ""
-"Datenträger legt Blockgröße fest, aber mindestens libvirt '%s' ist "
-"erforderlich."
-
-#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
@@ -4787,9 +4694,6 @@ msgstr " Block_Geräte_Zuordnung muß eine Liste sein"
msgid "block_device_mapping_v2 must be a list"
msgstr "Block_Geräte_Zuordnung_v2 muß eine Liste sein"
-msgid "can't build a valid rule"
-msgstr "Es kann keine gültige Regel erstellt werden"
-
msgid "cannot delete non-existent key"
msgstr "nicht vorhandener Schlüssel kann nicht gelöscht werden"
@@ -4905,13 +4809,6 @@ msgstr "Abbild"
msgid "image already mounted"
msgstr "Abbild bereits eingehängt"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "Abbild von %(instance)s bei %(now)s"
-
-msgid "imageLocation is required"
-msgstr "imageLocation ist erforderlich"
-
msgid "index"
msgstr "Index"
@@ -4996,9 +4893,6 @@ msgstr "NBD-Einheit %s wurde nicht angezeigt"
msgid "nbd unavailable: module not loaded"
msgstr "nbd nicht verfügbar: Modul nicht geladen"
-msgid "need group_name or group_id"
-msgstr "'group_name' oder 'group_id' erforderlich"
-
msgid "network"
msgstr "Netzwerk"
@@ -5028,15 +4922,9 @@ msgstr ""
msgid "onSharedStorage must be specified."
msgstr "onSharedStorage muss angegeben werden."
-msgid "only group \"all\" is supported"
-msgstr "nur Gruppe \"Alle\" wird unterstützt"
-
msgid "operation time out"
msgstr "Vorgangszeitüberschreitung"
-msgid "operation_type must be add or remove"
-msgstr "'operation_type' muss 'add' oder 'remove' sein"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr "os-getConsoleOutput fehlerhaft oder fehlt in Anforderungshauptteil"
@@ -5074,9 +4962,6 @@ msgstr "rbd Python-Bibliotheken nicht gefunden"
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr "'read_deleted' kann nur 'no', 'yes' oder 'only' sein, nicht '%r'"
-msgid "resource_id and tag are required"
-msgstr "resource_id und Tag sind erforderlich"
-
msgid "rpc_port must be integer"
msgstr "rpc_port muss eine Ganzzahl sein"
@@ -5145,9 +5030,6 @@ msgstr "nicht unterstützte Felder: %s"
msgid "user"
msgstr "Benutzer"
-msgid "user or group not specified"
-msgstr "Benutzer oder Gruppe nicht bestimmt"
-
msgid "uuid"
msgstr "UUID"
diff --git a/nova/locale/es/LC_MESSAGES/nova-log-critical.po b/nova/locale/es/LC_MESSAGES/nova-log-critical.po
index e2b0f6e8dd..6a22abe798 100644
--- a/nova/locale/es/LC_MESSAGES/nova-log-critical.po
+++ b/nova/locale/es/LC_MESSAGES/nova-log-critical.po
@@ -7,19 +7,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.dev41\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-27 06:30+0000\n"
-"PO-Revision-Date: 2015-08-16 05:55+0000\n"
-"Last-Translator: Ed Gonzalez <proplw@gmail.com>\n"
-"Language-Team: Spanish\n"
-"Language: es\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-08-16 05:55+0000\n"
+"Last-Translator: Ed Gonzalez <proplw@gmail.com>\n"
+"Language: es\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Spanish\n"
#, python-format
msgid "Missing core API extensions: %s"
diff --git a/nova/locale/es/LC_MESSAGES/nova-log-error.po b/nova/locale/es/LC_MESSAGES/nova-log-error.po
index dc48c8d076..00de70e6d9 100644
--- a/nova/locale/es/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/es/LC_MESSAGES/nova-log-error.po
@@ -11,21 +11,22 @@
# Sergio Cuellar Valdes <scuellarv@kionetworks.com>, 2015
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
# Travis Nguyen <travisn@us.ibm.com>, 2015. #zanata
+# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-10-07 05:39+0000\n"
-"Last-Translator: Travis Nguyen <travisn@us.ibm.com>\n"
-"Language-Team: Spanish\n"
-"Language: es\n"
+"POT-Creation-Date: 2016-02-08 05:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-01-20 01:51+0000\n"
+"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
+"Language: es\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Spanish\n"
msgid "\"Look for the VDIs failed"
msgstr "\"Ha fallado la búsqueda de las VDIs"
@@ -207,10 +208,6 @@ msgid "Environment variable 'NETWORK_ID' must be set."
msgstr "La variable de entorno 'NETWORK_ID' debe ser establecida."
#, python-format
-msgid "Environment: %s"
-msgstr "Entorno: %s"
-
-#, python-format
msgid "Error copying key: %s"
msgstr "Error al copiar la llave: %s"
@@ -356,10 +353,6 @@ msgstr "Error al intentar Rescatar Instancia"
msgid "Error trying to reschedule"
msgstr "Error al intentar volver a programar "
-#, python-format
-msgid "Error updating resources for node %(node)s: %(e)s"
-msgstr "Error al actualizar recursos para nodo %(node)s: %(e)s"
-
msgid "Error waiting for responses from neighbor cells"
msgstr "Error al esperar respuestas de celdas vecinas"
@@ -699,9 +692,6 @@ msgstr "Error al limpiar VDI conectados "
msgid "Fatal Exception running %(name)s %(type)s-hook: %(obj)s"
msgstr "Excepción fatal ejecutando %(name)s %(type)s-enlace: %(obj)s"
-msgid "FaultWrapper error"
-msgstr "Error de FaultWrapper"
-
msgid "Guest does not have a console available"
msgstr "El invitado no tiene una consola disponible"
@@ -756,10 +746,6 @@ msgid "Invalid server_string: %s"
msgstr "Serie del servidor no válido: %s"
#, python-format
-msgid "Keystone failure: %s"
-msgstr "Falla de keystone: %s"
-
-#, python-format
msgid "Live Migration failure: %s"
msgstr "Fallo en migración en vivo: %s"
@@ -946,7 +932,7 @@ msgid ""
"%(args)r"
msgstr ""
"La llamada del agente para %(method)s ha arrojado una respuesta inválida: "
-"%(ret)r. args=$(args)r"
+"%(ret)r. args=%(args)r"
#, python-format
msgid "The call to %(method)s returned an error: %(e)s."
@@ -1042,10 +1028,6 @@ msgid "Unable to parse rrd of %s"
msgstr "Incapaz de analizar rrd de %s"
#, python-format
-msgid "Unable to preallocate image at path: %(path)s"
-msgstr "No es posible pre-asignar imagen a ruta: %(path)s"
-
-#, python-format
msgid "Unable to retrieve storage policy with name %s"
msgstr ""
"No ha sido posible recuperar la política de almacenamiento de nombre %s"
@@ -1065,10 +1047,6 @@ msgstr "Incapaz de actualizar el anfitrión del puerto %s"
msgid "Unable to update instance VNIC index for port %s."
msgstr "No pudo actualizar el índice VNIC de la instancia para el puerto %s."
-#, python-format
-msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
-msgstr "Inesperado %(ex_name)s: %(ex_str)s"
-
msgid "Unexpected build failure, not rescheduling build."
msgstr "Fallo de compilación inesperado, no se reprogramará la compilación."
diff --git a/nova/locale/es/LC_MESSAGES/nova-log-info.po b/nova/locale/es/LC_MESSAGES/nova-log-info.po
index 6c86123393..12b3515f8b 100644
--- a/nova/locale/es/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/es/LC_MESSAGES/nova-log-info.po
@@ -9,19 +9,19 @@
# Travis Nguyen <travisn@us.ibm.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-09-29 04:25+0000\n"
-"Last-Translator: Travis Nguyen <travisn@us.ibm.com>\n"
-"Language-Team: Spanish\n"
-"Language: es\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-09-29 04:25+0000\n"
+"Last-Translator: Travis Nguyen <travisn@us.ibm.com>\n"
+"Language: es\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Spanish\n"
#, python-format
msgid "%(action_str)s instance"
@@ -68,27 +68,14 @@ msgid "Adding security group %(security_group_id)s to port %(port_id)s"
msgstr ""
"Añadiendo grupo de seguridad %(security_group_id)s al puerto %(port_id)s"
-msgid "Allocate address"
-msgstr "Asignar dirección"
-
#, python-format
msgid "Allocated network: '%s' for instance"
msgstr "Red asignada: '%s' para instancia"
-#, python-format
-msgid "Associate address %(public_ip)s to instance %(instance_id)s"
-msgstr "Asociar dirección %(public_ip)s a instancia %(instance_id)s"
-
msgid "Attach interface"
msgstr "Conectar interfaz"
#, python-format
-msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
-msgstr ""
-"Conectar el volumen %(volume_id)s a la instancia %(instance_id)s en "
-"%(device)s"
-
-#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr ""
"Conectar el volumen %(volume_id)s a la instancia %(server_id)s en %(device)s"
@@ -204,22 +191,10 @@ msgid "Create assisted snapshot from volume %s"
msgstr "Se ha creado instantánea asistida del volúmen %s"
#, python-format
-msgid "Create key pair %s"
-msgstr "Creando par de claves %s"
-
-#, python-format
msgid "Create snapshot from volume %s"
msgstr "Crear instantánea desde el volumen %s"
#, python-format
-msgid "Create snapshot of volume %s"
-msgstr "Crear instantánea del volumen %s"
-
-#, python-format
-msgid "Create volume from snapshot %s"
-msgstr "Crear volumen desde la instantánea %s"
-
-#, python-format
msgid "Create volume of %s GB"
msgstr "Crear volumen de %s GB"
@@ -238,14 +213,6 @@ msgid "Creating image"
msgstr "Creando imagen"
#, python-format
-msgid "De-registering image %s"
-msgstr "Des-registrando la imagen %s"
-
-#, python-format
-msgid "Delete key pair %s"
-msgstr "Borrar para de claves %s"
-
-#, python-format
msgid "Delete security group %s"
msgstr "Borrar grupo de seguridad %s"
@@ -328,10 +295,6 @@ msgid "Disabling host %s."
msgstr "Inhabilitando el host %s."
#, python-format
-msgid "Disassociate address %s"
-msgstr "Desasociar dirección %s"
-
-#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr "Desconectando VDI obsoleto %s de domU de cálculo "
@@ -492,10 +455,6 @@ msgid "Get console output"
msgstr "Obtener salida de consola "
#, python-format
-msgid "Get console output for instance %s"
-msgstr "Obtener salida de la consola para la instancia %s"
-
-#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
@@ -558,10 +517,6 @@ msgstr ""
"%(image_id)s"
#, python-format
-msgid "Import key %s"
-msgstr "Importar la clave %s"
-
-#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database: "
"assuming it is a worker VM and skip ping migration to a new host"
@@ -780,10 +735,6 @@ msgstr ""
msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr "Poniendo el host %(host_name)s en modalidad de mantenimiento %(mode)s."
-#, python-format
-msgid "Reboot instance %r"
-msgstr "Reiniciar instancia %r"
-
msgid "Rebooting instance"
msgstr "Rearrancando instancia"
@@ -824,14 +775,6 @@ msgid "Reclaiming deleted instance"
msgstr "Reclamando instancia suprimida"
#, python-format
-msgid "Registered image %(image_location)s with id %(image_id)s"
-msgstr "Imagen registrada %(image_location)s con el id %(image_id)s"
-
-#, python-format
-msgid "Release address %s"
-msgstr "Liberar dirección %s"
-
-#, python-format
msgid "Removable base files: %s"
msgstr "Archivos de base eliminables: %s"
@@ -1034,13 +977,6 @@ msgstr ""
"la ruta del código lenta: %(ex)s"
#, python-format
-msgid ""
-"Unauthorized request for controller=%(controller)s and action=%(action)s"
-msgstr ""
-"Solicitud no autorizada para el controlador=%(controller)s y la acción="
-"%(action)s"
-
-#, python-format
msgid "Unexpected error: %s"
msgstr "Error inesperado: %s "
@@ -1058,10 +994,6 @@ msgid "Updating from migration %s"
msgstr "Actualizando desde la migración %s"
#, python-format
-msgid "Updating image %s publicity"
-msgstr "Actualizando imagen %s públicamente"
-
-#, python-format
msgid "Updating instance to original state: '%s'"
msgstr "Actualizando el estado original de instancia hacia: '%s'"
diff --git a/nova/locale/es/LC_MESSAGES/nova-log-warning.po b/nova/locale/es/LC_MESSAGES/nova-log-warning.po
index e5889c5ce8..0a76a75760 100644
--- a/nova/locale/es/LC_MESSAGES/nova-log-warning.po
+++ b/nova/locale/es/LC_MESSAGES/nova-log-warning.po
@@ -10,19 +10,19 @@
# Travis Nguyen <travisn@us.ibm.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-10-07 05:39+0000\n"
-"Last-Translator: Travis Nguyen <travisn@us.ibm.com>\n"
-"Language-Team: Spanish\n"
-"Language: es\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-10-07 05:39+0000\n"
+"Last-Translator: Travis Nguyen <travisn@us.ibm.com>\n"
+"Language: es\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Spanish\n"
#, python-format
msgid ""
@@ -72,14 +72,6 @@ msgstr ""
"de respaldo!"
#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and "
-"will be locked out for %(lock_mins)d minutes."
-msgstr ""
-"La clave de acceso %(access_key)s ha tenido %(failures)d autenticaciones "
-"anómalas y estará bloqueada durante %(lock_mins)d minutos."
-
-#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr "La dirección |%(address)s| no está asignada"
@@ -958,16 +950,6 @@ msgstr ""
"%(version)s. La versión mínima requerida de vCenter será aumentada a "
"%(version)s en el lanzamiento 13.0.0."
-#, python-format
-msgid ""
-"Running Nova with a libvirt version less than %(version)s is deprecated. The "
-"required minimum version of libvirt will be raised to %(version)s in the "
-"13.0.0 release."
-msgstr ""
-"Se desaprueba ejecutar Nova con una version de libvirt inferior a "
-"%(version)s. La version minima requerida de libvirt será aumentada a "
-"%(version)s en el lanzamiento 13.0.0."
-
msgid ""
"Running libvirt-lxc without user namespaces is dangerous. Containers spawned "
"by Nova will be run as the host's root user. It is highly suggested that "
@@ -1367,10 +1349,6 @@ msgid "multiple fixed_ips exist, using the first IPv4 fixed_ip: %s"
msgstr "existen múltiples fixed_ips, utilizando la primer IPv4 fixed_ip: %s"
#, python-format
-msgid "multiple fixed_ips exist, using the first: %s"
-msgstr "existen múltiples fixed_ips, utilizando la primera: %s"
-
-#, python-format
msgid ""
"my_ip address (%(my_ip)s) was not found on any of the interfaces: %(ifaces)s"
msgstr ""
diff --git a/nova/locale/es/LC_MESSAGES/nova.po b/nova/locale/es/LC_MESSAGES/nova.po
index 96d91c64d2..198c6d3f47 100644
--- a/nova/locale/es/LC_MESSAGES/nova.po
+++ b/nova/locale/es/LC_MESSAGES/nova.po
@@ -1,22 +1,36 @@
-# Spanish translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
-msgid ""
-msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+# Translators:
+# Adriana Chisco Landazábal <achisco94@gmail.com>, 2015
+# Alberto Molina Coballes <alb.molina@gmail.com>, 2012-2014
+# Ying Chun Guo <daisy.ycguo@gmail.com>, 2013
+# David Martinez Morata, 2014
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011
+# Jose Ramirez Garcia <jose.ramirez.rk@gmail.com>, 2014
+# Edgar Carballo <karvayoEdgar@gmail.com>, 2013
+# Pablo Sanchez <furybeat@gmail.com>, 2015
+# Marian Tort <marian.tort@gmail.com>, 2015. #zanata
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Travis Nguyen <travisn@us.ibm.com>, 2015. #zanata
+# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
+# Tom Cocozzello <tjcocozz@us.ibm.com>, 2016. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-10-07 05:39+0000\n"
-"Last-Translator: Travis Nguyen <travisn@us.ibm.com>\n"
-"Language: es\n"
-"Language-Team: Spanish\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
+"PO-Revision-Date: 2016-01-20 01:51+0000\n"
+"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
+"Language: es\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Spanish\n"
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
@@ -229,9 +243,6 @@ msgstr ""
msgid "API version %(version)s is not supported on this method."
msgstr "Versión API %(version)s, no soportada en este método."
-msgid "Access key not provided"
-msgstr "Clave de acceso no proporcionada"
-
msgid "Access list not available for public flavors."
msgstr "La lista de acceso no está disponible para tipos públicos. "
@@ -424,6 +435,14 @@ msgstr ""
msgid "Binary"
msgstr "Binario"
+#, python-format
+msgid ""
+"Binding failed for port %(port_id)s, please check neutron logs for more "
+"information."
+msgstr ""
+"El enlace ha fallado para el puerto %(port_id)s, compruebe los registros de "
+"neutron para más información."
+
msgid "Blank components"
msgstr "Componentes en blanco"
@@ -534,6 +553,14 @@ msgstr "El numero de CPU %(cpuset)s no esta asignado a ningún nodo"
msgid "CPU pinning is not supported by the host: %(reason)s"
msgstr "el host no soporta anclaje de CPU: %(reason)s"
+#, python-format
+msgid ""
+"CPU set to pin/unpin %(requested)s must be a subset of known CPU set "
+"%(cpuset)s"
+msgstr ""
+"La CPU establecida para anclar/desanclar %(requested)s debe ser un "
+"subconjunto de CPU conocido %(cpuset)s"
+
msgid "Can not add access to a public flavor."
msgstr "No se puede añadir acceso al sabor público."
@@ -881,10 +908,6 @@ msgstr "No se ha podido encontrar el binario %(binary)s en el host %(host)s."
msgid "Could not find config at %(path)s"
msgstr "No se ha podido encontrar configuración en %(path)s"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "No se ha podido encontrar par(es) de claves: %s "
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
"No se ha podido encontrar la(s) referencia(s) de almacén de datos que la MV "
@@ -921,14 +944,6 @@ msgstr "No se ha podido cargar la imagen %(image_id)s"
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "No se pudo obtener enlace de la ip local de %(interface)s :%(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"No se pudo detener la instancia %(instance)s durante 1 hora. Actual "
-"vm_state: %(vm_state)s, actual task_state: %(task_state)s"
-
msgid "Create networks failed"
msgstr "Ha fallado la creación de redes"
@@ -1195,21 +1210,6 @@ msgstr "Se esperaba un uuid pero se ha recibido %(uuid)s."
msgid "Expected object of type: %s"
msgstr "Se esperaba un objeto de tipo: %s"
-msgid "Expecting a list of resources"
-msgstr "Esperando una lista de recursos"
-
-msgid "Expecting a list of tagSets"
-msgstr "Esperando una lista de tagSets"
-
-msgid "Expecting both key and value to be set"
-msgstr "Esperando establecimiento tanto de clave como valor"
-
-msgid "Expecting key to be set"
-msgstr "Esperando el establecimiento de la clave"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "Esperando que tagSet sea un par clave/valor"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "Columna extra %(table)s.%(column)s en la tabla shadow"
@@ -1383,10 +1383,6 @@ msgstr "Fallo al suspender instancia: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "Fallo al terminar la instancia: %(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "Error al analizar la respuesta de keystone: %s"
-
msgid "Failure prepping block device."
msgstr "Fallo al preparar el dispositivo de bloque."
@@ -1474,14 +1470,6 @@ msgstr ""
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr ""
-"No se puede crear o actualizar el tipo %(id)d de especificaciones "
-"adicionales después de %(retries)d intentos."
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
@@ -1535,6 +1523,14 @@ msgstr ""
"metadatos del imagen. El disco del sabor es %(flavor_size)i bytes, tamaño "
"mínimo es %(image_min_disk)i bytes."
+#, python-format
+msgid ""
+"Flavor's disk is too small for requested image. Flavor disk is "
+"%(flavor_size)i bytes, image is %(image_size)i bytes."
+msgstr ""
+"El disco Flavor es demasiado pequeño para la imagen solicitada. El disco "
+"Flavor tiene %(flavor_size)i bytes, la imagen tiene %(image_size)i bytes."
+
msgid "Flavor's memory is too small for requested image."
msgstr "La memoria del tipo es demasiado pequeña para la imagen solicitada."
@@ -1545,9 +1541,6 @@ msgstr "Ha fallado la asociación de IP flotante %(address)s."
msgid "Floating IP allocate failed."
msgstr "Falló asignación de IP flotante."
-msgid "Floating ip is not associated."
-msgstr "La IP flotante no está asociada. "
-
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
"meta."
@@ -1721,9 +1714,6 @@ msgstr "Se ha superado el límite de metadatos de imágenes"
msgid "Image model '%(image)s' is not supported"
msgstr "No se soporta modelo de imagen '%(image)s'"
-msgid "Image must be available"
-msgstr "La imagen debe estar disponible "
-
msgid "Image not found."
msgstr "Imagen no encontrada."
@@ -1963,9 +1953,6 @@ msgstr "No se ha encontrado la interfaz %(interface)s."
msgid "Invalid Base 64 data for file %(path)s"
msgstr "Datos Base-64 inválidos para el archivo %(path)s"
-msgid "Invalid CIDR"
-msgstr "CIDR no válido"
-
msgid "Invalid Connection Info"
msgstr "Información de conexión no válida"
@@ -1978,10 +1965,6 @@ msgid "Invalid IP format %s"
msgstr "Formato IP inválido %s"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "Protocolo IP no válido %(protocol)s"
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "Protocolo IP invalido %(protocol)s"
@@ -2125,7 +2108,7 @@ msgstr "Se ha proporcionado una referencia de imagen no válida."
#, python-format
msgid "Invalid inclusion expression %r"
-msgstr "Expresión de inclusión inválida %"
+msgstr "Expresión de inclusión inválida %r"
#, python-format
msgid ""
@@ -2218,7 +2201,7 @@ msgstr "Firma de solicitud de proxy no válida."
#, python-format
msgid "Invalid range expression %r"
-msgstr "Expresión de intérvalo inválida %"
+msgstr "Expresión de intérvalo inválida %r"
msgid "Invalid request body"
msgstr "Cuerpo de solicitud no válido"
@@ -2277,20 +2260,9 @@ msgid "Invalid usage_type: %s"
msgstr "usage_type: %s no válido"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"Valor no válido '%(ec2_instance_id)s' para el ID de instancia. La instancia "
-"no tiene ningún volumen conectado en la raíz (%(root)s)."
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "Valor inválido '%s' para forzar."
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "Valor no válido para 'scheduler_max_attempts', debe ser >= 1 "
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr ""
@@ -2430,6 +2402,14 @@ msgstr ""
"Lista de los sistemas de archivo que estan configurados en este archivo en "
"la image_file_url: secciones <list entry name>"
+msgid ""
+"Live migration can not be used without shared storage except a booted from "
+"volume VM which does not have a local disk."
+msgstr ""
+"No se puede utilizar la migración en directo sin el almacenamiento "
+"compartido excepto si arranca desde VM de volumen que no tiene un disco "
+"local."
+
msgid "Live migration is supported starting with Hyper-V Server 2012"
msgstr "la migración en vivo está soportada a partir de Hyper-V server 2012"
@@ -2890,9 +2870,6 @@ msgstr "Ningún cuerpo de solicitud "
msgid "No root disk defined."
msgstr "No se ha definido un disco raíz."
-msgid "No rule for the specified parameters."
-msgstr "No hay regla para los parámetros especificados."
-
msgid "No suitable network for migrate"
msgstr "No hay red adecuada para migrar"
@@ -2927,10 +2904,6 @@ msgstr "No se puede obtener un puerto libre para %(host)s"
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "No se puede enlazar %(host)s:%(port)d, %(error)s"
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "No está permitido modificar los atributos para la imagen %s"
-
msgid "Not an rbd snapshot"
msgstr "No es una instantánea rbd"
@@ -3007,9 +2980,6 @@ msgstr ""
msgid "Only host parameter can be specified"
msgstr "Solo se puede especificar el parámetro host"
-msgid "Only instances implemented"
-msgstr "Sólo están implementadas instancias"
-
msgid "Only root certificate can be retrieved."
msgstr "Sólo se puede recuperar el certificado raíz. "
@@ -3180,6 +3150,10 @@ msgstr "La acción watchdog proporcionada (%(action)s) no está soportada."
msgid "QEMU guest agent is not enabled"
msgstr "Agente invitado QEMU no está habilitado"
+#, python-format
+msgid "Quiescing is not supported in instance %(instance_id)s"
+msgstr "No hay soporte para la desactivación en la instancia %(instance_id)s"
+
msgid "Quota"
msgstr "Cuota"
@@ -3191,6 +3165,14 @@ msgid "Quota could not be found"
msgstr "No se ha podido encontrar la cuota"
#, python-format
+msgid ""
+"Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s "
+"of %(allowed)s %(overs)s"
+msgstr ""
+"Se ha superado la cuota para %(overs)s: Solicitado %(req)s, pero ya se "
+"utiliza %(used)s de %(allowed)s %(overs)s"
+
+#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr "Cuota superada para recursos: %(overs)s"
@@ -3590,9 +3572,6 @@ msgstr ""
"La reducción del sistema de archivos con resize2fs ha fallado, por favor "
"verifica si tienes espacio libre suficiente en tu disco."
-msgid "Signature not provided"
-msgstr "Firma no proporcionada"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "No se ha podido encontrar la instantánea %(snapshot_id)s."
@@ -3719,6 +3698,9 @@ msgstr ""
"Por favor crea una base de datos usando 'nova-manage db sync' antes de "
"ejecutar este comando."
+msgid "The backlog must be more than 0"
+msgstr "El retraso debe ser mayor que 0"
+
#, python-format
msgid "The console port range %(min_port)d-%(max_port)d is exhausted."
msgstr ""
@@ -3738,6 +3720,11 @@ msgstr "La política PBM por defecto no existe en el backend."
msgid "The firewall filter for %s does not exist"
msgstr "El filtro de cortafuegos para %s no existe "
+msgid "The floating IP request failed with a BadRequest"
+msgstr ""
+"La solicitud de la IP flotante ha fallado con BadRequest (Solicitud "
+"incorrecta)"
+
#, python-format
msgid "The group %(group_name)s must be configured with an id."
msgstr "El grupo %(group_name)s debe ser configurado con un id."
@@ -3802,7 +3789,7 @@ msgstr ""
#, python-format
msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
-msgstr "El número de puertos definidos: %(ports)s es más del límite: %(quota)d"
+msgstr "El número de puertos definidos: %(ports)d es más del límite: %(quota)d"
msgid "The only partition should be partition 1."
msgstr "La unica partición debe ser la partición 1."
@@ -3846,6 +3833,10 @@ msgstr ""
"El servicio del controlador servicegroup %(driver)s está temporalmente no "
"disponible."
+#, python-format
+msgid "The specified cluster '%s' was not found in vCenter"
+msgstr "El clúster especificado '%s' no se ha encontrado en vCenter"
+
msgid ""
"The string containing the reason for disabling the service contains invalid "
"characters or is too long."
@@ -3985,8 +3976,11 @@ msgstr "Se ha excedido el tiempo esperando a que se creara el dispositivo %s"
msgid "Timeout waiting for response from cell"
msgstr "Se ha excedido el tiempo de espera de respuesta de la célula"
-msgid "Timestamp failed validation."
-msgstr "Ha fallado la validación de indicación de fecha y hora."
+#, python-format
+msgid "Timeout while checking if we can live migrate to host: %s"
+msgstr ""
+"Se ha agotado el tiempo de espera mientras se comprobaba si se puede migrar "
+"en directo al host: %s"
msgid "To and From ports must be integers"
msgstr "Puertos De y Hacia deben ser enteros"
@@ -4002,18 +3996,12 @@ msgstr ""
"Se generarán demasiadas direcciones IP. Por favor incremente /%s para "
"disminuir el número generado."
-msgid "Too many failed authentications."
-msgstr "Demasiados intentos de autenticacion fallidos."
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr "Tipo y Código deben ser enteros del tipo de protocolo ICMP"
msgid "UUID is required to delete Neutron Networks"
msgstr "UUID es obligatorio para borrar redes en Neutron"
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "No se puede asociar la dirección IP, sin fixed_ips."
-
msgid "Unable to authenticate Ironic client."
msgstr "No se puede autenticar cliente Ironic."
@@ -4139,6 +4127,12 @@ msgstr "No es posible obtener dominio dns"
msgid "Unable to get dns entry"
msgstr "No es posible obtener ingreso dns"
+msgid "Unable to get host UUID: /etc/machine-id does not exist"
+msgstr "No se puede obtener el UUID de host: /etc/machine-id no existe"
+
+msgid "Unable to get host UUID: /etc/machine-id is empty"
+msgstr "No se puede obtener el UUID de host: /etc/machine-id está vacío"
+
msgid "Unable to get rdp console, functionality not implemented"
msgstr "Incapaz de obtener consola rdp, funcionalidad no implementada"
@@ -4301,9 +4295,6 @@ msgstr ""
msgid "Unknown delete_info type %s"
msgstr "Tipo delete_info %s desconocido"
-msgid "Unknown error occurred."
-msgstr "Ha ocurrido un error desconocido."
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "image_type=%s desconocido "
@@ -4548,12 +4539,6 @@ msgid "Volume resource quota exceeded"
msgstr "Cuota de recurso de volumen excedida"
#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
-msgstr ""
-"El volúmen establece el tamaño de bloque, pero se requiere libvirt '%s' o "
-"mayor."
-
-#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
@@ -4642,6 +4627,17 @@ msgstr ""
msgid ""
"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
"your destination node does not support retrieving listen addresses. In "
+"order for live migration to work properly you must either disable serial "
+"console or upgrade your libvirt version."
+msgstr ""
+"La versión de libvirt no da soporte al distintivo VIR_DOMAIN_XML_MIGRATABLE "
+"o el nodo de destino no da soporte a la recuperación de las direcciones de "
+"escucha. Para que la migración en directo funcione correctamente, debe "
+"inhabilitar la consola serie o actualizar la versión de libvirt."
+
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
+"your destination node does not support retrieving listen addresses. In "
"order for live migration to work properly, you must configure the graphics "
"(VNC and/or SPICE) listen addresses to be either the catch-all address "
"(0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)."
@@ -4684,9 +4680,6 @@ msgstr "block_device_mapping debe ser una lista"
msgid "block_device_mapping_v2 must be a list"
msgstr "block_device_mapping_v2 debe ser una lista"
-msgid "can't build a valid rule"
-msgstr "No se ha podido crear una regla válida"
-
msgid "cannot delete non-existent key"
msgstr "No se puede eliminar claves no-existentes"
@@ -4805,13 +4798,6 @@ msgstr "imagen"
msgid "image already mounted"
msgstr "imagen ya montada"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "imagen de %(instance)s en %(now)s"
-
-msgid "imageLocation is required"
-msgstr "Se necesita imageLocation"
-
msgid "index"
msgstr "índice"
@@ -4893,9 +4879,6 @@ msgstr "el dispositivo nbd %s no se ha mostrado"
msgid "nbd unavailable: module not loaded"
msgstr "nbd no disponible: módulo no cargado"
-msgid "need group_name or group_id"
-msgstr "se necesita group_name o group_id"
-
msgid "network"
msgstr "red"
@@ -4913,18 +4896,22 @@ msgstr "nodo"
msgid "not able to execute ssh command: %s"
msgstr "No es posible ejecutar comando ssh: %s"
+msgid ""
+"nova-idmapshift is a tool that properly sets the ownership of a filesystem "
+"for use with linux user namespaces. This tool can only be used with linux "
+"lxc containers. See the man page for details."
+msgstr ""
+"nova-idmapshift es una herramienta que establece correctamente la propiedad "
+"de un sistema de archivos para su uso con espacios de nombres de usuarios "
+"linux. Esta herramienta sólo se puede utilizar con contenedores linux lxc. "
+"Consulte la página man para obtener información."
+
msgid "onSharedStorage must be specified."
msgstr "Se debe especificar onSharedStorage."
-msgid "only group \"all\" is supported"
-msgstr "sólo el grupo \"all\" está soportado"
-
msgid "operation time out"
msgstr "Tiempo de espera agotado para la operación"
-msgid "operation_type must be add or remove"
-msgstr "operation_type debe ser añadir o eliminar"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr ""
"os-getConsoleOutput formada incorrectamente u omitida en el cuerpo de "
@@ -4964,9 +4951,6 @@ msgstr "Las librerías rbd python no han sido encontradas"
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr "read_deleted solo puede ser 'no', 'yes' o 'only', no %r"
-msgid "resource_id and tag are required"
-msgstr "resource_id y tag son necesarios"
-
msgid "rpc_port must be integer"
msgstr "rpc_port debe ser un entero"
@@ -5034,9 +5018,6 @@ msgstr "Campos no soportados: %s"
msgid "user"
msgstr "usuario"
-msgid "user or group not specified"
-msgstr "usuario o grupo no especificado"
-
msgid "uuid"
msgstr "uuid"
diff --git a/nova/locale/es_MX/LC_MESSAGES/nova-log-critical.po b/nova/locale/es_MX/LC_MESSAGES/nova-log-critical.po
index 1d2360828c..4aacfa0bc2 100644
--- a/nova/locale/es_MX/LC_MESSAGES/nova-log-critical.po
+++ b/nova/locale/es_MX/LC_MESSAGES/nova-log-critical.po
@@ -7,19 +7,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.dev41\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-27 06:30+0000\n"
-"PO-Revision-Date: 2015-08-16 05:53+0000\n"
-"Last-Translator: Ed Gonzalez <proplw@gmail.com>\n"
-"Language-Team: Spanish (Mexico)\n"
-"Language: es-MX\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-08-16 05:53+0000\n"
+"Last-Translator: Ed Gonzalez <proplw@gmail.com>\n"
+"Language: es-MX\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Spanish (Mexico)\n"
#, python-format
msgid "Missing core API extensions: %s"
diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-critical.po b/nova/locale/fr/LC_MESSAGES/nova-log-critical.po
index 18ba08d156..4cedc058e9 100644
--- a/nova/locale/fr/LC_MESSAGES/nova-log-critical.po
+++ b/nova/locale/fr/LC_MESSAGES/nova-log-critical.po
@@ -7,19 +7,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.dev41\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-27 06:30+0000\n"
-"PO-Revision-Date: 2014-09-17 07:50+0000\n"
-"Last-Translator: Frédéric <frosmont@free.fr>\n"
-"Language-Team: French\n"
-"Language: fr\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-09-17 07:50+0000\n"
+"Last-Translator: Frédéric <frosmont@free.fr>\n"
+"Language: fr\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: French\n"
#, python-format
msgid "Missing core API extensions: %s"
diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-error.po b/nova/locale/fr/LC_MESSAGES/nova-log-error.po
index 993e37e56f..e41d990eeb 100644
--- a/nova/locale/fr/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/fr/LC_MESSAGES/nova-log-error.po
@@ -12,19 +12,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-11-27 05:04+0000\n"
-"Last-Translator: Maxime Coquerel <max.coquerel@gmail.com>\n"
-"Language-Team: French\n"
-"Language: fr\n"
+"POT-Creation-Date: 2016-02-08 05:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-11-27 05:04+0000\n"
+"Last-Translator: Maxime Coquerel <max.coquerel@gmail.com>\n"
+"Language: fr\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: French\n"
msgid "\"Look for the VDIs failed"
msgstr "Échec de la recherche du VDIs"
@@ -202,10 +202,6 @@ msgid "Environment variable 'NETWORK_ID' must be set."
msgstr "Variable d'environnement 'NETWORK_ID' doit être définie."
#, python-format
-msgid "Environment: %s"
-msgstr "Environnement : %s"
-
-#, python-format
msgid "Error copying key: %s"
msgstr "Erreur lors de la copie de clé: %s"
@@ -356,10 +352,6 @@ msgstr "Erreur en essayant de Sauver l'Instance"
msgid "Error trying to reschedule"
msgstr "Erreur lors de la tentative de replanification"
-#, python-format
-msgid "Error updating resources for node %(node)s: %(e)s"
-msgstr "Erreur lors de la mise a jour des ressources du nœud %(node)s: %(e)s"
-
msgid "Error waiting for responses from neighbor cells"
msgstr "Erreur lors de l'attente de réponses des cellules voisines"
@@ -728,9 +720,6 @@ msgstr "Echec lors du nettoyage des VDI connectées"
msgid "Fatal Exception running %(name)s %(type)s-hook: %(obj)s"
msgstr "Exception fatale de fonctionnement %(name)s %(type)s-hook: %(obj)s"
-msgid "FaultWrapper error"
-msgstr "Erreur FaultWrapper"
-
msgid "Guest does not have a console available"
msgstr "Aucune console n'est disponible pour l'invité"
@@ -783,10 +772,6 @@ msgid "Invalid server_string: %s"
msgstr "server_string non valide : %s"
#, python-format
-msgid "Keystone failure: %s"
-msgstr "Défaillance de Keystone : %s"
-
-#, python-format
msgid "Live Migration failure: %s"
msgstr "Migration Directe a échoué : %s"
@@ -1077,10 +1062,6 @@ msgid "Unable to parse rrd of %s"
msgstr "Impossible d’analyser %s de rrd"
#, python-format
-msgid "Unable to preallocate image at path: %(path)s"
-msgstr "Impossible de pré-allouer une image à l'emplacement: %(path)s"
-
-#, python-format
msgid "Unable to retrieve storage policy with name %s"
msgstr "Impossible de récuper les détails du stockage ayant le nom %s"
@@ -1100,10 +1081,6 @@ msgid "Unable to update instance VNIC index for port %s."
msgstr ""
"Impossible de mettre à jour les index des VNIC des instances pour le port %s."
-#, python-format
-msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
-msgstr "%(ex_name)s inattendu levé : %(ex_str)s"
-
msgid "Unexpected build failure, not rescheduling build."
msgstr "Échec inexpliqué de la génération, pas de construction re-planifiée."
diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-info.po b/nova/locale/fr/LC_MESSAGES/nova-log-info.po
index 3d357f9c71..7388366f78 100644
--- a/nova/locale/fr/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/fr/LC_MESSAGES/nova-log-info.po
@@ -10,19 +10,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-09-14 10:47+0000\n"
-"Last-Translator: Corinne Verheyde <cverheyd@gmail.com>\n"
-"Language-Team: French\n"
-"Language: fr\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-09-14 10:47+0000\n"
+"Last-Translator: Corinne Verheyde <cverheyd@gmail.com>\n"
+"Language: fr\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: French\n"
#, python-format
msgid "%(action_str)s instance"
@@ -63,23 +63,10 @@ msgstr "Fichiers de base actifs : %s"
msgid "Adding security group %(security_group_id)s to port %(port_id)s"
msgstr "Ajout du groupe de sécurité %(security_group_id)s au port %(port_id)s"
-msgid "Allocate address"
-msgstr "Allocation d'adresse"
-
-#, python-format
-msgid "Associate address %(public_ip)s to instance %(instance_id)s"
-msgstr "Association de l'adresse %(public_ip)s avec l'instance %(instance_id)s"
-
msgid "Attach interface"
msgstr "Connexion à l'interface"
#, python-format
-msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
-msgstr ""
-"Montage du volume %(volume_id)s sur l'instance %(instance_id)s en tant que "
-"%(device)s"
-
-#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr ""
"Connexion du volume %(volume_id)s à l'instance %(server_id)s à %(device)s"
@@ -155,22 +142,10 @@ msgid "Create assisted snapshot from volume %s"
msgstr "Créer un instantané assisté du volume %s"
#, python-format
-msgid "Create key pair %s"
-msgstr "Création du bi-clef %s"
-
-#, python-format
msgid "Create snapshot from volume %s"
msgstr "Créer un instantané depuis le volume %s"
#, python-format
-msgid "Create snapshot of volume %s"
-msgstr "Création de l'instantané du volume %s"
-
-#, python-format
-msgid "Create volume from snapshot %s"
-msgstr "Création du volume à partir de l'instantané %s"
-
-#, python-format
msgid "Create volume of %s GB"
msgstr "Création d'un volume de %s Go"
@@ -187,14 +162,6 @@ msgid "Creating image"
msgstr "Création de l'image"
#, python-format
-msgid "De-registering image %s"
-msgstr "Dé-enregitrement de l'image %s"
-
-#, python-format
-msgid "Delete key pair %s"
-msgstr "Suppression du bi-clef %s"
-
-#, python-format
msgid "Delete security group %s"
msgstr "Suppression du groupe de sécurité %s"
@@ -261,10 +228,6 @@ msgid "Disabling host %s."
msgstr "Désactivation de l'hôte %s."
#, python-format
-msgid "Disassociate address %s"
-msgstr "Désassociation de l'adresse %s"
-
-#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr "Déconnexion VDI périmé %s à partir de l'ordinateur domU"
@@ -355,10 +318,6 @@ msgid "Get console output"
msgstr "Obtenir l'affichage de la console"
#, python-format
-msgid "Get console output for instance %s"
-msgstr "Récupération de la sortie de la console de l'instance %s"
-
-#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
@@ -383,10 +342,6 @@ msgstr ""
"%(downloaded)s durée : %(duration).2f secondes pour l'image %(image_id)s"
#, python-format
-msgid "Import key %s"
-msgstr "Importation de la clé %s"
-
-#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database: "
"assuming it is a worker VM and skip ping migration to a new host"
@@ -542,10 +497,6 @@ msgstr "Opération postérieure à la migration démarrée"
msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr "Placement de l'hôte %(host_name)s en mode maintenance %(mode)s."
-#, python-format
-msgid "Reboot instance %r"
-msgstr "Re-démarrage de l'instance %r"
-
msgid "Rebooting instance"
msgstr "Réamorçage de l'instance"
@@ -563,14 +514,6 @@ msgid "Reclaiming deleted instance"
msgstr "Récupération de l'instance supprimée"
#, python-format
-msgid "Registered image %(image_location)s with id %(image_id)s"
-msgstr "Image %(image_location)s enregistré avec l'id %(image_id)s"
-
-#, python-format
-msgid "Release address %s"
-msgstr "Désallocation de l'adresse %s"
-
-#, python-format
msgid "Removable base files: %s"
msgstr "Fichiers de base pouvant être retirés : %s"
@@ -698,13 +641,6 @@ msgstr ""
"chemin : %(ex)s"
#, python-format
-msgid ""
-"Unauthorized request for controller=%(controller)s and action=%(action)s"
-msgstr ""
-"Requête non authorisé pour le controlleur=%(controller)s et l'action="
-"%(action)s"
-
-#, python-format
msgid "Unexpected error: %s"
msgstr "Erreur inattendue : %s"
@@ -722,10 +658,6 @@ msgid "Updating from migration %s"
msgstr "Mise à jour à partir de la migration %s"
#, python-format
-msgid "Updating image %s publicity"
-msgstr "Mis à jour de la publication de l'image %s"
-
-#, python-format
msgid "Updating instance to original state: '%s'"
msgstr "Mise à jour de l'instance à l'état d'origine: '%s'"
diff --git a/nova/locale/fr/LC_MESSAGES/nova-log-warning.po b/nova/locale/fr/LC_MESSAGES/nova-log-warning.po
index e34826b8d0..6a9a7dc2a9 100644
--- a/nova/locale/fr/LC_MESSAGES/nova-log-warning.po
+++ b/nova/locale/fr/LC_MESSAGES/nova-log-warning.po
@@ -10,19 +10,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-09-13 01:37+0000\n"
-"Last-Translator: Corinne Verheyde <cverheyd@gmail.com>\n"
-"Language-Team: French\n"
-"Language: fr\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-09-13 01:37+0000\n"
+"Last-Translator: Corinne Verheyde <cverheyd@gmail.com>\n"
+"Language: fr\n"
"Plural-Forms: nplurals=2; plural=(n > 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: French\n"
#, python-format
msgid "%(cmd)s failed. Sleeping %(time)s seconds before retry."
@@ -48,15 +48,6 @@ msgid "%s is already mounted"
msgstr "%s est déjà monté"
#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and "
-"will be locked out for %(lock_mins)d minutes."
-msgstr ""
-"La clef d'accès %(access_key)s a rencontrée %(failures)d echecs "
-"d'authentification et sera par conséquent vérouillée pour %(lock_mins)d "
-"minutes."
-
-#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr "L'adresse |%(address)s| n'est pas allouée"
@@ -723,16 +714,6 @@ msgstr ""
"%(version)s est déprécié. Le minimum requis de vCenter sera augmenté à "
"%(version)s dans la version 13.0.0 de Nova"
-#, python-format
-msgid ""
-"Running Nova with a libvirt version less than %(version)s is deprecated. The "
-"required minimum version of libvirt will be raised to %(version)s in the "
-"13.0.0 release."
-msgstr ""
-"L’exécution de Nova avec une version de libvirt inférieure à %(version)s est "
-"déprécié. La version minimum requise de libvirt sera augmenté a %(version)s "
-"dans la version 13.0.0 de Nova"
-
msgid ""
"Running libvirt-lxc without user namespaces is dangerous. Containers spawned "
"by Nova will be run as the host's root user. It is highly suggested that "
@@ -1010,10 +991,6 @@ msgid "instance-dns-zone not found |%s|."
msgstr "nstance-dns-zone non trouvé |%s|."
#, python-format
-msgid "multiple fixed_ips exist, using the first: %s"
-msgstr "plusieurs fixed_ips existent, utilisation de la première : %s"
-
-#, python-format
msgid ""
"my_ip address (%(my_ip)s) was not found on any of the interfaces: %(ifaces)s"
msgstr ""
diff --git a/nova/locale/fr/LC_MESSAGES/nova.po b/nova/locale/fr/LC_MESSAGES/nova.po
index 3b7b94cf36..a572155d92 100644
--- a/nova/locale/fr/LC_MESSAGES/nova.po
+++ b/nova/locale/fr/LC_MESSAGES/nova.po
@@ -1,22 +1,47 @@
-# French translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
-msgid ""
-msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+# Translators:
+# ariivarua <lchevouline@sgc-ingenierie.com>, 2013
+# Fabien B. <fabien.bavent@gmail.com>, 2013
+# Corina Roe <croe@redhat.com>, 2014
+# CryLegend <crylegend95@gmail.com>, 2013
+# EVEILLARD <stephane.eveillard@gmail.com>, 2013
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011
+# Frédéric <frosmont@free.fr>, 2014
+# GuiTsi <g-tsilefski@hotmail.fr>, 2013
+# Jonathan Dupart <jonathan+transifex@dupart.org>, 2014
+# Kodoku <kevintibi@hotmail.com>, 2013
+# Lucas Mascaro <mascaro.lucas@yahoo.fr>, 2015
+# Eric Marques <marques94@free.fr>, 2013
+# Maxime COQUEREL <max.coquerel@gmail.com>, 2014-2015
+# Andrew Melim <nokostya.translation@gmail.com>, 2014
+# Olivier Buisson <obuisson1976@gmail.com>, 2012
+# Patrice LACHANCE <patlachance@gmail.com>, 2013
+# EVEILLARD <stephane.eveillard@gmail.com>, 2013
+# Vincent JOBARD <vinzjobard@gmail.com>, 2013
+# Benjamin Godard <ben@hacktopie.net>, 2013
+# Corinne Verheyde <cverheyd@gmail.com>, 2015. #zanata
+# Olivier Buisson <olivier@openkumo.fr>, 2015. #zanata
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
+# Tom Cocozzello <tjcocozz@us.ibm.com>, 2016. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-10-07 11:52+0000\n"
-"Last-Translator: Corinne Verheyde <cverheyd@gmail.com>\n"
-"Language: fr\n"
-"Language-Team: French\n"
-"Plural-Forms: nplurals=2; plural=(n > 1)\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
+"PO-Revision-Date: 2016-02-02 09:55+0000\n"
+"Last-Translator: Tom Cocozzello <tjcocozz@us.ibm.com>\n"
+"Language: fr\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: French\n"
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
@@ -190,6 +215,10 @@ msgstr ""
"Aucun nom de modèle d'UC ne doit être défini lorsqu'un modèle d'UC hôte est "
"demandé"
+#, python-format
+msgid "A NetworkModel is required in field %s"
+msgstr "Un modèle de réseau est requis dans la zone %s"
+
msgid ""
"A unique ID given to each file system. This is value is set in Glance and "
"agreed upon here so that the operator knowns they are dealing with the same "
@@ -223,9 +252,6 @@ msgstr ""
msgid "API version %(version)s is not supported on this method."
msgstr "La version %(version)s de l'API n'est pas supporté par cette méthode "
-msgid "Access key not provided"
-msgstr "Clé d'accès non fournie"
-
msgid "Access list not available for public flavors."
msgstr "Liste d'accès non disponible pour les versions publiques."
@@ -325,6 +351,10 @@ msgstr "La stratégie de groupe d'instances anti-affinité a été violée."
msgid "Architecture name '%(arch)s' is not recognised"
msgstr "Nom d'architecture '%(arch)s' n'est pas reconnu"
+#, python-format
+msgid "Architecture name '%s' is not valid"
+msgstr "Le nom d'architecture '%s' n'est pas valide"
+
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr "L'argument 'type' pour le redémarrage n'est pas HARD ou SOFT"
@@ -422,6 +452,12 @@ msgstr ""
msgid "Blank components"
msgstr "Composants vides"
+msgid ""
+"Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size"
+msgstr ""
+"Les volumes vides (source : 'vide', dest : 'volume') doivent avoir une "
+"taille non zéro"
+
#, python-format
msgid "Block Device %(id)s is not bootable."
msgstr "L'unité par bloc %(id)s n'est pas amorçable."
@@ -519,6 +555,19 @@ msgstr "Le nombre de CPU %(cpunum)d est plus grand que le maximum %(cpumax)d"
msgid "CPU number %(cpuset)s is not assigned to any node"
msgstr "Le numéro de CPU %(cpuset)s n'est assigné à aucun node"
+#, python-format
+msgid "CPU pinning is not supported by the host: %(reason)s"
+msgstr ""
+"Le rivetage d'unité centrale n'est pas pris en charge par l'hôte : %(reason)s"
+
+#, python-format
+msgid ""
+"CPU set to pin/unpin %(requested)s must be a subset of known CPU set "
+"%(cpuset)s"
+msgstr ""
+"L'unité centrale définie à pin/unpin %(requested)s doit être un sous-"
+"ensemble d'un groupe d'unités centrales connu%(cpuset)s"
+
msgid "Can not add access to a public flavor."
msgstr "Impossible d'ajouter l'accès à un gabarit public."
@@ -600,6 +649,13 @@ msgstr ""
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr "Pas d'appel de %(method)s sur un objet %(objtype)s orphelin"
+msgid ""
+"Cannot create default bittorrent URL without xenserver.torrent_base_url "
+"configuration option set."
+msgstr ""
+"Impossible de créer une URL bittorrent par défaut sans définir l'option de "
+"configuration xenserver.torrent_base_url."
+
msgid "Cannot execute /sbin/mount.sofs"
msgstr "Impossible d'exécuter /sbin/mount.sofs"
@@ -867,10 +923,6 @@ msgstr "Impossible de trouver le binaire %(binary)s sur l'hôte %(host)s."
msgid "Could not find config at %(path)s"
msgstr "Configuration introuvable dans %(path)s"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "Impossible de trouver la ou les paires de clés : %s"
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
"Impossible de trouver la ou les références de magasin de données utilisé par "
@@ -906,14 +958,6 @@ msgstr "Impossible de télécharger l'image %(image_id)s"
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "Impossible de trouver l'IP du lien local de %(interface)s :%(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"L'instance %(instance)s n'a pas pu être stoppée en moins de 1 heure. Etat "
-"actuel de la VM: %(vm_state)s, etat actuel de la tache: %(task_state)s"
-
msgid "Create networks failed"
msgstr "La création de réseaux a échoué"
@@ -1124,6 +1168,26 @@ msgstr ""
"d'erreur %(error_code)s] %(ex)s"
#, python-format
+msgid ""
+"Error from libvirt while set password for username \"%(user)s\": [Error Code "
+"%(error_code)s] %(ex)s"
+msgstr ""
+"Erreur libvirt lors de la définition du mot de passe pour le nom "
+"d'utilisateur \"%(user)s\". [Code d'erreur : %(error_code)s] %(ex)s"
+
+#, python-format
+msgid ""
+"Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs "
+"(%(e)s)"
+msgstr ""
+"Erreur de montage de %(device)s pour %(dir)s dans l'image %(image)s avec "
+"libguestfs (%(e)s)"
+
+#, python-format
+msgid "Error mounting %(image)s with libguestfs (%(e)s)"
+msgstr "Erreur lors du montage de %(image)s avec libguestfs (%(e)s)"
+
+#, python-format
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Erreur lors de la création du moniteur de ressource : %(monitor)s"
@@ -1158,21 +1222,6 @@ msgstr "UUID attendu mais %(uuid)s reçu."
msgid "Expected object of type: %s"
msgstr "Objet attendu de type : %s"
-msgid "Expecting a list of resources"
-msgstr "En attente d'une liste de ressources"
-
-msgid "Expecting a list of tagSets"
-msgstr "En attente d'un ensemble d'étiquettes"
-
-msgid "Expecting both key and value to be set"
-msgstr "En attente à la fois de la clé et de la valeur pour être positionné"
-
-msgid "Expecting key to be set"
-msgstr "La clée doit être définie"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "En attente d'un ensemble d'étiquettes pour être des paires clé/valeur"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "Colonne supplémentaire %(table)s.%(column)s dans la table image"
@@ -1190,6 +1239,14 @@ msgstr ""
"attendue"
#, python-format
+msgid "Failed to access port %(port_id)s: %(reason)s"
+msgstr "Impossible d'accéder au port %(port_id)s : %(reason)s"
+
+#, python-format
+msgid "Failed to add bridge: %s"
+msgstr "Echec de l'ajout du pont : %s"
+
+#, python-format
msgid ""
"Failed to add deploy parameters on node %(node)s when provisioning the "
"instance %(instance)s"
@@ -1339,10 +1396,6 @@ msgstr "Échec à suspendre l'instance : %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "Échec à terminer l'instance : %(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "Echec de l'analyse syntaxique de la réponse de Keystone : %s"
-
msgid "Failure prepping block device."
msgstr "Echec de préparation de l'unité par bloc."
@@ -1372,6 +1425,12 @@ msgid "Filename of root Certificate Revocation List"
msgstr "Nom du fichier de la liste de révocation du Certificat Racine"
#, python-format
+msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
+msgstr ""
+"L'IP fixe %(ip)s n'est pas une adresse IP valide pour le réseau "
+"%(network_id)s."
+
+#, python-format
msgid "Fixed IP %s has been deleted"
msgstr "L'adresse IP fixe %s a été supprimée"
@@ -1427,14 +1486,6 @@ msgstr ""
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr ""
-"Les spécification supplémentaires du type d'instance %(id)d n'ont pas pu "
-"être mise a jour ou créées après %(retries)d essais."
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
@@ -1506,9 +1557,6 @@ msgstr "L'association de l'IP floattante %(address)s a échoué."
msgid "Floating IP allocate failed."
msgstr "L'allocation de l'IP flottante a échoué"
-msgid "Floating ip is not associated."
-msgstr "L'adresse IP flottante n'est pas associée."
-
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
"meta."
@@ -1575,6 +1623,10 @@ msgstr ""
"migrations live effectuée avec cette version peuvent causer des pertes de "
"données. Mettez à jour Nova sur %(server)s et essayez à nouveau."
+#, python-format
+msgid "Host '%(name)s' is not mapped to any cell"
+msgstr "L'hôte '%(name)s' n'est mappé à aucune cellule"
+
msgid "Host PowerOn is not supported by the Hyper-V driver"
msgstr ""
"La mise sous tension de l'hôte n'est pas prise en charge par le pilote Hyper-"
@@ -1606,6 +1658,10 @@ msgstr ""
"post_live_migration_at_source"
#, python-format
+msgid "Hypervisor virt type '%s' is not valid"
+msgstr "Le type virtuel d'hyperviseur '%s' n'est pas valide"
+
+#, python-format
msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised"
msgstr ""
"Le type de virtualisation de l'hyperviseur '%(hv_type)s' n'est pas reconnu."
@@ -1682,9 +1738,6 @@ msgstr "Limite de métadonnées d'image dépassée"
msgid "Image model '%(image)s' is not supported"
msgstr "Le modèle d'image '%(image)s' n'est pas supporté"
-msgid "Image must be available"
-msgstr "L'image doit être disponible"
-
msgid "Image not found."
msgstr "Image introuvable."
@@ -1808,6 +1861,10 @@ msgid "Instance %(instance_uuid)s does not specify a NUMA topology"
msgstr "L'instance %(instance_uuid)s ne spécifie pas une topologie NUMA"
#, python-format
+msgid "Instance %(instance_uuid)s does not specify a migration context."
+msgstr "L'instance %(instance_uuid)s ne spécifie pas de contexte de migration."
+
+#, python-format
msgid ""
"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while "
"the instance is in this state."
@@ -1917,9 +1974,6 @@ msgstr "L'interface %(interface)s non trouvée."
msgid "Invalid Base 64 data for file %(path)s"
msgstr "Contenu BAse 64 invalide pour le fichier %(path)s"
-msgid "Invalid CIDR"
-msgstr "CIDR non valide"
-
msgid "Invalid Connection Info"
msgstr "Informations de connexion non valides"
@@ -1932,10 +1986,6 @@ msgid "Invalid IP format %s"
msgstr "Format adresse IP non valide %s"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "Protocol IP non valide %(protocol)s"
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "Le protocole IP %(protocol)s est invalide"
@@ -2111,6 +2161,10 @@ msgid "Invalid key_name provided."
msgstr "key_name fourni non valide."
#, python-format
+msgid "Invalid libvirt version %(version)s"
+msgstr "Version libvirt %(version)s non valide"
+
+#, python-format
msgid "Invalid memory page size '%(pagesize)s'"
msgstr "Taille de page de mémoire non valide '%(pagesize)s'"
@@ -2227,24 +2281,22 @@ msgid "Invalid usage_type: %s"
msgstr "Type d'utilisation (usage_type) non valide : %s"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"Valeur non valide '%(ec2_instance_id)s' pour instanceId. L'instance ne "
-"comporte pas de volume associé à la racine (%(root)s)"
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "Valeur invalide '%s' pour le 'forçage'."
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "Valeur non valide pour 'scheduler_max_attempts', doit être >= 1"
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "Valeur invalide pour l'option du lecteur de configuration : %(option)s"
+#, python-format
+msgid ""
+"Invalid vcpu_pin_set config, one or more of the specified cpuset is not "
+"online. Online cpuset(s): %(online)s, requested cpuset(s): %(req)s"
+msgstr ""
+"Configuration vcpu_pin_set non valide, une ou plusieurs unités centrales "
+"spécifiées ne sont pas en ligne. Unités centrales en ligne : %(online)s, "
+"unités centrales demandées : %(req)s"
+
msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range."
msgstr "Paramètre vcpu_pin_set invalide, hors plage cpu de l'hyperviseur."
@@ -2371,6 +2423,13 @@ msgstr ""
"Liste des systèmes de fichiers qui sont configurés dans ce fichier dans les "
"sections image_file_url:<list entry name>"
+msgid ""
+"Live migration can not be used without shared storage except a booted from "
+"volume VM which does not have a local disk."
+msgstr ""
+"La migration active ne peut pas être utilisée sans stockage partagé excepté "
+"lors d'un amorçage depuis une machine virtuelle de volume sans disque local."
+
msgid "Live migration is supported starting with Hyper-V Server 2012"
msgstr ""
"La migration active est prise en charge, démarrage avec Hyper-V Server 2012"
@@ -2619,6 +2678,10 @@ msgid "Netmask to push into openvpn config"
msgstr "Masque réseau à passer à la configuration d'openvpn"
#, python-format
+msgid "Network \"%(val)s\" is not valid in field %(attr)s"
+msgstr "Le réseau \"%(val)s\" n'est pas valide dans la zone %(attr)s"
+
+#, python-format
msgid "Network %(network_id)s could not be found."
msgstr "Le réseau %(network_id)s n'a pas été trouvé."
@@ -2714,6 +2777,10 @@ msgstr "Aucun corps de demande"
msgid "No Unique Match Found."
msgstr "Correspondance unique non trouvée."
+msgid "No access_url in connection_info. Cannot validate protocol"
+msgstr ""
+"Aucune access_url dans connection_info. Impossible de valider le protocole"
+
msgid "No adminPass was specified"
msgstr "adminPass non spécifié"
@@ -2830,9 +2897,6 @@ msgstr "Aucun corps de demande"
msgid "No root disk defined."
msgstr "Aucun disque racine défini."
-msgid "No rule for the specified parameters."
-msgstr "Pas de règle pour les paramètres spécifiés."
-
msgid "No suitable network for migrate"
msgstr "Aucun réseau adéquat pour migrer"
@@ -2866,10 +2930,6 @@ msgstr "Pas capable d'acquérir un port libre pour %(host)s"
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "Pas capable de lier %(host)s : %(port)d, %(error)s "
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "Non autorisé à modifier les attributs de l'image %s"
-
msgid "Not an rbd snapshot"
msgstr "N'est pas un instantané rbd"
@@ -2948,9 +3008,6 @@ msgstr ""
msgid "Only host parameter can be specified"
msgstr "Seul le paramètre Hôte doit être spécifié"
-msgid "Only instances implemented"
-msgstr "Seules les instances implémentées"
-
msgid "Only root certificate can be retrieved."
msgstr "Seul le certificat racine peut être extrait."
@@ -3121,6 +3178,11 @@ msgstr "L'action de garde fourni (%(action)s) n'est pas supportée"
msgid "QEMU guest agent is not enabled"
msgstr "L'agent invité QEMU n'est pas activé"
+#, python-format
+msgid "Quiescing is not supported in instance %(instance_id)s"
+msgstr ""
+"La mise au repos n'est pas prise en charge dans l'instance %(instance_id)s"
+
msgid "Quota"
msgstr "Quota"
@@ -3132,6 +3194,14 @@ msgid "Quota could not be found"
msgstr "Le quota ne peut pas être trouvé"
#, python-format
+msgid ""
+"Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s "
+"of %(allowed)s %(overs)s"
+msgstr ""
+"Quota dépassé pour %(overs)s : demandé %(req)s, mais %(used)s déjà utilisés"
+"%(allowed)s %(overs)s"
+
+#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr "Quota dépassé pour les ressources : %(overs)s"
@@ -3225,6 +3295,15 @@ msgstr ""
"Nombre maximal de nouvelles tentatives atteint pour le débranchement de VBD "
"%s"
+#, python-format
+msgid ""
+"Relative blockcommit support was not detected. Libvirt '%s' or later is "
+"required for online deletion of file/network storage-backed volume snapshots."
+msgstr ""
+"Prise en charge de blockcommit relatif non détectée. Libvirt '%s' ou "
+"ultérieur est requis pour la détection en ligne d'instantanés de volume "
+"stockés sur fichier/ sur le réseau."
+
msgid "Request body and URI mismatch"
msgstr "Corps et URI de demande discordants"
@@ -3501,6 +3580,14 @@ msgstr "La définition du mot de passe admin n'est pas supportée"
msgid "Shadow table with name %(name)s already exists."
msgstr "La table fantôme avec le nom %(name)s existe déjà."
+#, python-format
+msgid "Share '%s' is not supported"
+msgstr "Le partage '%s' n'est pas pris en charge"
+
+#, python-format
+msgid "Share level '%s' cannot have share configured"
+msgstr "Le niveau de partage '%s' n'a pas de partage configuré"
+
msgid "Should we use a CA for each project?"
msgstr "Doit-on utiliser une autorité de certification pour chaque projet ?"
@@ -3512,9 +3599,6 @@ msgstr ""
"veuillez vérifier si vous avez suffisamment d'espace disponible sur votre "
"disque."
-msgid "Signature not provided"
-msgstr "Signature non fournie"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "Le snapshot %(snapshot_id)s n'a pas été trouvé."
@@ -3664,6 +3748,10 @@ msgstr "Le filtre de pare-feu de %s n'existe pas"
msgid "The floating IP request failed with a BadRequest"
msgstr "La demande d'IP flottante a échouée avec l'erreur Mauvaise Requête"
+#, python-format
+msgid "The group %(group_name)s must be configured with an id."
+msgstr "Le groupe %(group_name)s doit être configuré avec un ID."
+
msgid "The input is not a string or unicode"
msgstr "L'entrée n'est pas une chaine de caractère ou au format unicode"
@@ -3724,6 +3812,10 @@ msgstr ""
"La plage de réseaux n'est pas suffisante pour %(num_networks)s réseaux. La "
"taille de réseau est %(network_size)s"
+#, python-format
+msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
+msgstr "Le nombre de ports définis (%(ports)d) dépasse la limite (%(quota)d)"
+
msgid "The only partition should be partition 1."
msgstr "La seule partition doit être la partition 1."
@@ -3765,6 +3857,10 @@ msgstr ""
"Le service du pilote groupe de service %(driver)s est temporairement "
"indisponible."
+#, python-format
+msgid "The specified cluster '%s' was not found in vCenter"
+msgstr "Le cluster spécifié, '%s', est introuvable dans vCenter"
+
msgid ""
"The string containing the reason for disabling the service contains invalid "
"characters or is too long."
@@ -3853,6 +3949,17 @@ msgid "There are not enough hosts available."
msgstr "Le nombre d'hôtes disponibles est insuffisant"
#, python-format
+msgid ""
+"There are still %(count)i unmigrated flavor records. Migration cannot "
+"continue until all instance flavor records have been migrated to the new "
+"format. Please run `nova-manage db migrate_flavor_data' first."
+msgstr ""
+"Il existe encore %(count)i enregistrements de version non migrés. La "
+"migration ne peut pas continuer tant que tous les enregistrements de version "
+"d'instance n'ont pas été migrés vers le nouveau format. Exécutez tout "
+"d'abord `nova-manage db migrate_flavor_data'."
+
+#, python-format
msgid "There is no such action: %s"
msgstr "Aucune action de ce type : %s"
@@ -3898,9 +4005,6 @@ msgstr ""
"Timeout lors de la vérification de la possibilité de migrer à chaud vers "
"l'hôte: %s"
-msgid "Timestamp failed validation."
-msgstr "Echec de validation de l'horodatage."
-
msgid "To and From ports must be integers"
msgstr "Les ports de destination et d'origine doivent être des entiers"
@@ -3915,9 +4019,6 @@ msgstr ""
"Trop adresses IPs vont être générés. Augmentez /%s pour réduire le nombre "
"généré."
-msgid "Too many failed authentications."
-msgstr "Trop d'erreur d'authentification"
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr ""
"Le type et le code doivent être des entiers pour le type de protocole ICMP"
@@ -3925,9 +4026,6 @@ msgstr ""
msgid "UUID is required to delete Neutron Networks"
msgstr "UUID est demandée pour supprimer les réseaux Neutron"
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "Impossible d'associer l'adresse IP, pas de fixed_ips."
-
msgid "Unable to authenticate Ironic client."
msgstr "Impossible d'authentifier le client Ironic."
@@ -4051,6 +4149,12 @@ msgstr "Incapable d'obtenir le domaine dns"
msgid "Unable to get dns entry"
msgstr "Impossible d'obtenir l'entrée DNS"
+msgid "Unable to get host UUID: /etc/machine-id does not exist"
+msgstr "Impossible d'obtenir l'UUID de l'hôte : /etc/machine-id n'existe pas"
+
+msgid "Unable to get host UUID: /etc/machine-id is empty"
+msgstr "Impossible d'obtenir l'UUID de l'hôte : /etc/machine-id est vide"
+
msgid "Unable to get rdp console, functionality not implemented"
msgstr "Incapable d'avoir la console rdp, fonctionnalité non implémentée"
@@ -4212,9 +4316,6 @@ msgstr ""
msgid "Unknown delete_info type %s"
msgstr "Type inconnu delete_info %s"
-msgid "Unknown error occurred."
-msgstr "Erreur inconnue est survenue."
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "image_type=%s inconnu"
@@ -4396,6 +4497,10 @@ msgid "Virtual machine mode '%(vmmode)s' is not recognised"
msgstr "Le mode de la machine virtuelle '%(vmmode)s' n'est pas reconnu"
#, python-format
+msgid "Virtual machine mode '%s' is not valid"
+msgstr "Le mode de machine virtuelle '%s' n'est pas valide"
+
+#, python-format
msgid ""
"Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
@@ -4453,12 +4558,6 @@ msgid "Volume resource quota exceeded"
msgstr "Quota de ressource de Volume dépassé."
#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
-msgstr ""
-"Le volume définit la taille de bloc, mais libvirt '%s' ou version ultérieure "
-"est requis."
-
-#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
@@ -4549,6 +4648,18 @@ msgstr ""
msgid ""
"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
"your destination node does not support retrieving listen addresses. In "
+"order for live migration to work properly you must either disable serial "
+"console or upgrade your libvirt version."
+msgstr ""
+"Votre version libvirt ne prend pas en charge l'indicateur "
+"VIR_DOMAIN_XML_MIGRATABLE ou votre noeud de destination ne prend pas en "
+"charge l'extraction d'adresses en mode écoute. Pour que la live active "
+"fonctionne correctement, vous devez désactiver la console série ou mettre à "
+"niveau votre version libvirt."
+
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
+"your destination node does not support retrieving listen addresses. In "
"order for live migration to work properly, you must configure the graphics "
"(VNC and/or SPICE) listen addresses to be either the catch-all address "
"(0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)."
@@ -4591,9 +4702,6 @@ msgstr "block_device_mapping doit être une liste"
msgid "block_device_mapping_v2 must be a list"
msgstr "block_device_mapping_v2 doit être une liste"
-msgid "can't build a valid rule"
-msgstr "ne peut construire une règle valide"
-
msgid "cannot delete non-existent key"
msgstr "Impossible de supprimer une clé inexistante"
@@ -4712,13 +4820,6 @@ msgstr "image"
msgid "image already mounted"
msgstr "image déjà montée"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "image de %(instance)s à %(now)s"
-
-msgid "imageLocation is required"
-msgstr "imageLocation est obligatoire"
-
msgid "index"
msgstr "index"
@@ -4802,9 +4903,6 @@ msgstr "Device nbd %s n'est pas apparu"
msgid "nbd unavailable: module not loaded"
msgstr "nbd non disponible : module non chargé"
-msgid "need group_name or group_id"
-msgstr "besoin de group_name ou de group_id"
-
msgid "network"
msgstr "network"
@@ -4835,17 +4933,9 @@ msgstr ""
msgid "onSharedStorage must be specified."
msgstr "onSharedStorage doit etre spécifié"
-msgid "only group \"all\" is supported"
-msgstr "Seul le group \"tous\" est supporté"
-
msgid "operation time out"
msgstr "l'opération a dépassé le délai d'attente"
-msgid "operation_type must be add or remove"
-msgstr ""
-"le type d'opération (operation_type) doit être ajout (add) ou suppression "
-"(remove)"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr "os-getConsoleOutput incorrect ou manquant dans le corps de demande"
@@ -4884,9 +4974,6 @@ msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr ""
"read_deleted peut uniquement correspondre à 'no', 'yes' ou 'only', et non %r"
-msgid "resource_id and tag are required"
-msgstr "les champs 'resource_id' et 'tag' sont requis"
-
msgid "rpc_port must be integer"
msgstr "rpc_port doit être un entier"
@@ -4955,9 +5042,6 @@ msgstr "Champs non supportés : %s"
msgid "user"
msgstr "utilisateur"
-msgid "user or group not specified"
-msgstr "Utilisateur ou groupe non spécifié"
-
msgid "uuid"
msgstr "uuid"
diff --git a/nova/locale/it/LC_MESSAGES/nova-log-error.po b/nova/locale/it/LC_MESSAGES/nova-log-error.po
index a55fe209fb..f0b2642087 100644
--- a/nova/locale/it/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/it/LC_MESSAGES/nova-log-error.po
@@ -7,19 +7,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-08-30 12:41+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Italian\n"
-"Language: it\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-08-30 12:41+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: it\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Italian\n"
#, python-format
msgid ""
@@ -153,10 +153,6 @@ msgid "Environment variable 'NETWORK_ID' must be set."
msgstr "La variabile d'ambiente 'NETWORK_ID' deve essere impostata."
#, python-format
-msgid "Environment: %s"
-msgstr "Ambiente: %s"
-
-#, python-format
msgid "Error defining a domain with XML: %s"
msgstr "Errore nella definizione di un dominio tramite XML: %s"
@@ -610,10 +606,6 @@ msgid "Invalid server_string: %s"
msgstr "stringa_server non valida %s"
#, python-format
-msgid "Keystone failure: %s"
-msgstr "Errore keystone: %s"
-
-#, python-format
msgid "Live Migration failure: %s"
msgstr "Fallimento di Live Migration: %s"
@@ -843,10 +835,6 @@ msgstr "Impossibile configurare il pool: %s."
msgid "Unable to update host of port %s"
msgstr "Impossibile aggiornare l'host con porta %s"
-#, python-format
-msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
-msgstr " %(ex_name)s inaspettato sollevato da: %(ex_str)s"
-
msgid "Unexpected build failure, not rescheduling build."
msgstr ""
"Errore di generazione non previsto, la generazione non sarà riproposta."
diff --git a/nova/locale/it/LC_MESSAGES/nova-log-info.po b/nova/locale/it/LC_MESSAGES/nova-log-info.po
index 3e579076be..6449419ac4 100644
--- a/nova/locale/it/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/it/LC_MESSAGES/nova-log-info.po
@@ -8,19 +8,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-08-30 12:40+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Italian\n"
-"Language: it\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-08-30 12:40+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: it\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Italian\n"
#, python-format
msgid "%(action_str)s instance"
@@ -51,22 +51,10 @@ msgid "Adding security group %(security_group_id)s to port %(port_id)s"
msgstr ""
"Aggiunta del gruppo sicurezza %(security_group_id)s alla porta %(port_id)s"
-msgid "Allocate address"
-msgstr "Alloca indirizzo"
-
-#, python-format
-msgid "Associate address %(public_ip)s to instance %(instance_id)s"
-msgstr "Associa indirizzo %(public_ip)s all'istanza %(instance_id)s"
-
msgid "Attach interface"
msgstr "Collega interfaccia"
#, python-format
-msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
-msgstr ""
-"Collegamento volume %(volume_id)s all'istanza %(instance_id)s in %(device)s"
-
-#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr ""
"Collegamento volume %(volume_id)s all'istanza %(server_id)s in %(device)s"
@@ -150,22 +138,10 @@ msgid "Create Security Group %s"
msgstr "Crea gruppo di sicurezza %s"
#, python-format
-msgid "Create key pair %s"
-msgstr "Crea (create) coppia di chiavi %s"
-
-#, python-format
msgid "Create snapshot from volume %s"
msgstr "Crea istantanea dal volume %s"
#, python-format
-msgid "Create snapshot of volume %s"
-msgstr "Crea istantanea del volume %s"
-
-#, python-format
-msgid "Create volume from snapshot %s"
-msgstr "Crea volume dall'istantanea %s"
-
-#, python-format
msgid "Create volume of %s GB"
msgstr "Crea volume di %s GB"
@@ -177,14 +153,6 @@ msgid "Creating image"
msgstr "Creazione immagine"
#, python-format
-msgid "De-registering image %s"
-msgstr "Deregistrazione immagine %s"
-
-#, python-format
-msgid "Delete key pair %s"
-msgstr "Elimina (delete) coppia di chiavi %s"
-
-#, python-format
msgid "Delete security group %s"
msgstr "Elimina gruppo di sicurezza %s"
@@ -258,10 +226,6 @@ msgid "Disabling host %s."
msgstr "Disabilitazione host %s."
#, python-format
-msgid "Disassociate address %s"
-msgstr "Separare l'indirizzo %s"
-
-#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr "Disconnessione della VDI %s obsoleta da compute domU"
@@ -368,10 +332,6 @@ msgid "Get console output"
msgstr "Ottieni l'output della console"
#, python-format
-msgid "Get console output for instance %s"
-msgstr "Ottieni l'output della console per l'istanza %s"
-
-#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
@@ -411,10 +371,6 @@ msgstr ""
"durata: %(duration).2f secondi per l'immagine %(image_id)s"
#, python-format
-msgid "Import key %s"
-msgstr "Importa (import) chiave %s"
-
-#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database: "
"assuming it is a worker VM and skip ping migration to a new host"
@@ -581,10 +537,6 @@ msgstr ""
msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr "Inserimento dell'host %(host_name)s in modalità manutenzione %(mode)s."
-#, python-format
-msgid "Reboot instance %r"
-msgstr "Riavvia l'istanza %r"
-
msgid "Rebooting instance"
msgstr "Riavvio dell'istanza"
@@ -602,14 +554,6 @@ msgid "Reclaiming deleted instance"
msgstr "Recupero dell'istanza eliminata"
#, python-format
-msgid "Registered image %(image_location)s with id %(image_id)s"
-msgstr "Immagine registrata %(image_location)s con id %(image_id)s"
-
-#, python-format
-msgid "Release address %s"
-msgstr "Rilascia indirizzo %s"
-
-#, python-format
msgid "Removable base files: %s"
msgstr "File di base rimovibili: %s"
@@ -742,12 +686,6 @@ msgstr ""
"code: %(ex)s"
#, python-format
-msgid ""
-"Unauthorized request for controller=%(controller)s and action=%(action)s"
-msgstr ""
-"Richiesta non autorizzata per controller=%(controller)s e action=%(action)s"
-
-#, python-format
msgid "Unexpected error: %s"
msgstr "Errore imprevisto: %s"
@@ -765,10 +703,6 @@ msgid "Updating from migration %s"
msgstr "Aggiornamento dalla migrazione %s"
#, python-format
-msgid "Updating image %s publicity"
-msgstr "Aggiornamento pubblicità immagine %s"
-
-#, python-format
msgid "Updating instance to original state: '%s'"
msgstr "Aggiornamento dell'istanza allo stato originale: '%s'"
diff --git a/nova/locale/it/LC_MESSAGES/nova.po b/nova/locale/it/LC_MESSAGES/nova.po
index 4e3a17c013..d32f0e02a1 100644
--- a/nova/locale/it/LC_MESSAGES/nova.po
+++ b/nova/locale/it/LC_MESSAGES/nova.po
@@ -1,22 +1,31 @@
-# Italian translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
-msgid ""
-msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+# Translators:
+# Ying Chun Guo <daisy.ycguo@gmail.com>, 2013
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011
+# Loris Strozzini, 2012
+# ls, 2012
+# Mariano Iumiento <miumiento@gmail.com>, 2013
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Tom Cocozzello <tjcocozz@us.ibm.com>, 2015. #zanata
+# Tom Cocozzello <tjcocozz@us.ibm.com>, 2016. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-09-06 10:17+0000\n"
"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
"Language: it\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
"Language-Team: Italian\n"
-"Plural-Forms: nplurals=2; plural=(n != 1)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
@@ -34,10 +43,32 @@ msgstr ""
"dalla politica"
#, python-format
+msgid ""
+"%(desc)r\n"
+"command: %(cmd)r\n"
+"exit code: %(code)r\n"
+"stdout: %(stdout)r\n"
+"stderr: %(stderr)r"
+msgstr ""
+"%(desc)r\n"
+"comando: %(cmd)r\n"
+"codice di uscita: %(code)r\n"
+"stdout: %(stdout)r\n"
+"stderr: %(stderr)r"
+
+#, python-format
msgid "%(err)s"
msgstr "%(err)s"
#, python-format
+msgid "%(field)s should not be part of the updates."
+msgstr "%(field)s non deve fare parte degli aggiornamenti."
+
+#, python-format
+msgid "%(fieldname)s missing field type"
+msgstr "Per %(fieldname)s manca il tipo di campo"
+
+#, python-format
msgid "%(host)s:%(port)s: Target closed"
msgstr "%(host)s:%(port)s: destinazione chiusa"
@@ -81,6 +112,10 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "l'hypervisor %(type)s non supporta i dispositivi PCI"
#, python-format
+msgid "%(typename)s in %(fieldname)s is not an instance of Enum"
+msgstr "%(typename)s in %(fieldname)s non è un'istanza di Enum"
+
+#, python-format
msgid "%(value_name)s must be <= %(max_value)d"
msgstr "%(value_name)s deve essere <= %(max_value)d"
@@ -103,6 +138,10 @@ msgstr ""
"di 0"
#, python-format
+msgid "%r failed. Not Retrying."
+msgstr "%r non riuscito. Nessun nuovo tentativo."
+
+#, python-format
msgid "%r failed. Retrying."
msgstr "%r non riuscito. Nuovo tentativo."
@@ -131,6 +170,10 @@ msgid "%s must be either 'MANUAL' or 'AUTO'."
msgstr "%s deve essere 'MANUAL' o 'AUTO'."
#, python-format
+msgid "'%(other)s' should be an instance of '%(cls)s'"
+msgstr "'%(other)s' deve essere un'istanza di '%(cls)s'"
+
+#, python-format
msgid "'%s' is either missing or empty."
msgstr "'%s' manca o è vuoto."
@@ -160,6 +203,10 @@ msgstr ""
"Un nome modello di CPU non deve essere impostato quando viene richiesto un "
"modello di CPU host"
+#, python-format
+msgid "A NetworkModel is required in field %s"
+msgstr "Un modello di rete è richiesto nel campo %s"
+
msgid ""
"A unique ID given to each file system. This is value is set in Glance and "
"agreed upon here so that the operator knowns they are dealing with the same "
@@ -193,9 +240,6 @@ msgstr ""
msgid "API version %(version)s is not supported on this method."
msgstr "Versione API %(version)s non supportata in questo metodo."
-msgid "Access key not provided"
-msgstr "Chiave di accesso non fornita"
-
msgid "Access list not available for public flavors."
msgstr "Elenco accessi non disponibile per i flavor pubblici."
@@ -226,6 +270,9 @@ msgstr "L'indirizzo non può essere convertito."
msgid "Address not specified"
msgstr "Indirizzo non specificato"
+msgid "Affinity instance group policy was violated."
+msgstr "La politica di affinità del gruppo di istanze è stata violata."
+
#, python-format
msgid "Agent does not support the call: %(method)s"
msgstr "L'agent non supporta la chiamata: %(method)s"
@@ -291,6 +338,10 @@ msgstr "La politica di anti-affinità del gruppo di istanze è stata violata."
msgid "Architecture name '%(arch)s' is not recognised"
msgstr "Il nome architettura '%(arch)s' non è riconosciuto"
+#, python-format
+msgid "Architecture name '%s' is not valid"
+msgstr "Il nome architettura '%s' non è valido"
+
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr "Argomento 'type' per il riavvio non è HARD o SOFT"
@@ -377,9 +428,23 @@ msgstr ""
msgid "Binary"
msgstr "Valore binario"
+#, python-format
+msgid ""
+"Binding failed for port %(port_id)s, please check neutron logs for more "
+"information."
+msgstr ""
+"Bind non riuscito per la porta %(port_id)s, controllare i log neutron per "
+"ulteriori informazioni."
+
msgid "Blank components"
msgstr "Componenti vuoti"
+msgid ""
+"Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size"
+msgstr ""
+"I volumi vuoti (origine: 'blank', dest: 'volume') devono avere una "
+"dimensione diversa da zero"
+
#, python-format
msgid "Block Device %(id)s is not bootable."
msgstr "Il dispositivo di blocco %(id)s non è riavviabile."
@@ -478,6 +543,21 @@ msgstr "Il numero CPU %(cpunum)d è superiore a quello massimo %(cpumax)d"
msgid "CPU number %(cpuset)s is not assigned to any node"
msgstr "Il numero CPU %(cpuset)s non è assegnato a nessun nodo"
+#, python-format
+msgid "CPU pinning is not supported by the host: %(reason)s"
+msgstr "Blocco CPU non supportato dall'host: %(reason)s"
+
+#, python-format
+msgid ""
+"CPU set to pin/unpin %(requested)s must be a subset of known CPU set "
+"%(cpuset)s"
+msgstr ""
+"Serie CPU per blocco/sblocco %(requested)s deve essere una sottoserie di una "
+"serie CPU nota%(cpuset)s"
+
+msgid "Can not add access to a public flavor."
+msgstr "Impossibile aggiungere l'accesso a una versione pubblica."
+
msgid "Can not find requested image"
msgstr "Impossibile trovare l'immagine richiesta"
@@ -554,6 +634,13 @@ msgstr "Impossibile migrare l'istanza %s con volumi associati"
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr "Impossibile chiamare %(method)s su oggetto orfano %(objtype)s"
+msgid ""
+"Cannot create default bittorrent URL without xenserver.torrent_base_url "
+"configuration option set."
+msgstr ""
+"Impossibile creare URL bittorrent predefinito senza l'opzione di "
+"configurazione xenserver.torrent_base_url impostata."
+
msgid "Cannot execute /sbin/mount.sofs"
msgstr "Impossibile eseguire /sbin/mount.sofs"
@@ -649,6 +736,9 @@ msgstr ""
msgid "Cell name cannot be empty"
msgstr "Il nome cella non può essere vuoto"
+msgid "Cell name cannot contain '!', '.' or '@'"
+msgstr "Il nome cella non può contenere '!', '.' o '@'"
+
msgid "Cell type must be 'parent' or 'child'"
msgstr "Il tipo di cella deve essere 'parent' o 'child'"
@@ -715,6 +805,22 @@ msgstr ""
"Config ha richiesto un modello di CPU esplicito, ma l'hypervisor libvirt "
"'%s' non supporta la selezione dei modelli di CPU"
+#, python-format
+msgid ""
+"Conflict updating instance %(instance_uuid)s, but we were unable to "
+"determine the cause"
+msgstr ""
+"Conflitto durante l'aggiornamento dell'istanza %(instance_uuid)s, ma non è "
+"stato possibile determinare la causa."
+
+#, python-format
+msgid ""
+"Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. "
+"Actual: %(actual)s"
+msgstr ""
+"Conflitto durante l'aggiornamento dell'istanza %(instance_uuid)s. Previsto: "
+"%(expected)s. Reale: %(actual)s"
+
msgid "Conflicting policies configured!"
msgstr "Sono state configurate politiche in conflitto!"
@@ -800,10 +906,6 @@ msgstr "Impossibile trovare il binario %(binary)s nell'host %(host)s."
msgid "Could not find config at %(path)s"
msgstr "Impossibile trovare la configurazione in %(path)s"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "Impossibile trovare coppia(e) di chiavi: %s"
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
"Impossibile trovare il riferimento(i) archivio dati utilizzato dalla VM."
@@ -838,14 +940,6 @@ msgstr "Impossibile caricare l'immagine %(image_id)s"
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "Impossibile ottenere l'IP Link Local di %(interface)s :%(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"Impossibile arrestare l'istanza %(instance)s entro 1 ora. Vm_state corrente: "
-"%(vm_state)s, task_state corrente: %(task_state)s"
-
msgid "Create networks failed"
msgstr "Creazione reti non riuscita"
@@ -1057,6 +1151,26 @@ msgstr ""
"errore %(error_code)s] %(ex)s"
#, python-format
+msgid ""
+"Error from libvirt while set password for username \"%(user)s\": [Error Code "
+"%(error_code)s] %(ex)s"
+msgstr ""
+"Errore da libvirt durante l'impostazione della password per il nome utente "
+"\"%(user)s\": [Codice di errore %(error_code)s] %(ex)s"
+
+#, python-format
+msgid ""
+"Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs "
+"(%(e)s)"
+msgstr ""
+"Errore di montaggio %(device)s in %(dir)s nell'immagine %(image)s con "
+"libguestfs (%(e)s)"
+
+#, python-format
+msgid "Error mounting %(image)s with libguestfs (%(e)s)"
+msgstr "Errore di montaggio %(image)s con libguestfs (%(e)s)"
+
+#, python-format
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Errore durante la creazione del monitor di risorse: %(monitor)s"
@@ -1079,6 +1193,10 @@ msgstr ""
"l'istanza %(instance_uuid)s durante la migrazione attiva"
#, python-format
+msgid "Exceeded maximum number of retries. %(reason)s"
+msgstr "Superato numero massimo di tentativi. %(reason)s"
+
+#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr "Era previsto un uuid ma è stato ricevuto %(uuid)s."
@@ -1086,21 +1204,6 @@ msgstr "Era previsto un uuid ma è stato ricevuto %(uuid)s."
msgid "Expected object of type: %s"
msgstr "Oggetto previsto di tipo: %s"
-msgid "Expecting a list of resources"
-msgstr "È previsto un elenco di risorse"
-
-msgid "Expecting a list of tagSets"
-msgstr "È previsto un elenco di tagSets"
-
-msgid "Expecting both key and value to be set"
-msgstr "È previsto che vengano impostati sia la chiave che il valore"
-
-msgid "Expecting key to be set"
-msgstr "È previsto che venga impostata la chiave"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "È previsto che tagSet sia una coppia chiave/valore"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "Colonna supplementare %(table)s.%(column)s nella tabella cronologica"
@@ -1118,6 +1221,14 @@ msgstr ""
"una stringa"
#, python-format
+msgid "Failed to access port %(port_id)s: %(reason)s"
+msgstr "Impossibile accedere alla porta %(port_id)s: %(reason)s"
+
+#, python-format
+msgid "Failed to add bridge: %s"
+msgstr "Impossibile aggiungere il bridge: %s"
+
+#, python-format
msgid ""
"Failed to add deploy parameters on node %(node)s when provisioning the "
"instance %(instance)s"
@@ -1272,10 +1383,6 @@ msgstr "Impossibile sospendere l'istanza: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "Impossibile terminare l'istanza: %(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "Errore durante l'analisi della risposta dal keystone: %s"
-
msgid "Failure prepping block device."
msgstr "Errore durante l'esecuzione preparatoria del dispositivo di blocco."
@@ -1305,6 +1412,11 @@ msgid "Filename of root Certificate Revocation List"
msgstr "Nome file della root elenco di revoche di certificati"
#, python-format
+msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
+msgstr ""
+"L'IP fisso %(ip)s non è un indirizzo IP valido per la rete %(network_id)s."
+
+#, python-format
msgid "Fixed IP %s has been deleted"
msgstr "l'IP fisso %s è stato eliminato"
@@ -1357,14 +1469,6 @@ msgstr ""
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr ""
-"Le specifiche supplementari del flavor %(id)d non possono essere aggiornate "
-"o create dopo %(retries)d tentativi."
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
@@ -1408,15 +1512,34 @@ msgstr "Impossibile trovare il flavor con nome %(flavor_name)s."
msgid "Flavor with name %(name)s already exists."
msgstr "Il flavor con nome %(name)s esiste già."
+#, python-format
+msgid ""
+"Flavor's disk is smaller than the minimum size specified in image metadata. "
+"Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i "
+"bytes."
+msgstr ""
+"Il disco della versione è più piccolo della dimensione minima specificata "
+"nei metadati dell'immagine. Il disco versione è %(flavor_size)i byte, la "
+"dimensione minima è %(image_min_disk)i byte."
+
+#, python-format
+msgid ""
+"Flavor's disk is too small for requested image. Flavor disk is "
+"%(flavor_size)i bytes, image is %(image_size)i bytes."
+msgstr ""
+"Il disco della versione è troppo piccolo per l'immagine richiesta. Il disco "
+"versione è %(flavor_size)i byte, l'immagine è %(image_size)i byte."
+
msgid "Flavor's memory is too small for requested image."
msgstr "La memoria flavor è troppo piccola per l'immagine richiesta."
+#, python-format
+msgid "Floating IP %(address)s association has failed."
+msgstr "Associazione IP %(address)s mobile non riuscita."
+
msgid "Floating IP allocate failed."
msgstr "Allocazione IP variabile non riuscita."
-msgid "Floating ip is not associated."
-msgstr "L'ip mobile non è associato."
-
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
"meta."
@@ -1482,6 +1605,10 @@ msgstr ""
"migrazioni attive che coinvolgono tale versione possono causare la perdita "
"di dati. Aggiornare Nova sul %(server)s e ritentare."
+#, python-format
+msgid "Host '%(name)s' is not mapped to any cell"
+msgstr "L'host '%(name)s' non è associato a una cella"
+
msgid "Host PowerOn is not supported by the Hyper-V driver"
msgstr "Host PowerOn non supportato dal driver Hyper-V"
@@ -1509,6 +1636,10 @@ msgstr ""
"Il driver hypervisor non supporta il metodo post_live_migration_at_source"
#, python-format
+msgid "Hypervisor virt type '%s' is not valid"
+msgstr "Tipo virt hypervisor '%s' non valido"
+
+#, python-format
msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised"
msgstr ""
"Il tipo di virtualizzazione di hypervisor '%(hv_type)s' non è riconosciuto"
@@ -1581,8 +1712,9 @@ msgstr "La chiave di metadati dell'immagine è troppo lunga"
msgid "Image metadata limit exceeded"
msgstr "Superato il limite dei metadati dell'immagine"
-msgid "Image must be available"
-msgstr "L'immagine deve essere disponibile"
+#, python-format
+msgid "Image model '%(image)s' is not supported"
+msgstr "Modello immagine '%(image)s' non supportato"
msgid "Image not found."
msgstr "Immagine non trovata."
@@ -1709,6 +1841,10 @@ msgid "Instance %(instance_uuid)s does not specify a NUMA topology"
msgstr "L'istanza %(instance_uuid)s non specifica una topologia NUMA"
#, python-format
+msgid "Instance %(instance_uuid)s does not specify a migration context."
+msgstr "L'istanza %(instance_uuid)s non specifica un contesto di migrazione."
+
+#, python-format
msgid ""
"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while "
"the instance is in this state."
@@ -1741,6 +1877,10 @@ msgstr "L'istanza %s non è collegata."
msgid "Instance %s not found"
msgstr "Istanza %s non trovata"
+#, python-format
+msgid "Instance %s provisioning was aborted"
+msgstr "Il provisioning dell'istanza %s è stato interrotto"
+
msgid "Instance could not be found"
msgstr "Impossibile trovare l'istanza"
@@ -1814,9 +1954,6 @@ msgstr "Impossibile trovare l'interfaccia %(interface)s."
msgid "Invalid Base 64 data for file %(path)s"
msgstr "I dati della base 64 non sono validi per il file %(path)s"
-msgid "Invalid CIDR"
-msgstr "CIDR non valido"
-
msgid "Invalid Connection Info"
msgstr "Informazioni sulla connessione non valide"
@@ -1829,10 +1966,6 @@ msgid "Invalid IP format %s"
msgstr "Formato IP non valido %s"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "Protocollo IP non valido %(protocol)s"
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "Protocollo IP non valido %(protocol)s."
@@ -1919,6 +2052,10 @@ msgid "Invalid entry: '%s'; Expecting list or dict"
msgstr "Voce non valida: '%s'; è previsto list o dict"
#, python-format
+msgid "Invalid event name %s"
+msgstr "Nome evento non valido %s"
+
+#, python-format
msgid "Invalid event status `%s'"
msgstr "Stato dell'evento non valido `%s'"
@@ -1958,6 +2095,10 @@ msgid "Invalid id: %(volume_id)s (expecting \"i-...\")"
msgstr "ID non valido: %(volume_id)s (previsto \"i-...\")"
#, python-format
+msgid "Invalid image format '%(format)s'"
+msgstr "Formato immagine non valido '%(format)s'"
+
+#, python-format
msgid "Invalid image href %(image_href)s."
msgstr "href immagine %(image_href)s non valido."
@@ -2000,6 +2141,10 @@ msgid "Invalid key_name provided."
msgstr "Il nome_chiave specificato non è valido."
#, python-format
+msgid "Invalid libvirt version %(version)s"
+msgstr "Versione libvirt non valida %(version)s"
+
+#, python-format
msgid "Invalid memory page size '%(pagesize)s'"
msgstr "Dimensione pagina di memoria non valida '%(pagesize)s'"
@@ -2116,24 +2261,21 @@ msgid "Invalid usage_type: %s"
msgstr "usage_type non valido: %s"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"Valore non valido '%(ec2_instance_id)s' per instanceId. L'istanza non "
-"dispone di un volume collegato nella root (%(root)s)"
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "Valore non valido '%s' per force."
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "Valore non valido per 'scheduler_max_attempts', deve essere >= 1"
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "Valore non valido per l'opzione unità di config: %(option)s"
+#, python-format
+msgid ""
+"Invalid vcpu_pin_set config, one or more of the specified cpuset is not "
+"online. Online cpuset(s): %(online)s, requested cpuset(s): %(req)s"
+msgstr ""
+"Configurazione vcpu_pin_set non valida, uno o più cpuset specificati non è "
+"online. cpuset online: %(online)s, cpuset richiesti: %(req)s"
+
msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range."
msgstr ""
"Configurazione vcpu_pin_set non valida, ha superato l'intervallo cpu "
@@ -2261,6 +2403,13 @@ msgstr ""
"Elenco di file system configurati in questo file nelle sezioni "
"image_file_url:<nome voce di elenco>"
+msgid ""
+"Live migration can not be used without shared storage except a booted from "
+"volume VM which does not have a local disk."
+msgstr ""
+"La migrazione live non può essere utilizzata senza memoria condivisa eccetto "
+"se avviata dalla VM volume che non ha un disco locale."
+
msgid "Live migration is supported starting with Hyper-V Server 2012"
msgstr "La migrazione attiva è supportata a partire dal Hyper-V Server 2012"
@@ -2408,6 +2557,9 @@ msgstr "Manca il campo causa disabilitata"
msgid "Missing flavorRef attribute"
msgstr "Manca l'attributo flavorRef"
+msgid "Missing forced_down field"
+msgstr "Campo forced_down mancante"
+
msgid "Missing imageRef attribute"
msgstr "Manca l'attributo imageRef"
@@ -2504,6 +2656,10 @@ msgid "Netmask to push into openvpn config"
msgstr "Netmask da inserire nella configurazione openvpn"
#, python-format
+msgid "Network \"%(val)s\" is not valid in field %(attr)s"
+msgstr "La rete \"%(val)s\" non è valida nel campo %(attr)s"
+
+#, python-format
msgid "Network %(network_id)s could not be found."
msgstr "Impossibile trovare la rete %(network_id)s."
@@ -2581,6 +2737,10 @@ msgstr "Il nuovo volume deve essere scollegato per lo scambio."
msgid "New volume must be the same size or larger."
msgstr "Il nuovo volume deve avere la stessa dimensione o superiore."
+#, python-format
+msgid "No Block Device Mapping with id %(id)s."
+msgstr "Nessuna associazione dispositivo di blocco con id %(id)s."
+
msgid "No CIDR requested"
msgstr "Nessun CIDR richiesto"
@@ -2594,6 +2754,10 @@ msgstr "Nessun corpo della richiesta"
msgid "No Unique Match Found."
msgstr "Non è stata trovata nessuna corrispondenza univoca."
+msgid "No access_url in connection_info. Cannot validate protocol"
+msgstr ""
+"Nessun access_url in connection_info. Impossibile convalidare il protocollo"
+
msgid "No adminPass was specified"
msgstr "Nessun adminPass è stato specificato"
@@ -2676,6 +2840,10 @@ msgstr "Non è stato trovato nessun ID corrispondente per l'URL %s."
msgid "No more available networks."
msgstr "Non vi sono più reti disponibili."
+#, python-format
+msgid "No mount points found in %(root)s of %(image)s"
+msgstr "Nessun punto di montaggio trovato in %(root)s di %(image)s"
+
msgid "No networks defined."
msgstr "Nessuna rete definita."
@@ -2702,9 +2870,6 @@ msgstr "Nessun corpo della richiesta"
msgid "No root disk defined."
msgstr "Nessun disco root definito"
-msgid "No rule for the specified parameters."
-msgstr "Nessuna regola per i parametri specificati."
-
msgid "No suitable network for migrate"
msgstr "Nessuna rete adatta per la migrazione"
@@ -2739,10 +2904,6 @@ msgstr "Impossibile acquisire una porta libera per %(host)s"
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "Impossibile collegare %(host)s:%(port)d, %(error)s"
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "Non è consentito modificare gli attributi per l'immagine %s"
-
msgid "Not an rbd snapshot"
msgstr "Non è un'istantanea rbd"
@@ -2790,6 +2951,10 @@ msgid "Old volume is attached to a different instance."
msgstr "Il volume precedente è collegato ad un'istanza diversa."
#, python-format
+msgid "One or more hosts already in availability zone(s) %s"
+msgstr "Uno o più host sono già nelle zone di disponibilità %s"
+
+#, python-format
msgid ""
"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
"%(unit_string)s."
@@ -2814,9 +2979,6 @@ msgstr ""
msgid "Only host parameter can be specified"
msgstr "Può essere specificato solo il parametro host"
-msgid "Only instances implemented"
-msgstr "Sono state implementate solo le istanze"
-
msgid "Only root certificate can be retrieved."
msgstr "Solo il certificato root può essere recuperato."
@@ -2893,6 +3055,16 @@ msgid "Page size %(pagesize)s is not supported by the host."
msgstr "Dimensione pagina %(pagesize)s non supportata dall'host."
#, python-format
+msgid ""
+"Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. "
+"Check your Neutron configuration to validate that the macvtap parameters are "
+"correct."
+msgstr ""
+"Parametri %(missing_params)s non presenti in vif_details per vif %(vif_id)s. "
+"Controllare la configurazione neutron per confermare che i parametri macvtap "
+"siano corretti."
+
+#, python-format
msgid "Path %s must be LVM logical volume"
msgstr "Il percorso %s deve essere un volume logico LVM"
@@ -2978,6 +3150,10 @@ msgstr "L'azione watchdog (%(action)s) non è supportata."
msgid "QEMU guest agent is not enabled"
msgstr "Agent guest QEMU non abilitato"
+#, python-format
+msgid "Quiescing is not supported in instance %(instance_id)s"
+msgstr "Sospensione non supportata per l'istanza %(instance_id)s"
+
msgid "Quota"
msgstr "Limite"
@@ -2989,6 +3165,14 @@ msgid "Quota could not be found"
msgstr "Impossibile trovare la quota"
#, python-format
+msgid ""
+"Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s "
+"of %(allowed)s %(overs)s"
+msgstr ""
+"Quota superata per %(overs)s: Richiesto %(req)s, ma già utilizzato %(used)s "
+"%(allowed)s %(overs)s"
+
+#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr "Quota superata per le risorse: %(overs)s"
@@ -3044,6 +3228,14 @@ msgstr ""
#, python-format
msgid ""
+"Quota limit %(limit)s for %(resource)s must be in the range of -1 and "
+"%(max)s."
+msgstr ""
+"Il limite della quota %(limit)s per %(resource)s deve essere nell'intervallo "
+"tra -1 e %(max)s."
+
+#, python-format
+msgid ""
"Quota limit %(limit)s for %(resource)s must be less than or equal to "
"%(maximum)s."
msgstr ""
@@ -3074,6 +3266,15 @@ msgstr ""
msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr "Raggiunto numero massimo di tentativi per scollegare VBD %s"
+#, python-format
+msgid ""
+"Relative blockcommit support was not detected. Libvirt '%s' or later is "
+"required for online deletion of file/network storage-backed volume snapshots."
+msgstr ""
+"Non è stato rilevato il supporto dell'operazione blockcommit. Richiesto "
+"Libvirt '%s' o successivo per eliminazione online di istantanee volume "
+"supportate da memoria di rete/file."
+
msgid "Request body and URI mismatch"
msgstr "Il corpo della richiesta e l'URI non corrispondono"
@@ -3347,10 +3548,22 @@ msgstr "Il servizio con valore host %(host)s binario %(binary)s esiste già."
msgid "Service with host %(host)s topic %(topic)s exists."
msgstr "Il servizio con host %(host)s topic %(topic)s esiste."
+msgid "Set admin password is not supported"
+msgstr "L'impostazione della password admin non è supportata"
+
#, python-format
msgid "Shadow table with name %(name)s already exists."
msgstr "La tabella cronologia con il nome %(name)s esiste già."
+#, python-format
+msgid "Share '%s' is not supported"
+msgstr "La condivisione '%s' non è supportata"
+
+#, python-format
+msgid "Share level '%s' cannot have share configured"
+msgstr ""
+"Il livello di condivisione '%s' non può avere la condivisione configurata"
+
msgid "Should we use a CA for each project?"
msgstr "Si dovrebbe usare un CA per ogni progetto?"
@@ -3361,9 +3574,6 @@ msgstr ""
"Riduzione del filesystem con resize2fs non riuscita, controllare se si "
"dispone di spazio sufficiente sul proprio disco."
-msgid "Signature not provided"
-msgstr "Firma non fornita"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "Impossibile trovare l'istantanea %(snapshot_id)s."
@@ -3434,6 +3644,9 @@ msgstr ""
msgid "Success"
msgstr "Riuscito"
+msgid "Suspended"
+msgstr "Sospeso"
+
msgid "Swap drive requested is larger than instance type allows."
msgstr ""
"L'unità di scambio richiesta è più grande di quanto consentito dal tipo di "
@@ -3483,10 +3696,16 @@ msgstr ""
"Creare un database utilizzando 'nova-manage db sync' prima di eseguire "
"questo comando."
+msgid "The backlog must be more than 0"
+msgstr "Il backlog deve essere maggiore di 0"
+
#, python-format
msgid "The console port range %(min_port)d-%(max_port)d is exhausted."
msgstr "La serie di porte di console %(min_port)d-%(max_port)d è esaurita."
+msgid "The created instance's disk would be too small."
+msgstr "Il disco dell'istanza creata potrebbe essere troppo piccolo."
+
msgid "The current driver does not support preserving ephemeral partitions."
msgstr ""
"Il driver corrente non supporta la conservazione di partizioni effimere."
@@ -3498,6 +3717,13 @@ msgstr "La politica PBM predefinita non esiste sul backend."
msgid "The firewall filter for %s does not exist"
msgstr "Il filtro firewall per %s non esiste"
+msgid "The floating IP request failed with a BadRequest"
+msgstr "Richiesta IP mobile non riuscita con errore Richiesta non corretta"
+
+#, python-format
+msgid "The group %(group_name)s must be configured with an id."
+msgstr "Il gruppo %(group_name)s deve essere configurato con un id."
+
msgid "The input is not a string or unicode"
msgstr "L'input non è una stringa o unicode"
@@ -3554,6 +3780,11 @@ msgstr ""
"L'intervallo di rete non è abbastanza grande per contenere le reti "
"%(num_networks)s. La dimensione della rete è %(network_size)s"
+#, python-format
+msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
+msgstr ""
+"Il numero di porte definite: %(ports)d è superiore al limite: %(quota)d"
+
msgid "The only partition should be partition 1."
msgstr "L'unica partizione dovrebbe essere la partizione 1."
@@ -3595,6 +3826,10 @@ msgstr ""
"Il servizio del driver servicegroup %(driver)s temporaneamente non è "
"disponibile."
+#, python-format
+msgid "The specified cluster '%s' was not found in vCenter"
+msgstr "Il cluster specificato '%s' non è stato trovato in vCenter"
+
msgid ""
"The string containing the reason for disabling the service contains invalid "
"characters or is too long."
@@ -3684,6 +3919,17 @@ msgid "There are not enough hosts available."
msgstr "Numero di host disponibili non sufficiente."
#, python-format
+msgid ""
+"There are still %(count)i unmigrated flavor records. Migration cannot "
+"continue until all instance flavor records have been migrated to the new "
+"format. Please run `nova-manage db migrate_flavor_data' first."
+msgstr ""
+"Ci sono ancora %(count)i record di versione non migrati. La migrazione non "
+"può continuare finché tutti i record di versione istanza non sono stati "
+"migrati al nuovo formato. Eseguire prima 'nova-manage db "
+"migrate_flavor_data'."
+
+#, python-format
msgid "There is no such action: %s"
msgstr "Non esiste alcuna azione simile: %s"
@@ -3722,8 +3968,11 @@ msgstr "Timeout in attesa che l'unità %s venga creata"
msgid "Timeout waiting for response from cell"
msgstr "Timeout in attesa di risposta dalla cella"
-msgid "Timestamp failed validation."
-msgstr "La convalida di data/ora non è riucita."
+#, python-format
+msgid "Timeout while checking if we can live migrate to host: %s"
+msgstr ""
+"Timeout durante il controllo della possibilità di eseguire la migrazione "
+"live all'host: %s"
msgid "To and From ports must be integers"
msgstr "Le porte 'Da' e 'A' devono essere numeri interi"
@@ -3739,18 +3988,12 @@ msgstr ""
"Verranno generati troppi indirizzi IP. Aumentare /%s per ridurre il numero "
"generato."
-msgid "Too many failed authentications."
-msgstr "Troppe autenticazioni non riuscite."
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr "Tipo e codice devono essere numeri interi per il tipo protocollo ICMP"
msgid "UUID is required to delete Neutron Networks"
msgstr "L'UUID è richiesto per eliminare le reti Neutron"
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "Impossibile associare indirizzo IP, nessun ip_fisso."
-
msgid "Unable to authenticate Ironic client."
msgstr "Impossibile autenticare il client Ironic."
@@ -3875,6 +4118,12 @@ msgstr "Impossibile ottenere il dominio dns"
msgid "Unable to get dns entry"
msgstr "Impossibile ottenere la voce dns"
+msgid "Unable to get host UUID: /etc/machine-id does not exist"
+msgstr "Impossibile richiamare l'UUID host: /etc/machine-id non esiste"
+
+msgid "Unable to get host UUID: /etc/machine-id is empty"
+msgstr "Impossibile richiamare l'UUID host: /etc/machine-id è vuoto"
+
msgid "Unable to get rdp console, functionality not implemented"
msgstr ""
"Impossibile ottenere la console rdp, la funzionalità non è implementata"
@@ -4041,9 +4290,6 @@ msgstr ""
msgid "Unknown delete_info type %s"
msgstr "Tipo di delete_info %s sconosciuto"
-msgid "Unknown error occurred."
-msgstr "Si è verificato un errore sconosciuto."
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "image_type=%s sconosciuto"
@@ -4182,6 +4428,10 @@ msgstr ""
"Il valore (%(value)s) per il parametro Group%(property)s non è valido. "
"Contenuto limitato a '%(allowed)s'."
+#, python-format
+msgid "Value must be >= 0 for field %s"
+msgstr "Il valore deve essere >= 0 per il campo %s"
+
msgid "Value required for 'scality_sofs_config'"
msgstr "Valore richiesto per 'scality_sofs_config'"
@@ -4216,6 +4466,10 @@ msgid "Virtual machine mode '%(vmmode)s' is not recognised"
msgstr "La modalità della macchina virtuale '%(vmmode)s' non è riconosciuta"
#, python-format
+msgid "Virtual machine mode '%s' is not valid"
+msgstr "La modalità della macchina virtuale '%s' non è valida"
+
+#, python-format
msgid ""
"Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
@@ -4236,20 +4490,42 @@ msgid "Volume %(volume_id)s could not be found."
msgstr "Impossibile trovare il volume %(volume_id)s."
#, python-format
+msgid ""
+"Volume %(volume_id)s did not finish being created even after we waited "
+"%(seconds)s seconds or %(attempts)s attempts. And its status is "
+"%(volume_status)s."
+msgstr ""
+"La creazione del volume %(volume_id)s non è stata completata anche dopo "
+"un'attesa di %(seconds)s secondi o %(attempts)s tentativi e lo stato è "
+"%(volume_status)s."
+
+#, python-format
msgid "Volume %(volume_id)s is not attached to anything"
msgstr "Volume %(volume_id)s non è collegato a niente"
msgid "Volume does not belong to the requested instance."
msgstr "Il volume non appartiene all'istanza richiesta."
-msgid "Volume must be attached in order to detach."
-msgstr "Volume deve essere collegato per riuscire a scollegarlo."
+#, python-format
+msgid ""
+"Volume encryption is not supported for %(volume_type)s volume %(volume_id)s"
+msgstr ""
+"Codifica volume non supportata per volume %(volume_type)s %(volume_id)s"
#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
+msgid ""
+"Volume is smaller than the minimum size specified in image metadata. Volume "
+"size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes."
msgstr ""
-"Il volume imposta la dimensione del blocco ma è necessaria libvirt '%s' o "
-"successiva."
+"Il volume è più piccolo della dimensione minima specificata nei metadati "
+"dell'immagine. Dimensione volume %(volume_size)i byte, dimensione minima "
+"%(image_min_disk)i byte."
+
+msgid "Volume must be attached in order to detach."
+msgstr "Volume deve essere collegato per riuscire a scollegarlo."
+
+msgid "Volume resource quota exceeded"
+msgstr "Quota risorsa volume superata"
#, python-format
msgid ""
@@ -4297,12 +4573,18 @@ msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
msgstr ""
"Tipo di metodo hook non valido. Sono consentiti solo i tipi 'pre' e 'post'"
+msgid "X-Forwarded-For is missing from request."
+msgstr "X-Forwarded-For manca dalla richiesta."
+
msgid "X-Instance-ID header is missing from request."
msgstr "L'intestazione X-Instance-ID manca nella richiesta."
msgid "X-Instance-ID-Signature header is missing from request."
msgstr "Intestazione X-Instance-ID-Signature non presente nella richiesta."
+msgid "X-Metadata-Provider is missing from request."
+msgstr "X-Metadata-Provider manca dalla richiesta."
+
msgid "X-Tenant-ID header is missing from request."
msgstr "L'intestazione X-Tenant-ID non è presente nella richiesta."
@@ -4334,6 +4616,17 @@ msgstr ""
msgid ""
"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
"your destination node does not support retrieving listen addresses. In "
+"order for live migration to work properly you must either disable serial "
+"console or upgrade your libvirt version."
+msgstr ""
+"La versione libvirt non supporta l'indicatore VIR_DOMAIN_XML_MIGRATABLE o il "
+"nodo di destinazione non supporta il richiamo degli indirizzi in ascolto. "
+"Per consentire una corretta migrazione live, è necessario disabilitare la "
+"console di serie o aggiornare la versione di libvirt."
+
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
+"your destination node does not support retrieving listen addresses. In "
"order for live migration to work properly, you must configure the graphics "
"(VNC and/or SPICE) listen addresses to be either the catch-all address "
"(0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)."
@@ -4376,9 +4669,6 @@ msgstr "block_device_mapping deve essere un elenco"
msgid "block_device_mapping_v2 must be a list"
msgstr "block_device_mapping_v2 deve essere un elenco"
-msgid "can't build a valid rule"
-msgstr "impossibile creare una regola valida"
-
msgid "cannot delete non-existent key"
msgstr "impossibile eliminare una chiave non esistente"
@@ -4388,6 +4678,9 @@ msgstr "impossibile memorizzare chiavi arbitrarie"
msgid "cannot understand JSON"
msgstr "impossibile riconoscere JSON"
+msgid "cell_uuid must be set"
+msgstr "cell_uuid deve essere impostato"
+
msgid "clone() is not implemented"
msgstr "Il clone () non è implementato"
@@ -4491,13 +4784,6 @@ msgstr "immagine"
msgid "image already mounted"
msgstr "immagine già montata"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "immagine di %(instance)s in %(now)s"
-
-msgid "imageLocation is required"
-msgstr "imageLocation è obbligatorio"
-
msgid "index"
msgstr "indice"
@@ -4581,9 +4867,6 @@ msgstr "unità nbd %s non visualizzata"
msgid "nbd unavailable: module not loaded"
msgstr "nbd non disponibile: modulo non caricato"
-msgid "need group_name or group_id"
-msgstr "è necessario group_name o group_id"
-
msgid "network"
msgstr "network"
@@ -4601,18 +4884,22 @@ msgstr "nodo"
msgid "not able to execute ssh command: %s"
msgstr "Impossibile eseguire il comando ssh: %s"
+msgid ""
+"nova-idmapshift is a tool that properly sets the ownership of a filesystem "
+"for use with linux user namespaces. This tool can only be used with linux "
+"lxc containers. See the man page for details."
+msgstr ""
+"nova-idmapshift è uno strumento che imposta correttamente la proprietà di un "
+"filesystem per l'utilizzo con spazi nomi utente linux. Questo strumento può "
+"essere utilizzato solo con contenitori lxc linux. Vedere la pagina man per i "
+"dettagli."
+
msgid "onSharedStorage must be specified."
msgstr "deve essere specificato onSharedStorage."
-msgid "only group \"all\" is supported"
-msgstr "è supportato solo il gruppo \"all\""
-
msgid "operation time out"
msgstr "timeout operazione"
-msgid "operation_type must be add or remove"
-msgstr "tipo_operazione deve essere aggiunto o rimosso"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr "os-getConsoleOutput non corretto o mancante nel corpo della richiesta"
@@ -4650,9 +4937,6 @@ msgstr "Impossibile trovare le librerie rbd python"
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr "read_deleted può essere solo 'no', 'yes' o 'only', non %r"
-msgid "resource_id and tag are required"
-msgstr "resource_id e tag sono richiesti"
-
msgid "rpc_port must be integer"
msgstr "rpc_port deve essere un numero intero"
@@ -4718,9 +5002,6 @@ msgstr "campi non supportati: %s"
msgid "user"
msgstr "utente"
-msgid "user or group not specified"
-msgstr "utente o gruppo non specificato"
-
msgid "uuid"
msgstr "uuid"
diff --git a/nova/locale/ja/LC_MESSAGES/nova-log-critical.po b/nova/locale/ja/LC_MESSAGES/nova-log-critical.po
index da71596922..c4bd764ddb 100644
--- a/nova/locale/ja/LC_MESSAGES/nova-log-critical.po
+++ b/nova/locale/ja/LC_MESSAGES/nova-log-critical.po
@@ -1,18 +1,18 @@
-# KATO Tomoyuki <kato.tomoyuki@jp.fujitsu.com>, 2015. #zanata
+# Akihiro Motoki <amotoki@gmail.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.dev302\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-09-29 06:31+0000\n"
-"PO-Revision-Date: 2015-09-24 03:07+0000\n"
-"Last-Translator: KATO Tomoyuki <kato.tomoyuki@jp.fujitsu.com>\n"
-"Language-Team: Japanese\n"
-"Language: ja\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Generator: Zanata 3.7.1\n"
+"PO-Revision-Date: 2015-10-07 05:39+0000\n"
+"Last-Translator: KATO Tomoyuki <kato.tomoyuki@jp.fujitsu.com>\n"
+"Language: ja\n"
"Plural-Forms: nplurals=1; plural=0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Japanese\n"
#, python-format
msgid "Missing core API extensions: %s"
diff --git a/nova/locale/ja/LC_MESSAGES/nova.po b/nova/locale/ja/LC_MESSAGES/nova.po
index dcec83e94f..54d9713270 100644
--- a/nova/locale/ja/LC_MESSAGES/nova.po
+++ b/nova/locale/ja/LC_MESSAGES/nova.po
@@ -1,22 +1,33 @@
-# Japanese translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
-msgid ""
-msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+# Translators:
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011
+# Sasuke(Kyohei MORIYAMA) <>, 2015
+# *pokotan-in-the-sky* <>, 2012
+# Tom Fifield <tom@openstack.org>, 2013
+# Tomoyuki KATO <tomo@dream.daynight.jp>, 2013
+# Akihiro Motoki <amotoki@gmail.com>, 2015. #zanata
+# KATO Tomoyuki <kato.tomoyuki@jp.fujitsu.com>, 2015. #zanata
+# Mie Yamamoto <myamamot@redhat.com>, 2015. #zanata
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Yuko Katabami <yukokatabami@gmail.com>, 2015. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2015-10-08 04:20+0000\n"
"Last-Translator: Akihiro Motoki <amotoki@gmail.com>\n"
"Language: ja\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
"Language-Team: Japanese\n"
-"Plural-Forms: nplurals=1; plural=0\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
@@ -191,9 +202,6 @@ msgstr ""
msgid "API version %(version)s is not supported on this method."
msgstr "API ãƒãƒ¼ã‚¸ãƒ§ãƒ³ %(version)s ã¯ã“ã®ãƒ¡ã‚½ãƒƒãƒ‰ã§ã¯ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã›ã‚“。"
-msgid "Access key not provided"
-msgstr "アクセスキーãŒæŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“"
-
msgid "Access list not available for public flavors."
msgstr "パブリックフレーãƒãƒ¼ã§ã¯ã‚¢ã‚¯ã‚»ã‚¹ãƒªã‚¹ãƒˆã‚’使用ã§ãã¾ã›ã‚“。"
@@ -784,10 +792,6 @@ msgstr "ホスト %(host)s 上ã§ãƒã‚¤ãƒŠãƒªãƒ¼ %(binary)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã
msgid "Could not find config at %(path)s"
msgstr "%(path)s ã« config ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "キーペアãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸ: %s"
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr "VM ãŒä½¿ç”¨ã™ã‚‹ãƒ‡ãƒ¼ã‚¿ã‚¹ãƒˆã‚¢å‚ç…§ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚"
@@ -823,14 +827,6 @@ msgstr "イメージ %(image_id)s をアップロードã§ãã¾ã›ã‚“ã§ã—ãŸ"
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "%(interface)s ã®ãƒ­ãƒ¼ã‚«ãƒ« IP アドレスã®ãƒªãƒ³ã‚¯ãŒå–å¾—ã§ãã¾ã›ã‚“:%(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"1 時間以内ã«ã‚¤ãƒ³ã‚¹ã‚¿ãƒ³ã‚¹ %(instance)s ã‚’åœæ­¢ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚ç¾åœ¨ã® "
-"vm_state: %(vm_state)sã€ç¾åœ¨ã® task_state: %(task_state)s"
-
msgid "Create networks failed"
msgstr "ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ã®ä½œæˆã«å¤±æ•—ã—ã¾ã—ãŸ"
@@ -1066,21 +1062,6 @@ msgstr "UUID ãŒå¿…è¦ã§ã™ãŒã€%(uuid)s ã‚’å—ã‘å–ã‚Šã¾ã—ãŸã€‚"
msgid "Expected object of type: %s"
msgstr "想定ã•ã‚Œã‚‹ã‚ªãƒ–ジェクトタイプ: %s"
-msgid "Expecting a list of resources"
-msgstr "リソースã®ãƒªã‚¹ãƒˆãŒæœŸå¾…ã•ã‚Œã¦ã„ã¾ã™"
-
-msgid "Expecting a list of tagSets"
-msgstr "tagSets ã®ãƒªã‚¹ãƒˆãŒæœŸå¾…ã•ã‚Œã¦ã„ã¾ã™"
-
-msgid "Expecting both key and value to be set"
-msgstr "キーã¨å€¤ã®ä¸¡æ–¹ãŒè¨­å®šã•ã‚Œã‚‹ã“ã¨ãŒæœŸå¾…ã•ã‚Œã¦ã„ã¾ã™"
-
-msgid "Expecting key to be set"
-msgstr "キーãŒè¨­å®šã•ã‚Œã¦ã„ã‚‹ã“ã¨ãŒæœŸå¾…ã•ã‚Œã¦ã„ã¾ã™"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "tagSet ã¯ã‚­ãƒ¼/値ã®ãƒšã‚¢ã§ã‚ã‚‹ã“ã¨ãŒæœŸå¾…ã•ã‚Œã¦ã„ã¾ã™"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "シャドーテーブルã«ä½™åˆ†ãªã‚«ãƒ©ãƒ  %(table)s.%(column)s ãŒã‚ã‚Šã¾ã™"
@@ -1248,10 +1229,6 @@ msgstr "インスタンスを休止ã§ãã¾ã›ã‚“ã§ã—ãŸ: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "インスタンスを削除ã§ãã¾ã›ã‚“ã§ã—ãŸ: %(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "keystone %s ã‹ã‚‰ã®å¿œç­”ã®æ§‹æ–‡è§£æžã«å¤±æ•—ã—ã¾ã—ãŸ"
-
msgid "Failure prepping block device."
msgstr "ブロックデãƒã‚¤ã‚¹ã‚’準備ã§ãã¾ã›ã‚“ã§ã—ãŸ"
@@ -1335,14 +1312,6 @@ msgstr ""
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr ""
-"%(retries)d 回å†è©¦è¡Œã—ã¾ã—ãŸãŒã€ãƒ•ãƒ¬ãƒ¼ãƒãƒ¼ %(id)d ã®è¿½åŠ ã‚¹ãƒšãƒƒã‚¯ã®æ›´æ–°ã¾ãŸã¯"
-"作æˆãŒã§ãã¾ã›ã‚“。"
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
@@ -1390,9 +1359,6 @@ msgstr "フレーバーのメモリーは要求されたイメージに対して
msgid "Floating IP allocate failed."
msgstr "Floating IP の確保に失敗しました。"
-msgid "Floating ip is not associated."
-msgstr "Floating IP が関連付けられていません。"
-
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
"meta."
@@ -1556,9 +1522,6 @@ msgstr "イメージメタデータキーが長すぎます"
msgid "Image metadata limit exceeded"
msgstr "イメージメタデータ数の上限を超えました"
-msgid "Image must be available"
-msgstr "イメージが使用可能でなければなりません"
-
msgid "Image not found."
msgstr "イメージãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
@@ -1792,9 +1755,6 @@ msgstr "インターフェース %(interface)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
msgid "Invalid Base 64 data for file %(path)s"
msgstr "ファイル %(path)s ã® Base64 データãŒç„¡åŠ¹ã§ã™"
-msgid "Invalid CIDR"
-msgstr "無効㪠CIDR"
-
msgid "Invalid Connection Info"
msgstr "無効ãªæŽ¥ç¶šæƒ…å ±"
@@ -1807,10 +1767,6 @@ msgid "Invalid IP format %s"
msgstr "%s ã¯ç„¡åŠ¹ãª IP å½¢å¼ã§ã™"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "無効㪠IP プロトコル %(protocol)s"
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "無効㪠IP プロトコル %(protocol)s。"
@@ -2090,20 +2046,9 @@ msgid "Invalid usage_type: %s"
msgstr "usage_type %s ã¯ç„¡åŠ¹ã§ã™"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"InstanceId ã®å€¤ '%(ec2_instance_id)s' ã¯ç„¡åŠ¹ã§ã™ã€‚インスタンスã«ã¯ã€ãƒ«ãƒ¼ãƒˆ "
-"(%(root)s) ã§æŽ¥ç¶šã•ã‚ŒãŸãƒœãƒªãƒ¥ãƒ¼ãƒ ãŒã‚ã‚Šã¾ã›ã‚“"
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "force ã®å€¤ '%s' ã¯ç„¡åŠ¹ã§ã™ã€‚"
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "'scheduler_max_attempts' ã®å€¤ãŒç„¡åŠ¹ã§ã™ã€‚1 以上ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "コンフィグドライブã®ã‚ªãƒ—ション %(option)s ã®å€¤ãŒç„¡åŠ¹ã§ã™"
@@ -2679,9 +2624,6 @@ msgstr "リクエスト本文ãŒã‚ã‚Šã¾ã›ã‚“"
msgid "No root disk defined."
msgstr "ルートディスクãŒå®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
-msgid "No rule for the specified parameters."
-msgstr "指定ã•ã‚ŒãŸãƒ‘ラメータã«è©²å½“ã™ã‚‹ãƒ«ãƒ¼ãƒ«ãŒã‚ã‚Šã¾ã›ã‚“。"
-
msgid "No suitable network for migrate"
msgstr "マイグレーションã«é©åˆ‡ãªãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ãŒã‚ã‚Šã¾ã›ã‚“"
@@ -2716,10 +2658,6 @@ msgstr "%(host)s 用ã®æœªä½¿ç”¨ãƒãƒ¼ãƒˆã‚’å–å¾—ã§ãã¾ã›ã‚“"
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "%(host)s:%(port)d ã‚’ãƒã‚¤ãƒ³ãƒ‰ã§ãã¾ã›ã‚“。%(error)s"
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "イメージ %s ã®å±žæ€§ã®å¤‰æ›´ã¯è¨±å¯ã•ã‚Œã¾ã›ã‚“"
-
msgid "Not an rbd snapshot"
msgstr "rbd スナップショットã§ã¯ã‚ã‚Šã¾ã›ã‚“"
@@ -2790,9 +2728,6 @@ msgstr ""
msgid "Only host parameter can be specified"
msgstr "指定ã§ãã‚‹ã®ã¯ãƒ›ã‚¹ãƒˆãƒ‘ラメーターã®ã¿ã§ã™"
-msgid "Only instances implemented"
-msgstr "インスタンスã ã‘ãŒå®Ÿè£…ã•ã‚Œã¦ã„ã¾ã™"
-
msgid "Only root certificate can be retrieved."
msgstr "ルート証明書ã®ã¿ãŒå–å¾—å¯èƒ½ã§ã™ã€‚"
@@ -3342,9 +3277,6 @@ msgstr ""
"resize2fs ã§ãƒ•ã‚¡ã‚¤ãƒ«ã‚·ã‚¹ãƒ†ãƒ ã®ã‚µã‚¤ã‚ºã‚’縮å°ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚ディスク上ã«å分"
"ãªç©ºã容é‡ãŒã‚ã‚‹ã‹ã©ã†ã‹ã‚’確èªã—ã¦ãã ã•ã„。"
-msgid "Signature not provided"
-msgstr "シグニãƒãƒ£ãƒ¼ãŒæŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "スナップショット %(snapshot_id)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“ã§ã—ãŸã€‚"
@@ -3708,9 +3640,6 @@ msgstr "デãƒã‚¤ã‚¹ %s ãŒä½œæˆã•ã‚Œã‚‹ã®ã‚’å¾…ã£ã¦ã„ã‚‹éš›ã«ã‚¿ã‚¤ãƒ ã‚
msgid "Timeout waiting for response from cell"
msgstr "セルã‹ã‚‰ã®å¿œç­”を待機中ã«ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã«ãªã‚Šã¾ã—ãŸ"
-msgid "Timestamp failed validation."
-msgstr "タイムスタンプã®æ¤œè¨¼ãŒå¤±æ•—ã—ã¾ã—ãŸã€‚"
-
msgid "To and From ports must be integers"
msgstr "開始ãƒãƒ¼ãƒˆã¨çµ‚了ãƒãƒ¼ãƒˆã¯æ•´æ•°ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
@@ -3725,18 +3654,12 @@ msgstr ""
"生æˆã•ã‚Œã‚‹ IP アドレスãŒå¤šã™ãŽã¾ã™ã€‚/%s ã®å€¤ã‚’増やã—ã¦ã€ç”Ÿæˆã•ã‚Œã‚‹æ•°ã‚’減らã—"
"ã¦ãã ã•ã„。"
-msgid "Too many failed authentications."
-msgstr "èªè¨¼å¤±æ•—回数ãŒå¤šã™ãŽã¾ã™ã€‚"
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr "ICMP プロトコルã®ã‚¿ã‚¤ãƒ—ãŠã‚ˆã³ã‚³ãƒ¼ãƒ‰ã¯æ•´æ•°ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
msgid "UUID is required to delete Neutron Networks"
msgstr "Neutron ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ã‚’削除ã™ã‚‹ã«ã¯ã€UUID ãŒå¿…è¦ã§ã™"
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "IP アドレスを関連付ã‘ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“。fixed_ips ãŒã‚ã‚Šã¾ã›ã‚“。"
-
msgid "Unable to authenticate Ironic client."
msgstr "Ironic クライアントをèªè¨¼ã§ãã¾ã›ã‚“。"
@@ -4023,9 +3946,6 @@ msgstr ""
msgid "Unknown delete_info type %s"
msgstr "ä¸æ˜Žãª delete_info タイプ %s"
-msgid "Unknown error occurred."
-msgstr "ä¸æ˜Žãªã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚"
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "ä¸æ˜Žãª image_type=%s"
@@ -4225,11 +4145,6 @@ msgid "Volume must be attached in order to detach."
msgstr "ボリュームを切り離ã™ã«ã¯ãƒœãƒªãƒ¥ãƒ¼ãƒ ã‚’接続ã•ã‚Œã¦ã„ãªã‘ã‚Œã°ã„ã‘ã¾ã›ã‚“。"
#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
-msgstr ""
-"ボリュームã§ãƒ–ロックサイズãŒè¨­å®šã•ã‚Œã¦ã„ã¾ã™ãŒã€libvirt '%s' 以é™ãŒå¿…è¦ã§ã™ã€‚"
-
-#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
@@ -4355,9 +4270,6 @@ msgstr "block_device_mapping ã¯ãƒªã‚¹ãƒˆã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
msgid "block_device_mapping_v2 must be a list"
msgstr "block_device_mapping_v2 ã¯ãƒªã‚¹ãƒˆã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
-msgid "can't build a valid rule"
-msgstr "有効ãªãƒ«ãƒ¼ãƒ«ã‚’作æˆã§ãã¾ã›ã‚“"
-
msgid "cannot delete non-existent key"
msgstr "存在ã—ãªã„キーã¯å‰Šé™¤ã§ãã¾ã›ã‚“"
@@ -4470,13 +4382,6 @@ msgstr "イメージ"
msgid "image already mounted"
msgstr "イメージã¯æ—¢ã«ãƒžã‚¦ãƒ³ãƒˆã•ã‚Œã¦ã„ã¾ã™"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "%(now)s 時点㮠%(instance)s ã®ã‚¤ãƒ¡ãƒ¼ã‚¸"
-
-msgid "imageLocation is required"
-msgstr "imageLocation ãŒå¿…è¦ã§ã™"
-
msgid "index"
msgstr "インデックス"
@@ -4563,9 +4468,6 @@ msgstr "nbd デãƒã‚¤ã‚¹ %s ãŒå‡ºç¾ã—ã¾ã›ã‚“"
msgid "nbd unavailable: module not loaded"
msgstr "nbd ãŒä½¿ç”¨ä¸å¯ã§ã™: モジュールãŒãƒ­ãƒ¼ãƒ‰ã•ã‚Œã¦ã„ã¾ã›ã‚“"
-msgid "need group_name or group_id"
-msgstr "group_name ã¾ãŸã¯ group_id ãŒå¿…è¦ã§ã™"
-
#, fuzzy
msgid "network"
msgstr "Network"
@@ -4587,15 +4489,9 @@ msgstr "ssh コマンドを実行ã§ãã¾ã›ã‚“: %s"
msgid "onSharedStorage must be specified."
msgstr "onSharedStorage を指定ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
-msgid "only group \"all\" is supported"
-msgstr "グループ \"all\" ã®ã¿ã‚µãƒãƒ¼ãƒˆã•ã‚Œã¦ã„ã¾ã™ã€‚"
-
msgid "operation time out"
msgstr "æ“作ãŒã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã—ã¾ã—ãŸ"
-msgid "operation_type must be add or remove"
-msgstr "operation_type 㯠add ã¾ãŸã¯ remove ã®ã„ãšã‚Œã‹ã§ã‚ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr ""
"os-getConsoleOutput ã¯ã€å½¢å¼ã«èª¤ã‚ŠãŒã‚ã‚‹ã‹ã€ãƒªã‚¯ã‚¨ã‚¹ãƒˆæœ¬æ–‡ã«å«ã¾ã‚Œã¦ã„ã¾ã›ã‚“"
@@ -4636,9 +4532,6 @@ msgstr ""
"read_deleted ã«æŒ‡å®šã§ãã‚‹ã®ã¯ 'no', 'yes', 'only' ã®ã„ãšã‚Œã‹ã§ã™ã€‚%r ã¯æŒ‡å®šã§"
"ãã¾ã›ã‚“。"
-msgid "resource_id and tag are required"
-msgstr "resource_id ãŠã‚ˆã³ã‚¿ã‚°ãŒå¿…è¦ã§ã™"
-
msgid "rpc_port must be integer"
msgstr "rpc_port ã¯æ•´æ•°ã§ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“"
@@ -4703,9 +4596,6 @@ msgstr "サãƒãƒ¼ãƒˆã•ã‚Œãªã„フィールド: %s"
msgid "user"
msgstr "ユーザー"
-msgid "user or group not specified"
-msgstr "ユーザーã¾ãŸã¯ã‚°ãƒ«ãƒ¼ãƒ—ãŒæŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“。"
-
msgid "uuid"
msgstr "UUID"
diff --git a/nova/locale/ko_KR/LC_MESSAGES/nova.po b/nova/locale/ko_KR/LC_MESSAGES/nova.po
index a389089df4..1991077401 100644
--- a/nova/locale/ko_KR/LC_MESSAGES/nova.po
+++ b/nova/locale/ko_KR/LC_MESSAGES/nova.po
@@ -1,22 +1,32 @@
-# Korean (South Korea) translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
-msgid ""
-msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+# Translators:
+# Seunghyo Chun <seunghyo.chun@gmail.com>, 2013
+# Seunghyo Chun <seunghyo.chun@gmail.com>, 2013
+# Sungjin Kang <potopro@gmail.com>, 2013
+# Sungjin Kang <potopro@gmail.com>, 2013
+# Ian Y. Choi <ianyrchoi@gmail.com>, 2015. #zanata
+# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Sungjin Kang <gang.sungjin@gmail.com>, 2015. #zanata
+# Lucas Palm <lapalm@us.ibm.com>, 2016. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-10-07 05:39+0000\n"
-"Last-Translator: Sungjin Kang <gang.sungjin@gmail.com>\n"
-"Language: ko_KR\n"
-"Language-Team: Korean (South Korea)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
+"PO-Revision-Date: 2016-02-03 07:15+0000\n"
+"Last-Translator: Lucas Palm <lapalm@us.ibm.com>\n"
+"Language: ko-KR\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Korean (South Korea)\n"
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
@@ -33,10 +43,32 @@ msgstr ""
"%(binary)sì—ì„œ ì •ì±…ì ìœ¼ë¡œ 허용ë˜ì§€ 않는 ì§ì ‘ ë°ì´í„°ë² ì´ìŠ¤ 액세스를 ì‹œë„함"
#, python-format
+msgid ""
+"%(desc)r\n"
+"command: %(cmd)r\n"
+"exit code: %(code)r\n"
+"stdout: %(stdout)r\n"
+"stderr: %(stderr)r"
+msgstr ""
+"%(desc)r\n"
+"명령: %(cmd)r\n"
+"종료 코드: %(code)r\n"
+"stdout: %(stdout)r\n"
+"stderr: %(stderr)r"
+
+#, python-format
msgid "%(err)s"
msgstr "%(err)s"
#, python-format
+msgid "%(field)s should not be part of the updates."
+msgstr "%(field)sì€(는) ì—…ë°ì´íŠ¸ì˜ ì¼ë¶€ì—¬ì„œëŠ” 안 ë©ë‹ˆë‹¤. "
+
+#, python-format
+msgid "%(fieldname)s missing field type"
+msgstr "%(fieldname)sì— í•„ë“œ ìœ í˜•ì´ ëˆ„ë½ë¨"
+
+#, python-format
msgid "%(host)s:%(port)s: Target closed"
msgstr "%(host)s:%(port)s: ëŒ€ìƒ ì²˜ë¦¬ì™„ë£Œë¨"
@@ -77,6 +109,10 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "%(type)s 하ì´í¼ë°”ì´ì €ê°€ PCI 디바ì´ìŠ¤ë¥¼ 지ì›í•˜ì§€ ì•ŠìŒ"
#, python-format
+msgid "%(typename)s in %(fieldname)s is not an instance of Enum"
+msgstr "%(fieldname)sì— ìžˆëŠ” %(typename)sì´(ê°€) ì—´ê±°ì˜ ì¸ìŠ¤í„´ìŠ¤ê°€ 아님"
+
+#, python-format
msgid "%(value_name)s must be <= %(max_value)d"
msgstr "%(value_name)sì€(는) %(max_value)d보다 작거나 같아야 함"
@@ -99,6 +135,10 @@ msgstr ""
"니다."
#, python-format
+msgid "%r failed. Not Retrying."
+msgstr "%rì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. 재시ë„하지 않습니다. "
+
+#, python-format
msgid "%r failed. Retrying."
msgstr "%r 실패. ìž¬ì‹œë„ ì¤‘ìž…ë‹ˆë‹¤. "
@@ -127,6 +167,10 @@ msgid "%s must be either 'MANUAL' or 'AUTO'."
msgstr "%sì€(는) 'MANUAL' ë˜ëŠ” 'AUTO'여야 합니다. "
#, python-format
+msgid "'%(other)s' should be an instance of '%(cls)s'"
+msgstr "'%(other)s'ì€(는) '%(cls)s'ì˜ ì¸ìŠ¤í„´ìŠ¤ì—¬ì•¼ 함"
+
+#, python-format
msgid "'%s' is either missing or empty."
msgstr "'%s'ì´(ê°€) 누ë½ë˜ì—ˆê±°ë‚˜ 비어 있습니다."
@@ -151,6 +195,10 @@ msgstr "/%sì´(ê°€) cidr 형ì‹ì´ ì•„ë‹ˆë¼ ë‹¨ì¼ ì£¼ì†Œë¡œ 지정ë˜ì–´ì•¼ í•
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr "호스트 CPU 모ë¸ì´ ìš”ì²­ë  ë•Œ CPU ëª¨ë¸ ì´ë¦„ì´ ì„¤ì •ë˜ì§€ 않아야 함"
+#, python-format
+msgid "A NetworkModel is required in field %s"
+msgstr "NetworkModelì´ í•„ë“œ %sì— í•„ìš”í•¨"
+
msgid ""
"A unique ID given to each file system. This is value is set in Glance and "
"agreed upon here so that the operator knowns they are dealing with the same "
@@ -183,9 +231,6 @@ msgstr ""
msgid "API version %(version)s is not supported on this method."
msgstr "API 버전 %(version)sì—서는 ì´ ë©”ì†Œë“œë¥¼ 지ì›í•˜ì§€ 않습니다.."
-msgid "Access key not provided"
-msgstr "액세스 키가 제공ë˜ì§€ ì•ŠìŒ"
-
msgid "Access list not available for public flavors."
msgstr "액세스 목ë¡ì´ 공용 플레ì´ë²„ì— ì‚¬ìš©í•  수 없습니다. "
@@ -215,6 +260,9 @@ msgstr "주소를 변환할 수 없습니다. "
msgid "Address not specified"
msgstr "주소가 지정ë˜ì§€ ì•ŠìŒ"
+msgid "Affinity instance group policy was violated."
+msgstr "ì„ í˜¸ë„ ì¸ìŠ¤í„´ìŠ¤ 그룹 ì •ì±…ì„ ìœ„ë°˜í–ˆìŠµë‹ˆë‹¤. "
+
#, python-format
msgid "Agent does not support the call: %(method)s"
msgstr "ì—ì´ì „트가 í˜¸ì¶œì„ ì§€ì›í•˜ì§€ ì•ŠìŒ: %(method)s"
@@ -278,6 +326,10 @@ msgstr "안티 ì„ í˜¸ë„ ì¸ìŠ¤í„´ìŠ¤ 그룹 ì •ì±…ì„ ìœ„ë°˜í–ˆìŠµë‹ˆë‹¤."
msgid "Architecture name '%(arch)s' is not recognised"
msgstr "아키í…처 ì´ë¦„ '%(arch)s'ì´(ê°€) ì¸ì‹ë˜ì§€ ì•ŠìŒ"
+#, python-format
+msgid "Architecture name '%s' is not valid"
+msgstr "아키í…처 ì´ë¦„ '%s'ì´(ê°€) 올바르지 ì•ŠìŒ"
+
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr "다시 ë¶€íŒ…ì— ëŒ€í•œ ì¸ìˆ˜ 'type'ì´ HARD ë˜ëŠ” SOFTê°€ 아님"
@@ -360,9 +412,21 @@ msgstr "ìž˜ëª»ëœ volumeId 형ì‹: volumeIdê°€ ì ì ˆí•œ 형ì‹(%s)ì´ ì•„ë‹˜"
msgid "Binary"
msgstr "2진"
+#, python-format
+msgid ""
+"Binding failed for port %(port_id)s, please check neutron logs for more "
+"information."
+msgstr ""
+"í¬íŠ¸ %(port_id)sì— ëŒ€í•´ ë°”ì¸ë”©ì— 실패했습니다. ìžì„¸í•œ 정보는 neutron 로그를확"
+"ì¸í•˜ì‹­ì‹œì˜¤. "
+
msgid "Blank components"
msgstr "비어 있는 구성요소"
+msgid ""
+"Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size"
+msgstr "공백 볼륨(소스: 'blank', 대ìƒ: 'volume')ì€ í¬ê¸°ê°€ 0(ì˜)ì´ ì•„ë‹ˆì–´ì•¼ 함"
+
#, python-format
msgid "Block Device %(id)s is not bootable."
msgstr "%(id)s ë¸”ë¡ ë””ë°”ì´ìŠ¤ë¡œ 부팅할 수 없습니다."
@@ -452,6 +516,21 @@ msgstr "CPU 번호 %(cpunum)dì€(는) 최대값 %(cpumax)d 보다 í¼"
msgid "CPU number %(cpuset)s is not assigned to any node"
msgstr "CPU 번호 %(cpuset)sì´(ê°€) ì–´ëŠ ë…¸ë“œì—ë„ ì§€ì •ë˜ì§€ 않았ìŒ"
+#, python-format
+msgid "CPU pinning is not supported by the host: %(reason)s"
+msgstr "CPU ê³ ì •ì´ í˜¸ìŠ¤íŠ¸ì—ì„œ 지ì›ë˜ì§€ ì•ŠìŒ: %(reason)s"
+
+#, python-format
+msgid ""
+"CPU set to pin/unpin %(requested)s must be a subset of known CPU set "
+"%(cpuset)s"
+msgstr ""
+"%(requested)sì„(를) ê³ ì •/ê³ ì • 해제할 CPU 세트는 알려진 CPU 세트 %(cpuset)sì˜ "
+"서브세트여야 함"
+
+msgid "Can not add access to a public flavor."
+msgstr "공용 플레ì´ë²„ì— ëŒ€í•œ 액세스를 추가할 수 없습니다. "
+
msgid "Can not find requested image"
msgstr "ìš”ì²­ëœ ì´ë¯¸ì§€ë¥¼ ì°¾ì„ ìˆ˜ ì—†ìŒ"
@@ -520,6 +599,13 @@ msgstr "ë§µí•‘ëœ ë³¼ë¥¨ìœ¼ë¡œ 마ì´ê·¸ë ˆì´ì…˜ ì¸ìŠ¤í„´ìŠ¤ %sì„(를) 차단
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr "ê³ ì•„ %(objtype)s 오브ì íŠ¸ì—ì„œ %(method)s 메소드를 호출할 수 ì—†ìŒ"
+msgid ""
+"Cannot create default bittorrent URL without xenserver.torrent_base_url "
+"configuration option set."
+msgstr ""
+"xenserver.torrent_base_url 구성 옵션 설정 ì—†ì´ ê¸°ë³¸ bittorrent URLì„ ìž‘ì„±í•  "
+"수 없습니다. "
+
msgid "Cannot execute /sbin/mount.sofs"
msgstr "/sbin/mount.sofs를 실행할 수 ì—†ìŒ"
@@ -609,6 +695,9 @@ msgstr "ì…€ 메시지가 최대 홉 ê³„ìˆ˜ì— ë„달함: %(hop_count)s"
msgid "Cell name cannot be empty"
msgstr "ì…€ ì´ë¦„ì€ ê³µë°±ì¼ ìˆ˜ ì—†ìŒ"
+msgid "Cell name cannot contain '!', '.' or '@'"
+msgstr "ì…€ ì´ë¦„ì€ '!', '.' ë˜ëŠ” '@'를 í¬í•¨í•  수 ì—†ìŒ"
+
msgid "Cell type must be 'parent' or 'child'"
msgstr "ì…€ ìœ í˜•ì€ 'parent' ë˜ëŠ” 'child'여야 함"
@@ -670,6 +759,22 @@ msgstr ""
"êµ¬ì„±ì´ ëª…ì‹œì  CPU 모ë¸ì„ 요청했지만 현재 libvirt 하ì´í¼ë°”ì´ì € '%s'ì´(ê°€) CPU "
"ëª¨ë¸ ì„ íƒì„ 지ì›í•˜ì§€ ì•ŠìŒ"
+#, python-format
+msgid ""
+"Conflict updating instance %(instance_uuid)s, but we were unable to "
+"determine the cause"
+msgstr ""
+"ì¸ìŠ¤í„´ìŠ¤ %(instance_uuid)s ì—…ë°ì´íŠ¸ ì¤‘ì— ì¶©ëŒì´ ë°œìƒí–ˆì§€ë§Œ ì›ì¸ì„ íŒë³„í•  수 "
+"ì—†ìŒ"
+
+#, python-format
+msgid ""
+"Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. "
+"Actual: %(actual)s"
+msgstr ""
+"ì¸ìŠ¤í„´ìŠ¤ %(instance_uuid)s ì—…ë°ì´íŠ¸ ì¤‘ì— ì¶©ëŒì´ ë°œìƒí–ˆìŠµë‹ˆë‹¤. 예ìƒ: "
+"%(expected)s. 실제: %(actual)s"
+
msgid "Conflicting policies configured!"
msgstr "êµ¬ì„±ëœ ì •ì±…ì´ ì¶©ëŒí•¨!"
@@ -754,10 +859,6 @@ msgstr "%(host)s 호스트ì—ì„œ 2진 %(binary)sì„(를) ì°¾ì„ ìˆ˜ 없습니다
msgid "Could not find config at %(path)s"
msgstr "%(path)sì—ì„œ êµ¬ì„±ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "키 ìŒì„ ì°¾ì„ ìˆ˜ ì—†ìŒ: %s"
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr "VMì´ ì‚¬ìš©í•˜ëŠ” ë°ì´í„° 저장소 참조를 ì°¾ì„ ìˆ˜ 없습니다. "
@@ -791,14 +892,6 @@ msgstr "%(image_id)s ì´ë¯¸ì§€ë¥¼ 업로드할 수 ì—†ìŒ"
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "%(interface)sì˜ ë§í¬ 로컬 IP를 가져올 수 ì—†ìŒ :%(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"%(instance)s ì¸ìŠ¤í„´ìŠ¤ë¥¼ 1시간 ë‚´ì— ì¤‘ì§€í•  수 없습니다. 현재 vm_state: "
-"%(vm_state)s, 현재 task_state: %(task_state)s"
-
msgid "Create networks failed"
msgstr "ë„¤íŠ¸ì›Œí¬ ìž‘ì„± 실패"
@@ -994,6 +1087,26 @@ msgstr ""
"%(error_code)s] %(ex)s"
#, python-format
+msgid ""
+"Error from libvirt while set password for username \"%(user)s\": [Error Code "
+"%(error_code)s] %(ex)s"
+msgstr ""
+"ì‚¬ìš©ìž ì´ë¦„ \"%(user)s\"ì— ëŒ€í•œ 비밀번호 설정 중 libvirtì—ì„œ 오류 ë°œìƒ: [오"
+"류 코드 %(error_code)s] %(ex)s"
+
+#, python-format
+msgid ""
+"Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs "
+"(%(e)s)"
+msgstr ""
+"%(image)s ì´ë¯¸ì§€ì—ì„œ %(device)sì„(를) %(dir)sì— ë§ˆìš´íŠ¸í•˜ëŠ” 중 오류 ë°œìƒ"
+"(libguestfs(%(e)s)) "
+
+#, python-format
+msgid "Error mounting %(image)s with libguestfs (%(e)s)"
+msgstr "libguestfs(%(e)s)를 갖는 %(image)s 마운트 오류"
+
+#, python-format
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "ìžì› 모니터 작성 ì¤‘ì— ì˜¤ë¥˜ ë°œìƒ: %(monitor)s"
@@ -1016,6 +1129,10 @@ msgstr ""
"함%(instance_uuid)s"
#, python-format
+msgid "Exceeded maximum number of retries. %(reason)s"
+msgstr "최대 ìž¬ì‹œë„ íšŸìˆ˜ë¥¼ 초과했습니다. %(reason)s"
+
+#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr "uuid를 예ìƒí–ˆì§€ë§Œ %(uuid)sì„(를) 수신했습니다. "
@@ -1023,21 +1140,6 @@ msgstr "uuid를 예ìƒí–ˆì§€ë§Œ %(uuid)sì„(를) 수신했습니다. "
msgid "Expected object of type: %s"
msgstr "예ìƒí•œ 오브ì íŠ¸ 유형: %s"
-msgid "Expecting a list of resources"
-msgstr "ìžì› ëª©ë¡ ì˜ˆìƒ"
-
-msgid "Expecting a list of tagSets"
-msgstr "tagSetsì˜ ëª©ë¡ ì˜ˆìƒ"
-
-msgid "Expecting both key and value to be set"
-msgstr "키와 ê°’ì„ ëª¨ë‘ ì„¤ì •í•´ì•¼ 함"
-
-msgid "Expecting key to be set"
-msgstr "키 ì„¤ì •ì„ ì˜ˆìƒ"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "tagSet는 키/ê°’ ìŒì´ì–´ì•¼ 함"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "새ë„ìš° í…Œì´ë¸”ì— %(table)s.%(column)s ì—´ì´ ì¶”ê°€ë¡œ 있ìŒ"
@@ -1053,6 +1155,14 @@ msgid "Fail to validate provided extra specs keys. Expected string"
msgstr "ì œê³µëœ ì¶”ê°€ 스펙 í‚¤ì˜ ìœ íš¨ì„±ì„ ê²€ì¦í•˜ì§€ 못함. 문ìžì—´ì„ 예ìƒí•¨"
#, python-format
+msgid "Failed to access port %(port_id)s: %(reason)s"
+msgstr "í¬íŠ¸ %(port_id)sì— ì•¡ì„¸ìŠ¤ 실패: %(reason)s"
+
+#, python-format
+msgid "Failed to add bridge: %s"
+msgstr "브릿지 추가 실패: %s"
+
+#, python-format
msgid ""
"Failed to add deploy parameters on node %(node)s when provisioning the "
"instance %(instance)s"
@@ -1195,10 +1305,6 @@ msgstr "ì¸ìŠ¤í„´ìŠ¤ ì¼ì‹œì¤‘단 실패: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "ì¸ìŠ¤í„´ìŠ¤ 종료 실패: %(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "키스톤ì—ì„œ ì‘ë‹µì„ êµ¬ë¬¸ 분ì„하지 못함: %s"
-
msgid "Failure prepping block device."
msgstr "ë¸”ë¡ ë””ë°”ì´ìŠ¤ 준비 실패"
@@ -1227,6 +1333,12 @@ msgid "Filename of root Certificate Revocation List"
msgstr "Root ì¸ì¦ì„œ 해지 ëª©ë¡ íŒŒì¼ ì´ë¦„"
#, python-format
+msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
+msgstr ""
+"ê³ ì • IP %(ip)sì´(ê°€) ë„¤íŠ¸ì›Œí¬ %(network_id)sì— ëŒ€í•´ 올바른 IP 주소가 아닙니"
+"다. "
+
+#, python-format
msgid "Fixed IP %s has been deleted"
msgstr "ê³ ì • IP %sì´(ê°€) ì‚­ì œë˜ì—ˆìŒ"
@@ -1278,14 +1390,6 @@ msgstr "플레ì´ë²„ %(flavor_id)sì— %(key)s 키가 있는 추가 ìŠ¤íŽ™ì´ ì—†
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr ""
-"%(retries)d번 ìž¬ì‹œë„ í›„ 플레ì´ë²„ %(id)d 추가 ìŠ¤íŽ™ì„ ì—…ë°ì´íŠ¸í•˜ê±°ë‚˜ 작성할 수 "
-"없습니다."
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
@@ -1328,15 +1432,34 @@ msgstr "ì´ë¦„ì´ %(flavor_name)sì¸ í”Œë ˆì´ë²„를 ì°¾ì„ ìˆ˜ 없습니다."
msgid "Flavor with name %(name)s already exists."
msgstr "ì´ë¦„ì´ %(name)sì¸ í”Œë ˆì´ë²„ê°€ ì´ë¯¸ 있습니다."
+#, python-format
+msgid ""
+"Flavor's disk is smaller than the minimum size specified in image metadata. "
+"Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i "
+"bytes."
+msgstr ""
+"플레ì´ë²„ì˜ ë””ìŠ¤í¬ê°€ ì´ë¯¸ì§€ 메타ë°ì´í„°ì—ì„œ ì§€ì •ëœ ìµœì†Œ í¬ê¸°ë³´ë‹¤ 작습니다. 플레"
+"ì´ë²„ 디스í¬ëŠ” %(flavor_size)ië°”ì´íŠ¸ì´ê³  최소 í¬ê¸°ëŠ” %(image_min_disk)ië°”ì´íŠ¸"
+"입니다. "
+
+#, python-format
+msgid ""
+"Flavor's disk is too small for requested image. Flavor disk is "
+"%(flavor_size)i bytes, image is %(image_size)i bytes."
+msgstr ""
+"플레ì´ë²„ì˜ ë””ìŠ¤í¬ê°€ ìš”ì²­ëœ ì´ë¯¸ì§€ì— 비해 너무 작습니다. 플레ì´ë²„ 디스í¬ëŠ” "
+"%(flavor_size)ië°”ì´íŠ¸ì´ê³  ì´ë¯¸ì§€ëŠ” %(image_size)ië°”ì´íŠ¸ìž…니다. "
+
msgid "Flavor's memory is too small for requested image."
msgstr "플레ì´ë²„ì˜ ë©”ëª¨ë¦¬ê°€ ìš”ì²­ëœ ì´ë¯¸ì§€ì— 대해 너무 작습니다."
+#, python-format
+msgid "Floating IP %(address)s association has failed."
+msgstr "ë¶€ë™ IP %(address)s ì—°ê´€ì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. "
+
msgid "Floating IP allocate failed."
msgstr "ë¶€ë™ IP í• ë‹¹ì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤."
-msgid "Floating ip is not associated."
-msgstr "ë¶€ë™ IPê°€ ì—°ê´€ë˜ì§€ 않았습니다. "
-
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
"meta."
@@ -1400,6 +1523,10 @@ msgstr ""
"ê°„ 마ì´ê·¸ë ˆì´ì…˜ì„ 수행할 경우 ë°ì´í„°ê°€ ìœ ì‹¤ë  ìˆ˜ 있습니다. %(server)sì—ì„œ "
"Nova를 업그레ì´ë“œí•˜ì—¬ 다시 ì‹œë„하십시오."
+#, python-format
+msgid "Host '%(name)s' is not mapped to any cell"
+msgstr "'%(name)s' 호스트가 ì…€ì— ë§µí•‘ë˜ì§€ ì•ŠìŒ"
+
msgid "Host PowerOn is not supported by the Hyper-V driver"
msgstr "Hyper-V ë“œë¼ì´ë²„ì—ì„œ 호스트 PowerOnì„ ì§€ì›í•˜ì§€ 않습니다."
@@ -1427,6 +1554,10 @@ msgstr ""
"하ì´í¼ë°”ì´ì € ë“œë¼ì´ë²„ê°€ post_live_migration_at_source 메소드를 지ì›í•˜ì§€ ì•ŠìŒ"
#, python-format
+msgid "Hypervisor virt type '%s' is not valid"
+msgstr "하ì´í¼ë°”ì´ì € ê°€ìƒí™” 유형 '%s'ì´(ê°€) 올바르지 ì•ŠìŒ"
+
+#, python-format
msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised"
msgstr "하ì´í¼ë°”ì´ì € ê°€ìƒí™” 유형 '%(hv_type)s'ì´(ê°€) ì¸ì‹ë˜ì§€ ì•ŠìŒ"
@@ -1495,8 +1626,9 @@ msgstr "ì´ë¯¸ì§€ 메타ë°ì´í„° 키가 너무 ê¹€"
msgid "Image metadata limit exceeded"
msgstr "ì´ë¯¸ì§€ 메타ë°ì´í„° 한계 초과"
-msgid "Image must be available"
-msgstr "ì´ë¯¸ì§€ê°€ 사용 가능해야 함"
+#, python-format
+msgid "Image model '%(image)s' is not supported"
+msgstr "ì´ë¯¸ì§€ ëª¨ë¸ '%(image)s'ì€(는) 지ì›ë˜ì§€ ì•ŠìŒ"
msgid "Image not found."
msgstr "ì´ë¯¸ì§€ë¥¼ ì°¾ì„ ìˆ˜ 없습니다. "
@@ -1616,6 +1748,11 @@ msgid "Instance %(instance_uuid)s does not specify a NUMA topology"
msgstr "ì¸ìŠ¤í„´ìŠ¤ %(instance_uuid)sì´(ê°€) NUMA 토í´ë¡œì§€ë¥¼ 지정하지 ì•ŠìŒ"
#, python-format
+msgid "Instance %(instance_uuid)s does not specify a migration context."
+msgstr ""
+"ì¸ìŠ¤í„´ìŠ¤ %(instance_uuid)sì´(ê°€) 마ì´ê·¸ë ˆì´ì…˜ 컨í…스트를 지정하지 않습니다. "
+
+#, python-format
msgid ""
"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while "
"the instance is in this state."
@@ -1647,6 +1784,10 @@ msgstr "%s ì¸ìŠ¤í„´ìŠ¤ê°€ 첨부ë˜ì§€ 않았습니다. "
msgid "Instance %s not found"
msgstr "%s ì¸ìŠ¤í„´ìŠ¤ë¥¼ ì°¾ì„ ìˆ˜ ì—†ìŒ"
+#, python-format
+msgid "Instance %s provisioning was aborted"
+msgstr "ì¸ìŠ¤í„´ìŠ¤ %s 프로비저ë‹ì´ 중단ë¨"
+
msgid "Instance could not be found"
msgstr "ì¸ìŠ¤í„´ìŠ¤ë¥¼ ì°¾ì„ ìˆ˜ ì—†ìŒ"
@@ -1718,9 +1859,6 @@ msgstr "%(interface)s ì¸í„°íŽ˜ì´ìŠ¤ë¥¼ ì°¾ì„ ìˆ˜ 없습니다. "
msgid "Invalid Base 64 data for file %(path)s"
msgstr "íŒŒì¼ %(path)sì— ëŒ€í•´ 올바르지 ì•Šì€ Base 64 ë°ì´í„°"
-msgid "Invalid CIDR"
-msgstr "올바르지 ì•Šì€ CIDR"
-
msgid "Invalid Connection Info"
msgstr "올바르지 ì•Šì€ ì—°ê²° ì •ë³´"
@@ -1733,10 +1871,6 @@ msgid "Invalid IP format %s"
msgstr "올바르지 ì•Šì€ IP í˜•ì‹ %s"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "올바르지 ì•Šì€ IP 프로토콜 %(protocol)s"
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "올바르지 ì•Šì€ IP 프로토콜 %(protocol)s."
@@ -1821,6 +1955,10 @@ msgid "Invalid entry: '%s'; Expecting list or dict"
msgstr "올바르지 ì•Šì€ í•­ëª©: '%s', ëª©ë¡ ë˜ëŠ” 사전 예ìƒ"
#, python-format
+msgid "Invalid event name %s"
+msgstr "올바르지 ì•Šì€ ì´ë²¤íŠ¸ ì´ë¦„ %s"
+
+#, python-format
msgid "Invalid event status `%s'"
msgstr "올바르지 ì•Šì€ ì´ë²¤íŠ¸ ìƒíƒœ `%s'"
@@ -1860,6 +1998,10 @@ msgid "Invalid id: %(volume_id)s (expecting \"i-...\")"
msgstr "올바르지 ì•Šì€ ID: %(volume_id)s (\"i-...\" 예ìƒ)"
#, python-format
+msgid "Invalid image format '%(format)s'"
+msgstr "올바르지 ì•Šì€ ì´ë¯¸ì§€ í˜•ì‹ '%(format)s'"
+
+#, python-format
msgid "Invalid image href %(image_href)s."
msgstr "올바르지 ì•Šì€ ì´ë¯¸ì§€ href %(image_href)s."
@@ -1902,6 +2044,10 @@ msgid "Invalid key_name provided."
msgstr "올바르지 ì•Šì€ key_nameì´ ì œê³µë˜ì—ˆìŠµë‹ˆë‹¤. "
#, python-format
+msgid "Invalid libvirt version %(version)s"
+msgstr "올바르지 ì•Šì€ libvirt 버전 %(version)s"
+
+#, python-format
msgid "Invalid memory page size '%(pagesize)s'"
msgstr "올바르지 ì•Šì€ ë©”ëª¨ë¦¬ 페ì´ì§€ í¬ê¸° '%(pagesize)s'"
@@ -2017,24 +2163,21 @@ msgid "Invalid usage_type: %s"
msgstr "올바르지 ì•Šì€ usage_type: %s"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"instanceIdì— ëŒ€í•œ 올바르지 ì•Šì€ ê°’ '%(ec2_instance_id)s'입니다. 루트"
-"(%(root)s)ì— ì²¨ë¶€ëœ ë³¼ë¥¨ì´ ì¸ìŠ¤í„´ìŠ¤ì— 없습니다."
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "ê°•ì œ ì‹¤í–‰ì— ëŒ€í•œ 올바르지 ì•Šì€ ê°’ '%s'입니다. "
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "'scheduler_max_attempts'ì— ëŒ€í•œ 올바르지 ì•Šì€ ê°’, >= 1ì´ì–´ì•¼ 함"
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "구성 ë“œë¼ì´ë¸Œ ì˜µì…˜ì— ëŒ€í•´ 올바르지 ì•Šì€ ê°’: %(option)s"
+#, python-format
+msgid ""
+"Invalid vcpu_pin_set config, one or more of the specified cpuset is not "
+"online. Online cpuset(s): %(online)s, requested cpuset(s): %(req)s"
+msgstr ""
+"올바르지 ì•Šì€ vcpu_pin_set 구성, ì§€ì •ëœ cpuset 중 하나 ì´ìƒì´ 온ë¼ì¸ ìƒíƒœê°€ "
+"아닙니다. 온ë¼ì¸ cpuset: %(online)s, ìš”ì²­ëœ cpuset: %(req)s"
+
msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range."
msgstr "올바르지 ì•Šì€ vcpu_pin_set 구성, 하ì´í¼ë°”ì´ì € cpu 범위ì—ì„œ 벗어남."
@@ -2150,6 +2293,13 @@ msgid ""
msgstr ""
"ì´ íŒŒì¼ì˜ image_file_url:<list entry name> 섹션ì—ì„œ êµ¬ì„±ëœ íŒŒì¼ ì‹œìŠ¤í…œ 목ë¡"
+msgid ""
+"Live migration can not be used without shared storage except a booted from "
+"volume VM which does not have a local disk."
+msgstr ""
+"공유 스토리지가 없으면 ë¼ì´ë¸Œ 마ì´ê·¸ë ˆì´ì…˜ì„ 사용할 수 없습니다(로컬 디스í¬"
+"ê°€ 없는 볼륨ì—ì„œ ë¶€íŒ…ëœ VMì€ ì œì™¸). "
+
msgid "Live migration is supported starting with Hyper-V Server 2012"
msgstr "Hyper-V Server 2012부터 ë¼ì´ë¸Œ 마ì´ê·¸ë ˆì´ì…˜ì´ 지ì›ë¨ "
@@ -2292,6 +2442,9 @@ msgstr "사용 안함 ì´ìœ  필드가 누ë½ë¨"
msgid "Missing flavorRef attribute"
msgstr "flavorRef ì†ì„± 누ë½"
+msgid "Missing forced_down field"
+msgstr "forced_down í•„ë“œ 누ë½"
+
msgid "Missing imageRef attribute"
msgstr "imageRef ì†ì„± 누ë½"
@@ -2385,6 +2538,10 @@ msgid "Netmask to push into openvpn config"
msgstr "openvpn êµ¬ì„±ì— í‘¸ì‹œí•  넷마스í¬"
#, python-format
+msgid "Network \"%(val)s\" is not valid in field %(attr)s"
+msgstr "ë„¤íŠ¸ì›Œí¬ \"%(val)s\"ì´(ê°€) í•„ë“œ %(attr)sì—ì„œ 올바르지 ì•ŠìŒ"
+
+#, python-format
msgid "Network %(network_id)s could not be found."
msgstr "%(network_id)s 네트워í¬ë¥¼ ì°¾ì„ ìˆ˜ 없습니다. "
@@ -2460,6 +2617,10 @@ msgstr "스왑하려면 새 ë³¼ë¥¨ì„ ë¶„ë¦¬í•´ì•¼ 합니다."
msgid "New volume must be the same size or larger."
msgstr "새 ë³¼ë¥¨ì€ ë™ì¼í•œ í¬ê¸°ì´ê±°ë‚˜ ì´ìƒì´ì–´ì•¼ 합니다."
+#, python-format
+msgid "No Block Device Mapping with id %(id)s."
+msgstr "IDê°€ %(id)sì¸ ë¸”ë¡ ë””ë°”ì´ìŠ¤ ë§µí•‘ì´ ì—†ìŠµë‹ˆë‹¤. "
+
msgid "No CIDR requested"
msgstr "ìš”ì²­ëœ CIDRì´ ì—†ìŒ"
@@ -2473,6 +2634,11 @@ msgstr "요청 본문 ì—†ìŒ"
msgid "No Unique Match Found."
msgstr "고유한 ì¼ì¹˜ì ì„ 찾지 못했습니다."
+msgid "No access_url in connection_info. Cannot validate protocol"
+msgstr ""
+"connection_infoì— access_urlì´ ì—†ìŠµë‹ˆë‹¤. í”„ë¡œí† ì½œì„ ìœ íš¨ì„± ê²€ì¦í•  수 없습니"
+"다. "
+
msgid "No adminPass was specified"
msgstr "adminPassê°€ 지정ë˜ì§€ 않았ìŒ"
@@ -2555,6 +2721,10 @@ msgstr "%s URLì— ëŒ€í•´ ì¼ì¹˜í•˜ëŠ” ID를 ì°¾ì„ ìˆ˜ 없습니다."
msgid "No more available networks."
msgstr "ë” ì´ìƒ 사용 가능한 네트워í¬ê°€ 없습니다."
+#, python-format
+msgid "No mount points found in %(root)s of %(image)s"
+msgstr "%(image)sì˜ %(root)sì— ë§ˆìš´íŠ¸ 지ì ì´ ì—†ìŒ"
+
msgid "No networks defined."
msgstr "ì •ì˜ëœ 네트워í¬ê°€ 없습니다. "
@@ -2581,9 +2751,6 @@ msgstr "요청 본문 ì—†ìŒ"
msgid "No root disk defined."
msgstr "루트 디스í¬ê°€ ì •ì˜ë˜ì§€ 않았습니다."
-msgid "No rule for the specified parameters."
-msgstr "ì§€ì •ëœ ë§¤ê°œë³€ìˆ˜ì— ëŒ€í•œ ê·œì¹™ì´ ì—†ìŠµë‹ˆë‹¤. "
-
msgid "No suitable network for migrate"
msgstr "마ì´ê·¸ë ˆì´ì…˜ì„ 위한 ì§€ì† ê°€ëŠ¥í•œ ë„¤íŠ¸ì›Œí¬ ì—†ìŒ"
@@ -2617,10 +2784,6 @@ msgstr "%(host)sì— ì‚¬ìš© 가능한 í¬íŠ¸ë¥¼ íšë“í•  수 ì—†ìŒ"
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "%(host)s:%(port)d, %(error)sì„(를) ë°”ì¸ë“œí•  수 ì—†ìŒ"
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "%s ì´ë¯¸ì§€ì˜ ì†ì„±ì„ 수정하ë„ë¡ í—ˆìš©í•˜ì§€ ì•ŠìŒ"
-
msgid "Not an rbd snapshot"
msgstr "rbd ìŠ¤ëƒ…ìƒ·ì´ ì•„ë‹˜"
@@ -2668,6 +2831,10 @@ msgid "Old volume is attached to a different instance."
msgstr "ì´ì „ ë³¼ë¥¨ì´ ë‹¤ë¥¸ ì¸ìŠ¤í„´ìŠ¤ì— ì ‘ì†ë˜ì–´ 있습니다."
#, python-format
+msgid "One or more hosts already in availability zone(s) %s"
+msgstr "하나 ì´ìƒì˜ 호스트가 ì´ë¯¸ 가용성 구역 %sì— ìžˆìŒ"
+
+#, python-format
msgid ""
"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
"%(unit_string)s."
@@ -2692,9 +2859,6 @@ msgstr ""
msgid "Only host parameter can be specified"
msgstr "호스트 매개변수만 지정할 수 있ìŒ"
-msgid "Only instances implemented"
-msgstr "ì¸ìŠ¤í„´ìŠ¤ë§Œ 구현ë¨"
-
msgid "Only root certificate can be retrieved."
msgstr "루트 ì¸ì¦ì„œë§Œ 검색할 수 있습니다. "
@@ -2771,6 +2935,15 @@ msgid "Page size %(pagesize)s is not supported by the host."
msgstr "호스트ì—ì„œ 페ì´ì§€ í¬ê¸° %(pagesize)sì„(를) 지ì›í•˜ì§€ 않습니다."
#, python-format
+msgid ""
+"Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. "
+"Check your Neutron configuration to validate that the macvtap parameters are "
+"correct."
+msgstr ""
+"매개변수 %(missing_params)sì´(ê°€) vif %(vif_id)sì— ëŒ€í•œ vif_detailsì— ì—†ìŠµë‹ˆ"
+"다. Neutron êµ¬ì„±ì„ í™•ì¸í•˜ì—¬ macvtap 매개변수가 올바른지 유효성 ê²€ì¦í•˜ì‹­ì‹œì˜¤. "
+
+#, python-format
msgid "Path %s must be LVM logical volume"
msgstr "경로 %sì€(는) LVM ë…¼ë¦¬ì  ë³¼ë¥¨ì´ì–´ì•¼ 함"
@@ -2854,6 +3027,10 @@ msgstr "ì œê³µëœ watchdog 조치(%(action)s)ê°€ 지ì›ë˜ì§€ 않습니다."
msgid "QEMU guest agent is not enabled"
msgstr "QEMU 게스트 ì—ì´ì „트가 사용ë˜ì§€ ì•ŠìŒ"
+#, python-format
+msgid "Quiescing is not supported in instance %(instance_id)s"
+msgstr "ì¸ìŠ¤í„´ìŠ¤ %(instance_id)sì—ì„œ Quiesceê°€ 지ì›ë˜ì§€ ì•ŠìŒ"
+
msgid "Quota"
msgstr "Quota"
@@ -2865,6 +3042,14 @@ msgid "Quota could not be found"
msgstr "í• ë‹¹ëŸ‰ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ"
#, python-format
+msgid ""
+"Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s "
+"of %(allowed)s %(overs)s"
+msgstr ""
+"%(overs)sì— ëŒ€í•œ 할당량 초과: %(req)sì„(를) 요청했지만 ì´ë¯¸ %(allowed)s "
+"%(overs)s 중 %(used)sì„(를) 사용했습니다. "
+
+#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr "ìžì›ì— 대한 할당량 초과: %(overs)s"
@@ -2920,6 +3105,14 @@ msgstr ""
#, python-format
msgid ""
+"Quota limit %(limit)s for %(resource)s must be in the range of -1 and "
+"%(max)s."
+msgstr ""
+"%(resource)sì˜ í• ë‹¹ëŸ‰ 한계 %(limit)sì€(는) -1 ê³¼ %(max)s 사ì´ì˜ ë²”ìœ„ì— ìžˆì–´"
+"야 합니다. "
+
+#, python-format
+msgid ""
"Quota limit %(limit)s for %(resource)s must be less than or equal to "
"%(maximum)s."
msgstr ""
@@ -2948,6 +3141,14 @@ msgstr "%(project_id)s 프로ì íŠ¸ì— 대한 할당 ì‚¬ìš©ëŸ‰ì„ ì°¾ì„ ìˆ˜ ì—†
msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr "VBD %sì„(를) 언플러그하려는 최대 ìž¬ì‹œë„ íšŸìˆ˜ì— ë„달했ìŒ"
+#, python-format
+msgid ""
+"Relative blockcommit support was not detected. Libvirt '%s' or later is "
+"required for online deletion of file/network storage-backed volume snapshots."
+msgstr ""
+"ìƒëŒ€ blockcommit 지ì›ì´ 발견ë˜ì§€ 않았습니다. 온ë¼ì¸ìœ¼ë¡œ 파ì¼/ë„¤íŠ¸ì›Œí¬ ìŠ¤í† ë¦¬"
+"지 백업 볼륨 ìŠ¤ëƒ…ìƒ·ì„ ì‚­ì œí•˜ë ¤ë©´ Libvirt '%s' ì´ìƒì´ 필요합니다. "
+
msgid "Request body and URI mismatch"
msgstr "요청 본문 ë° URI 불ì¼ì¹˜"
@@ -3206,10 +3407,21 @@ msgstr "호스트 %(host)s ë°”ì´ë„ˆë¦¬ %(binary)sì¸ ì„œë¹„ìŠ¤ê°€ 존재합니ë
msgid "Service with host %(host)s topic %(topic)s exists."
msgstr "호스트 %(host)s 주제 %(topic)sì¸ ì„œë¹„ìŠ¤ê°€ 존재합니다. "
+msgid "Set admin password is not supported"
+msgstr "ì„¤ì •ëœ ê´€ë¦¬ 비밀번호가 지ì›ë˜ì§€ ì•ŠìŒ"
+
#, python-format
msgid "Shadow table with name %(name)s already exists."
msgstr "ì´ë¦„ì´ %(name)sì¸ ìƒˆë„ìš° í…Œì´ë¸”ì´ ì´ë¯¸ 존재합니다. "
+#, python-format
+msgid "Share '%s' is not supported"
+msgstr "공유 '%s'ì€(는) 지ì›ë˜ì§€ ì•ŠìŒ"
+
+#, python-format
+msgid "Share level '%s' cannot have share configured"
+msgstr "공유 레벨 '%s'ì—는 공유를 구성할 수 ì—†ìŒ"
+
msgid "Should we use a CA for each project?"
msgstr "ê° í”„ë¡œì íŠ¸ì—ì„œ CA 사용 하실껀가요?"
@@ -3220,9 +3432,6 @@ msgstr ""
"resize2fs를 사용한 íŒŒì¼ ì‹œìŠ¤í…œ ì¶•ì†Œì— ì‹¤íŒ¨í–ˆìŠµë‹ˆë‹¤. 사용ìžì˜ 디스í¬ì—충분한 "
"여유 ê³µê°„ì´ ìžˆëŠ”ì§€ 확ì¸í•˜ì‹­ì‹œì˜¤."
-msgid "Signature not provided"
-msgstr "ì„œëª…ì´ ì œê³µë˜ì§€ ì•ŠìŒ"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "%(snapshot_id)s ìŠ¤ëƒ…ìƒ·ì„ ì°¾ì„ ìˆ˜ 없습니다. "
@@ -3339,10 +3548,16 @@ msgstr ""
"ì´ ëª…ë ¹ì„ ì‹¤í–‰í•˜ê¸° ì „ì— 'nova-manage db sync'를 사용하여 ë°ì´í„°ë² ì´ìŠ¤ë¥¼ìž‘성하"
"십시오."
+msgid "The backlog must be more than 0"
+msgstr "백로그는 0보다 커야 함"
+
#, python-format
msgid "The console port range %(min_port)d-%(max_port)d is exhausted."
msgstr "콘솔 í¬íŠ¸ 범위 %(min_port)d-%(max_port)dì´(ê°€) 소진ë˜ì—ˆìŠµë‹ˆë‹¤."
+msgid "The created instance's disk would be too small."
+msgstr "ìž‘ì„±ëœ ì¸ìŠ¤í„´ìŠ¤ì˜ 디스í¬ê°€ 너무 작습니다. "
+
msgid "The current driver does not support preserving ephemeral partitions."
msgstr "현재 ë“œë¼ì´ë²„는 ìž„ì‹œ 파티션 유지를 지ì›í•˜ì§€ 않습니다."
@@ -3353,6 +3568,13 @@ msgstr "ë°±ì—”ë“œì— ê¸°ë³¸ PBM ì •ì±…ì´ ì—†ìŠµë‹ˆë‹¤."
msgid "The firewall filter for %s does not exist"
msgstr "%sì˜ ë°©í™”ë²½ í•„í„°ê°€ ì—†ìŒ"
+msgid "The floating IP request failed with a BadRequest"
+msgstr "ë¶€ë™ IP ìš”ì²­ì´ ì‹¤íŒ¨í•˜ì—¬ BadRequestê°€ ìƒì„±ë¨"
+
+#, python-format
+msgid "The group %(group_name)s must be configured with an id."
+msgstr "%(group_name)s ê·¸ë£¹ì€ ID와 함께 구성해야 합니다. "
+
msgid "The input is not a string or unicode"
msgstr "ìž…ë ¥ì´ ë¬¸ìžì—´ ë˜ëŠ” Unicodeê°€ 아님"
@@ -3406,6 +3628,10 @@ msgstr ""
"ë„¤íŠ¸ì›Œí¬ ë²”ìœ„ê°€ %(num_networks)sì— ë§žì¶”ê¸°ì—는 충분히 í¬ì§€ 않습니다. ë„¤íŠ¸ì›Œí¬ "
"í¬ê¸°ëŠ” %(network_size)s입니다."
+#, python-format
+msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
+msgstr "ì •ì˜ëœ í¬íŠ¸ 수 %(ports)dì´(ê°€) 한계를 초과함: %(quota)d"
+
msgid "The only partition should be partition 1."
msgstr "유ì¼í•œ íŒŒí‹°ì…˜ì€ íŒŒí‹°ì…˜ 1ì´ì–´ì•¼ 합니다."
@@ -3445,6 +3671,10 @@ msgid ""
msgstr ""
"서비스 그룹 ë“œë¼ì´ë²„ %(driver)sì˜ ì„œë¹„ìŠ¤ê°€ ì¼ì‹œì ìœ¼ë¡œì‚¬ìš© 불가능합니다."
+#, python-format
+msgid "The specified cluster '%s' was not found in vCenter"
+msgstr "ì§€ì •ëœ í´ëŸ¬ìŠ¤í„° '%s'ì„(를) vCenterì—ì„œ ì°¾ì„ ìˆ˜ ì—†ìŒ"
+
msgid ""
"The string containing the reason for disabling the service contains invalid "
"characters or is too long."
@@ -3527,6 +3757,17 @@ msgid "There are not enough hosts available."
msgstr "사용 가능한 호스트가 부족합니다."
#, python-format
+msgid ""
+"There are still %(count)i unmigrated flavor records. Migration cannot "
+"continue until all instance flavor records have been migrated to the new "
+"format. Please run `nova-manage db migrate_flavor_data' first."
+msgstr ""
+"여전히 %(count)iê°œì˜ í”Œë ˆì´ë²„ 레코드가 마ì´ê·¸ë ˆì´ì…˜ë˜ì§€ 않았습니다. 모든 ì¸ìŠ¤"
+"턴스 플레ì´ë²„ 레코드가 새로운 형ì‹ìœ¼ë¡œ 마ì´ê·¸ë ˆì´ì…˜ë  때까지 마ì´ê·¸ë ˆì´ì…˜ì„ "
+"계ì†í•  수 없습니다. 먼저 `nova-manage db migrate_flavor_data'를 실행하십시"
+"오. "
+
+#, python-format
msgid "There is no such action: %s"
msgstr "해당 조치가 ì—†ìŒ: %s"
@@ -3563,8 +3804,10 @@ msgstr "%s 디바ì´ìŠ¤ê°€ 작성ë˜ê¸°ë¥¼ 기다리다가 제한시간 초과í•
msgid "Timeout waiting for response from cell"
msgstr "ì…€ì˜ ì‘ë‹µì„ ëŒ€ì‹œí•˜ëŠ” ì¤‘ì— ì œí•œì‹œê°„ 초과"
-msgid "Timestamp failed validation."
-msgstr "시간소ì¸ì´ 유효성 ê²€ì¦ì— 실패했습니다. "
+#, python-format
+msgid "Timeout while checking if we can live migrate to host: %s"
+msgstr ""
+"호스트로 ë¼ì´ë¸Œ 마ì´ê·¸ë ˆì´ì…˜í•  수 있는지 확ì¸í•˜ëŠ” ì¤‘ì— ì œí•œì‹œê°„ 초과 ë°œìƒ: %s"
msgid "To and From ports must be integers"
msgstr "발신 ë° ìˆ˜ì‹  í¬íŠ¸ëŠ” 정수여야 함"
@@ -3579,18 +3822,12 @@ msgid ""
msgstr ""
"너무 ë§Žì€ IP 주소가 ìƒì„±ë©ë‹ˆë‹¤. ìƒì„±ëœ 숫ìžë¥¼ 줄ì´ë ¤ë©´ /%sì„(를)늘리십시오. "
-msgid "Too many failed authentications."
-msgstr "실패한 ì¸ì¦ì´ 너무 많습니다. "
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr "ICMP 프로토콜 ìœ í˜•ì˜ ê²½ìš° 유형 ë° ì½”ë“œëŠ” 정수여야 함"
msgid "UUID is required to delete Neutron Networks"
msgstr "Neutron 네트워í¬ë¥¼ 삭제하기 위해 UUIDê°€ 필요합니다. "
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "IP 주소를 연관시킬 수 없습니다. fixed_ips가 없습니다. "
-
msgid "Unable to authenticate Ironic client."
msgstr "ì•„ì´ë¡œë‹‰ í´ë¼ì´ì–¸íŠ¸ë¥¼ ì¸ì¦í•  수 없습니다."
@@ -3712,6 +3949,12 @@ msgstr "dns ë„ë©”ì¸ì„ 가져올 수 ì—†ìŒ"
msgid "Unable to get dns entry"
msgstr "dns í•­ëª©ì„ ê°€ì ¸ì˜¬ 수 ì—†ìŒ"
+msgid "Unable to get host UUID: /etc/machine-id does not exist"
+msgstr "호스트 UUID를 가져올 수 ì—†ìŒ: /etc/machine-idê°€ 존재하지 ì•ŠìŒ"
+
+msgid "Unable to get host UUID: /etc/machine-id is empty"
+msgstr "호스트 UUID를 가져올 수 ì—†ìŒ: /etc/machine-idê°€ 비어 있ìŒ"
+
msgid "Unable to get rdp console, functionality not implemented"
msgstr "rdp ì½˜ì†”ì„ ê°€ì ¸ì˜¬ 수 ì—†ìŒ. ê¸°ëŠ¥ì´ êµ¬í˜„ë˜ì§€ ì•ŠìŒ"
@@ -3872,9 +4115,6 @@ msgstr ""
msgid "Unknown delete_info type %s"
msgstr "알 수 없는 delete_info 유형: %s"
-msgid "Unknown error occurred."
-msgstr "ì•Œ 수 없는 오류가 ë°œìƒí–ˆìŠµë‹ˆë‹¤."
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "알 수 없는 image_type=%s"
@@ -4012,6 +4252,10 @@ msgstr ""
"%(property)s 매개변수 ê·¸ë£¹ì— ëŒ€í•œ ê°’(%(value)s)ì´ ì˜¬ë°”ë¥´ì§€ 않습니다. 컨í…츠"
"는 '%(allowed)s'(으)ë¡œ 제한ë©ë‹ˆë‹¤. "
+#, python-format
+msgid "Value must be >= 0 for field %s"
+msgstr "ê°’ì€ í•„ë“œ %sì— ëŒ€í•´ 0 ì´ìƒì´ì–´ì•¼ 함"
+
msgid "Value required for 'scality_sofs_config'"
msgstr "'scality_sofs_config'ì— í•„ìš”í•œ ê°’"
@@ -4046,6 +4290,10 @@ msgid "Virtual machine mode '%(vmmode)s' is not recognised"
msgstr "ê°€ìƒ ë¨¸ì‹  모드 '%(vmmode)s'ì´(ê°€) ì¸ì‹ë˜ì§€ ì•ŠìŒ"
#, python-format
+msgid "Virtual machine mode '%s' is not valid"
+msgstr "ê°€ìƒ ë¨¸ì‹  모드 '%s'ì´(ê°€) 올바르지 ì•ŠìŒ"
+
+#, python-format
msgid ""
"Virtual switch associated with the network adapter %(adapter)s not found."
msgstr "%(adapter)s ë„¤íŠ¸ì›Œí¬ ì–´ëŒ‘í„°ì™€ ì—°ê´€ëœ ê°€ìƒ ìŠ¤ìœ„ì¹˜ë¥¼ ì°¾ì„ ìˆ˜ 없습니다. "
@@ -4082,6 +4330,14 @@ msgid ""
"Volume encryption is not supported for %(volume_type)s volume %(volume_id)s"
msgstr "볼륨 암호화는 %(volume_type)s 볼륨 %(volume_id)s 를 지ì›í•˜ì§€ 않습니다"
+#, python-format
+msgid ""
+"Volume is smaller than the minimum size specified in image metadata. Volume "
+"size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes."
+msgstr ""
+"ë³¼ë¥¨ì´ ì´ë¯¸ì§€ 메타ë°ì´í„°ì—ì„œ ì§€ì •ëœ ìµœì†Œ í¬ê¸°ë³´ë‹¤ 작습니다. 볼륨í¬ê¸°ëŠ” "
+"%(volume_size)ië°”ì´íŠ¸ì´ê³  최소 í¬ê¸°ëŠ” %(image_min_disk)ië°”ì´íŠ¸ìž…니다. "
+
msgid "Volume must be attached in order to detach."
msgstr "분리하려면 ë³¼ë¥¨ì´ ì²¨ë¶€ë˜ì–´ì•¼ 합니다. "
@@ -4089,10 +4345,6 @@ msgid "Volume resource quota exceeded"
msgstr "볼륨 리소스 quota를 초과하였습니다"
#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
-msgstr "볼륨ì—ì„œ ë¸”ë¡ í¬ê¸°ë¥¼ 설정하지만 libvirt '%s' ì´ìƒì´ 필요합니다. "
-
-#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
@@ -4138,12 +4390,18 @@ msgstr "%(res)s ìžì›ì—ì„œ 올바르지 ì•Šì€ í• ë‹¹ëŸ‰ 메소드 %(method)sì
msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
msgstr "ìž˜ëª»ëœ ìœ í˜•ì˜ í›„í¬ ë©”ì†Œë“œìž„. 'pre' ë° 'post' 유형만 허용ë¨"
+msgid "X-Forwarded-For is missing from request."
+msgstr "X-Forwarded-Forê°€ 요청ì—ì„œ 누ë½ë˜ì—ˆìŠµë‹ˆë‹¤. "
+
msgid "X-Instance-ID header is missing from request."
msgstr "X-Instance-ID í—¤ë”ê°€ 요청ì—ì„œ 누ë½ë˜ì—ˆìŠµë‹ˆë‹¤. "
msgid "X-Instance-ID-Signature header is missing from request."
msgstr "X-Instance-ID-Signature í—¤ë”ê°€ 요청ì—ì„œ 누ë½ë˜ì—ˆìŠµë‹ˆë‹¤."
+msgid "X-Metadata-Provider is missing from request."
+msgstr "X-Metadata-Providerê°€ 요청ì—ì„œ 누ë½ë˜ì—ˆìŠµë‹ˆë‹¤. "
+
msgid "X-Tenant-ID header is missing from request."
msgstr "X-Tenant-ID í—¤ë”ê°€ 요청ì—ì„œ 누ë½ë˜ì—ˆìŠµë‹ˆë‹¤."
@@ -4169,6 +4427,17 @@ msgstr "LVM ì´ë¯¸ì§€ë¥¼ 사용하려면 images_volume_group 플래그를 지정
msgid ""
"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
"your destination node does not support retrieving listen addresses. In "
+"order for live migration to work properly you must either disable serial "
+"console or upgrade your libvirt version."
+msgstr ""
+"libvirt ë²„ì „ì´ VIR_DOMAIN_XML_MIGRATABLE 플래그를 지ì›í•˜ì§€ 않거나 ëŒ€ìƒ ë…¸ë“œ"
+"ê°€ ì²­ì·¨ 주소 ê²€ìƒ‰ì„ ì§€ì›í•˜ì§€ 않습니다. ë¼ì´ë¸Œ 마ì´ê·¸ë ˆì´ì…˜ì´ì œëŒ€ë¡œ ìž‘ë™í•˜ë ¤"
+"ë©´ ì§ë ¬ ì½˜ì†”ì„ ì‚¬ìš© 안함으로 설정하거나 libvirt ë²„ì „ì„ ì—…ê·¸ë ˆì´ë“œí•´ì•¼ 합니"
+"다. "
+
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
+"your destination node does not support retrieving listen addresses. In "
"order for live migration to work properly, you must configure the graphics "
"(VNC and/or SPICE) listen addresses to be either the catch-all address "
"(0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)."
@@ -4210,9 +4479,6 @@ msgstr "block_device_mappingì€ ëª©ë¡ì´ì–´ì•¼ 함"
msgid "block_device_mapping_v2 must be a list"
msgstr "block_device_mapping_v2는 목ë¡ì´ì–´ì•¼ 함"
-msgid "can't build a valid rule"
-msgstr "올바른 ê·œì¹™ì„ êµ¬ì„±í•  수 ì—†ìŒ"
-
msgid "cannot delete non-existent key"
msgstr "존재하지 않는 키를 삭제할 수 ì—†ìŒ"
@@ -4222,6 +4488,9 @@ msgstr "ìž„ì˜ì˜ 키를 저장할 수 ì—†ìŒ"
msgid "cannot understand JSON"
msgstr "JSONì„ ì´í•´í•  수 ì—†ìŒ"
+msgid "cell_uuid must be set"
+msgstr "cell_uuid를 설정해야 함"
+
msgid "clone() is not implemented"
msgstr "clone()ì´ êµ¬í˜„ë˜ì§€ ì•ŠìŒ"
@@ -4325,13 +4594,6 @@ msgstr "ì´ë¯¸ì§€"
msgid "image already mounted"
msgstr "ì´ë¯¸ì§€ê°€ ì´ë¯¸ 마운트ë˜ì—ˆìŒ"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "%(now)sì— %(instance)sì˜ ì´ë¯¸ì§€"
-
-msgid "imageLocation is required"
-msgstr "imageLocation í•„ìš”"
-
msgid "index"
msgstr "ì¸ë±ìŠ¤"
@@ -4413,9 +4675,6 @@ msgstr "nbd 디바ì´ìŠ¤ %sì´(ê°€) 표시ë˜ì§€ ì•ŠìŒ"
msgid "nbd unavailable: module not loaded"
msgstr "nbd 사용 불가능: ëª¨ë“ˆì´ ë¡œë“œë˜ì§€ 않았ìŒ"
-msgid "need group_name or group_id"
-msgstr "group_name ë˜ëŠ” group_idê°€ 필요함"
-
msgid "network"
msgstr "네트워í¬"
@@ -4433,18 +4692,21 @@ msgstr "노드"
msgid "not able to execute ssh command: %s"
msgstr "ssh ëª…ë ¹ì„ ì‹¤í–‰í•  수 ì—†ìŒ: %s"
+msgid ""
+"nova-idmapshift is a tool that properly sets the ownership of a filesystem "
+"for use with linux user namespaces. This tool can only be used with linux "
+"lxc containers. See the man page for details."
+msgstr ""
+"nova-idmapshift는 linux ì‚¬ìš©ìž ë„¤ìž„ìŠ¤íŽ˜ì´ìŠ¤ì™€ 함께 사용하기 위해 íŒŒì¼ ì‹œìŠ¤í…œ"
+"ì˜ ì†Œìœ ê¶Œì„ ì ì ˆí•˜ê²Œ 설정하는 ë„구입니다. ì´ ë„구는 linux lxc 컨테ì´ë„ˆì—만 사"
+"ìš©í•  수 있습니다. ìžì„¸í•œ ë‚´ìš©ì€ ê¸°ë³¸ 페ì´ì§€ë¥¼ 참조하십시오. "
+
msgid "onSharedStorage must be specified."
msgstr "onSharedStorage를 지정해야 합니다."
-msgid "only group \"all\" is supported"
-msgstr "\"all\" 그룹만 지ì›ë¨"
-
msgid "operation time out"
msgstr "ì¡°ìž‘ ì œí•œì‹œê°„ì´ ì´ˆê³¼ë¨"
-msgid "operation_type must be add or remove"
-msgstr "operation_typeì´ ì¶”ê°€ ë˜ëŠ” 제거여야 함"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr "os-getConsoleOutputì´ ìž˜ëª» ë˜ì—ˆê±°ë‚˜ 요청 본문ì—ì„œ 누ë½ë˜ì—ˆìŒ"
@@ -4484,9 +4746,6 @@ msgstr ""
"read_deleted는 'no', 'yes', 'only' 중 하나만 가능하며, %rì€(는) 사용하지 못 "
"합니다."
-msgid "resource_id and tag are required"
-msgstr "ìžì› ID 와 태그가 필요함"
-
msgid "rpc_port must be integer"
msgstr "rpc_port는 정수여야 함"
@@ -4551,9 +4810,6 @@ msgstr "지ì›ë˜ì§€ 않는 í•„ë“œ: %s"
msgid "user"
msgstr "user"
-msgid "user or group not specified"
-msgstr "ì‚¬ìš©ìž ë˜ëŠ” ê·¸ë£¹ì´ ì§€ì •ë˜ì§€ ì•ŠìŒ"
-
msgid "uuid"
msgstr "uuid"
diff --git a/nova/locale/nova-log-error.pot b/nova/locale/nova-log-error.pot
index 9d554bcaf9..996e7c70bb 100644
--- a/nova/locale/nova-log-error.pot
+++ b/nova/locale/nova-log-error.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
+"POT-Creation-Date: 2016-02-08 07:01+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -58,16 +58,16 @@ msgstr ""
msgid "Failed to roll back reservations %s"
msgstr ""
-#: nova/service.py:145
+#: nova/service.py:134
#, python-format
msgid "Unable to find a service record to update for %(binary)s on %(host)s"
msgstr ""
-#: nova/service.py:314
+#: nova/service.py:303
msgid "Service error occurred during cleanup_host"
msgstr ""
-#: nova/service.py:331
+#: nova/service.py:320
#, python-format
msgid "Temporary directory is invalid: %s"
msgstr ""
@@ -77,12 +77,12 @@ msgstr ""
msgid "Unable to retrieve certificate with ID %(id)s: %(e)s"
msgstr ""
-#: nova/utils.py:636
+#: nova/utils.py:635
#, python-format
msgid "Invalid server_string: %s"
msgstr ""
-#: nova/utils.py:923
+#: nova/utils.py:922
#, python-format
msgid "Could not remove tmpdir: %s"
msgstr ""
@@ -107,43 +107,24 @@ msgstr ""
msgid "Couldn't lookup app: %s"
msgstr ""
-#: nova/api/ec2/__init__.py:96
-msgid "FaultWrapper error"
-msgstr ""
-
-#: nova/api/ec2/__init__.py:300
-#, python-format
-msgid "Keystone failure: %s"
-msgstr ""
-
-#: nova/api/ec2/__init__.py:556
-#, python-format
-msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
-msgstr ""
-
-#: nova/api/ec2/__init__.py:583
-#, python-format
-msgid "Environment: %s"
-msgstr ""
-
-#: nova/api/metadata/handler.py:163 nova/api/metadata/handler.py:171
+#: nova/api/metadata/handler.py:164 nova/api/metadata/handler.py:172
#, python-format
msgid "Failed to get metadata for IP: %s"
msgstr ""
-#: nova/api/metadata/handler.py:242
+#: nova/api/metadata/handler.py:243
#, python-format
msgid ""
"Failed to get instance id for metadata request, provider %(provider)s "
"networks %(networks)s requester %(requester)s. Error: %(error)s"
msgstr ""
-#: nova/api/metadata/handler.py:312 nova/api/metadata/handler.py:320
+#: nova/api/metadata/handler.py:313 nova/api/metadata/handler.py:321
#, python-format
msgid "Failed to get metadata for instance id: %s"
msgstr ""
-#: nova/api/openstack/__init__.py:102
+#: nova/api/openstack/__init__.py:108
#, python-format
msgid "Caught error: %s"
msgstr ""
@@ -164,7 +145,7 @@ msgstr ""
msgid "Unexpected exception in API method"
msgstr ""
-#: nova/api/openstack/wsgi.py:533
+#: nova/api/openstack/wsgi.py:432
#, python-format
msgid "Exception handling resource: %s"
msgstr ""
@@ -304,12 +285,12 @@ msgstr ""
msgid "Unknown cell '%(cell_name)s' when trying to update capacities"
msgstr ""
-#: nova/cmd/all.py:63
+#: nova/cmd/all.py:62
#, python-format
msgid "Failed to load %s-api"
msgstr ""
-#: nova/cmd/all.py:69 nova/cmd/all.py:92
+#: nova/cmd/all.py:68 nova/cmd/all.py:91
#, python-format
msgid "Failed to load %s"
msgstr ""
@@ -324,7 +305,7 @@ msgstr ""
msgid "No db access allowed in nova-dhcpbridge: %s"
msgstr ""
-#: nova/cmd/dhcpbridge.py:143
+#: nova/cmd/dhcpbridge.py:151
msgid "Environment variable 'NETWORK_ID' must be set."
msgstr ""
@@ -333,22 +314,22 @@ msgstr ""
msgid "No db access allowed in nova-network: %s"
msgstr ""
-#: nova/compute/api.py:560
+#: nova/compute/api.py:562
msgid "Failed to set instance name using multi_instance_display_name_template."
msgstr ""
-#: nova/compute/api.py:1555 nova/compute/manager.py:2550
+#: nova/compute/api.py:1548 nova/compute/manager.py:2556
msgid ""
"Something wrong happened when trying to delete snapshot from shelved "
"instance."
msgstr ""
-#: nova/compute/api.py:3226
+#: nova/compute/api.py:3309
#, python-format
msgid "Instance compute service state on %s expected to be down, but it was up."
msgstr ""
-#: nova/compute/api.py:3931
+#: nova/compute/api.py:4048
msgid "Failed to update usages deallocating security group"
msgstr ""
@@ -362,195 +343,200 @@ msgstr ""
msgid "Error while trying to clean up image %s"
msgstr ""
-#: nova/compute/manager.py:883
+#: nova/compute/manager.py:880
msgid "Failed to check if instance shared"
msgstr ""
-#: nova/compute/manager.py:971 nova/compute/manager.py:1046
+#: nova/compute/manager.py:968 nova/compute/manager.py:1043
msgid "Failed to complete a deletion"
msgstr ""
-#: nova/compute/manager.py:1016
+#: nova/compute/manager.py:1013
msgid "Failed to cleanup snapshot."
msgstr ""
-#: nova/compute/manager.py:1107
+#: nova/compute/manager.py:1104
msgid "Failed to unpause instance"
msgstr ""
-#: nova/compute/manager.py:1119
+#: nova/compute/manager.py:1116
msgid "Failed to stop instance"
msgstr ""
-#: nova/compute/manager.py:1131
+#: nova/compute/manager.py:1128
msgid "Failed to start instance"
msgstr ""
-#: nova/compute/manager.py:1142
+#: nova/compute/manager.py:1139
msgid "Vifs plug failed"
msgstr ""
-#: nova/compute/manager.py:1162
+#: nova/compute/manager.py:1159
msgid "Failed to revert crashed migration"
msgstr ""
-#: nova/compute/manager.py:1487
+#: nova/compute/manager.py:1479
#, python-format
msgid "Error: %s"
msgstr ""
-#: nova/compute/manager.py:1584
+#: nova/compute/manager.py:1578
#, python-format
msgid "Instance failed network setup after %(attempts)d attempt(s)"
msgstr ""
-#: nova/compute/manager.py:1756
+#: nova/compute/manager.py:1760
msgid "Instance failed block device setup"
msgstr ""
-#: nova/compute/manager.py:1983
+#: nova/compute/manager.py:1987
msgid "Unexpected build failure, not rescheduling build."
msgstr ""
-#: nova/compute/manager.py:2061 nova/compute/manager.py:2136
+#: nova/compute/manager.py:2066 nova/compute/manager.py:2142
msgid "Failed to allocate network(s)"
msgstr ""
-#: nova/compute/manager.py:2172
+#: nova/compute/manager.py:2178
msgid "Failure prepping block device"
msgstr ""
-#: nova/compute/manager.py:2187 nova/compute/manager.py:4355
+#: nova/compute/manager.py:2193 nova/compute/manager.py:4376
msgid "Instance failed to spawn"
msgstr ""
-#: nova/compute/manager.py:2215
+#: nova/compute/manager.py:2221 nova/conductor/manager.py:261
msgid "Failed to deallocate networks"
msgstr ""
-#: nova/compute/manager.py:2235
+#: nova/compute/manager.py:2241
msgid "Failed to deallocate network for instance."
msgstr ""
-#: nova/compute/manager.py:2476 nova/compute/manager.py:3989
-#: nova/compute/manager.py:6419
+#: nova/compute/manager.py:2482 nova/compute/manager.py:4009
+#: nova/compute/manager.py:6468
msgid "Setting instance vm_state to ERROR"
msgstr ""
-#: nova/compute/manager.py:2756 nova/compute/manager.py:5415
+#: nova/compute/manager.py:2762 nova/compute/manager.py:5456
#, python-format
msgid "Failed to get compute_info for %s"
msgstr ""
-#: nova/compute/manager.py:3034
+#: nova/compute/manager.py:3055
#, python-format
msgid "Cannot reboot instance: %s"
msgstr ""
-#: nova/compute/manager.py:3265
+#: nova/compute/manager.py:3286
msgid "set_admin_password failed"
msgstr ""
-#: nova/compute/manager.py:3351
+#: nova/compute/manager.py:3370
msgid "Error trying to Rescue Instance"
msgstr ""
-#: nova/compute/manager.py:3425
+#: nova/compute/manager.py:3444
#, python-format
msgid "Migration %s is not found during confirmation"
msgstr ""
-#: nova/compute/manager.py:3788
+#: nova/compute/manager.py:3807
msgid "Error trying to reschedule"
msgstr ""
-#: nova/compute/manager.py:3995
+#: nova/compute/manager.py:4015
msgid "Failed to rollback quota for failed finish_resize"
msgstr ""
-#: nova/compute/manager.py:4678
+#: nova/compute/manager.py:4699
#, python-format
msgid "Failed to attach %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:4722
+#: nova/compute/manager.py:4743
#, python-format
msgid "Failed to detach volume %(volume_id)s from %(mp)s"
msgstr ""
-#: nova/compute/manager.py:4825
+#: nova/compute/manager.py:4849
#, python-format
msgid "Failed to swap volume %(old_volume_id)s for %(new_volume_id)s"
msgstr ""
-#: nova/compute/manager.py:4832
+#: nova/compute/manager.py:4856
#, python-format
msgid "Failed to connect to volume %(volume_id)s with volume at %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:4935
+#: nova/compute/manager.py:4961
#, python-format
msgid "allocate_port_for_instance returned %(ports)s ports"
msgstr ""
-#: nova/compute/manager.py:5159
+#: nova/compute/manager.py:5200
#, python-format
msgid "Pre live migration failed at %s"
msgstr ""
-#: nova/compute/manager.py:5177
+#: nova/compute/manager.py:5220
msgid "Live migration failed."
msgstr ""
-#: nova/compute/manager.py:5507
+#: nova/compute/manager.py:5552
msgid "An error occurred while deallocating network."
msgstr ""
-#: nova/compute/manager.py:5615
+#: nova/compute/manager.py:5665
msgid "An error occurred while refreshing the network cache."
msgstr ""
-#: nova/compute/manager.py:5771
+#: nova/compute/manager.py:5821
msgid "Periodic task failed to offload instance."
msgstr ""
-#: nova/compute/manager.py:5815
+#: nova/compute/manager.py:5865
#, python-format
msgid "Failed to generate usage audit for instance on host %s"
msgstr ""
-#: nova/compute/manager.py:6004
+#: nova/compute/manager.py:6054
msgid "Periodic sync_power_state task had an error while processing an instance."
msgstr ""
-#: nova/compute/manager.py:6137 nova/compute/manager.py:6146
-#: nova/compute/manager.py:6185 nova/compute/manager.py:6196
+#: nova/compute/manager.py:6187 nova/compute/manager.py:6196
+#: nova/compute/manager.py:6235 nova/compute/manager.py:6246
msgid "error during stop() in sync_power_state."
msgstr ""
-#: nova/compute/manager.py:6272
+#: nova/compute/manager.py:6322
#, python-format
-msgid "Error updating resources for node %(node)s: %(e)s"
+msgid "Error updating resources for node %(node)s."
msgstr ""
-#: nova/compute/manager.py:6294
+#: nova/compute/manager.py:6343
#, python-format
msgid "No compute node record for host %s"
msgstr ""
-#: nova/compute/manager.py:6661
+#: nova/compute/manager.py:6709
#, python-format
msgid "Exception while waiting completion of volume snapshots: %s"
msgstr ""
-#: nova/compute/rpcapi.py:353
+#: nova/compute/resource_tracker.py:504
+#, python-format
+msgid "Migration for instance %(uuid)s refers to another host's instance!"
+msgstr ""
+
+#: nova/compute/rpcapi.py:359
#, python-format
msgid ""
"Failed to extract compute RPC version from service history because I am "
"too old (minimum version is now %(version)i)"
msgstr ""
-#: nova/compute/rpcapi.py:360
+#: nova/compute/rpcapi.py:366
#, python-format
msgid ""
"Failed to extract compute RPC version from service history for version "
@@ -561,18 +547,18 @@ msgstr ""
msgid "Not all properties needed are implemented in the compute driver"
msgstr ""
-#: nova/conductor/manager.py:305
+#: nova/conductor/manager.py:328
#, python-format
msgid ""
"Migration of instance %(instance_id)s to host %(dest)s unexpectedly "
"failed."
msgstr ""
-#: nova/conductor/manager.py:471
+#: nova/conductor/manager.py:496
msgid "Unshelve attempted but an error has occurred"
msgstr ""
-#: nova/conductor/manager.py:474
+#: nova/conductor/manager.py:499
msgid "Unshelve attempted but vm_state not SHELVED or SHELVED_OFFLOADED"
msgstr ""
@@ -585,11 +571,11 @@ msgstr ""
msgid "Error starting xvp: %s"
msgstr ""
-#: nova/db/api.py:1682
+#: nova/db/api.py:1672
msgid "Failed to notify cells of bw_usage update"
msgstr ""
-#: nova/db/sqlalchemy/api.py:872
+#: nova/db/sqlalchemy/api.py:865
msgid "Failed to update usages bulk deallocating floating IP"
msgstr ""
@@ -603,32 +589,37 @@ msgstr ""
msgid "Exception while seeding instance_types table"
msgstr ""
-#: nova/image/glance.py:161
+#: nova/image/glance.py:176
msgid "Unable to determine the glance API version"
msgstr ""
-#: nova/image/glance.py:239
+#: nova/image/glance.py:253
#, python-format
msgid "Error contacting glance server '%(server)s' for '%(method)s', %(extra)s."
msgstr ""
-#: nova/image/glance.py:318
+#: nova/image/glance.py:284
#, python-format
msgid ""
"When loading the module %(module_str)s the following error occurred: "
"%(ex)s"
msgstr ""
-#: nova/image/glance.py:383
+#: nova/image/glance.py:349
#, python-format
msgid "Failed to instantiate the download handler for %(scheme)s"
msgstr ""
-#: nova/image/glance.py:403
+#: nova/image/glance.py:369
msgid "Download image error"
msgstr ""
-#: nova/image/glance.py:423
+#: nova/image/glance.py:400 nova/image/glance.py:422 nova/image/glance.py:438
+#, python-format
+msgid "Image signature verification failed for image: %s"
+msgstr ""
+
+#: nova/image/glance.py:442
#, python-format
msgid "Error writing to %(path)s: %(exception)s"
msgstr ""
@@ -660,42 +651,42 @@ msgid ""
"used."
msgstr ""
-#: nova/keymgr/barbican.py:115
+#: nova/keymgr/barbican.py:126
#, python-format
msgid "Error creating Barbican client: %s"
msgstr ""
-#: nova/keymgr/barbican.py:151
+#: nova/keymgr/barbican.py:162
#, python-format
msgid "Error creating key: %s"
msgstr ""
-#: nova/keymgr/barbican.py:205
+#: nova/keymgr/barbican.py:216
#, python-format
msgid "Error storing key: %s"
msgstr ""
-#: nova/keymgr/barbican.py:231
+#: nova/keymgr/barbican.py:242
#, python-format
msgid "Error copying key: %s"
msgstr ""
-#: nova/keymgr/barbican.py:281
+#: nova/keymgr/barbican.py:292
#, python-format
msgid "Error getting secret data: %s"
msgstr ""
-#: nova/keymgr/barbican.py:301
+#: nova/keymgr/barbican.py:312
#, python-format
msgid "Error getting secret metadata: %s"
msgstr ""
-#: nova/keymgr/barbican.py:329
+#: nova/keymgr/barbican.py:340
#, python-format
msgid "Error getting key: %s"
msgstr ""
-#: nova/keymgr/barbican.py:346
+#: nova/keymgr/barbican.py:357
#, python-format
msgid "Error deleting key: %s"
msgstr ""
@@ -737,12 +728,12 @@ msgstr ""
msgid "Unable to execute %(cmd)s. Exception: %(exception)s"
msgstr ""
-#: nova/network/linux_net.py:1425
+#: nova/network/linux_net.py:1440
#, python-format
msgid "Failed removing net device: '%s'"
msgstr ""
-#: nova/network/linux_net.py:1436
+#: nova/network/linux_net.py:1451
#, python-format
msgid "Failed removing bridge device: '%s'"
msgstr ""
@@ -756,32 +747,32 @@ msgstr ""
msgid "Error releasing DHCP for IP %(address)s with MAC %(mac_address)s"
msgstr ""
-#: nova/network/neutronv2/api.py:294
+#: nova/network/neutronv2/api.py:295
#, python-format
msgid "Neutron error creating port on network %s"
msgstr ""
-#: nova/network/neutronv2/api.py:336
+#: nova/network/neutronv2/api.py:338
#, python-format
msgid "Unable to clear device ID for port '%s'"
msgstr ""
-#: nova/network/neutronv2/api.py:1271
+#: nova/network/neutronv2/api.py:1291
#, python-format
msgid "Unable to access floating IP %s"
msgstr ""
-#: nova/network/neutronv2/api.py:1400
+#: nova/network/neutronv2/api.py:1420
#, python-format
msgid "Unable to access floating IP for %s"
msgstr ""
-#: nova/network/neutronv2/api.py:1784
+#: nova/network/neutronv2/api.py:1801
#, python-format
msgid "Unable to update host of port %s"
msgstr ""
-#: nova/network/neutronv2/api.py:1801
+#: nova/network/neutronv2/api.py:1818
#, python-format
msgid "Unable to update instance VNIC index for port %s."
msgstr ""
@@ -834,7 +825,7 @@ msgstr ""
msgid "Neutron Error unable to delete %s"
msgstr ""
-#: nova/objects/instance.py:610
+#: nova/objects/instance.py:642
#, python-format
msgid "No save handler for %s"
msgstr ""
@@ -847,7 +838,7 @@ msgstr ""
msgid "Failed to notify cells of instance info cache update"
msgstr ""
-#: nova/pci/stats.py:154
+#: nova/pci/stats.py:157
msgid ""
"Failed to allocate PCI devices for instance. Unassigning devices back to "
"pools. This should not happen, since the scheduler should have accurate "
@@ -864,7 +855,7 @@ msgstr ""
msgid "Could not decode scheduler options"
msgstr ""
-#: nova/scheduler/utils.py:185
+#: nova/scheduler/utils.py:202
#, python-format
msgid "Error from last host: %(last_host)s (node %(last_node)s): %(exc)s"
msgstr ""
@@ -885,26 +876,31 @@ msgstr ""
msgid "Driver failed to attach volume %(volume_id)s at %(mountpoint)s"
msgstr ""
-#: nova/virt/driver.py:1471
+#: nova/virt/driver.py:1461
#, python-format
msgid "Exception dispatching event %(event)s: %(ex)s"
msgstr ""
-#: nova/virt/driver.py:1624
+#: nova/virt/driver.py:1630
msgid "Compute driver option required, but not specified"
msgstr ""
-#: nova/virt/driver.py:1634
+#: nova/virt/driver.py:1640
msgid "Unable to load the virtualization driver"
msgstr ""
-#: nova/virt/images.py:114
+#: nova/virt/images.py:150
#, python-format
msgid ""
"%(base)s virtual size %(disk_size)s larger than flavor root disk size "
"%(size)s"
msgstr ""
+#: nova/virt/osinfo.py:95
+#, python-format
+msgid "Cannot find OS information - Reason: (%s)"
+msgstr ""
+
#: nova/virt/disk/api.py:450
#, python-format
msgid ""
@@ -929,7 +925,7 @@ msgstr ""
msgid "nbd module not loaded"
msgstr ""
-#: nova/virt/hyperv/driver.py:114
+#: nova/virt/hyperv/driver.py:120
msgid ""
"You are running nova-compute on an unsupported version of Windows (older "
"than Windows / Hyper-V Server 2012). The support for this version of "
@@ -950,8 +946,8 @@ msgstr ""
msgid "Requested VM Generation %s, but provided VHD instead of VHDX."
msgstr ""
-#: nova/virt/hyperv/vmops.py:396 nova/virt/ironic/driver.py:677
-#: nova/virt/libvirt/driver.py:2990 nova/virt/vmwareapi/vmops.py:839
+#: nova/virt/hyperv/vmops.py:396 nova/virt/ironic/driver.py:686
+#: nova/virt/libvirt/driver.py:3230 nova/virt/vmwareapi/vmops.py:852
#, python-format
msgid "Creating config drive failed with error: %s"
msgstr ""
@@ -983,126 +979,130 @@ msgstr ""
msgid "Attach volume failed to %(instance_name)s: %(exn)s"
msgstr ""
-#: nova/virt/ironic/driver.py:384
+#: nova/virt/ironic/driver.py:393
#, python-format
msgid ""
"Failed to clean up the parameters on node %(node)s when unprovisioning "
"the instance %(instance)s"
msgstr ""
-#: nova/virt/ironic/driver.py:748
+#: nova/virt/ironic/driver.py:755
#, python-format
msgid ""
"Error preparing deploy for instance %(instance)s on baremetal node "
"%(node)s."
msgstr ""
-#: nova/virt/ironic/driver.py:777
+#: nova/virt/ironic/driver.py:784
#, python-format
msgid "Failed to request Ironic to provision instance %(inst)s: %(reason)s"
msgstr ""
-#: nova/virt/ironic/driver.py:794
+#: nova/virt/ironic/driver.py:800
#, python-format
msgid "Error deploying instance %(instance)s on baremetal node %(node)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:787
+#: nova/virt/libvirt/driver.py:962
#, python-format
msgid "Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:850
+#: nova/virt/libvirt/driver.py:1025
#, python-format
msgid "Error from libvirt during undefine. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:878
+#: nova/virt/libvirt/driver.py:1053
#, python-format
msgid "Error from libvirt during unfilter. Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1135
+#: nova/virt/libvirt/driver.py:1333
#, python-format
msgid "Failed to attach volume at mountpoint: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1294
+#: nova/virt/libvirt/driver.py:1490
msgid "attaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1318
+#: nova/virt/libvirt/driver.py:1514
msgid "detaching network adapter failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:1632
+#: nova/virt/libvirt/driver.py:1707
+msgid "Failed to snapshot image"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:1877
msgid "Failed to send updated snapshot status to volume service."
msgstr ""
-#: nova/virt/libvirt/driver.py:1731
+#: nova/virt/libvirt/driver.py:1976
msgid ""
"Unable to create quiesced VM snapshot, attempting again with quiescing "
"disabled."
msgstr ""
-#: nova/virt/libvirt/driver.py:1739
+#: nova/virt/libvirt/driver.py:1984
msgid "Unable to create VM snapshot, failing volume_snapshot operation."
msgstr ""
-#: nova/virt/libvirt/driver.py:1790
+#: nova/virt/libvirt/driver.py:2035
msgid ""
"Error occurred during volume_snapshot_create, sending error status to "
"Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:2088
+#: nova/virt/libvirt/driver.py:2333
msgid ""
"Error occurred during volume_snapshot_delete, sending error status to "
"Cinder."
msgstr ""
-#: nova/virt/libvirt/driver.py:2342
+#: nova/virt/libvirt/driver.py:2585
#, python-format
msgid ""
"Error from libvirt while injecting an NMI to %(instance_uuid)s: [Error "
"Code %(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2672 nova/virt/libvirt/driver.py:2677
+#: nova/virt/libvirt/driver.py:2910 nova/virt/libvirt/driver.py:2915
#, python-format
msgid "Error on '%(path)s' while checking direct I/O: '%(ex)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:2820
+#: nova/virt/libvirt/driver.py:3058
#, python-format
msgid "Error injecting data into image %(img_id)s (%(e)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:3096
+#: nova/virt/libvirt/driver.py:3336
#, python-format
msgid "Attaching PCI devices %(dev)s to %(dom)s failed."
msgstr ""
-#: nova/virt/libvirt/driver.py:4505
+#: nova/virt/libvirt/driver.py:4801
#, python-format
msgid "Neutron Reported failure on event %(event)s for instance %(uuid)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5232
+#: nova/virt/libvirt/driver.py:5520
#, python-format
msgid "Cannot block migrate instance %s with mapped volumes"
msgstr ""
-#: nova/virt/libvirt/driver.py:5705
+#: nova/virt/libvirt/driver.py:5995
#, python-format
msgid "Live Migration failure: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:6042
+#: nova/virt/libvirt/driver.py:6332
msgid "Migration operation has aborted"
msgstr ""
-#: nova/virt/libvirt/driver.py:7196
+#: nova/virt/libvirt/driver.py:7495
#, python-format
msgid "Failed to cleanup directory %(target)s: %(e)s"
msgstr ""
@@ -1139,19 +1139,14 @@ msgstr ""
msgid "Error defining a secret with XML: %s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:271
-#, python-format
-msgid "Unable to preallocate image at path: %(path)s"
-msgstr ""
-
-#: nova/virt/libvirt/imagebackend.py:297
+#: nova/virt/libvirt/imagebackend.py:299
#, python-format
msgid ""
"%(base)s virtual size %(base_size)s larger than flavor root disk size "
"%(size)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:670
+#: nova/virt/libvirt/imagebackend.py:732
msgid "Failed to retrieve ephemeral encryption key"
msgstr ""
@@ -1182,23 +1177,23 @@ msgstr ""
msgid "Unexpected error while checking process %(pid)s."
msgstr ""
-#: nova/virt/libvirt/vif.py:559
+#: nova/virt/libvirt/vif.py:569
msgid "Failed while plugging ib hostdev vif"
msgstr ""
-#: nova/virt/libvirt/vif.py:598 nova/virt/libvirt/vif.py:620
-#: nova/virt/libvirt/vif.py:680
+#: nova/virt/libvirt/vif.py:608 nova/virt/libvirt/vif.py:630
+#: nova/virt/libvirt/vif.py:667 nova/virt/libvirt/vif.py:729
msgid "Failed while plugging vif"
msgstr ""
-#: nova/virt/libvirt/vif.py:731 nova/virt/libvirt/vif.py:745
-#: nova/virt/libvirt/vif.py:764 nova/virt/libvirt/vif.py:815
-#: nova/virt/libvirt/vif.py:824 nova/virt/libvirt/vif.py:844
-#: nova/virt/libvirt/vif.py:869
+#: nova/virt/libvirt/vif.py:780 nova/virt/libvirt/vif.py:794
+#: nova/virt/libvirt/vif.py:813 nova/virt/libvirt/vif.py:864
+#: nova/virt/libvirt/vif.py:873 nova/virt/libvirt/vif.py:893
+#: nova/virt/libvirt/vif.py:912 nova/virt/libvirt/vif.py:946
msgid "Failed while unplugging vif"
msgstr ""
-#: nova/virt/libvirt/vif.py:784
+#: nova/virt/libvirt/vif.py:833
msgid "Failed while unplugging ib hostdev vif"
msgstr ""
@@ -1215,12 +1210,12 @@ msgid ""
"succeed."
msgstr ""
-#: nova/virt/libvirt/storage/rbd_utils.py:64
+#: nova/virt/libvirt/storage/rbd_utils.py:68
#, python-format
msgid "error opening rbd image %s"
msgstr ""
-#: nova/virt/libvirt/storage/rbd_utils.py:259
+#: nova/virt/libvirt/storage/rbd_utils.py:294
#, python-format
msgid "image %(volume)s in pool %(pool)s has snapshots, failed to remove"
msgstr ""
@@ -1230,7 +1225,7 @@ msgstr ""
msgid "Couldn't unmount the GlusterFS share %s"
msgstr ""
-#: nova/virt/libvirt/volume/nfs.py:80
+#: nova/virt/libvirt/volume/nfs.py:81
#, python-format
msgid "Couldn't unmount the NFS share %s"
msgstr ""
@@ -1255,12 +1250,12 @@ msgstr ""
msgid "Couldn't unmount the share %s"
msgstr ""
-#: nova/virt/libvirt/volume/volume.py:93
+#: nova/virt/libvirt/volume/volume.py:97
#, python-format
msgid "Unknown content in connection_info/access_mode: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:436
+#: nova/virt/vmwareapi/driver.py:433
#, python-format
msgid "Failed to detach %(device_name)s. Exception: %(exc)s"
msgstr ""
@@ -1270,7 +1265,7 @@ msgstr ""
msgid "Unable to retrieve storage policy with name %s"
msgstr ""
-#: nova/virt/vmwareapi/images.py:220
+#: nova/virt/vmwareapi/images.py:221
msgid "Transfer data failed"
msgstr ""
@@ -1287,42 +1282,42 @@ msgstr ""
msgid "Extending virtual disk failed with error: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1085
+#: nova/virt/vmwareapi/vmops.py:1098
msgid "Destroy instance failed"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1214
+#: nova/virt/vmwareapi/vmops.py:1227
msgid "Unable to access the rescue disk"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1721
+#: nova/virt/vmwareapi/vmops.py:1735
#, python-format
msgid "Attaching network adapter failed. Exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1772
+#: nova/virt/vmwareapi/vmops.py:1786
#, python-format
msgid "Detaching network adapter failed. Exception: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1846
+#: nova/virt/vmwareapi/vmops.py:1860
#, python-format
msgid "Failed to copy cached image %(source)s to %(dest)s for resize: %(error)s"
msgstr ""
-#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:1998
+#: nova/virt/xenapi/agent.py:112 nova/virt/xenapi/vmops.py:2007
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:124 nova/virt/xenapi/vmops.py:2003
+#: nova/virt/xenapi/agent.py:124 nova/virt/xenapi/vmops.py:2012
#, python-format
msgid ""
"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. "
"args=%(args)r"
msgstr ""
-#: nova/virt/xenapi/agent.py:129 nova/virt/xenapi/vmops.py:2008
+#: nova/virt/xenapi/agent.py:129 nova/virt/xenapi/vmops.py:2017
#, python-format
msgid "The call to %(method)s returned an error: %(e)s. args=%(args)r"
msgstr ""
@@ -1448,54 +1443,54 @@ msgstr ""
msgid "_migrate_disk_resizing_up failed. Restoring orig vm due_to: %s."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1540
+#: nova/virt/xenapi/vmops.py:1549
#, python-format
msgid "Failed to find an SR for volume %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1547
+#: nova/virt/xenapi/vmops.py:1556
#, python-format
msgid "Volume %s is associated with the instance but no SR was found for it"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1551 nova/virt/xenapi/vmops.py:2382
+#: nova/virt/xenapi/vmops.py:1560 nova/virt/xenapi/vmops.py:2392
#, python-format
msgid "Failed to forget the SR for volume %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1761
+#: nova/virt/xenapi/vmops.py:1770
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/xenapi/vmops.py:2106
+#: nova/virt/xenapi/vmops.py:2112
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:2163
+#: nova/virt/xenapi/vmops.py:2168
msgid "Plugin config_file get_val failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:2329
+#: nova/virt/xenapi/vmops.py:2339
msgid "Migrate Send failed"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:174
+#: nova/virt/xenapi/volume_utils.py:185
msgid "Unable to introduce VDI on SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:189
+#: nova/virt/xenapi/volume_utils.py:200
msgid "Unable to get record of VDI"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:211
+#: nova/virt/xenapi/volume_utils.py:222
msgid "Unable to introduce VDI for SR"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:309
+#: nova/virt/xenapi/volume_utils.py:320
msgid "Unable to find SR from VBD"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:320
+#: nova/virt/xenapi/volume_utils.py:331
msgid "Unable to find SR from VDI"
msgstr ""
@@ -1503,14 +1498,14 @@ msgstr ""
msgid "Host is member of a pool, but DB says otherwise"
msgstr ""
-#: nova/volume/cinder.py:357
+#: nova/volume/cinder.py:402
#, python-format
msgid ""
"Initialize connection failed for volume %(vol)s on host %(host)s. Error: "
"%(msg)s Code: %(code)s. Attempting to terminate connection."
msgstr ""
-#: nova/volume/cinder.py:368
+#: nova/volume/cinder.py:413
#, python-format
msgid ""
"Connection between volume %(vol)s and host %(host)s might have succeeded,"
diff --git a/nova/locale/nova-log-info.pot b/nova/locale/nova-log-info.pot
index 8c84e9c6a0..eb6de5cdc5 100644
--- a/nova/locale/nova-log-info.pot
+++ b/nova/locale/nova-log-info.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
+"POT-Creation-Date: 2016-02-08 07:00+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -29,19 +29,19 @@ msgid ""
"'%(inst_uuid)s'. Filter results: %(str_results)s"
msgstr ""
-#: nova/service.py:151
+#: nova/service.py:140
#, python-format
msgid ""
"Updating service version for %(binary)s on %(host)s from %(old)i to "
"%(new)i"
msgstr ""
-#: nova/service.py:191
+#: nova/service.py:180
#, python-format
msgid "Starting %(topic)s node (version %(version)s)"
msgstr ""
-#: nova/utils.py:326
+#: nova/utils.py:325
#, python-format
msgid "Executing RootwrapDaemonHelper.execute cmd=[%(cmd)r] kwargs=[%(kwargs)r]"
msgstr ""
@@ -59,121 +59,30 @@ msgstr ""
msgid "WSGI server has stopped."
msgstr ""
-#: nova/api/ec2/__init__.py:466
-#, python-format
-msgid "Unauthorized request for controller=%(controller)s and action=%(action)s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:419
-#, python-format
-msgid "Create snapshot of volume %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:462
-#, python-format
-msgid "Create key pair %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:474
-#, python-format
-msgid "Import key %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:487
-#, python-format
-msgid "Delete key pair %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:790
-#, python-format
-msgid "Get console output for instance %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:863
-#, python-format
-msgid "Create volume from snapshot %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:867
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:166
-#, python-format
-msgid "Create volume of %s GB"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:907
-#, python-format
-msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:937
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:396
-#, python-format
-msgid "Detach volume %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1304
-msgid "Allocate address"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1309
-#, python-format
-msgid "Release address %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1314
-#, python-format
-msgid "Associate address %(public_ip)s to instance %(instance_id)s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1347
-#, python-format
-msgid "Disassociate address %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1500
-#, python-format
-msgid "Reboot instance %r"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1615
-#, python-format
-msgid "De-registering image %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1651
-#, python-format
-msgid "Registered image %(image_location)s with id %(image_id)s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1720
-#, python-format
-msgid "Updating image %s publicity"
-msgstr ""
-
#: nova/api/ec2/ec2utils.py:292
msgid "Timestamp is invalid."
msgstr ""
-#: nova/api/openstack/__init__.py:111
+#: nova/api/openstack/__init__.py:117
#, python-format
msgid "%(url)s returned with HTTP %(status)d"
msgstr ""
-#: nova/api/openstack/__init__.py:340
+#: nova/api/openstack/__init__.py:372
msgid "V2.1 API has been disabled by configuration"
msgstr ""
-#: nova/api/openstack/__init__.py:393
+#: nova/api/openstack/__init__.py:425
#, python-format
msgid "Loaded extensions: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:537
+#: nova/api/openstack/wsgi.py:436
#, python-format
msgid "Fault thrown: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:540
+#: nova/api/openstack/wsgi.py:439
#, python-format
msgid "HTTP exception thrown: %s"
msgstr ""
@@ -186,7 +95,7 @@ msgstr ""
#: nova/api/openstack/compute/assisted_volume_snapshots.py:72
#: nova/api/openstack/compute/legacy_v2/contrib/assisted_volume_snapshots.py:64
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:505
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:517
#, python-format
msgid "Delete snapshot with id: %s"
msgstr ""
@@ -224,22 +133,32 @@ msgstr ""
msgid "Detach interface %s"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:77
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:89
#, python-format
msgid "vol=%s"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:111
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:123
#, python-format
msgid "Delete volume with id: %s"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:299
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:178
+#, python-format
+msgid "Create volume of %s GB"
+msgstr ""
+
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:311
#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:543
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:408
+#, python-format
+msgid "Detach volume %s"
+msgstr ""
+
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:555
#, python-format
msgid "Create snapshot from volume %s"
msgstr ""
@@ -265,56 +184,63 @@ msgid ""
"hint"
msgstr ""
-#: nova/compute/api.py:1531
+#: nova/compute/api.py:1088
+#, python-format
+msgid ""
+"max count reduced from %(max_count)d to %(max_net_count)d due to network "
+"port quota"
+msgstr ""
+
+#: nova/compute/api.py:1524
msgid "instance termination disabled"
msgstr ""
-#: nova/compute/api.py:1544
+#: nova/compute/api.py:1537
#, python-format
msgid "Working on deleting snapshot %s from shelved instance..."
msgstr ""
-#: nova/compute/api.py:1614
+#: nova/compute/api.py:1607
msgid "Instance is already in deleting state, ignoring this request"
msgstr ""
-#: nova/compute/api.py:1659
+#: nova/compute/api.py:1652
#, python-format
msgid ""
"Found an unconfirmed migration during delete, id: %(id)s, status: "
"%(status)s"
msgstr ""
-#: nova/compute/api.py:1669
+#: nova/compute/api.py:1662
msgid "Instance may have been confirmed during delete"
msgstr ""
-#: nova/compute/api.py:1686
+#: nova/compute/api.py:1679
#, python-format
msgid "Migration %s may have been confirmed during delete"
msgstr ""
-#: nova/compute/api.py:1726
+#: nova/compute/api.py:1746
msgid ""
"instance is in SHELVED_OFFLOADED state, cleanup the instance's info from "
"database."
msgstr ""
-#: nova/compute/api.py:2135
+#: nova/compute/api.py:2141
msgid "It's not supported to backup volume backed instance."
msgstr ""
-#: nova/compute/api.py:2273
+#: nova/compute/api.py:2279
#, python-format
msgid "Skipping quiescing instance: %(reason)s."
msgstr ""
-#: nova/compute/api.py:3829
+#: nova/compute/api.py:3946
#, python-format
msgid "Create Security Group %s"
msgstr ""
-#: nova/compute/api.py:3934
+#: nova/compute/api.py:4051
#, python-format
msgid "Delete security group %s"
msgstr ""
@@ -350,250 +276,250 @@ msgstr ""
msgid "Task possibly preempted: %s"
msgstr ""
-#: nova/compute/manager.py:730 nova/conductor/manager.py:155
-#: nova/console/manager.py:68 nova/consoleauth/manager.py:60
+#: nova/compute/manager.py:727 nova/conductor/manager.py:159
+#: nova/console/manager.py:68 nova/consoleauth/manager.py:73
msgid "Reloading compute RPC API"
msgstr ""
-#: nova/compute/manager.py:843
+#: nova/compute/manager.py:840
msgid "Deleting instance as it has been evacuated from this host"
msgstr ""
-#: nova/compute/manager.py:855
+#: nova/compute/manager.py:852
msgid "Instance has been marked deleted already, removing it from the hypervisor."
msgstr ""
-#: nova/compute/manager.py:1031
+#: nova/compute/manager.py:1028
msgid ""
"Service started deleting the instance during the previous run, but did "
"not finish. Restarting the deletion now."
msgstr ""
-#: nova/compute/manager.py:1165
+#: nova/compute/manager.py:1162
msgid "Instance found in migrating state during startup. Resetting task_state"
msgstr ""
-#: nova/compute/manager.py:1187
+#: nova/compute/manager.py:1184
msgid "Rebooting instance after nova-compute restart."
msgstr ""
-#: nova/compute/manager.py:1237
+#: nova/compute/manager.py:1234
#, python-format
msgid "VM %(state)s (Lifecycle Event)"
msgstr ""
-#: nova/compute/manager.py:1302
+#: nova/compute/manager.py:1299
msgid ""
"Instance lifecycle events from the compute driver have been disabled. "
"Note that lifecycle changes to an instance outside of the compute service"
" will only be synchronized by the _sync_power_states periodic task."
msgstr ""
-#: nova/compute/manager.py:1819
+#: nova/compute/manager.py:1823
#, python-format
msgid "Took %0.2f seconds to deallocate network for instance."
msgstr ""
-#: nova/compute/manager.py:1917
+#: nova/compute/manager.py:1921
#, python-format
msgid "Took %0.2f seconds to build instance."
msgstr ""
-#: nova/compute/manager.py:2030
+#: nova/compute/manager.py:2035
#, python-format
msgid "Took %0.2f seconds to spawn the instance on the hypervisor."
msgstr ""
-#: nova/compute/manager.py:2275
+#: nova/compute/manager.py:2281
#, python-format
msgid "%(action_str)s instance"
msgstr ""
-#: nova/compute/manager.py:2299
+#: nova/compute/manager.py:2305
#, python-format
msgid "Took %0.2f seconds to destroy the instance on the hypervisor."
msgstr ""
-#: nova/compute/manager.py:2341
+#: nova/compute/manager.py:2347
#, python-format
msgid "Took %(time).2f seconds to detach %(num)s volumes for instance."
msgstr ""
-#: nova/compute/manager.py:2470
+#: nova/compute/manager.py:2476
msgid "Instance disappeared during terminate"
msgstr ""
-#: nova/compute/manager.py:2514
+#: nova/compute/manager.py:2520
msgid "Instance is already powered off in the hypervisor when stop is called."
msgstr ""
-#: nova/compute/manager.py:2736
+#: nova/compute/manager.py:2742
msgid "Rebuilding instance"
msgstr ""
-#: nova/compute/manager.py:2833
+#: nova/compute/manager.py:2853
msgid "disk on shared storage, recreating using existing disk"
msgstr ""
-#: nova/compute/manager.py:2837
+#: nova/compute/manager.py:2857
#, python-format
msgid "disk not on shared storage, rebuilding from: '%s'"
msgstr ""
-#: nova/compute/manager.py:2917
+#: nova/compute/manager.py:2938
#, python-format
msgid "bringing vm to original state: '%s'"
msgstr ""
-#: nova/compute/manager.py:2948
+#: nova/compute/manager.py:2969
#, python-format
msgid "Detaching from volume api: %s"
msgstr ""
-#: nova/compute/manager.py:2975
+#: nova/compute/manager.py:2996
msgid "Rebooting instance"
msgstr ""
-#: nova/compute/manager.py:3111
+#: nova/compute/manager.py:3132
msgid "instance snapshotting"
msgstr ""
-#: nova/compute/manager.py:3245
+#: nova/compute/manager.py:3266
msgid "Root password set"
msgstr ""
-#: nova/compute/manager.py:3293
+#: nova/compute/manager.py:3314
#, python-format
msgid "injecting file to %s"
msgstr ""
-#: nova/compute/manager.py:3328
+#: nova/compute/manager.py:3347
msgid "Rescuing"
msgstr ""
-#: nova/compute/manager.py:3377
+#: nova/compute/manager.py:3396
msgid "Unrescuing"
msgstr ""
-#: nova/compute/manager.py:3431
+#: nova/compute/manager.py:3450
#, python-format
msgid "Migration %s is already confirmed"
msgstr ""
-#: nova/compute/manager.py:3452
+#: nova/compute/manager.py:3471
msgid "Instance is not found during confirmation"
msgstr ""
-#: nova/compute/manager.py:3649
+#: nova/compute/manager.py:3668
#, python-format
msgid "Updating instance to original state: '%s'"
msgstr ""
-#: nova/compute/manager.py:3699
+#: nova/compute/manager.py:3718
msgid "Migrating"
msgstr ""
-#: nova/compute/manager.py:4053
+#: nova/compute/manager.py:4073
msgid "Pausing"
msgstr ""
-#: nova/compute/manager.py:4069
+#: nova/compute/manager.py:4089
msgid "Unpausing"
msgstr ""
-#: nova/compute/manager.py:4106 nova/compute/manager.py:4123
+#: nova/compute/manager.py:4126 nova/compute/manager.py:4143
msgid "Retrieving diagnostics"
msgstr ""
-#: nova/compute/manager.py:4162
+#: nova/compute/manager.py:4182
msgid "Resuming"
msgstr ""
-#: nova/compute/manager.py:4401
+#: nova/compute/manager.py:4422
msgid "Get console output"
msgstr ""
-#: nova/compute/manager.py:4669
+#: nova/compute/manager.py:4690
#, python-format
msgid "Attaching volume %(volume_id)s to %(mountpoint)s"
msgstr ""
-#: nova/compute/manager.py:4694
+#: nova/compute/manager.py:4715
#, python-format
msgid "Detach volume %(volume_id)s from mountpoint %(mp)s"
msgstr ""
-#: nova/compute/manager.py:4879
+#: nova/compute/manager.py:4903
#, python-format
msgid "Swapping volume %(old_volume)s for %(new_volume)s"
msgstr ""
-#: nova/compute/manager.py:5260
+#: nova/compute/manager.py:5301
msgid "_post_live_migration() is started.."
msgstr ""
-#: nova/compute/manager.py:5342
+#: nova/compute/manager.py:5383
#, python-format
msgid "Migrating instance to %s finished successfully."
msgstr ""
-#: nova/compute/manager.py:5344
+#: nova/compute/manager.py:5385
msgid ""
"You may see the error \"libvirt: QEMU error: Domain not found: no domain "
"with matching name.\" This error can be safely ignored."
msgstr ""
-#: nova/compute/manager.py:5382
+#: nova/compute/manager.py:5423
msgid "Post operation of migration started"
msgstr ""
-#: nova/compute/manager.py:5671
+#: nova/compute/manager.py:5721
#, python-format
msgid ""
"Found %(migration_count)d unconfirmed migrations older than "
"%(confirm_window)d seconds"
msgstr ""
-#: nova/compute/manager.py:5686
+#: nova/compute/manager.py:5736
#, python-format
msgid ""
"Automatically confirming migration %(migration_id)s for instance "
"%(instance_uuid)s"
msgstr ""
-#: nova/compute/manager.py:5740
+#: nova/compute/manager.py:5790
#, python-format
msgid "Error auto-confirming resize: %s. Will retry later."
msgstr ""
-#: nova/compute/manager.py:5791
+#: nova/compute/manager.py:5841
#, python-format
msgid ""
"Running instance usage audit for host %(host)s from %(begin_time)s to "
"%(end_time)s. %(number_instances)s instances."
msgstr ""
-#: nova/compute/manager.py:5838
+#: nova/compute/manager.py:5888
msgid "Updating bandwidth usage cache"
msgstr ""
-#: nova/compute/manager.py:5860
+#: nova/compute/manager.py:5910
msgid "Bandwidth usage not supported by hypervisor."
msgstr ""
-#: nova/compute/manager.py:6023 nova/compute/manager.py:6080
+#: nova/compute/manager.py:6073 nova/compute/manager.py:6130
#, python-format
msgid "During sync_power_state the instance has a pending task (%(task)s). Skip."
msgstr ""
-#: nova/compute/manager.py:6067
+#: nova/compute/manager.py:6117
#, python-format
msgid ""
"During the sync_power process the instance has moved from host %(src)s to"
" host %(dst)s"
msgstr ""
-#: nova/compute/manager.py:6088
+#: nova/compute/manager.py:6138
#, python-format
msgid ""
"During _sync_instance_power_state the DB power_state (%(db_power_state)s)"
@@ -602,52 +528,52 @@ msgid ""
"hypervisor."
msgstr ""
-#: nova/compute/manager.py:6235
+#: nova/compute/manager.py:6285
msgid "Reclaiming deleted instance"
msgstr ""
-#: nova/compute/manager.py:6268
+#: nova/compute/manager.py:6318
#, python-format
msgid "Compute node '%s' not found in update_available_resource."
msgstr ""
-#: nova/compute/manager.py:6286
+#: nova/compute/manager.py:6335
#, python-format
msgid "Deleting orphan compute node %s"
msgstr ""
-#: nova/compute/manager.py:6336
+#: nova/compute/manager.py:6385
#, python-format
msgid ""
"Powering off instance with name label '%s' which is marked as DELETED but"
" still present on host."
msgstr ""
-#: nova/compute/manager.py:6354
+#: nova/compute/manager.py:6403
#, python-format
msgid ""
"Destroying instance with name label '%s' which is marked as DELETED but "
"still present on host."
msgstr ""
-#: nova/compute/manager.py:6402
+#: nova/compute/manager.py:6451
#, python-format
msgid "Setting instance back to %(state)s after: %(error)s"
msgstr ""
-#: nova/compute/manager.py:6412
+#: nova/compute/manager.py:6461
#, python-format
msgid "Setting instance back to ACTIVE after: %s"
msgstr ""
-#: nova/compute/manager.py:6475
+#: nova/compute/manager.py:6524
#, python-format
msgid ""
"Neutron deleted interface %(intf)s; detaching it from the instance and "
"deleting it from the info cache"
msgstr ""
-#: nova/compute/manager.py:6510
+#: nova/compute/manager.py:6559
#, python-format
msgid "Failed to process external instance event %(event)s due to: %(error)s"
msgstr ""
@@ -668,17 +594,17 @@ msgid ""
"is disabled."
msgstr ""
-#: nova/compute/resource_tracker.py:555
+#: nova/compute/resource_tracker.py:571
#, python-format
msgid "Compute_service record updated for %(host)s:%(node)s"
msgstr ""
-#: nova/compute/resource_tracker.py:622
+#: nova/compute/resource_tracker.py:638
#, python-format
msgid "Total usable vcpus: %(tcpu)s, total allocated vcpus: %(ucpu)s"
msgstr ""
-#: nova/compute/resource_tracker.py:631
+#: nova/compute/resource_tracker.py:647
#, python-format
msgid ""
"Final resource view: name=%(node)s phys_ram=%(phys_ram)sMB "
@@ -687,12 +613,12 @@ msgid ""
"used_vcpus=%(used_vcpus)s pci_stats=%(pci_stats)s"
msgstr ""
-#: nova/compute/resource_tracker.py:717
+#: nova/compute/resource_tracker.py:733
#, python-format
msgid "Updating from migration %s"
msgstr ""
-#: nova/compute/rpcapi.py:365
+#: nova/compute/rpcapi.py:371
#, python-format
msgid ""
"Automatically selected compute RPC version %(rpc)s from minimum service "
@@ -703,17 +629,17 @@ msgstr ""
msgid "nova-conductor connection established successfully"
msgstr ""
-#: nova/consoleauth/manager.py:105
+#: nova/consoleauth/manager.py:120
#, python-format
msgid "Received Token: %(token)s, %(token_dict)s"
msgstr ""
-#: nova/consoleauth/manager.py:130
+#: nova/consoleauth/manager.py:145
#, python-format
msgid "Checking Token: %(token)s, %(token_valid)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:3506
+#: nova/db/sqlalchemy/api.py:3486
#, python-format
msgid ""
"quota_usages out of sync, updating. project_id: %(project_id)s, user_id: "
@@ -721,7 +647,7 @@ msgid ""
"usage: %(in_use)s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:5353
+#: nova/db/sqlalchemy/api.py:5324
#, python-format
msgid ""
"Volume(%s) has lower stats then what is in the database. Instance must "
@@ -747,11 +673,21 @@ msgstr ""
msgid "Skipped adding %s because an equivalent index already exists."
msgstr ""
-#: nova/image/glance.py:399
+#: nova/image/glance.py:365
#, python-format
msgid "Successfully transferred using %s"
msgstr ""
+#: nova/image/glance.py:417
+#, python-format
+msgid "Image signature verification succeeded for image: %s"
+msgstr ""
+
+#: nova/image/glance.py:433
+#, python-format
+msgid "Image signature verification succeeded for image %s"
+msgstr ""
+
#: nova/image/s3.py:386
#, python-format
msgid "Image %s was deleted underneath us"
@@ -762,7 +698,7 @@ msgstr ""
msgid "Copied %(source_file)s using %(module_str)s"
msgstr ""
-#: nova/network/api.py:225 nova/network/neutronv2/api.py:1176
+#: nova/network/api.py:225 nova/network/neutronv2/api.py:1196
#, python-format
msgid "re-assign floating IP %(address)s from instance %(instance_id)s"
msgstr ""
@@ -825,7 +761,7 @@ msgstr ""
msgid "deleted %s"
msgstr ""
-#: nova/network/neutronv2/api.py:1664
+#: nova/network/neutronv2/api.py:1681
#, python-format
msgid ""
"Port %s from network info_cache is no longer associated with instance in "
@@ -899,6 +835,11 @@ msgid ""
"InstanceList."
msgstr ""
+#: nova/scheduler/filters/retry_filter.py:47
+#, python-format
+msgid "Host %(host)s fails. Previously tried hosts: %(hosts)s"
+msgstr ""
+
#: nova/scheduler/filters/utils.py:64
#, python-format
msgid "%(num_values)d values found, of which the minimum value will be used."
@@ -913,7 +854,7 @@ msgstr ""
msgid "Recovered from being unable to report status."
msgstr ""
-#: nova/servicegroup/drivers/mc.py:87
+#: nova/servicegroup/drivers/mc.py:85
msgid "Recovered connection to memcache server for reporting service status."
msgstr ""
@@ -942,16 +883,16 @@ msgstr ""
msgid "Booting with blank volume at %(mountpoint)s"
msgstr ""
-#: nova/virt/driver.py:1627
+#: nova/virt/driver.py:1633
#, python-format
msgid "Loading compute driver '%s'"
msgstr ""
-#: nova/virt/firewall.py:165 nova/virt/libvirt/firewall.py:363
+#: nova/virt/firewall.py:151 nova/virt/libvirt/firewall.py:355
msgid "Attempted to unfilter instance which is not filtered"
msgstr ""
-#: nova/virt/firewall.py:425
+#: nova/virt/firewall.py:404
#, python-format
msgid "instance chain %s disappeared during refresh, skipping"
msgstr ""
@@ -978,11 +919,11 @@ msgstr ""
msgid "Spawning new instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:374 nova/virt/vmwareapi/vmops.py:812
+#: nova/virt/hyperv/vmops.py:374 nova/virt/vmwareapi/vmops.py:825
msgid "Using config drive for instance"
msgstr ""
-#: nova/virt/hyperv/vmops.py:388 nova/virt/libvirt/driver.py:2983
+#: nova/virt/hyperv/vmops.py:388 nova/virt/libvirt/driver.py:3223
#, python-format
msgid "Creating config drive at %(path)s"
msgstr ""
@@ -995,169 +936,169 @@ msgstr ""
msgid "Soft shutdown succeeded."
msgstr ""
-#: nova/virt/ironic/driver.py:766
+#: nova/virt/ironic/driver.py:773
#, python-format
msgid "Config drive for instance %(instance)s on baremetal node %(node)s created."
msgstr ""
-#: nova/virt/ironic/driver.py:790
+#: nova/virt/ironic/driver.py:796
#, python-format
msgid "Successfully provisioned Ironic node %s"
msgstr ""
-#: nova/virt/ironic/driver.py:884
+#: nova/virt/ironic/driver.py:891
#, python-format
msgid "Successfully unprovisioned Ironic node %s"
msgstr ""
-#: nova/virt/ironic/driver.py:916
+#: nova/virt/ironic/driver.py:922
#, python-format
msgid "Successfully rebooted Ironic node %s"
msgstr ""
-#: nova/virt/ironic/driver.py:941
+#: nova/virt/ironic/driver.py:946
#, python-format
msgid "Successfully powered off Ironic node %s"
msgstr ""
-#: nova/virt/ironic/driver.py:967
+#: nova/virt/ironic/driver.py:971
#, python-format
msgid "Successfully powered on Ironic node %s"
msgstr ""
-#: nova/virt/ironic/driver.py:1163
+#: nova/virt/ironic/driver.py:1160
msgid "Instance was successfully rebuilt"
msgstr ""
-#: nova/virt/libvirt/driver.py:556
+#: nova/virt/libvirt/driver.py:633
#, python-format
msgid "Connection event '%(enabled)d' reason '%(reason)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:803
+#: nova/virt/libvirt/driver.py:978
msgid "During wait destroy, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:808
+#: nova/virt/libvirt/driver.py:983
msgid "Instance destroyed successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:818
+#: nova/virt/libvirt/driver.py:993
msgid "Instance may be started again."
msgstr ""
-#: nova/virt/libvirt/driver.py:828
+#: nova/virt/libvirt/driver.py:1003
msgid "Going to destroy instance again."
msgstr ""
-#: nova/virt/libvirt/driver.py:1438
+#: nova/virt/libvirt/driver.py:1645
msgid "Beginning live snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1441
+#: nova/virt/libvirt/driver.py:1648
msgid "Beginning cold snapshot process"
msgstr ""
-#: nova/virt/libvirt/driver.py:1472
+#: nova/virt/libvirt/driver.py:1694
msgid "Snapshot extracted, beginning image upload"
msgstr ""
-#: nova/virt/libvirt/driver.py:1484
+#: nova/virt/libvirt/driver.py:1715
msgid "Snapshot image upload complete"
msgstr ""
-#: nova/virt/libvirt/driver.py:2111
+#: nova/virt/libvirt/driver.py:2356
msgid "Instance soft rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2156
+#: nova/virt/libvirt/driver.py:2401
msgid "Instance shutdown successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2164
+#: nova/virt/libvirt/driver.py:2409
msgid "Instance may have been rebooted during soft reboot, so return now."
msgstr ""
-#: nova/virt/libvirt/driver.py:2227
+#: nova/virt/libvirt/driver.py:2470
msgid "Instance rebooted successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2267
+#: nova/virt/libvirt/driver.py:2510
msgid "Instance already shutdown."
msgstr ""
-#: nova/virt/libvirt/driver.py:2282
+#: nova/virt/libvirt/driver.py:2525
#, python-format
msgid "Instance shutdown successfully after %d seconds."
msgstr ""
-#: nova/virt/libvirt/driver.py:2311
+#: nova/virt/libvirt/driver.py:2554
#, python-format
msgid "Instance failed to shutdown in %d seconds."
msgstr ""
-#: nova/virt/libvirt/driver.py:2497
+#: nova/virt/libvirt/driver.py:2735
msgid "Instance spawned successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:2513
+#: nova/virt/libvirt/driver.py:2751
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
-#: nova/virt/libvirt/driver.py:2549
+#: nova/virt/libvirt/driver.py:2787
msgid ""
"Instance is configured with a file console, but the backing file is not "
"(yet?) present"
msgstr ""
-#: nova/virt/libvirt/driver.py:2560 nova/virt/libvirt/driver.py:2587
+#: nova/virt/libvirt/driver.py:2798 nova/virt/libvirt/driver.py:2825
#, python-format
msgid "Truncated console log returned, %d bytes ignored"
msgstr ""
-#: nova/virt/libvirt/driver.py:2847
+#: nova/virt/libvirt/driver.py:3085
msgid "Creating image"
msgstr ""
-#: nova/virt/libvirt/driver.py:2974
+#: nova/virt/libvirt/driver.py:3214
msgid "Using config drive"
msgstr ""
-#: nova/virt/libvirt/driver.py:3804
+#: nova/virt/libvirt/driver.py:4069
msgid "Configuring timezone for windows instance to localtime"
msgstr ""
-#: nova/virt/libvirt/driver.py:5005
+#: nova/virt/libvirt/driver.py:5300
#, python-format
msgid ""
"Getting block stats failed, device might have been detached. "
"Instance=%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5012
+#: nova/virt/libvirt/driver.py:5307
#, python-format
msgid ""
"Could not find domain in libvirt for instance %s. Cannot get block stats "
"for device"
msgstr ""
-#: nova/virt/libvirt/driver.py:5353
+#: nova/virt/libvirt/driver.py:5641
#, python-format
msgid "Instance launched has CPU info: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5920
+#: nova/virt/libvirt/driver.py:6210
#, python-format
msgid "Error %(ex)s, migration failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:5973
+#: nova/virt/libvirt/driver.py:6263
#, python-format
msgid "Increasing downtime to %(downtime)d ms after %(waittime)d sec elapsed time"
msgstr ""
-#: nova/virt/libvirt/driver.py:6015
+#: nova/virt/libvirt/driver.py:6305
#, python-format
msgid ""
"Migration running for %(secs)d secs, memory %(remaining)d%% remaining; "
@@ -1165,45 +1106,45 @@ msgid ""
"total=%(total_memory)d)"
msgstr ""
-#: nova/virt/libvirt/driver.py:6025
+#: nova/virt/libvirt/driver.py:6315
#, python-format
msgid ""
"Data remaining %(remaining)d bytes, low watermark %(watermark)d bytes "
"%(last)d seconds ago"
msgstr ""
-#: nova/virt/libvirt/driver.py:6035
+#: nova/virt/libvirt/driver.py:6325
msgid "Migration operation has completed"
msgstr ""
-#: nova/virt/libvirt/driver.py:6807
+#: nova/virt/libvirt/driver.py:7091
msgid "Instance running successfully."
msgstr ""
-#: nova/virt/libvirt/driver.py:7190
+#: nova/virt/libvirt/driver.py:7489
#, python-format
msgid "Deleting instance files %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:7203
+#: nova/virt/libvirt/driver.py:7502
#, python-format
msgid "Deletion of %s failed"
msgstr ""
-#: nova/virt/libvirt/driver.py:7207
+#: nova/virt/libvirt/driver.py:7506
#, python-format
msgid "Deletion of %s complete"
msgstr ""
-#: nova/virt/libvirt/firewall.py:117
+#: nova/virt/libvirt/firewall.py:115
msgid "Called setup_basic_filtering in nwfilter"
msgstr ""
-#: nova/virt/libvirt/firewall.py:125
+#: nova/virt/libvirt/firewall.py:123
msgid "Ensuring static filters"
msgstr ""
-#: nova/virt/libvirt/firewall.py:289
+#: nova/virt/libvirt/firewall.py:287
#, python-format
msgid "Failed to undefine network filter %(name)s. Try %(cnt)d of %(max_retry)d."
msgstr ""
@@ -1220,7 +1161,7 @@ msgstr ""
msgid "Libvirt host capabilities %s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:444
+#: nova/virt/libvirt/imagebackend.py:492
#, python-format
msgid "Failed to get image info from path %(path)s; error: %(error)s"
msgstr ""
@@ -1279,12 +1220,12 @@ msgstr ""
msgid "Removable base files: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:150
+#: nova/virt/libvirt/utils.py:152
#, python-format
msgid "tap-ctl check: %s"
msgstr ""
-#: nova/virt/libvirt/utils.py:498
+#: nova/virt/libvirt/utils.py:520
msgid "findmnt tool is not installed"
msgstr ""
@@ -1303,7 +1244,7 @@ msgstr ""
msgid "Trying to disconnected unmounted volume at %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:197
+#: nova/virt/vmwareapi/driver.py:196
#, python-format
msgid "VMware vCenter version: %s"
msgstr ""
@@ -1333,16 +1274,16 @@ msgstr ""
msgid "Image %s is no longer used. Deleting!"
msgstr ""
-#: nova/virt/vmwareapi/images.py:380
+#: nova/virt/vmwareapi/images.py:381
#, python-format
msgid "Downloaded image file data %(image_ref)s"
msgstr ""
-#: nova/virt/vmwareapi/images.py:383 nova/virt/vmwareapi/images.py:449
+#: nova/virt/vmwareapi/images.py:385 nova/virt/vmwareapi/images.py:455
msgid "The imported VM was unregistered"
msgstr ""
-#: nova/virt/vmwareapi/images.py:444
+#: nova/virt/vmwareapi/images.py:447
#, python-format
msgid "Downloaded OVA image file %(image_ref)s"
msgstr ""
@@ -1356,16 +1297,16 @@ msgstr ""
msgid "Created folder: %(name)s in parent %(parent)s."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:919
+#: nova/virt/vmwareapi/vmops.py:932
msgid "Created linked-clone VM from snapshot"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1452 nova/virt/xenapi/vmops.py:1701
+#: nova/virt/vmwareapi/vmops.py:1464 nova/virt/xenapi/vmops.py:1710
#, python-format
msgid "Found %(instance_count)d hung reboots older than %(timeout)d seconds"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1456 nova/virt/xenapi/vmops.py:1705
+#: nova/virt/vmwareapi/vmops.py:1468 nova/virt/xenapi/vmops.py:1714
msgid "Automatically hard rebooting"
msgstr ""
@@ -1460,42 +1401,42 @@ msgstr ""
msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:97
+#: nova/vnc/xvp_proxy.py:84
#, python-format
msgid "Error in handshake format: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:103
+#: nova/vnc/xvp_proxy.py:90
#, python-format
msgid "Error in handshake: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:118
+#: nova/vnc/xvp_proxy.py:105
#, python-format
msgid "Invalid request: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:138
+#: nova/vnc/xvp_proxy.py:125
#, python-format
msgid "Request: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:141
+#: nova/vnc/xvp_proxy.py:128
#, python-format
msgid "Request made with missing token: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:151
+#: nova/vnc/xvp_proxy.py:138
#, python-format
msgid "Request made with invalid token: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:158
+#: nova/vnc/xvp_proxy.py:145
#, python-format
msgid "Unexpected error: %s"
msgstr ""
-#: nova/vnc/xvp_proxy.py:178
+#: nova/vnc/xvp_proxy.py:165
#, python-format
msgid "Starting nova-xvpvncproxy node (version %s)"
msgstr ""
diff --git a/nova/locale/nova-log-warning.pot b/nova/locale/nova-log-warning.pot
index 4043a84359..36e0af8676 100644
--- a/nova/locale/nova-log-warning.pot
+++ b/nova/locale/nova-log-warning.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
+"POT-Creation-Date: 2016-02-08 07:00+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -17,7 +17,7 @@ msgstr ""
"Content-Transfer-Encoding: 8bit\n"
"Generated-By: Babel 2.2.0\n"
-#: nova/context.py:111
+#: nova/context.py:110
#, python-format
msgid "Arguments dropped when creating context: %s"
msgstr ""
@@ -29,47 +29,34 @@ msgid ""
"occur under normal operation. Use at your own risk."
msgstr ""
-#: nova/service.py:302
+#: nova/service.py:291
msgid "Service killed that has no database entry"
msgstr ""
-#: nova/utils.py:243
+#: nova/utils.py:242
#, python-format
msgid "Expected to receive %(exp)s bytes, but actually %(act)s"
msgstr ""
-#: nova/utils.py:795
+#: nova/utils.py:794
#, python-format
msgid "Hostname %(hostname)s is longer than 63, truncate it to %(truncated_name)s"
msgstr ""
-#: nova/api/ec2/__init__.py:176
-#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and"
-" will be locked out for %(lock_mins)d minutes."
-msgstr ""
-
-#: nova/api/ec2/cloud.py:239 nova/objectstore/s3server.py:95
+#: nova/api/ec2/cloud.py:29
msgid ""
-"The in tree EC2 API is deprecated as of Kilo release and may be removed "
-"in a future release. The openstack ec2-api project "
-"http://git.openstack.org/cgit/openstack/ec2-api/ is the target "
-"replacement for this functionality."
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1333
-#, python-format
-msgid "multiple fixed_ips exist, using the first: %s"
+"The in tree EC2 API has been removed in Mitaka. Please remove entries "
+"from api-paste.ini and use the OpenStack ec2-api project "
+"http://git.openstack.org/cgit/openstack/ec2-api/"
msgstr ""
-#: nova/api/metadata/handler.py:130
+#: nova/api/metadata/handler.py:131
msgid ""
"X-Instance-ID present in request headers. The 'service_metadata_proxy' "
"option must be enabled to process this header."
msgstr ""
-#: nova/api/metadata/handler.py:294
+#: nova/api/metadata/handler.py:295
#, python-format
msgid ""
"X-Instance-ID-Signature: %(signature)s does not match the expected value:"
@@ -77,7 +64,7 @@ msgid ""
"%(requestor_address)s"
msgstr ""
-#: nova/api/metadata/handler.py:323
+#: nova/api/metadata/handler.py:324
#, python-format
msgid ""
"Tenant_id %(tenant_id)s does not match tenant_id of instance "
@@ -99,25 +86,25 @@ msgstr ""
msgid "%(logprefix)s failed to load json"
msgstr ""
-#: nova/api/openstack/__init__.py:286 nova/api/openstack/__init__.py:487
+#: nova/api/openstack/__init__.py:318 nova/api/openstack/__init__.py:519
#, python-format
msgid ""
"Extension %(ext_name)s: Cannot extend resource %(collection)s: No such "
"resource"
msgstr ""
-#: nova/api/openstack/__init__.py:341
+#: nova/api/openstack/__init__.py:373
msgid "In the M release you must run the v2.1 API."
msgstr ""
-#: nova/api/openstack/__init__.py:347
+#: nova/api/openstack/__init__.py:379
msgid ""
"In the M release you must run all of the API. The concept of API "
"extensions will be removed from the codebase to ensure there is a single "
"Compute API."
msgstr ""
-#: nova/api/openstack/__init__.py:361
+#: nova/api/openstack/__init__.py:393
#, python-format
msgid "Extensions in both blacklist and whitelist: %s"
msgstr ""
@@ -165,19 +152,19 @@ msgid ""
"configure v2.1 API."
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:200
+#: nova/api/openstack/compute/floating_ips.py:202
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:187
#, python-format
msgid "Info cache is %r during associate"
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:232
+#: nova/api/openstack/compute/floating_ips.py:234
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:220
#, python-format
msgid "multiple fixed_ips exist, using the first IPv4 fixed_ip: %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:117
+#: nova/api/openstack/compute/servers.py:121
#, python-format
msgid ""
"Extension %s is both in whitelist and blacklist, blacklisting takes "
@@ -233,28 +220,28 @@ msgid ""
"mute."
msgstr ""
-#: nova/cmd/compute.py:70 nova/cmd/dhcpbridge.py:130 nova/cmd/network.py:70
+#: nova/cmd/compute.py:70 nova/cmd/dhcpbridge.py:138 nova/cmd/network.py:70
msgid ""
"Conductor local mode is deprecated and will be removed in a subsequent "
"release"
msgstr ""
-#: nova/compute/api.py:1551 nova/compute/manager.py:2546
+#: nova/compute/api.py:1544 nova/compute/manager.py:2552
#, python-format
msgid "Failed to delete snapshot from shelved instance (%s)."
msgstr ""
-#: nova/compute/api.py:1730
+#: nova/compute/api.py:1740
#, python-format
-msgid "instance's host %s is down, deleting from database"
+msgid "Ignoring volume cleanup failure due to %s"
msgstr ""
-#: nova/compute/api.py:1772
+#: nova/compute/api.py:1750
#, python-format
-msgid "Ignoring volume cleanup failure due to %s"
+msgid "instance's host %s is down, deleting from database"
msgstr ""
-#: nova/compute/api.py:2595
+#: nova/compute/api.py:2601
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr ""
@@ -270,21 +257,21 @@ msgstr ""
msgid "Failed to revert task state for instance. Error: %s"
msgstr ""
-#: nova/compute/manager.py:947
+#: nova/compute/manager.py:944
#, python-format
msgid ""
"Instance %(uuid)s appears to not be owned by this host, but by %(host)s. "
"Startup processing is being skipped."
msgstr ""
-#: nova/compute/manager.py:1083
+#: nova/compute/manager.py:1080
#, python-format
msgid ""
"Instance in transitional state (%(task_state)s) at start-up and power "
"state is (%(power_state)s), clearing task state"
msgstr ""
-#: nova/compute/manager.py:1094
+#: nova/compute/manager.py:1091
#, python-format
msgid ""
"Instance in transitional state (%(task_state)s) at start-up and power "
@@ -292,20 +279,20 @@ msgid ""
"instance"
msgstr ""
-#: nova/compute/manager.py:1197
+#: nova/compute/manager.py:1194
msgid "Hypervisor driver does not support resume guests"
msgstr ""
-#: nova/compute/manager.py:1202
+#: nova/compute/manager.py:1199
msgid "Failed to resume instance"
msgstr ""
-#: nova/compute/manager.py:1256
+#: nova/compute/manager.py:1253
#, python-format
msgid "Unexpected power state %d"
msgstr ""
-#: nova/compute/manager.py:1295
+#: nova/compute/manager.py:1292
msgid ""
"Instance lifecycle events from the compute driver have been disabled. "
"Note that lifecycle changes to an instance outside of the compute service"
@@ -313,164 +300,164 @@ msgid ""
"periodic task is also disabled."
msgstr ""
-#: nova/compute/manager.py:1407
+#: nova/compute/manager.py:1399
#, python-format
msgid ""
"Treating negative config value (%(retries)s) for 'block_device_retries' "
"as 0."
msgstr ""
-#: nova/compute/manager.py:1423
+#: nova/compute/manager.py:1415
#, python-format
msgid ""
"Volume id: %(vol_id)s finished being created but its status is "
"%(vol_status)s."
msgstr ""
-#: nova/compute/manager.py:1539
+#: nova/compute/manager.py:1531
msgid "Instance build timed out. Set to error state."
msgstr ""
-#: nova/compute/manager.py:1557
+#: nova/compute/manager.py:1549
#, python-format
msgid ""
"Treating negative config value (%(retries)s) for "
"'network_allocate_retries' as 0."
msgstr ""
-#: nova/compute/manager.py:1588
+#: nova/compute/manager.py:1582
#, python-format
msgid "Instance failed network setup (attempt %(attempt)d of %(attempts)d)"
msgstr ""
-#: nova/compute/manager.py:1750
+#: nova/compute/manager.py:1754
msgid ""
"Failed to create block device for instance due to being over volume "
"resource quota"
msgstr ""
-#: nova/compute/manager.py:2051
+#: nova/compute/manager.py:2056
msgid "No more network or fixed IP to be allocated"
msgstr ""
-#: nova/compute/manager.py:2203
+#: nova/compute/manager.py:2209
#, python-format
msgid "Could not clean up failed build, not rescheduling. Error: %s"
msgstr ""
-#: nova/compute/manager.py:2334
+#: nova/compute/manager.py:2340
#, python-format
msgid "Ignoring EndpointNotFound: %s"
msgstr ""
-#: nova/compute/manager.py:2337
+#: nova/compute/manager.py:2343
#, python-format
msgid "Ignoring Unknown cinder exception: %s"
msgstr ""
-#: nova/compute/manager.py:2361 nova/virt/block_device.py:365
+#: nova/compute/manager.py:2367 nova/virt/block_device.py:365
#, python-format
msgid "Failed to delete volume: %(volume_id)s due to %(exc)s"
msgstr ""
-#: nova/compute/manager.py:2405
+#: nova/compute/manager.py:2411
msgid "Info cache for instance could not be found. Ignore."
msgstr ""
-#: nova/compute/manager.py:2990
+#: nova/compute/manager.py:3011
#, python-format
msgid ""
"trying to reboot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:3026
+#: nova/compute/manager.py:3047
msgid "Reboot failed but instance is running"
msgstr ""
-#: nova/compute/manager.py:3046
+#: nova/compute/manager.py:3067
msgid "Instance disappeared during reboot"
msgstr ""
-#: nova/compute/manager.py:3117
+#: nova/compute/manager.py:3138
#, python-format
msgid ""
"trying to snapshot a non-running instance: (state: %(state)s expected: "
"%(running)s)"
msgstr ""
-#: nova/compute/manager.py:3150
+#: nova/compute/manager.py:3171
#, python-format
msgid "Error while trying to clean up image %s"
msgstr ""
-#: nova/compute/manager.py:3155
+#: nova/compute/manager.py:3176
msgid "Image not found during snapshot"
msgstr ""
-#: nova/compute/manager.py:3250
+#: nova/compute/manager.py:3271
msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
-#: nova/compute/manager.py:3287
+#: nova/compute/manager.py:3308
#, python-format
msgid ""
"trying to inject a file into a non-running (state: %(current_state)s "
"expected: %(expected_state)s)"
msgstr ""
-#: nova/compute/manager.py:3311
+#: nova/compute/manager.py:3332
msgid ""
"Unable to find a different image to use for rescue VM, using instance's "
"current image"
msgstr ""
-#: nova/compute/manager.py:3436
+#: nova/compute/manager.py:3455
#, python-format
msgid ""
"Unexpected confirmation status '%(status)s' of migration %(id)s, exit "
"confirmation process"
msgstr ""
-#: nova/compute/manager.py:4705
+#: nova/compute/manager.py:4726
msgid "Detaching volume from unknown instance"
msgstr ""
-#: nova/compute/manager.py:4716
+#: nova/compute/manager.py:4737
#, python-format
msgid ""
"Ignoring DiskNotFound exception while detaching volume %(volume_id)s from"
" %(mp)s: %(err)s"
msgstr ""
-#: nova/compute/manager.py:4946
+#: nova/compute/manager.py:4971
#, python-format
msgid ""
"attach interface failed , try to deallocate port %(port_id)s, reason: "
"%(msg)s"
msgstr ""
-#: nova/compute/manager.py:4954
+#: nova/compute/manager.py:4979
#, python-format
msgid "deallocate port %(port_id)s failed"
msgstr ""
-#: nova/compute/manager.py:4977 nova/compute/manager.py:6488
+#: nova/compute/manager.py:5002 nova/compute/manager.py:6537
#, python-format
msgid "Detach interface failed, port_id=%(port_id)s, reason: %(msg)s"
msgstr ""
-#: nova/compute/manager.py:4989
+#: nova/compute/manager.py:5014
#, python-format
msgid "Failed to deallocate port %(port_id)s for instance. Error: %(error)s"
msgstr ""
-#: nova/compute/manager.py:5676
+#: nova/compute/manager.py:5726
#, python-format
msgid "Setting migration %(migration_id)s to error: %(reason)s"
msgstr ""
-#: nova/compute/manager.py:5986
+#: nova/compute/manager.py:6036
#, python-format
msgid ""
"While synchronizing instance power states, found %(num_db_instances)s "
@@ -478,7 +465,7 @@ msgid ""
"hypervisor."
msgstr ""
-#: nova/compute/manager.py:6114
+#: nova/compute/manager.py:6164
#, python-format
msgid ""
"Instance shutdown by itself. Calling the stop API. Current vm_state: "
@@ -487,19 +474,19 @@ msgid ""
"%(vm_power_state)s"
msgstr ""
-#: nova/compute/manager.py:6141
+#: nova/compute/manager.py:6191
msgid "Instance is suspended unexpectedly. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:6157
+#: nova/compute/manager.py:6207
msgid "Instance is paused unexpectedly. Ignore."
msgstr ""
-#: nova/compute/manager.py:6163
+#: nova/compute/manager.py:6213
msgid "Instance is unexpectedly not found. Ignore."
msgstr ""
-#: nova/compute/manager.py:6169
+#: nova/compute/manager.py:6219
#, python-format
msgid ""
"Instance is not stopped. Calling the stop API. Current vm_state: "
@@ -508,36 +495,36 @@ msgid ""
"%(vm_power_state)s"
msgstr ""
-#: nova/compute/manager.py:6191
+#: nova/compute/manager.py:6241
msgid "Paused instance shutdown by itself. Calling the stop API."
msgstr ""
-#: nova/compute/manager.py:6205
+#: nova/compute/manager.py:6255
msgid "Instance is not (soft-)deleted."
msgstr ""
-#: nova/compute/manager.py:6239
+#: nova/compute/manager.py:6289
#, python-format
msgid "Periodic reclaim failed to delete instance: %s"
msgstr ""
-#: nova/compute/manager.py:6330
+#: nova/compute/manager.py:6379
#, python-format
msgid ""
"Detected instance with name label '%s' which is marked as DELETED but "
"still present on host."
msgstr ""
-#: nova/compute/manager.py:6350
+#: nova/compute/manager.py:6399
msgid "Failed to power off instance"
msgstr ""
-#: nova/compute/manager.py:6366
+#: nova/compute/manager.py:6415
#, python-format
msgid "Periodic cleanup failed to delete instance: %s"
msgstr ""
-#: nova/compute/manager.py:6615
+#: nova/compute/manager.py:6664
#, python-format
msgid "Migration %s is not found."
msgstr ""
@@ -559,20 +546,20 @@ msgstr ""
msgid "Cannot get the metrics from %s."
msgstr ""
-#: nova/compute/resource_tracker.py:564
+#: nova/compute/resource_tracker.py:580
#, python-format
msgid "No compute node record for %(host)s:%(node)s"
msgstr ""
-#: nova/compute/resource_tracker.py:801
+#: nova/compute/resource_tracker.py:817
msgid "Instance not resizing, skipping migration."
msgstr ""
-#: nova/compute/resource_tracker.py:817
+#: nova/compute/resource_tracker.py:833
msgid "Flavor could not be found, skipping migration."
msgstr ""
-#: nova/compute/resource_tracker.py:907
+#: nova/compute/resource_tracker.py:923
#, python-format
msgid ""
"Detected running orphan instance: %(uuid)s (consuming %(memory_mb)s MB "
@@ -628,15 +615,15 @@ msgid ""
"conductor connection..."
msgstr ""
-#: nova/conductor/manager.py:464
+#: nova/conductor/manager.py:489
msgid "No valid host found for unshelve instance"
msgstr ""
-#: nova/conductor/manager.py:507
+#: nova/conductor/manager.py:532
msgid "No valid host found for rebuild"
msgstr ""
-#: nova/conductor/manager.py:515
+#: nova/conductor/manager.py:540
msgid "Server with unsupported policy cannot be rebuilt"
msgstr ""
@@ -648,39 +635,39 @@ msgid ""
"%(default)s."
msgstr ""
-#: nova/consoleauth/manager.py:91
+#: nova/consoleauth/manager.py:103
#, python-format
msgid "Token: %(token)s failed to save into memcached."
msgstr ""
-#: nova/consoleauth/manager.py:101
+#: nova/consoleauth/manager.py:116
#, python-format
msgid "Instance: %(instance_uuid)s failed to save into memcached"
msgstr ""
-#: nova/db/sqlalchemy/api.py:756
+#: nova/db/sqlalchemy/api.py:753
#, python-format
msgid "Invalid floating IP ID %s in request"
msgstr ""
-#: nova/db/sqlalchemy/api.py:3683
+#: nova/db/sqlalchemy/api.py:3662
#, python-format
msgid "Change will make usage less than 0 for the following resources: %s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:6090
+#: nova/db/sqlalchemy/api.py:6078
#, python-format
msgid "IntegrityError detected when archiving table %(tablename)s: %(error)s"
msgstr ""
-#: nova/image/glance.py:179
+#: nova/image/glance.py:194
#, python-format
msgid ""
"No protocol specified in for api_server '%s', please update [glance] "
"api_servers with fully qualified url including scheme (http / https)"
msgstr ""
-#: nova/image/glance.py:222
+#: nova/image/glance.py:236
#, python-format
msgid ""
"Treating negative config value (%(retries)s) for 'glance.num_retries' as "
@@ -792,12 +779,12 @@ msgstr ""
msgid "hostname %s too long, truncating."
msgstr ""
-#: nova/network/linux_net.py:1749 nova/network/linux_net.py:1760
+#: nova/network/linux_net.py:1764 nova/network/linux_net.py:1775
#, python-format
msgid "%s failed. Not Retrying."
msgstr ""
-#: nova/network/linux_net.py:1753
+#: nova/network/linux_net.py:1768
#, python-format
msgid "%(cmd)s failed. Sleeping %(time)s seconds before retry."
msgstr ""
@@ -878,46 +865,53 @@ msgstr ""
msgid "Cannot delete domain |%s|"
msgstr ""
-#: nova/network/neutronv2/api.py:264
+#: nova/network/neutronv2/api.py:164
+#, python-format
+msgid ""
+"%(item)s already exists in list: %(list_name)s containing: %(items)s. "
+"ignoring it"
+msgstr ""
+
+#: nova/network/neutronv2/api.py:265
#, python-format
msgid ""
"Neutron error: %(ip)s is not a valid IP address for network "
"%(network_id)s."
msgstr ""
-#: nova/network/neutronv2/api.py:273
+#: nova/network/neutronv2/api.py:274
#, python-format
msgid "Neutron error: Fixed IP %s is already in use."
msgstr ""
-#: nova/network/neutronv2/api.py:278
+#: nova/network/neutronv2/api.py:279
#, python-format
msgid "Neutron error: Port quota exceeded in tenant: %s"
msgstr ""
-#: nova/network/neutronv2/api.py:283
+#: nova/network/neutronv2/api.py:284
#, python-format
msgid "Neutron error: No more fixed IPs in network: %s"
msgstr ""
-#: nova/network/neutronv2/api.py:287
+#: nova/network/neutronv2/api.py:288
#, python-format
msgid ""
"Neutron error: MAC address %(mac)s is already in use on network "
"%(network)s."
msgstr ""
-#: nova/network/neutronv2/api.py:723
+#: nova/network/neutronv2/api.py:725
#, python-format
msgid "Port %s does not exist"
msgstr ""
-#: nova/network/neutronv2/api.py:728
+#: nova/network/neutronv2/api.py:730
#, python-format
msgid "Failed to delete port %s for instance."
msgstr ""
-#: nova/network/neutronv2/api.py:1511
+#: nova/network/neutronv2/api.py:1531
#, python-format
msgid ""
"Network %(id)s not matched with the tenants network! The ports tenant "
@@ -931,25 +925,37 @@ msgid ""
"%(port_id)s does not meet security requirements"
msgstr ""
-#: nova/objects/service.py:285
+#: nova/objects/block_device.py:212
+#, python-format
+msgid "Legacy get_by_volume_id() call found multiple BDMs for volume %(volume)s"
+msgstr ""
+
+#: nova/objects/service.py:333
#, python-format
msgid "get_minimum_version called with likely-incorrect binary `%s'"
msgstr ""
-#: nova/pci/manager.py:131
+#: nova/pci/manager.py:129
#, python-format
msgid ""
"Trying to remove device with %(status)s ownership %(instance_uuid)s "
"because of %(pci_exception)s"
msgstr ""
-#: nova/pci/manager.py:195
+#: nova/pci/manager.py:192
#, python-format
msgid ""
"Assigning a pci device without numa affinity toinstance %(instance)s "
"which has numa topology"
msgstr ""
+#: nova/scheduler/driver.py:57
+#, python-format
+msgid ""
+"DEPRECATED: scheduler_host_manager uses classloader to load %(path)s. "
+"This legacy loading style will be removed in the N release."
+msgstr ""
+
#: nova/scheduler/host_manager.py:91
#, python-format
msgid "Selected host: %(host)s failed to consume from instance. Error: %(error)s"
@@ -967,22 +973,29 @@ msgstr ""
msgid "No compute service record found for host %(host)s"
msgstr ""
-#: nova/scheduler/utils.py:87
+#: nova/scheduler/manager.py:66
+#, python-format
+msgid ""
+"DEPRECATED: scheduler_driver uses classloader to load %(path)s. This "
+"legacy loading style will be removed in the N release."
+msgstr ""
+
+#: nova/scheduler/utils.py:90
#, python-format
msgid "Failed to %(service)s_%(method)s: %(ex)s"
msgstr ""
-#: nova/scheduler/utils.py:96
+#: nova/scheduler/utils.py:99
#, python-format
msgid "Setting instance to %s state."
msgstr ""
-#: nova/scheduler/utils.py:239
+#: nova/scheduler/utils.py:248
#, python-format
msgid "Ignoring the invalid elements of the option %(name)s: %(options)s"
msgstr ""
-#: nova/scheduler/utils.py:338
+#: nova/scheduler/utils.py:376
#, python-format
msgid ""
"Retrying %(name)s after a MessagingTimeout, attempt %(attempt)s of "
@@ -1032,6 +1045,20 @@ msgid ""
"of it in production right now may be risky."
msgstr ""
+#: nova/scheduler/weights/affinity.py:63
+msgid ""
+"For the soft_affinity_weight_multiplier only a positive value is "
+"meaningful as a negative value would mean that the affinity weigher would"
+" prefer non-collocating placement."
+msgstr ""
+
+#: nova/scheduler/weights/affinity.py:79
+msgid ""
+"For the soft_anti_affinity_weight_multiplier only a positive value is "
+"meaningful as a negative value would mean that the anti-affinity weigher "
+"would prefer collocating placement."
+msgstr ""
+
#: nova/servicegroup/api.py:59
#, python-format
msgid ""
@@ -1045,7 +1072,7 @@ msgstr ""
msgid "Lost connection to nova-conductor for reporting service status."
msgstr ""
-#: nova/servicegroup/drivers/mc.py:94
+#: nova/servicegroup/drivers/mc.py:92
msgid "Lost connection to memcache server for reporting service status."
msgstr ""
@@ -1069,7 +1096,7 @@ msgstr ""
msgid "Driver failed to detach volume %(volume_id)s at %(mount_point)s."
msgstr ""
-#: nova/virt/fake.py:276
+#: nova/virt/fake.py:275
#, python-format
msgid "Key '%(key)s' not in instances '%(inst)s'"
msgstr ""
@@ -1153,44 +1180,44 @@ msgstr ""
msgid "Could not determine iscsi initiator name"
msgstr ""
-#: nova/virt/ironic/driver.py:82
+#: nova/virt/ironic/driver.py:86
#, python-format
msgid "Power state %s not found."
msgstr ""
-#: nova/virt/ironic/driver.py:204
+#: nova/virt/ironic/driver.py:213
#, python-format
msgid "Node %(uuid)s has a malformed \"%(prop)s\". It should be an integer."
msgstr ""
-#: nova/virt/ironic/driver.py:215
+#: nova/virt/ironic/driver.py:224
#, python-format
msgid "cpu_arch not defined for node '%s'"
msgstr ""
-#: nova/virt/ironic/driver.py:240
+#: nova/virt/ironic/driver.py:249
#, python-format
msgid ""
"Node %(uuid)s has a malformed \"%(prop)s\". It should be an integer but "
"its value is \"%(value)s\"."
msgstr ""
-#: nova/virt/ironic/driver.py:285
+#: nova/virt/ironic/driver.py:294
#, python-format
msgid "Ignoring malformed capability '%s'. Format should be 'key:val'."
msgstr ""
-#: nova/virt/ironic/driver.py:609
+#: nova/virt/ironic/driver.py:618
#, python-format
msgid "Warning, memory usage is 0 for %(instance)s on baremetal node %(node)s."
msgstr ""
-#: nova/virt/ironic/driver.py:616
+#: nova/virt/ironic/driver.py:625
#, python-format
msgid "Warning, number of cpus is 0 for %(instance)s on baremetal node %(node)s."
msgstr ""
-#: nova/virt/ironic/driver.py:872
+#: nova/virt/ironic/driver.py:879
#, python-format
msgid "Destroy called on non-existing instance %s."
msgstr ""
@@ -1199,12 +1226,12 @@ msgstr ""
msgid "Race detected in libvirt.virDomain.info, trying one more time"
msgstr ""
-#: nova/virt/libvirt/driver.py:484
+#: nova/virt/libvirt/driver.py:559
#, python-format
msgid "Invalid cachemode %(cache_mode)s specified for disk type %(disk_type)s."
msgstr ""
-#: nova/virt/libvirt/driver.py:548
+#: nova/virt/libvirt/driver.py:625
#, python-format
msgid ""
"The libvirt driver is not tested on %(type)s/%(arch)s by the OpenStack "
@@ -1212,7 +1239,7 @@ msgid ""
"see: https://wiki.openstack.org/wiki/HypervisorSupportMatrix"
msgstr ""
-#: nova/virt/libvirt/driver.py:571
+#: nova/virt/libvirt/driver.py:649
msgid ""
"Running libvirt-lxc without user namespaces is dangerous. Containers "
"spawned by Nova will be run as the host's root user. It is highly "
@@ -1220,150 +1247,208 @@ msgid ""
"environment."
msgstr ""
-#: nova/virt/libvirt/driver.py:599
+#: nova/virt/libvirt/driver.py:676
#, python-format
msgid ""
"Running Nova with a libvirt version less than %(version)s is deprecated. "
"The required minimum version of libvirt will be raised to %(version)s in "
-"the 13.0.0 release."
+"the next release."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:702
+#, python-format
+msgid ""
+"Removing the VIR_MIGRATE_PEER2PEER flag from %(config_name)s because "
+"peer-to-peer migrations are not supported by the \"xen\" virt type"
msgstr ""
-#: nova/virt/libvirt/driver.py:622
+#: nova/virt/libvirt/driver.py:710
#, python-format
msgid ""
-"Running Nova with a live_migration_flag config option which contains "
-"%(flag)s will cause all live-migrations to be block-migrations instead. "
-"This setting should only be on the block_migration_flag instead."
+"Adding the VIR_MIGRATE_PEER2PEER flag to %(config_name)s because direct "
+"migrations are not supported by the %(virt_type)s virt type"
msgstr ""
-#: nova/virt/libvirt/driver.py:629
+#: nova/virt/libvirt/driver.py:719
#, python-format
msgid ""
-"Running Nova with a block_migration_flag config option which does not "
-"contain %(flag)s will cause all block-migrations to be live-migrations "
-"instead. This setting should be on the block_migration_flag."
+"Adding the VIR_MIGRATE_UNDEFINE_SOURCE flag to %(config_name)s because, "
+"without it, migrated VMs will remain defined on the source host"
msgstr ""
-#: nova/virt/libvirt/driver.py:751
+#: nova/virt/libvirt/driver.py:726
+#, python-format
+msgid ""
+"Removing the VIR_MIGRATE_PERSIST_DEST flag from %(config_name)s as Nova "
+"ensures the VM is persisted on the destination host"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:737
+msgid ""
+"Removing the VIR_MIGRATE_NON_SHARED_INC flag from the live_migration_flag"
+" config option because it will cause all live-migrations to be block-"
+"migrations instead."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:744
+msgid ""
+"Adding the VIR_MIGRATE_NON_SHARED_INC flag to the block_migration_flag "
+"config option, otherwise all block-migrations will be live-migrations "
+"instead."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:758
+#, python-format
+msgid ""
+"The %(config_name)s config option does not contain the "
+"VIR_MIGRATE_TUNNELLED flag but the live_migration_tunnelled is set to "
+"True which causes VIR_MIGRATE_TUNNELLED to be set"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:767
+#, python-format
+msgid ""
+"The %(config_name)s config option contains the VIR_MIGRATE_TUNNELLED flag"
+" but the live_migration_tunnelled is set to False which causes "
+"VIR_MIGRATE_TUNNELLED to be unset"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:784
+#, python-format
+msgid "Ignoring unknown libvirt live migration flag '%(flag)s'"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:926
msgid "Cannot destroy instance, operation time out"
msgstr ""
-#: nova/virt/libvirt/driver.py:772
+#: nova/virt/libvirt/driver.py:947
#, python-format
msgid ""
"Error from libvirt during destroy. Code=%(errcode)s Error=%(e)s; attempt "
"%(attempt)d of 3"
msgstr ""
-#: nova/virt/libvirt/driver.py:872
+#: nova/virt/libvirt/driver.py:1047
msgid "Instance may be still running, destroy it again."
msgstr ""
-#: nova/virt/libvirt/driver.py:927
+#: nova/virt/libvirt/driver.py:1102
#, python-format
msgid "Ignoring Volume Error on vol %(vol_id)s during delete %(exc)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1261 nova/virt/libvirt/driver.py:1272
+#: nova/virt/libvirt/driver.py:1458 nova/virt/libvirt/driver.py:1469
msgid "During detach_volume, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:1314
+#: nova/virt/libvirt/driver.py:1510
msgid "During detach_interface, instance disappeared."
msgstr ""
-#: nova/virt/libvirt/driver.py:1996
+#: nova/virt/libvirt/driver.py:1666
+#, python-format
+msgid "Performing standard snapshot because direct snapshot failed: %(error)s"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:2241
msgid ""
"Relative blockrebase support was not detected. Continuing with old "
"behaviour."
msgstr ""
-#: nova/virt/libvirt/driver.py:2115
+#: nova/virt/libvirt/driver.py:2360
msgid "Failed to soft reboot instance. Trying hard reboot."
msgstr ""
-#: nova/virt/libvirt/driver.py:2595
+#: nova/virt/libvirt/driver.py:2833
#, python-format
msgid ""
"my_ip address (%(my_ip)s) was not found on any of the interfaces: "
"%(ifaces)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2809
+#: nova/virt/libvirt/driver.py:3047
#, python-format
msgid "Image %s not found on disk storage. Continue without injecting data"
msgstr ""
-#: nova/virt/libvirt/driver.py:3011
+#: nova/virt/libvirt/driver.py:3251
msgid "File injection into a boot from volume instance is not supported"
msgstr ""
-#: nova/virt/libvirt/driver.py:3085
+#: nova/virt/libvirt/driver.py:3325
msgid "Instance disappeared while detaching a PCI device from it."
msgstr ""
-#: nova/virt/libvirt/driver.py:3195
+#: nova/virt/libvirt/driver.py:3433
#, python-format
msgid "Cannot update service status on host \"%s\" since it is not registered."
msgstr ""
-#: nova/virt/libvirt/driver.py:3198
+#: nova/virt/libvirt/driver.py:3436
#, python-format
msgid ""
"Cannot update service status on host \"%s\" due to an unexpected "
"exception."
msgstr ""
-#: nova/virt/libvirt/driver.py:3525
+#: nova/virt/libvirt/driver.py:3763
msgid "Too many id maps, only included first five."
msgstr ""
-#: nova/virt/libvirt/driver.py:3535
+#: nova/virt/libvirt/driver.py:3773
#, python-format
msgid "Invalid value for id mapping %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4296
+#: nova/virt/libvirt/driver.py:4331
+msgid ""
+"uefi support is without some kind of functional testing and therefore "
+"considered experimental."
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:4592
msgid ""
"Old property name \"hw_watchdog_action\" is now deprecated and will be "
"removed in the next release. Use updated property name "
"\"hw:watchdog_action\" instead"
msgstr ""
-#: nova/virt/libvirt/driver.py:4581
+#: nova/virt/libvirt/driver.py:4876
#, python-format
msgid "Timeout waiting for vif plugging callback for instance %(uuid)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4603
+#: nova/virt/libvirt/driver.py:4898
#, python-format
msgid "couldn't obtain the XML from domain: %(uuid)s, exception: %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4643
+#: nova/virt/libvirt/driver.py:4938
msgid ""
"Cannot get the number of cpu, because this function is not implemented "
"for this platform. "
msgstr ""
-#: nova/virt/libvirt/driver.py:4658
+#: nova/virt/libvirt/driver.py:4953
#, python-format
msgid ""
"Couldn't retrieve the online CPUs due to a Libvirt error: %(error)s with "
"error code: %(error_code)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4714
+#: nova/virt/libvirt/driver.py:5009
#, python-format
msgid "couldn't obtain the vcpu count from domain id: %(uuid)s, exception: %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4864
+#: nova/virt/libvirt/driver.py:5159
#, python-format
msgid "URI %(uri)s does not support listDevices: %(error)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4883
+#: nova/virt/libvirt/driver.py:5178
#, python-format
msgid ""
"You are running with libvirt version %s which is known to have broken "
@@ -1371,7 +1456,7 @@ msgid ""
"need NUMA support."
msgstr ""
-#: nova/virt/libvirt/driver.py:5590
+#: nova/virt/libvirt/driver.py:5878
msgid ""
"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag,"
" and the graphics (VNC and/or SPICE) listen addresses on the destination "
@@ -1381,65 +1466,65 @@ msgid ""
"succeed, but the VM will continue to listen on the current addresses."
msgstr ""
-#: nova/virt/libvirt/driver.py:5689
+#: nova/virt/libvirt/driver.py:5979
#, python-format
msgid ""
"An error occurred trying to live migrate. Falling back to legacy live "
"migrate flow. Error: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5861
+#: nova/virt/libvirt/driver.py:6151
#, python-format
msgid "Unable to stat %(disk)s: %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5949
+#: nova/virt/libvirt/driver.py:6239
#, python-format
msgid "Live migration stuck for %d sec"
msgstr ""
-#: nova/virt/libvirt/driver.py:5955
+#: nova/virt/libvirt/driver.py:6245
#, python-format
msgid "Live migration not completed after %d sec"
msgstr ""
-#: nova/virt/libvirt/driver.py:5963
+#: nova/virt/libvirt/driver.py:6253
#, python-format
msgid "Failed to abort migration %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5983
+#: nova/virt/libvirt/driver.py:6273
#, python-format
msgid "Unable to increase max downtime to %(time)dms: %(e)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:6049
+#: nova/virt/libvirt/driver.py:6339
msgid "Migration operation was cancelled"
msgstr ""
-#: nova/virt/libvirt/driver.py:6055
+#: nova/virt/libvirt/driver.py:6345
#, python-format
msgid "Unexpected migration job type: %d"
msgstr ""
-#: nova/virt/libvirt/driver.py:6120
+#: nova/virt/libvirt/driver.py:6409
#, python-format
msgid "Error monitoring migration: %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:6299
+#: nova/virt/libvirt/driver.py:6580
#, python-format
msgid "plug_vifs() failed %(cnt)d. Retry up to %(max_retry)d."
msgstr ""
-#: nova/virt/libvirt/driver.py:6580 nova/virt/libvirt/driver.py:6607
+#: nova/virt/libvirt/driver.py:6864 nova/virt/libvirt/driver.py:6891
#, python-format
msgid ""
"Error from libvirt while getting description of %(instance_name)s: [Error"
" Code %(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:6615
+#: nova/virt/libvirt/driver.py:6899
#, python-format
msgid ""
"Periodic task is updating the host stat, it is trying to get disk "
@@ -1447,7 +1532,7 @@ msgid ""
"resize."
msgstr ""
-#: nova/virt/libvirt/driver.py:6621
+#: nova/virt/libvirt/driver.py:6905
#, python-format
msgid ""
"Periodic task is updating the host stat, it is trying to get disk "
@@ -1455,7 +1540,7 @@ msgid ""
"exists on the compute node but is not managed by Nova."
msgstr ""
-#: nova/virt/libvirt/driver.py:6630
+#: nova/virt/libvirt/driver.py:6914
#, python-format
msgid ""
"Periodic task is updating the host stats, it is trying to get disk info "
@@ -1463,19 +1548,24 @@ msgid ""
"concurrent operations such as resize. Error: %(error)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:7234
+#: nova/virt/libvirt/driver.py:7250
+#, python-format
+msgid "Failed to rollback snapshot (%s)"
+msgstr ""
+
+#: nova/virt/libvirt/driver.py:7530
#, python-format
msgid ""
"Ignoring supplied device name: %(device_name)s. Libvirt can't honour "
"user-supplied dev names"
msgstr ""
-#: nova/virt/libvirt/driver.py:7257
+#: nova/virt/libvirt/driver.py:7551
#, python-format
msgid "Ignoring supplied device name: %(suggested_dev)s"
msgstr ""
-#: nova/virt/libvirt/firewall.py:62
+#: nova/virt/libvirt/firewall.py:60
msgid ""
"Libvirt module could not be loaded. NWFilterFirewall will not work "
"correctly."
@@ -1501,6 +1591,11 @@ msgstr ""
msgid "couldn't obtain the memory from domain: %(uuid)s, exception: %(ex)s"
msgstr ""
+#: nova/virt/libvirt/imagebackend.py:273
+#, python-format
+msgid "Unable to preallocate image at path: %(path)s"
+msgstr ""
+
#: nova/virt/libvirt/imagecache.py:332
#, python-format
msgid ""
@@ -1545,28 +1640,45 @@ msgstr ""
msgid "ignoring missing logical volume %(path)s"
msgstr ""
-#: nova/virt/libvirt/storage/rbd_utils.py:255
+#: nova/virt/libvirt/storage/rbd_utils.py:290
#, python-format
msgid "image %(volume)s in pool %(pool)s can not be found, failed to remove"
msgstr ""
-#: nova/virt/libvirt/storage/rbd_utils.py:286
+#: nova/virt/libvirt/storage/rbd_utils.py:326
#, python-format
msgid "rbd remove %(volume)s in pool %(pool)s failed"
msgstr ""
+#: nova/virt/libvirt/storage/rbd_utils.py:402
+#, python-format
+msgid "snapshot(%(name)s) on rbd image(%(img)s) is protected, skipping"
+msgstr ""
+
+#: nova/virt/libvirt/storage/rbd_utils.py:411
+#, python-format
+msgid "no snapshot(%(name)s) found on rbd image(%(img)s)"
+msgstr ""
+
#: nova/virt/libvirt/volume/glusterfs.py:121
-#: nova/virt/libvirt/volume/nfs.py:108 nova/virt/libvirt/volume/remotefs.py:64
+#: nova/virt/libvirt/volume/nfs.py:109 nova/virt/libvirt/volume/remotefs.py:64
#, python-format
msgid "%s is already mounted"
msgstr ""
-#: nova/virt/libvirt/volume/volume.py:84
+#: nova/virt/libvirt/volume/volume.py:88
#, python-format
msgid "Unknown content in connection_info/qos_specs: %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:200
+#: nova/virt/libvirt/volume/volume.py:115
+#, python-format
+msgid ""
+"Unable to attach %(type)s volume %(serial)s with discard enabled: qemu "
+"%(qemu)s and libvirt %(libvirt)s or later are required."
+msgstr ""
+
+#: nova/virt/vmwareapi/driver.py:199
#, python-format
msgid ""
"Running Nova with a VMware vCenter version less than %(version)s is "
@@ -1574,16 +1686,16 @@ msgid ""
"%(version)s in the 13.0.0 release."
msgstr ""
-#: nova/virt/vmwareapi/driver.py:227
+#: nova/virt/vmwareapi/driver.py:226
msgid "datastore_regex is ignored when PBM is enabled"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:431
+#: nova/virt/vmwareapi/driver.py:428
#, python-format
msgid "The volume %s does not exist!"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:462
+#: nova/virt/vmwareapi/driver.py:459
msgid ""
"Instance does not exists. Proceeding to delete instance properties on "
"datastore"
@@ -1618,31 +1730,31 @@ msgstr ""
msgid "Get esx cookies failed: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:496
+#: nova/virt/vmwareapi/vmops.py:504
#, python-format
msgid ""
"Destination %s already exists! Concurrent moves can lead to unexpected "
"results."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1055
+#: nova/virt/vmwareapi/vmops.py:1068
#, python-format
msgid ""
"In vmwareapi:vmops:_destroy_instance, got this exception while un-"
"registering the VM: %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1078
+#: nova/virt/vmwareapi/vmops.py:1091
msgid ""
"In vmwareapi:vmops:_destroy_instance, exception while deleting the VM "
"contents from the disk"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1082
+#: nova/virt/vmwareapi/vmops.py:1095
msgid "Instance does not exist on backend"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1843
+#: nova/virt/vmwareapi/vmops.py:1857
#, python-format
msgid "Root disk file creation failed - %s"
msgstr ""
@@ -1666,7 +1778,7 @@ msgstr ""
msgid "Invalid 'agent_present' value. Falling back to the default."
msgstr ""
-#: nova/virt/xenapi/driver.py:390
+#: nova/virt/xenapi/driver.py:387
#, python-format
msgid "Could not determine key: %s"
msgstr ""
@@ -1765,25 +1877,25 @@ msgstr ""
msgid "VM is not present, skipping destroy..."
msgstr ""
-#: nova/virt/xenapi/vmops.py:1652
+#: nova/virt/xenapi/vmops.py:1661
msgid "VM is not present, skipping soft delete..."
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:240
+#: nova/virt/xenapi/volume_utils.py:251
msgid "Cannot purge SR with referenced VDIs"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:257
+#: nova/virt/xenapi/volume_utils.py:268
#, python-format
msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:265
+#: nova/virt/xenapi/volume_utils.py:276
#, python-format
msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:289
+#: nova/virt/xenapi/volume_utils.py:300
#, python-format
msgid "Mountpoint cannot be translated: %s"
msgstr ""
@@ -1813,13 +1925,30 @@ msgid ""
"6)."
msgstr ""
-#: nova/volume/cinder.py:133
+#: nova/volume/cinder.py:134
msgid ""
"Cinder V1 API is deprecated as of the Juno release, and Nova is still "
"configured to use it. Enable the V2 API in Cinder and set "
"cinder.catalog_info in nova.conf to use it."
msgstr ""
+#: nova/volume/cinder.py:376
+#, python-format
+msgid ""
+"attachment_id couldn't be retrieved for volume %(volume_id)s with "
+"instance_uuid %(instance_id)s. The volume has the 'multiattach' flag "
+"enabled, without the attachment_id Cinder most probably cannot perform "
+"the detach."
+msgstr ""
+
+#: nova/volume/cinder.py:386
+#, python-format
+msgid ""
+"attachment_id couldn't be retrieved for volume %(volume_id)s. The volume "
+"has the 'multiattach' flag enabled, without the attachment_id Cinder most"
+" probably cannot perform the detach."
+msgstr ""
+
#: nova/volume/encryptors/__init__.py:70
#, python-format
msgid "Volume %s should be encrypted but there is no encryption metadata."
diff --git a/nova/locale/nova.pot b/nova/locale/nova.pot
index 1e1b7fc45d..b82e345360 100644
--- a/nova/locale/nova.pot
+++ b/nova/locale/nova.pot
@@ -6,9 +6,9 @@
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
+"POT-Creation-Date: 2016-02-08 07:00+0000\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
@@ -61,7 +61,15 @@ msgstr ""
msgid "Invalid volume_size."
msgstr ""
-#: nova/context.py:160
+#: nova/cache_utils.py:81
+msgid "memcached_servers not defined"
+msgstr ""
+
+#: nova/cache_utils.py:137
+msgid "old style configuration can use only dictionary or memcached backends"
+msgstr ""
+
+#: nova/context.py:159
#, python-format
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr ""
@@ -629,54 +637,61 @@ msgstr ""
#: nova/exception.py:639
#, python-format
+msgid ""
+"Block Device Mapping %(volume_id)s is a multi-attach volume and is not "
+"valid for this operation."
+msgstr ""
+
+#: nova/exception.py:644
+#, python-format
msgid "No volume Block Device Mapping at path: %(path)s"
msgstr ""
-#: nova/exception.py:643
+#: nova/exception.py:648
#, python-format
msgid "Device detach failed for %(device)s: %(reason)s)"
msgstr ""
-#: nova/exception.py:647
+#: nova/exception.py:652
#, python-format
msgid "Device '%(device)s' not found."
msgstr ""
-#: nova/exception.py:652
+#: nova/exception.py:657
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr ""
-#: nova/exception.py:656
+#: nova/exception.py:661
#, python-format
msgid "No disk at %(location)s"
msgstr ""
-#: nova/exception.py:660
+#: nova/exception.py:665
#, python-format
msgid "Could not find a handler for %(driver_type)s volume."
msgstr ""
-#: nova/exception.py:664
+#: nova/exception.py:669
#, python-format
msgid "Invalid image href %(image_href)s."
msgstr ""
-#: nova/exception.py:668
+#: nova/exception.py:673
#, python-format
msgid "Requested image %(image)s has automatic disk resize disabled."
msgstr ""
-#: nova/exception.py:673
+#: nova/exception.py:678
#, python-format
msgid "Image %(image_id)s could not be found."
msgstr ""
-#: nova/exception.py:677
+#: nova/exception.py:682
msgid "The current driver does not support preserving ephemeral partitions."
msgstr ""
-#: nova/exception.py:683
+#: nova/exception.py:688
#, python-format
msgid ""
"Image %(image_id)s could not be found. The nova EC2 API assigns image ids"
@@ -684,160 +699,160 @@ msgid ""
"image ids since adding this image?"
msgstr ""
-#: nova/exception.py:690
+#: nova/exception.py:695
#, python-format
msgid "Project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:694
+#: nova/exception.py:699
msgid "Cannot find SR to read/write VDI."
msgstr ""
-#: nova/exception.py:698
+#: nova/exception.py:703
#, python-format
msgid "Instance %(uuid)s has no mapping to a cell."
msgstr ""
-#: nova/exception.py:702
+#: nova/exception.py:707
#, python-format
msgid "Network %(network_id)s is duplicated."
msgstr ""
-#: nova/exception.py:706
+#: nova/exception.py:711
#, python-format
msgid "Failed to release IP %(address)s with MAC %(mac_address)s"
msgstr ""
-#: nova/exception.py:710
+#: nova/exception.py:715
#, python-format
msgid "Network %(network_id)s is still in use."
msgstr ""
-#: nova/exception.py:714
+#: nova/exception.py:719
#, python-format
msgid "Network set host failed for network %(network_id)s."
msgstr ""
-#: nova/exception.py:718
+#: nova/exception.py:723
#, python-format
msgid "%(req)s is required to create a network."
msgstr ""
-#: nova/exception.py:722
+#: nova/exception.py:727
msgid "Maximum allowed length for 'label' is 255."
msgstr ""
-#: nova/exception.py:726
+#: nova/exception.py:731
#, python-format
msgid "%(key)s must be an integer."
msgstr ""
-#: nova/exception.py:730
+#: nova/exception.py:735
#, python-format
msgid "%(cidr)s is not a valid IP network."
msgstr ""
-#: nova/exception.py:734
+#: nova/exception.py:739
#, python-format
msgid "%(address)s is not a valid IP address."
msgstr ""
-#: nova/exception.py:738
+#: nova/exception.py:743
#, python-format
msgid "%(address)s is not within %(cidr)s."
msgstr ""
-#: nova/exception.py:742
+#: nova/exception.py:747
#, python-format
msgid "Detected existing vlan with id %(vlan)d"
msgstr ""
-#: nova/exception.py:747
+#: nova/exception.py:752
#, python-format
msgid "Requested cidr (%(cidr)s) conflicts with existing cidr (%(other)s)"
msgstr ""
-#: nova/exception.py:753
+#: nova/exception.py:758
#, python-format
msgid ""
"Network must be disassociated from project %(project_id)s before it can "
"be deleted."
msgstr ""
-#: nova/exception.py:758
+#: nova/exception.py:763
#, python-format
msgid "Network %(network_id)s could not be found."
msgstr ""
-#: nova/exception.py:762
+#: nova/exception.py:767
#, python-format
msgid "Port id %(port_id)s could not be found."
msgstr ""
-#: nova/exception.py:766
+#: nova/exception.py:771
#, python-format
msgid "Network could not be found for bridge %(bridge)s"
msgstr ""
-#: nova/exception.py:770
+#: nova/exception.py:775
#, python-format
msgid "Network could not be found for uuid %(uuid)s"
msgstr ""
-#: nova/exception.py:774
+#: nova/exception.py:779
#, python-format
msgid "Network could not be found with cidr %(cidr)s."
msgstr ""
-#: nova/exception.py:778
+#: nova/exception.py:783
#, python-format
msgid "Network could not be found for instance %(instance_id)s."
msgstr ""
-#: nova/exception.py:782
+#: nova/exception.py:787
msgid "No networks defined."
msgstr ""
-#: nova/exception.py:786
+#: nova/exception.py:791
msgid "No more available networks."
msgstr ""
-#: nova/exception.py:790
+#: nova/exception.py:795
#, python-format
msgid ""
"Either network uuid %(network_uuid)s is not present or is not assigned to"
" the project %(project_id)s."
msgstr ""
-#: nova/exception.py:795
+#: nova/exception.py:800
msgid ""
"More than one possible network found. Specify network ID(s) to select "
"which one(s) to connect to."
msgstr ""
-#: nova/exception.py:800
+#: nova/exception.py:805
#, python-format
msgid "Network %(network_uuid)s requires a subnet in order to boot instances on."
msgstr ""
-#: nova/exception.py:805
+#: nova/exception.py:810
#, python-format
msgid ""
"It is not allowed to create an interface on external network "
"%(network_uuid)s"
msgstr ""
-#: nova/exception.py:810
+#: nova/exception.py:815
#, python-format
msgid "Physical network is missing for network %(network_uuid)s"
msgstr ""
-#: nova/exception.py:814
+#: nova/exception.py:819
#, python-format
msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s"
msgstr ""
-#: nova/exception.py:819
+#: nova/exception.py:824
#, python-format
msgid ""
"Parameters %(missing_params)s not present in vif_details for vif "
@@ -845,614 +860,614 @@ msgid ""
" parameters are correct."
msgstr ""
-#: nova/exception.py:826
+#: nova/exception.py:831
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
-#: nova/exception.py:830
+#: nova/exception.py:835
#, python-format
msgid "Port %(port_id)s is still in use."
msgstr ""
-#: nova/exception.py:834
+#: nova/exception.py:839
#, python-format
msgid "Port %(port_id)s requires a FixedIP in order to be used."
msgstr ""
-#: nova/exception.py:838
+#: nova/exception.py:843
#, python-format
msgid "Port %(port_id)s not usable for instance %(instance)s."
msgstr ""
-#: nova/exception.py:842
+#: nova/exception.py:847
#, python-format
msgid "No free port available for instance %(instance)s."
msgstr ""
-#: nova/exception.py:846
+#: nova/exception.py:851
#, python-format
msgid ""
"Binding failed for port %(port_id)s, please check neutron logs for more "
"information."
msgstr ""
-#: nova/exception.py:851
+#: nova/exception.py:856
#, python-format
msgid "Fixed IP %(address)s already exists."
msgstr ""
-#: nova/exception.py:855
+#: nova/exception.py:860
#, python-format
msgid "No fixed IP associated with id %(id)s."
msgstr ""
-#: nova/exception.py:859
+#: nova/exception.py:864
#, python-format
msgid "Fixed IP not found for address %(address)s."
msgstr ""
-#: nova/exception.py:863
+#: nova/exception.py:868
#, python-format
msgid "Instance %(instance_uuid)s has zero fixed IPs."
msgstr ""
-#: nova/exception.py:867
+#: nova/exception.py:872
#, python-format
msgid "Network host %(host)s has zero fixed IPs in network %(network_id)s."
msgstr ""
-#: nova/exception.py:872
+#: nova/exception.py:877
#, python-format
msgid "Instance %(instance_uuid)s doesn't have fixed IP '%(ip)s'."
msgstr ""
-#: nova/exception.py:876
+#: nova/exception.py:881
#, python-format
msgid ""
"Fixed IP address (%(address)s) does not exist in network "
"(%(network_uuid)s)."
msgstr ""
-#: nova/exception.py:881
+#: nova/exception.py:886
#, python-format
msgid "Fixed IP associate failed for network: %(net)s."
msgstr ""
-#: nova/exception.py:885
+#: nova/exception.py:890
#, python-format
msgid ""
"Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s."
msgstr ""
-#: nova/exception.py:890
+#: nova/exception.py:895
#, python-format
msgid "More than one instance is associated with fixed IP address '%(address)s'."
msgstr ""
-#: nova/exception.py:895
+#: nova/exception.py:900
#, python-format
msgid "Fixed IP address %(address)s is invalid."
msgstr ""
-#: nova/exception.py:900
+#: nova/exception.py:905
#, python-format
msgid "No fixed IP addresses available for network: %(net)s"
msgstr ""
-#: nova/exception.py:904
+#: nova/exception.py:909
msgid "Zero fixed IPs could be found."
msgstr ""
-#: nova/exception.py:908
+#: nova/exception.py:913
#, python-format
msgid "Floating IP %(address)s already exists."
msgstr ""
-#: nova/exception.py:913
+#: nova/exception.py:918
#, python-format
msgid "Floating IP not found for ID %(id)s."
msgstr ""
-#: nova/exception.py:917
+#: nova/exception.py:922
#, python-format
msgid "The DNS entry %(name)s already exists in domain %(domain)s."
msgstr ""
-#: nova/exception.py:921
+#: nova/exception.py:926
#, python-format
msgid "Floating IP not found for address %(address)s."
msgstr ""
-#: nova/exception.py:925
+#: nova/exception.py:930
#, python-format
msgid "Floating IP not found for host %(host)s."
msgstr ""
-#: nova/exception.py:929
+#: nova/exception.py:934
#, python-format
msgid "Multiple floating IPs are found for address %(address)s."
msgstr ""
-#: nova/exception.py:933
+#: nova/exception.py:938
msgid "Floating IP pool not found."
msgstr ""
-#: nova/exception.py:938
+#: nova/exception.py:943
msgid "Zero floating IPs available."
msgstr ""
-#: nova/exception.py:944
+#: nova/exception.py:949
#, python-format
msgid "Floating IP %(address)s is associated."
msgstr ""
-#: nova/exception.py:948
+#: nova/exception.py:953
#, python-format
msgid "Floating IP %(address)s is not associated."
msgstr ""
-#: nova/exception.py:952
+#: nova/exception.py:957
msgid "Zero floating IPs exist."
msgstr ""
-#: nova/exception.py:957
+#: nova/exception.py:962
#, python-format
msgid "Interface %(interface)s not found."
msgstr ""
-#: nova/exception.py:961
+#: nova/exception.py:966
msgid "Floating IP allocate failed."
msgstr ""
-#: nova/exception.py:965
+#: nova/exception.py:970
#, python-format
msgid "Floating IP %(address)s association has failed."
msgstr ""
-#: nova/exception.py:970
+#: nova/exception.py:975
msgid "The floating IP request failed with a BadRequest"
msgstr ""
-#: nova/exception.py:975 nova/api/openstack/compute/floating_ips.py:83
-#: nova/api/openstack/compute/floating_ips.py:176
+#: nova/exception.py:980 nova/api/openstack/compute/floating_ips.py:84
+#: nova/api/openstack/compute/floating_ips.py:177
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:73
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:156
msgid "Cannot disassociate auto assigned floating IP"
msgstr ""
-#: nova/exception.py:980
+#: nova/exception.py:985
#, python-format
msgid "Keypair %(name)s not found for user %(user_id)s"
msgstr ""
-#: nova/exception.py:984
+#: nova/exception.py:989
#, python-format
msgid "Service %(service_id)s could not be found."
msgstr ""
-#: nova/exception.py:988
+#: nova/exception.py:993
#, python-format
msgid "Service with host %(host)s binary %(binary)s exists."
msgstr ""
-#: nova/exception.py:992
+#: nova/exception.py:997
#, python-format
msgid "Service with host %(host)s topic %(topic)s exists."
msgstr ""
-#: nova/exception.py:996
+#: nova/exception.py:1001
#, python-format
msgid "Host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:1000
+#: nova/exception.py:1005
#, python-format
msgid "Compute host %(host)s could not be found."
msgstr ""
-#: nova/exception.py:1004
+#: nova/exception.py:1009
#, python-format
msgid "Compute host %(name)s needs to be created first before updating."
msgstr ""
-#: nova/exception.py:1009
+#: nova/exception.py:1014
#, python-format
msgid "Could not find binary %(binary)s on host %(host)s."
msgstr ""
-#: nova/exception.py:1013
+#: nova/exception.py:1018
#, python-format
msgid "Invalid reservation expiration %(expire)s."
msgstr ""
-#: nova/exception.py:1017
+#: nova/exception.py:1022
#, python-format
msgid ""
"Change would make usage less than 0 for the following resources: "
"%(unders)s"
msgstr ""
-#: nova/exception.py:1022
+#: nova/exception.py:1027
#, python-format
msgid "Wrong quota method %(method)s used on resource %(res)s"
msgstr ""
-#: nova/exception.py:1026
+#: nova/exception.py:1031
msgid "Quota could not be found"
msgstr ""
-#: nova/exception.py:1030
+#: nova/exception.py:1035
#, python-format
msgid "Quota exists for project %(project_id)s, resource %(resource)s"
msgstr ""
-#: nova/exception.py:1035
+#: nova/exception.py:1040
#, python-format
msgid "Unknown quota resources %(unknown)s."
msgstr ""
-#: nova/exception.py:1039
+#: nova/exception.py:1044
#, python-format
msgid "Quota for user %(user_id)s in project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:1044
+#: nova/exception.py:1049
#, python-format
msgid "Quota for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:1048
+#: nova/exception.py:1053
#, python-format
msgid "Quota class %(class_name)s could not be found."
msgstr ""
-#: nova/exception.py:1052
+#: nova/exception.py:1057
#, python-format
msgid "Quota usage for project %(project_id)s could not be found."
msgstr ""
-#: nova/exception.py:1056
+#: nova/exception.py:1061
#, python-format
msgid "Quota reservation %(uuid)s could not be found."
msgstr ""
-#: nova/exception.py:1060
+#: nova/exception.py:1065
#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr ""
-#: nova/exception.py:1064
+#: nova/exception.py:1069
#, python-format
msgid "Security group %(security_group_id)s not found."
msgstr ""
-#: nova/exception.py:1068
+#: nova/exception.py:1073
#, python-format
msgid "Security group %(security_group_id)s not found for project %(project_id)s."
msgstr ""
-#: nova/exception.py:1073
+#: nova/exception.py:1078
#, python-format
msgid "Security group with rule %(rule_id)s not found."
msgstr ""
-#: nova/exception.py:1078
+#: nova/exception.py:1083
#, python-format
msgid ""
"Security group %(security_group_name)s already exists for project "
"%(project_id)s."
msgstr ""
-#: nova/exception.py:1083
+#: nova/exception.py:1088
#, python-format
msgid ""
"Security group %(security_group_id)s is already associated with the "
"instance %(instance_id)s"
msgstr ""
-#: nova/exception.py:1088
+#: nova/exception.py:1093
#, python-format
msgid ""
"Security group %(security_group_id)s is not associated with the instance "
"%(instance_id)s"
msgstr ""
-#: nova/exception.py:1093
+#: nova/exception.py:1098
#, python-format
msgid "Security group default rule (%rule_id)s not found."
msgstr ""
-#: nova/exception.py:1097
+#: nova/exception.py:1102
msgid ""
"Network requires port_security_enabled and subnet associated in order to "
"apply security groups."
msgstr ""
-#: nova/exception.py:1103
+#: nova/exception.py:1108
#, python-format
msgid "Rule already exists in group: %(rule)s"
msgstr ""
-#: nova/exception.py:1107
+#: nova/exception.py:1112
msgid "No Unique Match Found."
msgstr ""
-#: nova/exception.py:1112
+#: nova/exception.py:1117
#, python-format
msgid "Migration %(migration_id)s could not be found."
msgstr ""
-#: nova/exception.py:1116
+#: nova/exception.py:1121
#, python-format
msgid "Migration not found for instance %(instance_id)s with status %(status)s."
msgstr ""
-#: nova/exception.py:1121
+#: nova/exception.py:1126
#, python-format
msgid ""
"Console log output could not be retrieved for instance %(instance_id)s. "
"Reason: %(reason)s"
msgstr ""
-#: nova/exception.py:1126
+#: nova/exception.py:1131
#, python-format
msgid "Console pool %(pool_id)s could not be found."
msgstr ""
-#: nova/exception.py:1130
+#: nova/exception.py:1135
#, python-format
msgid ""
"Console pool with host %(host)s, console_type %(console_type)s and "
"compute_host %(compute_host)s already exists."
msgstr ""
-#: nova/exception.py:1136
+#: nova/exception.py:1141
#, python-format
msgid ""
"Console pool of type %(console_type)s for compute host %(compute_host)s "
"on proxy host %(host)s not found."
msgstr ""
-#: nova/exception.py:1142
+#: nova/exception.py:1147
#, python-format
msgid "Console %(console_id)s could not be found."
msgstr ""
-#: nova/exception.py:1146
+#: nova/exception.py:1151
#, python-format
msgid "Console for instance %(instance_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:1150
+#: nova/exception.py:1155
#, python-format
msgid ""
"Console for instance %(instance_uuid)s in pool %(pool_id)s could not be "
"found."
msgstr ""
-#: nova/exception.py:1155
+#: nova/exception.py:1160
#, python-format
msgid "Invalid console type %(console_type)s"
msgstr ""
-#: nova/exception.py:1159
+#: nova/exception.py:1164
#, python-format
msgid "Unavailable console type %(console_type)s."
msgstr ""
-#: nova/exception.py:1163
+#: nova/exception.py:1168
#, python-format
msgid "The console port range %(min_port)d-%(max_port)d is exhausted."
msgstr ""
-#: nova/exception.py:1168
+#: nova/exception.py:1173
#, python-format
msgid "Flavor %(flavor_id)s could not be found."
msgstr ""
-#: nova/exception.py:1172
+#: nova/exception.py:1177
#, python-format
msgid "Flavor with name %(flavor_name)s could not be found."
msgstr ""
-#: nova/exception.py:1176
+#: nova/exception.py:1181
#, python-format
msgid "Flavor access not found for %(flavor_id)s / %(project_id)s combination."
msgstr ""
-#: nova/exception.py:1181
+#: nova/exception.py:1186
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
+"Flavor %(id)s extra spec cannot be updated or created after %(retries)d "
"retries."
msgstr ""
-#: nova/exception.py:1186
+#: nova/exception.py:1191
#, python-format
msgid "Cell %(cell_name)s doesn't exist."
msgstr ""
-#: nova/exception.py:1190
+#: nova/exception.py:1195
#, python-format
msgid "Cell with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:1194
+#: nova/exception.py:1199
#, python-format
msgid "Inconsistency in cell routing: %(reason)s"
msgstr ""
-#: nova/exception.py:1198
+#: nova/exception.py:1203
#, python-format
msgid "Service API method not found: %(detail)s"
msgstr ""
-#: nova/exception.py:1202
+#: nova/exception.py:1207
msgid "Timeout waiting for response from cell"
msgstr ""
-#: nova/exception.py:1206
+#: nova/exception.py:1211
#, python-format
msgid "Cell message has reached maximum hop count: %(hop_count)s"
msgstr ""
-#: nova/exception.py:1210
+#: nova/exception.py:1215
msgid "No cells available matching scheduling criteria."
msgstr ""
-#: nova/exception.py:1214
+#: nova/exception.py:1219
msgid "Cannot update cells configuration file."
msgstr ""
-#: nova/exception.py:1218
+#: nova/exception.py:1223
#, python-format
msgid "Cell is not known for instance %(instance_uuid)s"
msgstr ""
-#: nova/exception.py:1222
+#: nova/exception.py:1227
#, python-format
msgid "Scheduler Host Filter %(filter_name)s could not be found."
msgstr ""
-#: nova/exception.py:1226
+#: nova/exception.py:1231
#, python-format
msgid "Flavor %(flavor_id)s has no extra specs with key %(extra_specs_key)s."
msgstr ""
-#: nova/exception.py:1231
+#: nova/exception.py:1236
#, python-format
msgid ""
"Metric %(name)s could not be found on the compute host node "
"%(host)s.%(node)s."
msgstr ""
-#: nova/exception.py:1236
+#: nova/exception.py:1241
#, python-format
msgid "File %(file_path)s could not be found."
msgstr ""
-#: nova/exception.py:1240
+#: nova/exception.py:1245
#, python-format
msgid "Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
-#: nova/exception.py:1245
+#: nova/exception.py:1250
#, python-format
msgid "Network adapter %(adapter)s could not be found."
msgstr ""
-#: nova/exception.py:1249
+#: nova/exception.py:1254
#, python-format
msgid "Class %(class_name)s could not be found: %(exception)s"
msgstr ""
-#: nova/exception.py:1253
+#: nova/exception.py:1258
#, python-format
msgid "Instance %(instance_id)s has no tag '%(tag)s'"
msgstr ""
-#: nova/exception.py:1257
+#: nova/exception.py:1262
msgid "Rotation param is required for backup image_type"
msgstr ""
-#: nova/exception.py:1262
+#: nova/exception.py:1267
#, python-format
msgid "Key pair '%(key_name)s' already exists."
msgstr ""
-#: nova/exception.py:1266
+#: nova/exception.py:1271
#, python-format
msgid "Instance %(name)s already exists."
msgstr ""
-#: nova/exception.py:1270
+#: nova/exception.py:1275
#, python-format
msgid "Flavor with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:1274
+#: nova/exception.py:1279
#, python-format
msgid "Flavor with ID %(flavor_id)s already exists."
msgstr ""
-#: nova/exception.py:1278
+#: nova/exception.py:1283
#, python-format
msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
-#: nova/exception.py:1283
+#: nova/exception.py:1288
#, python-format
msgid "%(path)s is not on shared storage: %(reason)s"
msgstr ""
-#: nova/exception.py:1287
+#: nova/exception.py:1292
#, python-format
msgid "%(path)s is not on local storage: %(reason)s"
msgstr ""
-#: nova/exception.py:1291
+#: nova/exception.py:1296
#, python-format
msgid "Storage error: %(reason)s"
msgstr ""
-#: nova/exception.py:1295
+#: nova/exception.py:1300
#, python-format
msgid "Migration error: %(reason)s"
msgstr ""
-#: nova/exception.py:1299
+#: nova/exception.py:1304
#, python-format
msgid "Migration pre-check error: %(reason)s"
msgstr ""
-#: nova/exception.py:1303
+#: nova/exception.py:1308
#, python-format
msgid "Malformed message body: %(reason)s"
msgstr ""
-#: nova/exception.py:1309
+#: nova/exception.py:1314
#, python-format
msgid "Could not find config at %(path)s"
msgstr ""
-#: nova/exception.py:1313
+#: nova/exception.py:1318
#, python-format
msgid "Could not load paste app '%(name)s' from %(path)s"
msgstr ""
-#: nova/exception.py:1317
+#: nova/exception.py:1322
msgid "When resizing, instances must change flavor!"
msgstr ""
-#: nova/exception.py:1321
+#: nova/exception.py:1326
#, python-format
msgid "Resize error: %(reason)s"
msgstr ""
-#: nova/exception.py:1325
+#: nova/exception.py:1330
#, python-format
msgid "Server disk was unable to be resized because: %(reason)s"
msgstr ""
-#: nova/exception.py:1329
+#: nova/exception.py:1334
msgid "Flavor's memory is too small for requested image."
msgstr ""
-#: nova/exception.py:1333
+#: nova/exception.py:1338
msgid "The created instance's disk would be too small."
msgstr ""
-#: nova/exception.py:1337
+#: nova/exception.py:1342
#, python-format
msgid ""
"Flavor's disk is too small for requested image. Flavor disk is "
"%(flavor_size)i bytes, image is %(image_size)i bytes."
msgstr ""
-#: nova/exception.py:1342
+#: nova/exception.py:1347
#, python-format
msgid ""
"Flavor's disk is smaller than the minimum size specified in image "
@@ -1460,7 +1475,7 @@ msgid ""
"%(image_min_disk)i bytes."
msgstr ""
-#: nova/exception.py:1348
+#: nova/exception.py:1353
#, python-format
msgid ""
"Volume is smaller than the minimum size specified in image metadata. "
@@ -1468,523 +1483,536 @@ msgid ""
"bytes."
msgstr ""
-#: nova/exception.py:1354
+#: nova/exception.py:1359
#, python-format
msgid "Insufficient free memory on compute node to start %(uuid)s."
msgstr ""
-#: nova/exception.py:1358
+#: nova/exception.py:1363
#, python-format
msgid "No valid host was found. %(reason)s"
msgstr ""
-#: nova/exception.py:1362
+#: nova/exception.py:1367
#, python-format
msgid "Exceeded maximum number of retries. %(reason)s"
msgstr ""
-#: nova/exception.py:1367
+#: nova/exception.py:1372
#, python-format
msgid "Quota exceeded: code=%(code)s"
msgstr ""
-#: nova/exception.py:1376
+#: nova/exception.py:1381
#, python-format
msgid ""
"Quota exceeded for %(overs)s: Requested %(req)s, but already used "
"%(used)s of %(allowed)s %(overs)s"
msgstr ""
-#: nova/exception.py:1381
+#: nova/exception.py:1386
msgid "Maximum number of floating IPs exceeded"
msgstr ""
-#: nova/exception.py:1385
+#: nova/exception.py:1390
msgid "Maximum number of fixed IPs exceeded"
msgstr ""
-#: nova/exception.py:1389
+#: nova/exception.py:1394
#, python-format
msgid "Maximum number of metadata items exceeds %(allowed)d"
msgstr ""
-#: nova/exception.py:1393
+#: nova/exception.py:1398
msgid "Personality file limit exceeded"
msgstr ""
-#: nova/exception.py:1397
+#: nova/exception.py:1402
msgid "Personality file path too long"
msgstr ""
-#: nova/exception.py:1401
+#: nova/exception.py:1406
msgid "Personality file content too long"
msgstr ""
-#: nova/exception.py:1405
+#: nova/exception.py:1410
msgid "Maximum number of key pairs exceeded"
msgstr ""
-#: nova/exception.py:1410
+#: nova/exception.py:1415
msgid "Maximum number of security groups or rules exceeded"
msgstr ""
-#: nova/exception.py:1414
+#: nova/exception.py:1419
msgid "Maximum number of ports exceeded"
msgstr ""
-#: nova/exception.py:1418
+#: nova/exception.py:1423
#, python-format
msgid ""
"Aggregate %(aggregate_id)s: action '%(action)s' caused an error: "
"%(reason)s."
msgstr ""
-#: nova/exception.py:1423
+#: nova/exception.py:1428
#, python-format
msgid "Aggregate %(aggregate_id)s could not be found."
msgstr ""
-#: nova/exception.py:1427
+#: nova/exception.py:1432
#, python-format
msgid "Aggregate %(aggregate_name)s already exists."
msgstr ""
-#: nova/exception.py:1431
+#: nova/exception.py:1436
#, python-format
msgid "Aggregate %(aggregate_id)s has no host %(host)s."
msgstr ""
-#: nova/exception.py:1435
+#: nova/exception.py:1440
#, python-format
msgid "Aggregate %(aggregate_id)s has no metadata with key %(metadata_key)s."
msgstr ""
-#: nova/exception.py:1440
+#: nova/exception.py:1445
#, python-format
msgid "Aggregate %(aggregate_id)s already has host %(host)s."
msgstr ""
-#: nova/exception.py:1444
+#: nova/exception.py:1449
msgid "Unable to create flavor"
msgstr ""
-#: nova/exception.py:1448
+#: nova/exception.py:1453
#, python-format
msgid "Failed to set admin password on %(instance)s because %(reason)s"
msgstr ""
-#: nova/exception.py:1455
+#: nova/exception.py:1460
#, python-format
msgid "Instance %(instance_id)s could not be found."
msgstr ""
-#: nova/exception.py:1459
+#: nova/exception.py:1464
#, python-format
msgid "Info cache for instance %(instance_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:1465
+#: nova/exception.py:1470
msgid "Invalid association."
msgstr ""
-#: nova/exception.py:1469
+#: nova/exception.py:1474
#, python-format
msgid "Marker %(marker)s could not be found."
msgstr ""
-#: nova/exception.py:1473
+#: nova/exception.py:1478
#, python-format
msgid "Invalid id: %(instance_id)s (expecting \"i-...\")"
msgstr ""
-#: nova/exception.py:1478
+#: nova/exception.py:1483
#, python-format
msgid "Invalid id: %(volume_id)s (expecting \"i-...\")"
msgstr ""
-#: nova/exception.py:1483
+#: nova/exception.py:1488
#, python-format
msgid "Could not fetch image %(image_id)s"
msgstr ""
-#: nova/exception.py:1487
+#: nova/exception.py:1492
#, python-format
msgid "Could not upload image %(image_id)s"
msgstr ""
-#: nova/exception.py:1491
+#: nova/exception.py:1496
#, python-format
msgid "Task %(task_name)s is already running on host %(host)s"
msgstr ""
-#: nova/exception.py:1495
+#: nova/exception.py:1500
#, python-format
msgid "Task %(task_name)s is not running on host %(host)s"
msgstr ""
-#: nova/exception.py:1499
+#: nova/exception.py:1504
#, python-format
msgid "Instance %(instance_uuid)s is locked"
msgstr ""
-#: nova/exception.py:1503
+#: nova/exception.py:1508
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr ""
-#: nova/exception.py:1507
+#: nova/exception.py:1512
#, python-format
msgid "Config drive format '%(format)s' is not supported."
msgstr ""
-#: nova/exception.py:1511
+#: nova/exception.py:1516
#, python-format
msgid "Could not mount vfat config drive. %(operation)s failed. Error: %(error)s"
msgstr ""
-#: nova/exception.py:1516
+#: nova/exception.py:1521
#, python-format
msgid "Unknown config drive format %(format)s. Select one of iso9660 or vfat."
msgstr ""
-#: nova/exception.py:1521
+#: nova/exception.py:1526
#, python-format
msgid "Instance %(instance_uuid)s requires config drive, but it does not exist."
msgstr ""
-#: nova/exception.py:1526
+#: nova/exception.py:1531
#, python-format
msgid "Failed to attach network adapter device to %(instance_uuid)s"
msgstr ""
-#: nova/exception.py:1531
+#: nova/exception.py:1536
#, python-format
msgid "Failed to detach network adapter device from %(instance_uuid)s"
msgstr ""
-#: nova/exception.py:1536
+#: nova/exception.py:1541
#, python-format
msgid ""
"User data too large. User data must be no larger than %(maxsize)s bytes "
"once base64 encoded. Your data is %(length)d bytes"
msgstr ""
-#: nova/exception.py:1542
+#: nova/exception.py:1547
msgid "User data needs to be valid base 64."
msgstr ""
-#: nova/exception.py:1546
+#: nova/exception.py:1551
#, python-format
msgid ""
"Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. "
"Actual: %(actual)s"
msgstr ""
-#: nova/exception.py:1551
+#: nova/exception.py:1556
#, python-format
msgid ""
"Conflict updating instance %(instance_uuid)s, but we were unable to "
"determine the cause"
msgstr ""
-#: nova/exception.py:1564
+#: nova/exception.py:1569
#, python-format
msgid ""
"Action for request_id %(request_id)s on instance %(instance_uuid)s not "
"found"
msgstr ""
-#: nova/exception.py:1569
+#: nova/exception.py:1574
#, python-format
msgid "Event %(event)s not found for action id %(action_id)s"
msgstr ""
-#: nova/exception.py:1573
+#: nova/exception.py:1578
#, python-format
msgid "The CA file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1577
+#: nova/exception.py:1582
#, python-format
msgid "The CRL file for %(project)s could not be found"
msgstr ""
-#: nova/exception.py:1581
+#: nova/exception.py:1586
msgid "Instance recreate is not supported."
msgstr ""
-#: nova/exception.py:1585
+#: nova/exception.py:1590
#, python-format
msgid ""
"The service from servicegroup driver %(driver)s is temporarily "
"unavailable."
msgstr ""
-#: nova/exception.py:1590
+#: nova/exception.py:1595
#, python-format
msgid "%(binary)s attempted direct database access which is not allowed by policy"
msgstr ""
-#: nova/exception.py:1595
+#: nova/exception.py:1600
#, python-format
msgid "Virtualization type '%(virt)s' is not supported by this compute driver"
msgstr ""
-#: nova/exception.py:1600
+#: nova/exception.py:1605
#, python-format
msgid ""
"Requested hardware '%(model)s' is not supported by the '%(virt)s' virt "
"driver"
msgstr ""
-#: nova/exception.py:1605
+#: nova/exception.py:1610
#, python-format
msgid "Invalid Base 64 data for file %(path)s"
msgstr ""
-#: nova/exception.py:1609
+#: nova/exception.py:1614
#, python-format
msgid "Build of instance %(instance_uuid)s aborted: %(reason)s"
msgstr ""
-#: nova/exception.py:1613
+#: nova/exception.py:1618
#, python-format
msgid "Build of instance %(instance_uuid)s was re-scheduled: %(reason)s"
msgstr ""
-#: nova/exception.py:1618
+#: nova/exception.py:1623
#, python-format
msgid "Shadow table with name %(name)s already exists."
msgstr ""
-#: nova/exception.py:1623
+#: nova/exception.py:1628
#, python-format
msgid "Instance rollback performed due to: %s"
msgstr ""
-#: nova/exception.py:1629
+#: nova/exception.py:1634
#, python-format
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr ""
-#: nova/exception.py:1633
+#: nova/exception.py:1638
#, python-format
msgid "Object action %(action)s failed because: %(reason)s"
msgstr ""
-#: nova/exception.py:1637
+#: nova/exception.py:1642
#, python-format
msgid "Core API extensions are missing: %(missing_apis)s"
msgstr ""
-#: nova/exception.py:1641
+#: nova/exception.py:1646
#, python-format
msgid "Error during following call to agent: %(method)s"
msgstr ""
-#: nova/exception.py:1645
+#: nova/exception.py:1650
#, python-format
msgid "Unable to contact guest agent. The following call timed out: %(method)s"
msgstr ""
-#: nova/exception.py:1650
+#: nova/exception.py:1655
#, python-format
msgid "Agent does not support the call: %(method)s"
msgstr ""
-#: nova/exception.py:1654
+#: nova/exception.py:1659
#, python-format
msgid "Instance group %(group_uuid)s could not be found."
msgstr ""
-#: nova/exception.py:1658
+#: nova/exception.py:1663
#, python-format
msgid "Instance group %(group_uuid)s already exists."
msgstr ""
-#: nova/exception.py:1662
+#: nova/exception.py:1667
#, python-format
msgid "Instance group %(group_uuid)s has no member with id %(instance_id)s."
msgstr ""
-#: nova/exception.py:1667
+#: nova/exception.py:1672
#, python-format
msgid "Instance group %(group_uuid)s has no policy %(policy)s."
msgstr ""
-#: nova/exception.py:1671
+#: nova/exception.py:1676
#, python-format
msgid "%(field)s should not be part of the updates."
msgstr ""
-#: nova/exception.py:1675
+#: nova/exception.py:1680
#, python-format
msgid "Number of retries to plugin (%(num_retries)d) exceeded."
msgstr ""
-#: nova/exception.py:1679
+#: nova/exception.py:1684
#, python-format
msgid "There was an error with the download module %(module)s. %(reason)s"
msgstr ""
-#: nova/exception.py:1684
+#: nova/exception.py:1689
#, python-format
msgid ""
"The metadata for this location will not work with this module %(module)s."
" %(reason)s."
msgstr ""
-#: nova/exception.py:1689
+#: nova/exception.py:1694
#, python-format
msgid "The method %(method_name)s is not implemented."
msgstr ""
-#: nova/exception.py:1693
+#: nova/exception.py:1698
#, python-format
msgid "The module %(module)s is misconfigured: %(reason)s."
msgstr ""
-#: nova/exception.py:1697
+#: nova/exception.py:1702
#, python-format
msgid "Signature verification for the image failed: %(reason)s."
msgstr ""
-#: nova/exception.py:1702
+#: nova/exception.py:1707
#, python-format
msgid "Error when creating resource monitor: %(monitor)s"
msgstr ""
-#: nova/exception.py:1706
+#: nova/exception.py:1711
#, python-format
msgid "The PCI address %(address)s has an incorrect format."
msgstr ""
-#: nova/exception.py:1710
+#: nova/exception.py:1715
#, python-format
msgid ""
"Invalid PCI Whitelist: The PCI address %(address)s has an invalid "
"%(field)s."
msgstr ""
-#: nova/exception.py:1715
+#: nova/exception.py:1720
msgid ""
"Invalid PCI Whitelist: The PCI whitelist can specify devname or address, "
"but not both"
msgstr ""
-#: nova/exception.py:1721
+#: nova/exception.py:1726
#, python-format
msgid "PCI device %(id)s not found"
msgstr ""
-#: nova/exception.py:1725
+#: nova/exception.py:1730
#, python-format
msgid "PCI Device %(node_id)s:%(address)s not found."
msgstr ""
-#: nova/exception.py:1729
+#: nova/exception.py:1734
#, python-format
msgid ""
"PCI device %(compute_node_id)s:%(address)s is %(status)s instead of "
"%(hopestatus)s"
msgstr ""
-#: nova/exception.py:1735
+#: nova/exception.py:1740
+#, python-format
+msgid "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s are free."
+msgstr ""
+
+#: nova/exception.py:1746
+#, python-format
+msgid ""
+"Physical Function %(compute_node_id)s:%(address)s, related to VF "
+"%(compute_node_id)s:%(vf_address)s is %(status)s instead of "
+"%(hopestatus)s"
+msgstr ""
+
+#: nova/exception.py:1753
#, python-format
msgid ""
"PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s instead "
"of %(hopeowner)s"
msgstr ""
-#: nova/exception.py:1741
+#: nova/exception.py:1759
#, python-format
msgid "PCI device request %(requests)s failed"
msgstr ""
-#: nova/exception.py:1746
+#: nova/exception.py:1764
#, python-format
msgid ""
"Attempt to consume PCI device %(compute_node_id)s:%(address)s from empty "
"pool"
msgstr ""
-#: nova/exception.py:1752
+#: nova/exception.py:1770
#, python-format
msgid "Invalid PCI alias definition: %(reason)s"
msgstr ""
-#: nova/exception.py:1756
+#: nova/exception.py:1774
#, python-format
msgid "PCI alias %(alias)s is not defined"
msgstr ""
-#: nova/exception.py:1761
+#: nova/exception.py:1779
#, python-format
msgid "Not enough parameters: %(reason)s"
msgstr ""
-#: nova/exception.py:1766
+#: nova/exception.py:1784
#, python-format
msgid "Invalid PCI devices Whitelist config %(reason)s"
msgstr ""
-#: nova/exception.py:1776
+#: nova/exception.py:1794
#, python-format
msgid ""
"Failed to prepare PCI device %(id)s for instance %(instance_uuid)s: "
"%(reason)s"
msgstr ""
-#: nova/exception.py:1781
+#: nova/exception.py:1799
#, python-format
msgid "Failed to detach PCI device %(dev)s: %(reason)s"
msgstr ""
-#: nova/exception.py:1785
+#: nova/exception.py:1803
#, python-format
msgid "%(type)s hypervisor does not support PCI devices"
msgstr ""
-#: nova/exception.py:1789
+#: nova/exception.py:1807
#, python-format
msgid "Key manager error: %(reason)s"
msgstr ""
-#: nova/exception.py:1793
+#: nova/exception.py:1811
#, python-format
msgid "Failed to remove volume(s): (%(reason)s)"
msgstr ""
-#: nova/exception.py:1797
+#: nova/exception.py:1815
#, python-format
msgid "Provided video model (%(model)s) is not supported."
msgstr ""
-#: nova/exception.py:1801
+#: nova/exception.py:1819
#, python-format
msgid "The provided RNG device path: (%(path)s) is not present on the host."
msgstr ""
-#: nova/exception.py:1806
+#: nova/exception.py:1824
#, python-format
msgid ""
"The requested amount of video memory %(req_vram)d is higher than the "
"maximum allowed by flavor %(max_vram)d."
msgstr ""
-#: nova/exception.py:1811
+#: nova/exception.py:1829
#, python-format
msgid "Provided watchdog action (%(action)s) is not supported."
msgstr ""
-#: nova/exception.py:1815
+#: nova/exception.py:1833
msgid ""
"Live migration of instances with config drives is not supported in "
"libvirt unless libvirt instance path and drive data is shared across "
"compute nodes."
msgstr ""
-#: nova/exception.py:1821
+#: nova/exception.py:1839
#, python-format
msgid ""
"Host %(server)s is running an old version of Nova, live migrations "
@@ -1992,255 +2020,288 @@ msgid ""
"and try again."
msgstr ""
-#: nova/exception.py:1827
+#: nova/exception.py:1845
#, python-format
msgid "Error during unshelve instance %(instance_id)s: %(reason)s"
msgstr ""
-#: nova/exception.py:1831
+#: nova/exception.py:1849
#, python-format
msgid ""
"Image vCPU limits %(sockets)d:%(cores)d:%(threads)d exceeds permitted "
"%(maxsockets)d:%(maxcores)d:%(maxthreads)d"
msgstr ""
-#: nova/exception.py:1836
+#: nova/exception.py:1854
#, python-format
msgid ""
"Image vCPU topology %(sockets)d:%(cores)d:%(threads)d exceeds permitted "
"%(maxsockets)d:%(maxcores)d:%(maxthreads)d"
msgstr ""
-#: nova/exception.py:1841
+#: nova/exception.py:1859
#, python-format
msgid ""
"Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d are impossible to"
" satisfy for vcpus count %(vcpus)d"
msgstr ""
-#: nova/exception.py:1846
+#: nova/exception.py:1864
#, python-format
msgid "Architecture name '%(arch)s' is not recognised"
msgstr ""
-#: nova/exception.py:1850
+#: nova/exception.py:1868
msgid "CPU and memory allocation must be provided for all NUMA nodes"
msgstr ""
-#: nova/exception.py:1855
+#: nova/exception.py:1873
#, python-format
msgid ""
"Image property '%(name)s' is not permitted to override NUMA configuration"
" set against the flavor"
msgstr ""
-#: nova/exception.py:1860
+#: nova/exception.py:1878
msgid ""
"Asymmetric NUMA topologies require explicit assignment of CPUs and memory"
" to nodes in image or flavor"
msgstr ""
-#: nova/exception.py:1865
+#: nova/exception.py:1883
#, python-format
msgid "CPU number %(cpunum)d is larger than max %(cpumax)d"
msgstr ""
-#: nova/exception.py:1869
+#: nova/exception.py:1887
#, python-format
msgid "CPU number %(cpunum)d is assigned to two nodes"
msgstr ""
-#: nova/exception.py:1873
+#: nova/exception.py:1891
#, python-format
msgid "CPU number %(cpuset)s is not assigned to any node"
msgstr ""
-#: nova/exception.py:1877
+#: nova/exception.py:1895
#, python-format
msgid "%(memsize)d MB of memory assigned, but expected %(memtotal)d MB"
msgstr ""
-#: nova/exception.py:1882
+#: nova/exception.py:1900
#, python-format
msgid "Invalid characters in hostname '%(hostname)s'"
msgstr ""
-#: nova/exception.py:1886
+#: nova/exception.py:1904
#, python-format
msgid "Instance %(instance_uuid)s does not specify a NUMA topology"
msgstr ""
-#: nova/exception.py:1890
+#: nova/exception.py:1908
#, python-format
msgid "Instance %(instance_uuid)s does not specify a migration context."
msgstr ""
-#: nova/exception.py:1895
+#: nova/exception.py:1913
#, python-format
msgid "Not able to acquire a free port for %(host)s"
msgstr ""
-#: nova/exception.py:1899
+#: nova/exception.py:1917
#, python-format
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr ""
-#: nova/exception.py:1903
+#: nova/exception.py:1921
#, python-format
msgid ""
"Number of serial ports '%(num_ports)s' specified in '%(property)s' isn't "
"valid."
msgstr ""
-#: nova/exception.py:1908
+#: nova/exception.py:1926
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in "
"image meta."
msgstr ""
-#: nova/exception.py:1913
+#: nova/exception.py:1931
#, python-format
msgid "Image's config drive option '%(config_drive)s' is invalid"
msgstr ""
-#: nova/exception.py:1917
+#: nova/exception.py:1935
#, python-format
msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised"
msgstr ""
-#: nova/exception.py:1922
+#: nova/exception.py:1940
#, python-format
msgid "Virtual machine mode '%(vmmode)s' is not recognised"
msgstr ""
-#: nova/exception.py:1926
+#: nova/exception.py:1944
#, python-format
msgid "The token '%(token)s' is invalid or has expired"
msgstr ""
-#: nova/exception.py:1930
+#: nova/exception.py:1948
msgid "Invalid Connection Info"
msgstr ""
-#: nova/exception.py:1934
+#: nova/exception.py:1952
#, python-format
msgid "Quiescing is not supported in instance %(instance_id)s"
msgstr ""
-#: nova/exception.py:1938
+#: nova/exception.py:1956
msgid "QEMU guest agent is not enabled"
msgstr ""
-#: nova/exception.py:1942
+#: nova/exception.py:1960
msgid "Set admin password is not supported"
msgstr ""
-#: nova/exception.py:1946
+#: nova/exception.py:1964
#, python-format
msgid "Invalid memory page size '%(pagesize)s'"
msgstr ""
-#: nova/exception.py:1950
+#: nova/exception.py:1968
#, python-format
msgid "Page size %(pagesize)s forbidden against '%(against)s'"
msgstr ""
-#: nova/exception.py:1954
+#: nova/exception.py:1972
#, python-format
msgid "Page size %(pagesize)s is not supported by the host."
msgstr ""
-#: nova/exception.py:1958
+#: nova/exception.py:1976
#, python-format
msgid "CPU pinning is not supported by the host: %(reason)s"
msgstr ""
-#: nova/exception.py:1963
+#: nova/exception.py:1981
#, python-format
msgid ""
"Cannot pin/unpin cpus %(requested)s from the following pinned set "
"%(pinned)s"
msgstr ""
-#: nova/exception.py:1968
+#: nova/exception.py:1986
#, python-format
msgid ""
"CPU set to pin/unpin %(requested)s must be a subset of known CPU set "
"%(cpuset)s"
msgstr ""
-#: nova/exception.py:1973
+#: nova/exception.py:1991
msgid ""
"Image property 'hw_cpu_policy' is not permitted to override CPU pinning "
"policy set against the flavor"
msgstr ""
-#: nova/exception.py:1978
+#: nova/exception.py:1996
+msgid ""
+"Image property 'hw_cpu_thread_policy' is not permitted to override CPU "
+"thread pinning policy set against the flavor"
+msgstr ""
+
+#: nova/exception.py:2001
#, python-format
msgid "ServerGroup policy is not supported: %(reason)s"
msgstr ""
-#: nova/exception.py:1982
+#: nova/exception.py:2005
#, python-format
msgid "Cell %(uuid)s has no mapping."
msgstr ""
-#: nova/exception.py:1986
+#: nova/exception.py:2009
msgid "Host does not support guests with NUMA topology set"
msgstr ""
-#: nova/exception.py:1990
+#: nova/exception.py:2013
msgid "Host does not support guests with custom memory page sizes"
msgstr ""
-#: nova/exception.py:1994
+#: nova/exception.py:2017
#, python-format
msgid "%(typename)s in %(fieldname)s is not an instance of Enum"
msgstr ""
-#: nova/exception.py:1998
+#: nova/exception.py:2021
#, python-format
msgid "%(fieldname)s missing field type"
msgstr ""
-#: nova/exception.py:2002
+#: nova/exception.py:2025
#, python-format
msgid "Invalid image format '%(format)s'"
msgstr ""
-#: nova/exception.py:2006
+#: nova/exception.py:2029
#, python-format
msgid "Image model '%(image)s' is not supported"
msgstr ""
-#: nova/exception.py:2010
+#: nova/exception.py:2033
#, python-format
msgid "Host '%(name)s' is not mapped to any cell"
msgstr ""
-#: nova/exception.py:2014
+#: nova/exception.py:2037
msgid "Cannot set realtime policy in a non dedicated cpu pinning policy"
msgstr ""
-#: nova/exception.py:2019
+#: nova/exception.py:2042
+msgid "Cannot set cpu thread pinning policy in a non dedicated cpu pinning policy"
+msgstr ""
+
+#: nova/exception.py:2047
#, python-format
msgid "RequestSpec not found for instance %(instance_uuid)s"
msgstr ""
-#: nova/exception.py:2023
+#: nova/exception.py:2051
+msgid "UEFI is not supported"
+msgstr ""
+
+#: nova/exception.py:2055
msgid "Injecting NMI is not supported"
msgstr ""
+#: nova/exception.py:2059
+msgid "Requested CPU control policy not supported by host"
+msgstr ""
+
+#: nova/exception.py:2063
+msgid "Realtime policy not supported by hypervisor"
+msgstr ""
+
+#: nova/exception.py:2067
+msgid ""
+"Realtime policy needs vCPU(s) mask configured with at least 1 RT vCPU and"
+" 1 ordinary vCPU. See hw:cpu_realtime_mask or hw_cpu_realtime_mask"
+msgstr ""
+
+#: nova/exception.py:2073
+#, python-format
+msgid "No configuration information found for operating system %(os_name)s"
+msgstr ""
+
#: nova/hooks.py:77
msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
msgstr ""
-#: nova/service.py:368
+#: nova/service.py:357
#, python-format
msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr ""
-#: nova/service.py:475
+#: nova/service.py:464
msgid "serve() can only be called once"
msgstr ""
@@ -2294,12 +2355,12 @@ msgstr ""
msgid "Certificate is not valid after: %s UTC"
msgstr ""
-#: nova/utils.py:334
+#: nova/utils.py:333
#, python-format
msgid "Running cmd (subprocess): %s"
msgstr ""
-#: nova/utils.py:364
+#: nova/utils.py:363
#, python-format
msgid ""
"%(desc)r\n"
@@ -2309,61 +2370,61 @@ msgid ""
"stderr: %(stderr)r"
msgstr ""
-#: nova/utils.py:373
+#: nova/utils.py:372
#, python-format
msgid "%r failed. Not Retrying."
msgstr ""
-#: nova/utils.py:377
+#: nova/utils.py:376
#, python-format
msgid "%r failed. Retrying."
msgstr ""
-#: nova/utils.py:573
+#: nova/utils.py:572
#, python-format
msgid "Link Local address is not found.:%s"
msgstr ""
-#: nova/utils.py:576
+#: nova/utils.py:575
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
-#: nova/utils.py:608
+#: nova/utils.py:607
#, python-format
msgid "Expected object of type: %s"
msgstr ""
-#: nova/utils.py:1098
+#: nova/utils.py:1097
msgid "The input is not a string or unicode"
msgstr ""
-#: nova/utils.py:1100
+#: nova/utils.py:1099
#, python-format
msgid "%s is not a string or unicode"
msgstr ""
-#: nova/utils.py:1107
+#: nova/utils.py:1106
#, python-format
msgid "%(name)s has a minimum character requirement of %(min_length)s."
msgstr ""
-#: nova/utils.py:1112
+#: nova/utils.py:1111
#, python-format
msgid "%(name)s has more than %(max_length)s characters."
msgstr ""
-#: nova/utils.py:1122
+#: nova/utils.py:1121
#, python-format
msgid "%(value_name)s must be an integer"
msgstr ""
-#: nova/utils.py:1128
+#: nova/utils.py:1127
#, python-format
msgid "%(value_name)s must be >= %(min_value)d"
msgstr ""
-#: nova/utils.py:1134
+#: nova/utils.py:1133
#, python-format
msgid "%(value_name)s must be <= %(max_value)d"
msgstr ""
@@ -2401,143 +2462,6 @@ msgstr ""
msgid "Invalid service catalog json."
msgstr ""
-#: nova/api/ec2/__init__.py:167
-msgid "Too many failed authentications."
-msgstr ""
-
-#: nova/api/ec2/__init__.py:237
-msgid "Signature not provided"
-msgstr ""
-
-#: nova/api/ec2/__init__.py:242
-msgid "Access key not provided"
-msgstr ""
-
-#: nova/api/ec2/__init__.py:301
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr ""
-
-#: nova/api/ec2/__init__.py:364
-msgid "Timestamp failed validation."
-msgstr ""
-
-#: nova/api/ec2/__init__.py:585
-msgid "Unknown error occurred."
-msgstr ""
-
-#: nova/api/ec2/cloud.py:446
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:629 nova/api/ec2/cloud.py:759
-msgid "need group_name or group_id"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:634
-msgid "can't build a valid rule"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:642
-#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:676 nova/api/ec2/cloud.py:712
-msgid "No rule for the specified parameters."
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1325
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1352
-msgid "Floating ip is not associated."
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1368 nova/api/openstack/compute/multiple_create.py:56
-#: nova/api/openstack/compute/legacy_v2/servers.py:601
-msgid "min_count must be <= max_count"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1400
-msgid "Image must be available"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1631
-msgid "imageLocation is required"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1712
-msgid "user or group not specified"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1715
-msgid "only group \"all\" is supported"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1718
-msgid "operation_type must be add or remove"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1733
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1763
-#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not "
-"have a volume attached at root (%(root)s)"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1796
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1806
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1830 nova/api/ec2/cloud.py:1880
-msgid "resource_id and tag are required"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1834 nova/api/ec2/cloud.py:1884
-msgid "Expecting a list of resources"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1839 nova/api/ec2/cloud.py:1889
-#: nova/api/ec2/cloud.py:1947
-msgid "Only instances implemented"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1843 nova/api/ec2/cloud.py:1893
-msgid "Expecting a list of tagSets"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1849 nova/api/ec2/cloud.py:1902
-msgid "Expecting tagSet to be key/value pairs"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1856
-msgid "Expecting both key and value to be set"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1907
-msgid "Expecting key to be set"
-msgstr ""
-
-#: nova/api/ec2/cloud.py:1981
-msgid "Invalid CIDR"
-msgstr ""
-
#: nova/api/ec2/ec2utils.py:252
#, python-format
msgid "Unacceptable attach status:%s for ec2 API."
@@ -2547,40 +2471,40 @@ msgstr ""
msgid "Request must include either Timestamp or Expires, but cannot contain both"
msgstr ""
-#: nova/api/metadata/handler.py:165 nova/api/metadata/handler.py:250
-#: nova/api/metadata/handler.py:314
+#: nova/api/metadata/handler.py:166 nova/api/metadata/handler.py:251
+#: nova/api/metadata/handler.py:315
msgid "An unknown error has occurred. Please try your request again."
msgstr ""
-#: nova/api/metadata/handler.py:185
+#: nova/api/metadata/handler.py:186
msgid "X-Instance-ID header is missing from request."
msgstr ""
-#: nova/api/metadata/handler.py:187
+#: nova/api/metadata/handler.py:188
msgid "X-Instance-ID-Signature header is missing from request."
msgstr ""
-#: nova/api/metadata/handler.py:189
+#: nova/api/metadata/handler.py:190
msgid "X-Tenant-ID header is missing from request."
msgstr ""
-#: nova/api/metadata/handler.py:191
+#: nova/api/metadata/handler.py:192
msgid "Multiple X-Instance-ID headers found within request."
msgstr ""
-#: nova/api/metadata/handler.py:193
+#: nova/api/metadata/handler.py:194
msgid "Multiple X-Tenant-ID headers found within request."
msgstr ""
-#: nova/api/metadata/handler.py:266
+#: nova/api/metadata/handler.py:267
msgid "X-Forwarded-For is missing from request."
msgstr ""
-#: nova/api/metadata/handler.py:270
+#: nova/api/metadata/handler.py:271
msgid "X-Metadata-Provider is missing from request."
msgstr ""
-#: nova/api/metadata/handler.py:304
+#: nova/api/metadata/handler.py:305
msgid "Invalid proxy request signature."
msgstr ""
@@ -2588,11 +2512,11 @@ msgstr ""
msgid "Request is too large."
msgstr ""
-#: nova/api/openstack/__init__.py:240
+#: nova/api/openstack/__init__.py:272
msgid "Must specify an ExtensionManager class"
msgstr ""
-#: nova/api/openstack/api_version_request.py:135
+#: nova/api/openstack/api_version_request.py:144
#, python-format
msgid "'%(other)s' should be an instance of '%(cls)s'"
msgstr ""
@@ -2647,20 +2571,24 @@ msgid ""
"%s"
msgstr ""
-#: nova/api/openstack/wsgi.py:279 nova/api/openstack/wsgi.py:495
+#: nova/api/openstack/wsgi.py:280 nova/api/openstack/wsgi.py:394
msgid "cannot understand JSON"
msgstr ""
-#: nova/api/openstack/wsgi.py:500
+#: nova/api/openstack/wsgi.py:399
msgid "too many body keys"
msgstr ""
-#: nova/api/openstack/wsgi.py:784
+#: nova/api/openstack/wsgi.py:663
+msgid "Unsupported Content-Type"
+msgstr ""
+
+#: nova/api/openstack/wsgi.py:685
#, python-format
msgid "There is no such action: %s"
msgstr ""
-#: nova/api/openstack/wsgi.py:787 nova/api/openstack/wsgi.py:813
+#: nova/api/openstack/wsgi.py:688 nova/api/openstack/wsgi.py:711
#: nova/api/openstack/compute/legacy_v2/server_metadata.py:55
#: nova/api/openstack/compute/legacy_v2/server_metadata.py:74
#: nova/api/openstack/compute/legacy_v2/server_metadata.py:101
@@ -2669,16 +2597,12 @@ msgstr ""
msgid "Malformed request body"
msgstr ""
-#: nova/api/openstack/wsgi.py:791
+#: nova/api/openstack/wsgi.py:692
#, python-format
msgid "Action: '%(action)s', calling method: %(meth)s, body: %(body)s"
msgstr ""
-#: nova/api/openstack/wsgi.py:810
-msgid "Unsupported Content-Type"
-msgstr ""
-
-#: nova/api/openstack/wsgi.py:822
+#: nova/api/openstack/wsgi.py:720
#, python-format
msgid ""
"Malformed request URL: URL's project_id '%(project_id)s' doesn't match "
@@ -2686,7 +2610,7 @@ msgid ""
msgstr ""
#: nova/api/openstack/compute/admin_password.py:60
-#: nova/api/openstack/compute/legacy_v2/servers.py:930
+#: nova/api/openstack/compute/legacy_v2/servers.py:934
msgid "Unable to set password on instance"
msgstr ""
@@ -2728,7 +2652,7 @@ msgstr ""
#: nova/api/openstack/compute/block_device_mapping.py:59
#: nova/api/openstack/compute/block_device_mapping_v1.py:53
-#: nova/api/openstack/compute/legacy_v2/servers.py:434
+#: nova/api/openstack/compute/legacy_v2/servers.py:435
msgid ""
"Using different block_device_mapping syntaxes is not allowed in the same "
"request."
@@ -2837,7 +2761,7 @@ msgid "Invalid minDisk filter [%s]"
msgstr ""
#: nova/api/openstack/compute/flavors.py:112
-#: nova/api/openstack/compute/servers.py:399
+#: nova/api/openstack/compute/servers.py:412
#: nova/api/openstack/compute/legacy_v2/flavors.py:105
#: nova/api/openstack/compute/legacy_v2/servers.py:229
#, python-format
@@ -2874,52 +2798,52 @@ msgstr ""
msgid "DNS entries not found."
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:104
-#: nova/api/openstack/compute/floating_ips.py:161
+#: nova/api/openstack/compute/floating_ips.py:105
+#: nova/api/openstack/compute/floating_ips.py:162
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:93
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:144
#, python-format
msgid "Floating IP not found for ID %s"
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:134
+#: nova/api/openstack/compute/floating_ips.py:135
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:119
#, python-format
msgid "No more floating IPs in pool %s."
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:136
+#: nova/api/openstack/compute/floating_ips.py:137
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:121
msgid "No more floating IPs available."
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:140
+#: nova/api/openstack/compute/floating_ips.py:141
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:125
#, python-format
msgid "IP allocation over quota in pool %s."
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:142
+#: nova/api/openstack/compute/floating_ips.py:143
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:127
msgid "IP allocation over quota."
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:202
+#: nova/api/openstack/compute/floating_ips.py:204
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:189
msgid "No nw_info cache associated with instance"
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:207
+#: nova/api/openstack/compute/floating_ips.py:209
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:194
msgid "No fixed IPs associated to instance"
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:217
+#: nova/api/openstack/compute/floating_ips.py:219
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:205
msgid "Specified fixed address not assigned to instance"
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:225
+#: nova/api/openstack/compute/floating_ips.py:227
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:213
#, python-format
msgid ""
@@ -2927,24 +2851,24 @@ msgid ""
" %(id)s. Instance has no fixed IPv4 addresses to associate."
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:240
+#: nova/api/openstack/compute/floating_ips.py:242
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:228
msgid "floating IP is already associated"
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:243
+#: nova/api/openstack/compute/floating_ips.py:245
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:231
msgid "l3driver call to add floating IP failed"
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:248
-#: nova/api/openstack/compute/floating_ips.py:278
+#: nova/api/openstack/compute/floating_ips.py:250
+#: nova/api/openstack/compute/floating_ips.py:280
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:234
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:269
msgid "floating IP not found"
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:253
+#: nova/api/openstack/compute/floating_ips.py:255
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:239
#, python-format
msgid ""
@@ -2952,12 +2876,12 @@ msgid ""
" for instance %(id)s. Error: %(error)s"
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:293
+#: nova/api/openstack/compute/floating_ips.py:295
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:284
msgid "Floating IP is not associated"
msgstr ""
-#: nova/api/openstack/compute/floating_ips.py:297
+#: nova/api/openstack/compute/floating_ips.py:299
#: nova/api/openstack/compute/legacy_v2/contrib/floating_ips.py:288
#, python-format
msgid "Floating IP %(address)s is not associated with instance %(id)s."
@@ -3017,7 +2941,7 @@ msgstr ""
msgid "You are not allowed to delete the image."
msgstr ""
-#: nova/api/openstack/compute/instance_actions.py:71
+#: nova/api/openstack/compute/instance_actions.py:81
#, python-format
msgid "Action %s not found"
msgstr ""
@@ -3038,6 +2962,11 @@ msgstr ""
msgid "Quota exceeded, too many key pairs."
msgstr ""
+#: nova/api/openstack/compute/multiple_create.py:56
+#: nova/api/openstack/compute/legacy_v2/servers.py:602
+msgid "min_count must be <= max_count"
+msgstr ""
+
#: nova/api/openstack/compute/networks.py:103
#: nova/api/openstack/compute/networks.py:116
#: nova/api/openstack/compute/networks.py:131
@@ -3129,7 +3058,7 @@ msgstr ""
msgid "No instances found for any event"
msgstr ""
-#: nova/api/openstack/compute/server_groups.py:145
+#: nova/api/openstack/compute/server_groups.py:147
#: nova/api/openstack/compute/legacy_v2/contrib/server_groups.py:203
msgid "Quota exceeded, too many server groups."
msgstr ""
@@ -3148,133 +3077,123 @@ msgstr ""
msgid "Metadata item was not found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:316
+#: nova/api/openstack/compute/servers.py:329
#: nova/api/openstack/compute/legacy_v2/servers.py:165
#: nova/api/openstack/compute/legacy_v2/contrib/cells.py:342
msgid "Invalid changes-since value"
msgstr ""
-#: nova/api/openstack/compute/servers.py:341
+#: nova/api/openstack/compute/servers.py:354
#: nova/api/openstack/compute/legacy_v2/servers.py:190
msgid "Only administrators may list deleted instances"
msgstr ""
-#: nova/api/openstack/compute/servers.py:450
+#: nova/api/openstack/compute/servers.py:464
msgid "Unknown argument: port"
msgstr ""
-#: nova/api/openstack/compute/servers.py:453
+#: nova/api/openstack/compute/servers.py:467
#, python-format
msgid ""
"Specified Fixed IP '%(addr)s' cannot be used with port '%(port)s': port "
"already has a Fixed IP allocated."
msgstr ""
-#: nova/api/openstack/compute/servers.py:466
-#: nova/api/openstack/compute/legacy_v2/servers.py:326
+#: nova/api/openstack/compute/servers.py:480
+#: nova/api/openstack/compute/legacy_v2/servers.py:327
#, python-format
msgid "Bad networks format: network uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/servers.py:474
-#: nova/api/openstack/compute/legacy_v2/servers.py:344
+#: nova/api/openstack/compute/servers.py:488
+#: nova/api/openstack/compute/legacy_v2/servers.py:345
#, python-format
msgid "Duplicate networks (%s) are not allowed"
msgstr ""
-#: nova/api/openstack/compute/servers.py:481
-#: nova/api/openstack/compute/legacy_v2/servers.py:351
+#: nova/api/openstack/compute/servers.py:495
+#: nova/api/openstack/compute/legacy_v2/servers.py:352
#, python-format
msgid "Bad network format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/servers.py:484
-#: nova/api/openstack/compute/legacy_v2/servers.py:354
-#: nova/api/openstack/compute/legacy_v2/servers.py:502
+#: nova/api/openstack/compute/servers.py:498
+#: nova/api/openstack/compute/legacy_v2/servers.py:355
+#: nova/api/openstack/compute/legacy_v2/servers.py:503
msgid "Bad networks format"
msgstr ""
-#: nova/api/openstack/compute/servers.py:588
-#: nova/api/openstack/compute/servers.py:613
+#: nova/api/openstack/compute/servers.py:612
+#: nova/api/openstack/compute/servers.py:639
#: nova/api/openstack/compute/legacy_v2/servers.py:91
-#: nova/api/openstack/compute/legacy_v2/servers.py:908
+#: nova/api/openstack/compute/legacy_v2/servers.py:912
msgid "Invalid flavorRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:610
+#: nova/api/openstack/compute/servers.py:636
#: nova/api/openstack/compute/legacy_v2/servers.py:90
msgid "Can not find requested image"
msgstr ""
-#: nova/api/openstack/compute/servers.py:616
+#: nova/api/openstack/compute/servers.py:642
#: nova/api/openstack/compute/legacy_v2/servers.py:92
msgid "Invalid key_name provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:619
+#: nova/api/openstack/compute/servers.py:645
#: nova/api/openstack/compute/legacy_v2/servers.py:93
msgid "Invalid config_drive provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:793
-#: nova/api/openstack/compute/servers.py:910
-#: nova/api/openstack/compute/servers.py:1021
-#: nova/api/openstack/compute/legacy_v2/servers.py:731
-#: nova/api/openstack/compute/legacy_v2/servers.py:848
-#: nova/api/openstack/compute/legacy_v2/servers.py:1035
+#: nova/api/openstack/compute/servers.py:825
+#: nova/api/openstack/compute/servers.py:936
+#: nova/api/openstack/compute/servers.py:1049
+#: nova/api/openstack/compute/legacy_v2/servers.py:735
+#: nova/api/openstack/compute/legacy_v2/servers.py:852
+#: nova/api/openstack/compute/legacy_v2/servers.py:1039
msgid "Instance could not be found"
msgstr ""
-#: nova/api/openstack/compute/servers.py:811
-#: nova/api/openstack/compute/servers.py:831
-#: nova/api/openstack/compute/legacy_v2/servers.py:744
-#: nova/api/openstack/compute/legacy_v2/servers.py:760
+#: nova/api/openstack/compute/servers.py:843
+#: nova/api/openstack/compute/servers.py:863
+#: nova/api/openstack/compute/legacy_v2/servers.py:748
+#: nova/api/openstack/compute/legacy_v2/servers.py:764
msgid "Instance has not been resized."
msgstr ""
-#: nova/api/openstack/compute/servers.py:834
-#: nova/api/openstack/compute/legacy_v2/servers.py:763
+#: nova/api/openstack/compute/servers.py:866
+#: nova/api/openstack/compute/legacy_v2/servers.py:767
msgid "Flavor used by the instance could not be found."
msgstr ""
-#: nova/api/openstack/compute/servers.py:875
-#: nova/api/openstack/compute/legacy_v2/servers.py:813
-msgid "Unable to locate requested flavor."
-msgstr ""
-
-#: nova/api/openstack/compute/servers.py:878
-#: nova/api/openstack/compute/legacy_v2/servers.py:816
-msgid "Resize requires a flavor change."
-msgstr ""
-
-#: nova/api/openstack/compute/servers.py:889
-#: nova/api/openstack/compute/legacy_v2/servers.py:826
+#: nova/api/openstack/compute/servers.py:912
+#: nova/api/openstack/compute/legacy_v2/servers.py:830
msgid "You are not authorized to access the image the instance was started with."
msgstr ""
-#: nova/api/openstack/compute/servers.py:893
-#: nova/api/openstack/compute/legacy_v2/servers.py:830
+#: nova/api/openstack/compute/servers.py:916
+#: nova/api/openstack/compute/legacy_v2/servers.py:834
msgid "Image that the instance was started with could not be found."
msgstr ""
-#: nova/api/openstack/compute/servers.py:900
-#: nova/api/openstack/compute/legacy_v2/servers.py:837
+#: nova/api/openstack/compute/servers.py:926
+#: nova/api/openstack/compute/legacy_v2/servers.py:841
msgid "Invalid instance image."
msgstr ""
-#: nova/api/openstack/compute/servers.py:926
-#: nova/api/openstack/compute/legacy_v2/servers.py:865
-#: nova/api/openstack/compute/legacy_v2/servers.py:873
+#: nova/api/openstack/compute/servers.py:952
+#: nova/api/openstack/compute/legacy_v2/servers.py:869
+#: nova/api/openstack/compute/legacy_v2/servers.py:877
msgid "Invalid imageRef provided."
msgstr ""
-#: nova/api/openstack/compute/servers.py:945
-#: nova/api/openstack/compute/legacy_v2/servers.py:860
+#: nova/api/openstack/compute/servers.py:971
+#: nova/api/openstack/compute/legacy_v2/servers.py:864
msgid "Missing imageRef attribute"
msgstr ""
-#: nova/api/openstack/compute/servers.py:1026
-#: nova/api/openstack/compute/legacy_v2/servers.py:1041
+#: nova/api/openstack/compute/servers.py:1054
+#: nova/api/openstack/compute/legacy_v2/servers.py:1045
msgid "Cannot find image for rebuild"
msgstr ""
@@ -3293,13 +3212,13 @@ msgstr ""
msgid "Service %s not found."
msgstr ""
-#: nova/api/openstack/compute/simple_tenant_usage.py:195
-#: nova/api/openstack/compute/legacy_v2/contrib/simple_tenant_usage.py:196
+#: nova/api/openstack/compute/simple_tenant_usage.py:192
+#: nova/api/openstack/compute/legacy_v2/contrib/simple_tenant_usage.py:193
msgid "Datetime is in invalid format"
msgstr ""
-#: nova/api/openstack/compute/simple_tenant_usage.py:214
-#: nova/api/openstack/compute/legacy_v2/contrib/simple_tenant_usage.py:215
+#: nova/api/openstack/compute/simple_tenant_usage.py:211
+#: nova/api/openstack/compute/legacy_v2/contrib/simple_tenant_usage.py:212
msgid "Invalid start time. The start time cannot occur after the end time."
msgstr ""
@@ -3323,29 +3242,29 @@ msgstr ""
msgid "Create networks failed"
msgstr ""
-#: nova/api/openstack/compute/volumes.py:250
-#: nova/api/openstack/compute/volumes.py:383
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:256
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:405
+#: nova/api/openstack/compute/volumes.py:277
+#: nova/api/openstack/compute/volumes.py:419
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:268
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:417
#, python-format
msgid "Instance %s is not attached."
msgstr ""
-#: nova/api/openstack/compute/volumes.py:261
-#: nova/api/openstack/compute/volumes.py:416
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:267
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:384
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:431
+#: nova/api/openstack/compute/volumes.py:288
+#: nova/api/openstack/compute/volumes.py:452
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:279
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:396
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:443
#, python-format
msgid "volume_id not found: %s"
msgstr ""
-#: nova/api/openstack/compute/volumes.py:359
+#: nova/api/openstack/compute/volumes.py:392
msgid "The volume was either invalid or not attached to the instance."
msgstr ""
-#: nova/api/openstack/compute/volumes.py:392
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:414
+#: nova/api/openstack/compute/volumes.py:428
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:426
msgid "Can't detach root device volume"
msgstr ""
@@ -3384,118 +3303,126 @@ msgstr ""
msgid "Malformed request body. meta item must be object"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:288
+#: nova/api/openstack/compute/legacy_v2/servers.py:289
#, python-format
msgid "Bad personality format: missing %s"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:291
+#: nova/api/openstack/compute/legacy_v2/servers.py:292
msgid "Bad personality format"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:294
+#: nova/api/openstack/compute/legacy_v2/servers.py:295
#, python-format
msgid "Personality content for %s cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:309
+#: nova/api/openstack/compute/legacy_v2/servers.py:310
#, python-format
msgid "Bad port format: port uuid is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:317
+#: nova/api/openstack/compute/legacy_v2/servers.py:318
msgid "Unknown argument : port"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:337
+#: nova/api/openstack/compute/legacy_v2/servers.py:338
#, python-format
msgid "Invalid fixed IP address (%s)"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:382
+#: nova/api/openstack/compute/legacy_v2/servers.py:383
msgid "accessIPv4 is not proper IPv4 format"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:387
+#: nova/api/openstack/compute/legacy_v2/servers.py:388
msgid "accessIPv6 is not proper IPv6 format"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:403
+#: nova/api/openstack/compute/legacy_v2/servers.py:404
msgid "Userdata content cannot be decoded"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:414
+#: nova/api/openstack/compute/legacy_v2/servers.py:415
msgid "block_device_mapping must be a list"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:439
+#: nova/api/openstack/compute/legacy_v2/servers.py:440
msgid "block_device_mapping_v2 must be a list"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:519
+#: nova/api/openstack/compute/legacy_v2/servers.py:520
msgid "Server name is not defined"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:545
+#: nova/api/openstack/compute/legacy_v2/servers.py:546
#: nova/api/validation/validators.py:169
#, python-format
msgid "Invalid input for field/attribute %(path)s. Value: %(value)s. %(message)s"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:717
+#: nova/api/openstack/compute/legacy_v2/servers.py:721
msgid "HostId cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:721
+#: nova/api/openstack/compute/legacy_v2/servers.py:725
msgid "Personality cannot be updated."
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:777
+#: nova/api/openstack/compute/legacy_v2/servers.py:781
msgid "Argument 'type' for reboot must be a string"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:783
+#: nova/api/openstack/compute/legacy_v2/servers.py:787
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:787
+#: nova/api/openstack/compute/legacy_v2/servers.py:791
msgid "Missing argument 'type' for reboot"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:903
+#: nova/api/openstack/compute/legacy_v2/servers.py:817
+msgid "Unable to locate requested flavor."
+msgstr ""
+
+#: nova/api/openstack/compute/legacy_v2/servers.py:820
+msgid "Resize requires a flavor change."
+msgstr ""
+
+#: nova/api/openstack/compute/legacy_v2/servers.py:907
msgid "Missing flavorRef attribute"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:917
+#: nova/api/openstack/compute/legacy_v2/servers.py:921
msgid "No adminPass was specified"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:939
+#: nova/api/openstack/compute/legacy_v2/servers.py:943
msgid "Unable to parse metadata key/value pairs."
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:950
+#: nova/api/openstack/compute/legacy_v2/servers.py:954
msgid "Resize request has invalid 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:953
+#: nova/api/openstack/compute/legacy_v2/servers.py:957
msgid "Resize requests require 'flavorRef' attribute."
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:971
+#: nova/api/openstack/compute/legacy_v2/servers.py:975
msgid "Could not parse imageRef from request."
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:1075
+#: nova/api/openstack/compute/legacy_v2/servers.py:1079
msgid "createImage entity requires name attribute"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:1084
+#: nova/api/openstack/compute/legacy_v2/servers.py:1088
#: nova/api/openstack/compute/legacy_v2/contrib/admin_actions.py:276
msgid "Invalid metadata"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/servers.py:1136
+#: nova/api/openstack/compute/legacy_v2/servers.py:1140
msgid "Invalid adminPass"
msgstr ""
@@ -3952,30 +3879,30 @@ msgstr ""
msgid "Invalid attribute in the request"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:143
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:155
msgid "volume not specified"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:277
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:289
#, python-format
msgid "Bad volumeId format: volumeId is not in proper format (%s)"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:288
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:345
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:300
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:357
msgid "volumeAttachment not specified"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:293
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:354
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:305
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:366
msgid "volumeId must be specified."
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:537
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:549
msgid "snapshot not specified"
msgstr ""
-#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:550
+#: nova/api/openstack/compute/legacy_v2/contrib/volumes.py:562
#, python-format
msgid "Invalid value '%s' for force."
msgstr ""
@@ -4366,182 +4293,178 @@ msgstr ""
msgid "Command failed, please check log for more info"
msgstr ""
-#: nova/compute/api.py:347
+#: nova/compute/api.py:349
msgid "Cannot run any more instances of this type."
msgstr ""
-#: nova/compute/api.py:353
+#: nova/compute/api.py:355
#, python-format
msgid "Can only run %s more instances of this type."
msgstr ""
-#: nova/compute/api.py:399
+#: nova/compute/api.py:401
msgid "Metadata type should be dict."
msgstr ""
-#: nova/compute/api.py:421
+#: nova/compute/api.py:423
msgid "Metadata property key greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:424
+#: nova/compute/api.py:426
msgid "Metadata property value greater than 255 characters"
msgstr ""
-#: nova/compute/api.py:734
+#: nova/compute/api.py:736
#, python-format
msgid "The volume cannot be assigned the same device name as the root device %s"
msgstr ""
-#: nova/compute/api.py:770
+#: nova/compute/api.py:772
msgid "Cannot attach one or more volumes to multiple instances"
msgstr ""
-#: nova/compute/api.py:814
-msgid "The requested availability zone is not available"
-msgstr ""
-
-#: nova/compute/api.py:952
+#: nova/compute/api.py:946
msgid "Quota exceeded, too many servers in group"
msgstr ""
-#: nova/compute/api.py:1040
+#: nova/compute/api.py:1035
msgid "Server group scheduler hint must be a UUID."
msgstr ""
-#: nova/compute/api.py:1254
+#: nova/compute/api.py:1235
msgid ""
"Images with destination_type 'volume' need to have a non-zero size "
"specified"
msgstr ""
-#: nova/compute/api.py:1280
+#: nova/compute/api.py:1261
msgid "Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size"
msgstr ""
-#: nova/compute/api.py:1293
+#: nova/compute/api.py:1274
msgid "More than one swap drive requested."
msgstr ""
-#: nova/compute/api.py:1448
+#: nova/compute/api.py:1428
msgid ""
"Unable to launch multiple instances with a single configured port ID. "
"Please launch your instance one by one with different ports."
msgstr ""
-#: nova/compute/api.py:1458
+#: nova/compute/api.py:1438
msgid "max_count cannot be greater than 1 if an fixed_ip is specified."
msgstr ""
-#: nova/compute/api.py:2291
+#: nova/compute/api.py:1477
+msgid "The requested availability zone is not available"
+msgstr ""
+
+#: nova/compute/api.py:2297
#, python-format
msgid "snapshot for %s"
msgstr ""
-#: nova/compute/api.py:2554
+#: nova/compute/api.py:2560
msgid "Resize to zero disk flavor is not allowed."
msgstr ""
-#: nova/compute/api.py:2769
+#: nova/compute/api.py:2775
msgid "Cannot rescue a volume-backed instance"
msgstr ""
-#: nova/compute/api.py:3038
-msgid "Volume must be attached in order to detach."
-msgstr ""
-
-#: nova/compute/api.py:3058
+#: nova/compute/api.py:3141
msgid "Old volume is attached to a different instance."
msgstr ""
-#: nova/compute/api.py:3061
+#: nova/compute/api.py:3144
msgid "New volume must be detached in order to swap."
msgstr ""
-#: nova/compute/api.py:3064
+#: nova/compute/api.py:3147
msgid "New volume must be the same size or larger."
msgstr ""
-#: nova/compute/api.py:3551
+#: nova/compute/api.py:3668
msgid "Host aggregate is not empty"
msgstr ""
-#: nova/compute/api.py:3582
+#: nova/compute/api.py:3699
#, python-format
msgid "One or more hosts already in availability zone(s) %s"
msgstr ""
-#: nova/compute/api.py:3602
+#: nova/compute/api.py:3719
#, python-format
msgid "Unexpected aggregate action %s"
msgstr ""
-#: nova/compute/api.py:3682
+#: nova/compute/api.py:3799
msgid "Keypair name contains unsafe characters"
msgstr ""
-#: nova/compute/api.py:3688
+#: nova/compute/api.py:3805
msgid "Keypair name must be string and between 1 and 255 characters long"
msgstr ""
-#: nova/compute/api.py:3795
+#: nova/compute/api.py:3912
#, python-format
msgid "Security group %s is not a string or unicode"
msgstr ""
-#: nova/compute/api.py:3805
+#: nova/compute/api.py:3922
#, python-format
msgid ""
"Value (%(value)s) for parameter Group%(property)s is invalid. Content "
"limited to '%(allowed)s'."
msgstr ""
-#: nova/compute/api.py:3826
+#: nova/compute/api.py:3943
msgid "Quota exceeded, too many security groups."
msgstr ""
-#: nova/compute/api.py:3841
+#: nova/compute/api.py:3958
#, python-format
msgid "Security group %s already exists"
msgstr ""
-#: nova/compute/api.py:3854
+#: nova/compute/api.py:3971
#, python-format
msgid "Unable to update system group '%s'"
msgstr ""
-#: nova/compute/api.py:3916
+#: nova/compute/api.py:4033
#, python-format
msgid "Unable to delete system group '%s'"
msgstr ""
-#: nova/compute/api.py:3921
+#: nova/compute/api.py:4038
msgid "Security group is still in use"
msgstr ""
-#: nova/compute/api.py:4008
+#: nova/compute/api.py:4125
#, python-format
msgid "Rule (%s) not found"
msgstr ""
-#: nova/compute/api.py:4024
+#: nova/compute/api.py:4141
msgid "Quota exceeded, too many security group rules."
msgstr ""
-#: nova/compute/api.py:4027
+#: nova/compute/api.py:4144
#, python-format
msgid ""
"Security group %(name)s added %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)"
msgstr ""
-#: nova/compute/api.py:4042
+#: nova/compute/api.py:4159
#, python-format
msgid ""
"Security group %(name)s removed %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)"
msgstr ""
-#: nova/compute/api.py:4094
+#: nova/compute/api.py:4211
msgid "Security group id should be integer"
msgstr ""
@@ -4605,100 +4528,100 @@ msgid ""
"underscores, colons and spaces."
msgstr ""
-#: nova/compute/manager.py:609
+#: nova/compute/manager.py:606
msgid "Instance event failed"
msgstr ""
-#: nova/compute/manager.py:739
+#: nova/compute/manager.py:736
#, python-format
msgid "%s is not a valid node managed by this compute host."
msgstr ""
-#: nova/compute/manager.py:1471
+#: nova/compute/manager.py:1463
msgid "Anti-affinity instance group policy was violated."
msgstr ""
-#: nova/compute/manager.py:1479
+#: nova/compute/manager.py:1471
msgid "Affinity instance group policy was violated."
msgstr ""
-#: nova/compute/manager.py:2055
+#: nova/compute/manager.py:2060
#, python-format
msgid "Failed to allocate the network(s) with error %s, not rescheduling."
msgstr ""
-#: nova/compute/manager.py:2065 nova/compute/manager.py:2138
+#: nova/compute/manager.py:2070 nova/compute/manager.py:2144
msgid "Failed to allocate the network(s), not rescheduling."
msgstr ""
-#: nova/compute/manager.py:2113
+#: nova/compute/manager.py:2119
msgid "Success"
msgstr ""
-#: nova/compute/manager.py:2177
+#: nova/compute/manager.py:2183
msgid "Failure prepping block device."
msgstr ""
-#: nova/compute/manager.py:2829
+#: nova/compute/manager.py:2849
msgid "Invalid state of instance files on shared storage"
msgstr ""
-#: nova/compute/manager.py:3239
+#: nova/compute/manager.py:3260
#, python-format
msgid "instance %s is not running"
msgstr ""
-#: nova/compute/manager.py:3256
+#: nova/compute/manager.py:3277
msgid "set_admin_password is not implemented by this driver or guest instance."
msgstr ""
-#: nova/compute/manager.py:3271
+#: nova/compute/manager.py:3292
msgid "error setting admin password"
msgstr ""
-#: nova/compute/manager.py:3356
+#: nova/compute/manager.py:3375
#, python-format
msgid "Driver Error: %s"
msgstr ""
-#: nova/compute/manager.py:3674
+#: nova/compute/manager.py:3693
msgid "Instance has no source host"
msgstr ""
-#: nova/compute/manager.py:4972
+#: nova/compute/manager.py:4997
#, python-format
msgid "Port %s is not attached"
msgstr ""
-#: nova/compute/manager.py:5696
+#: nova/compute/manager.py:5746
#, python-format
msgid "Instance %s not found"
msgstr ""
-#: nova/compute/manager.py:5701
+#: nova/compute/manager.py:5751
msgid "In ERROR state"
msgstr ""
-#: nova/compute/manager.py:5729
+#: nova/compute/manager.py:5779
#, python-format
msgid "In states %(vm_state)s/%(task_state)s, not RESIZED/None"
msgstr ""
-#: nova/compute/manager.py:6370
+#: nova/compute/manager.py:6419
#, python-format
msgid "Unrecognized value '%s' for CONF.running_deleted_instance_action"
msgstr ""
-#: nova/compute/resource_tracker.py:922
+#: nova/compute/resource_tracker.py:938
#, python-format
msgid "Missing keys: %s"
msgstr ""
-#: nova/compute/rpcapi.py:71
+#: nova/compute/rpcapi.py:72
msgid "No compute host specified"
msgstr ""
-#: nova/compute/rpcapi.py:73
+#: nova/compute/rpcapi.py:74
#, python-format
msgid "Unable to find host for Instance %s"
msgstr ""
@@ -4707,15 +4630,15 @@ msgstr ""
msgid "Unexpected type adding stats"
msgstr ""
-#: nova/conductor/manager.py:224
+#: nova/conductor/manager.py:228
msgid "No valid host found for cold migrate"
msgstr ""
-#: nova/conductor/manager.py:226
+#: nova/conductor/manager.py:230
msgid "No valid host found for resize"
msgstr ""
-#: nova/conductor/manager.py:436
+#: nova/conductor/manager.py:461
#, python-format
msgid "Unshelve attempted but the image %s cannot be found."
msgstr ""
@@ -4786,47 +4709,47 @@ msgstr ""
msgid "Unrecognized read_deleted value '%s'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1020
+#: nova/db/sqlalchemy/api.py:1015
#, python-format
msgid "Invalid floating IP %s in request"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1336
+#: nova/db/sqlalchemy/api.py:1331
#, python-format
msgid "Invalid fixed IP Address %s in request"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1492
+#: nova/db/sqlalchemy/api.py:1486
#, python-format
msgid "Invalid virtual interface address %s in request"
msgstr ""
-#: nova/db/sqlalchemy/api.py:1586
+#: nova/db/sqlalchemy/api.py:1584
#, python-format
msgid ""
"Unknown osapi_compute_unique_server_name_scope value: %s Flag must be "
"empty, \"global\" or \"project\""
msgstr ""
-#: nova/db/sqlalchemy/api.py:1777
+#: nova/db/sqlalchemy/api.py:1764
#, python-format
msgid "Invalid instance id %s in request"
msgstr ""
-#: nova/db/sqlalchemy/api.py:2143
+#: nova/db/sqlalchemy/api.py:2120
#, python-format
msgid "Invalid field name: %s"
msgstr ""
-#: nova/db/sqlalchemy/api.py:2306
+#: nova/db/sqlalchemy/api.py:2283
msgid "Unknown sort direction, must be 'desc' or 'asc'"
msgstr ""
-#: nova/db/sqlalchemy/api.py:2317
+#: nova/db/sqlalchemy/api.py:2294
msgid "Sort direction size exceeds sort key size"
msgstr ""
-#: nova/db/sqlalchemy/api.py:5696
+#: nova/db/sqlalchemy/api.py:5671
#, python-format
msgid "Add metadata failed for aggregate %(id)s after %(retries)s retries"
msgstr ""
@@ -4966,10 +4889,14 @@ msgstr ""
msgid "Bad project_id for to_global_ipv6: %s"
msgstr ""
-#: nova/keymgr/barbican.py:78
+#: nova/keymgr/barbican.py:79
msgid "User is not authorized to use key manager."
msgstr ""
+#: nova/keymgr/barbican.py:84
+msgid "Unable to create Barbican Client without project_id."
+msgstr ""
+
#: nova/keymgr/conf_key_mgr.py:58
msgid "keymgr.fixed_key not defined"
msgstr ""
@@ -4999,12 +4926,12 @@ msgstr ""
msgid "Unknown chain: %r"
msgstr ""
-#: nova/network/linux_net.py:1608
+#: nova/network/linux_net.py:1623
#, python-format
msgid "Failed to add bridge: %s"
msgstr ""
-#: nova/network/linux_net.py:1623
+#: nova/network/linux_net.py:1638
#, python-format
msgid "Failed to add interface: %s"
msgstr ""
@@ -5042,59 +4969,59 @@ msgstr ""
msgid "This driver only supports type 'a'"
msgstr ""
-#: nova/network/model.py:159
+#: nova/network/model.py:163
#, python-format
msgid "Invalid IP format %s"
msgstr ""
-#: nova/network/neutronv2/api.py:124
+#: nova/network/neutronv2/api.py:120
#, python-format
msgid "Unknown auth plugin: %s"
msgstr ""
-#: nova/network/neutronv2/api.py:268
+#: nova/network/neutronv2/api.py:269
#, python-format
msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
msgstr ""
-#: nova/network/neutronv2/api.py:275
+#: nova/network/neutronv2/api.py:276
#, python-format
msgid "Fixed IP %s is already in use."
msgstr ""
-#: nova/network/neutronv2/api.py:483
+#: nova/network/neutronv2/api.py:485
#, python-format
msgid ""
"Multiple security groups found matching '%s'. Use an ID to be more "
"specific."
msgstr ""
-#: nova/network/neutronv2/api.py:543
+#: nova/network/neutronv2/api.py:546
#, python-format
msgid "empty project id for instance %s"
msgstr ""
-#: nova/network/neutronv2/api.py:575 nova/network/neutronv2/api.py:1009
+#: nova/network/neutronv2/api.py:579 nova/network/neutronv2/api.py:1029
msgid "Multiple possible networks found, use a Network ID to be more specific."
msgstr ""
-#: nova/network/neutronv2/api.py:832
+#: nova/network/neutronv2/api.py:836
#, python-format
msgid "Failed to access port %(port_id)s: %(reason)s"
msgstr ""
-#: nova/network/neutronv2/api.py:858
+#: nova/network/neutronv2/api.py:862
msgid ""
"This method needs to be called with either networks=None and "
"port_ids=None or port_ids and networks as not none."
msgstr ""
-#: nova/network/neutronv2/api.py:1109
+#: nova/network/neutronv2/api.py:1129
#, python-format
msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
msgstr ""
-#: nova/network/neutronv2/api.py:1363
+#: nova/network/neutronv2/api.py:1383
#, python-format
msgid "Multiple floating IP pools matches found for name '%s'"
msgstr ""
@@ -5135,12 +5062,12 @@ msgstr ""
msgid "To and From ports must be integers"
msgstr ""
-#: nova/network/security_group/security_group_base.py:137
+#: nova/network/security_group/security_group_base.py:138
#, python-format
msgid "This rule already exists in group %s"
msgstr ""
-#: nova/objects/block_device.py:211
+#: nova/objects/block_device.py:220
msgid "Volume does not belong to the requested instance."
msgstr ""
@@ -5149,28 +5076,28 @@ msgstr ""
msgid "Architecture name '%s' is not valid"
msgstr ""
-#: nova/objects/fields.py:237
+#: nova/objects/fields.py:249
#, python-format
msgid "Hypervisor virt type '%s' is not valid"
msgstr ""
-#: nova/objects/fields.py:357
+#: nova/objects/fields.py:390
#, python-format
msgid "Virtual machine mode '%s' is not valid"
msgstr ""
-#: nova/objects/fields.py:462 nova/objects/fields.py:473
-#: nova/objects/fields.py:484
+#: nova/objects/fields.py:569 nova/objects/fields.py:580
+#: nova/objects/fields.py:591
#, python-format
msgid "Network \"%(val)s\" is not valid in field %(attr)s"
msgstr ""
-#: nova/objects/fields.py:526
+#: nova/objects/fields.py:633
#, python-format
msgid "A NetworkModel is required in field %s"
msgstr ""
-#: nova/objects/fields.py:547 nova/objects/fields.py:556
+#: nova/objects/fields.py:654 nova/objects/fields.py:663
#, python-format
msgid "Value must be >= 0 for field %s"
msgstr ""
@@ -5201,17 +5128,27 @@ msgstr ""
msgid "Snapshot list encountered but no header found!"
msgstr ""
-#: nova/pci/whitelist.py:53
+#: nova/pci/devspec.py:42
+#, python-format
+msgid "invalid %(property)s %(attr)s"
+msgstr ""
+
+#: nova/pci/request.py:109
+#, python-format
+msgid "Device type mismatch for alias '%s'"
+msgstr ""
+
+#: nova/pci/whitelist.py:45
#, python-format
msgid "Invalid entry: '%s'"
msgstr ""
-#: nova/pci/whitelist.py:58
+#: nova/pci/whitelist.py:50
#, python-format
msgid "Invalid entry: '%s'; Expecting list or dict"
msgstr ""
-#: nova/pci/whitelist.py:64
+#: nova/pci/whitelist.py:56
#, python-format
msgid "Invalid entry: '%s'; Expecting dict"
msgstr ""
@@ -5228,34 +5165,46 @@ msgstr ""
msgid "There are not enough hosts available."
msgstr ""
-#: nova/scheduler/utils.py:163
+#: nova/scheduler/driver.py:64
#, python-format
msgid ""
-"Exceeded max scheduling attempts %(max_attempts)d for instance "
-"%(instance_uuid)s. Last exception: %(exc_reason)s"
+"Cannot load host manager from configuration scheduler_host_manager = "
+"%(conf)s."
msgstr ""
-#: nova/scheduler/utils.py:196
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
+#: nova/scheduler/manager.py:73
+#, python-format
+msgid "Cannot load scheduler driver from configuration %(conf)s."
msgstr ""
-#: nova/scheduler/utils.py:287
+#: nova/scheduler/utils.py:180
+#, python-format
+msgid ""
+"Exceeded max scheduling attempts %(max_attempts)d for instance "
+"%(instance_uuid)s. Last exception: %(exc_reason)s"
+msgstr ""
+
+#: nova/scheduler/utils.py:314
msgid "ServerGroupAffinityFilter not configured"
msgstr ""
-#: nova/scheduler/utils.py:291
+#: nova/scheduler/utils.py:318
msgid "ServerGroupAntiAffinityFilter not configured"
msgstr ""
-#: nova/servicegroup/drivers/db.py:50
-msgid "service is a mandatory argument for DB based ServiceGroup driver"
+#: nova/scheduler/utils.py:323
+msgid "ServerGroupSoftAffinityWeigher not configured"
msgstr ""
-#: nova/servicegroup/drivers/mc.py:41
-msgid "memcached_servers not defined"
+#: nova/scheduler/utils.py:328
+msgid "ServerGroupSoftAntiAffinityWeigher not configured"
msgstr ""
-#: nova/servicegroup/drivers/mc.py:54
+#: nova/servicegroup/drivers/db.py:50
+msgid "service is a mandatory argument for DB based ServiceGroup driver"
+msgstr ""
+
+#: nova/servicegroup/drivers/mc.py:53
msgid "service is a mandatory argument for Memcached based ServiceGroup driver"
msgstr ""
@@ -5269,11 +5218,11 @@ msgstr ""
msgid "Invalid type for %s entry"
msgstr ""
-#: nova/virt/driver.py:892
+#: nova/virt/driver.py:899
msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr ""
-#: nova/virt/driver.py:1465
+#: nova/virt/driver.py:1455
msgid "Event must be an instance of nova.virt.event.Event"
msgstr ""
@@ -5301,50 +5250,70 @@ msgstr ""
msgid "Unknown"
msgstr ""
-#: nova/virt/hardware.py:51
+#: nova/virt/hardware.py:52
#, python-format
msgid "No CPUs available after parsing %r"
msgstr ""
-#: nova/virt/hardware.py:88 nova/virt/hardware.py:92
+#: nova/virt/hardware.py:89 nova/virt/hardware.py:93
#, python-format
msgid "Invalid range expression %r"
msgstr ""
-#: nova/virt/hardware.py:104
+#: nova/virt/hardware.py:105
#, python-format
msgid "Invalid exclusion expression %r"
msgstr ""
-#: nova/virt/hardware.py:111
+#: nova/virt/hardware.py:112
#, python-format
msgid "Invalid inclusion expression %r"
msgstr ""
-#: nova/virt/images.py:56
+#: nova/virt/images.py:57
#, python-format
msgid "Path does not exist %(path)s"
msgstr ""
-#: nova/virt/images.py:62
+#: nova/virt/images.py:66
+#, python-format
+msgid "qemu-img failed to execute on %(path)s : %(exp)s"
+msgstr ""
+
+#: nova/virt/images.py:71
#, python-format
msgid "Failed to run qemu-img info on %(path)s : %(error)s"
msgstr ""
-#: nova/virt/images.py:95
+#: nova/virt/images.py:106
+#, python-format
+msgid "Unable to convert image to %(format)s: %(exp)s"
+msgstr ""
+
+#: nova/virt/images.py:131
msgid "'qemu-img info' parsing failed."
msgstr ""
-#: nova/virt/images.py:101
+#: nova/virt/images.py:137
#, python-format
msgid "fmt=%(fmt)s backed by: %(backing_file)s"
msgstr ""
-#: nova/virt/images.py:132
+#: nova/virt/images.py:167
+#, python-format
+msgid "Unable to convert image to raw: %(exp)s"
+msgstr ""
+
+#: nova/virt/images.py:175
#, python-format
msgid "Converted to raw, but format is now %s"
msgstr ""
+#: nova/virt/osinfo.py:44
+#, python-format
+msgid "Cannot load Libosinfo: (%s)"
+msgstr ""
+
#: nova/virt/disk/api.py:349
msgid "image already mounted"
msgstr ""
@@ -5448,11 +5417,11 @@ msgstr ""
msgid "File path %s not valid"
msgstr ""
-#: nova/virt/hyperv/driver.py:265
+#: nova/virt/hyperv/driver.py:270
msgid "VIF plugging is not supported by the Hyper-V driver."
msgstr ""
-#: nova/virt/hyperv/driver.py:270
+#: nova/virt/hyperv/driver.py:275
msgid "VIF unplugging is not supported by the Hyper-V driver."
msgstr ""
@@ -5497,52 +5466,52 @@ msgid ""
"host's SAN policy is set to \"OfflineAll\" or \"OfflineShared\""
msgstr ""
-#: nova/virt/ironic/client_wrapper.py:93
+#: nova/virt/ironic/client_wrapper.py:97
msgid "Unable to authenticate Ironic client."
msgstr ""
-#: nova/virt/ironic/client_wrapper.py:152
+#: nova/virt/ironic/client_wrapper.py:156
#, python-format
msgid ""
"Error contacting Ironic server for '%(method)s'. Attempt %(attempt)d of "
"%(total)d"
msgstr ""
-#: nova/virt/ironic/driver.py:359
+#: nova/virt/ironic/driver.py:368
#, python-format
msgid ""
"Failed to add deploy parameters on node %(node)s when provisioning the "
"instance %(instance)s"
msgstr ""
-#: nova/virt/ironic/driver.py:387
+#: nova/virt/ironic/driver.py:396
#, python-format
msgid "Fail to clean up node %s parameters"
msgstr ""
-#: nova/virt/ironic/driver.py:399
+#: nova/virt/ironic/driver.py:408
#, python-format
msgid "Instance %s provisioning was aborted"
msgstr ""
-#: nova/virt/ironic/driver.py:420
+#: nova/virt/ironic/driver.py:429
#, python-format
msgid "Failed to provision instance %(inst)s: %(reason)s"
msgstr ""
-#: nova/virt/ironic/driver.py:714
+#: nova/virt/ironic/driver.py:721
#, python-format
msgid "Ironic node uuid not supplied to driver for instance %s."
msgstr ""
-#: nova/virt/ironic/driver.py:735
+#: nova/virt/ironic/driver.py:742
#, python-format
msgid ""
"Ironic node: %(id)s failed to validate. (deploy: %(deploy)s, power: "
"%(power)s)"
msgstr ""
-#: nova/virt/ironic/driver.py:839
+#: nova/virt/ironic/driver.py:846
#, python-format
msgid ""
"Error destroying the instance on node %(node)s. Provision state still "
@@ -5556,22 +5525,22 @@ msgid ""
"count: %(vif_count)d, Pif count: %(pif_count)d)"
msgstr ""
-#: nova/virt/ironic/driver.py:1152
+#: nova/virt/ironic/driver.py:1150
#, python-format
msgid "Failed to request Ironic to rebuild instance %(inst)s: %(reason)s"
msgstr ""
-#: nova/virt/libvirt/blockinfo.py:146
+#: nova/virt/libvirt/blockinfo.py:147
#, python-format
msgid "Unable to determine disk prefix for %s"
msgstr ""
-#: nova/virt/libvirt/blockinfo.py:200
+#: nova/virt/libvirt/blockinfo.py:201
#, python-format
msgid "No free disk device names for prefix '%s'"
msgstr ""
-#: nova/virt/libvirt/blockinfo.py:314
+#: nova/virt/libvirt/blockinfo.py:318
#, python-format
msgid "Unable to determine disk bus for '%s'"
msgstr ""
@@ -5581,102 +5550,97 @@ msgstr ""
msgid "Root element name should be '%(name)s' not '%(tag)s'"
msgstr ""
-#: nova/virt/libvirt/driver.py:585
+#: nova/virt/libvirt/driver.py:663
#, python-format
msgid "Nova requires libvirt version %s or greater."
msgstr ""
-#: nova/virt/libvirt/driver.py:591
+#: nova/virt/libvirt/driver.py:669
#, python-format
msgid "Running Nova with parallels virt_type requires libvirt version %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:611
+#: nova/virt/libvirt/driver.py:690
#, python-format
msgid ""
-"Running Nova with qemu/kvm virt_type on s390/s390x requires libvirt "
-"version %(libvirt_ver)s and qemu version %(qemu_ver)s, or greater"
+"Running Nova with qemu/kvm virt_type on %(arch)s requires libvirt version"
+" %(libvirt_ver)s and qemu version %(qemu_ver)s, or greater"
msgstr ""
-#: nova/virt/libvirt/driver.py:754
+#: nova/virt/libvirt/driver.py:929
msgid "operation time out"
msgstr ""
-#: nova/virt/libvirt/driver.py:1107
+#: nova/virt/libvirt/driver.py:1309
#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
msgstr ""
-#: nova/virt/libvirt/driver.py:1114
-#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
-msgstr ""
-
-#: nova/virt/libvirt/driver.py:1204
+#: nova/virt/libvirt/driver.py:1402
msgid "Swap only supports host devices"
msgstr ""
-#: nova/virt/libvirt/driver.py:1511
+#: nova/virt/libvirt/driver.py:1756
#, python-format
msgid ""
"Error from libvirt while set password for username \"%(user)s\": [Error "
"Code %(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1535
+#: nova/virt/libvirt/driver.py:1780
#, python-format
msgid ""
"Error from libvirt while quiescing %(instance_name)s: [Error Code "
"%(error_code)s] %(ex)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1687
+#: nova/virt/libvirt/driver.py:1932
msgid "Found no disk to snapshot."
msgstr ""
-#: nova/virt/libvirt/driver.py:1777
+#: nova/virt/libvirt/driver.py:2022
#, python-format
msgid "Unknown type: %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1782
+#: nova/virt/libvirt/driver.py:2027
msgid "snapshot_id required in create_info"
msgstr ""
-#: nova/virt/libvirt/driver.py:1831
+#: nova/virt/libvirt/driver.py:2076
#, python-format
msgid ""
"Something went wrong when deleting a volume snapshot: rebasing a "
"%(protocol)s network disk using qemu-img has not been fully tested"
msgstr ""
-#: nova/virt/libvirt/driver.py:1885
+#: nova/virt/libvirt/driver.py:2130
#, python-format
msgid "Libvirt '%s' or later is required for online deletion of volume snapshots."
msgstr ""
-#: nova/virt/libvirt/driver.py:1893
+#: nova/virt/libvirt/driver.py:2138
#, python-format
msgid "Unknown delete_info type %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:1929
+#: nova/virt/libvirt/driver.py:2174
#, python-format
msgid "Disk with id: %s not found attached to instance."
msgstr ""
-#: nova/virt/libvirt/driver.py:1938
+#: nova/virt/libvirt/driver.py:2183
msgid "filename cannot be None"
msgstr ""
-#: nova/virt/libvirt/driver.py:1967
+#: nova/virt/libvirt/driver.py:2212
#, python-format
msgid "no match found for %s"
msgstr ""
-#: nova/virt/libvirt/driver.py:2047
+#: nova/virt/libvirt/driver.py:2292
#, python-format
msgid ""
"Relative blockcommit support was not detected. Libvirt '%s' or later is "
@@ -5684,94 +5648,94 @@ msgid ""
"snapshots."
msgstr ""
-#: nova/virt/libvirt/driver.py:2576 nova/virt/xenapi/vmops.py:1762
+#: nova/virt/libvirt/driver.py:2814 nova/virt/xenapi/vmops.py:1771
msgid "Guest does not have a console available"
msgstr ""
-#: nova/virt/libvirt/driver.py:2936
+#: nova/virt/libvirt/driver.py:3176
#, python-format
msgid "%s format is not supported"
msgstr ""
-#: nova/virt/libvirt/driver.py:3057
+#: nova/virt/libvirt/driver.py:3297
#, python-format
msgid "Detaching PCI devices with libvirt < %(ver)s is not permitted"
msgstr ""
-#: nova/virt/libvirt/driver.py:3138
+#: nova/virt/libvirt/driver.py:3377
#, python-format
msgid "Detaching SR-IOV ports with libvirt < %(ver)s is not permitted"
msgstr ""
-#: nova/virt/libvirt/driver.py:3218
+#: nova/virt/libvirt/driver.py:3456
#, python-format
msgid ""
"Config requested an explicit CPU model, but the current libvirt "
"hypervisor '%s' does not support selecting CPU models"
msgstr ""
-#: nova/virt/libvirt/driver.py:3224
+#: nova/virt/libvirt/driver.py:3462
msgid "Config requested a custom CPU model, but no model name was provided"
msgstr ""
-#: nova/virt/libvirt/driver.py:3228
+#: nova/virt/libvirt/driver.py:3466
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
-#: nova/virt/libvirt/driver.py:3264
+#: nova/virt/libvirt/driver.py:3502
#, python-format
msgid ""
"Volume sets discard option, but libvirt %(libvirt)s or later is required,"
" qemu %(qemu)s or later is required."
msgstr ""
-#: nova/virt/libvirt/driver.py:3406
+#: nova/virt/libvirt/driver.py:3644
msgid "Unable to get host UUID: /etc/machine-id does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:3414
+#: nova/virt/libvirt/driver.py:3652
msgid "Unable to get host UUID: /etc/machine-id is empty"
msgstr ""
-#: nova/virt/libvirt/driver.py:3595
+#: nova/virt/libvirt/driver.py:3843
#, python-format
msgid "Invalid libvirt version %(version)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4663
+#: nova/virt/libvirt/driver.py:4958
#, python-format
msgid ""
"Invalid vcpu_pin_set config, one or more of the specified cpuset is not "
"online. Online cpuset(s): %(online)s, requested cpuset(s): %(req)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:4670
+#: nova/virt/libvirt/driver.py:4965
msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range."
msgstr ""
-#: nova/virt/libvirt/driver.py:5218
+#: nova/virt/libvirt/driver.py:5506
msgid "Block migration can not be used with shared storage."
msgstr ""
-#: nova/virt/libvirt/driver.py:5235
+#: nova/virt/libvirt/driver.py:5523
#, python-format
msgid "Cannot block migrate instance %s with mapped volumes"
msgstr ""
-#: nova/virt/libvirt/driver.py:5242
+#: nova/virt/libvirt/driver.py:5530
msgid ""
"Live migration can not be used without shared storage except a booted "
"from volume VM which does not have a local disk."
msgstr ""
-#: nova/virt/libvirt/driver.py:5322
+#: nova/virt/libvirt/driver.py:5610
#, python-format
msgid ""
"Unable to migrate %(instance_uuid)s: Disk of instance is too "
"large(available on destination host:%(available)s < need:%(necessary)s)"
msgstr ""
-#: nova/virt/libvirt/driver.py:5368
+#: nova/virt/libvirt/driver.py:5656
#, python-format
msgid ""
"CPU doesn't have compatibility.\n"
@@ -5781,12 +5745,12 @@ msgid ""
"Refer to %(u)s"
msgstr ""
-#: nova/virt/libvirt/driver.py:5438
+#: nova/virt/libvirt/driver.py:5726
#, python-format
msgid "The firewall filter for %s does not exist"
msgstr ""
-#: nova/virt/libvirt/driver.py:5572
+#: nova/virt/libvirt/driver.py:5860
msgid ""
"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag "
"or your destination node does not support retrieving listen addresses. "
@@ -5795,7 +5759,7 @@ msgid ""
"address (0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)."
msgstr ""
-#: nova/virt/libvirt/driver.py:5606
+#: nova/virt/libvirt/driver.py:5894
msgid ""
"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag "
"or your destination node does not support retrieving listen addresses. "
@@ -5803,15 +5767,15 @@ msgid ""
"serial console or upgrade your libvirt version."
msgstr ""
-#: nova/virt/libvirt/driver.py:6720
+#: nova/virt/libvirt/driver.py:7004
msgid "Unable to resize disk down."
msgstr ""
-#: nova/virt/libvirt/driver.py:6728
+#: nova/virt/libvirt/driver.py:7012
msgid "Migration is not supported for LVM backed instances"
msgstr ""
-#: nova/virt/libvirt/driver.py:6746
+#: nova/virt/libvirt/driver.py:7030
#, python-format
msgid "not able to execute ssh command: %s"
msgstr ""
@@ -5866,40 +5830,51 @@ msgid ""
" LVM images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:329
+#: nova/virt/libvirt/imagebackend.py:331
#, python-format
msgid "Could not load line %(line)s, got error %(error)s"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:344
+#: nova/virt/libvirt/imagebackend.py:346
msgid "Attempted overwrite of an existing value."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:392
+#: nova/virt/libvirt/imagebackend.py:394
msgid "clone() is not implemented"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:612
+#: nova/virt/libvirt/imagebackend.py:406
+msgid "direct_snapshot() is not implemented"
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:662
msgid "You should specify images_volume_group flag to use LVM images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:674
+#: nova/virt/libvirt/imagebackend.py:736
msgid "Instance disk to be encrypted but no context provided"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:729
+#: nova/virt/libvirt/imagebackend.py:791
msgid "You should specify images_rbd_pool flag to use rbd images."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:832
+#: nova/virt/libvirt/imagebackend.py:894
msgid "Image is not raw format"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:840
+#: nova/virt/libvirt/imagebackend.py:902
msgid "No image locations are accessible"
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:902
+#: nova/virt/libvirt/imagebackend.py:970
+#, python-format
+msgid ""
+"Cannot determine the parent storage pool for %s; cannot determine where "
+"to store images"
+msgstr ""
+
+#: nova/virt/libvirt/imagebackend.py:1063
#, python-format
msgid ""
"PCS doesn't support images in %s format. You should either set "
@@ -5907,26 +5882,26 @@ msgid ""
"format."
msgstr ""
-#: nova/virt/libvirt/imagebackend.py:953
+#: nova/virt/libvirt/imagebackend.py:1114
#, python-format
msgid "Unknown image_type=%s"
msgstr ""
-#: nova/virt/libvirt/utils.py:358
+#: nova/virt/libvirt/utils.py:371
msgid "Can't retrieve root device path from instance libvirt configuration"
msgstr ""
-#: nova/virt/libvirt/vif.py:438 nova/virt/libvirt/vif.py:692
-#: nova/virt/libvirt/vif.py:881
+#: nova/virt/libvirt/vif.py:441 nova/virt/libvirt/vif.py:741
+#: nova/virt/libvirt/vif.py:958
msgid "vif_type parameter must be present for this vif_driver implementation"
msgstr ""
-#: nova/virt/libvirt/vif.py:444 nova/virt/libvirt/vif.py:887
+#: nova/virt/libvirt/vif.py:447 nova/virt/libvirt/vif.py:964
#, python-format
msgid "Unexpected vif_type=%s"
msgstr ""
-#: nova/virt/libvirt/vif.py:698
+#: nova/virt/libvirt/vif.py:747
#, python-format
msgid "Plug vif failed because of unexpected vif_type=%s"
msgstr ""
@@ -5948,22 +5923,32 @@ msgstr ""
msgid "Path %s must be LVM logical volume"
msgstr ""
-#: nova/virt/libvirt/storage/rbd_utils.py:113
+#: nova/virt/libvirt/storage/rbd_utils.py:117
msgid "rbd python libraries not found"
msgstr ""
-#: nova/virt/libvirt/storage/rbd_utils.py:165
+#: nova/virt/libvirt/storage/rbd_utils.py:169
msgid "Not stored in rbd"
msgstr ""
-#: nova/virt/libvirt/storage/rbd_utils.py:169
+#: nova/virt/libvirt/storage/rbd_utils.py:173
msgid "Blank components"
msgstr ""
-#: nova/virt/libvirt/storage/rbd_utils.py:172
+#: nova/virt/libvirt/storage/rbd_utils.py:176
msgid "Not an rbd snapshot"
msgstr ""
+#: nova/virt/libvirt/storage/rbd_utils.py:229
+#, python-format
+msgid "no write permission on storage pool %s"
+msgstr ""
+
+#: nova/virt/libvirt/storage/rbd_utils.py:257
+#, python-format
+msgid "no usable parent snapshot for volume %s"
+msgstr ""
+
#: nova/virt/libvirt/volume/net.py:86
msgid "Invalid volume source data"
msgstr ""
@@ -5994,23 +5979,23 @@ msgstr ""
msgid "Cannot mount Scality SOFS, check syslog for errors"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:149
+#: nova/virt/vmwareapi/driver.py:148
msgid ""
"Must specify host_ip, host_username and host_password to use "
"vmwareapi.VMwareVCDriver"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:158
+#: nova/virt/vmwareapi/driver.py:157
#, python-format
msgid "Invalid Regular Expression %s"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:174
+#: nova/virt/vmwareapi/driver.py:173
#, python-format
msgid "The specified cluster '%s' was not found in vCenter"
msgstr ""
-#: nova/virt/vmwareapi/driver.py:543
+#: nova/virt/vmwareapi/driver.py:539
msgid ""
"Multiple hosts may be managed by the VMWare vCenter driver; therefore we "
"do not return uptime for just one host."
@@ -6038,7 +6023,7 @@ msgstr ""
msgid "The default PBM policy doesn't exist on the backend."
msgstr ""
-#: nova/virt/vmwareapi/images.py:453
+#: nova/virt/vmwareapi/images.py:459
msgid "Extracting vmdk from OVA failed."
msgstr ""
@@ -6096,50 +6081,50 @@ msgstr ""
msgid "Rescue device does not exist for instance %s"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:545
+#: nova/virt/vmwareapi/vmops.py:553
msgid "Image disk size greater than requested disk size"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:587
+#: nova/virt/vmwareapi/vmops.py:595
#, python-format
msgid "disk type '%s' not supported"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:808
+#: nova/virt/vmwareapi/vmops.py:821
#, python-format
msgid "Invalid config_drive_format \"%s\""
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1007
+#: nova/virt/vmwareapi/vmops.py:1020
msgid "instance is not powered on"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1103
+#: nova/virt/vmwareapi/vmops.py:1116
msgid "pause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1107
+#: nova/virt/vmwareapi/vmops.py:1120
msgid "unpause not supported for vmwareapi"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1126
+#: nova/virt/vmwareapi/vmops.py:1139
msgid "instance is powered off and cannot be suspended."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1147
+#: nova/virt/vmwareapi/vmops.py:1160
msgid "instance is not in a suspended state"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1323
+#: nova/virt/vmwareapi/vmops.py:1336
msgid "Unable to shrink disk."
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1743
+#: nova/virt/vmwareapi/vmops.py:1757
#, python-format
msgid "No device with interface-id %s exists on VM"
msgstr ""
-#: nova/virt/vmwareapi/vmops.py:1755
+#: nova/virt/vmwareapi/vmops.py:1769
#, python-format
msgid "No device with MAC address %s exists on the VM"
msgstr ""
@@ -6164,16 +6149,16 @@ msgid ""
"connection_password to use compute_driver=xenapi.XenAPIDriver"
msgstr ""
-#: nova/virt/xenapi/driver.py:638
+#: nova/virt/xenapi/driver.py:645
msgid "Host startup on XenServer is not supported."
msgstr ""
-#: nova/virt/xenapi/fake.py:851
+#: nova/virt/xenapi/fake.py:850
#, python-format
msgid "xenapi.fake does not have an implementation for %s"
msgstr ""
-#: nova/virt/xenapi/fake.py:959
+#: nova/virt/xenapi/fake.py:958
#, python-format
msgid ""
"xenapi.fake does not have an implementation for %s or it has been called "
@@ -6365,75 +6350,75 @@ msgstr ""
msgid "instance has a kernel or ramdisk but not both"
msgstr ""
-#: nova/virt/xenapi/vmops.py:1610
+#: nova/virt/xenapi/vmops.py:1619
#, python-format
msgid "Instance is already in Rescue Mode: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:2067
+#: nova/virt/xenapi/vmops.py:2073
#, python-format
msgid "Destination host:%s must be in the same aggregate as the source server"
msgstr ""
-#: nova/virt/xenapi/vmops.py:2088
+#: nova/virt/xenapi/vmops.py:2094
msgid "No suitable network for migrate"
msgstr ""
-#: nova/virt/xenapi/vmops.py:2094
+#: nova/virt/xenapi/vmops.py:2100
#, python-format
msgid "PIF %s does not contain IP address"
msgstr ""
-#: nova/virt/xenapi/vmops.py:2107
+#: nova/virt/xenapi/vmops.py:2113
msgid "Migrate Receive failed"
msgstr ""
-#: nova/virt/xenapi/vmops.py:2181
+#: nova/virt/xenapi/vmops.py:2186
msgid "XAPI supporting relax-xsm-sr-check=true required"
msgstr ""
-#: nova/virt/xenapi/vmops.py:2192
+#: nova/virt/xenapi/vmops.py:2202
#, python-format
msgid "assert_can_migrate failed because: %s"
msgstr ""
-#: nova/virt/xenapi/vmops.py:2331
+#: nova/virt/xenapi/vmops.py:2341
msgid "Migrate Send failed"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:88 nova/virt/xenapi/volume_utils.py:273
+#: nova/virt/xenapi/volume_utils.py:99 nova/virt/xenapi/volume_utils.py:284
#, python-format
msgid "Unable to obtain target information %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:176
+#: nova/virt/xenapi/volume_utils.py:187
#, python-format
msgid "Unable to introduce VDI on SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:180
+#: nova/virt/xenapi/volume_utils.py:191
#, python-format
msgid ""
"VDI not found on SR %(sr)s (vdi_uuid %(vdi_uuid)s, target_lun "
"%(target_lun)s)"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:191
+#: nova/virt/xenapi/volume_utils.py:202
#, python-format
msgid "Unable to get record of VDI %s on"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:213
+#: nova/virt/xenapi/volume_utils.py:224
#, python-format
msgid "Unable to introduce VDI for SR %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:311
+#: nova/virt/xenapi/volume_utils.py:322
#, python-format
msgid "Unable to find SR from VBD %s"
msgstr ""
-#: nova/virt/xenapi/volume_utils.py:322
+#: nova/virt/xenapi/volume_utils.py:333
#, python-format
msgid "Unable to find SR from VDI %s"
msgstr ""
@@ -6458,30 +6443,34 @@ msgid ""
"configuration option set."
msgstr ""
-#: nova/volume/cinder.py:292
+#: nova/volume/cinder.py:299
#, python-format
msgid "volume '%(vol)s' status must be 'in-use'. Currently in '%(status)s' status"
msgstr ""
-#: nova/volume/cinder.py:300
+#: nova/volume/cinder.py:307
#, python-format
msgid "volume '%(vol)s' status must be 'available'. Currently in '%(status)s'"
msgstr ""
-#: nova/volume/cinder.py:305
+#: nova/volume/cinder.py:312
#, python-format
msgid "volume %s already attached"
msgstr ""
-#: nova/volume/cinder.py:310
+#: nova/volume/cinder.py:317
#, python-format
msgid ""
"Instance %(instance)s and volume %(vol)s are not in the same "
"availability_zone. Instance is in %(ins_zone)s. Volume is in %(vol_zone)s"
msgstr ""
-#: nova/volume/cinder.py:322
+#: nova/volume/cinder.py:329
#, python-format
msgid "volume %s already detached"
msgstr ""
+#: nova/volume/cinder.py:333
+msgid "Volume must be attached in order to detach."
+msgstr ""
+
diff --git a/nova/locale/pa_IN/LC_MESSAGES/nova-log-critical.po b/nova/locale/pa_IN/LC_MESSAGES/nova-log-critical.po
index a715ce3929..8953a025ba 100644
--- a/nova/locale/pa_IN/LC_MESSAGES/nova-log-critical.po
+++ b/nova/locale/pa_IN/LC_MESSAGES/nova-log-critical.po
@@ -7,19 +7,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.dev41\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-27 06:30+0000\n"
-"PO-Revision-Date: 2015-08-08 11:24+0000\n"
-"Last-Translator: A S Alam <apreet.alam@gmail.com>\n"
-"Language-Team: Punjabi (India)\n"
-"Language: pa-IN\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-08-08 11:24+0000\n"
+"Last-Translator: A S Alam <apreet.alam@gmail.com>\n"
+"Language: pa-IN\n"
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Punjabi (India)\n"
#, python-format
msgid "Missing core API extensions: %s"
diff --git a/nova/locale/pt_BR/LC_MESSAGES/nova.po b/nova/locale/pt_BR/LC_MESSAGES/nova.po
index aaadce5bc0..d1301081e8 100644
--- a/nova/locale/pt_BR/LC_MESSAGES/nova.po
+++ b/nova/locale/pt_BR/LC_MESSAGES/nova.po
@@ -1,22 +1,43 @@
-# Portuguese (Brazil) translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
-msgid ""
-msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+# Translators:
+# Francisco Demontiê dos Santos Junior <demontiejunior@gmail.com>, 2013
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011
+# Gabriel Wainer, 2013
+# Josemar Muller Lohn <j@lo.hn>, 2013
+# Leonardo Rodrigues de Mello <>, 2012
+# Marcelo Dieder <marcelodieder@gmail.com>, 2013
+# MichaelBr <maicao_xd@hotmail.com>, 2013
+# Volmar Oliveira Junior <volmar.oliveira.jr@gmail.com>, 2013
+# Welkson Renny de Medeiros <welkson@gmail.com>, 2012
+# Wiliam Souza <wiliamsouza83@gmail.com>, 2013
+# Fernando Pimenta <fernando.c.pimenta@gmail.com>, 2015. #zanata
+# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# José Mello <jose.eduardo.jr@gmail.com>, 2016. #zanata
+# Lucas Palm <lapalm@us.ibm.com>, 2016. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-09-11 06:59+0000\n"
-"Last-Translator: Fernando Pimenta <fernando.c.pimenta@gmail.com>\n"
-"Language: pt_BR\n"
-"Language-Team: Portuguese (Brazil)\n"
-"Plural-Forms: nplurals=2; plural=(n > 1)\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
+"PO-Revision-Date: 2016-02-03 07:16+0000\n"
+"Last-Translator: Lucas Palm <lapalm@us.ibm.com>\n"
+"Language: pt-BR\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Portuguese (Brazil)\n"
+
+#, python-format
+msgid "%(address)s is not a valid IP address."
+msgstr "%(address)s não é um endereço ip válido."
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
@@ -34,10 +55,36 @@ msgstr ""
"pela política"
#, python-format
+msgid "%(cidr)s is not a valid IP network."
+msgstr "%(cidr)s não é uma rede de IP válida."
+
+#, python-format
+msgid ""
+"%(desc)r\n"
+"command: %(cmd)r\n"
+"exit code: %(code)r\n"
+"stdout: %(stdout)r\n"
+"stderr: %(stderr)r"
+msgstr ""
+"%(desc)r\n"
+"comando: %(cmd)r.\n"
+"código de saída: %(code)r.\n"
+"stdout: %(stdout)r\n"
+"stderr: %(stderr)r"
+
+#, python-format
msgid "%(err)s"
msgstr "%(err)s"
#, python-format
+msgid "%(field)s should not be part of the updates."
+msgstr "%(field)s não deve fazer parte das atualizações."
+
+#, python-format
+msgid "%(fieldname)s missing field type"
+msgstr "%(fieldname)s faltando tipo do campo"
+
+#, python-format
msgid "%(host)s:%(port)s: Target closed"
msgstr "%(host)s:%(port)s: Destino fechado"
@@ -79,6 +126,10 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "O hypervisor %(type)s não suporta dispositivos PCI"
#, python-format
+msgid "%(typename)s in %(fieldname)s is not an instance of Enum"
+msgstr "%(typename)s em %(fieldname)s não é uma instância de Enumeração"
+
+#, python-format
msgid "%(value_name)s must be <= %(max_value)d"
msgstr "%(value_name)s deve ser <= %(max_value)d"
@@ -99,6 +150,10 @@ msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr "%(worker_name)s valor de %(workers)s é inválido, deve ser maior que 0"
#, python-format
+msgid "%r failed. Not Retrying."
+msgstr "%r falhou. Não tentando novamente."
+
+#, python-format
msgid "%r failed. Retrying."
msgstr "%r falhou. Tentando novamente."
@@ -115,6 +170,10 @@ msgid "%s is not a string or unicode"
msgstr "%s não é uma sequência ou unicode"
#, python-format
+msgid "%s is not a valid IP network"
+msgstr "%s não é uma rede IP válida"
+
+#, python-format
msgid "%s is not a valid node managed by this compute host."
msgstr "%s não é um nó válido gerenciado por este host de cálculo."
@@ -127,6 +186,10 @@ msgid "%s must be either 'MANUAL' or 'AUTO'."
msgstr "%s deve ser 'MANUAL' ou 'AUTO'."
#, python-format
+msgid "'%(other)s' should be an instance of '%(cls)s'"
+msgstr "'%(other)s' deve ser uma instância de '%(cls)s'"
+
+#, python-format
msgid "'%s' is either missing or empty."
msgstr "'%s' está ausente ou vazio."
@@ -154,6 +217,10 @@ msgstr ""
"Um nome de modelo de CPU não deve ser configurado quando um modelo de CPU do "
"host for solicitado"
+#, python-format
+msgid "A NetworkModel is required in field %s"
+msgstr "Um NetworkModel é requerido no campo %s"
+
msgid ""
"A unique ID given to each file system. This is value is set in Glance and "
"agreed upon here so that the operator knowns they are dealing with the same "
@@ -187,9 +254,6 @@ msgstr ""
msgid "API version %(version)s is not supported on this method."
msgstr "Versão de API %(version)s não é suportada nesse método."
-msgid "Access key not provided"
-msgstr "Chave de acesso não fornecida"
-
msgid "Access list not available for public flavors."
msgstr "Lista de acesso não disponível para métodos públicos."
@@ -220,6 +284,9 @@ msgstr "O endereço não pôde ser convertido."
msgid "Address not specified"
msgstr "Endereço não especificado"
+msgid "Affinity instance group policy was violated."
+msgstr "A política de grupo da instância de afinidade foi violada."
+
#, python-format
msgid "Agent does not support the call: %(method)s"
msgstr "O agente não suporta a chamada: %(method)s"
@@ -285,6 +352,10 @@ msgstr "Política de grupo de instância Antiafinidade foi violada."
msgid "Architecture name '%(arch)s' is not recognised"
msgstr "Nome de arquitetura '%(arch)s' não é reconhecido"
+#, python-format
+msgid "Architecture name '%s' is not valid"
+msgstr "O nome de arquitetura '%s' não é válido"
+
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr "O 'tipo' de argumento para reinicializar não é HARD ou SOFT"
@@ -370,9 +441,23 @@ msgstr ""
msgid "Binary"
msgstr "binário"
+#, python-format
+msgid ""
+"Binding failed for port %(port_id)s, please check neutron logs for more "
+"information."
+msgstr ""
+"A ligação falhou para a porta %(port_id)s, verifique os logs do neutron para "
+"obter informações adicionais."
+
msgid "Blank components"
msgstr "Componentes em branco"
+msgid ""
+"Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size"
+msgstr ""
+"Volumes em branco (origem: 'blank', dest: 'volume') precisam ter tamanho "
+"diferente de zero"
+
#, python-format
msgid "Block Device %(id)s is not bootable."
msgstr "%(id)s do Dispositivo de Bloco não é inicializável."
@@ -468,6 +553,21 @@ msgstr "Número de CPU %(cpunum)d é maior que o máximo %(cpumax)d"
msgid "CPU number %(cpuset)s is not assigned to any node"
msgstr "Número de CPU %(cpuset)s não é designado a nenhum nó"
+#, python-format
+msgid "CPU pinning is not supported by the host: %(reason)s"
+msgstr "A fixação de CPU não é suportada pelo host: %(reason)s"
+
+#, python-format
+msgid ""
+"CPU set to pin/unpin %(requested)s must be a subset of known CPU set "
+"%(cpuset)s"
+msgstr ""
+"A CPU configurada para fixar/desafixar %(requested)s deve ser um subconjunto "
+"do conjunto de CPU conhecido %(cpuset)s"
+
+msgid "Can not add access to a public flavor."
+msgstr "Não é possível incluir acesso em um tipo público."
+
msgid "Can not find requested image"
msgstr "Não é possível localizar a imagem solicitada"
@@ -544,6 +644,16 @@ msgstr ""
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr "Não é possível chamar %(method)s no objeto órfão %(objtype)s"
+msgid ""
+"Cannot create default bittorrent URL without xenserver.torrent_base_url "
+"configuration option set."
+msgstr ""
+"Não é possível criar URL bittorrent padrão sem definir a opção de "
+"configuração xenserver.torrent_base_url."
+
+msgid "Cannot disassociate auto assigned floating IP"
+msgstr "Não é possível desassociar o IP flutuante designado automaticamente"
+
msgid "Cannot execute /sbin/mount.sofs"
msgstr "Não é possível executar /sbin/mount.sofs"
@@ -588,6 +698,14 @@ msgstr ""
msgid "Cannot rescue a volume-backed instance"
msgstr "Não é possível resgatar uma instância suportada por volume"
+#, python-format
+msgid ""
+"Cannot resize the root disk to a smaller size. Current size: "
+"%(curr_root_gb)s GB. Requested size: %(new_root_gb)s GB."
+msgstr ""
+"Não é possível redimensionar o disco raiz para um tamanho menor. Tamanho "
+"atual: %(curr_root_gb)s GB. Tamanho solicitado: %(new_root_gb)s GB."
+
msgid "Cannot run any more instances of this type."
msgstr "Não é possível executar nenhuma outra instância desse tipo."
@@ -637,6 +755,9 @@ msgstr "A mensagem da célula atingiu a contagem máxima de hops: %(hop_count)s"
msgid "Cell name cannot be empty"
msgstr "O nome da célula não pode estar vazio"
+msgid "Cell name cannot contain '!', '.' or '@'"
+msgstr "Nome da célula não pode conter '!', '.' ou '@'"
+
msgid "Cell type must be 'parent' or 'child'"
msgstr "O tipo da célula deve ser 'pai' ou 'filho'"
@@ -701,6 +822,22 @@ msgstr ""
"Configuração solicitou um modelo de CPU explícito, mas o hypervisor libvirt "
"atual '%s' não suporta a seleção de modelos de CPU"
+#, python-format
+msgid ""
+"Conflict updating instance %(instance_uuid)s, but we were unable to "
+"determine the cause"
+msgstr ""
+"Conflito ao atualizar a instância %(instance_uuid)s, mas não foi possível "
+"determinar a causa"
+
+#, python-format
+msgid ""
+"Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. "
+"Actual: %(actual)s"
+msgstr ""
+"Conflito ao atualizar a instância %(instance_uuid)s. Esperado: %(expected)s. "
+"Real: %(actual)s"
+
msgid "Conflicting policies configured!"
msgstr "Políticas conflitantes configuradas!"
@@ -709,6 +846,10 @@ msgid "Connection to cinder host failed: %(reason)s"
msgstr "Conexão com o host do cinder falhou: %(reason)s"
#, python-format
+msgid "Connection to glance host %(server)s failed: %(reason)s"
+msgstr "Conexão ao host Glance %(server)s falhou: %(reason)s"
+
+#, python-format
msgid "Connection to libvirt lost: %s"
msgstr "Conexão com libvirt perdida: %s"
@@ -787,10 +928,6 @@ msgstr "Não foi possível localizar o binário %(binary)s no host %(host)s."
msgid "Could not find config at %(path)s"
msgstr "Não foi possível localizar a configuração em %(path)s"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "Não foi possível localizar o(s) par(es) de chaves: %s"
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr ""
"Não foi possível localizar a(s) referência(s) do armazenamento de dados que "
@@ -830,14 +967,6 @@ msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr ""
"Não foi possível atribuir um IP para o Link Local de %(interface)s :%(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"Não foi possível parar a instância %(instance)s dentro de 1 hora. vm_state "
-"atual: %(vm_state)s, task_state atual: %(task_state)s"
-
msgid "Create networks failed"
msgstr "Falha na criação de redes"
@@ -1042,6 +1171,26 @@ msgstr ""
"%(error_code)s] %(ex)s"
#, python-format
+msgid ""
+"Error from libvirt while set password for username \"%(user)s\": [Error Code "
+"%(error_code)s] %(ex)s"
+msgstr ""
+"Erro de libvirt ao configurar senha para o nome do usuário \"%(user)s\": "
+"[Código de erro %(error_code)s] %(ex)s"
+
+#, python-format
+msgid ""
+"Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs "
+"(%(e)s)"
+msgstr ""
+"Erro ao montar %(device)s para %(dir)s na imagem %(image)s com libguestfs "
+"(%(e)s)"
+
+#, python-format
+msgid "Error mounting %(image)s with libguestfs (%(e)s)"
+msgstr "Erro ao montar %(image)s com libguestfs (%(e)s)"
+
+#, python-format
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Erro ao criar monitor de recurso: %(monitor)s"
@@ -1057,6 +1206,14 @@ msgstr "O evento deve ser uma instância de nova.virt.event.Event"
#, python-format
msgid ""
+"Exceeded max scheduling attempts %(max_attempts)d for instance "
+"%(instance_uuid)s. Last exception: %(exc_reason)s"
+msgstr ""
+"Máximo excedido de tentativas de planejamento %(max_attempts)d para a "
+"instância %(instance_uuid)s. Última exceção:%(exc_reason)s"
+
+#, python-format
+msgid ""
"Exceeded max scheduling retries %(max_retries)d for instance "
"%(instance_uuid)s during live migration"
msgstr ""
@@ -1064,6 +1221,10 @@ msgstr ""
"instância %(instance_uuid)s durante migração em tempo real"
#, python-format
+msgid "Exceeded maximum number of retries. %(reason)s"
+msgstr "Foi excedido o número máximo de tentativas. %(reason)s"
+
+#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr "Esperado um uuid, mas recebido %(uuid)s."
@@ -1071,21 +1232,6 @@ msgstr "Esperado um uuid, mas recebido %(uuid)s."
msgid "Expected object of type: %s"
msgstr "Objeto esperado do tipo: %s"
-msgid "Expecting a list of resources"
-msgstr "Aguardando uma lista de recursos"
-
-msgid "Expecting a list of tagSets"
-msgstr "Esperando uma lista de tagSets"
-
-msgid "Expecting both key and value to be set"
-msgstr "Esperando que a chave e o valor sejam configurados"
-
-msgid "Expecting key to be set"
-msgstr "Esperando chave para ser setada"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "Esperando que tagSet sejam pares de chave/valor"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "Coluna adicional %(table)s.%(column)s na tabela de sombra"
@@ -1103,6 +1249,14 @@ msgstr ""
"esperada"
#, python-format
+msgid "Failed to access port %(port_id)s: %(reason)s"
+msgstr "Falha ao acessar a porta %(port_id)s: %(reason)s"
+
+#, python-format
+msgid "Failed to add bridge: %s"
+msgstr "Falha ao incluir bridge: %s"
+
+#, python-format
msgid ""
"Failed to add deploy parameters on node %(node)s when provisioning the "
"instance %(instance)s"
@@ -1250,10 +1404,6 @@ msgstr "Falha ao suspender a instância: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "Falha ao finalizar instância: %(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "Falha ao analisar sintaticamente a resposta do keystone: %s"
-
msgid "Failure prepping block device."
msgstr "Falha na preparação do dispositivo de bloco."
@@ -1283,6 +1433,15 @@ msgid "Filename of root Certificate Revocation List"
msgstr "O nome do arquivo da Lista de Revogação de Certificado raiz"
#, python-format
+msgid "Fixed IP %(address)s already exists."
+msgstr "O IP fixo %(address)s já existe."
+
+#, python-format
+msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
+msgstr ""
+"O IP fixo %(ip)s não é um endereço IP válido para a rede %(network_id)s."
+
+#, python-format
msgid "Fixed IP %s has been deleted"
msgstr "O IP fixo %s foi excluído"
@@ -1319,6 +1478,10 @@ msgid "Fixed IP associate failed for network: %(net)s."
msgstr "Associação do IP fixo falhou para rede: %(net)s."
#, python-format
+msgid "Fixed IP not found for address %(address)s."
+msgstr "IP fixo não localizado para o endereço %(address)s."
+
+#, python-format
msgid "Flavor %(flavor_id)s could not be found."
msgstr "O método %(flavor_id)s não pôde ser localizado."
@@ -1335,14 +1498,6 @@ msgstr ""
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr ""
-"Especificação %(id)d extra de tipo não pode ser atualizada ou criada após "
-"%(retries)d novas tentativas."
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
@@ -1386,14 +1541,71 @@ msgstr "Tipo com nome %(flavor_name)s não pôde ser localizado."
msgid "Flavor with name %(name)s already exists."
msgstr "Tipo com nome %(name)s já existe."
+#, python-format
+msgid ""
+"Flavor's disk is smaller than the minimum size specified in image metadata. "
+"Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i "
+"bytes."
+msgstr ""
+"O disco do tipo é menor que o tamanho mínimo especificado nos metadados de "
+"imagem. O disco do tipo tem %(flavor_size)i bytes; o tamanho mínimo é "
+"%(image_min_disk)i bytes."
+
+#, python-format
+msgid ""
+"Flavor's disk is too small for requested image. Flavor disk is "
+"%(flavor_size)i bytes, image is %(image_size)i bytes."
+msgstr ""
+"O disco do tipo é muito pequeno para a imagem solicitada. O disco do tipo "
+"tem %(flavor_size)i bytes; a imagem tem %(image_size)i bytes."
+
msgid "Flavor's memory is too small for requested image."
msgstr "Memória do tipo é muito pequena para a imagem solicitada."
+#, python-format
+msgid "Floating IP %(address)s already exists."
+msgstr "O IP flutuante %(address)s já existe."
+
+#, python-format
+msgid "Floating IP %(address)s association has failed."
+msgstr "A associação de IP flutuante %(address)s falhou."
+
+#, python-format
+msgid "Floating IP %(address)s is associated."
+msgstr "O IP flutuante %(address)s está associado."
+
+#, python-format
+msgid "Floating IP %(address)s is not associated with instance %(id)s."
+msgstr "O IP flutuante %(address)s não está associado à instância %(id)s."
+
+#, python-format
+msgid "Floating IP %(address)s is not associated."
+msgstr "O IP flutuante %(address)s não está associado."
+
msgid "Floating IP allocate failed."
msgstr "Alocação de IP flutuante falhou."
-msgid "Floating ip is not associated."
-msgstr "O IP flutuante não está associado."
+msgid "Floating IP is not associated"
+msgstr "O IP flutuante não está associado"
+
+#, python-format
+msgid "Floating IP not found for ID %(id)s."
+msgstr "IP flutuante não localizado para o ID %(id)s."
+
+#, python-format
+msgid "Floating IP not found for ID %s"
+msgstr "IP flutuante não localizado para ID %s"
+
+#, python-format
+msgid "Floating IP not found for address %(address)s."
+msgstr "IP flutuante não localizado para o endereço %(address)s."
+
+#, python-format
+msgid "Floating IP not found for host %(host)s."
+msgstr "IP flutuante não localizado para o host %(host)s."
+
+msgid "Floating IP pool not found."
+msgstr "Conjunto de IPs flutuantes não localizado."
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
@@ -1461,6 +1673,10 @@ msgstr ""
"tempo real envolvendo essa versão pode causar a perda de dados. Faça upgrade "
"de Nova em %(server)s e tente novamente."
+#, python-format
+msgid "Host '%(name)s' is not mapped to any cell"
+msgstr "Host '%(name)s' não mapeado para qualquer célula"
+
msgid "Host PowerOn is not supported by the Hyper-V driver"
msgstr "O host PowerOn não é suportado pelo driver Hyper-V"
@@ -1488,6 +1704,10 @@ msgstr ""
"Driver do hypervisor não suporta o método post_live_migration_at_source"
#, python-format
+msgid "Hypervisor virt type '%s' is not valid"
+msgstr "O tipo hypervisor virt '%s' não é válido"
+
+#, python-format
msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised"
msgstr "Tipo de virtualização do hypervisor '%(hv_type)s' não é reconhecido"
@@ -1558,8 +1778,9 @@ msgstr "A chave de metadados da imagem é muito longa"
msgid "Image metadata limit exceeded"
msgstr "Limite excedido de metadados da imagem"
-msgid "Image must be available"
-msgstr "A imagem deve estar disponível"
+#, python-format
+msgid "Image model '%(image)s' is not supported"
+msgstr "O modelo de imagem '%(image)s' não é suportado"
msgid "Image not found."
msgstr "Imagem não encontrada."
@@ -1684,6 +1905,18 @@ msgid "Instance %(instance_uuid)s does not specify a NUMA topology"
msgstr "A instância %(instance_uuid)s não especifica uma topologia NUMA"
#, python-format
+msgid "Instance %(instance_uuid)s does not specify a migration context."
+msgstr "A instância %(instance_uuid)s não especifica um contexto de migração."
+
+#, python-format
+msgid "Instance %(instance_uuid)s doesn't have fixed IP '%(ip)s'."
+msgstr "A instância %(instance_uuid)s não possui IP fixo '%(ip)s'."
+
+#, python-format
+msgid "Instance %(instance_uuid)s has zero fixed IPs."
+msgstr "A instância %(instance_uuid)s possui zero IPs fixos."
+
+#, python-format
msgid ""
"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while "
"the instance is in this state."
@@ -1715,6 +1948,10 @@ msgstr "A instância %s não está conectada."
msgid "Instance %s not found"
msgstr "Instância %s não encontrada"
+#, python-format
+msgid "Instance %s provisioning was aborted"
+msgstr "A instância %s que está sendo provisionada foi interrompida"
+
msgid "Instance could not be found"
msgstr "A instância não pôde ser localizada"
@@ -1771,6 +2008,14 @@ msgid "Instance snapshots are not permitted at this time."
msgstr "Capturas instantâneas da instância não são permitidas nesse momento."
#, python-format
+msgid ""
+"Insufficient Space on Volume Group %(vg)s. Only %(free_space)db available, "
+"but %(size)d bytes required by volume %(lv)s."
+msgstr ""
+"Espaço Insuficiente no Grupo de Volumes %(vg)s. Apenas %(free_space)db "
+"disponíveis, mas %(size)d bytes requeridos pelo volume %(lv)s."
+
+#, python-format
msgid "Insufficient compute resources: %(reason)s."
msgstr "Recursos de cálculo insuficientes: %(reason)s."
@@ -1787,9 +2032,6 @@ msgstr "Interface %(interface)s não encontrada."
msgid "Invalid Base 64 data for file %(path)s"
msgstr "Dados Base 64 inválidos para o arquivo %(path)s"
-msgid "Invalid CIDR"
-msgstr "CIDR inválido"
-
msgid "Invalid Connection Info"
msgstr "Informações de conexão inválidas"
@@ -1802,10 +2044,6 @@ msgid "Invalid IP format %s"
msgstr "Formato de IP inválido %s"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "Protocolo IP %(protocol)s é inválido."
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "Protocolo IP %(protocol)s é inválido."
@@ -1850,6 +2088,10 @@ msgid "Invalid broker_hosts value: %s. It should be in hostname:port format"
msgstr ""
"Valor broker_hosts inválido: %s. Ele deve estar no formato hostname:port"
+#, python-format
+msgid "Invalid certificate format: %s"
+msgstr "Formato de certificado inválido: %s"
+
msgid "Invalid changes-since value"
msgstr "Valor desde as alterações inválido"
@@ -1892,6 +2134,10 @@ msgid "Invalid entry: '%s'; Expecting list or dict"
msgstr "Entrada inválida: '%s'; Esperando dicionário ou lista"
#, python-format
+msgid "Invalid event name %s"
+msgstr "Nome de evento inválido %s"
+
+#, python-format
msgid "Invalid event status `%s'"
msgstr "status de eventos inválido `%s'"
@@ -1931,6 +2177,10 @@ msgid "Invalid id: %(volume_id)s (expecting \"i-...\")"
msgstr "ID inválido: %(volume_id)s (esperando \"i-...\")"
#, python-format
+msgid "Invalid image format '%(format)s'"
+msgstr "Formato de imagem inválido '%(format)s'"
+
+#, python-format
msgid "Invalid image href %(image_href)s."
msgstr "Imagem inválida href %(image_href)s."
@@ -1973,6 +2223,10 @@ msgid "Invalid key_name provided."
msgstr "key_name inválido fornecido."
#, python-format
+msgid "Invalid libvirt version %(version)s"
+msgstr "Versão libvirt inválida %(version)s"
+
+#, python-format
msgid "Invalid memory page size '%(pagesize)s'"
msgstr "Tamanho de página de memória inválido ‘%(pagesize)s‘"
@@ -2090,24 +2344,21 @@ msgid "Invalid usage_type: %s"
msgstr "Usage_type inválido: %s"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"Valor inválido '%(ec2_instance_id)s' para instanceId. A instância não possui "
-"um volume anexado na raiz (%(root)s)"
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "Valor inválido '%s' para força."
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "Valor inválido para 'scheduler_max_attempts'; deve ser >= 1"
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "Valor inválido para a opção Configuração de Unidade: %(option)s"
+#, python-format
+msgid ""
+"Invalid vcpu_pin_set config, one or more of the specified cpuset is not "
+"online. Online cpuset(s): %(online)s, requested cpuset(s): %(req)s"
+msgstr ""
+"Configuração vcpu_pin_set inválida, um ou mais dos cpuset especificados não "
+"estão on-line. cpuset(s) on-line: %(online)s, cpuset(s) solicitados: %(req)s"
+
msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range."
msgstr ""
"Configuração vcpu_pin_set inválida, fora do intervalo de cpu do hypervisor."
@@ -2233,6 +2484,13 @@ msgstr ""
"Lista de sistemas de arquivos que são configurados nesse arquivo nas seções "
"image_file_url:<list entry name>"
+msgid ""
+"Live migration can not be used without shared storage except a booted from "
+"volume VM which does not have a local disk."
+msgstr ""
+"A migração em tempo real não pode ser usada sem armazenamento compartilhado, "
+"exceto uma inicializada a partir da VM de volume que não tem um disco local."
+
msgid "Live migration is supported starting with Hyper-V Server 2012"
msgstr ""
"A migração em tempo real é suportada iniciando com o Hyper-V Server 2012"
@@ -2292,6 +2550,12 @@ msgstr "O marcador %(marker)s não pôde ser localizado."
msgid "Maximum allowed length for 'label' is 255."
msgstr "O comprimento máximo permitido para 'label' é 255."
+msgid "Maximum number of fixed IPs exceeded"
+msgstr "Número máximo de IPs fixos excedido"
+
+msgid "Maximum number of floating IPs exceeded"
+msgstr "Número máximo de IPs flutuantes excedido"
+
msgid "Maximum number of key pairs exceeded"
msgstr "Número máximo de pares de chaves excedido"
@@ -2379,6 +2643,9 @@ msgstr "Está faltando o campo do motivo da desativação"
msgid "Missing flavorRef attribute"
msgstr "Atributo flavorRef ausente"
+msgid "Missing forced_down field"
+msgstr "Faltando campo forced_down"
+
msgid "Missing imageRef attribute"
msgstr "Atributo imageRef ausente"
@@ -2399,6 +2666,12 @@ msgstr "Dicionário de parâmetros ausente"
msgid "Missing tenant parameter"
msgstr "Parâmetro de locatário ausente"
+#, python-format
+msgid ""
+"More than one instance is associated with fixed IP address '%(address)s'."
+msgstr ""
+"Mais de uma instância está associada ao endereço IP fixo '%(address)s'."
+
msgid ""
"More than one possible network found. Specify network ID(s) to select which "
"one(s) to connect to."
@@ -2425,6 +2698,10 @@ msgstr ""
"Vários correspondências de conjuntos de IP flutuantes localizadas para o "
"nome '%s'"
+#, python-format
+msgid "Multiple floating IPs are found for address %(address)s."
+msgstr "Vários IPs flutuantes foram localizados para o endereço %(address)s."
+
msgid ""
"Multiple hosts may be managed by the VMWare vCenter driver; therefore we do "
"not return uptime for just one host."
@@ -2474,6 +2751,10 @@ msgid "Netmask to push into openvpn config"
msgstr "Máscara de rede para enviar por push na configuração de openvpn"
#, python-format
+msgid "Network \"%(val)s\" is not valid in field %(attr)s"
+msgstr "A rede \"%(val)s\" não é válida no campo %(attr)s"
+
+#, python-format
msgid "Network %(network_id)s could not be found."
msgstr "Rede %(network_id)s não foi encontrada."
@@ -2517,6 +2798,10 @@ msgstr "A rede não pôde ser localizada com cidr %(cidr)s."
msgid "Network driver does not support this function."
msgstr "O driver de rede não suporta essa função."
+#, python-format
+msgid "Network host %(host)s has zero fixed IPs in network %(network_id)s."
+msgstr "O host da rede %(host)s possui zero IPs fixos na rede %(network_id)s."
+
msgid "Network label is required"
msgstr "O rótulo da rede é necessário"
@@ -2551,6 +2836,10 @@ msgstr "O novo volume deve ser removido para a troca."
msgid "New volume must be the same size or larger."
msgstr "O novo volume deve ser do mesmo tamanho ou maior."
+#, python-format
+msgid "No Block Device Mapping with id %(id)s."
+msgstr "Nenhum Mapeamento de Dispositivo de Bloco com id %(id)s."
+
msgid "No CIDR requested"
msgstr "Nenhum CIDR solicitado"
@@ -2564,6 +2853,9 @@ msgstr "Sem Corpo da Solicitação"
msgid "No Unique Match Found."
msgstr "Nenhuma Correspondência Exclusiva Localizada."
+msgid "No access_url in connection_info. Cannot validate protocol"
+msgstr "Nenhum access_url em connection_info. Não é possível validar protocolo"
+
msgid "No adminPass was specified"
msgstr "Nenhum adminPass foi especificado"
@@ -2612,6 +2904,9 @@ msgstr "Nenhum IP fixo associado ao ID %(id)s."
msgid "No fixed IP found."
msgstr "IP fixo não localizado."
+msgid "No fixed IPs associated to instance"
+msgstr "Nenhum IP fixo associado à instância"
+
msgid "No floating IP addresses have been defined."
msgstr "Nenhum endereço IP flutuante foi definido."
@@ -2646,6 +2941,17 @@ msgstr "Nenhum ID de correspondência para a URL %s foi localizado."
msgid "No more available networks."
msgstr "Não há mais redes disponíveis."
+msgid "No more floating IPs available."
+msgstr "Nenhum IP flutuante disponível."
+
+#, python-format
+msgid "No more floating IPs in pool %s."
+msgstr "Sem IPs flutuantes no conjunto %s."
+
+#, python-format
+msgid "No mount points found in %(root)s of %(image)s"
+msgstr "Nenhum ponto de montagem localizado em %(root)s de %(image)s"
+
msgid "No networks defined."
msgstr "Nenhuma rede definida."
@@ -2672,9 +2978,6 @@ msgstr "Nenhum corpo da solicitação"
msgid "No root disk defined."
msgstr "Nenhum disco raiz definido."
-msgid "No rule for the specified parameters."
-msgstr "Não existe regra para os parâmetros especificados"
-
msgid "No suitable network for migrate"
msgstr "Nenhuma rede adequada para migração"
@@ -2710,10 +3013,6 @@ msgstr "Não é possível adquirir uma porta livre para %(host)s"
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "Não é possível ligar %(host)s:%(port)d, %(error)s"
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "Não permitido modificar atributos para a imagem %s"
-
msgid "Not an rbd snapshot"
msgstr "Não uma captura instantânea de rbd"
@@ -2762,6 +3061,10 @@ msgid "Old volume is attached to a different instance."
msgstr "Um volume antigo está anexado a uma instância diferente."
#, python-format
+msgid "One or more hosts already in availability zone(s) %s"
+msgstr "Um ou mais hosts já na(s) zona(s) de disponibilidade %s"
+
+#, python-format
msgid ""
"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
"%(unit_string)s."
@@ -2786,9 +3089,6 @@ msgstr ""
msgid "Only host parameter can be specified"
msgstr "Somente o parâmetro do host pode ser especificado"
-msgid "Only instances implemented"
-msgstr "Apenas instâncias implementadas"
-
msgid "Only root certificate can be retrieved."
msgstr "Apenas o certificado raiz pode ser recuperado."
@@ -2834,6 +3134,10 @@ msgid "PCI device %(id)s not found"
msgstr "Dispositivo PCI %(id)s não localizado"
#, python-format
+msgid "PCI device request %(requests)s failed"
+msgstr "Solicitação de dispositivo PCI %(requests)s falhou"
+
+#, python-format
msgid ""
"PCS doesn't support images in %s format. You should either set "
"force_raw_images=True in config or upload an image in ploop or raw format."
@@ -2866,6 +3170,16 @@ msgid "Page size %(pagesize)s is not supported by the host."
msgstr "Tamanho da página %(pagesize)s não é suportado pelo host."
#, python-format
+msgid ""
+"Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. "
+"Check your Neutron configuration to validate that the macvtap parameters are "
+"correct."
+msgstr ""
+"Parâmetros %(missing_params)s não presentes em vif_details para vif "
+"%(vif_id)s. Verifique a configuração do Neutron para validar se os "
+"parâmetros macvtap estão corretos."
+
+#, python-format
msgid "Path %s must be LVM logical volume"
msgstr "O caminho %s deve ser um volume lógico LVM"
@@ -2950,6 +3264,10 @@ msgstr "Ação de watchdog fornecida (%(action)s) não é suportada."
msgid "QEMU guest agent is not enabled"
msgstr "O agente convidado QEMU não está ativado"
+#, python-format
+msgid "Quiescing is not supported in instance %(instance_id)s"
+msgstr "Quiesce não é suportado na instância %(instance_id)s"
+
msgid "Quota"
msgstr "Quota"
@@ -2961,6 +3279,14 @@ msgid "Quota could not be found"
msgstr "A cota não pôde ser localizada"
#, python-format
+msgid ""
+"Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s "
+"of %(allowed)s %(overs)s"
+msgstr ""
+"Cota excedida para %(overs)s: Solicitados %(req)s, mas já usados %(used)s "
+"%(allowed)s %(overs)s"
+
+#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr "Cota excedida para os recursos: %(overs)s"
@@ -3015,6 +3341,14 @@ msgstr ""
#, python-format
msgid ""
+"Quota limit %(limit)s for %(resource)s must be in the range of -1 and "
+"%(max)s."
+msgstr ""
+"O limite de cota %(limit)s para %(resource)s deve estar na faixa de -1 e "
+"%(max)s."
+
+#, python-format
+msgid ""
"Quota limit %(limit)s for %(resource)s must be less than or equal to "
"%(maximum)s."
msgstr ""
@@ -3045,6 +3379,15 @@ msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr ""
"Número máximo de novas tentativas atingido ao tentar desconectar o VBD %s"
+#, python-format
+msgid ""
+"Relative blockcommit support was not detected. Libvirt '%s' or later is "
+"required for online deletion of file/network storage-backed volume snapshots."
+msgstr ""
+"Suporte a blockcommit relativo não foi detectado. Libvirt '%s' ou mais "
+"recente é necessário para exclusão on-line de capturas instantâneas de "
+"volume voltado para o armazenamento de arquivo/rede."
+
msgid "Request body and URI mismatch"
msgstr "Corpo do pedido e incompatibilidade URI"
@@ -3309,10 +3652,22 @@ msgstr "O serviço com host %(host)s binário %(binary)s existe."
msgid "Service with host %(host)s topic %(topic)s exists."
msgstr "O serviço com host %(host)s tópico %(topic)s existe."
+msgid "Set admin password is not supported"
+msgstr "Definir senha admin não é suportado"
+
#, python-format
msgid "Shadow table with name %(name)s already exists."
msgstr "A tabela de sombra com o nome %(name)s já existe."
+#, python-format
+msgid "Share '%s' is not supported"
+msgstr "O compartilhamento '%s' não é suportado"
+
+#, python-format
+msgid "Share level '%s' cannot have share configured"
+msgstr ""
+"O nível de compartilhamento '%s' não pode ter compartilhamento configurado"
+
msgid "Should we use a CA for each project?"
msgstr "Devemos usar um CA para cada projeto?"
@@ -3323,9 +3678,6 @@ msgstr ""
"Redução do sistema de arquivos com resize2fs falhou, verifique se você tem "
"espaço livre suficiente em disco."
-msgid "Signature not provided"
-msgstr "Assinatura não fornecida"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "A captura instantânea %(snapshot_id)s não pôde ser localizada."
@@ -3447,11 +3799,17 @@ msgstr ""
"Por favor crie uma base de dados utilizando 'nova-manage db sync' antes de "
"executar este comando."
+msgid "The backlog must be more than 0"
+msgstr "O backlog deve ser maior que 0"
+
#, python-format
msgid "The console port range %(min_port)d-%(max_port)d is exhausted."
msgstr ""
"O intervalo de portas do console %(min_port)d-%(max_port)d está esgotado."
+msgid "The created instance's disk would be too small."
+msgstr "O disco da instância criada seria muito pequeno."
+
msgid "The current driver does not support preserving ephemeral partitions."
msgstr "O driver atual não suporta a preservação partições temporárias."
@@ -3462,6 +3820,13 @@ msgstr "A política de PBM padrão não existe no backend."
msgid "The firewall filter for %s does not exist"
msgstr "O filtro de firewall para %s não existe"
+msgid "The floating IP request failed with a BadRequest"
+msgstr "A solicitação de IP flutuante falhou com um BadRequest"
+
+#, python-format
+msgid "The group %(group_name)s must be configured with an id."
+msgstr "O grupo %(group_name)s deve ser configurado com um ID."
+
msgid "The input is not a string or unicode"
msgstr "A entrada não é uma sequência ou unicode"
@@ -3517,6 +3882,11 @@ msgstr ""
"O intervalo de rede não é grande o suficiente para ajustar %(num_networks)s "
"redes. O tamanho da rede é %(network_size)s"
+#, python-format
+msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
+msgstr ""
+"O número de portas definidas: %(ports)d está acima do limite: %(quota)d"
+
msgid "The only partition should be partition 1."
msgstr "A única partição deve ser a partição 1."
@@ -3558,6 +3928,10 @@ msgstr ""
"O serviço do driver de grupo de serviços %(driver)s está temporariamente "
"indisponível."
+#, python-format
+msgid "The specified cluster '%s' was not found in vCenter"
+msgstr "O cluster especificado '%s' não foi localizado no vCenter"
+
msgid ""
"The string containing the reason for disabling the service contains invalid "
"characters or is too long."
@@ -3645,6 +4019,17 @@ msgid "There are not enough hosts available."
msgstr "Não há hosts suficientes disponíveis."
#, python-format
+msgid ""
+"There are still %(count)i unmigrated flavor records. Migration cannot "
+"continue until all instance flavor records have been migrated to the new "
+"format. Please run `nova-manage db migrate_flavor_data' first."
+msgstr ""
+"Ainda há %(count)i registros de tipo não migrados. A migração não pode "
+"continuar até que todos os registros de tipo de instância tenham sido "
+"migrados para o novo formato. Execute `nova-manage db migrate_flavor_data' "
+"primeiro."
+
+#, python-format
msgid "There is no such action: %s"
msgstr "Essa ação não existe: %s"
@@ -3682,8 +4067,11 @@ msgstr "Tempo limite de espera para que o dispositivo %s seja criado"
msgid "Timeout waiting for response from cell"
msgstr "Aguardando tempo limite para a resposta da célula"
-msgid "Timestamp failed validation."
-msgstr "Falha do registro de data e hora na validação."
+#, python-format
+msgid "Timeout while checking if we can live migrate to host: %s"
+msgstr ""
+"Tempo limite atingido ao verificar se é possível migrar em tempo real para o "
+"host: %s"
msgid "To and From ports must be integers"
msgstr "Portas Para e De devem ser números inteiros"
@@ -3698,17 +4086,22 @@ msgid ""
msgstr ""
"Muitos endereços IP serão gerados. Aumente /%s para reduzir o número gerado."
-msgid "Too many failed authentications."
-msgstr "Muitas falhas de autenticação."
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr "Tipo e Código devem ser números inteiros para o tipo de protocolo ICMP"
+msgid "UEFI is not supported"
+msgstr "UEFI não é suportado"
+
msgid "UUID is required to delete Neutron Networks"
msgstr "UUID é requerido para excluir Redes Neutron"
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "Não é possível associar o Endereço IP; nenhum fixed_ips."
+#, python-format
+msgid ""
+"Unable to associate floating IP %(address)s to fixed IP %(fixed_address)s "
+"for instance %(id)s. Error: %(error)s"
+msgstr ""
+"Não é possível associar o IP flutuante %(address)s ao IP fixo "
+"%(fixed_address)s para a instância %(id)s. Erro: %(error)s"
msgid "Unable to authenticate Ironic client."
msgstr "Não é possível autenticar cliente Ironic."
@@ -3726,6 +4119,9 @@ msgstr ""
"Não é possível entrar em contato com o agente convidado. A chamada a seguir "
"atingiu o tempo limite: %(method)s"
+msgid "Unable to create Barbican Client without project_id."
+msgstr "Não é possível criar o Barbican Client sem project_id."
+
msgid "Unable to create dns domain"
msgstr "Não é possível criar o domínio dns"
@@ -3833,6 +4229,12 @@ msgstr "Não é possível obter o domínio dns"
msgid "Unable to get dns entry"
msgstr "Não é possível obter a entrada dns"
+msgid "Unable to get host UUID: /etc/machine-id does not exist"
+msgstr "Não é possível obter UUID do host: /etc/machine-id não existe"
+
+msgid "Unable to get host UUID: /etc/machine-id is empty"
+msgstr "Não é possível obter UUID do host: /etc/machine-id está vazio"
+
msgid "Unable to get rdp console, functionality not implemented"
msgstr "Não é possível obter console do rdp, funcionalidade não implementada"
@@ -3981,6 +4383,10 @@ msgid "Unknown argument: port"
msgstr "Argumento desconhecido: port"
#, python-format
+msgid "Unknown auth plugin: %s"
+msgstr "Plugin de autenticação desconhecido: %s"
+
+#, python-format
msgid "Unknown chain: %r"
msgstr "Cadeia desconhecida: %r"
@@ -3994,9 +4400,6 @@ msgstr ""
msgid "Unknown delete_info type %s"
msgstr "Tipo de delete_info desconhecido %s"
-msgid "Unknown error occurred."
-msgstr "Ocorreu um erro desconhecido."
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "image_type desconhecido=%s"
@@ -4136,6 +4539,10 @@ msgstr ""
"O valor (%(value)s) do grupo de parâmetros%(property)s é inválido. Conteúdo "
"limitado a '%(allowed)s'."
+#, python-format
+msgid "Value must be >= 0 for field %s"
+msgstr "O valor deve ser >= 0 para o campo %s"
+
msgid "Value required for 'scality_sofs_config'"
msgstr "Valor necessário para 'scality_sofs_config'"
@@ -4170,6 +4577,10 @@ msgid "Virtual machine mode '%(vmmode)s' is not recognised"
msgstr "Modo da máquina virtual '%(vmmode)s' não reconhecido"
#, python-format
+msgid "Virtual machine mode '%s' is not valid"
+msgstr "O modo de máquina virtual '%s' não é válido"
+
+#, python-format
msgid ""
"Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
@@ -4211,6 +4622,15 @@ msgstr ""
"Criptografia de volume não é suportada para %(volume_type)s volume "
"%(volume_id)s"
+#, python-format
+msgid ""
+"Volume is smaller than the minimum size specified in image metadata. Volume "
+"size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes."
+msgstr ""
+"O volume é menor que o tamanho mínimo especificado nos metadados de imagem. "
+"O tamanho do volume é %(volume_size)i bytes; o tamanho mínimo é "
+"%(image_min_disk)i bytes."
+
msgid "Volume must be attached in order to detach."
msgstr "O volume deve estar conectado para que seja removido."
@@ -4218,12 +4638,6 @@ msgid "Volume resource quota exceeded"
msgstr "Quota de recurso de volume excedida."
#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
-msgstr ""
-"O volume configura o tamanho de bloco, mas o libvirt '%s' ou mais recente é "
-"necessário."
-
-#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
@@ -4240,6 +4654,10 @@ msgstr ""
"superior é necessário, qemu %(qemu)s ou superior é necessário."
#, python-format
+msgid "WARNING: fixed IP %s allocated to missing instance"
+msgstr "AVISO: IP fixo %s alocado para instância ausente"
+
+#, python-format
msgid ""
"We do not support scheme '%s' under Python < 2.7.4, please use http or https"
msgstr "Não suportamos esquema ‘%s' sob Python < 2.7.4, use http ou https"
@@ -4268,12 +4686,18 @@ msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
msgstr ""
"Tipo errado de método de gancho. Somente o tipo 'pré' e ‘pós' permitido"
+msgid "X-Forwarded-For is missing from request."
+msgstr "X-Forwarded-For está ausente da solicitação."
+
msgid "X-Instance-ID header is missing from request."
msgstr "O cabeçalho X-Instance-ID está ausente da solicitação."
msgid "X-Instance-ID-Signature header is missing from request."
msgstr "Cabeçalho X-Instance-ID-Signature está ausente da solicitação."
+msgid "X-Metadata-Provider is missing from request."
+msgstr "X-Metadata-Provider está ausente da solicitação."
+
msgid "X-Tenant-ID header is missing from request."
msgstr "Cabeçalho X-Tenant-ID está ausente da solicitação."
@@ -4304,6 +4728,17 @@ msgstr ""
msgid ""
"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
"your destination node does not support retrieving listen addresses. In "
+"order for live migration to work properly you must either disable serial "
+"console or upgrade your libvirt version."
+msgstr ""
+"A versão libvirt não suporta a sinalização VIR_DOMAIN_XML_MIGRATABLE ou o nó "
+"de destino não suporta a recuperação de endereços de atendimento. Para que a "
+"migração em tempo real funcione corretamente, deve-se desativar o console "
+"serial ou fazer upgrade da versão libvirt."
+
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
+"your destination node does not support retrieving listen addresses. In "
"order for live migration to work properly, you must configure the graphics "
"(VNC and/or SPICE) listen addresses to be either the catch-all address "
"(0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)."
@@ -4314,6 +4749,15 @@ msgstr ""
"(VNC e/ou SPICE) atender endereços seja o endereço catch-all (0.0.0.0 ou ::) "
"ou o endereço local (127.0.0.1 ou ::1)."
+msgid "Zero fixed IPs could be found."
+msgstr "Zero IPs fixos não puderam ser localizados."
+
+msgid "Zero floating IPs available."
+msgstr "Nenhum IPs flutuantes disponíveis."
+
+msgid "Zero floating IPs exist."
+msgstr "Existem zero IPs flutuantes."
+
msgid "Zone"
msgstr "Zona"
@@ -4345,9 +4789,6 @@ msgstr "block_device_mapping deve ser uma lista"
msgid "block_device_mapping_v2 must be a list"
msgstr "block_device_mapping_v2 deve ser uma lista"
-msgid "can't build a valid rule"
-msgstr "Não é possível criar uma regra válida"
-
msgid "cannot delete non-existent key"
msgstr "não é possível excluir a chave não existente"
@@ -4357,6 +4798,9 @@ msgstr "não é possível armazenar chaves arbitrárias"
msgid "cannot understand JSON"
msgstr "não é possível entender JSON"
+msgid "cell_uuid must be set"
+msgstr "cell_uuid deve ser definido"
+
msgid "clone() is not implemented"
msgstr "clone() não está implementado"
@@ -4418,6 +4862,12 @@ msgstr "falha ao gerar a impressão digital"
msgid "filename cannot be None"
msgstr "nome de arquivo não pode ser Nenhum"
+msgid "floating IP is already associated"
+msgstr "O IP flutuante já está associado"
+
+msgid "floating IP not found"
+msgstr "IP flutuante não localizado"
+
#, python-format
msgid "fmt=%(fmt)s backed by: %(backing_file)s"
msgstr "fmt=%(fmt)s retornado por: %(backing_file)s"
@@ -4460,13 +4910,6 @@ msgstr "Imagem"
msgid "image already mounted"
msgstr "imagem já montada"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "imagem de %(instance)s às %(now)s"
-
-msgid "imageLocation is required"
-msgstr "imageLocation é obrigatório"
-
msgid "index"
msgstr "índice"
@@ -4507,6 +4950,9 @@ msgstr "kernel"
msgid "keymgr.fixed_key not defined"
msgstr "keymgr.fixed_key não definido"
+msgid "l3driver call to add floating IP failed"
+msgstr "Falha na chamada l3driver para incluir IP flutuante"
+
msgid "launched"
msgstr "disparado"
@@ -4550,9 +4996,6 @@ msgstr "dispositivo nbd %s não mostrado"
msgid "nbd unavailable: module not loaded"
msgstr "nbd indisponível: módulo não carregado"
-msgid "need group_name or group_id"
-msgstr "é necessário o group_name ou group_id"
-
msgid "network"
msgstr "rede"
@@ -4570,18 +5013,22 @@ msgstr "Nó"
msgid "not able to execute ssh command: %s"
msgstr "não foi possível executar o comando ssh: %s"
+msgid ""
+"nova-idmapshift is a tool that properly sets the ownership of a filesystem "
+"for use with linux user namespaces. This tool can only be used with linux "
+"lxc containers. See the man page for details."
+msgstr ""
+"nova-idmapshift é uma ferramenta que configura corretamente a propriedade de "
+"um sistema de arquivos para uso com namespaces do usuário Linux. Essa "
+"ferramenta só pode ser usada com contêineres lxc Linux. Consulte a página do "
+"manual para obter detalhes."
+
msgid "onSharedStorage must be specified."
msgstr "onSharedStorage deve ser especificado."
-msgid "only group \"all\" is supported"
-msgstr "apenas o grupo \"all\" é suportado"
-
msgid "operation time out"
msgstr "tempo limite da operação"
-msgid "operation_type must be add or remove"
-msgstr "operation_type deve ser add ou remove"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr "os-getConsoleOutput malformada ou ausente no corpo da solicitação"
@@ -4619,9 +5066,6 @@ msgstr "Bibliotecas rbd python não localizadas"
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr "read_deleted pode ser apenas um de 'no', 'yes' ou 'only', não %r"
-msgid "resource_id and tag are required"
-msgstr "resource_id e tag são requeridos"
-
msgid "rpc_port must be integer"
msgstr "rpc_port deve ser um número inteiro"
@@ -4690,9 +5134,6 @@ msgstr "campos não suportados: %s"
msgid "user"
msgstr "usuário"
-msgid "user or group not specified"
-msgstr "usuário ou grupo não especificado"
-
msgid "uuid"
msgstr "uuid"
@@ -4706,6 +5147,9 @@ msgstr ""
"adaptador físico desejado. O vSwitch esperado é %(expected)s, mas o "
"associado é %(actual)s."
+msgid "vcpu"
+msgstr "vcpu"
+
msgid "version should be an integer"
msgstr "a versão deve ser um número inteiro"
@@ -4717,6 +5161,10 @@ msgstr "vg %s deve estar no grupo de volumes LVM"
msgid "vhostuser_sock_path not present in vif_details for vif %(vif_id)s"
msgstr "vhostuser_sock_path ausente no vif_details para vif %(vif_id)s"
+#, python-format
+msgid "vif type %s not supported"
+msgstr "Tipo de vif %s não suportado"
+
msgid "vif_type parameter must be present for this vif_driver implementation"
msgstr ""
"o parâmetro vif_type deve estar presente para esta implementação de "
diff --git a/nova/locale/ru/LC_MESSAGES/nova.po b/nova/locale/ru/LC_MESSAGES/nova.po
index 3d8fb63fac..29233aaae7 100644
--- a/nova/locale/ru/LC_MESSAGES/nova.po
+++ b/nova/locale/ru/LC_MESSAGES/nova.po
@@ -1,24 +1,36 @@
-# Russian translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
-msgid ""
-msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+# Translators:
+# Ilya Alekseyev <ilyaalekseyev@acm.org>, 2013
+# Aleksandr Brezhnev <abrezhnev@gmail.com>, 2013
+# Alexei Rudenko <alexei.rudenko@gmail.com>, 2013
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011
+# lykoz <woof@stopme.net>, 2012
+# Alexei Rudenko <alexei.rudenko@gmail.com>, 2013
+# Stanislav Hanzhin <hanzhin.stas@gmail.com>, 2013
+# dvy <vasily.dubinin@gmail.com>, 2014
+# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Lucas Palm <lapalm@us.ibm.com>, 2016. #zanata
+msgid ""
+msgstr ""
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-09-06 10:17+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-02-03 07:17+0000\n"
+"Last-Translator: Lucas Palm <lapalm@us.ibm.com>\n"
"Language: ru\n"
-"Language-Team: Russian\n"
"Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n"
"%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n"
-"%100>=11 && n%100<=14)? 2 : 3)\n"
-"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
-"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
+"%100>=11 && n%100<=14)? 2 : 3);\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Russian\n"
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
@@ -36,10 +48,32 @@ msgstr ""
"Ñтратегией"
#, python-format
+msgid ""
+"%(desc)r\n"
+"command: %(cmd)r\n"
+"exit code: %(code)r\n"
+"stdout: %(stdout)r\n"
+"stderr: %(stderr)r"
+msgstr ""
+"%(desc)r\n"
+"команда: %(cmd)r\n"
+"код выхода: %(code)r\n"
+"stdout: %(stdout)r\n"
+"stderr: %(stderr)r"
+
+#, python-format
msgid "%(err)s"
msgstr "%(err)s"
#, python-format
+msgid "%(field)s should not be part of the updates."
+msgstr "%(field)s не должно входить в ÑоÑтав обновлений."
+
+#, python-format
+msgid "%(fieldname)s missing field type"
+msgstr "ОтÑутÑтвует тип Ð¿Ð¾Ð»Ñ %(fieldname)s"
+
+#, python-format
msgid "%(host)s:%(port)s: Target closed"
msgstr "%(host)s:%(port)s: Целевой порт закрыт"
@@ -82,6 +116,10 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "Гипервизор %(type)s не поддерживает уÑтройÑтва PCI"
#, python-format
+msgid "%(typename)s in %(fieldname)s is not an instance of Enum"
+msgstr "%(typename)s в %(fieldname)s не ÑвлÑетÑÑ ÑкземплÑром Enum"
+
+#, python-format
msgid "%(value_name)s must be <= %(max_value)d"
msgstr "%(value_name)s должно быть <= %(max_value)d"
@@ -104,6 +142,10 @@ msgstr ""
"Значение должно быть больше 0"
#, python-format
+msgid "%r failed. Not Retrying."
+msgstr "Сбой %r. Повторной попытки не будет."
+
+#, python-format
msgid "%r failed. Retrying."
msgstr "%r ошибка. ВыполнÑетÑÑ Ð¿Ð¾Ð²Ñ‚Ð¾Ñ€."
@@ -132,6 +174,10 @@ msgid "%s must be either 'MANUAL' or 'AUTO'."
msgstr "%s должен быть 'MANUAL' или 'AUTO'."
#, python-format
+msgid "'%(other)s' should be an instance of '%(cls)s'"
+msgstr "'%(other)s' должен быть ÑкземплÑром '%(cls)s'"
+
+#, python-format
msgid "'%s' is either missing or empty."
msgstr "'%s' отÑутÑтвует или ÑвлÑетÑÑ Ð¿ÑƒÑтым."
@@ -159,6 +205,10 @@ msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr ""
"Ð˜Ð¼Ñ Ð¼Ð¾Ð´ÐµÐ»Ð¸ CPU не должно уÑтанавливатьÑÑ, когда запрошена модель CPU хоÑта"
+#, python-format
+msgid "A NetworkModel is required in field %s"
+msgstr "Ð’ поле %s требуетÑÑ NetworkModel "
+
msgid ""
"A unique ID given to each file system. This is value is set in Glance and "
"agreed upon here so that the operator knowns they are dealing with the same "
@@ -192,9 +242,6 @@ msgstr ""
msgid "API version %(version)s is not supported on this method."
msgstr "ВерÑÐ¸Ñ API %(version)s не поддерживаетÑÑ Ñтим методом."
-msgid "Access key not provided"
-msgstr "Ключ доÑтупа не указан"
-
msgid "Access list not available for public flavors."
msgstr "СпиÑок прав доÑтупа не доÑтупен Ð´Ð»Ñ Ð¾Ð±Ñ‰Ð¸Ñ… разновидноÑтей."
@@ -225,6 +272,9 @@ msgstr "Ðевозможно преобразовать адреÑ."
msgid "Address not specified"
msgstr "ÐÐ´Ñ€ÐµÑ Ð½Ðµ задан"
+msgid "Affinity instance group policy was violated."
+msgstr "Ðарушена ÑÑ‚Ñ€Ð°Ñ‚ÐµÐ³Ð¸Ñ Ð³Ñ€ÑƒÐ¿Ð¿Ñ‹ ÑкземплÑров привÑзки."
+
#, python-format
msgid "Agent does not support the call: %(method)s"
msgstr "Ðгент не поддерживает вызов: %(method)s"
@@ -289,6 +339,10 @@ msgstr "Ðарушена ÑÑ‚Ñ€Ð°Ñ‚ÐµÐ³Ð¸Ñ Ñтрогой раÑпределеÐ
msgid "Architecture name '%(arch)s' is not recognised"
msgstr "Ð˜Ð¼Ñ Ð°Ñ€Ñ…Ð¸Ñ‚ÐµÐºÑ‚ÑƒÑ€Ñ‹ %(arch)s не раÑпознано"
+#, python-format
+msgid "Architecture name '%s' is not valid"
+msgstr "ÐедопуÑтимое Ð¸Ð¼Ñ Ð°Ñ€Ñ…Ð¸Ñ‚ÐµÐºÑ‚ÑƒÑ€Ñ‹: '%s'"
+
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr "Ðргумент 'type' Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ·Ð°Ð³Ñ€ÑƒÐ·ÐºÐ¸ не ÑвлÑетÑÑ HARD или SOFT"
@@ -372,9 +426,22 @@ msgstr "Ðеверный формат volumeId: volumeId имеет неверн
msgid "Binary"
msgstr "Двоичный"
+#, python-format
+msgid ""
+"Binding failed for port %(port_id)s, please check neutron logs for more "
+"information."
+msgstr ""
+"Ошибка ÑÐ¾Ð·Ð´Ð°Ð½Ð¸Ñ Ð¿Ñ€Ð¸Ð²Ñзки Ð´Ð»Ñ Ð¿Ð¾Ñ€Ñ‚Ð° %(port_id)s. Дополнительные ÑÐ²ÐµÐ´ÐµÐ½Ð¸Ñ "
+"можно найти в протоколах neutron."
+
msgid "Blank components"
msgstr "ПуÑтые компоненты"
+msgid ""
+"Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size"
+msgstr ""
+"ПуÑтые тома (source: 'blank', dest: 'volume') должны иметь ненулевой размер"
+
#, python-format
msgid "Block Device %(id)s is not bootable."
msgstr "Блочное уÑтройÑтво не загрузочное %(id)s."
@@ -470,6 +537,21 @@ msgstr "ЧиÑло CPU %(cpunum)d превышает макÑимальное (%
msgid "CPU number %(cpuset)s is not assigned to any node"
msgstr "ЧиÑло CPU %(cpuset)s не назначено ни одному узлу"
+#, python-format
+msgid "CPU pinning is not supported by the host: %(reason)s"
+msgstr "Прикрепление CPU не поддерживаетÑÑ Ñ…Ð¾Ñтом: %(reason)s"
+
+#, python-format
+msgid ""
+"CPU set to pin/unpin %(requested)s must be a subset of known CPU set "
+"%(cpuset)s"
+msgstr ""
+"Группа CPU Ð´Ð»Ñ Ð¿Ñ€Ð¸ÐºÑ€ÐµÐ¿Ð»ÐµÐ½Ð¸Ñ/Ð¾Ñ‚ÐºÑ€ÐµÐ¿Ð»ÐµÐ½Ð¸Ñ %(requested)s должна входить в "
+"извеÑтную группу CPU %(cpuset)s"
+
+msgid "Can not add access to a public flavor."
+msgstr "Ðевозможно добавить права доÑтупа к общедоÑтупной разновидноÑти."
+
msgid "Can not find requested image"
msgstr "Ðевозможно найти запрошенный образ"
@@ -516,6 +598,10 @@ msgstr ""
"запиÑÑми, которые невозможно идентифицировать на удаленном хоÑте."
#, python-format
+msgid "Cannot access 'scality_sofs_config': %s"
+msgstr "Ðет доÑтупа к scality_sofs_config: %s"
+
+#, python-format
msgid "Cannot add host %(host)s in aggregate %(id)s: host exists"
msgstr ""
"Ðе удалоÑÑŒ добавить хоÑÑ‚ %(host)s в ÑоÑтавной объект %(id)s: хоÑÑ‚ ÑущеÑтвует"
@@ -541,6 +627,16 @@ msgstr "Блочный Ð¿ÐµÑ€ÐµÐ½Ð¾Ñ ÑкземплÑра %s Ñо ÑвÑзанÐ
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr "Ðевозможно вызвать %(method)s в неприÑвоенном объекте %(objtype)s"
+msgid ""
+"Cannot create default bittorrent URL without xenserver.torrent_base_url "
+"configuration option set."
+msgstr ""
+"Ðевозможно Ñоздать URL bittorrent по умолчанию без наÑтроенного параметра "
+"конфигурации xenserver.torrent_base_url. "
+
+msgid "Cannot execute /sbin/mount.sofs"
+msgstr "Ðе удалоÑÑŒ выполнить /sbin/mount.sofs"
+
msgid "Cannot find SR of content-type ISO"
msgstr "Ðевозможно найти SR типа Ñодержимого ISO"
@@ -550,6 +646,11 @@ msgstr "Ðевозможно найти SR Ð´Ð»Ñ Ñ‡Ñ‚ÐµÐ½Ð¸Ñ/запиÑи VDI.
msgid "Cannot find image for rebuild"
msgstr "Ðевозможно найти образ Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐºÐ¾Ð¼Ð¿Ð¾Ð½Ð¾Ð²ÐºÐ¸"
+msgid "Cannot mount Scality SOFS, check syslog for errors"
+msgstr ""
+"Ðе удалоÑÑŒ Ñмонтировать Scality SOFS. Проверьте ÑиÑтемный протокол на "
+"наличие ошибок"
+
#, python-format
msgid ""
"Cannot pin/unpin cpus %(requested)s from the following pinned set %(pinned)s"
@@ -631,6 +732,9 @@ msgstr ""
msgid "Cell name cannot be empty"
msgstr "Ð˜Ð¼Ñ Ñчейки не может быть пуÑтой."
+msgid "Cell name cannot contain '!', '.' or '@'"
+msgstr "Ð’ имени Ñчейки не разрешены Ñимволы '!', '.' и '@'"
+
msgid "Cell type must be 'parent' or 'child'"
msgstr "Типом Ñчейки должен быть 'parent' или 'child'"
@@ -695,6 +799,22 @@ msgstr ""
"ÐšÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñила Ñвную модель CPU, но гипервизор текущей libvirt '%s' "
"не поддерживает выбор моделей CPU"
+#, python-format
+msgid ""
+"Conflict updating instance %(instance_uuid)s, but we were unable to "
+"determine the cause"
+msgstr ""
+"Конфликт при обновлении ÑкземплÑра %(instance_uuid)s, но не удалоÑÑŒ "
+"определить причину."
+
+#, python-format
+msgid ""
+"Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. "
+"Actual: %(actual)s"
+msgstr ""
+"Конфликт при обновлении ÑкземплÑра %(instance_uuid)s. ОжидалоÑÑŒ: "
+"%(expected)s. ФактичеÑкое значение: %(actual)s"
+
msgid "Conflicting policies configured!"
msgstr "ÐаÑтроены конфликтующие Ñтратегии!"
@@ -779,10 +899,6 @@ msgstr "Ðе удалоÑÑŒ найти двоичный файл %(binary)s на
msgid "Could not find config at %(path)s"
msgstr "Ðевозможно найти конфигурацию по адреÑу %(path)s"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "Ðевозможно найти пары ключей: %s"
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr "Ðе удалоÑÑŒ найти ÑÑылки на хранилища данных, иÑпользуемых VM."
@@ -816,14 +932,6 @@ msgstr "Ðевозможно передать образ %(image_id)s"
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "Ðе удалоÑÑŒ получить локальный IP-Ð°Ð´Ñ€ÐµÑ ÑÑылки %(interface)s :%(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"Ðе удалоÑÑŒ оÑтановить ÑкземплÑÑ€ %(instance)s в течение 1 чаÑа. Текущее "
-"значение vm_state: %(vm_state)s, текущее значение task_state: %(task_state)s"
-
msgid "Create networks failed"
msgstr "Ðе удалоÑÑŒ Ñоздать Ñети"
@@ -1029,6 +1137,26 @@ msgstr ""
"%(error_code)s] %(ex)s"
#, python-format
+msgid ""
+"Error from libvirt while set password for username \"%(user)s\": [Error Code "
+"%(error_code)s] %(ex)s"
+msgstr ""
+"Ошибка в libvirt при уÑтановке Ð¿Ð°Ñ€Ð¾Ð»Ñ Ð´Ð»Ñ Ð¸Ð¼ÐµÐ½Ð¸ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ \"%(user)s\": "
+"[Код ошибки %(error_code)s] %(ex)s"
+
+#, python-format
+msgid ""
+"Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs "
+"(%(e)s)"
+msgstr ""
+"Ошибка при монтировании %(device)s на %(dir)s в образе %(image)s Ñ "
+"libguestfs (%(e)s)"
+
+#, python-format
+msgid "Error mounting %(image)s with libguestfs (%(e)s)"
+msgstr "Ошибка при монтировании %(image)s Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ libguestfs (%(e)s)"
+
+#, python-format
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "Ошибка при Ñоздании монитора реÑурÑов: %(monitor)s"
@@ -1052,6 +1180,10 @@ msgstr ""
"переноÑа"
#, python-format
+msgid "Exceeded maximum number of retries. %(reason)s"
+msgstr "Превышено макÑимальное количеÑтво попыток. %(reason)s"
+
+#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr "ОжидалÑÑ uuid, а получен %(uuid)s."
@@ -1059,21 +1191,6 @@ msgstr "ОжидалÑÑ uuid, а получен %(uuid)s."
msgid "Expected object of type: %s"
msgstr "ОжидалÑÑ Ð¾Ð±ÑŠÐµÐºÑ‚ типа: %s"
-msgid "Expecting a list of resources"
-msgstr "ОжидаетÑÑ ÑпиÑок реÑурÑов"
-
-msgid "Expecting a list of tagSets"
-msgstr "ОжидаетÑÑ ÑпиÑок tagSets"
-
-msgid "Expecting both key and value to be set"
-msgstr "ОжидаетÑÑ ÑƒÑтановка и ключа, и значениÑ"
-
-msgid "Expecting key to be set"
-msgstr "Ожидание уÑтановки ключа"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "ОжидаетÑÑ, что tagSet предÑтавлÑет Ñобой пары ключ/значение"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "Дополнительный Ñтолбец %(table)s.%(column)s в теневой таблице"
@@ -1091,6 +1208,14 @@ msgstr ""
"Ñтрока"
#, python-format
+msgid "Failed to access port %(port_id)s: %(reason)s"
+msgstr "Ðе удалоÑÑŒ обратитьÑÑ Ðº порту %(port_id)s: %(reason)s"
+
+#, python-format
+msgid "Failed to add bridge: %s"
+msgstr "Ðе удалоÑÑŒ добавить моÑÑ‚: %s"
+
+#, python-format
msgid ""
"Failed to add deploy parameters on node %(node)s when provisioning the "
"instance %(instance)s"
@@ -1241,10 +1366,6 @@ msgstr "Ðе удалоÑÑŒ приоÑтановить ÑкземплÑÑ€: %(rea
msgid "Failed to terminate instance: %(reason)s"
msgstr "Ðе удалоÑÑŒ завершить ÑкземплÑÑ€: %(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "Ошибка при анализе ответа от keystone: %s"
-
msgid "Failure prepping block device."
msgstr "Сбой при подготовке блочного уÑтройÑтва."
@@ -1274,6 +1395,12 @@ msgid "Filename of root Certificate Revocation List"
msgstr "Ð˜Ð¼Ñ Ñ„Ð°Ð¹Ð»Ð° корневого ÑпиÑка отзыва Ñертификатов"
#, python-format
+msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
+msgstr ""
+"ФикÑированный IP %(ip)s не ÑвлÑетÑÑ Ð´Ð¾Ð¿ÑƒÑтимым IP-адреÑом Ð´Ð»Ñ Ñети "
+"%(network_id)s."
+
+#, python-format
msgid "Fixed IP %s has been deleted"
msgstr "ФикÑированный IP %s удален"
@@ -1328,14 +1455,6 @@ msgstr ""
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr ""
-"Дополнительную Ñпецификацию %(id)d разновидноÑти не удалоÑÑŒ Ñоздать или "
-"изменить за %(retries)d попыток."
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
@@ -1379,15 +1498,34 @@ msgstr "Ðе удалоÑÑŒ найти разновидноÑÑ‚ÑŒ Ñ Ð¸Ð¼ÐµÐ½ÐµÐ
msgid "Flavor with name %(name)s already exists."
msgstr "РазновидноÑÑ‚ÑŒ Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ %(name)s уже ÑущеÑтвует."
+#, python-format
+msgid ""
+"Flavor's disk is smaller than the minimum size specified in image metadata. "
+"Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i "
+"bytes."
+msgstr ""
+"Объем диÑка разновидноÑти меньше минимального размера, указанного в "
+"метаданных образа. Объем диÑка разновидноÑти: %(flavor_size)i байт, "
+"минимальный объем - %(image_min_disk)i байт."
+
+#, python-format
+msgid ""
+"Flavor's disk is too small for requested image. Flavor disk is "
+"%(flavor_size)i bytes, image is %(image_size)i bytes."
+msgstr ""
+"ДиÑк разновидноÑти Ñлишком мал Ð´Ð»Ñ Ð·Ð°Ð¿Ñ€Ð¾ÑˆÐµÐ½Ð½Ð¾Ð³Ð¾ образа. ДиÑк разновидноÑти "
+"ÑоÑтавлÑет %(flavor_size)i байт, размер образа - %(image_size)i байт."
+
msgid "Flavor's memory is too small for requested image."
msgstr "ПамÑÑ‚ÑŒ разновидноÑти Ñлишком мала Ð´Ð»Ñ Ð·Ð°Ð¿Ñ€Ð¾ÑˆÐµÐ½Ð½Ð¾Ð³Ð¾ образа."
+#, python-format
+msgid "Floating IP %(address)s association has failed."
+msgstr "Сбой ÑвÑзи нефикÑированного IP-адреÑа %(address)s."
+
msgid "Floating IP allocate failed."
msgstr "Выделение нефикÑированных IP-адреÑов не выполнено."
-msgid "Floating ip is not associated."
-msgstr "ÐефикÑированный IP не ÑвÑзан."
-
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
"meta."
@@ -1455,6 +1593,10 @@ msgstr ""
"переноÑа, в которых учаÑтвует Ñта верÑиÑ, могут привеÑти к потере данных. "
"Обновите Nova на %(server)s и повторите операцию."
+#, python-format
+msgid "Host '%(name)s' is not mapped to any cell"
+msgstr "ХоÑÑ‚ '%(name)s' не привÑзан ни к одной Ñчейке"
+
msgid "Host PowerOn is not supported by the Hyper-V driver"
msgstr "Драйвер Hyper-V не поддерживает включение Ð¿Ð¸Ñ‚Ð°Ð½Ð¸Ñ Ñ…Ð¾Ñта"
@@ -1482,6 +1624,10 @@ msgstr ""
"Драйвер гипервизора не поддерживает метод post_live_migration_at_source"
#, python-format
+msgid "Hypervisor virt type '%s' is not valid"
+msgstr "ÐедопуÑтимый тип виртуализации гипервизора '%s'"
+
+#, python-format
msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised"
msgstr "Тип виртуализации гипервизора %(hv_type)s не раÑпознан"
@@ -1551,8 +1697,9 @@ msgstr "Ключ метаданных образа Ñлишком длинен"
msgid "Image metadata limit exceeded"
msgstr "Ограничение метаданных образа превышено"
-msgid "Image must be available"
-msgstr "Образ должен быть доÑтупен"
+#, python-format
+msgid "Image model '%(image)s' is not supported"
+msgstr "Модель образа '%(image)s' не поддерживаетÑÑ"
msgid "Image not found."
msgstr "образ не найден."
@@ -1677,6 +1824,10 @@ msgid "Instance %(instance_uuid)s does not specify a NUMA topology"
msgstr "Ð’ ÑкземплÑре %(instance_uuid)s не указана Ñ‚Ð¾Ð¿Ð¾Ð»Ð¾Ð³Ð¸Ñ NUMA"
#, python-format
+msgid "Instance %(instance_uuid)s does not specify a migration context."
+msgstr "ЭкземплÑÑ€ %(instance_uuid)s не задает контекÑÑ‚ миграции."
+
+#, python-format
msgid ""
"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while "
"the instance is in this state."
@@ -1710,6 +1861,10 @@ msgstr "ЭкземплÑÑ€ %s не подключен."
msgid "Instance %s not found"
msgstr "ЭкземплÑÑ€ %s не найден"
+#, python-format
+msgid "Instance %s provisioning was aborted"
+msgstr "ПредоÑтавление реÑурÑов Ð´Ð»Ñ ÑкземплÑра %s прервано."
+
msgid "Instance could not be found"
msgstr "ÐšÐ¾Ð¿Ð¸Ñ Ð½Ðµ найдена"
@@ -1781,9 +1936,6 @@ msgstr "Ð˜Ð½Ñ‚ÐµÑ€Ñ„ÐµÐ¹Ñ %(interface)s не найден."
msgid "Invalid Base 64 data for file %(path)s"
msgstr "ÐедопуÑтимые данные Base 64 Ð´Ð»Ñ Ñ„Ð°Ð¹Ð»Ð° %(path)s"
-msgid "Invalid CIDR"
-msgstr "Ðеверный CIDR"
-
msgid "Invalid Connection Info"
msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ Ð¸Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ð¸Ñ Ð¾ Ñоединении"
@@ -1796,10 +1948,6 @@ msgid "Invalid IP format %s"
msgstr "ÐедопуÑтимый формат IP %s"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "ÐедопуÑтимый IP-протокол %(protocol)s"
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "ÐедопуÑтимый протокол IP %(protocol)s."
@@ -1886,6 +2034,10 @@ msgid "Invalid entry: '%s'; Expecting list or dict"
msgstr "ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ: '%s'; ожидаетÑÑ ÑпиÑок или dict"
#, python-format
+msgid "Invalid event name %s"
+msgstr "ÐедопуÑтимое Ð¸Ð¼Ñ Ñлемента %s"
+
+#, python-format
msgid "Invalid event status `%s'"
msgstr "ÐедопуÑтимое ÑоÑтоÑние ÑÐ¾Ð±Ñ‹Ñ‚Ð¸Ñ `%s'"
@@ -1925,6 +2077,10 @@ msgid "Invalid id: %(volume_id)s (expecting \"i-...\")"
msgstr "ÐедопуÑтимый ИД: %(volume_id)s (ожидаетÑÑ \"i-...\")"
#, python-format
+msgid "Invalid image format '%(format)s'"
+msgstr "ÐедопуÑтимый формат образа '%(format)s'"
+
+#, python-format
msgid "Invalid image href %(image_href)s."
msgstr "ÐедопуÑтимый образ href %(image_href)s."
@@ -1968,6 +2124,10 @@ msgid "Invalid key_name provided."
msgstr "ПредоÑтавлен недопуÑтимый key_name."
#, python-format
+msgid "Invalid libvirt version %(version)s"
+msgstr "ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ Ð²ÐµÑ€ÑÐ¸Ñ libvirt: %(version)s"
+
+#, python-format
msgid "Invalid memory page size '%(pagesize)s'"
msgstr "ÐедопуÑтимый размер Ñтраницы памÑти '%(pagesize)s'"
@@ -2085,24 +2245,22 @@ msgid "Invalid usage_type: %s"
msgstr "ÐедопуÑтимый usage_type: %s"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"ÐедопуÑтимое значение '%(ec2_instance_id)s' Ð´Ð»Ñ instanceId. К ÑкземплÑру не "
-"подключен том в корневом каталоге (%(root)s)"
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "Ðеверное значение '%s' Ð´Ð»Ñ Ð¿Ñ€Ð¸Ð½ÑƒÐ´Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð¾Ð³Ð¾ применениÑ."
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "ÐедопуÑтимое значение Ð´Ð»Ñ 'scheduler_max_attempts', должно быть >= 1"
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "ÐедопуÑтимое значение Ð´Ð»Ñ Ð¾Ð¿Ñ†Ð¸Ð¸ ДиÑк конфигурации: %(option)s"
+#, python-format
+msgid ""
+"Invalid vcpu_pin_set config, one or more of the specified cpuset is not "
+"online. Online cpuset(s): %(online)s, requested cpuset(s): %(req)s"
+msgstr ""
+"ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ ÐºÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ vcpu_pin_set, одна или неÑколько заданных групп "
+"cpuset не подключены. Подключенных cpuset: %(online)s, запрошенных cpuset: "
+"%(req)s"
+
msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range."
msgstr "ÐÐµÐ²ÐµÑ€Ð½Ð°Ñ ÐºÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ vcpu_pin_set, вне диапазона cpu гипервизора."
@@ -2226,6 +2384,13 @@ msgstr ""
"СпиÑок файловых ÑиÑтем, наÑтроенных в Ñтом файле в разделах image_file_url:"
"<list entry name>"
+msgid ""
+"Live migration can not be used without shared storage except a booted from "
+"volume VM which does not have a local disk."
+msgstr ""
+"Выполнение оперативной миграции невозможно без общей ÑиÑтемы хранениÑ. "
+"ИÑключение: загрузка Ñ Ð’Ðœ тома, не имеющей локального диÑка."
+
msgid "Live migration is supported starting with Hyper-V Server 2012"
msgstr "Оперативный Ð¿ÐµÑ€ÐµÐ½Ð¾Ñ Ð¿Ð¾Ð´Ð´ÐµÑ€Ð¶Ð¸Ð²Ð°ÐµÑ‚ÑÑ, Ð½Ð°Ñ‡Ð¸Ð½Ð°Ñ Ñ Hyper-V Server 2012"
@@ -2372,6 +2537,9 @@ msgstr "ОтÑутÑтвует поле причины выключениÑ"
msgid "Missing flavorRef attribute"
msgstr "ОтÑутÑтвует атрибут flavorRef"
+msgid "Missing forced_down field"
+msgstr "ОтÑутÑтвует поле forced_down "
+
msgid "Missing imageRef attribute"
msgstr "ОтÑутÑтвует атрибут imageRef"
@@ -2464,6 +2632,10 @@ msgid "Netmask to push into openvpn config"
msgstr "МаÑка Ñети Ð´Ð»Ñ Ð¿Ð¾Ð¼ÐµÑ‰ÐµÐ½Ð¸Ñ Ð² конфигурацию openvpn"
#, python-format
+msgid "Network \"%(val)s\" is not valid in field %(attr)s"
+msgstr "ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ Ñеть \"%(val)s\" в поле %(attr)s"
+
+#, python-format
msgid "Network %(network_id)s could not be found."
msgstr "Сеть %(network_id)s не найдена."
@@ -2540,6 +2712,10 @@ msgstr "Ð”Ð»Ñ Ð¿Ð¾Ð´ÐºÐ°Ñ‡ÐºÐ¸ новый том необходимо отклÑ
msgid "New volume must be the same size or larger."
msgstr "Размер нового тома должен быть тем же или большим."
+#, python-format
+msgid "No Block Device Mapping with id %(id)s."
+msgstr "ОтÑутÑтвует ÑвÑзь блочного уÑтройÑтва Ñ Ð˜Ð” %(id)s."
+
msgid "No CIDR requested"
msgstr "CIDR не запрошен"
@@ -2553,6 +2729,10 @@ msgstr "Ðет тела запроÑа"
msgid "No Unique Match Found."
msgstr "Уникальное ÑоответÑтвие не найдено."
+msgid "No access_url in connection_info. Cannot validate protocol"
+msgstr ""
+"Ð’ connection_info отÑутÑтвует access_url. Проверка протокола невозможна."
+
msgid "No adminPass was specified"
msgstr "adminPass не был задан"
@@ -2635,6 +2815,10 @@ msgstr "Ðе удалоÑÑŒ найти ÑоответÑтвующий ИД длÑ
msgid "No more available networks."
msgstr "ДоÑтупных Ñетей больше нет."
+#, python-format
+msgid "No mount points found in %(root)s of %(image)s"
+msgstr "Точки Ð¼Ð¾Ð½Ñ‚Ð¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ Ð½Ðµ найдены в %(root)s из %(image)s"
+
msgid "No networks defined."
msgstr "Сети не определены."
@@ -2661,9 +2845,6 @@ msgstr "Ðет тела запроÑа"
msgid "No root disk defined."
msgstr "Ðе определен корневой диÑк."
-msgid "No rule for the specified parameters."
-msgstr "ОтÑутÑтвует правило Ð´Ð»Ñ Ð·Ð°Ð´Ð°Ð½Ð½Ñ‹Ñ… параметров."
-
msgid "No suitable network for migrate"
msgstr "Ðет подходÑщей Ñети Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ½Ð¾Ñа"
@@ -2697,10 +2878,6 @@ msgstr "Ðе удалоÑÑŒ получить Ñвободный порт длÑ
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "Ðе удалоÑÑŒ ÑвÑзать %(host)s:%(port)d, %(error)s"
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "Ðе разрешено изменÑÑ‚ÑŒ атрибуты Ð´Ð»Ñ Ð¾Ð±Ñ€Ð°Ð·Ð° %s"
-
msgid "Not an rbd snapshot"
msgstr "Ðе ÑвлÑетÑÑ Ð¼Ð¾Ð¼ÐµÐ½Ñ‚Ð°Ð»ÑŒÐ½Ð¾Ð¹ копией rbd"
@@ -2750,6 +2927,10 @@ msgid "Old volume is attached to a different instance."
msgstr "Старый том подключен к другому ÑкземплÑру."
#, python-format
+msgid "One or more hosts already in availability zone(s) %s"
+msgstr "Один или неÑколько хоÑтов уже находÑÑ‚ÑÑ Ð² зоне готовноÑти %s"
+
+#, python-format
msgid ""
"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
"%(unit_string)s."
@@ -2774,9 +2955,6 @@ msgstr ""
msgid "Only host parameter can be specified"
msgstr "Может быть указан только параметр хоÑта"
-msgid "Only instances implemented"
-msgstr "Реализованы только ÑкземплÑры"
-
msgid "Only root certificate can be retrieved."
msgstr "Может быть извлечен только базовый Ñертификат."
@@ -2854,6 +3032,15 @@ msgid "Page size %(pagesize)s is not supported by the host."
msgstr "Размер Ñтраницы %(pagesize)s не поддерживаетÑÑ Ñ…Ð¾Ñтом."
#, python-format
+msgid ""
+"Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. "
+"Check your Neutron configuration to validate that the macvtap parameters are "
+"correct."
+msgstr ""
+"Параметры %(missing_params)s отÑутÑтвуют в vif_details Ð´Ð»Ñ vif %(vif_id)s. "
+"Проверьте правильноÑÑ‚ÑŒ параметров macvtap в конфигурации Neutron. "
+
+#, python-format
msgid "Path %s must be LVM logical volume"
msgstr "Путь %s должен быть логичеÑким томом LVM"
@@ -2938,6 +3125,10 @@ msgstr ""
msgid "QEMU guest agent is not enabled"
msgstr "ГоÑтевой агент QEMU не включен"
+#, python-format
+msgid "Quiescing is not supported in instance %(instance_id)s"
+msgstr "Ð’ ÑкземплÑре %(instance_id)s приоÑтановка не поддерживаетÑÑ "
+
msgid "Quota"
msgstr "Квота"
@@ -2949,6 +3140,14 @@ msgid "Quota could not be found"
msgstr "Квота не найдена"
#, python-format
+msgid ""
+"Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s "
+"of %(allowed)s %(overs)s"
+msgstr ""
+"Превышена квота Ð´Ð»Ñ %(overs)s: Запрошено %(req)s, но уже иÑпользуетÑÑ "
+"%(used)s %(allowed)s %(overs)s"
+
+#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr "Квота превышена Ð´Ð»Ñ Ñ€ÐµÑурÑов: %(overs)s"
@@ -3002,6 +3201,14 @@ msgstr ""
#, python-format
msgid ""
+"Quota limit %(limit)s for %(resource)s must be in the range of -1 and "
+"%(max)s."
+msgstr ""
+"Ограничение квоты %(limit)s Ð´Ð»Ñ %(resource)s должно находитьÑÑ Ð² диапазоне "
+"от -1 до %(max)s."
+
+#, python-format
+msgid ""
"Quota limit %(limit)s for %(resource)s must be less than or equal to "
"%(maximum)s."
msgstr ""
@@ -3031,6 +3238,15 @@ msgstr "ИÑпользование квоты Ð´Ð»Ñ Ð¿Ñ€Ð¾ÐµÐºÑ‚Ð° %(project_id
msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr "ДоÑтигнуто макÑимальное чиÑло попыток отÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ VBD %s"
+#, python-format
+msgid ""
+"Relative blockcommit support was not detected. Libvirt '%s' or later is "
+"required for online deletion of file/network storage-backed volume snapshots."
+msgstr ""
+"Ðе найдена поддержка отноÑительной команды blockcommit. ТребуетÑÑ libvirt %s "
+"или выше Ð´Ð»Ñ Ð¾Ð¿ÐµÑ€Ð°Ñ‚Ð¸Ð²Ð½Ð¾Ð³Ð¾ ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ð¼Ð¾Ð¼ÐµÐ½Ñ‚Ð°Ð»ÑŒÐ½Ñ‹Ñ… копий томов, находÑщихÑÑ Ð² "
+"файловом или Ñетевом хранилище."
+
msgid "Request body and URI mismatch"
msgstr "Тело запроÑа и URI не Ñовпадают"
@@ -3295,10 +3511,21 @@ msgstr "Служба Ñ Ñ…Ð¾Ñтом %(host)s Ð´Ð»Ñ Ð´Ð²Ð¾Ð¸Ñ‡Ð½Ð¾Ð³Ð¾ файл
msgid "Service with host %(host)s topic %(topic)s exists."
msgstr "Служба Ñ Ñ…Ð¾Ñтом %(host)s Ð´Ð»Ñ Ñ€Ð°Ð·Ð´ÐµÐ»Ð° %(topic)s ÑущеÑтвует."
+msgid "Set admin password is not supported"
+msgstr "Указание Ð¿Ð°Ñ€Ð¾Ð»Ñ Ð°Ð´Ð¼Ð¸Ð½Ð¸Ñтратора не поддерживаетÑÑ."
+
#, python-format
msgid "Shadow table with name %(name)s already exists."
msgstr "Ð¢ÐµÐ½ÐµÐ²Ð°Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ð° Ñ Ð¸Ð¼ÐµÐ½ÐµÐ¼ %(name)s уже ÑущеÑтвует."
+#, python-format
+msgid "Share '%s' is not supported"
+msgstr "Общий реÑÑƒÑ€Ñ '%s' не поддерживаетÑÑ"
+
+#, python-format
+msgid "Share level '%s' cannot have share configured"
+msgstr "Ð”Ð»Ñ ÑƒÑ€Ð¾Ð²Ð½Ñ '%s' общего реÑурÑа Ð½ÐµÐ»ÑŒÐ·Ñ Ð½Ð°Ñтраивать общий реÑурÑ. "
+
msgid "Should we use a CA for each project?"
msgstr "Должны ли мы иÑпользовать центр Ñертификации Ð´Ð»Ñ ÐºÐ°Ð¶Ð´Ð¾Ð³Ð¾ проекта?"
@@ -3309,9 +3536,6 @@ msgstr ""
"Сокращение размера файловой ÑиÑтемы Ñ resize2fs не выполнено, проверьте, "
"доÑтаточно ли Ñвободного меÑта на диÑке."
-msgid "Signature not provided"
-msgstr "Ðе указана подпиÑÑŒ"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "Снимок %(snapshot_id)s не может быть найден."
@@ -3432,10 +3656,16 @@ msgstr ""
"Создайте базу данных Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ 'nova-manage db sync' перед выполнением Ñтой "
"команды."
+msgid "The backlog must be more than 0"
+msgstr "Значение запаÑа должно быть больше 0"
+
#, python-format
msgid "The console port range %(min_port)d-%(max_port)d is exhausted."
msgstr "Диапазон портов конÑоли %(min_port)d-%(max_port)d иÑчерпан."
+msgid "The created instance's disk would be too small."
+msgstr "Созданный диÑк ÑкземплÑра будет недоÑтаточным."
+
msgid "The current driver does not support preserving ephemeral partitions."
msgstr "Текущий драйвер не поддерживает Ñохранение временных разделов."
@@ -3446,6 +3676,13 @@ msgstr "Ð¡Ñ‚Ñ€Ð°Ñ‚ÐµÐ³Ð¸Ñ PBM по умолчанию не ÑущеÑтвует
msgid "The firewall filter for %s does not exist"
msgstr "Фильтр брандмауÑра Ð´Ð»Ñ %s не ÑущеÑтвует"
+msgid "The floating IP request failed with a BadRequest"
+msgstr "Сбой нефикÑированного IP-адреÑа Ñ BadRequest"
+
+#, python-format
+msgid "The group %(group_name)s must be configured with an id."
+msgstr "Ð”Ð»Ñ Ð³Ñ€ÑƒÐ¿Ð¿Ñ‹ %(group_name)s необходимо задать ИД."
+
msgid "The input is not a string or unicode"
msgstr "Введенное значение не ÑвлÑетÑÑ Ñтрокой или unicode"
@@ -3502,6 +3739,12 @@ msgstr ""
"Диапазон Ñети не доÑтаточно велик Ð´Ð»Ñ %(num_networks)s Ñетей. Размер Ñети - "
"%(network_size)s"
+#, python-format
+msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
+msgstr ""
+"ЧиÑло определенных портов %(ports)dis превышает макÑимально разрешенное: "
+"%(quota)d"
+
msgid "The only partition should be partition 1."
msgstr "ЕдинÑтвенный раздел должен быть разделом 1."
@@ -3540,6 +3783,10 @@ msgid ""
"The service from servicegroup driver %(driver)s is temporarily unavailable."
msgstr "Служба из драйвера servicegroup %(driver)s временно недоÑтупно."
+#, python-format
+msgid "The specified cluster '%s' was not found in vCenter"
+msgstr "Указанный клаÑтер '%s' не найден в vCenter"
+
msgid ""
"The string containing the reason for disabling the service contains invalid "
"characters or is too long."
@@ -3626,6 +3873,17 @@ msgid "There are not enough hosts available."
msgstr "Ðет доÑтаточного чиÑла доÑтупных хоÑтов."
#, python-format
+msgid ""
+"There are still %(count)i unmigrated flavor records. Migration cannot "
+"continue until all instance flavor records have been migrated to the new "
+"format. Please run `nova-manage db migrate_flavor_data' first."
+msgstr ""
+"ОÑталоÑÑŒ %(count)i запиÑей разновидноÑти, которые не были перенеÑены. "
+"Продолжение миграции невозможно, пока вÑе запиÑи разновидноÑти ÑкземплÑра не "
+"будут перенеÑены в новый формат. Вначале необходимо выполнить команду 'nova-"
+"manage db migrate_flavor_data'."
+
+#, python-format
msgid "There is no such action: %s"
msgstr "Ðе ÑущеÑтвует такого дейÑтвиÑ: %s"
@@ -3662,8 +3920,10 @@ msgstr "Ð’Ñ€ÐµÐ¼Ñ Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¿Ñ€Ð¸ Ñоздании уÑтройÑтва
msgid "Timeout waiting for response from cell"
msgstr "Тайм-аут Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð¾Ñ‚Ð²ÐµÑ‚Ð° от Ñчейки"
-msgid "Timestamp failed validation."
-msgstr "Ðе выполнена проверка ÑиÑтемного времени."
+#, python-format
+msgid "Timeout while checking if we can live migrate to host: %s"
+msgstr ""
+"Произошел тайм-аут при проверке возможноÑти оперативной миграции на хоÑÑ‚: %s"
msgid "To and From ports must be integers"
msgstr "Порты От и К должны быть целыми чиÑлами"
@@ -3679,18 +3939,12 @@ msgstr ""
"Будет Ñгенерировано Ñлишком много IP-адреÑов. Увеличьте /%s Ð´Ð»Ñ ÑÐ¾ÐºÑ€Ð°Ñ‰ÐµÐ½Ð¸Ñ "
"количеÑтва Ñгенерированных адреÑов."
-msgid "Too many failed authentications."
-msgstr "Слишком много неудачных попыток аутентификации."
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr "Ð—Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¢Ð¸Ð¿ и Код должны быть целыми чиÑлами Ð´Ð»Ñ Ñ‚Ð¸Ð¿Ð° протокола ICMP"
msgid "UUID is required to delete Neutron Networks"
msgstr "ТребуетÑÑ UUID Ð´Ð»Ñ ÑƒÐ´Ð°Ð»ÐµÐ½Ð¸Ñ Ñетей Neutron"
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "Ðевозможно ÑвÑзать IP-адреÑ, нет fixed_ips."
-
msgid "Unable to authenticate Ironic client."
msgstr "Ðе удалоÑÑŒ идентифицировать клиент Ironic."
@@ -3815,6 +4069,12 @@ msgstr "Ðе удалоÑÑŒ получить домен dns"
msgid "Unable to get dns entry"
msgstr "Ðе удалоÑÑŒ получить запиÑÑŒ dns"
+msgid "Unable to get host UUID: /etc/machine-id does not exist"
+msgstr "Ðе удалоÑÑŒ получить UUID хоÑта: /etc/machine-id не ÑущеÑтвует"
+
+msgid "Unable to get host UUID: /etc/machine-id is empty"
+msgstr "Ðе удалоÑÑŒ получить UUID хоÑта: /etc/machine-id пуÑÑ‚"
+
msgid "Unable to get rdp console, functionality not implemented"
msgstr "Ðевозможно получить конÑоль rdp, Ñ„ÑƒÐ½ÐºÑ†Ð¸Ñ Ð½Ðµ реализована"
@@ -3976,9 +4236,6 @@ msgstr ""
msgid "Unknown delete_info type %s"
msgstr "ÐеизвеÑтный тип delete_info %s"
-msgid "Unknown error occurred."
-msgstr "Возникла неизвеÑÑ‚Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°."
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "ÐеизвеÑтный image_type=%s"
@@ -4118,6 +4375,13 @@ msgstr ""
"ограничено до '%(allowed)s'."
#, python-format
+msgid "Value must be >= 0 for field %s"
+msgstr "Значение Ð¿Ð¾Ð»Ñ %s должно быть >=0. "
+
+msgid "Value required for 'scality_sofs_config'"
+msgstr "ТребуетÑÑ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ðµ Ð´Ð»Ñ 'scality_sofs_config'"
+
+#, python-format
msgid ""
"Version %(req_ver)s is not supported by the API. Minimum is %(min_ver)s and "
"maximum is %(max_ver)s."
@@ -4148,6 +4412,10 @@ msgid "Virtual machine mode '%(vmmode)s' is not recognised"
msgstr "Режим виртуальной машины %(vmmode)s не раÑпознан"
#, python-format
+msgid "Virtual machine mode '%s' is not valid"
+msgstr "ÐедопуÑтимый режим виртуальной машины '%s'"
+
+#, python-format
msgid ""
"Virtual switch associated with the network adapter %(adapter)s not found."
msgstr ""
@@ -4167,20 +4435,40 @@ msgid "Volume %(volume_id)s could not be found."
msgstr "Том %(volume_id)s не найден."
#, python-format
+msgid ""
+"Volume %(volume_id)s did not finish being created even after we waited "
+"%(seconds)s seconds or %(attempts)s attempts. And its status is "
+"%(volume_status)s."
+msgstr ""
+"Создание тома %(volume_id)s не завершаетÑÑ Ð´Ð¾Ð»Ð³Ð¾Ðµ Ð²Ñ€ÐµÐ¼Ñ %(seconds)s Ñекунд "
+"или %(attempts)s попыток. СоÑтоÑние тома: %(volume_status)s."
+
+#, python-format
msgid "Volume %(volume_id)s is not attached to anything"
msgstr "Том %(volume_id)s никуда не приÑоединён"
msgid "Volume does not belong to the requested instance."
msgstr "Том не отноÑитÑÑ Ðº запрашиваемому ÑкземплÑру."
-msgid "Volume must be attached in order to detach."
-msgstr "Том должен быть подключен, чтобы его можно было отключить."
+#, python-format
+msgid ""
+"Volume encryption is not supported for %(volume_type)s volume %(volume_id)s"
+msgstr "Ð”Ð»Ñ Ñ‚Ð¾Ð¼Ð° %(volume_type)s %(volume_id)s не поддерживаетÑÑ ÑˆÐ¸Ñ„Ñ€Ð¾Ð²Ð°Ð½Ð¸Ðµ "
#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
+msgid ""
+"Volume is smaller than the minimum size specified in image metadata. Volume "
+"size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes."
msgstr ""
-"Том указывает размер блока, но требуетÑÑ libvirt '%s' или более позднÑÑ "
-"верÑиÑ."
+"Размер тома меньше, чем минимальный размер, указанный в метаданных образа. "
+"Размер тома ÑоÑтавлÑет %(volume_size)i байт, минимальный размер - "
+"%(image_min_disk)i байт."
+
+msgid "Volume must be attached in order to detach."
+msgstr "Том должен быть подключен, чтобы его можно было отключить."
+
+msgid "Volume resource quota exceeded"
+msgstr "Превышена квота реÑурÑа тома"
#, python-format
msgid ""
@@ -4228,12 +4516,18 @@ msgstr ""
msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
msgstr "ÐедопуÑтимый тип метода перехватчика. ДопуÑтимые типы: 'pre' и 'post'"
+msgid "X-Forwarded-For is missing from request."
+msgstr "Ð’ запроÑе отÑутÑтвует X-Forwarded-For."
+
msgid "X-Instance-ID header is missing from request."
msgstr "Заголовок X-Instance-ID отÑутÑтвует в запроÑе."
msgid "X-Instance-ID-Signature header is missing from request."
msgstr "Ð’ запроÑе отÑутÑтвует заголовок X-Instance-ID-Signature."
+msgid "X-Metadata-Provider is missing from request."
+msgstr "Ð’ запроÑе отÑутÑтвует X-Metadata-Provider."
+
msgid "X-Tenant-ID header is missing from request."
msgstr "Заголовок X-Tenant-ID отÑутÑтвует в запроÑе."
@@ -4260,6 +4554,17 @@ msgstr ""
msgid ""
"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
"your destination node does not support retrieving listen addresses. In "
+"order for live migration to work properly you must either disable serial "
+"console or upgrade your libvirt version."
+msgstr ""
+"Ваша верÑÐ¸Ñ libvirt не поддерживает флаг VIR_DOMAIN_XML_MIGRATABLE или ваш "
+"целевой узел не поддерживает Ð¸Ð·Ð²Ð»ÐµÑ‡ÐµÐ½Ð¸Ñ Ð¿Ñ€Ð¾Ñлушивающих адреÑов. Ð”Ð»Ñ "
+"правильного Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð¾Ð¿ÐµÑ€Ð°Ñ‚Ð¸Ð²Ð½Ð¾Ð¹ миграции необходимо отключить "
+"поÑледовательную конÑоль или обновить верÑию libvirt."
+
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
+"your destination node does not support retrieving listen addresses. In "
"order for live migration to work properly, you must configure the graphics "
"(VNC and/or SPICE) listen addresses to be either the catch-all address "
"(0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)."
@@ -4300,9 +4605,6 @@ msgstr "block_device_mapping должен быть списком"
msgid "block_device_mapping_v2 must be a list"
msgstr "block_device_mapping_v2 должен быть списком"
-msgid "can't build a valid rule"
-msgstr "невозможно скомпоновать верное правило"
-
msgid "cannot delete non-existent key"
msgstr "Ðе удаетÑÑ ÑƒÐ´Ð°Ð»Ð¸Ñ‚ÑŒ неÑущеÑтвующий ключ"
@@ -4312,6 +4614,9 @@ msgstr "Ðе удаетÑÑ Ñохранить произвольные ключ
msgid "cannot understand JSON"
msgstr "невозможно понÑÑ‚ÑŒ JSON"
+msgid "cell_uuid must be set"
+msgstr "Ðеобходимо указать cell_uuid"
+
msgid "clone() is not implemented"
msgstr "Ð¤ÑƒÐ½ÐºÑ†Ð¸Ñ clone() не реализована"
@@ -4416,13 +4721,6 @@ msgstr "образ"
msgid "image already mounted"
msgstr "образ уже приÑоединён"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "образ %(instance)s в %(now)s"
-
-msgid "imageLocation is required"
-msgstr "ТребуетÑÑ imageLocation"
-
msgid "index"
msgstr "индекÑ"
@@ -4506,9 +4804,6 @@ msgstr "УÑтройÑтво nbd %s не показан"
msgid "nbd unavailable: module not loaded"
msgstr "nbd недоÑтупен: модуль не загружен"
-msgid "need group_name or group_id"
-msgstr "требуетÑÑ ÑƒÐºÐ°Ð·Ð°Ñ‚ÑŒ group_name или group_id"
-
msgid "network"
msgstr "Ñеть"
@@ -4526,18 +4821,22 @@ msgstr "узел"
msgid "not able to execute ssh command: %s"
msgstr "не может выполнить команду ssh: %s"
+msgid ""
+"nova-idmapshift is a tool that properly sets the ownership of a filesystem "
+"for use with linux user namespaces. This tool can only be used with linux "
+"lxc containers. See the man page for details."
+msgstr ""
+"nova-idmapshift - инÑтрумент, позволÑющий правильно задать принадлежноÑÑ‚ÑŒ "
+"файловой ÑиÑтемы Ð´Ð»Ñ Ð¸ÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»ÑŒÑкими проÑтранÑтвами имен "
+"Linux. Этот инÑтрумент можно применÑÑ‚ÑŒ только Ñ ÐºÐ¾Ð½Ñ‚ÐµÐ¹Ð½ÐµÑ€Ð°Ð¼Ð¸ lxc Linux. "
+"Более подробную информацию можно найти на Ñтранице Ñправки."
+
msgid "onSharedStorage must be specified."
msgstr "Должно быть указано значение onSharedStorage."
-msgid "only group \"all\" is supported"
-msgstr "поддерживаетÑÑ Ñ‚Ð¾Ð»ÑŒÐºÐ¾ группа \"вÑе(all)\""
-
msgid "operation time out"
msgstr "тайм-аут операции"
-msgid "operation_type must be add or remove"
-msgstr "operation_type должен быть add или remove"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr "Ðеверный формат или отÑутÑтвует os-getConsoleOutput в теле запроÑа"
@@ -4577,9 +4876,6 @@ msgstr ""
"read_deleted может принимать Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ 'no', 'yes' или 'only', значение %r "
"недопуÑтимо"
-msgid "resource_id and tag are required"
-msgstr "требуетÑÑ resource_id и tag"
-
msgid "rpc_port must be integer"
msgstr "rpc_port должен быть целым чиÑлом"
@@ -4649,9 +4945,6 @@ msgstr "неподдерживаемые полÑ: %s"
msgid "user"
msgstr "пользователь"
-msgid "user or group not specified"
-msgstr "не указан пользователь или группа"
-
msgid "uuid"
msgstr "uuid"
diff --git a/nova/locale/tr_TR/LC_MESSAGES/nova-log-critical.po b/nova/locale/tr_TR/LC_MESSAGES/nova-log-critical.po
index fc2bd4f73b..123249d635 100644
--- a/nova/locale/tr_TR/LC_MESSAGES/nova-log-critical.po
+++ b/nova/locale/tr_TR/LC_MESSAGES/nova-log-critical.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.dev41\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-27 06:30+0000\n"
-"PO-Revision-Date: 2015-07-29 02:05+0000\n"
-"Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
-"Language-Team: Turkish (Turkey)\n"
-"Language: tr-TR\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-07-29 02:05+0000\n"
+"Last-Translator: İşbaran Akçayır <isbaran@gmail.com>\n"
+"Language: tr-TR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Turkish (Turkey)\n"
#, python-format
msgid "Missing core API extensions: %s"
diff --git a/nova/locale/tr_TR/LC_MESSAGES/nova-log-error.po b/nova/locale/tr_TR/LC_MESSAGES/nova-log-error.po
index d5de532216..ac07d841c9 100644
--- a/nova/locale/tr_TR/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/tr_TR/LC_MESSAGES/nova-log-error.po
@@ -4,21 +4,22 @@
#
# Translators:
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-08-30 12:41+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Turkish (Turkey)\n"
-"Language: tr-TR\n"
+"POT-Creation-Date: 2016-02-08 05:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-01-20 01:49+0000\n"
+"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
+"Language: tr-TR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Turkish (Turkey)\n"
msgid "\"Look for the VDIs failed"
msgstr "\"VDI arama başarısız"
@@ -190,10 +191,6 @@ msgid "Environment variable 'NETWORK_ID' must be set."
msgstr "Çevresel değişken 'NETWORK_ID' ayarlanmış olmalı."
#, python-format
-msgid "Environment: %s"
-msgstr "Ortam: %s"
-
-#, python-format
msgid "Error copying key: %s"
msgstr "Anahtar kopyalanırken hata: %s"
@@ -338,10 +335,6 @@ msgstr "Sunucu Kurtarılmaya çalışılırken hata"
msgid "Error trying to reschedule"
msgstr "Yeniden zamanlama yapmaya çalışılırken hata"
-#, python-format
-msgid "Error updating resources for node %(node)s: %(e)s"
-msgstr "%(node)s düğümü için kaynaklar güncellenirken hata: %(e)s"
-
msgid "Error waiting for responses from neighbor cells"
msgstr "Komşu hücrelerden yanıt beklerken hata"
@@ -536,7 +529,7 @@ msgstr "%s-api yükleme başarısı"
msgid ""
"Failed to mount container filesystem '%(image)s' on '%(target)s': %(errors)s"
msgstr ""
-"Kap dosya sistemi '%(image)s' '%(target)s' üstüne bağlanamadı: %(error)s"
+"Kap dosya sistemi '%(image)s' '%(target)s' üstüne bağlanamadı: %(errors)s"
msgid "Failed to notify cells of BDM destroy."
msgstr "BDM silme hücrelerini bilgilendirme başarısız."
@@ -667,9 +660,6 @@ msgstr "Ekli VDI'ler temizlenmesi başarısız"
msgid "Fatal Exception running %(name)s %(type)s-hook: %(obj)s"
msgstr "%(name)s %(type)s-hook çalıştırılırken ölümcül istisna: %(obj)s"
-msgid "FaultWrapper error"
-msgstr "FaultWrapper hatası"
-
msgid "Guest does not have a console available"
msgstr "Konuğun kullanılabilir bir konsolu yok"
@@ -719,10 +709,6 @@ msgid "Invalid server_string: %s"
msgstr "Geçersiz server_string: %s"
#, python-format
-msgid "Keystone failure: %s"
-msgstr "Keystone başarısızlığı: %s"
-
-#, python-format
msgid "Live Migration failure: %s"
msgstr "Canlı Göç başarısız: %s"
@@ -881,7 +867,7 @@ msgstr ""
#, python-format
msgid "TIMEOUT: The call to %(method)s timed out. args=%(args)r"
-msgstr "ZAMAN AŞIMI: %(method)s çağrısı zaman aşımına uğradı. args=%(arg)r"
+msgstr "ZAMAN AŞIMI: %(method)s çağrısı zaman aşımına uğradı. args=%(args)r"
#, python-format
msgid "Temporary directory is invalid: %s"
@@ -991,10 +977,6 @@ msgid "Unable to parse rrd of %s"
msgstr "%s rrd'si ayrıştırılamadı"
#, python-format
-msgid "Unable to preallocate image at path: %(path)s"
-msgstr "Şu yoldaki imajın önceden ayrılması başarısız: %(path)s"
-
-#, python-format
msgid "Unable to retrieve storage policy with name %s"
msgstr "%s isimli depolama ilkesi alınamadı"
@@ -1009,10 +991,6 @@ msgstr "VBD sökülemedi"
msgid "Unable to update host of port %s"
msgstr "%s bağlantı noktasının istemcisi güncellenemiyor"
-#, python-format
-msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
-msgstr "Beklenmedik %(ex_name)s yükseltildi: %(ex_str)s"
-
msgid "Unexpected build failure, not rescheduling build."
msgstr "Beklenmeyen inşa başarısızlığı, inşa tekrar zamanlanmıyor."
@@ -1078,7 +1056,7 @@ msgstr ""
#, python-format
msgid "allocate_port_for_instance returned %(ports)s ports"
-msgstr "allocate_port_for_instance %(port)s bağlantı noktaları döndürdü"
+msgstr "allocate_port_for_instance %(ports)s bağlantı noktaları döndürdü"
msgid "attaching network adapter failed."
msgstr "ağ bağdaştırıcısı ekleme başarısız."
diff --git a/nova/locale/tr_TR/LC_MESSAGES/nova-log-info.po b/nova/locale/tr_TR/LC_MESSAGES/nova-log-info.po
index 8831793f13..428cf063b7 100644
--- a/nova/locale/tr_TR/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/tr_TR/LC_MESSAGES/nova-log-info.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-08-30 12:41+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Turkish (Turkey)\n"
-"Language: tr-TR\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-08-30 12:41+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: tr-TR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Turkish (Turkey)\n"
#, python-format
msgid "%(action_str)s instance"
@@ -62,25 +62,14 @@ msgid "Adding security group %(security_group_id)s to port %(port_id)s"
msgstr ""
"%(security_group_id)s güvenlik grubu %(port_id)s bağlantı noktasına ekleniyor"
-msgid "Allocate address"
-msgstr "Adres tahsisi"
-
#, python-format
msgid "Allocated network: '%s' for instance"
msgstr "Ağ: '%s' sunucu için ayrıldı"
-#, python-format
-msgid "Associate address %(public_ip)s to instance %(instance_id)s"
-msgstr "%(instance_id)s örneğine %(public_ip)s adresini ilişkilendir"
-
msgid "Attach interface"
msgstr "Arayüz ekle"
#, python-format
-msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
-msgstr "%(device)s'daki %(instance_id)s örneğine %(volume_id)s birimini bağla"
-
-#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr "%(device)s'daki %(server_id)s örneğine %(volume_id)s birimi ekleniyor"
@@ -194,22 +183,10 @@ msgid "Create assisted snapshot from volume %s"
msgstr "%s hacminden anlık görüntü oluştur"
#, python-format
-msgid "Create key pair %s"
-msgstr "%s anahtar çiftini oluştur"
-
-#, python-format
msgid "Create snapshot from volume %s"
msgstr "%s biriminden sistem görüntüsü oluşturuluyor"
#, python-format
-msgid "Create snapshot of volume %s"
-msgstr "Bölüm %s'in sistem anlık görüntüsünü oluştur"
-
-#, python-format
-msgid "Create volume from snapshot %s"
-msgstr "%s sistem görüntüsünden birim oluştur"
-
-#, python-format
msgid "Create volume of %s GB"
msgstr "%s GB'lık birim oluştur"
@@ -228,14 +205,6 @@ msgid "Creating image"
msgstr "Ä°maj oluÅŸturuluyor"
#, python-format
-msgid "De-registering image %s"
-msgstr "%s imaj kaydı siliniyor"
-
-#, python-format
-msgid "Delete key pair %s"
-msgstr "%s anahtar çiftini sil"
-
-#, python-format
msgid "Delete security group %s"
msgstr "%s güvenlik grubunu siliniz"
@@ -316,10 +285,6 @@ msgid "Disabling host %s."
msgstr "%s istemcisini kapatmak."
#, python-format
-msgid "Disassociate address %s"
-msgstr "Adresi kes %s"
-
-#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr "Vadesi geçmiş VDI %s hesap domU'sundan ayrılıyor"
@@ -468,10 +433,6 @@ msgid "Get console output"
msgstr "Konsol çıktısını al"
#, python-format
-msgid "Get console output for instance %s"
-msgstr "%s örneği için konsol çıktısını getir"
-
-#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
@@ -532,10 +493,6 @@ msgstr ""
"süre: %(image_id)s imajı için %(duration).2f saniye"
#, python-format
-msgid "Import key %s"
-msgstr "%s anahtarını içeriye aktar"
-
-#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database: "
"assuming it is a worker VM and skip ping migration to a new host"
@@ -738,10 +695,6 @@ msgstr ""
msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr "%(host_name)s istemcisi %(mode)s bakım kipine alınıyor."
-#, python-format
-msgid "Reboot instance %r"
-msgstr "%r örneğini tekrar yükle"
-
msgid "Rebooting instance"
msgstr "Sunucu yeniden başlatılıyor"
@@ -782,14 +735,6 @@ msgid "Reclaiming deleted instance"
msgstr "Silinen örnek kurtarılıyor"
#, python-format
-msgid "Registered image %(image_location)s with id %(image_id)s"
-msgstr "%(image_id)s id ile %(image_location)s imajı kaydedildi"
-
-#, python-format
-msgid "Release address %s"
-msgstr "%s adresini serbest bırak"
-
-#, python-format
msgid "Removable base files: %s"
msgstr "Taşınabilir temel dosyalar: %s"
@@ -989,11 +934,6 @@ msgstr ""
"%(ex)s"
#, python-format
-msgid ""
-"Unauthorized request for controller=%(controller)s and action=%(action)s"
-msgstr "%(controller)s kontrolcüsü ve %(action)s işlemi için izinsiz istek"
-
-#, python-format
msgid "Unexpected error: %s"
msgstr "Beklenmeyen hata: %s"
@@ -1011,10 +951,6 @@ msgid "Updating from migration %s"
msgstr "%s göçünden güncelleniyor"
#, python-format
-msgid "Updating image %s publicity"
-msgstr "%s imaj tanıtımı güncelleniyor"
-
-#, python-format
msgid "Updating instance to original state: '%s'"
msgstr "Sunucu asıl durumuna güncelleniyor: '%s'"
diff --git a/nova/locale/tr_TR/LC_MESSAGES/nova-log-warning.po b/nova/locale/tr_TR/LC_MESSAGES/nova-log-warning.po
index 08971da5d6..8874563895 100644
--- a/nova/locale/tr_TR/LC_MESSAGES/nova-log-warning.po
+++ b/nova/locale/tr_TR/LC_MESSAGES/nova-log-warning.po
@@ -4,21 +4,22 @@
#
# Translators:
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-09-08 05:38+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Turkish (Turkey)\n"
-"Language: tr-TR\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-01-20 01:48+0000\n"
+"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
+"Language: tr-TR\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Turkish (Turkey)\n"
#, python-format
msgid ""
@@ -36,7 +37,7 @@ msgstr "%(host_state)s'den yakın zamanda haber alınamadı"
#, python-format
msgid "%(overs)s quota exceeded for %(pid)s, tried to resize instance."
msgstr ""
-"%(pid)s için %(over)s kota aşıldı, sunucu yeniden boyutlandırılmaya "
+"%(pid)s için %(overs)s kota aşıldı, sunucu yeniden boyutlandırılmaya "
"çalışıldı."
#, python-format
@@ -57,14 +58,6 @@ msgstr ""
"%s swap imajı sunucu tarafından kullanılmuş ama mevcut arka dosyalar yok!"
#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and "
-"will be locked out for %(lock_mins)d minutes."
-msgstr ""
-"%(access_key)s erişim anahtarı %(failures)d kez yanlış girildi ve "
-"%(lock_mins)d dakika boyunca kilitlenmiÅŸ olacak."
-
-#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr "|%(address)s| adresi tahsis edilemedi"
@@ -415,7 +408,7 @@ msgid ""
"Found multiple matches for domain %(domain)s.\n"
"%(entry)s"
msgstr ""
-"%(domain) alanı için birden fazla eşleşme bulundu.\n"
+"%(domain)s alanı için birden fazla eşleşme bulundu.\n"
"%(entry)s"
msgid ""
@@ -697,7 +690,7 @@ msgstr "Neutron hatası: Sabit IP %s zaten kullanımda."
#, python-format
msgid ""
"Neutron error: MAC address %(mac)s is already in use on network %(network)s."
-msgstr "Neutron hatası: MAC adresi %(mac) %(network)s ağında zaten kullanımda"
+msgstr "Neutron hatası: MAC adresi %(mac)s %(network)s ağında zaten kullanımda"
#, python-format
msgid "Neutron error: No more fixed IPs in network: %s"
@@ -902,16 +895,6 @@ msgstr ""
"çalıştırılmıyor. Gerekli asgari vCenter sürümü 13.0.0 sürümüyle beraber "
"%(version)s olarak artırılacak."
-#, python-format
-msgid ""
-"Running Nova with a libvirt version less than %(version)s is deprecated. The "
-"required minimum version of libvirt will be raised to %(version)s in the "
-"13.0.0 release."
-msgstr ""
-"Nova'nın %(version)s sürümünden eski libvirt ile çalıştırılması artık "
-"kullanılmıyor. 13.0.0 sürümüyle birlikte gerekli libvirt sürümü %(version)s "
-"olarak artırılacak."
-
msgid ""
"Running libvirt-lxc without user namespaces is dangerous. Containers spawned "
"by Nova will be run as the host's root user. It is highly suggested that "
@@ -1179,14 +1162,14 @@ msgstr ""
#, python-format
msgid "Warning, memory usage is 0 for %(instance)s on baremetal node %(node)s."
msgstr ""
-"Uyarı, çıplak saç düğüm %(node)s üzerindeki %(instance) için hafıza "
+"Uyarı, çıplak saç düğüm %(node)s üzerindeki %(instance)s için hafıza "
"kullanımı 0."
#, python-format
msgid ""
"Warning, number of cpus is 0 for %(instance)s on baremetal node %(node)s."
msgstr ""
-"Uyarı, çıplak saç düğüm %(node)s üzerindeki %(instance) için cpu sayısı 0."
+"Uyarı, çıplak saç düğüm %(node)s üzerindeki %(instance)s için cpu sayısı 0."
#, python-format
msgid ""
@@ -1287,10 +1270,6 @@ msgid "multiple fixed_ips exist, using the first IPv4 fixed_ip: %s"
msgstr "birden fazla sabit_ip mevcut, ilk IPv4 sabit ip kullanılıyor: %s"
#, python-format
-msgid "multiple fixed_ips exist, using the first: %s"
-msgstr "birden fazla sabit_ip mevcut, ilki kullanılıyor: %s"
-
-#, python-format
msgid ""
"my_ip address (%(my_ip)s) was not found on any of the interfaces: %(ifaces)s"
msgstr ""
diff --git a/nova/locale/tr_TR/LC_MESSAGES/nova.po b/nova/locale/tr_TR/LC_MESSAGES/nova.po
index 843e8adebc..9b2bdfed16 100644
--- a/nova/locale/tr_TR/LC_MESSAGES/nova.po
+++ b/nova/locale/tr_TR/LC_MESSAGES/nova.po
@@ -1,22 +1,27 @@
-# Turkish (Turkey) translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
+# Translators:
+# Özcan Zafer AYAN <ozcanzaferayan@gmail.com>, 2013
+# Özcan Zafer AYAN <ozcanzaferayan@gmail.com>, 2013
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Andreas Jaeger <jaegerandi@gmail.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-09-06 10:17+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: tr_TR\n"
-"Language-Team: Turkish (Turkey)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
+"PO-Revision-Date: 2016-01-20 01:45+0000\n"
+"Last-Translator: Andreas Jaeger <jaegerandi@gmail.com>\n"
+"Language: tr-TR\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Turkish (Turkey)\n"
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
@@ -197,9 +202,6 @@ msgstr ""
msgid "API version %(version)s is not supported on this method."
msgstr "API sürümü %(version)s bu metodda desteklenmiyor."
-msgid "Access key not provided"
-msgstr "Erişim anahtarı verilmedi."
-
msgid "Access list not available for public flavors."
msgstr "Erişim listesi açık nitelikler için kullanılamaz."
@@ -782,10 +784,6 @@ msgstr "%(host)s sunucusunda %(binary)s ikilisi bulunamadı."
msgid "Could not find config at %(path)s"
msgstr "%(path)s'deki yapılandırma bulunamadı"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "Anahtar çift(ler)i bulunamadı: %s"
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr "VM'nin kullandığı veri deposu referansı(ları) bulunamadı."
@@ -819,14 +817,6 @@ msgstr "İmaj %(image_id)s yüklenemedi"
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "Couldn't get Link Local IP of %(interface)s :%(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"Sunucu %(instance)s 1 saattir durdurulamadı. Şu anki vm_durumu: "
-"%(vm_state)s, mevcut görev_durumu: %(task_state)s"
-
msgid "Create networks failed"
msgstr "Ağ oluşturma başarısız"
@@ -1063,21 +1053,6 @@ msgstr "Bir uuid bekleniyordu ama %(uuid)s alındı."
msgid "Expected object of type: %s"
msgstr "Beklenen nesne türü: %s"
-msgid "Expecting a list of resources"
-msgstr "Bir kaynak listesi bekleniyor"
-
-msgid "Expecting a list of tagSets"
-msgstr "tagSets listesi bekleniyor"
-
-msgid "Expecting both key and value to be set"
-msgstr "Hem anahtar hem değerin ayarlanmış olması bekleniyor"
-
-msgid "Expecting key to be set"
-msgstr "Anahtarın ayarlanmış olması bekleniyor"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "tagSet'in anahtar/değer çifti olması bekleniyor"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "Gölge tabloda ek sütun %(table)s.%(column)s"
@@ -1244,10 +1219,6 @@ msgstr "Sunucu askıya alınamadı: %(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "Sunucu sonlandırılamadı: %(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "Keystone'den gelen yanıt ayrıştırılırken hata: %s"
-
msgid "Failure prepping block device."
msgstr "Blok aygıt hazırlama başarısız."
@@ -1332,14 +1303,6 @@ msgstr "%(flavor_id)s niteliğinin %(key)s anahtarına sahip ek özelliği yok."
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr ""
-"%(id)d niteliği ek özelliği %(retries)d yeniden deneme sonrası "
-"güncellenemedi ya da oluşturulamadı."
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr ""
@@ -1392,9 +1355,6 @@ msgstr "Değişken IP %(address)s ilişkilendirmesi başarısız."
msgid "Floating IP allocate failed."
msgstr "Değişken IP ayrılması başarısız."
-msgid "Floating ip is not associated."
-msgstr "DeÄŸiÅŸken ip iliÅŸkilendirilmemiÅŸ."
-
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
"meta."
@@ -1561,9 +1521,6 @@ msgstr "İmaj üstveri sınırı aşıldı"
msgid "Image model '%(image)s' is not supported"
msgstr "Ä°maj modeli '%(image)s' desteklenmiyor"
-msgid "Image must be available"
-msgstr "İmaj müsait olmak zorunda"
-
msgid "Image not found."
msgstr "İmaj bulunamadı"
@@ -1792,9 +1749,6 @@ msgstr "%(interface)s arayüzü bulunamadı."
msgid "Invalid Base 64 data for file %(path)s"
msgstr "%(path)s dosyası için geçersiz base 64 verisi"
-msgid "Invalid CIDR"
-msgstr "Geçersiz CIDR"
-
msgid "Invalid Connection Info"
msgstr "Geçersiz Bağlantı Bilgisi"
@@ -1807,10 +1761,6 @@ msgid "Invalid IP format %s"
msgstr "Geçersiz IP biçimi %s"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "Geçersiz IP iletişim kuralı %(protocol)s"
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "Geçersiz IP %(protocol)s."
@@ -2100,20 +2050,9 @@ msgid "Invalid usage_type: %s"
msgstr "Geçersiz usage_type: %s"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"instanceId için geçersiz değer '%(ec2_instance_id)s'. Sunucunun (%(root)s) "
-"köküne bağlanmış bir mantıksal sürücü yok"
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "Zorlama için geçersiz değer '%s'."
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "'scheduler_max_attempts' için geçersiz değer, >= 1 olmalı"
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "Yapılandırma Sürücüsü seçeneği için geçersiz değer: %(option)s"
@@ -2693,9 +2632,6 @@ msgstr "İstek gövdesi yok"
msgid "No root disk defined."
msgstr "Kök disk tanımlanmamış."
-msgid "No rule for the specified parameters."
-msgstr "Belirlenmiş parametreler için hiç kural yok"
-
msgid "No suitable network for migrate"
msgstr "Göç için uygun bir ağ yok"
@@ -2730,10 +2666,6 @@ msgstr "%(host)s için boş bir bağlantı noktası edinilemedi"
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "%(host)s:%(port)d bağlanamadı, %(error)s"
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "%s imajının özniteliklerinin değiştirilmesine izin verilmiyor"
-
msgid "Not an rbd snapshot"
msgstr "Rbd anlık görüntüsü değil"
@@ -2809,9 +2741,6 @@ msgstr ""
msgid "Only host parameter can be specified"
msgstr "Yalnızca host parametresi belirtilebilir"
-msgid "Only instances implemented"
-msgstr "Yalnızca uygulanan sunucular"
-
msgid "Only root certificate can be retrieved."
msgstr "Sadece kök sertifikası alınabilir."
@@ -3358,9 +3287,6 @@ msgstr ""
"resize2fs ile dosya sisteminin küçültülmesi başarısız, lütfen diskinizde "
"yeterli alan olduÄŸundan emin olun."
-msgid "Signature not provided"
-msgstr "Ä°mza verilmedi."
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "%(snapshot_id)s sistem anlık görüntüsü bulunamadı."
@@ -3716,9 +3642,6 @@ msgstr "%s aygıtının oluşturulması beklenirken zaman aşımı"
msgid "Timeout waiting for response from cell"
msgstr "Hücreden cevap beklerken zaman aşımı"
-msgid "Timestamp failed validation."
-msgstr "Zaman damgası doğrulamada başarısız."
-
msgid "To and From ports must be integers"
msgstr "Hedef ve Kaynak bağlantı noktaları tam sayı olmalı"
@@ -3733,18 +3656,12 @@ msgstr ""
"Çok fazla IP adresi üretilecek. Lütfen üretilen sayıyı azaltmak için /%s'i "
"artırın."
-msgid "Too many failed authentications."
-msgstr "Çok sayıda kimlik doğrulama başarısız oldu."
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr "ICMP iletişim kuralı türü için Tür ve Kod tam sayı olmalı"
msgid "UUID is required to delete Neutron Networks"
msgstr "Neutron Ağlarını silmek için UUID gerekiyor"
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "IP Adresi iliÅŸkilendirilemedi, sabit_ip yok."
-
msgid "Unable to authenticate Ironic client."
msgstr "Ironic istemcisi doğrulanamıyor."
@@ -4020,9 +3937,6 @@ msgstr "Tanınmayan zincir: %r"
msgid "Unknown delete_info type %s"
msgstr "Bilinmeyen delete_info türü %s"
-msgid "Unknown error occurred."
-msgstr "Bilinmeyen bir hata oluÅŸtu."
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "Bilinmeyen image_type=%s"
@@ -4079,11 +3993,11 @@ msgstr "Önce Essex sürümünü kullanarak veritabanı yükseltimi yapın."
#, python-format
msgid "User %(username)s not found in password file."
-msgstr "Kullanıcı %(username) parola dosyasında bulunamadı."
+msgstr "Kullanıcı %(username)s parola dosyasında bulunamadı."
#, python-format
msgid "User %(username)s not found in shadow file."
-msgstr "Kullanıcı % (kullanıcıadı) gölge dosyasında bulunamadı."
+msgstr "Kullanıcı %(username)s gölge dosyasında bulunamadı."
msgid "User data needs to be valid base 64."
msgstr "Kullanıcı verisi geçerli base 64 olmalıdır."
@@ -4232,12 +4146,6 @@ msgid "Volume must be attached in order to detach."
msgstr "Mantıksal sürücünün ayrılabilmesi için eklenmiş olmalıdır."
#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
-msgstr ""
-"Mantıksal sürücü blok boyutu ayarlıyor, ama libvirt '%s' ya da daha yenisi "
-"gerekli."
-
-#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
@@ -4357,9 +4265,6 @@ msgstr "blok_aygıt_eşleştirmesi liste olmalı"
msgid "block_device_mapping_v2 must be a list"
msgstr "blok_aygıt_eşleşetirmesi_v2 bir liste olmalı"
-msgid "can't build a valid rule"
-msgstr "geçerli bir kural inşa edilemiyor"
-
msgid "cannot delete non-existent key"
msgstr "olmayan anahtar silinemez"
@@ -4471,13 +4376,6 @@ msgstr "imaj"
msgid "image already mounted"
msgstr "imaj zaten bağlanmış"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "%(instance)s in %(now)s deki imajı"
-
-msgid "imageLocation is required"
-msgstr "imageLocation gerekli"
-
msgid "index"
msgstr "indis"
@@ -4561,9 +4459,6 @@ msgstr "nbd aygıtı %s ortaya çıkmadı"
msgid "nbd unavailable: module not loaded"
msgstr "nbd kullanılabilir değil: modül yüklenmemiş"
-msgid "need group_name or group_id"
-msgstr "group_name veya group_id gerekli"
-
msgid "network"
msgstr "aÄŸ"
@@ -4584,15 +4479,9 @@ msgstr "ssh komutu çalıştırılamadı: %s"
msgid "onSharedStorage must be specified."
msgstr "onSharedStorage belirtilmelidir."
-msgid "only group \"all\" is supported"
-msgstr "Sadece \"all\" grubu destekleniyor"
-
msgid "operation time out"
msgstr "işlem zaman aşımına uğradı"
-msgid "operation_type must be add or remove"
-msgstr "İşlem türü eklenmek veya kaldırılmak zorunda"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr "os-getConsoleOutpu kusurlu veya istek gövdesinde bulunmuyor"
@@ -4632,9 +4521,6 @@ msgstr ""
"read_deleted deÄŸiÅŸkeni 'no', 'yes' veya 'only' deÄŸerlerini alabilir, %r "
"olamaz"
-msgid "resource_id and tag are required"
-msgstr "kaynak_id'si ve etiket gerekli"
-
msgid "rpc_port must be integer"
msgstr "rpc_port tam sayı olmalı"
@@ -4700,9 +4586,6 @@ msgstr "desteklenmeyen alanlar: %s"
msgid "user"
msgstr "kullanıcı"
-msgid "user or group not specified"
-msgstr "Kullanıcı veya grup belirlenmedi"
-
msgid "uuid"
msgstr "uuid"
diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-critical.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-critical.po
index 350800ef62..8267b1d124 100644
--- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-critical.po
+++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-critical.po
@@ -6,19 +6,19 @@
# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.dev41\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2015-10-27 06:30+0000\n"
-"PO-Revision-Date: 2014-09-02 05:10+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language-Team: Chinese (China)\n"
-"Language: zh-CN\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2014-09-02 05:10+0000\n"
+"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
+"Language: zh-CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
-"X-Generator: Zanata 3.7.1\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Chinese (China)\n"
#, python-format
msgid "Missing core API extensions: %s"
diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po
index 3c4968ad93..d9f28c9f85 100644
--- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po
+++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-error.po
@@ -10,19 +10,19 @@
# zhangjingwen <zhangjingwen@cn.fujitsu.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-10-07 05:39+0000\n"
-"Last-Translator: liujunpeng <liujunpeng@inspur.com>\n"
-"Language-Team: Chinese (China)\n"
-"Language: zh-CN\n"
+"POT-Creation-Date: 2016-02-08 05:39+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-10-07 05:39+0000\n"
+"Last-Translator: liujunpeng <liujunpeng@inspur.com>\n"
+"Language: zh-CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Chinese (China)\n"
msgid "\"Look for the VDIs failed"
msgstr "查找VDI失败"
@@ -196,10 +196,6 @@ msgid "Environment variable 'NETWORK_ID' must be set."
msgstr "必须设置环境å˜é‡'NETWORK_ID'。"
#, python-format
-msgid "Environment: %s"
-msgstr "Environment: %s"
-
-#, python-format
msgid "Error copying key: %s"
msgstr "å¤åˆ¶key:%s 出错"
@@ -333,10 +329,6 @@ msgstr "å°è¯•æ•‘æ´å®žä¾‹æ—¶å‘生错误"
msgid "Error trying to reschedule"
msgstr "å°è¯•é‡æ–°è°ƒåº¦æ—¶å‡ºé”™"
-#, python-format
-msgid "Error updating resources for node %(node)s: %(e)s"
-msgstr "更新节点 %(node)s: %(e)s 资æºæ—¶å‘生错误。"
-
msgid "Error waiting for responses from neighbor cells"
msgstr "等待æ¥è‡ªç›¸é‚»cellçš„å“应å‘生错误"
@@ -673,9 +665,6 @@ msgstr "清除已连接的 VDI 时,å‘生故障"
msgid "Fatal Exception running %(name)s %(type)s-hook: %(obj)s"
msgstr "è¿è¡Œ%(name)s %(type)s-hook: %(obj)sæ—¶å‘生致命错误"
-msgid "FaultWrapper error"
-msgstr "FaultWrapper错误"
-
msgid "Guest does not have a console available"
msgstr "Guest 没有控制å°å¯ç”¨"
@@ -720,10 +709,6 @@ msgid "Invalid server_string: %s"
msgstr "server_string 无效:%s"
#, python-format
-msgid "Keystone failure: %s"
-msgstr "Keystone å‘生故障:%s"
-
-#, python-format
msgid "Live Migration failure: %s"
msgstr "在线è¿ç§»å¤±è´¥ï¼š%s"
@@ -982,10 +967,6 @@ msgid "Unable to parse rrd of %s"
msgstr "ä¸èƒ½è§£æž %s çš„rrd"
#, python-format
-msgid "Unable to preallocate image at path: %(path)s"
-msgstr "ä¸èƒ½ç ¸è·¯å¾„:%(path)s上é¢é¢„分é…é•œåƒ"
-
-#, python-format
msgid "Unable to retrieve storage policy with name %s"
msgstr "ä¸èƒ½èŽ·å–å称 %s 的存储策略"
@@ -1004,10 +985,6 @@ msgstr "æ— æ³•æ›´æ–°ç«¯å£ %s 的主机"
msgid "Unable to update instance VNIC index for port %s."
msgstr "无法更新实例VNICçš„ç´¢å¼•çš„ç«¯å£ %s"
-#, python-format
-msgid "Unexpected %(ex_name)s raised: %(ex_str)s"
-msgstr "å‘生æ„外错误 %(ex_name)s :%(ex_str)s"
-
msgid "Unexpected build failure, not rescheduling build."
msgstr "æ„外构筑失败,未é‡æ–°å®‰æŽ’构筑。"
diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po
index a919b432cd..4daa94807f 100644
--- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po
+++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-info.po
@@ -11,19 +11,19 @@
# liujunpeng <liujunpeng@inspur.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-10-07 05:39+0000\n"
-"Last-Translator: liujunpeng <liujunpeng@inspur.com>\n"
-"Language-Team: Chinese (China)\n"
-"Language: zh-CN\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-10-07 05:39+0000\n"
+"Last-Translator: liujunpeng <liujunpeng@inspur.com>\n"
+"Language: zh-CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Chinese (China)\n"
#, python-format
msgid "%(action_str)s instance"
@@ -65,25 +65,14 @@ msgstr "活跃的基文件:%s"
msgid "Adding security group %(security_group_id)s to port %(port_id)s"
msgstr "正在将安全组 %(security_group_id)s æ·»åŠ è‡³ç«¯å£ %(port_id)s"
-msgid "Allocate address"
-msgstr "分é…地å€"
-
#, python-format
msgid "Allocated network: '%s' for instance"
msgstr "为实例分é…网络: '%s' "
-#, python-format
-msgid "Associate address %(public_ip)s to instance %(instance_id)s"
-msgstr "æŠŠåœ°å€ %(public_ip)s å…³è”到实例 %(instance_id)s"
-
msgid "Attach interface"
msgstr "请连接接å£"
#, python-format
-msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
-msgstr "æŠŠå· %(volume_id)s 附加到实例 %(instance_id)s 上ä½ç½®åœ¨ %(device)s"
-
-#, python-format
msgid "Attach volume %(volume_id)s to instance %(server_id)s at %(device)s"
msgstr "æŠŠå· %(volume_id)s 附加到实例 %(server_id)s çš„ %(device)s 设备上"
@@ -190,22 +179,10 @@ msgid "Create assisted snapshot from volume %s"
msgstr "从å·%s 创建辅助的快照"
#, python-format
-msgid "Create key pair %s"
-msgstr "创建密钥对 %s"
-
-#, python-format
msgid "Create snapshot from volume %s"
msgstr "ä¸ºå· %s 创建快照"
#, python-format
-msgid "Create snapshot of volume %s"
-msgstr "åˆ›å»ºå· %s çš„å¿«ç…§"
-
-#, python-format
-msgid "Create volume from snapshot %s"
-msgstr "从快照 %s 创建å·"
-
-#, python-format
msgid "Create volume of %s GB"
msgstr "创建 %s GBçš„å·"
@@ -230,14 +207,6 @@ msgid ""
msgstr "在%(last)d秒以å‰ï¼Œæ•°æ®å‰©ä½™%(remaining)d 字节,低值%(watermark)d 字节。"
#, python-format
-msgid "De-registering image %s"
-msgstr "è§£é™¤é•œåƒ %s 的注册"
-
-#, python-format
-msgid "Delete key pair %s"
-msgstr "删除密钥对 %s"
-
-#, python-format
msgid "Delete security group %s"
msgstr "删除安全组 %s"
@@ -316,10 +285,6 @@ msgid "Disabling host %s."
msgstr "正在ç¦ç”¨ä¸»æœº %s。"
#, python-format
-msgid "Disassociate address %s"
-msgstr "å–æ¶ˆåœ°å€ %s çš„å…³è”"
-
-#, python-format
msgid "Disconnecting stale VDI %s from compute domU"
msgstr "正在将过时 VDI %s 从计算 domU 断开连接"
@@ -461,10 +426,6 @@ msgid "Get console output"
msgstr "请获å–控制å°è¾“出"
#, python-format
-msgid "Get console output for instance %s"
-msgstr "获å–实例 %s 控制å°è¾“出"
-
-#, python-format
msgid ""
"Getting block stats failed, device might have been detached. Instance="
"%(instance_name)s Disk=%(disk)s Code=%(errcode)s Error=%(e)s"
@@ -524,10 +485,6 @@ msgstr ""
"%(duration).2f 秒"
#, python-format
-msgid "Import key %s"
-msgstr "导入密钥 %s"
-
-#, python-format
msgid ""
"Instance %(name)s running on %(host)s could not be found in the database: "
"assuming it is a worker VM and skip ping migration to a new host"
@@ -730,10 +687,6 @@ msgstr ""
msgid "Putting host %(host_name)s in maintenance mode %(mode)s."
msgstr "正在将主机 %(host_name)s ç½®äºŽç»´æŠ¤æ–¹å¼ %(mode)s 下。"
-#, python-format
-msgid "Reboot instance %r"
-msgstr "é‡å¯å®žä¾‹ %r"
-
msgid "Rebooting instance"
msgstr "正在é‡æ–°å¼•å¯¼å®žä¾‹"
@@ -771,14 +724,6 @@ msgid "Recovered connection to memcache server for reporting service status."
msgstr "与memcacheæœåŠ¡å™¨çš„连接æ¢å¤ï¼ŒæŠ¥å‘ŠæœåŠ¡çŠ¶æ€ã€‚"
#, python-format
-msgid "Registered image %(image_location)s with id %(image_id)s"
-msgstr "用id %(image_id)s æ³¨å†Œé•œåƒ %(image_location)s"
-
-#, python-format
-msgid "Release address %s"
-msgstr "é‡Šæ”¾åœ°å€ %s"
-
-#, python-format
msgid "Removable base files: %s"
msgstr "å¯åˆ é™¤çš„基文件:%s"
@@ -969,11 +914,6 @@ msgid ""
msgstr "ä¸èƒ½ä½¿ç”¨æ‰©å¤§çš„域获å–APIs,回归到慢代ç è·¯å¾„:%(ex)s"
#, python-format
-msgid ""
-"Unauthorized request for controller=%(controller)s and action=%(action)s"
-msgstr "为controller=%(controller)s ä»¥åŠ action=%(action)s未验è¯çš„请求"
-
-#, python-format
msgid "Unexpected error: %s"
msgstr "æ„外错误:%s"
@@ -991,10 +931,6 @@ msgid "Updating from migration %s"
msgstr "正在从è¿ç§» %s 进行更新"
#, python-format
-msgid "Updating image %s publicity"
-msgstr "æ­£åœ¨æ›´æ–°é•œåƒ %s çš„ publicity 属性"
-
-#, python-format
msgid "Updating instance to original state: '%s'"
msgstr "正在将实例更新为原始状æ€ï¼šâ€˜%s’"
diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova-log-warning.po b/nova/locale/zh_CN/LC_MESSAGES/nova-log-warning.po
index 8aec92f662..a46fede01a 100644
--- a/nova/locale/zh_CN/LC_MESSAGES/nova-log-warning.po
+++ b/nova/locale/zh_CN/LC_MESSAGES/nova-log-warning.po
@@ -10,19 +10,19 @@
# liujunpeng <liujunpeng@inspur.com>, 2015. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b2.dev725\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:12+0000\n"
-"PO-Revision-Date: 2015-10-07 05:40+0000\n"
-"Last-Translator: liujunpeng <liujunpeng@inspur.com>\n"
-"Language-Team: Chinese (China)\n"
-"Language: zh-CN\n"
+"POT-Creation-Date: 2016-01-21 04:19+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2015-10-07 05:40+0000\n"
+"Last-Translator: liujunpeng <liujunpeng@inspur.com>\n"
+"Language: zh-CN\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Generated-By: Babel 2.0\n"
"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Chinese (China)\n"
#, python-format
msgid ""
@@ -70,14 +70,6 @@ msgid "%s swap image was used by instance but no back files existing!"
msgstr "%s 交æ¢é•œåƒè¢«å®žä¾‹ä½¿ç”¨ï¼Œä½†æ˜¯æ²¡æœ‰å¤‡ä»½æ–‡ä»¶å­˜åœ¨ï¼"
#, python-format
-msgid ""
-"Access key %(access_key)s has had %(failures)d failed authentications and "
-"will be locked out for %(lock_mins)d minutes."
-msgstr ""
-"访问密钥 %(access_key)s 有错误 %(failures)d,认è¯å¤±è´¥å°†è¢«é”定 %(lock_mins)d "
-"分钟。"
-
-#, python-format
msgid "Address |%(address)s| is not allocated"
msgstr "åœ°å€ |%(address)s| 没有分é…"
@@ -977,15 +969,6 @@ msgstr ""
"Novaä¸å†æ”¯æŒä½ŽäºŽ%(version)s çš„VMware vCenter 。在13.0.0å‘布版本中,最å°"
"VCenter版本是%(version)s。"
-#, python-format
-msgid ""
-"Running Nova with a libvirt version less than %(version)s is deprecated. The "
-"required minimum version of libvirt will be raised to %(version)s in the "
-"13.0.0 release."
-msgstr ""
-"Novaä¸å†æ”¯æŒä½ŽäºŽ%(version)s çš„libvirt。在13.0.0å‘布版本中,最å°VCenter版本"
-"是%(version)s。"
-
msgid ""
"Running libvirt-lxc without user namespaces is dangerous. Containers spawned "
"by Nova will be run as the host's root user. It is highly suggested that "
@@ -1397,10 +1380,6 @@ msgid "multiple fixed_ips exist, using the first IPv4 fixed_ip: %s"
msgstr "存在多个固定ips,使用第一个IPv4 固定ip:%s"
#, python-format
-msgid "multiple fixed_ips exist, using the first: %s"
-msgstr "存在多个 fixed_ip,正在使用第一个:%s"
-
-#, python-format
msgid ""
"my_ip address (%(my_ip)s) was not found on any of the interfaces: %(ifaces)s"
msgstr "在任何接å£ï¼š%(ifaces)s上,my_ip åœ°å€ (%(my_ip)s) 没有找到。"
diff --git a/nova/locale/zh_CN/LC_MESSAGES/nova.po b/nova/locale/zh_CN/LC_MESSAGES/nova.po
index 698158f89e..0b336664db 100644
--- a/nova/locale/zh_CN/LC_MESSAGES/nova.po
+++ b/nova/locale/zh_CN/LC_MESSAGES/nova.po
@@ -1,22 +1,52 @@
-# Chinese (Simplified, China) translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
+# Translators:
+# Amos Huang <ch.linux.free@gmail.com>, 2013
+# XiaoYong Yuan <chbrian77@gmail.com>, 2013
+# Ying Chun Guo <daisy.ycguo@gmail.com>, 2013
+# donghua <me@lidonghua.com>, 2013
+# LIU Yulong <dragon889@163.com>, 2013
+# LIU Yulong <dragon889@163.com>, 2013
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011
+# hamo <hamo.by@gmail.com>, 2012
+# hanxue <leehanxue@gmail.com>, 2012
+# honglei, 2015
+# Jack River <ritksm@gmail.com>, 2013
+# kwang1971 <kwang1971@gmail.com>, 2014
+# kwang1971 <kwang1971@gmail.com>, 2014
+# Lee Anthony <imagineful@gmail.com>, 2013
+# Jack River <ritksm@gmail.com>, 2013
+# Shuwen SUN <chris-void@outlook.com>, 2014
+# Tom Fifield <tom@openstack.org>, 2013-2014
+# Xiao Xi LIU <liuxx@cn.ibm.com>, 2014
+# XiaoYong Yuan <chbrian77@gmail.com>, 2013
+# 颜海峰 <yanheven@gmail.com>, 2014
+# Yu Zhang, 2013
+# Yu Zhang, 2013
+# 汪军 <wwyyzz08@sina.com>, 2015
+# 颜海峰 <yanheven@gmail.com>, 2014
+# English translations for nova.
+# Daisy <guoyingc@cn.ibm.com>, 2015. #zanata
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# liujunpeng <liujunpeng@inspur.com>, 2015. #zanata
+# Daisy <guoyingc@cn.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-10-07 05:39+0000\n"
-"Last-Translator: liujunpeng <liujunpeng@inspur.com>\n"
-"Language: zh_Hans_CN\n"
-"Language-Team: Chinese (China)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
+"PO-Revision-Date: 2016-02-02 03:39+0000\n"
+"Last-Translator: Daisy <guoyingc@cn.ibm.com>\n"
+"Language: zh-CN\n"
+"Language-Team: Chinese (China)\n"
+"Plural-Forms: nplurals=1; plural=0\n"
"Generated-By: Babel 2.2.0\n"
+"X-Generator: Zanata 3.7.3\n"
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
@@ -216,9 +246,6 @@ msgstr "API 版本字符%(version)s格式无效。必须是大版本号.小版本
msgid "API version %(version)s is not supported on this method."
msgstr "这个方法ä¸æ”¯æŒ%(version)s版本的API。"
-msgid "Access key not provided"
-msgstr "访问密钥没有æä¾›"
-
msgid "Access list not available for public flavors."
msgstr "未æ供公用云主机类型的访问列表。"
@@ -816,10 +843,6 @@ msgstr "没有找到二进制 %(binary)s 在主机 %(host)s 上。"
msgid "Could not find config at %(path)s"
msgstr "在 %(path)s 找ä¸åˆ°é…置文件。"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "找ä¸åˆ°å¯†é’¥å¯¹ï¼š%s"
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr "无法找到虚拟机使用的数æ®å­˜å‚¨å¼•ç”¨ã€‚"
@@ -851,14 +874,6 @@ msgstr "未能上载映像 %(image_id)s"
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "无法连接到 %(interface)s 的本地IP:%(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"一å°æ—¶æ— æ³•åœæ­¢å®žä¾‹%(instance)s。当å‰è™šæ‹ŸæœºçŠ¶æ€ï¼š%(vm_state)s,当å‰ä»»åŠ¡çŠ¶æ€ï¼š"
-"%(task_state)s"
-
msgid "Create networks failed"
msgstr "创建网络失败"
@@ -1090,21 +1105,6 @@ msgstr "期望 uuid,但是接收到 %(uuid)s。"
msgid "Expected object of type: %s"
msgstr "期望的对象类型:%s"
-msgid "Expecting a list of resources"
-msgstr "期望资æºåˆ—表"
-
-msgid "Expecting a list of tagSets"
-msgstr "期望 tagSet 列表"
-
-msgid "Expecting both key and value to be set"
-msgstr "期望 key和value都被设置了"
-
-msgid "Expecting key to be set"
-msgstr "期望 key 被设置"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "期望 tagSet 是键值对"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "在影å­è¡¨ä¸­æœ‰é¢å¤–列%(table)s.%(column)s"
@@ -1266,10 +1266,6 @@ msgstr "无法挂起云主机:%(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "无法终止云主机:%(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "解æžkeystoneå“应失败:%s"
-
msgid "Failure prepping block device."
msgstr "准备å—设备失败。"
@@ -1348,13 +1344,6 @@ msgstr "云主机类型%(flavor_id)s中没有名为%(key)s的附加规格。"
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr ""
-"在å°è¯•äº† %(retries)d 次之åŽï¼Œè¿˜æ˜¯æ— æ³•æ›´æ–°æˆ–创建云主机类型 %(id)d 的附加规格。"
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr "项目 %(project_id)så·²ç»æ‹¥æœ‰å¯¹äºŽäº‘主机类型 %(flavor_id)s的访问æƒé™ã€‚"
@@ -1417,9 +1406,6 @@ msgstr "浮动IP %(address)s绑定失败。"
msgid "Floating IP allocate failed."
msgstr "浮动IP分é…失败。"
-msgid "Floating ip is not associated."
-msgstr "未关è”浮动 IP。"
-
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
"meta."
@@ -1585,9 +1571,6 @@ msgstr "超过镜像元数据限制"
msgid "Image model '%(image)s' is not supported"
msgstr "é•œåƒæ¨¡å¼ '%(image)s' ä¸æ”¯æŒ"
-msgid "Image must be available"
-msgstr "é•œåƒå¿…é¡»å¯ç”¨ã€‚"
-
msgid "Image not found."
msgstr "é•œåƒæ²¡æœ‰æ‰¾åˆ°ã€‚"
@@ -1810,9 +1793,6 @@ msgstr "接口 %(interface)s没有找到。"
msgid "Invalid Base 64 data for file %(path)s"
msgstr "文件%(path)sçš„Base 64æ•°æ®éžæ³•"
-msgid "Invalid CIDR"
-msgstr "CIDR 无效"
-
msgid "Invalid Connection Info"
msgstr "连接信æ¯æ— æ•ˆ"
@@ -1825,10 +1805,6 @@ msgid "Invalid IP format %s"
msgstr "无效IPæ ¼å¼%s"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "IPåè®® %(protocol)s 无效"
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "无效的IPåè®® %(protocol)s。"
@@ -2115,19 +2091,9 @@ msgid "Invalid usage_type: %s"
msgstr "usage_type: %s无效"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"值“%(ec2_instance_id)sâ€å¯¹äºŽ instanceId 无效。实例没有在根 (%(root)s) 处连接å·"
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "值“%sâ€å¯¹äºŽ force 无效。"
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "值对于“scheduler_max_attemptsâ€æ— æ•ˆï¼Œå¿…é¡» >= 1"
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "éžæ³•çš„Config Drive值: %(option)s"
@@ -2690,9 +2656,6 @@ msgstr "不存在任何请求主体"
msgid "No root disk defined."
msgstr "没有定义根ç£ç›˜ã€‚"
-msgid "No rule for the specified parameters."
-msgstr "对给定的å‚数无特定规则。"
-
msgid "No suitable network for migrate"
msgstr "对于è¿ç§»ï¼Œæ²¡æœ‰åˆé€‚的网络"
@@ -2726,10 +2689,6 @@ msgstr "无法为 %(host)s 获取可用端口"
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "无法绑定 %(host)s:%(port)d,%(error)s"
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "ä¸å…è®¸ä¸ºæ˜ åƒ %s 修改属性"
-
msgid "Not an rbd snapshot"
msgstr "ä¸æ˜¯ rbd å¿«ç…§"
@@ -2801,9 +2760,6 @@ msgstr "只有基于文件的SRs(ext/NFS)支持这个特性。SR %(uuid)s 是类
msgid "Only host parameter can be specified"
msgstr "åªèƒ½æŒ‡å®šä¸»æœº å‚æ•°"
-msgid "Only instances implemented"
-msgstr "åªæœ‰å®žä¾‹å®žçŽ°"
-
msgid "Only root certificate can be retrieved."
msgstr "åªæœ‰æ ¹è¯ä¹¦èƒ½è¢«èŽ·å–。"
@@ -3179,14 +3135,6 @@ msgid "Running Nova with parallels virt_type requires libvirt version %s"
msgstr "è¿è¡Œä¸²è¡Œvirt_typeçš„nova串行virt_type需è¦libvirt版本%s"
#, python-format
-msgid ""
-"Running Nova with qemu/kvm virt_type on s390/s390x requires libvirt version "
-"%(libvirt_ver)s and qemu version %(qemu_ver)s, or greater"
-msgstr ""
-"在s390/s390xè¿è¡Œå¸¦qemu/kvm virt_type çš„Nova需è¦libvirt版本%(libvirt_ver)s 并"
-"且qemu版本%(qemu_ver)s,或更高"
-
-#, python-format
msgid "Running cmd (subprocess): %s"
msgstr "正在è¿è¡Œcmd (subprocess):%s"
@@ -3355,9 +3303,6 @@ msgid ""
msgstr ""
"使用resize2fså‘下压缩文件系统失败,请检查您的ç£ç›˜ä¸Šæ˜¯å¦æœ‰è¶³å¤Ÿçš„剩余空间。"
-msgid "Signature not provided"
-msgstr "ç­¾å没有æä¾›"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "快照 %(snapshot_id)s 没有找到。"
@@ -3718,9 +3663,6 @@ msgstr "等待æ¥è‡ªå•å…ƒçš„å“应时å‘生超时"
msgid "Timeout while checking if we can live migrate to host: %s"
msgstr "当检查我们是å¦å¯ä»¥åœ¨çº¿è¿ç§»åˆ°ä¸»æœº%s时,超时。"
-msgid "Timestamp failed validation."
-msgstr "时间戳记未能通过验è¯ã€‚"
-
msgid "To and From ports must be integers"
msgstr "目的和æºç«¯å£å¿…须是整数"
@@ -3733,18 +3675,12 @@ msgid ""
"number generated."
msgstr "è¦ç”Ÿæˆçš„IP地å€å¤ªå¤šã€‚请å‡å°‘/%s æ¥å‡å°‘生æˆçš„数目。"
-msgid "Too many failed authentications."
-msgstr "认è¯å¤±è´¥è¿‡å¤š"
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr "类型和编ç å¿…须是ICMPå议类型"
msgid "UUID is required to delete Neutron Networks"
msgstr "删除Neutron网络需è¦UUID"
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "æ— æ³•å…³è” IP 地å€ï¼Œä¸å­˜åœ¨ä»»ä½• fixed_ip。"
-
msgid "Unable to authenticate Ironic client."
msgstr "ä¸èƒ½è®¤è¯Ironic客户端。"
@@ -4026,9 +3962,6 @@ msgstr "配置驱动器格式 %(format)s 未知。请选择下列其中一项:"
msgid "Unknown delete_info type %s"
msgstr "未知delete_info类型%s"
-msgid "Unknown error occurred."
-msgstr "å‘生未知错误。"
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "image_type=%s 未知"
@@ -4254,10 +4187,6 @@ msgid "Volume resource quota exceeded"
msgstr "超出å·èµ„æºé…é¢"
#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
-msgstr "å·è®¾ç½®å—大å°ï¼Œä½†æ˜¯å¿…é¡»libvirt '%s' 或更高"
-
-#, python-format
msgid ""
"Volume sets block size, but the current libvirt hypervisor '%s' does not "
"support custom block size"
@@ -4383,9 +4312,6 @@ msgstr "block_device_mapping必须是列表"
msgid "block_device_mapping_v2 must be a list"
msgstr "block_device_mapping_v2必须是列表"
-msgid "can't build a valid rule"
-msgstr "无法构建一个有效的规则"
-
msgid "cannot delete non-existent key"
msgstr "ä¸èƒ½åˆ é™¤ä¸å­˜åœ¨çš„键值"
@@ -4499,13 +4425,6 @@ msgstr "镜像"
msgid "image already mounted"
msgstr "é•œåƒå·²ç»æŒ‚è½½"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "%(now)s 处的 %(instance)s 的映åƒ"
-
-msgid "imageLocation is required"
-msgstr "éœ€è¦ imageLocation"
-
msgid "index"
msgstr "索引"
@@ -4587,9 +4506,6 @@ msgstr "nbd 设备 %s 没有出现"
msgid "nbd unavailable: module not loaded"
msgstr "NBDä¸å¯ç”¨ï¼šæ¨¡å—没有加载"
-msgid "need group_name or group_id"
-msgstr "éœ€è¦ group_name 或 group_id"
-
msgid "network"
msgstr "网络"
@@ -4618,15 +4534,9 @@ msgstr ""
msgid "onSharedStorage must be specified."
msgstr "必须指定onSharedStorage。"
-msgid "only group \"all\" is supported"
-msgstr "仅仅支æŒç»„\"all\""
-
msgid "operation time out"
msgstr "æ“作超时"
-msgid "operation_type must be add or remove"
-msgstr "operation_type必须添加或者移除"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr "os-getConsoleOutput çš„æ ¼å¼ä¸æ­£ç¡®ï¼Œæˆ–请求主体中缺少该项"
@@ -4664,9 +4574,6 @@ msgstr "没有找到rbd pyhon库"
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr "read_deleted åªèƒ½æ˜¯â€œnoâ€ã€â€œyesâ€æˆ–“onlyâ€å…¶ä¸­ä¸€é¡¹ï¼Œè€Œä¸èƒ½æ˜¯ %r"
-msgid "resource_id and tag are required"
-msgstr "resource_id 和 tag 是必需的"
-
msgid "rpc_port must be integer"
msgstr "rpc_port 必须是整数"
@@ -4729,9 +4636,6 @@ msgstr "不支持的字段:%s"
msgid "user"
msgstr "用户"
-msgid "user or group not specified"
-msgstr "用户或者组没有确定"
-
msgid "uuid"
msgstr "uuid"
diff --git a/nova/locale/zh_TW/LC_MESSAGES/nova.po b/nova/locale/zh_TW/LC_MESSAGES/nova.po
index 095a32783e..aa491e7b48 100644
--- a/nova/locale/zh_TW/LC_MESSAGES/nova.po
+++ b/nova/locale/zh_TW/LC_MESSAGES/nova.po
@@ -1,22 +1,30 @@
-# Chinese (Traditional, Taiwan) translations for nova.
-# Copyright (C) 2016 ORGANIZATION
+# Translations template for nova.
+# Copyright (C) 2015 ORGANIZATION
# This file is distributed under the same license as the nova project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2016.
#
+# Translators:
+# Chao-Hsiung Liao <pesder@gmail.com>, 2012
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011
+# Pellaeon Lin <nfsmwlin@gmail.com>, 2013
+# Pellaeon Lin <nfsmwlin@gmail.com>, 2013
+# Lucas Palm <lapalm@us.ibm.com>, 2015. #zanata
+# OpenStack Infra <zanata@openstack.org>, 2015. #zanata
+# Lucas Palm <lapalm@us.ibm.com>, 2016. #zanata
msgid ""
msgstr ""
-"Project-Id-Version: nova 13.0.0.0b2.dev521\n"
+"Project-Id-Version: nova 13.0.0.0b3.dev339\n"
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
-"POT-Creation-Date: 2016-01-08 06:11+0000\n"
-"PO-Revision-Date: 2015-09-06 10:17+0000\n"
-"Last-Translator: openstackjenkins <jenkins@openstack.org>\n"
-"Language: zh_Hant_TW\n"
-"Language-Team: Chinese (Taiwan)\n"
-"Plural-Forms: nplurals=1; plural=0\n"
+"POT-Creation-Date: 2016-02-08 05:38+0000\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=utf-8\n"
+"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.2.0\n"
+"PO-Revision-Date: 2016-02-03 07:15+0000\n"
+"Last-Translator: Lucas Palm <lapalm@us.ibm.com>\n"
+"Language: zh-TW\n"
+"Plural-Forms: nplurals=1; plural=0;\n"
+"Generated-By: Babel 2.0\n"
+"X-Generator: Zanata 3.7.3\n"
+"Language-Team: Chinese (Taiwan)\n"
#, python-format
msgid "%(address)s is not a valid IP v4/6 address."
@@ -32,10 +40,32 @@ msgid ""
msgstr "%(binary)s 嘗試直接存å–資料庫,但原則ä¸å®¹è¨±é€™æ¨£åš"
#, python-format
+msgid ""
+"%(desc)r\n"
+"command: %(cmd)r\n"
+"exit code: %(code)r\n"
+"stdout: %(stdout)r\n"
+"stderr: %(stderr)r"
+msgstr ""
+"%(desc)r\n"
+"指令:%(cmd)r\n"
+"çµæŸç¢¼ï¼š%(code)r\n"
+"標準輸出:%(stdout)r\n"
+"標準錯誤:%(stderr)r"
+
+#, python-format
msgid "%(err)s"
msgstr "%(err)s"
#, python-format
+msgid "%(field)s should not be part of the updates."
+msgstr "%(field)s ä¸æ‡‰æ˜¯æ›´æ–°çš„一部分。"
+
+#, python-format
+msgid "%(fieldname)s missing field type"
+msgstr "%(fieldname)s éºæ¼æ¬„ä½é¡žåž‹"
+
+#, python-format
msgid "%(host)s:%(port)s: Target closed"
msgstr "%(host)s:%(port)s:目標已關閉"
@@ -76,6 +106,10 @@ msgid "%(type)s hypervisor does not support PCI devices"
msgstr "%(type)s Hypervisor ä¸æ”¯æ´ PCI è£ç½®"
#, python-format
+msgid "%(typename)s in %(fieldname)s is not an instance of Enum"
+msgstr "%(fieldname)s 中的 %(typename)s ä¸æ˜¯åˆ—舉的實例"
+
+#, python-format
msgid "%(value_name)s must be <= %(max_value)d"
msgstr "%(value_name)s å¿…é ˆ <= %(max_value)d"
@@ -96,6 +130,10 @@ msgid "%(worker_name)s value of %(workers)s is invalid, must be greater than 0"
msgstr "%(workers)s 的 %(worker_name)s 值無效,必須大於 0"
#, python-format
+msgid "%r failed. Not Retrying."
+msgstr "%r 失敗。將ä¸é‡è©¦ã€‚"
+
+#, python-format
msgid "%r failed. Retrying."
msgstr "%r 失敗。正在é‡è©¦ã€‚"
@@ -124,6 +162,10 @@ msgid "%s must be either 'MANUAL' or 'AUTO'."
msgstr "%s 必須是 'MANUAL' 或 'AUTO'。"
#, python-format
+msgid "'%(other)s' should be an instance of '%(cls)s'"
+msgstr "'%(other)s' 應該是 '%(cls)s' 的實例"
+
+#, python-format
msgid "'%s' is either missing or empty."
msgstr "'%s' å·²éºæ¼æˆ–者是空的。"
@@ -148,6 +190,10 @@ msgstr "/%s 應該指定為單一ä½å€ï¼Œè€Œä¸æ˜¯æŒ‡å®šç‚º CIDR æ ¼å¼"
msgid "A CPU model name should not be set when a host CPU model is requested"
msgstr "è¦æ±‚主機 CPU 型號時,ä¸æ‡‰è©²è¨­å®š CPU 型號å稱"
+#, python-format
+msgid "A NetworkModel is required in field %s"
+msgstr "æ¬„ä½ %s éœ€è¦ NetworkModel"
+
msgid ""
"A unique ID given to each file system. This is value is set in Glance and "
"agreed upon here so that the operator knowns they are dealing with the same "
@@ -178,9 +224,6 @@ msgstr "API 版本字串 %(version)s 格式無效。格式必須為 MajorNum.Min
msgid "API version %(version)s is not supported on this method."
msgstr "此方法不支援 API %(version)s 版。"
-msgid "Access key not provided"
-msgstr "未提供存取金鑰"
-
msgid "Access list not available for public flavors."
msgstr "存取清單不適用於公用特性。"
@@ -208,6 +251,9 @@ msgstr "無法轉換位址。"
msgid "Address not specified"
msgstr "未指定位址"
+msgid "Affinity instance group policy was violated."
+msgstr "違反了親緣性實例群組原則。"
+
#, python-format
msgid "Agent does not support the call: %(method)s"
msgstr "代ç†ç¨‹å¼ä¸æ”¯æ´å‘¼å«ï¼š%(method)s"
@@ -269,6 +315,10 @@ msgstr "é•å了å親緣性實例群組原則。"
msgid "Architecture name '%(arch)s' is not recognised"
msgstr "未辨識架構å稱 '%(arch)s'"
+#, python-format
+msgid "Architecture name '%s' is not valid"
+msgstr "架構å稱 '%s' 無效"
+
msgid "Argument 'type' for reboot is not HARD or SOFT"
msgstr "é‡æ–°å•Ÿå‹•çš„引數 'type' ä¸æ˜¯ HARD 或 SOFT"
@@ -347,9 +397,19 @@ msgstr "錯誤的 volumeId æ ¼å¼ï¼švolumeId ä¸æ˜¯é©ç•¶çš„æ ¼å¼ (%s)"
msgid "Binary"
msgstr "二進ä½"
+#, python-format
+msgid ""
+"Binding failed for port %(port_id)s, please check neutron logs for more "
+"information."
+msgstr "é‡å°åŸ  %(port_id)s 的連çµå¤±æ•—,請檢查 Neutron 日誌,以å–得相關資訊。"
+
msgid "Blank components"
msgstr "空白元件"
+msgid ""
+"Blank volumes (source: 'blank', dest: 'volume') need to have non-zero size"
+msgstr "空白ç£å€ï¼ˆä¾†æºï¼š'blank',目的地:'volume')需è¦å…·æœ‰éžé›¶å¤§å°"
+
#, python-format
msgid "Block Device %(id)s is not bootable."
msgstr "å€å¡Šè£ç½® %(id)s ä¸å¯å•Ÿå‹•ã€‚"
@@ -432,6 +492,21 @@ msgstr "CPU 數目 %(cpunum)d å¤§æ–¼ä¸Šé™ %(cpumax)d"
msgid "CPU number %(cpuset)s is not assigned to any node"
msgstr "CPU 數目 %(cpuset)s 未指派給任何節點"
+#, python-format
+msgid "CPU pinning is not supported by the host: %(reason)s"
+msgstr "主機ä¸æ”¯æ´ CPU 固定:%(reason)s"
+
+#, python-format
+msgid ""
+"CPU set to pin/unpin %(requested)s must be a subset of known CPU set "
+"%(cpuset)s"
+msgstr ""
+"已設定固定/å–消固定 %(requested)s çš„ CPU,必須是下列已知 CPU 集的å­é›†ï¼š"
+"%(cpuset)s"
+
+msgid "Can not add access to a public flavor."
+msgstr "無法新增å°å…¬ç”¨ç‰¹æ€§çš„å­˜å–權。"
+
msgid "Can not find requested image"
msgstr "找ä¸åˆ°æ‰€è¦æ±‚的映åƒæª”"
@@ -498,6 +573,13 @@ msgstr "無法å°éŽ–具有已å°æ˜ ç£å€çš„移轉實例 %s"
msgid "Cannot call %(method)s on orphaned %(objtype)s object"
msgstr "無法å°å­¤ç«‹çš„ %(objtype)s ç‰©ä»¶å‘¼å« %(method)s"
+msgid ""
+"Cannot create default bittorrent URL without xenserver.torrent_base_url "
+"configuration option set."
+msgstr ""
+"如果沒有 xenserver.torrent_base_url é…ç½®é¸é …集,則無法建立é è¨­ bittorrent "
+"URL。"
+
msgid "Cannot execute /sbin/mount.sofs"
msgstr "無法執行 /sbin/mount.sofs"
@@ -583,6 +665,9 @@ msgstr "Cell 訊息已達到跳躍數上限:%(hop_count)s"
msgid "Cell name cannot be empty"
msgstr "Cell 名稱不能是空的"
+msgid "Cell name cannot contain '!', '.' or '@'"
+msgstr "Cell 名稱不能包含 '!'、'.' 或 '@'"
+
msgid "Cell type must be 'parent' or 'child'"
msgstr "Cell 類型必須是 'parent' 或 'child'"
@@ -641,6 +726,19 @@ msgid ""
msgstr ""
"é…置已è¦æ±‚明確的 CPU 型號,但ç¾è¡Œ libVirt Hypervisor '%s' ä¸æ”¯æ´é¸å– CPU 型號"
+#, python-format
+msgid ""
+"Conflict updating instance %(instance_uuid)s, but we were unable to "
+"determine the cause"
+msgstr "更新實例 %(instance_uuid)s 時發生è¡çªï¼Œä½†æ˜¯ç„¡æ³•åˆ¤å®šåŽŸå› "
+
+#, python-format
+msgid ""
+"Conflict updating instance %(instance_uuid)s. Expected: %(expected)s. "
+"Actual: %(actual)s"
+msgstr ""
+"更新實例 %(instance_uuid)s 時發生è¡çªã€‚é æœŸï¼š%(expected)s。實際:%(actual)s"
+
msgid "Conflicting policies configured!"
msgstr "所é…置的原則相è¡çªï¼"
@@ -724,10 +822,6 @@ msgstr "在主機 %(host)s 上找不到二進位檔 %(binary)s。"
msgid "Could not find config at %(path)s"
msgstr "在 %(path)s 處找不到配置"
-#, python-format
-msgid "Could not find key pair(s): %s"
-msgstr "找不到金鑰組:%s"
-
msgid "Could not find the datastore reference(s) which the VM uses."
msgstr "找不到 VM 所使用的資料儲存庫參照。"
@@ -759,14 +853,6 @@ msgstr "無法上傳映åƒæª” %(image_id)s"
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "無法å–å¾— %(interface)s çš„éˆçµæœ¬ç«¯ IP:%(ex)s"
-#, python-format
-msgid ""
-"Couldn't stop instance %(instance)s within 1 hour. Current vm_state: "
-"%(vm_state)s, current task_state: %(task_state)s"
-msgstr ""
-"在 1 å°æ™‚之內,無法åœæ­¢å¯¦ä¾‹ %(instance)s。ç¾è¡Œ vm_state:%(vm_state)s,ç¾è¡Œ "
-"task_state:%(task_state)s"
-
msgid "Create networks failed"
msgstr "建立網路失敗"
@@ -953,6 +1039,26 @@ msgstr ""
"%(error_code)s] %(ex)s"
#, python-format
+msgid ""
+"Error from libvirt while set password for username \"%(user)s\": [Error Code "
+"%(error_code)s] %(ex)s"
+msgstr ""
+"設定使用者å稱 \"%(user)s\" 的密碼時,libvirt 傳回了錯誤:[錯誤"
+"碼%(error_code)s] %(ex)s"
+
+#, python-format
+msgid ""
+"Error mounting %(device)s to %(dir)s in image %(image)s with libguestfs "
+"(%(e)s)"
+msgstr ""
+"使用 libguestfs å°‡ %(device)s è£è¼‰åˆ°æ˜ åƒæª” %(image)s 中的 %(dir)s 時發生錯誤"
+"(%(e)s)"
+
+#, python-format
+msgid "Error mounting %(image)s with libguestfs (%(e)s)"
+msgstr "è£è¼‰å…·æœ‰ libguestfs (%(e)s) çš„ %(image)s 時發生錯誤"
+
+#, python-format
msgid "Error when creating resource monitor: %(monitor)s"
msgstr "建立資æºç›£è¦–器 %(monitor)s 時發生錯誤"
@@ -975,6 +1081,10 @@ msgstr ""
"%(max_retries)d"
#, python-format
+msgid "Exceeded maximum number of retries. %(reason)s"
+msgstr "已超出é‡è©¦æ¬¡æ•¸ä¸Šé™ã€‚%(reason)s"
+
+#, python-format
msgid "Expected a uuid but received %(uuid)s."
msgstr "éœ€è¦ UUID,但收到 %(uuid)s。"
@@ -982,21 +1092,6 @@ msgstr "éœ€è¦ UUID,但收到 %(uuid)s。"
msgid "Expected object of type: %s"
msgstr "需è¦é¡žåž‹ç‚º %s 的物件"
-msgid "Expecting a list of resources"
-msgstr "é æœŸè³‡æºæ¸…å–®"
-
-msgid "Expecting a list of tagSets"
-msgstr "é æœŸ tagSet 清單"
-
-msgid "Expecting both key and value to be set"
-msgstr "é æœŸè¨­å®šç´¢å¼•éµåŠå€¼"
-
-msgid "Expecting key to be set"
-msgstr "é æœŸè¨­å®šç´¢å¼•éµ"
-
-msgid "Expecting tagSet to be key/value pairs"
-msgstr "tagSet é æœŸç‚ºéµå€¼çµ„"
-
#, python-format
msgid "Extra column %(table)s.%(column)s in shadow table"
msgstr "備份副本表格中存在é¡å¤–直欄 %(table)s.%(column)s"
@@ -1012,6 +1107,14 @@ msgid "Fail to validate provided extra specs keys. Expected string"
msgstr "無法驗證所æ供的é¡å¤–è¦æ ¼é‡‘鑰。é æœŸç‚ºå­—串"
#, python-format
+msgid "Failed to access port %(port_id)s: %(reason)s"
+msgstr "無法存å–埠 %(port_id)s:%(reason)s"
+
+#, python-format
+msgid "Failed to add bridge: %s"
+msgstr "無法新增橋接器:%s"
+
+#, python-format
msgid ""
"Failed to add deploy parameters on node %(node)s when provisioning the "
"instance %(instance)s"
@@ -1150,10 +1253,6 @@ msgstr "無法懸置實例:%(reason)s"
msgid "Failed to terminate instance: %(reason)s"
msgstr "無法終止實例:%(reason)s"
-#, python-format
-msgid "Failure parsing response from keystone: %s"
-msgstr "剖æžä¾†è‡ª Keystone 的回應失敗:%s "
-
msgid "Failure prepping block device."
msgstr "準備å€å¡Šè£ç½®æ™‚失敗。"
@@ -1181,6 +1280,10 @@ msgid "Filename of root Certificate Revocation List"
msgstr "主è¦æ†‘證撤銷清冊的檔å"
#, python-format
+msgid "Fixed IP %(ip)s is not a valid ip address for network %(network_id)s."
+msgstr "固定 IP %(ip)s ä¸æ˜¯ç¶²è·¯ %(network_id)s 的有效 IP ä½å€ã€‚"
+
+#, python-format
msgid "Fixed IP %s has been deleted"
msgstr "已刪除固定 IP %s"
@@ -1228,12 +1331,6 @@ msgstr "特性 %(flavor_id)s 沒有索引éµç‚º %(key)s çš„é¡å¤–è¦æ ¼ã€‚"
#, python-format
msgid ""
-"Flavor %(id)d extra spec cannot be updated or created after %(retries)d "
-"retries."
-msgstr "在é‡è©¦ %(retries)d 次之後,無法更新或建立特性 %(id)d é¡å¤–è¦æ ¼ã€‚"
-
-#, python-format
-msgid ""
"Flavor access already exists for flavor %(flavor_id)s and project "
"%(project_id)s combination."
msgstr "特性 %(flavor_id)s åŠå°ˆæ¡ˆ%(project_id)s 組åˆå·²å­˜åœ¨ç‰¹æ€§å­˜å–。"
@@ -1269,15 +1366,33 @@ msgstr "找ä¸åˆ°å稱為 %(flavor_name)s 的特性。"
msgid "Flavor with name %(name)s already exists."
msgstr "å稱為 %(name)s 的特性已存在。"
+#, python-format
+msgid ""
+"Flavor's disk is smaller than the minimum size specified in image metadata. "
+"Flavor disk is %(flavor_size)i bytes, minimum size is %(image_min_disk)i "
+"bytes."
+msgstr ""
+"特性ç£ç¢Ÿå°æ–¼æ˜ åƒæª” meta 資料中指定的大å°ä¸‹é™ã€‚特性ç£ç¢Ÿç‚º %(flavor_size)i ä½å…ƒ"
+"組,大å°ä¸‹é™ç‚º %(image_min_disk)i ä½å…ƒçµ„。"
+
+#, python-format
+msgid ""
+"Flavor's disk is too small for requested image. Flavor disk is "
+"%(flavor_size)i bytes, image is %(image_size)i bytes."
+msgstr ""
+"é‡å°æ‰€è¦æ±‚的映åƒæª”而言,特性ç£ç¢Ÿå¤ªå°ã€‚特性ç£ç¢Ÿç‚º%(flavor_size)i ä½å…ƒçµ„,映åƒ"
+"檔為 %(image_size)i ä½å…ƒçµ„。"
+
msgid "Flavor's memory is too small for requested image."
msgstr "特性的記憶體太å°ï¼Œè£ä¸ä¸‹æ‰€è¦æ±‚的映åƒæª”。"
+#, python-format
+msgid "Floating IP %(address)s association has failed."
+msgstr "浮動 IP %(address)s é—œè¯å¤±æ•—。"
+
msgid "Floating IP allocate failed."
msgstr "浮動 IP é…置失敗。"
-msgid "Floating ip is not associated."
-msgstr "未與浮動 IP 產生關è¯ã€‚"
-
msgid ""
"Forbidden to exceed flavor value of number of serial ports passed in image "
"meta."
@@ -1339,6 +1454,10 @@ msgstr ""
"主機 %(server)s 正在執行舊版本的 Nova,涉åŠåˆ°è©²ç‰ˆæœ¬çš„å³æ™‚移轉å¯èƒ½æœƒå°Žè‡´è³‡æ–™æµ"
"失。請å‡ç´š %(server)s 上的 Nova,然後å†è©¦ä¸€æ¬¡ã€‚"
+#, python-format
+msgid "Host '%(name)s' is not mapped to any cell"
+msgstr "主機 '%(name)s' 未å°æ˜ è‡³ä»»ä½• Cell"
+
msgid "Host PowerOn is not supported by the Hyper-V driver"
msgstr "Hyper-V 驅動程å¼ä¸æ”¯æ´ä¸»æ©Ÿ PowerOn"
@@ -1364,6 +1483,10 @@ msgid "Hypervisor driver does not support post_live_migration_at_source method"
msgstr "Hypervisor 驅動程å¼ä¸æ”¯æ´ post_live_migration_at_source 方法"
#, python-format
+msgid "Hypervisor virt type '%s' is not valid"
+msgstr "Hypervisor virt 類型 '%s' 無效"
+
+#, python-format
msgid "Hypervisor virtualization type '%(hv_type)s' is not recognised"
msgstr "未辨識 Hypervisor 虛擬化類型 '%(hv_type)s'"
@@ -1431,8 +1554,9 @@ msgstr "映åƒæª” meta 資料索引éµå¤ªé•·"
msgid "Image metadata limit exceeded"
msgstr "已超出映åƒæª” meta 資料é™åˆ¶"
-msgid "Image must be available"
-msgstr "映åƒæª”å¿…é ˆå¯ç”¨"
+#, python-format
+msgid "Image model '%(image)s' is not supported"
+msgstr "ä¸æ”¯æ´æ˜ åƒæª”模型 '%(image)s'"
msgid "Image not found."
msgstr "找ä¸åˆ°æ˜ åƒæª”。"
@@ -1547,6 +1671,10 @@ msgid "Instance %(instance_uuid)s does not specify a NUMA topology"
msgstr "實例 %(instance_uuid)s 未指定 NUMA 拓蹼"
#, python-format
+msgid "Instance %(instance_uuid)s does not specify a migration context."
+msgstr "實例 %(instance_uuid)s 未指定移轉環境定義。"
+
+#, python-format
msgid ""
"Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot %(method)s while "
"the instance is in this state."
@@ -1578,6 +1706,10 @@ msgstr "未連接實例 %s。"
msgid "Instance %s not found"
msgstr "找ä¸åˆ°å¯¦ä¾‹ %s"
+#, python-format
+msgid "Instance %s provisioning was aborted"
+msgstr "已中斷實例 %s 供應"
+
msgid "Instance could not be found"
msgstr "找ä¸åˆ°å¯¦ä¾‹"
@@ -1648,9 +1780,6 @@ msgstr "找ä¸åˆ°ä»‹é¢ %(interface)s。"
msgid "Invalid Base 64 data for file %(path)s"
msgstr "檔案 %(path)s 的 Base 64 資料無效"
-msgid "Invalid CIDR"
-msgstr "無效的 CIDR"
-
msgid "Invalid Connection Info"
msgstr "無效的連線資訊"
@@ -1663,10 +1792,6 @@ msgid "Invalid IP format %s"
msgstr "無效的 IP æ ¼å¼ %s"
#, python-format
-msgid "Invalid IP protocol %(protocol)s"
-msgstr "無效的 IP 通訊å”定 %(protocol)s"
-
-#, python-format
msgid "Invalid IP protocol %(protocol)s."
msgstr "無效的 IP 通訊å”定 %(protocol)s。"
@@ -1747,6 +1872,10 @@ msgid "Invalid entry: '%s'; Expecting list or dict"
msgstr "é …ç›® '%s' 無效;é æœŸæ¸…單或字典"
#, python-format
+msgid "Invalid event name %s"
+msgstr "事件å稱 %s 無效"
+
+#, python-format
msgid "Invalid event status `%s'"
msgstr "無效的事件狀態 `%s'"
@@ -1786,6 +1915,10 @@ msgid "Invalid id: %(volume_id)s (expecting \"i-...\")"
msgstr "無效的 ID:%(volume_id)s(é æœŸç‚º \"i-...\")"
#, python-format
+msgid "Invalid image format '%(format)s'"
+msgstr "映åƒæª”æ ¼å¼ '%(format)s' 無效"
+
+#, python-format
msgid "Invalid image href %(image_href)s."
msgstr "無效的映åƒæª” href %(image_href)s。"
@@ -1827,6 +1960,10 @@ msgid "Invalid key_name provided."
msgstr "æ供的 key_name 無效。"
#, python-format
+msgid "Invalid libvirt version %(version)s"
+msgstr "libvirt 版本 %(version)s 無效"
+
+#, python-format
msgid "Invalid memory page size '%(pagesize)s'"
msgstr "記憶體é é¢å¤§å° '%(pagesize)s' 無效"
@@ -1941,24 +2078,21 @@ msgid "Invalid usage_type: %s"
msgstr "usage_type %s 無效"
#, python-format
-msgid ""
-"Invalid value '%(ec2_instance_id)s' for instanceId. Instance does not have a "
-"volume attached at root (%(root)s)"
-msgstr ""
-"instanceId 的值 '%(ec2_instance_id)s' 無效。實例沒有在根目錄 (%(root)s) 處連"
-"接ç£å€ã€‚"
-
-#, python-format
msgid "Invalid value '%s' for force."
msgstr "強制值 '%s' 無效。"
-msgid "Invalid value for 'scheduler_max_attempts', must be >= 1"
-msgstr "'scheduler_max_attempts' 的值無效,必須 >= 1"
-
#, python-format
msgid "Invalid value for Config Drive option: %(option)s"
msgstr "「é…置驅動ã€é¸é … %(option)s 的值無效"
+#, python-format
+msgid ""
+"Invalid vcpu_pin_set config, one or more of the specified cpuset is not "
+"online. Online cpuset(s): %(online)s, requested cpuset(s): %(req)s"
+msgstr ""
+"vcpu_pin_set é…置無效,一個以上的指定 CPU 集ä¸åœ¨ç·šä¸Šã€‚線上 CPU 集:"
+"%(online)s,所è¦æ±‚çš„ CPU 集:%(req)s"
+
msgid "Invalid vcpu_pin_set config, out of hypervisor cpu range."
msgstr "無效的 vcpu_pin_set é…置,因為超出 Hypervisor CPU 範åœã€‚"
@@ -2070,6 +2204,12 @@ msgid ""
"<list entry name> sections"
msgstr "檔案系統清單,é…置於此檔案的 image_file_url:<清單項目å稱> å€æ®µä¸­"
+msgid ""
+"Live migration can not be used without shared storage except a booted from "
+"volume VM which does not have a local disk."
+msgstr ""
+"在沒有共用儲存體的情æ³ä¸‹ï¼Œç„¡æ³•ä½¿ç”¨å³æ™‚移轉,ä¸å«æ²’有本端ç£ç¢Ÿçš„å•Ÿå‹•ç£å€ VM。"
+
msgid "Live migration is supported starting with Hyper-V Server 2012"
msgstr "從 Hyper-V Server 2012 開始,支æ´å³æ™‚移轉"
@@ -2208,6 +2348,9 @@ msgstr "éºæ¼äº†åœç”¨åŽŸå› æ¬„ä½"
msgid "Missing flavorRef attribute"
msgstr "éºæ¼äº† flavorRef 屬性"
+msgid "Missing forced_down field"
+msgstr "éºæ¼ forced_down 欄ä½"
+
msgid "Missing imageRef attribute"
msgstr "éºæ¼äº† imageRef 屬性"
@@ -2295,6 +2438,10 @@ msgid "Netmask to push into openvpn config"
msgstr "è¦æŽ¨é€è‡³ openvpn é…置的網路é®ç½©"
#, python-format
+msgid "Network \"%(val)s\" is not valid in field %(attr)s"
+msgstr "網路 \"%(val)s\" åœ¨æ¬„ä½ %(attr)s 中無效"
+
+#, python-format
msgid "Network %(network_id)s could not be found."
msgstr "找ä¸åˆ°ç¶²è·¯ %(network_id)s。"
@@ -2367,6 +2514,10 @@ msgstr "æ–°ç£å€å¿…須分離æ‰èƒ½äº¤æ›ã€‚"
msgid "New volume must be the same size or larger."
msgstr "æ–°ç£å€å¿…須具有相åŒå¤§å°æˆ–者更大。"
+#, python-format
+msgid "No Block Device Mapping with id %(id)s."
+msgstr "沒有 ID 為 %(id)s çš„å€å¡Šè£ç½®å°æ˜ ã€‚"
+
msgid "No CIDR requested"
msgstr "未è¦æ±‚ CIDR"
@@ -2380,6 +2531,9 @@ msgstr "沒有è¦æ±‚內文"
msgid "No Unique Match Found."
msgstr "找ä¸åˆ°å”¯ä¸€ç›¸ç¬¦é …。"
+msgid "No access_url in connection_info. Cannot validate protocol"
+msgstr "connection_info 中沒有 access_url。無法驗證通訊å”定"
+
msgid "No adminPass was specified"
msgstr "未指定 adminPass"
@@ -2462,6 +2616,10 @@ msgstr "找ä¸åˆ° URL %s 的相符 ID。"
msgid "No more available networks."
msgstr "已無其他網路å¯ç”¨ã€‚"
+#, python-format
+msgid "No mount points found in %(root)s of %(image)s"
+msgstr "在 %(image)s çš„ %(root)s 中找ä¸åˆ°è£è¼‰é»ž"
+
msgid "No networks defined."
msgstr "未定義網路。"
@@ -2488,9 +2646,6 @@ msgstr "沒有è¦æ±‚內文"
msgid "No root disk defined."
msgstr "未定義根ç£ç¢Ÿã€‚"
-msgid "No rule for the specified parameters."
-msgstr "指定的åƒæ•¸æ²’有è¦å‰‡ã€‚"
-
msgid "No suitable network for migrate"
msgstr "沒有é©åˆæ–¼ç§»è½‰çš„網路"
@@ -2524,10 +2679,6 @@ msgstr "無法ç²å¾— %(host)s çš„å¯ç”¨åŸ "
msgid "Not able to bind %(host)s:%(port)d, %(error)s"
msgstr "ç„¡æ³•é€£çµ %(host)s:%(port)d,%(error)s"
-#, python-format
-msgid "Not allowed to modify attributes for image %s"
-msgstr "ä¸å®¹è¨±ä¿®æ”¹æ˜ åƒæª” %s 的屬性"
-
msgid "Not an rbd snapshot"
msgstr "ä¸æ˜¯ rbd Snapshot"
@@ -2573,6 +2724,10 @@ msgid "Old volume is attached to a different instance."
msgstr "已將舊ç£å€é€£æŽ¥è‡³å…¶ä»–實例。"
#, python-format
+msgid "One or more hosts already in availability zone(s) %s"
+msgstr "一個以上的主機已經ä½æ–¼å¯ç”¨æ€§å€åŸŸ %s 中"
+
+#, python-format
msgid ""
"Only %(value)s %(verb)s request(s) can be made to %(uri)s every "
"%(unit_string)s."
@@ -2595,9 +2750,6 @@ msgstr ""
msgid "Only host parameter can be specified"
msgstr "åªèƒ½æŒ‡å®šä¸»æ©Ÿåƒæ•¸"
-msgid "Only instances implemented"
-msgstr "僅實作了實例"
-
msgid "Only root certificate can be retrieved."
msgstr "åªèƒ½æ“·å–主è¦æ†‘證。"
@@ -2674,6 +2826,15 @@ msgid "Page size %(pagesize)s is not supported by the host."
msgstr "主機ä¸æ”¯æ´é é¢å¤§å° %(pagesize)s。"
#, python-format
+msgid ""
+"Parameters %(missing_params)s not present in vif_details for vif %(vif_id)s. "
+"Check your Neutron configuration to validate that the macvtap parameters are "
+"correct."
+msgstr ""
+"åƒæ•¸ %(missing_params)s 未呈ç¾åœ¨ vif %(vif_id)s çš„ vif_details 中。 請檢查 "
+"Neutron é…ç½®ï¼Œä»¥ç¢ºèª macvtap åƒæ•¸æ˜¯æ­£ç¢ºçš„。"
+
+#, python-format
msgid "Path %s must be LVM logical volume"
msgstr "路徑 %s 必須是 LVM é‚輯ç£å€"
@@ -2757,6 +2918,10 @@ msgstr "ä¸æ”¯æ´æ‰€æ供的監視器動作 (%(action)s)。"
msgid "QEMU guest agent is not enabled"
msgstr "未啟用 QEMU 訪客代ç†ç¨‹å¼"
+#, python-format
+msgid "Quiescing is not supported in instance %(instance_id)s"
+msgstr "實例 %(instance_id)s ä¸æ”¯æ´éœæ­¢"
+
msgid "Quota"
msgstr "é…é¡"
@@ -2768,6 +2933,14 @@ msgid "Quota could not be found"
msgstr "找ä¸åˆ°é…é¡"
#, python-format
+msgid ""
+"Quota exceeded for %(overs)s: Requested %(req)s, but already used %(used)s "
+"of %(allowed)s %(overs)s"
+msgstr ""
+"%(overs)s 已超出é…é¡ï¼šè¦æ±‚ %(req)s,但已經使用了 %(used)s %(allowed)s "
+"%(overs)s"
+
+#, python-format
msgid "Quota exceeded for resources: %(overs)s"
msgstr "資æºå·²è¶…出é…é¡ï¼š%(overs)s"
@@ -2820,6 +2993,12 @@ msgstr ""
#, python-format
msgid ""
+"Quota limit %(limit)s for %(resource)s must be in the range of -1 and "
+"%(max)s."
+msgstr "%(resource)s çš„é…é¡é™åˆ¶ %(limit)s å¿…é ˆä½æ–¼ -1 å’Œ%(max)s 範åœå…§ã€‚"
+
+#, python-format
+msgid ""
"Quota limit %(limit)s for %(resource)s must be less than or equal to "
"%(maximum)s."
msgstr "%(resource)s çš„é…é¡é™åˆ¶ %(limit)s å¿…é ˆå°æ–¼æˆ–等於%(maximum)s。"
@@ -2847,6 +3026,14 @@ msgstr "找ä¸åˆ°å°ˆæ¡ˆ %(project_id)s çš„é…é¡ç”¨é‡ã€‚"
msgid "Reached maximum number of retries trying to unplug VBD %s"
msgstr "嘗試拔除 VBD %s 時é”到了é‡è©¦æ¬¡æ•¸ä¸Šé™"
+#, python-format
+msgid ""
+"Relative blockcommit support was not detected. Libvirt '%s' or later is "
+"required for online deletion of file/network storage-backed volume snapshots."
+msgstr ""
+"åµæ¸¬ä¸åˆ°ç›¸å°çš„å€å¡Šç¢ºå®šæ”¯æ´ã€‚線上刪除以檔案/網路儲存體為基礎的ç£å€ Snapshot 需"
+"è¦ Libvirt '%s' 或更新版本。"
+
msgid "Request body and URI mismatch"
msgstr "è¦æ±‚內文與 URI ä¸ç¬¦"
@@ -3086,10 +3273,21 @@ msgstr "主機為 %(host)s 且二進ä½æª”為 %(binary)s çš„æœå‹™å·²å­˜åœ¨ã€‚"
msgid "Service with host %(host)s topic %(topic)s exists."
msgstr "主機為 %(host)s 且主題為 %(topic)s çš„æœå‹™å·²å­˜åœ¨ã€‚"
+msgid "Set admin password is not supported"
+msgstr "ä¸æ”¯æ´è¨­å®šç®¡ç†å¯†ç¢¼"
+
#, python-format
msgid "Shadow table with name %(name)s already exists."
msgstr "å稱為 %(name)s 的備份副本表格已存在。"
+#, python-format
+msgid "Share '%s' is not supported"
+msgstr "ä¸æ”¯æ´å…±ç”¨ '%s'"
+
+#, python-format
+msgid "Share level '%s' cannot have share configured"
+msgstr "共用層次 '%s' ä¸èƒ½é…置共用"
+
msgid "Should we use a CA for each project?"
msgstr "是å¦éœ€è¦ä¸€å€‹project 使用一個CA"
@@ -3099,9 +3297,6 @@ msgid ""
msgstr ""
"使用 resize2fs 來縮å°æª”案系統時失敗,請檢查ç£ç¢Ÿä¸Šæ˜¯å¦å…·æœ‰è¶³å¤ çš„å¯ç”¨ç©ºé–“。"
-msgid "Signature not provided"
-msgstr "未æ供簽章"
-
#, python-format
msgid "Snapshot %(snapshot_id)s could not be found."
msgstr "找ä¸åˆ° Snapshot %(snapshot_id)s。"
@@ -3216,10 +3411,16 @@ msgstr ""
"上述錯誤å¯èƒ½æœƒé¡¯ç¤ºå°šæœªå»ºç«‹è³‡æ–™åº«ã€‚\n"
"請使用 'nova-manage db sync' 來建立資料庫,然後å†åŸ·è¡Œæ­¤æŒ‡ä»¤ã€‚"
+msgid "The backlog must be more than 0"
+msgstr "待辦事項必須大於 0"
+
#, python-format
msgid "The console port range %(min_port)d-%(max_port)d is exhausted."
msgstr "主控å°åŸ ç¯„åœ %(min_port)d-%(max_port)d 已耗盡。"
+msgid "The created instance's disk would be too small."
+msgstr "已建立實例的ç£ç¢Ÿå°‡å¤ªå°ã€‚"
+
msgid "The current driver does not support preserving ephemeral partitions."
msgstr "ç¾è¡Œé©…動程å¼ä¸æ”¯æ´ä¿ç•™æš«æ™‚分割å€ã€‚"
@@ -3230,6 +3431,13 @@ msgstr "é è¨­ PBM 原則ä¸å­˜åœ¨æ–¼å¾Œç«¯ä¸Šã€‚"
msgid "The firewall filter for %s does not exist"
msgstr "%s 的防ç«ç‰†éŽæ¿¾å™¨ä¸å­˜åœ¨"
+msgid "The floating IP request failed with a BadRequest"
+msgstr "浮動 IP è¦æ±‚失敗,發生 BadRequest"
+
+#, python-format
+msgid "The group %(group_name)s must be configured with an id."
+msgstr "必須使用 ID 來é…置群組 %(group_name)s。"
+
msgid "The input is not a string or unicode"
msgstr "輸入ä¸æ˜¯å­—串或 Unicode"
@@ -3277,6 +3485,10 @@ msgid ""
msgstr ""
"網路範åœä¸å¤ å¤§ï¼Œä¸é©åˆ %(num_networks)s 個網路。網路大å°ç‚º %(network_size)s"
+#, python-format
+msgid "The number of defined ports: %(ports)d is over the limit: %(quota)d"
+msgstr "所定義的埠數目 %(ports)d 超出é™åˆ¶ï¼š%(quota)d"
+
msgid "The only partition should be partition 1."
msgstr "唯一的分割å€æ‡‰è©²æ˜¯åˆ†å‰²å€ 1。"
@@ -3314,6 +3526,10 @@ msgid ""
"The service from servicegroup driver %(driver)s is temporarily unavailable."
msgstr "來自 ServiceGroup é©…å‹•ç¨‹å¼ %(driver)s çš„æœå‹™æš«æ™‚無法使用。"
+#, python-format
+msgid "The specified cluster '%s' was not found in vCenter"
+msgstr "在 vCenter 中找ä¸åˆ°æŒ‡å®šçš„å¢é›† '%s'"
+
msgid ""
"The string containing the reason for disabling the service contains invalid "
"characters or is too long."
@@ -3393,6 +3609,15 @@ msgid "There are not enough hosts available."
msgstr "沒有足夠的å¯ç”¨ä¸»æ©Ÿã€‚"
#, python-format
+msgid ""
+"There are still %(count)i unmigrated flavor records. Migration cannot "
+"continue until all instance flavor records have been migrated to the new "
+"format. Please run `nova-manage db migrate_flavor_data' first."
+msgstr ""
+"ä»æœ‰ %(count)i 個未移轉的特性記錄。移轉無法繼續,直到將所有實例特性記錄都移轉"
+"為新的格å¼ç‚ºæ­¢ã€‚請先執行 `nova-manage db migrate_flavor_data'。"
+
+#, python-format
msgid "There is no such action: %s"
msgstr "沒有這樣的動作:%s"
@@ -3429,8 +3654,9 @@ msgstr "等待建立è£ç½® %s 時發生逾時"
msgid "Timeout waiting for response from cell"
msgstr "等候 Cell 回應時發生逾時"
-msgid "Timestamp failed validation."
-msgstr "時間戳記驗證失敗。"
+#, python-format
+msgid "Timeout while checking if we can live migrate to host: %s"
+msgstr "在檢查是å¦å¯ä»¥å³æ™‚移轉至主機時逾時:%s"
msgid "To and From ports must be integers"
msgstr "目標埠和來æºåŸ å¿…須是整數"
@@ -3444,18 +3670,12 @@ msgid ""
"number generated."
msgstr "將產生太多的 IP ä½å€ã€‚請增大 /%s 以減少所產生的數目。"
-msgid "Too many failed authentications."
-msgstr "鑑別失敗太多次。"
-
msgid "Type and Code must be integers for ICMP protocol type"
msgstr "ICMP 通訊å”定類型的類型åŠä»£ç¢¼å¿…須是整數"
msgid "UUID is required to delete Neutron Networks"
msgstr "éœ€è¦ UUID æ‰èƒ½åˆªé™¤ã€ŒNeutron 網路ã€"
-msgid "Unable to associate IP Address, no fixed_ips."
-msgstr "無法與 IP ä½å€ç”¢ç”Ÿé—œè¯ï¼Œæ²’有 fixed_ip。"
-
msgid "Unable to authenticate Ironic client."
msgstr "無法鑑別 Ironic 用戶端。"
@@ -3574,6 +3794,12 @@ msgstr "無法å–å¾— DNS 網域"
msgid "Unable to get dns entry"
msgstr "無法å–å¾— DNS é …ç›®"
+msgid "Unable to get host UUID: /etc/machine-id does not exist"
+msgstr "無法å–得主機 UUID:/etc/machine-id ä¸å­˜åœ¨"
+
+msgid "Unable to get host UUID: /etc/machine-id is empty"
+msgstr "無法å–得主機 UUID:/etc/machine-id 是空的"
+
msgid "Unable to get rdp console, functionality not implemented"
msgstr "無法å–å¾— rdp 主控å°ï¼ŒåŠŸèƒ½æœªå¯¦ä½œ"
@@ -3729,9 +3955,6 @@ msgstr "ä¸æ˜Žçš„é…ç½®ç£ç¢Ÿæ©Ÿæ ¼å¼ %(format)s。請é¸å– iso9660 或 vfat ç
msgid "Unknown delete_info type %s"
msgstr "ä¸æ˜Žçš„ delete_info é¡žåž‹ %s"
-msgid "Unknown error occurred."
-msgstr "發生ä¸æ˜ŽéŒ¯èª¤ã€‚"
-
#, python-format
msgid "Unknown image_type=%s"
msgstr "ä¸æ˜Žçš„ image_type = %s"
@@ -3866,6 +4089,10 @@ msgid ""
msgstr ""
"åƒæ•¸ Group %(property)s 的值 (%(value)s) 無效。內容僅é™æ–¼'%(allowed)s'。"
+#, python-format
+msgid "Value must be >= 0 for field %s"
+msgstr "æ¬„ä½ %s 的值必須大於或等於 0"
+
msgid "Value required for 'scality_sofs_config'"
msgstr "'scality_sofs_config' 需è¦å€¼"
@@ -3898,6 +4125,10 @@ msgid "Virtual machine mode '%(vmmode)s' is not recognised"
msgstr "æœªè¾¨è­˜è™›æ“¬æ©Ÿå™¨æ¨¡å¼ '%(vmmode)s'"
#, python-format
+msgid "Virtual machine mode '%s' is not valid"
+msgstr "è™›æ“¬æ©Ÿå™¨æ¨¡å¼ '%s' 無效"
+
+#, python-format
msgid ""
"Virtual switch associated with the network adapter %(adapter)s not found."
msgstr "找ä¸åˆ°èˆ‡ç¶²è·¯é…æŽ¥å¡ %(adapter)s 相關è¯çš„虛擬交æ›å™¨ã€‚"
@@ -3914,18 +4145,39 @@ msgid "Volume %(volume_id)s could not be found."
msgstr "找ä¸åˆ°ç£å€ %(volume_id)s。"
#, python-format
+msgid ""
+"Volume %(volume_id)s did not finish being created even after we waited "
+"%(seconds)s seconds or %(attempts)s attempts. And its status is "
+"%(volume_status)s."
+msgstr ""
+"å³ä½¿åœ¨ç­‰å¾… %(seconds)s 秒或者嘗試 %(attempts)s 次之後,也未完æˆå»ºç«‹ç£å€ "
+"%(volume_id)s。並且它的狀態是%(volume_status)s。"
+
+#, python-format
msgid "Volume %(volume_id)s is not attached to anything"
msgstr "ç£å€ %(volume_id)s 未連接至任何項目"
msgid "Volume does not belong to the requested instance."
msgstr "ç£å€ä¸å±¬æ–¼æ‰€è¦æ±‚的實例。"
+#, python-format
+msgid ""
+"Volume encryption is not supported for %(volume_type)s volume %(volume_id)s"
+msgstr "%(volume_type)s çš„ç£å€ %(volume_id)s ä¸æ”¯æ´ç£å€åŠ å¯†"
+
+#, python-format
+msgid ""
+"Volume is smaller than the minimum size specified in image metadata. Volume "
+"size is %(volume_size)i bytes, minimum size is %(image_min_disk)i bytes."
+msgstr ""
+"ç£å€å°æ–¼æ˜ åƒæª” meta 資料中指定的大å°ä¸‹é™ã€‚ç£å€å¤§å°ç‚º %(volume_size)i ä½å…ƒçµ„,"
+"大å°ä¸‹é™ç‚º %(image_min_disk)i ä½å…ƒçµ„。"
+
msgid "Volume must be attached in order to detach."
msgstr "ç£å€å¿…須連接æ‰èƒ½åˆ†é›¢ã€‚"
-#, python-format
-msgid "Volume sets block size, but libvirt '%s' or later is required."
-msgstr "ç”±ç£å€è¨­å®šå€å¡Šå¤§å°ï¼Œä½†éœ€è¦ libVirt '%s' 或更新版本。"
+msgid "Volume resource quota exceeded"
+msgstr "已超出ç£å€è³‡æºé…é¡"
#, python-format
msgid ""
@@ -3968,12 +4220,18 @@ msgstr "åœ¨è³‡æº %(res)s 上使用了錯誤的é…é¡æ–¹æ³• %(method)s"
msgid "Wrong type of hook method. Only 'pre' and 'post' type allowed"
msgstr "連çµé‰¤æ–¹æ³•é¡žåž‹éŒ¯èª¤ã€‚僅容許 'pre' åŠ 'post' é¡žåž‹"
+msgid "X-Forwarded-For is missing from request."
+msgstr "è¦æ±‚éºæ¼äº† X-Forwarded-For。"
+
msgid "X-Instance-ID header is missing from request."
msgstr "è¦æ±‚éºæ¼äº† X-Instance-ID 標頭。"
msgid "X-Instance-ID-Signature header is missing from request."
msgstr "è¦æ±‚中éºæ¼ X-Instance-ID-Signature 標頭。"
+msgid "X-Metadata-Provider is missing from request."
+msgstr "è¦æ±‚éºæ¼äº† X-Metadata-Provider。"
+
msgid "X-Tenant-ID header is missing from request."
msgstr "è¦æ±‚éºæ¼äº† X-Tenant-ID 標頭。"
@@ -3999,6 +4257,16 @@ msgstr "應該指定 images_volume_group 旗標以使用 LVM 映像檔。"
msgid ""
"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
"your destination node does not support retrieving listen addresses. In "
+"order for live migration to work properly you must either disable serial "
+"console or upgrade your libvirt version."
+msgstr ""
+"您的 Libvirt 版本不支援 VIR_DOMAIN_XML_MIGRATABLE 旗標,或者您的目的地節點不"
+"支援擷取接聽位址。如果要讓即時移轉正常工作,您必須停用序列主控台,或者升級 "
+"libvirt 版本。"
+
+msgid ""
+"Your libvirt version does not support the VIR_DOMAIN_XML_MIGRATABLE flag or "
+"your destination node does not support retrieving listen addresses. In "
"order for live migration to work properly, you must configure the graphics "
"(VNC and/or SPICE) listen addresses to be either the catch-all address "
"(0.0.0.0 or ::) or the local address (127.0.0.1 or ::1)."
@@ -4038,9 +4306,6 @@ msgstr "block_device_mapping 必須是清單"
msgid "block_device_mapping_v2 must be a list"
msgstr "block_device_mapping_v2 必須是清單"
-msgid "can't build a valid rule"
-msgstr "無法建置有效è¦å‰‡"
-
msgid "cannot delete non-existent key"
msgstr "無法刪除ä¸å­˜åœ¨çš„索引éµ"
@@ -4050,6 +4315,9 @@ msgstr "無法儲存任æ„索引éµ"
msgid "cannot understand JSON"
msgstr "無法ç†è§£ JSON"
+msgid "cell_uuid must be set"
+msgstr "必須設定 cell_uuid"
+
msgid "clone() is not implemented"
msgstr "未實作 clone()"
@@ -4151,13 +4419,6 @@ msgstr "映像檔"
msgid "image already mounted"
msgstr "已裝載映像檔"
-#, python-format
-msgid "image of %(instance)s at %(now)s"
-msgstr "(%(now)s) 處的 %(instance)s 映像檔"
-
-msgid "imageLocation is required"
-msgstr "需要 imageLocation"
-
msgid "index"
msgstr "索引"
@@ -4239,9 +4500,6 @@ msgstr "NBD 裝置 %s 未顯示"
msgid "nbd unavailable: module not loaded"
msgstr "NBD 無法使用:未載入模組"
-msgid "need group_name or group_id"
-msgstr "需要 group_name 或 group_id"
-
msgid "network"
msgstr "網路"
@@ -4259,18 +4517,21 @@ msgstr "節點"
msgid "not able to execute ssh command: %s"
msgstr "無法執行 SSH 指令:%s"
+msgid ""
+"nova-idmapshift is a tool that properly sets the ownership of a filesystem "
+"for use with linux user namespaces. This tool can only be used with linux "
+"lxc containers. See the man page for details."
+msgstr ""
+"nova-idmapshift 是一個工具,用來é©ç•¶åœ°è¨­å®šæª”案系統的所有權,以與 Linux 使用者"
+"å稱空間æ­é…使用。此工具åªèƒ½èˆ‡ Linuxlxc 儲存器æ­é…使用。如需詳細資料,請åƒé–±"
+"線上指令說明。"
+
msgid "onSharedStorage must be specified."
msgstr "必須指定 onSharedStorage。"
-msgid "only group \"all\" is supported"
-msgstr "僅支æ´ç¾¤çµ„ \"all\""
-
msgid "operation time out"
msgstr "作業逾時"
-msgid "operation_type must be add or remove"
-msgstr "operation_type 必須是新增或移除"
-
msgid "os-getConsoleOutput malformed or missing from request body"
msgstr "è¦æ±‚內文中 os-getConsoleOutput æ ¼å¼ä¸æ­£ç¢ºæˆ–者已éºæ¼"
@@ -4308,9 +4569,6 @@ msgstr "找ä¸åˆ° rbd Python 程å¼åº«"
msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r"
msgstr "read_deleted åªèƒ½æ˜¯ 'no'ã€'yes' 或 'only' 其中之一,ä¸èƒ½æ˜¯ %r"
-msgid "resource_id and tag are required"
-msgstr "éœ€è¦ resource_id åŠæ¨™ç±¤"
-
msgid "rpc_port must be integer"
msgstr "rpc_port 必須是整數"
@@ -4373,9 +4631,6 @@ msgstr "不支援的欄位:%s"
msgid "user"
msgstr "使用者"
-msgid "user or group not specified"
-msgstr "未指定使用者或群組"
-
msgid "uuid"
msgstr "UUID"
diff --git a/nova/network/__init__.py b/nova/network/__init__.py
index 1c5baab6a0..27a0a802c2 100644
--- a/nova/network/__init__.py
+++ b/nova/network/__init__.py
@@ -29,7 +29,5 @@ oslo_config.cfg.CONF.register_opts(_network_opts)
def API(skip_policy_check=False):
network_api_class = oslo_config.cfg.CONF.network_api_class
- if 'quantumv2' in network_api_class:
- network_api_class = network_api_class.replace('quantumv2', 'neutronv2')
cls = importutils.import_class(network_api_class)
return cls(skip_policy_check=skip_policy_check)
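
With the quantumv2 compatibility shim removed, API() is a plain dotted-path factory: the configured class string is resolved at runtime by importutils.import_class. A minimal standard-library sketch of that resolution step (the helper name here is illustrative, not Nova's):

    import importlib

    def import_class(dotted_path):
        # "package.module.ClassName" -> (module path, attribute name)
        module_path, _, class_name = dotted_path.rpartition('.')
        module = importlib.import_module(module_path)
        return getattr(module, class_name)

    # e.g. import_class('collections.OrderedDict')() builds an instance
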
diff --git a/nova/network/api.py b/nova/network/api.py
index df990810b6..6ab0c2b48a 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -224,8 +224,8 @@ class API(base_api.NetworkAPI):
instance_id=orig_instance_uuid)
LOG.info(_LI('re-assign floating IP %(address)s from '
'instance %(instance_id)s'), msg_dict)
- orig_instance = objects.Instance.get_by_uuid(context,
- orig_instance_uuid)
+ orig_instance = objects.Instance.get_by_uuid(
+ context, orig_instance_uuid, expected_attrs=['flavor'])
# purge cached nw info for the original instance
base_api.update_instance_cache_with_nw_info(self, context,
@@ -486,7 +486,8 @@ class API(base_api.NetworkAPI):
# and instance.host is not yet or is no longer equal to
args = {'instance_id': instance.id,
'host': host,
- 'teardown': teardown}
+ 'teardown': teardown,
+ 'instance': instance}
self.network_rpcapi.setup_networks_on_host(context, **args)
diff --git a/nova/network/l3.py b/nova/network/l3.py
index 3a7a2d80a6..c13e6e2596 100644
--- a/nova/network/l3.py
+++ b/nova/network/l3.py
@@ -31,11 +31,11 @@ class L3Driver(object):
"""Set up basic L3 networking functionality."""
raise NotImplementedError()
- def initialize_network(self, network):
+ def initialize_network(self, cidr, is_external):
"""Enable rules for a specific network."""
raise NotImplementedError()
- def initialize_gateway(self, network):
+ def initialize_gateway(self, network_ref):
"""Set up a gateway on this network."""
raise NotImplementedError()
@@ -148,7 +148,7 @@ class NullL3(L3Driver):
def is_initialized(self):
return True
- def initialize_network(self, cidr):
+ def initialize_network(self, cidr, is_external):
pass
def initialize_gateway(self, network_ref):
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 9b9d3862f3..088948544d 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -1376,9 +1376,10 @@ def create_ovs_vif_port(bridge, dev, iface_id, mac, instance_id):
_set_device_mtu(dev)
-def delete_ovs_vif_port(bridge, dev):
+def delete_ovs_vif_port(bridge, dev, delete_dev=True):
_ovs_vsctl(['--', '--if-exists', 'del-port', bridge, dev])
- delete_net_dev(dev)
+ if delete_dev:
+ delete_net_dev(dev)
def ovs_set_vhostuser_port_type(dev):
@@ -1413,6 +1414,20 @@ def create_tap_dev(dev, mac_address=None):
check_exit_code=[0, 2, 254])
+def create_fp_dev(dev, sockpath, sockmode):
+ if not device_exists(dev):
+ utils.execute('fp-vdev', 'add', dev, '--sockpath', sockpath,
+ '--sockmode', sockmode, run_as_root=True)
+ _set_device_mtu(dev)
+ utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True,
+ check_exit_code=[0, 2, 254])
+
+
+def delete_fp_dev(dev):
+ if device_exists(dev):
+ utils.execute('fp-vdev', 'del', dev, run_as_root=True)
+
+
def delete_net_dev(dev):
"""Delete a network device only if it exists."""
if device_exists(dev):
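
The new create_fp_dev/delete_fp_dev helpers follow the same existence-guarded style as the surrounding functions, so repeated plug/unplug calls stay idempotent. A hedged sketch of the shared pattern, with device_exists() and run() standing in for the module's real helpers:

    def ensure_device(dev, create_cmd):
        # create only when the device is absent; safe to call twice
        if not device_exists(dev):
            run(create_cmd)

    def remove_device(dev, delete_cmd):
        # delete only when the device is present; safe to call twice
        if device_exists(dev):
            run(delete_cmd)
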
@@ -1948,8 +1963,6 @@ class NeutronLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
bridge = self.BRIDGE_NAME_PREFIX + str(network['uuid'][0:11])
return bridge
-# provide compatibility with existing configs
-QuantumLinuxBridgeInterfaceDriver = NeutronLinuxBridgeInterfaceDriver
iptables_manager = IptablesManager()
diff --git a/nova/network/manager.py b/nova/network/manager.py
index c6df78709c..1a44a22e00 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -249,7 +249,7 @@ class NetworkManager(manager.Manager):
The one at a time part is to flatten the layout to help scale
"""
- target = messaging.Target(version='1.15')
+ target = messaging.Target(version='1.16')
# If True, this manager requires VIF to create a bridge.
SHOULD_CREATE_BRIDGE = False
@@ -1281,9 +1281,9 @@ class NetworkManager(manager.Manager):
used_subnets = [net.cidr for net in nets]
def find_next(subnet):
- next_subnet = next(subnet)
+ next_subnet = subnet.next()
while next_subnet in subnets_v4:
- next_subnet = next(next_subnet)
+ next_subnet = next_subnet.next()
if next_subnet in fixed_net_v4:
return next_subnet
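
The switch from next(subnet) to subnet.next() matters because netaddr's IPNetwork exposes next() as an ordinary method returning the adjacent subnet; the object is not an iterator, so under Python 3 the builtin next() raises TypeError (Python 2's builtin delegated to the method, masking the problem). A self-contained illustration of the distinction (Counter is invented for the example):

    class Counter(object):
        def __init__(self, value):
            self.value = value

        def next(self):
            # plain method, unrelated to the iterator protocol
            return Counter(self.value + 1)

    c = Counter(1)
    c2 = c.next()   # works: returns Counter(2)
    # next(c)       # Python 3: TypeError, Counter is not an iterator
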
@@ -1505,7 +1505,7 @@ class NetworkManager(manager.Manager):
"""Calls allocate_fixed_ip once for each network."""
raise NotImplementedError()
- def setup_networks_on_host(self, context, instance_id, host,
+ def setup_networks_on_host(self, context, instance_id, host, instance=None,
teardown=False):
"""calls setup/teardown on network hosts for an instance."""
green_threads = []
@@ -1514,8 +1514,8 @@ class NetworkManager(manager.Manager):
call_func = self._teardown_network_on_host
else:
call_func = self._setup_network_on_host
-
- instance = objects.Instance.get_by_id(context, instance_id)
+ if instance is None:
+ instance = objects.Instance.get_by_id(context, instance_id)
vifs = objects.VirtualInterfaceList.get_by_instance_uuid(
context, instance.uuid)
LOG.debug('Setup networks on host', instance=instance)
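
Accepting the instance object through the new kwarg lets the manager skip a database round trip while still serving older callers that only send the id. The shape of that fallback, with load_by_id() standing in for the objects.Instance.get_by_id lookup:

    def setup(instance_id, instance=None):
        if instance is None:
            # legacy caller: fetch the object ourselves
            instance = load_by_id(instance_id)
        return instance
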
diff --git a/nova/network/model.py b/nova/network/model.py
index d81611bccc..c6a089d87f 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -51,6 +51,7 @@ VIF_TYPE_BINDING_FAILED = 'binding_failed'
VIF_DETAILS_PORT_FILTER = 'port_filter'
VIF_DETAILS_OVS_HYBRID_PLUG = 'ovs_hybrid_plug'
VIF_DETAILS_PHYSICAL_NETWORK = 'physical_network'
+VIF_DETAILS_BRIDGE_NAME = 'bridge_name'
# The following constant defines an SR-IOV related parameter in the
# 'vif_details'. 'profileid' should be used for VIF_TYPE_802_QBH
@@ -76,6 +77,9 @@ VIF_DETAILS_VHOSTUSER_SOCKET = 'vhostuser_socket'
# Specifies whether vhost-user socket should be plugged
# into ovs bridge. Valid values are True and False
VIF_DETAILS_VHOSTUSER_OVS_PLUG = 'vhostuser_ovs_plug'
+# Specifies whether vhost-user socket should be used to
+# create a fp netdevice interface.
+VIF_DETAILS_VHOSTUSER_FP_PLUG = 'vhostuser_fp_plug'
# Constants for dictionary keys in the 'vif_details' field that are
# valid for VIF_TYPE_TAP.
@@ -317,6 +321,8 @@ class VIF8021QbgParams(Model):
"""Represents the parameters for a 802.1qbg VIF."""
def __init__(self, managerid, typeid, typeidversion, instanceid):
+ super(VIF8021QbgParams, self).__init__()
+
self['managerid'] = managerid
self['typeid'] = typeid
self['typeidversion'] = typeidversion
@@ -327,6 +333,8 @@ class VIF8021QbhParams(Model):
"""Represents the parameters for a 802.1qbh VIF."""
def __init__(self, profileid):
+ super(VIF8021QbhParams, self).__init__()
+
self['profileid'] = profileid
@@ -457,12 +465,15 @@ class NetworkInfo(list):
return jsonutils.dumps(self)
def wait(self, do_raise=True):
- """A no-op method.
+ """Return this NetworkInfo
- This is useful to avoid type checking when NetworkInfo might be
- subclassed with NetworkInfoAsyncWrapper.
+ This is useful to avoid type checking when it's not clear if you have a
+ NetworkInfo or NetworkInfoAsyncWrapper. It's also useful when
+ utils.spawn in NetworkInfoAsyncWrapper is patched to return a
+ NetworkInfo rather than a GreenThread and wait() should return the same
+ thing for both cases.
"""
- pass
+ return self
class NetworkInfoAsyncWrapper(NetworkInfo):
@@ -487,6 +498,8 @@ class NetworkInfoAsyncWrapper(NetworkInfo):
"""
def __init__(self, async_method, *args, **kwargs):
+ super(NetworkInfoAsyncWrapper, self).__init__()
+
self._gt = utils.spawn(async_method, *args, **kwargs)
methods = ['json', 'fixed_ips', 'floating_ips']
for method in methods:
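
Returning self from NetworkInfo.wait() gives the synchronous class and the green-thread wrapper an identical contract, so callers can invoke wait() without type checks. A condensed sketch of the pairing, with spawn() standing in for nova.utils.spawn and fn assumed to return list-like data:

    class Result(list):
        def wait(self):
            return self                   # synchronous: already complete

    class AsyncResult(Result):
        def __init__(self, fn, *args):
            super(AsyncResult, self).__init__()
            self._gt = spawn(fn, *args)   # run fn in a green thread

        def wait(self):
            self.extend(self._gt.wait())  # block until fn finishes
            return self

    # either way: data = maybe_async.wait()
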
diff --git a/nova/network/neutronv2/api.py b/nova/network/neutronv2/api.py
index 2136cbaba7..ded2ca904a 100644
--- a/nova/network/neutronv2/api.py
+++ b/nova/network/neutronv2/api.py
@@ -19,12 +19,9 @@ import copy
import time
import uuid
-from keystoneclient import auth
-from keystoneclient.auth import token_endpoint
-from keystoneclient import session
+from keystoneauth1 import loading as ks_loading
from neutronclient.common import exceptions as neutron_client_exc
from neutronclient.v2_0 import client as clientv20
-from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
@@ -49,11 +46,10 @@ neutron_opts = [
help='URL for connecting to neutron'),
cfg.StrOpt('region_name',
help='Region name for connecting to neutron in admin context'),
- # TODO(berrange) temporary hack until Neutron can pass over the
- # name of the OVS bridge it is configured with
cfg.StrOpt('ovs_bridge',
default='br-int',
- help='Name of Integration Bridge used by Open vSwitch'),
+ help='Default OVS bridge name to use if not specified '
+ 'by Neutron'),
cfg.IntOpt('extension_sync_interval',
default=600,
help='Number of seconds before querying neutron for'
@@ -72,9 +68,9 @@ deprecations = {'cafile': [cfg.DeprecatedOpt('ca_certificates_file',
'timeout': [cfg.DeprecatedOpt('url_timeout',
group=NEUTRON_GROUP)]}
-_neutron_options = session.Session.register_conf_options(
+_neutron_options = ks_loading.register_session_conf_options(
CONF, NEUTRON_GROUP, deprecated_opts=deprecations)
-auth.register_conf_options(CONF, NEUTRON_GROUP)
+ks_loading.register_auth_conf_options(CONF, NEUTRON_GROUP)
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
@@ -89,22 +85,22 @@ _ADMIN_AUTH = None
def list_opts():
- list = copy.deepcopy(_neutron_options)
- list.insert(0, auth.get_common_conf_options()[0])
+ opts = copy.deepcopy(_neutron_options)
+ opts.insert(0, ks_loading.get_auth_common_conf_options()[0])
# NOTE(dims): There are a lot of auth plugins, we just generate
# the config options for a few common ones
plugins = ['password', 'v2password', 'v3password']
for name in plugins:
- for plugin_option in auth.get_plugin_class(name).get_options():
+ for plugin_option in ks_loading.get_plugin_loader(name).get_options():
found = False
- for option in list:
+ for option in opts:
if option.name == plugin_option.name:
found = True
break
if not found:
- list.append(plugin_option)
- list.sort(key=lambda x: x.name)
- return [(NEUTRON_GROUP, list)]
+ opts.append(plugin_option)
+ opts.sort(key=lambda x: x.name)
+ return [(NEUTRON_GROUP, opts)]
def reset_state():
@@ -116,7 +112,7 @@ def reset_state():
def _load_auth_plugin(conf):
- auth_plugin = auth.load_from_conf_options(conf, NEUTRON_GROUP)
+ auth_plugin = ks_loading.load_auth_from_conf_options(conf, NEUTRON_GROUP)
if auth_plugin:
return auth_plugin
@@ -136,25 +132,13 @@ def get_client(context, admin=False):
auth_plugin = None
if not _SESSION:
- _SESSION = session.Session.load_from_conf_options(CONF, NEUTRON_GROUP)
+ _SESSION = ks_loading.load_session_from_conf_options(
+ CONF, NEUTRON_GROUP)
if admin or (context.is_admin and not context.auth_token):
- # NOTE(jamielennox): The theory here is that we maintain one
- # authenticated admin auth globally. The plugin will authenticate
- # internally (not thread safe) and on demand so we extract a current
- # auth plugin from it (whilst locked). This may or may not require
- # reauthentication. We then use the static token plugin to issue the
- # actual request with that current token in a thread safe way.
if not _ADMIN_AUTH:
_ADMIN_AUTH = _load_auth_plugin(CONF)
-
- with lockutils.lock('neutron_admin_auth_token_lock'):
- # FIXME(jamielennox): We should also retrieve the endpoint from the
- # catalog here rather than relying on setting it in CONF.
- auth_token = _ADMIN_AUTH.get_token(_SESSION)
-
- # FIXME(jamielennox): why aren't we using the service catalog?
- auth_plugin = token_endpoint.Token(CONF.neutron.url, auth_token)
+ auth_plugin = _ADMIN_AUTH
elif context.auth_token:
auth_plugin = context.get_auth_plugin()
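
With keystoneauth1, both the session and the auth plugin come straight from configuration via the loading module, replacing the keystoneclient session/auth split and the manual admin-token plumbing deleted above. A hedged sketch of the consumer side (the 'neutron' group name mirrors the patch; the wrapper function is illustrative):

    from keystoneauth1 import loading as ks_loading
    from oslo_config import cfg

    CONF = cfg.CONF
    ks_loading.register_session_conf_options(CONF, 'neutron')
    ks_loading.register_auth_conf_options(CONF, 'neutron')

    def make_session():
        # both objects are built from the same conf group
        auth = ks_loading.load_auth_from_conf_options(CONF, 'neutron')
        return ks_loading.load_session_from_conf_options(
            CONF, 'neutron', auth=auth)
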
@@ -170,6 +154,23 @@ def get_client(context, admin=False):
region_name=CONF.neutron.region_name)
+def _is_not_duplicate(item, items, items_list_name, instance):
+ present = item in items
+
+ # The expectation from this function's perspective is that the
+ # item is not part of the items list so if it is part of it
+ # we should at least log it as a warning
+ if present:
+                        "containing: %(items)s. Ignoring it"),
+ "containing: %(items)s. ignoring it"),
+ {'item': item,
+ 'list_name': items_list_name,
+ 'items': items},
+ instance=instance)
+
+ return not present
+
+
class API(base_api.NetworkAPI):
"""API for interacting with the neutron 2.x API."""
@@ -377,6 +378,8 @@ class API(base_api.NetworkAPI):
port's MAC address is not in that set.
:raises nova.exception.PortInUse: If a requested port is already
attached to another instance.
+ :raises nova.exception.PortNotUsableDNS: If a requested port has a
+ value assigned to its dns_name attribute.
"""
available_macs = None
@@ -407,6 +410,16 @@ class API(base_api.NetworkAPI):
if port.get('device_id'):
raise exception.PortInUse(port_id=request.port_id)
+ # Make sure that if the user assigned a value to the port's
+ # dns_name attribute, it is equal to the instance's
+ # hostname
+ if port.get('dns_name'):
+ if port['dns_name'] != instance.hostname:
+ raise exception.PortNotUsableDNS(
+ port_id=request.port_id,
+ instance=instance.uuid, value=port['dns_name'],
+ hostname=instance.hostname)
+
# Make sure the port is usable
if (port.get('binding:vif_type') ==
network_model.VIF_TYPE_BINDING_FAILED):
@@ -625,7 +638,8 @@ class API(base_api.NetworkAPI):
try:
self._populate_neutron_extension_values(
context, instance, request.pci_request_id, port_req_body,
- neutron=neutron, bind_host_id=bind_host_id)
+ network=network, neutron=neutron,
+ bind_host_id=bind_host_id)
if request.port_id:
port = ports[request.port_id]
port_client.update_port(port['id'], port_req_body)
@@ -638,6 +652,9 @@ class API(base_api.NetworkAPI):
security_group_ids, available_macs, dhcp_opts)
created_port_ids.append(created_port)
ports_in_requested_order.append(created_port)
+ self._update_port_dns_name(context, instance, network,
+ ports_in_requested_order[-1],
+ neutron)
except Exception:
with excutils.save_and_reraise_exception():
self._unbind_ports(context,
@@ -698,7 +715,8 @@ class API(base_api.NetworkAPI):
def _populate_neutron_extension_values(self, context, instance,
pci_request_id, port_req_body,
- neutron=None, bind_host_id=None):
+ network=None, neutron=None,
+ bind_host_id=None):
"""Populate neutron extension values for the instance.
If the extensions loaded contain QOS_QUEUE then pass the rxtx_factor.
@@ -708,11 +726,53 @@ class API(base_api.NetworkAPI):
flavor = instance.get_flavor()
rxtx_factor = flavor.get('rxtx_factor')
port_req_body['port']['rxtx_factor'] = rxtx_factor
- if self._has_port_binding_extension(context, neutron=neutron):
+ has_port_binding_extension = (
+ self._has_port_binding_extension(context, neutron=neutron))
+ if has_port_binding_extension:
port_req_body['port']['binding:host_id'] = bind_host_id
self._populate_neutron_binding_profile(instance,
pci_request_id,
port_req_body)
+ if constants.DNS_INTEGRATION in self.extensions:
+ # If the DNS integration extension is enabled in Neutron, most
+ # ports will get their dns_name attribute set in the port create or
+ # update requests in allocate_for_instance. So we just add the
+ # dns_name attribute to the payload of those requests. The
+ # exception is when the port binding extension is enabled in
+ # Neutron and the port is on a network that has a non-blank
+ # dns_domain attribute. This case requires to be processed by
+ # method _update_port_dns_name
+ if (not has_port_binding_extension
+ or not network.get('dns_domain')):
+ port_req_body['port']['dns_name'] = instance.hostname
+
+ def _update_port_dns_name(self, context, instance, network, port_id,
+ neutron):
+ """Update an instance port dns_name attribute with instance.hostname.
+
+ The dns_name attribute of a port on a network with a non-blank
+ dns_domain attribute will be sent to the external DNS service
+ (Designate) if DNS integration is enabled in Neutron. This requires the
+ assignment of the dns_name to the port to be done with a Neutron client
+ using the user's context. allocate_for_instance uses a port with admin
+        context if the port binding extension is enabled in Neutron. In this
+ case, we assign in this method the dns_name attribute to the port with
+ an additional update request. Only a very small fraction of ports will
+ require this additional update request.
+ """
+ if (constants.DNS_INTEGRATION in self.extensions and
+ self._has_port_binding_extension(context) and
+ network.get('dns_domain')):
+ try:
+ port_req_body = {'port': {'dns_name': instance.hostname}}
+ neutron.update_port(port_id, port_req_body)
+ except neutron_client_exc.BadRequest:
+ LOG.warning(_LW('Neutron error: Instance hostname '
+ '%(hostname)s is not a valid DNS name'),
+ {'hostname': instance.hostname}, instance=instance)
+ msg = (_('Instance hostname %(hostname)s is not a valid DNS '
+ 'name') % {'hostname': instance.hostname})
+ raise exception.InvalidInput(reason=msg)
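
The docstring above boils down to a two-way branch: dns_name is normally set inline in the port create/update request, and only the port-binding-plus-dns_domain combination forces the separate user-context update. A sketch of just that decision, with the booleans standing in for the extension checks in the patch:

    def dns_name_update_strategy(has_port_binding, network_has_dns_domain):
        if has_port_binding and network_has_dns_domain:
            # admin client created the port; re-assign dns_name as the user
            return 'separate update_port call'
        # common case: dns_name rides along in the create/update body
        return 'inline in port request'
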
def _delete_ports(self, neutron, instance, ports, raise_if_fail=False):
exceptions = []
@@ -875,16 +935,32 @@ class API(base_api.NetworkAPI):
net_ids)
# an interface was added/removed from instance.
else:
- # Since networks does not contain the existing networks on the
- # instance we use their values from the cache and add it.
+
+ # Prepare the network ids list for validation purposes
+ networks_ids = [network['id'] for network in networks]
+
+            # Validate that the interface networks don't already exist in
+            # networks.
+ # Though this issue can and should be solved in methods
+ # that prepare the networks list, this method should have this
+ # ignore-duplicate-networks/port-ids mechanism to reduce the
+ # probability of failing to boot the VM.
networks = networks + [
{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
- for iface in ifaces]
+ for iface in ifaces
+ if _is_not_duplicate(iface['network']['id'],
+ networks_ids,
+ "networks",
+ instance)]
# Include existing interfaces so they are not removed from the db.
- port_ids = [iface['id'] for iface in ifaces] + port_ids
+ # Validate that the interface id is not in the port_ids
+ port_ids = [iface['id'] for iface in ifaces
+ if _is_not_duplicate(iface['id'],
+ port_ids,
+ "port_ids",
+ instance)] + port_ids
return networks, port_ids
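
Both list merges above use the same filter shape: keep a candidate only when its id is not already present, warning instead of silently double-adding. The same filter in isolation, with warn() standing in for the LOG.warning call:

    def merge_unique(existing_ids, candidates, key):
        merged = []
        for item in candidates:
            if item[key] in existing_ids:
                warn('%s already present, skipping' % item[key])
                continue
            merged.append(item)
        return merged
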
@@ -1521,14 +1597,14 @@ class API(base_api.NetworkAPI):
# Network model metadata
should_create_bridge = None
vif_type = port.get('binding:vif_type')
- port_details = port.get('binding:vif_details')
- # TODO(berrange) Neutron should pass the bridge name
- # in another binding metadata field
+ port_details = port.get('binding:vif_details', {})
if vif_type == network_model.VIF_TYPE_OVS:
- bridge = CONF.neutron.ovs_bridge
+ bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
+ CONF.neutron.ovs_bridge)
ovs_interfaceid = port['id']
elif vif_type == network_model.VIF_TYPE_BRIDGE:
- bridge = "brq" + port['network_id']
+ bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
+ "brq" + port['network_id'])
should_create_bridge = True
elif vif_type == network_model.VIF_TYPE_DVS:
# The name of the DVS port group will contain the neutron
@@ -1537,7 +1613,8 @@ class API(base_api.NetworkAPI):
elif (vif_type == network_model.VIF_TYPE_VHOSTUSER and
port_details.get(network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)):
- bridge = CONF.neutron.ovs_bridge
+ bridge = port_details.get(network_model.VIF_DETAILS_BRIDGE_NAME,
+ CONF.neutron.ovs_bridge)
ovs_interfaceid = port['id']
# Prune the bridge name if necessary. For the DVS this is not done
@@ -1622,11 +1699,6 @@ class API(base_api.NetworkAPI):
current_neutron_port_map[current_neutron_port['id']] = (
current_neutron_port)
- # In that case we should repopulate ports from the state of
- # Neutron.
- if not port_ids:
- port_ids = current_neutron_port_map.keys()
-
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
if current_neutron_port:
diff --git a/nova/network/neutronv2/constants.py b/nova/network/neutronv2/constants.py
index c0df396e67..ca400c8a57 100644
--- a/nova/network/neutronv2/constants.py
+++ b/nova/network/neutronv2/constants.py
@@ -17,3 +17,4 @@ QOS_QUEUE = 'QoS Queue'
NET_EXTERNAL = 'router:external'
PORTBINDING_EXT = 'Port Binding'
VNIC_INDEX_EXT = 'VNIC Index'
+DNS_INTEGRATION = 'DNS Integration'
diff --git a/nova/network/rpcapi.py b/nova/network/rpcapi.py
index 3c161b9817..316fe74ea1 100644
--- a/nova/network/rpcapi.py
+++ b/nova/network/rpcapi.py
@@ -110,6 +110,9 @@ class NetworkAPI(object):
... Liberty supports message version 1.15. So, any changes to
existing methods in 1.x after that point should be done such that they
can handle the version_cap being set to 1.15.
+
+ * 1.16 - Transfer instance in addition to instance_id in
+ setup_networks_on_host
'''
VERSION_ALIASES = {
@@ -243,11 +246,19 @@ class NetworkAPI(object):
return self.client.call(ctxt, 'create_public_dns_domain',
domain=domain, project=project)
- def setup_networks_on_host(self, ctxt, instance_id, host, teardown):
+ def setup_networks_on_host(self, ctxt, instance_id, host, teardown,
+ instance):
# NOTE(tr3buchet): the call is just to wait for completion
- return self.client.call(ctxt, 'setup_networks_on_host',
- instance_id=instance_id, host=host,
- teardown=teardown)
+ version = '1.16'
+ kwargs = {}
+ if not self.client.can_send_version(version):
+ version = '1.0'
+ else:
+ kwargs['instance'] = instance
+ cctxt = self.client.prepare(version=version)
+ return cctxt.call(ctxt, 'setup_networks_on_host',
+ instance_id=instance_id, host=host,
+ teardown=teardown, **kwargs)
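
can_send_version() is what keeps the 1.16 change rolling-upgrade safe: when the peer is older, the client silently falls back to the 1.0 signature and drops the new instance kwarg. The negotiation pattern in general form (client is assumed to be an oslo.messaging RPC client):

    def call_with_fallback(client, ctxt, method, new_version, new_kwargs,
                           **base_kwargs):
        if client.can_send_version(new_version):
            version = new_version
            kwargs = dict(base_kwargs, **new_kwargs)
        else:
            # peer too old: send only the base signature
            version, kwargs = '1.0', base_kwargs
        cctxt = client.prepare(version=version)
        return cctxt.call(ctxt, method, **kwargs)
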
def set_network_host(self, ctxt, network_ref):
version = '1.15'
diff --git a/nova/network/security_group/neutron_driver.py b/nova/network/security_group/neutron_driver.py
index 8a00844832..409136585e 100644
--- a/nova/network/security_group/neutron_driver.py
+++ b/nova/network/security_group/neutron_driver.py
@@ -17,7 +17,6 @@ import sys
from neutronclient.common import exceptions as n_exc
from neutronclient.neutron import v2_0 as neutronv20
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
@@ -33,7 +32,6 @@ from nova import objects
from nova import utils
-CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# NOTE: Neutron client has a max URL length of 8192, so we have
@@ -547,10 +545,10 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
'instance': instance.uuid})
self.raise_not_found(msg)
- def populate_security_groups(self, instance, security_groups):
- # Setting to empty list since we do not want to populate this field
+ def populate_security_groups(self, security_groups):
+ # Returning an empty list since we do not want to populate this field
# in the nova database if using the neutron driver
- instance.security_groups = objects.SecurityGroupList()
+ return objects.SecurityGroupList()
def get_default_rule(self, context, id):
msg = _("Network driver does not support this function.")
diff --git a/nova/network/security_group/openstack_driver.py b/nova/network/security_group/openstack_driver.py
index ac8a8359d4..e0e2f5c223 100644
--- a/nova/network/security_group/openstack_driver.py
+++ b/nova/network/security_group/openstack_driver.py
@@ -51,4 +51,4 @@ def get_openstack_security_group_driver(skip_policy_check=False):
def is_neutron_security_groups():
- return CONF.security_group_api.lower() in ('neutron', 'quantum')
+ return CONF.security_group_api.lower() == 'neutron'
diff --git a/nova/network/security_group/security_group_base.py b/nova/network/security_group/security_group_base.py
index 6fdf19a15f..e5af41cc5e 100644
--- a/nova/network/security_group/security_group_base.py
+++ b/nova/network/security_group/security_group_base.py
@@ -19,14 +19,10 @@
import urllib
-from oslo_config import cfg
-
from nova import exception
from nova.i18n import _
from nova import utils
-CONF = cfg.CONF
-
class SecurityGroupBase(object):
@@ -173,7 +169,7 @@ class SecurityGroupBase(object):
"""
pass
- def populate_security_groups(self, instance, security_groups):
+ def populate_security_groups(self, security_groups):
"""Called when populating the database for an instances
security groups.
"""
diff --git a/nova/objects/__init__.py b/nova/objects/__init__.py
index cd64916841..e6f92f7878 100644
--- a/nova/objects/__init__.py
+++ b/nova/objects/__init__.py
@@ -48,15 +48,18 @@ def register_all():
__import__('nova.objects.instance_numa_topology')
__import__('nova.objects.instance_pci_requests')
__import__('nova.objects.keypair')
+ __import__('nova.objects.migrate_data')
__import__('nova.objects.migration')
__import__('nova.objects.migration_context')
__import__('nova.objects.monitor_metric')
__import__('nova.objects.network')
__import__('nova.objects.network_request')
+ __import__('nova.objects.notification')
__import__('nova.objects.numa')
__import__('nova.objects.pci_device')
__import__('nova.objects.pci_device_pool')
__import__('nova.objects.request_spec')
+ __import__('nova.objects.resource_provider')
__import__('nova.objects.tag')
__import__('nova.objects.quotas')
__import__('nova.objects.security_group')
diff --git a/nova/objects/aggregate.py b/nova/objects/aggregate.py
index 6510567b25..97fe9b75ca 100644
--- a/nova/objects/aggregate.py
+++ b/nova/objects/aggregate.py
@@ -12,6 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import log as logging
+from oslo_utils import uuidutils
+
from nova.compute import utils as compute_utils
from nova import db
from nova import exception
@@ -19,15 +22,19 @@ from nova import objects
from nova.objects import base
from nova.objects import fields
+LOG = logging.getLogger(__name__)
+
@base.NovaObjectRegistry.register
class Aggregate(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
- VERSION = '1.1'
+ # Version 1.2: Added uuid field
+ VERSION = '1.2'
fields = {
'id': fields.IntegerField(),
+ 'uuid': fields.UUIDField(nullable=False),
'name': fields.StringField(),
'hosts': fields.ListOfStringsField(nullable=True),
'metadata': fields.DictOfStringsField(nullable=True),
@@ -40,11 +47,31 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
for key in aggregate.fields:
if key == 'metadata':
db_key = 'metadetails'
+ elif key == 'uuid':
+ continue
else:
db_key = key
setattr(aggregate, key, db_aggregate[db_key])
+
+ # NOTE(danms): Remove this conditional load (and remove uuid
+ # special cases above) once we're in Newton and have enforced
+ # that all UUIDs in the database are not NULL.
+ if db_aggregate.get('uuid'):
+ aggregate.uuid = db_aggregate['uuid']
+
aggregate._context = context
aggregate.obj_reset_changes()
+
+ # NOTE(danms): This needs to come after obj_reset_changes() to make
+ # sure we only save the uuid, if we generate one.
+ # FIXME(danms): Remove this in Newton once we have enforced that
+ # all aggregates have uuids set in the database.
+ if 'uuid' not in aggregate:
+ aggregate.uuid = uuidutils.generate_uuid()
+ LOG.debug('Generating UUID %(uuid)s for aggregate %(agg)i',
+ dict(uuid=aggregate.uuid, agg=aggregate.id))
+ aggregate.save()
+
return aggregate
def _assert_no_hosts(self, action):
@@ -69,6 +96,10 @@ class Aggregate(base.NovaPersistentObject, base.NovaObject):
if 'metadata' in updates:
# NOTE(danms): For some reason the notification format is weird
payload['meta_data'] = payload.pop('metadata')
+ if 'uuid' not in updates:
+ updates['uuid'] = uuidutils.generate_uuid()
+ LOG.debug('Generated uuid %(uuid)s for aggregate',
+ dict(uuid=updates['uuid']))
compute_utils.notify_about_aggregate_update(self._context,
"create.start",
payload)
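The aggregate hunks above (and the matching compute-node hunks further down) share one lazy-backfill pattern: load uuid only when the row has one, and generate-and-save a uuid only after obj_reset_changes(), so the save touches nothing but the uuid. A self-contained sketch of why that ordering matters, using a toy object with simple change tracking:

import uuid


class FakeObject(object):
    """Toy versioned object with just enough change tracking."""

    def __init__(self):
        self._changed = set()
        self._data = {}

    def __contains__(self, key):
        return key in self._data

    def set(self, key, value):
        self._data[key] = value
        self._changed.add(key)

    def obj_reset_changes(self):
        self._changed = set()

    def save(self):
        # Only dirty fields would be written back.
        print('saving fields: %s' % sorted(self._changed))


def from_db_object(obj, db_row):
    for key in ('id', 'name'):
        obj.set(key, db_row[key])
    # Conditional load: old rows may have a NULL uuid.
    if db_row.get('uuid'):
        obj.set('uuid', db_row['uuid'])
    obj.obj_reset_changes()
    # Backfill AFTER the reset, so only the generated uuid is dirty and
    # save() does not rewrite every field we just loaded.
    if 'uuid' not in obj:
        obj.set('uuid', str(uuid.uuid4()))
        obj.save()
    return obj


from_db_object(FakeObject(), {'id': 1, 'name': 'agg1', 'uuid': None})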
diff --git a/nova/objects/bandwidth_usage.py b/nova/objects/bandwidth_usage.py
index 4d82a360a9..fa5119d00e 100644
--- a/nova/objects/bandwidth_usage.py
+++ b/nova/objects/bandwidth_usage.py
@@ -46,11 +46,17 @@ class BandwidthUsage(base.NovaPersistentObject, base.NovaObject,
bw_usage.obj_reset_changes()
return bw_usage
+ @staticmethod
+ @db.select_db_reader_mode
+ def _db_bw_usage_get(context, uuid, start_period, mac, use_slave=False):
+ return db.bw_usage_get(context, uuid=uuid, start_period=start_period,
+ mac=mac)
+
@base.serialize_args
@base.remotable_classmethod
def get_by_instance_uuid_and_mac(cls, context, instance_uuid, mac,
start_period=None, use_slave=False):
- db_bw_usage = db.bw_usage_get(context, uuid=instance_uuid,
+ db_bw_usage = cls._db_bw_usage_get(context, uuid=instance_uuid,
start_period=start_period, mac=mac,
use_slave=use_slave)
if db_bw_usage:
@@ -79,10 +85,17 @@ class BandwidthUsageList(base.ObjectListBase, base.NovaObject):
'objects': fields.ListOfObjectsField('BandwidthUsage'),
}
+ @staticmethod
+ @db.select_db_reader_mode
+ def _db_bw_usage_get_by_uuids(context, uuids, start_period,
+ use_slave=False):
+ return db.bw_usage_get_by_uuids(context, uuids=uuids,
+ start_period=start_period)
+
@base.serialize_args
@base.remotable_classmethod
def get_by_uuids(cls, context, uuids, start_period=None, use_slave=False):
- db_bw_usages = db.bw_usage_get_by_uuids(context, uuids=uuids,
+ db_bw_usages = cls._db_bw_usage_get_by_uuids(context, uuids=uuids,
start_period=start_period,
use_slave=use_slave)
return base.obj_make_list(context, cls(), BandwidthUsage, db_bw_usages)
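This is the first of many hunks that move a db call into a small static helper decorated with db.select_db_reader_mode. Judging purely from how it is used in this diff, the decorator appears to route the call to the async (slave) reader when use_slave is true, so the inner function can drop the use_slave plumbing. A hedged sketch of a decorator with that shape (the real one is defined in nova.db, outside this diff):

import functools


def select_db_reader_mode(fn):
    """Sketch of a reader-mode selector keyed on the use_slave kwarg.

    An assumption-based stand-in for nova's decorator, which is not
    part of this diff.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        use_slave = kwargs.get('use_slave', False)
        reader = 'async (slave)' if use_slave else 'primary'
        print('dispatching %s via %s reader' % (fn.__name__, reader))
        return fn(*args, **kwargs)
    return wrapper


@select_db_reader_mode
def _db_bw_usage_get(context, uuid, start_period, mac, use_slave=False):
    # The real body calls db.bw_usage_get(...) without use_slave.
    return {'uuid': uuid, 'mac': mac}


_db_bw_usage_get(None, 'fake-uuid', None, 'aa:bb', use_slave=True)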
diff --git a/nova/objects/base.py b/nova/objects/base.py
index b0c98e0559..cf938ab24e 100644
--- a/nova/objects/base.py
+++ b/nova/objects/base.py
@@ -71,6 +71,16 @@ class NovaObject(ovoo_base.VersionedObject):
OBJ_SERIAL_NAMESPACE = 'nova_object'
OBJ_PROJECT_NAMESPACE = 'nova'
+ # NOTE(ndipanov): This is nova-specific
+ @staticmethod
+ def should_migrate_data():
+ """A check that can be used to inhibit online migration behavior
+
+ This is usually used to check if all services that will be accessing
+ the db directly are ready for the new format.
+ """
+ raise NotImplementedError()
+
# NOTE(danms): This has some minor change between the nova and o.vo
# version, so avoid inheriting it for the moment so we can make that
# transition separately for clarity.
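should_migrate_data() is introduced here as an abstract nova-specific hook; later in this diff, PciDevice renames its _migrate_parent_addr() helper to implement it. A minimal sketch of the kind of override the hook expects, with an invented service-version table standing in for the real check:

class NovaObjectSketch(object):
    @staticmethod
    def should_migrate_data():
        raise NotImplementedError()


class PciDeviceSketch(NovaObjectSketch):
    # Invented minimum-reported-version table; the real check queries
    # the Service records for the named binaries.
    SERVICE_VERSIONS = {'conductor': 4, 'api': 4}

    @staticmethod
    def should_migrate_data():
        # Only migrate once every service touching the db is new enough.
        return all(v >= 4 for v in
                   PciDeviceSketch.SERVICE_VERSIONS.values())


print(PciDeviceSketch.should_migrate_data())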
diff --git a/nova/objects/block_device.py b/nova/objects/block_device.py
index c06a5c13da..f545431b34 100644
--- a/nova/objects/block_device.py
+++ b/nova/objects/block_device.py
@@ -322,17 +322,31 @@ class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject):
return base.obj_make_dict_of_lists(
context, cls, bdms, 'instance_uuid')
+ @staticmethod
+ @db.select_db_reader_mode
+ def _db_block_device_mapping_get_all_by_instance_uuids(
+ context, instance_uuids, use_slave=False):
+ return db.block_device_mapping_get_all_by_instance_uuids(
+ context, instance_uuids)
+
@base.remotable_classmethod
def get_by_instance_uuids(cls, context, instance_uuids, use_slave=False):
- db_bdms = db.block_device_mapping_get_all_by_instance_uuids(
- context, instance_uuids, use_slave=use_slave)
+ db_bdms = cls._db_block_device_mapping_get_all_by_instance_uuids(
+ context, instance_uuids, use_slave=use_slave)
return base.obj_make_list(
context, cls(), objects.BlockDeviceMapping, db_bdms or [])
+ @staticmethod
+ @db.select_db_reader_mode
+ def _db_block_device_mapping_get_all_by_instance(
+ context, instance_uuid, use_slave=False):
+ return db.block_device_mapping_get_all_by_instance(
+ context, instance_uuid)
+
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
- db_bdms = db.block_device_mapping_get_all_by_instance(
- context, instance_uuid, use_slave=use_slave)
+ db_bdms = cls._db_block_device_mapping_get_all_by_instance(
+ context, instance_uuid, use_slave=use_slave)
return base.obj_make_list(
context, cls(), objects.BlockDeviceMapping, db_bdms or [])
diff --git a/nova/objects/compute_node.py b/nova/objects/compute_node.py
index 2ab2ec34b7..a9859e3e46 100644
--- a/nova/objects/compute_node.py
+++ b/nova/objects/compute_node.py
@@ -13,7 +13,9 @@
# under the License.
from oslo_config import cfg
+from oslo_log import log as logging
from oslo_serialization import jsonutils
+from oslo_utils import uuidutils
from oslo_utils import versionutils
from nova import db
@@ -26,6 +28,8 @@ from nova.objects import pci_device_pool
CONF = cfg.CONF
CONF.import_opt('cpu_allocation_ratio', 'nova.compute.resource_tracker')
CONF.import_opt('ram_allocation_ratio', 'nova.compute.resource_tracker')
+CONF.import_opt('disk_allocation_ratio', 'nova.compute.resource_tracker')
+LOG = logging.getLogger(__name__)
# TODO(berrange): Remove NovaObjectDictCompat
@@ -47,10 +51,13 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
# Version 1.12: HVSpec version 1.1
# Version 1.13: Changed service_id field to be nullable
# Version 1.14: Added cpu_allocation_ratio and ram_allocation_ratio
- VERSION = '1.14'
+ # Version 1.15: Added uuid
+ # Version 1.16: Added disk_allocation_ratio
+ VERSION = '1.16'
fields = {
'id': fields.IntegerField(read_only=True),
+ 'uuid': fields.UUIDField(read_only=True),
'service_id': fields.IntegerField(nullable=True),
'host': fields.StringField(nullable=True),
'vcpus': fields.IntegerField(),
@@ -66,11 +73,16 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
'free_disk_gb': fields.IntegerField(nullable=True),
'current_workload': fields.IntegerField(nullable=True),
'running_vms': fields.IntegerField(nullable=True),
+ # TODO(melwitt): cpu_info is non-nullable in the schema but we must
+ # wait until version 2.0 of ComputeNode to change it to non-nullable
'cpu_info': fields.StringField(nullable=True),
'disk_available_least': fields.IntegerField(nullable=True),
'metrics': fields.StringField(nullable=True),
'stats': fields.DictOfNullableStringsField(nullable=True),
'host_ip': fields.IPAddressField(nullable=True),
+ # TODO(rlrossit): because of history, numa_topology is held here as a
+ # StringField, not a NUMATopology object. In version 2 of ComputeNode
+ # this will be converted over to a fields.ObjectField('NUMATopology')
'numa_topology': fields.StringField(nullable=True),
# NOTE(pmurray): the supported_hv_specs field maps to the
# supported_instances field in the database
@@ -81,11 +93,18 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
nullable=True),
'cpu_allocation_ratio': fields.FloatField(),
'ram_allocation_ratio': fields.FloatField(),
+ 'disk_allocation_ratio': fields.FloatField(),
}
def obj_make_compatible(self, primitive, target_version):
super(ComputeNode, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
+ if target_version < (1, 16):
+ if 'disk_allocation_ratio' in primitive:
+ del primitive['disk_allocation_ratio']
+ if target_version < (1, 15):
+ if 'uuid' in primitive:
+ del primitive['uuid']
if target_version < (1, 14):
if 'ram_allocation_ratio' in primitive:
del primitive['ram_allocation_ratio']
@@ -150,6 +169,7 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
'supported_hv_specs',
'host',
'pci_device_pools',
+ 'uuid',
])
fields = set(compute.fields) - special_cases
for key in fields:
@@ -160,11 +180,13 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
# As we want to care about our operators and since we don't want to
# ask them to change their configuration files before upgrading, we
# prefer to hardcode the default values for the ratios here until
- # the next release (Mitaka) where the opt default values will be
- # restored for both cpu (16.0) and ram (1.5) allocation ratios.
+ # the next release (Newton) where the opt default values will be
+ # restored for cpu (16.0), ram (1.5) and disk (1.0)
+ # allocation ratios.
# TODO(sbauza): Remove that in the next major version bump where
- # we break compatibilility with old Kilo computes
- if key == 'cpu_allocation_ratio' or key == 'ram_allocation_ratio':
+ # we break compatibility with old Liberty computes
+ if (key == 'cpu_allocation_ratio' or key == 'ram_allocation_ratio'
+ or key == 'disk_allocation_ratio'):
if value == 0.0:
# Operator has not yet provided a new value for that ratio
# on the compute node
@@ -180,6 +202,9 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
if value == 0.0 and key == 'ram_allocation_ratio':
# It's not specified either on the controller
value = 1.5
+ if value == 0.0 and key == 'disk_allocation_ratio':
+ # It's not specified either on the controller
+ value = 1.0
compute[key] = value
stats = db_compute['stats']
@@ -201,7 +226,24 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
# host column is present in the table or not
compute._host_from_db_object(compute, db_compute)
+ # NOTE(danms): Remove this conditional load (and remove uuid from
+ # the list of special_cases above) once we're in Newton and have
+ # enforced that all UUIDs in the database are not NULL.
+ if db_compute.get('uuid'):
+ compute.uuid = db_compute['uuid']
+
compute.obj_reset_changes()
+
+ # NOTE(danms): This needs to come after obj_reset_changes() to make
+ # sure we only save the uuid, if we generate one.
+ # FIXME(danms): Remove this in Newton once we have enforced that
+ # all compute nodes have uuids set in the database.
+ if 'uuid' not in compute:
+ compute.uuid = uuidutils.generate_uuid()
+ LOG.debug('Generated UUID %(uuid)s for compute node %(id)i',
+ dict(uuid=compute.uuid, id=compute.id))
+ compute.save()
+
return compute
@base.remotable_classmethod
@@ -221,34 +263,11 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
@base.remotable_classmethod
def get_by_host_and_nodename(cls, context, host, nodename):
- try:
- db_compute = db.compute_node_get_by_host_and_nodename(
- context, host, nodename)
- except exception.ComputeHostNotFound:
- # FIXME(sbauza): Some old computes can still have no host record
- # We need to provide compatibility by using the old service_id
- # record.
- # We assume the compatibility as an extra penalty of one more DB
- # call but that's necessary until all nodes are upgraded.
- try:
- service = objects.Service.get_by_compute_host(context, host)
- db_computes = db.compute_nodes_get_by_service_id(
- context, service.id)
- except exception.ServiceNotFound:
- # We need to provide the same exception upstream
- raise exception.ComputeHostNotFound(host=host)
- db_compute = None
- for compute in db_computes:
- if compute['hypervisor_hostname'] == nodename:
- db_compute = compute
- # We can avoid an extra call to Service object in
- # _from_db_object
- db_compute['host'] = service.host
- break
- if not db_compute:
- raise exception.ComputeHostNotFound(host=host)
+ db_compute = db.compute_node_get_by_host_and_nodename(
+ context, host, nodename)
return cls._from_db_object(context, cls(), db_compute)
+ # TODO(pkholkin): Remove this method in the next major version bump
@base.remotable_classmethod
def get_first_node_by_host_for_old_compat(cls, context, host,
use_slave=False):
@@ -290,6 +309,9 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
+ if 'uuid' not in updates:
+ updates['uuid'] = uuidutils.generate_uuid()
+
self._convert_stats_to_db_format(updates)
self._convert_host_ip_to_db_format(updates)
self._convert_supported_instances_to_db_format(updates)
@@ -386,27 +408,14 @@ class ComputeNodeList(base.ObjectListBase, base.NovaObject):
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
+ @staticmethod
+ @db.select_db_reader_mode
+ def _db_compute_node_get_all_by_host(context, host, use_slave=False):
+ return db.compute_node_get_all_by_host(context, host)
+
@base.remotable_classmethod
def get_all_by_host(cls, context, host, use_slave=False):
- try:
- db_computes = db.compute_node_get_all_by_host(context, host,
- use_slave)
- except exception.ComputeHostNotFound:
- # FIXME(sbauza): Some old computes can still have no host record
- # We need to provide compatibility by using the old service_id
- # record.
- # We assume the compatibility as an extra penalty of one more DB
- # call but that's necessary until all nodes are upgraded.
- try:
- service = objects.Service.get_by_compute_host(context, host,
- use_slave)
- db_computes = db.compute_nodes_get_by_service_id(
- context, service.id)
- except exception.ServiceNotFound:
- # We need to provide the same exception upstream
- raise exception.ComputeHostNotFound(host=host)
- # We can avoid an extra call to Service object in _from_db_object
- for db_compute in db_computes:
- db_compute['host'] = service.host
+ db_computes = cls._db_compute_node_get_all_by_host(context, host,
+ use_slave=use_slave)
return base.obj_make_list(context, cls(context), objects.ComputeNode,
db_computes)
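The obj_make_compatible() changes above follow the standard versioned-object rule: when serializing for an older consumer, strip every field newer than the target version, checking newest first so the conditions cascade. A standalone sketch with a plain dict standing in for the o.vo primitive:

def convert_version_to_tuple(version):
    return tuple(int(p) for p in version.split('.'))


def obj_make_compatible(primitive, target_version):
    """Strip fields the target version does not know about."""
    target = convert_version_to_tuple(target_version)
    if target < (1, 16):
        primitive.pop('disk_allocation_ratio', None)
    if target < (1, 15):
        primitive.pop('uuid', None)
    if target < (1, 14):
        primitive.pop('ram_allocation_ratio', None)
        primitive.pop('cpu_allocation_ratio', None)
    return primitive


node = {'uuid': 'abc', 'disk_allocation_ratio': 1.0,
        'ram_allocation_ratio': 1.5, 'cpu_allocation_ratio': 16.0}
# A 1.14 consumer keeps the ratios but never sees uuid or the disk ratio.
print(obj_make_compatible(dict(node), '1.14'))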
diff --git a/nova/objects/fields.py b/nova/objects/fields.py
index 991c3d7f2f..bfe1820abf 100644
--- a/nova/objects/fields.py
+++ b/nova/objects/fields.py
@@ -159,7 +159,7 @@ class CPUAllocationPolicy(Enum):
class CPUThreadAllocationPolicy(Enum):
# prefer (default): The host may or may not have hyperthreads. This
- # retains the legacy behavior, whereby siblings are prefered when
+ # retains the legacy behavior, whereby siblings are preferred when
# available. This is the default if no policy is specified.
PREFER = "prefer"
# isolate: The host may or may not have hyperthreads. If hyperthreads are
@@ -251,6 +251,27 @@ class HVType(Enum):
return super(HVType, self).coerce(obj, attr, value)
+class ImageSignatureHashType(Enum):
+ # Represents the possible hash methods used for image signing
+ def __init__(self):
+ self.hashes = ('SHA-224', 'SHA-256', 'SHA-384', 'SHA-512')
+ super(ImageSignatureHashType, self).__init__(
+ valid_values=self.hashes
+ )
+
+
+class ImageSignatureKeyType(Enum):
+ # Represents the possible keypair types used for image signing
+ def __init__(self):
+ self.key_types = (
+ 'DSA', 'ECC_SECT571K1', 'ECC_SECT409K1', 'ECC_SECT571R1',
+ 'ECC_SECT409R1', 'ECC_SECP521R1', 'ECC_SECP384R1', 'RSA-PSS'
+ )
+ super(ImageSignatureKeyType, self).__init__(
+ valid_values=self.key_types
+ )
+
+
class OSType(Enum):
LINUX = "linux"
@@ -269,6 +290,40 @@ class OSType(Enum):
return super(OSType, self).coerce(obj, attr, value)
+class ResourceClass(Enum):
+ """Classes of resources provided to consumers."""
+
+ VCPU = 'VCPU'
+ MEMORY_MB = 'MEMORY_MB'
+ DISK_GB = 'DISK_GB'
+ PCI_DEVICE = 'PCI_DEVICE'
+ SRIOV_NET_VF = 'SRIOV_NET_VF'
+ NUMA_SOCKET = 'NUMA_SOCKET'
+ NUMA_CORE = 'NUMA_CORE'
+ NUMA_THREAD = 'NUMA_THREAD'
+ NUMA_MEMORY_MB = 'NUMA_MEMORY_MB'
+ IPV4_ADDRESS = 'IPV4_ADDRESS'
+
+ # The ordering here is relevant. If you must add a value, only
+ # append.
+ ALL = (VCPU, MEMORY_MB, DISK_GB, PCI_DEVICE, SRIOV_NET_VF, NUMA_SOCKET,
+ NUMA_CORE, NUMA_THREAD, NUMA_MEMORY_MB, IPV4_ADDRESS)
+
+ def __init__(self):
+ super(ResourceClass, self).__init__(
+ valid_values=ResourceClass.ALL)
+
+ @classmethod
+ def index(cls, value):
+ """Return an index into the Enum given a value."""
+ return cls.ALL.index(value)
+
+ @classmethod
+ def from_index(cls, index):
+ """Return the Enum value at a given index."""
+ return cls.ALL[index]
+
+
class RNGModel(Enum):
VIRTIO = "virtio"
@@ -426,8 +481,9 @@ class HostStatus(Enum):
DOWN = "DOWN" # The nova-compute is forced_down.
MAINTENANCE = "MAINTENANCE" # The nova-compute is disabled.
UNKNOWN = "UNKNOWN" # The nova-compute has not reported.
+ NONE = "" # No host or nova-compute.
- ALL = (UP, DOWN, MAINTENANCE, UNKNOWN)
+ ALL = (UP, DOWN, MAINTENANCE, UNKNOWN, NONE)
def __init__(self):
super(HostStatus, self).__init__(
@@ -441,8 +497,11 @@ class PciDeviceStatus(Enum):
ALLOCATED = "allocated"
REMOVED = "removed" # The device has been hot-removed and not yet deleted
DELETED = "deleted" # The device is marked not available/deleted.
+ UNCLAIMABLE = "unclaimable"
+ UNAVAILABLE = "unavailable"
- ALL = (AVAILABLE, CLAIMED, ALLOCATED, REMOVED, DELETED)
+ ALL = (AVAILABLE, CLAIMED, ALLOCATED, REMOVED, DELETED, UNAVAILABLE,
+ UNCLAIMABLE)
def __init__(self):
super(PciDeviceStatus, self).__init__(
@@ -464,6 +523,62 @@ class PciDeviceType(Enum):
valid_values=PciDeviceType.ALL)
+class DiskFormat(Enum):
+ RBD = "rbd"
+ LVM = "lvm"
+ QCOW2 = "qcow2"
+ RAW = "raw"
+ PLOOP = "ploop"
+ VHD = "vhd"
+ VMDK = "vmdk"
+ VDI = "vdi"
+ ISO = "iso"
+
+ ALL = (RBD, LVM, QCOW2, RAW, PLOOP, VHD, VMDK, VDI, ISO)
+
+ def __init__(self):
+ super(DiskFormat, self).__init__(
+ valid_values=DiskFormat.ALL)
+
+
+class NotificationPriority(Enum):
+ AUDIT = 'audit'
+ CRITICAL = 'critical'
+ DEBUG = 'debug'
+ INFO = 'info'
+ ERROR = 'error'
+ SAMPLE = 'sample'
+ WARN = 'warn'
+
+ ALL = (AUDIT, CRITICAL, DEBUG, INFO, ERROR, SAMPLE, WARN)
+
+ def __init__(self):
+ super(NotificationPriority, self).__init__(
+ valid_values=NotificationPriority.ALL)
+
+
+class NotificationPhase(Enum):
+ START = 'start'
+ END = 'end'
+ ERROR = 'error'
+
+ ALL = (START, END, ERROR)
+
+ def __init__(self):
+ super(NotificationPhase, self).__init__(
+ valid_values=NotificationPhase.ALL)
+
+
+class NotificationAction(Enum):
+ UPDATE = 'update'
+
+ ALL = (UPDATE,)
+
+ def __init__(self):
+ super(NotificationAction, self).__init__(
+ valid_values=NotificationAction.ALL)
+
+
class IPAddress(FieldType):
@staticmethod
def coerce(obj, attr, value):
@@ -674,10 +789,30 @@ class HVTypeField(BaseEnumField):
AUTO_TYPE = HVType()
+class ImageSignatureHashTypeField(BaseEnumField):
+ AUTO_TYPE = ImageSignatureHashType()
+
+
+class ImageSignatureKeyTypeField(BaseEnumField):
+ AUTO_TYPE = ImageSignatureKeyType()
+
+
class OSTypeField(BaseEnumField):
AUTO_TYPE = OSType()
+class ResourceClassField(BaseEnumField):
+ AUTO_TYPE = ResourceClass()
+
+ def index(self, value):
+ """Return an index into the Enum given a value."""
+ return self._type.index(value)
+
+ def from_index(self, index):
+ """Return the Enum value at a given index."""
+ return self._type.from_index(index)
+
+
class RNGModelField(BaseEnumField):
AUTO_TYPE = RNGModel()
@@ -714,6 +849,22 @@ class PciDeviceTypeField(BaseEnumField):
AUTO_TYPE = PciDeviceType()
+class DiskFormatField(BaseEnumField):
+ AUTO_TYPE = DiskFormat()
+
+
+class NotificationPriorityField(BaseEnumField):
+ AUTO_TYPE = NotificationPriority()
+
+
+class NotificationPhaseField(BaseEnumField):
+ AUTO_TYPE = NotificationPhase()
+
+
+class NotificationActionField(BaseEnumField):
+ AUTO_TYPE = NotificationAction()
+
+
class IPAddressField(AutoTypedField):
AUTO_TYPE = IPAddress()
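All of the new fields above use the same two-part pattern: an Enum subclass that feeds its canonical value tuple to valid_values, plus a thin BaseEnumField wrapper carrying it as AUTO_TYPE. A self-contained sketch of the validation half of that pattern, with a toy Enum base in place of nova's:

class Enum(object):
    """Toy stand-in for the fields.Enum base used above."""

    def __init__(self, valid_values):
        self.valid_values = tuple(valid_values)

    def coerce(self, value):
        if value not in self.valid_values:
            raise ValueError('%r not in %r' % (value, self.valid_values))
        return value


class NotificationPhase(Enum):
    START = 'start'
    END = 'end'
    ERROR = 'error'

    ALL = (START, END, ERROR)

    def __init__(self):
        super(NotificationPhase, self).__init__(
            valid_values=NotificationPhase.ALL)


phase_field = NotificationPhase()
print(phase_field.coerce('start'))   # ok
# phase_field.coerce('bogus')        # would raise ValueError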
diff --git a/nova/objects/host_mapping.py b/nova/objects/host_mapping.py
index bb7bf000da..4ad19947ff 100644
--- a/nova/objects/host_mapping.py
+++ b/nova/objects/host_mapping.py
@@ -40,8 +40,7 @@ class HostMapping(base.NovaTimestampObject, base.NovaObject,
}
def _get_cell_mapping(self):
- session = db_api.get_api_session()
- with session.begin():
+ with db_api.api_context_manager.reader.using(self._context) as session:
cell_map = (session.query(api_models.CellMapping)
.join(api_models.HostMapping)
.filter(api_models.HostMapping.host == self.host)
@@ -76,19 +75,17 @@ class HostMapping(base.NovaTimestampObject, base.NovaObject,
return host_mapping
@staticmethod
+ @db_api.api_context_manager.reader
def _get_by_host_from_db(context, host):
- session = db_api.get_api_session()
-
- with session.begin():
- db_mapping = (session.query(api_models.HostMapping)
- .join(api_models.CellMapping)
- .with_entities(api_models.HostMapping,
- api_models.CellMapping)
- .filter(api_models.HostMapping.host == host)).first()
- if not db_mapping:
- raise exception.HostMappingNotFound(name=host)
- host_mapping = db_mapping[0]
- host_mapping["cell_mapping"] = db_mapping[1]
+ db_mapping = (context.session.query(api_models.HostMapping)
+ .join(api_models.CellMapping)
+ .with_entities(api_models.HostMapping,
+ api_models.CellMapping)
+ .filter(api_models.HostMapping.host == host)).first()
+ if not db_mapping:
+ raise exception.HostMappingNotFound(name=host)
+ host_mapping = db_mapping[0]
+ host_mapping["cell_mapping"] = db_mapping[1]
return host_mapping
@base.remotable_classmethod
@@ -97,11 +94,11 @@ class HostMapping(base.NovaTimestampObject, base.NovaObject,
return cls._from_db_object(context, cls(), db_mapping)
@staticmethod
+ @db_api.api_context_manager.writer
def _create_in_db(context, updates):
- session = db_api.get_api_session()
db_mapping = api_models.HostMapping()
db_mapping.update(updates)
- db_mapping.save(session)
+ db_mapping.save(context.session)
return db_mapping
@base.remotable
@@ -113,16 +110,14 @@ class HostMapping(base.NovaTimestampObject, base.NovaObject,
self._from_db_object(self._context, self, db_mapping)
@staticmethod
+ @db_api.api_context_manager.writer
def _save_in_db(context, obj, updates):
- session = db_api.get_api_session()
- with session.begin():
- db_mapping = session.query(
- api_models.HostMapping).filter_by(
- id=obj.id).first()
- if not db_mapping:
- raise exception.HostMappingNotFound(name=obj.host)
-
- db_mapping.update(updates)
+ db_mapping = context.session.query(api_models.HostMapping).filter_by(
+ id=obj.id).first()
+ if not db_mapping:
+ raise exception.HostMappingNotFound(name=obj.host)
+
+ db_mapping.update(updates)
return db_mapping
@base.remotable
@@ -135,14 +130,12 @@ class HostMapping(base.NovaTimestampObject, base.NovaObject,
self.obj_reset_changes()
@staticmethod
+ @db_api.api_context_manager.writer
def _destroy_in_db(context, host):
- session = db_api.get_api_session()
-
- with session.begin():
- result = session.query(api_models.HostMapping).filter_by(
- host=host).delete()
- if not result:
- raise exception.HostMappingNotFound(name=host)
+ result = context.session.query(api_models.HostMapping).filter_by(
+ host=host).delete()
+ if not result:
+ raise exception.HostMappingNotFound(name=host)
@base.remotable
def destroy(self):
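Each conversion in host_mapping above trades an explicit get_api_session()/session.begin() pair for a decorator that opens the transaction and exposes the session as context.session. A hedged sketch of a reader decorator with that shape (greatly simplified; the real api_context_manager comes from oslo.db's enginefacade):

import contextlib


@contextlib.contextmanager
def _transaction(mode):
    print('begin %s transaction' % mode)
    yield 'session-object'
    print('commit')


def reader(fn):
    """Sketch: open a reader transaction, expose it as context.session."""
    def wrapper(context, *args, **kwargs):
        with _transaction('reader') as session:
            context.session = session
            return fn(context, *args, **kwargs)
    return wrapper


class Ctx(object):
    pass


@reader
def _get_by_host_from_db(context, host):
    # The real body runs a query through context.session.
    return 'row for %s via %s' % (host, context.session)


print(_get_by_host_from_db(Ctx(), 'compute1'))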
diff --git a/nova/objects/image_meta.py b/nova/objects/image_meta.py
index 365d4856b8..72a16dc459 100644
--- a/nova/objects/image_meta.py
+++ b/nova/objects/image_meta.py
@@ -126,6 +126,25 @@ class ImageMeta(base.NovaObject):
image_meta = utils.get_image_from_system_metadata(sysmeta)
return cls.from_dict(image_meta)
+ @classmethod
+ def from_image_ref(cls, context, image_api, image_ref):
+ """Create instance from glance image
+
+ :param context: the request context
+ :param image_api: the glance client API
+ :param image_ref: the glance image identifier
+
+ Creates a new object instance, initializing from the
+ properties associated with a glance image
+
+ :returns: an ImageMeta instance
+ """
+
+ image_meta = image_api.get(context, image_ref)
+ image = cls.from_dict(image_meta)
+ setattr(image, "id", image_ref)
+ return image
+
@base.NovaObjectRegistry.register
class ImageMetaProps(base.NovaObject):
@@ -141,7 +160,8 @@ class ImageMetaProps(base.NovaObject):
# Version 1.9: added hw_cpu_thread_policy field
# Version 1.10: added hw_cpu_realtime_mask field
# Version 1.11: Added hw_firmware_type field
- VERSION = '1.11'
+ # Version 1.12: Added properties for image signature verification
+ VERSION = '1.12'
def obj_make_compatible(self, primitive, target_version):
super(ImageMetaProps, self).obj_make_compatible(primitive,
@@ -352,6 +372,19 @@ class ImageMetaProps(base.NovaObject):
# integer value 1
'img_version': fields.IntegerField(),
+ # base64 encoding of the image signature
+ 'img_signature': fields.StringField(),
+
+ # string indicating hash method used to compute image signature
+ 'img_signature_hash_method': fields.ImageSignatureHashTypeField(),
+
+ # string indicating Castellan uuid of certificate
+ # used to compute the image's signature
+ 'img_signature_certificate_uuid': fields.UUIDField(),
+
+ # string indicating type of key used to compute image signature
+ 'img_signature_key_type': fields.ImageSignatureKeyTypeField(),
+
# string of username with admin privileges
'os_admin_user': fields.StringField(),
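A short usage sketch for the new ImageMeta.from_image_ref() classmethod, replacing the previous get-then-from_dict two-step (ctxt and image_api stand for a request context and the glance API wrapper):

# old pattern:
#     image_dict = image_api.get(ctxt, image_ref)
#     image = objects.ImageMeta.from_dict(image_dict)
# new pattern:
#     image = objects.ImageMeta.from_image_ref(ctxt, image_api, image_ref)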
diff --git a/nova/objects/instance.py b/nova/objects/instance.py
index 0e4af1c490..4ee3ca940c 100644
--- a/nova/objects/instance.py
+++ b/nova/objects/instance.py
@@ -201,6 +201,10 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
super(Instance, self).__init__(*args, **kwargs)
self._reset_metadata_tracking()
+ @property
+ def image_meta(self):
+ return objects.ImageMeta.from_instance(self)
+
def _reset_metadata_tracking(self, fields=None):
if fields is None or 'system_metadata' in fields:
self._orig_system_metadata = (dict(self.system_metadata) if
@@ -379,14 +383,20 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
instance.obj_reset_changes()
return instance
+ @staticmethod
+ @db.select_db_reader_mode
+ def _db_instance_get_by_uuid(context, uuid, columns_to_join,
+ use_slave=False):
+ return db.instance_get_by_uuid(context, uuid,
+ columns_to_join=columns_to_join)
+
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
- db_inst = db.instance_get_by_uuid(context, uuid,
- columns_to_join=columns_to_join,
- use_slave=use_slave)
+ db_inst = cls._db_instance_get_by_uuid(context, uuid, columns_to_join,
+ use_slave=use_slave)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@@ -417,14 +427,18 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
}
updates['extra'] = {}
numa_topology = updates.pop('numa_topology', None)
+ expected_attrs.append('numa_topology')
if numa_topology:
- expected_attrs.append('numa_topology')
updates['extra']['numa_topology'] = numa_topology._to_json()
+ else:
+ updates['extra']['numa_topology'] = None
pci_requests = updates.pop('pci_requests', None)
+ expected_attrs.append('pci_requests')
if pci_requests:
- expected_attrs.append('pci_requests')
updates['extra']['pci_requests'] = (
pci_requests.to_json())
+ else:
+ updates['extra']['pci_requests'] = None
flavor = updates.pop('flavor', None)
if flavor:
expected_attrs.append('flavor')
@@ -441,10 +455,12 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
}
updates['extra']['flavor'] = jsonutils.dumps(flavor_info)
vcpu_model = updates.pop('vcpu_model', None)
+ expected_attrs.append('vcpu_model')
if vcpu_model:
- expected_attrs.append('vcpu_model')
updates['extra']['vcpu_model'] = (
jsonutils.dumps(vcpu_model.obj_to_primitive()))
+ else:
+ updates['extra']['vcpu_model'] = None
db_inst = db.instance_create(self._context, updates)
self._from_db_object(self._context, self, db_inst, expected_attrs)
@@ -794,6 +810,10 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
def _load_ec2_ids(self):
self.ec2_ids = objects.EC2Ids.get_by_instance(self._context, self)
+ def _load_security_groups(self):
+ self.security_groups = objects.SecurityGroupList.get_by_instance(
+ self._context, self)
+
def _load_migration_context(self, db_context=_NO_DATA_SENTINEL):
if db_context is _NO_DATA_SENTINEL:
try:
@@ -873,6 +893,8 @@ class Instance(base.NovaPersistentObject, base.NovaObject,
self._load_ec2_ids()
elif attrname == 'migration_context':
self._load_migration_context()
+ elif attrname == 'security_groups':
+ self._load_security_groups()
elif 'flavor' in attrname:
self._load_flavor()
else:
@@ -991,8 +1013,9 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
'objects': fields.ListOfObjectsField('Instance'),
}
- @base.remotable_classmethod
- def get_by_filters(cls, context, filters,
+ @classmethod
+ @db.select_db_reader_mode
+ def _get_by_filters_impl(cls, context, filters,
sort_key='created_at', sort_dir='desc', limit=None,
marker=None, expected_attrs=None, use_slave=False,
sort_keys=None, sort_dirs=None):
@@ -1000,18 +1023,34 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
db_inst_list = db.instance_get_all_by_filters_sort(
context, filters, limit=limit, marker=marker,
columns_to_join=_expected_cols(expected_attrs),
- use_slave=use_slave, sort_keys=sort_keys, sort_dirs=sort_dirs)
+ sort_keys=sort_keys, sort_dirs=sort_dirs)
else:
db_inst_list = db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir, limit=limit,
- marker=marker, columns_to_join=_expected_cols(expected_attrs),
- use_slave=use_slave)
+ marker=marker, columns_to_join=_expected_cols(expected_attrs))
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
+ def get_by_filters(cls, context, filters,
+ sort_key='created_at', sort_dir='desc', limit=None,
+ marker=None, expected_attrs=None, use_slave=False,
+ sort_keys=None, sort_dirs=None):
+ return cls._get_by_filters_impl(
+ context, filters, sort_key=sort_key, sort_dir=sort_dir,
+ limit=limit, marker=marker, expected_attrs=expected_attrs,
+ use_slave=use_slave, sort_keys=sort_keys, sort_dirs=sort_dirs)
+
+ @staticmethod
+ @db.select_db_reader_mode
+ def _db_instance_get_all_by_host(context, host, columns_to_join,
+ use_slave=False):
+ return db.instance_get_all_by_host(context, host,
+ columns_to_join=columns_to_join)
+
+ @base.remotable_classmethod
def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
- db_inst_list = db.instance_get_all_by_host(
+ db_inst_list = cls._db_instance_get_all_by_host(
context, host, columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
@@ -1049,6 +1088,15 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
+ @staticmethod
+ @db.select_db_reader_mode
+ def _db_instance_get_active_by_window_joined(
+ context, begin, end, project_id, host, columns_to_join,
+ use_slave=False):
+ return db.instance_get_active_by_window_joined(
+ context, begin, end, project_id, host,
+ columns_to_join=columns_to_join)
+
@base.remotable_classmethod
def _get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
@@ -1058,9 +1106,10 @@ class InstanceList(base.ObjectListBase, base.NovaObject):
# to timezone-aware datetime objects for the DB API call.
begin = timeutils.parse_isotime(begin)
end = timeutils.parse_isotime(end) if end else None
- db_inst_list = db.instance_get_active_by_window_joined(
+ db_inst_list = cls._db_instance_get_active_by_window_joined(
context, begin, end, project_id, host,
- columns_to_join=_expected_cols(expected_attrs))
+ columns_to_join=_expected_cols(expected_attrs),
+ use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
diff --git a/nova/objects/migrate_data.py b/nova/objects/migrate_data.py
index ee5709c772..ce3cb0e41e 100644
--- a/nova/objects/migrate_data.py
+++ b/nova/objects/migrate_data.py
@@ -14,6 +14,7 @@
from oslo_log import log
from oslo_serialization import jsonutils
+from oslo_utils import versionutils
from nova import objects
from nova.objects import base as obj_base
@@ -47,6 +48,19 @@ class LiveMigrateData(obj_base.NovaObject):
if 'migration' in legacy:
self.migration = legacy['migration']
+ @classmethod
+ def detect_implementation(cls, legacy_dict):
+ if 'instance_relative_path' in legacy_dict:
+ obj = LibvirtLiveMigrateData()
+ elif 'image_type' in legacy_dict:
+ obj = LibvirtLiveMigrateData()
+ elif 'migrate_data' in legacy_dict:
+ obj = XenapiLiveMigrateData()
+ else:
+ obj = LiveMigrateData()
+ obj.from_legacy_dict(legacy_dict)
+ return obj
+
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateBDMInfo(obj_base.NovaObject):
@@ -91,7 +105,9 @@ class LibvirtLiveMigrateBDMInfo(obj_base.NovaObject):
@obj_base.NovaObjectRegistry.register
class LibvirtLiveMigrateData(LiveMigrateData):
- VERSION = '1.0'
+ # Version 1.0: Initial version
+ # Version 1.1: Added target_connect_addr
+ VERSION = '1.1'
fields = {
'filename': fields.StringField(),
@@ -105,10 +121,18 @@ class LibvirtLiveMigrateData(LiveMigrateData):
'instance_relative_path': fields.StringField(),
'graphics_listen_addr_vnc': fields.IPAddressField(nullable=True),
'graphics_listen_addr_spice': fields.IPAddressField(nullable=True),
- 'serial_listen_addr': fields.StringField(),
+ 'serial_listen_addr': fields.StringField(nullable=True),
'bdms': fields.ListOfObjectsField('LibvirtLiveMigrateBDMInfo'),
+ 'target_connect_addr': fields.StringField(nullable=True),
}
+ def obj_make_compatible(self, primitive, target_version):
+ super(LibvirtLiveMigrateData, self).obj_make_compatible(
+ primitive, target_version)
+ target_version = versionutils.convert_version_to_tuple(target_version)
+ if target_version < (1, 1) and 'target_connect_addr' in primitive:
+ del primitive['target_connect_addr']
+
def _bdms_to_legacy(self, legacy):
if not self.obj_attr_is_set('bdms'):
return
@@ -144,12 +168,14 @@ class LibvirtLiveMigrateData(LiveMigrateData):
graphics_vnc = legacy.pop('graphics_listen_addr_vnc', None)
graphics_spice = legacy.pop('graphics_listen_addr_spice', None)
+ transport_target = legacy.pop('target_connect_addr', None)
live_result = {
'graphics_listen_addrs': {
'vnc': graphics_vnc and str(graphics_vnc),
'spice': graphics_spice and str(graphics_spice),
},
'serial_listen_addr': legacy.pop('serial_listen_addr', None),
+ 'target_connect_addr': transport_target,
}
if pre_migration_result:
@@ -172,6 +198,7 @@ class LibvirtLiveMigrateData(LiveMigrateData):
pre_result['graphics_listen_addrs'].get('vnc')
self.graphics_listen_addr_spice = \
pre_result['graphics_listen_addrs'].get('spice')
+ self.target_connect_addr = pre_result.get('target_connect_addr')
if 'serial_listen_addr' in pre_result:
self.serial_listen_addr = pre_result['serial_listen_addr']
self._bdms_from_legacy(pre_result)
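detect_implementation() above dispatches on sentinel keys that only particular drivers ever put in the legacy dict, which holds only as long as those keys stay distinctive. The dispatch in standalone sketch form:

class LiveMigrateData(object):
    def from_legacy_dict(self, legacy):
        self.legacy = legacy


class LibvirtLiveMigrateData(LiveMigrateData):
    pass


class XenapiLiveMigrateData(LiveMigrateData):
    pass


def detect_implementation(legacy_dict):
    # 'instance_relative_path' / 'image_type' are only ever set by the
    # libvirt driver, 'migrate_data' only by xenapi.
    if ('instance_relative_path' in legacy_dict
            or 'image_type' in legacy_dict):
        obj = LibvirtLiveMigrateData()
    elif 'migrate_data' in legacy_dict:
        obj = XenapiLiveMigrateData()
    else:
        obj = LiveMigrateData()
    obj.from_legacy_dict(legacy_dict)
    return obj


print(type(detect_implementation({'image_type': 'qcow2'})).__name__)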
diff --git a/nova/objects/migration.py b/nova/objects/migration.py
index d23b4f86a4..67b7ae5d6b 100644
--- a/nova/objects/migration.py
+++ b/nova/objects/migration.py
@@ -35,7 +35,9 @@ class Migration(base.NovaPersistentObject, base.NovaObject,
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: Added migration_type and hidden
- VERSION = '1.2'
+ # Version 1.3: Added get_by_id_and_instance()
+ # Version 1.4: Added migration progress detail
+ VERSION = '1.4'
fields = {
'id': fields.IntegerField(),
@@ -52,6 +54,12 @@ class Migration(base.NovaPersistentObject, base.NovaObject,
'live-migration', 'evacuation'],
nullable=False),
'hidden': fields.BooleanField(nullable=False, default=False),
+ 'memory_total': fields.IntegerField(nullable=True),
+ 'memory_processed': fields.IntegerField(nullable=True),
+ 'memory_remaining': fields.IntegerField(nullable=True),
+ 'disk_total': fields.IntegerField(nullable=True),
+ 'disk_processed': fields.IntegerField(nullable=True),
+ 'disk_remaining': fields.IntegerField(nullable=True),
}
@staticmethod
@@ -73,6 +81,14 @@ class Migration(base.NovaPersistentObject, base.NovaObject,
if 'migration_type' in primitive:
del primitive['migration_type']
del primitive['hidden']
+ if target_version < (1, 4):
+ if 'memory_total' in primitive:
+ del primitive['memory_total']
+ del primitive['memory_processed']
+ del primitive['memory_remaining']
+ del primitive['disk_total']
+ del primitive['disk_processed']
+ del primitive['disk_remaining']
def obj_load_attr(self, attrname):
if attrname == 'migration_type':
@@ -90,6 +106,12 @@ class Migration(base.NovaPersistentObject, base.NovaObject,
return cls._from_db_object(context, cls(), db_migration)
@base.remotable_classmethod
+ def get_by_id_and_instance(cls, context, migration_id, instance_uuid):
+ db_migration = db.migration_get_by_id_and_instance(
+ context, migration_id, instance_uuid)
+ return cls._from_db_object(context, cls(), db_migration)
+
+ @base.remotable_classmethod
def get_by_instance_and_status(cls, context, instance_uuid, status):
db_migration = db.migration_get_by_instance_and_status(
context, instance_uuid, status)
@@ -119,7 +141,14 @@ class Migration(base.NovaPersistentObject, base.NovaObject,
@property
def instance(self):
- return objects.Instance.get_by_uuid(self._context, self.instance_uuid)
+ if not hasattr(self, '_cached_instance'):
+ self._cached_instance = objects.Instance.get_by_uuid(
+ self._context, self.instance_uuid)
+ return self._cached_instance
+
+ @instance.setter
+ def instance(self, instance):
+ self._cached_instance = instance
@base.NovaObjectRegistry.register
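The instance property rewrite above is plain memoization with an explicit setter, so a caller that already holds the Instance can prime the cache and avoid the extra get_by_uuid() round trip. In sketch form:

class MigrationSketch(object):
    def __init__(self, instance_uuid, loader):
        self.instance_uuid = instance_uuid
        self._loader = loader  # stands in for Instance.get_by_uuid

    @property
    def instance(self):
        if not hasattr(self, '_cached_instance'):
            self._cached_instance = self._loader(self.instance_uuid)
        return self._cached_instance

    @instance.setter
    def instance(self, instance):
        # Callers that already fetched the instance can prime the cache.
        self._cached_instance = instance


m = MigrationSketch('uuid-1', lambda u: {'uuid': u})
m.instance = {'uuid': 'uuid-1', 'preloaded': True}
print(m.instance['preloaded'])  # True: no loader call was made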
diff --git a/nova/objects/notification.py b/nova/objects/notification.py
new file mode 100644
index 0000000000..40533eac33
--- /dev/null
+++ b/nova/objects/notification.py
@@ -0,0 +1,150 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from nova.objects import base
+from nova.objects import fields
+from nova import rpc
+
+
+@base.NovaObjectRegistry.register
+class EventType(base.NovaObject):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'object': fields.StringField(nullable=False),
+ 'action': fields.NotificationActionField(nullable=False),
+ 'phase': fields.NotificationPhaseField(nullable=True),
+ }
+
+ def to_notification_event_type_field(self):
+ """Serialize the object to the wire format."""
+ s = '%s.%s' % (self.object, self.action)
+ if self.obj_attr_is_set('phase'):
+ s += '.%s' % self.phase
+ return s
+
+
+# Note(gibi): It is explicitly not registered as this class shall not be used
+# directly; it is just a base class for notification payloads.
+@base.NovaObjectRegistry.register_if(False)
+class NotificationPayloadBase(base.NovaObject):
+ """Base class for the payload of versioned notifications."""
+ # SCHEMA defines how to populate the payload fields. It is a dictionary
+ # where every key value pair has the following format:
+ # <payload_field_name>: (<data_source_name>,
+ # <field_of_the_data_source>)
+ # The <payload_field_name> is the name where the data will be stored in the
+ # payload object, this field has to be defined as a field of the payload.
+ # The <data_source_name> shall refer to name of the parameter passed as
+ # kwarg to the payload's populate_schema() call and this object will be
+ # used as the source of the data. The <field_of_the_data_source> shall be
+ # a valid field of the passed argument.
+ # The SCHEMA needs to be applied with the populate_schema() call before the
+ # notification can be emitted.
+ # The value of the payload.<payload_field_name> field will be set by the
+ # <data_source_name>.<field_of_the_data_source> field. The
+ # <data_source_name> will not be part of the payload object internal or
+ # external representation.
+ # Payload fields that are not set by the SCHEMA can be filled in the same
+ # way as in any versioned object.
+ SCHEMA = {}
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ def __init__(self, *args, **kwargs):
+ super(NotificationPayloadBase, self).__init__(*args, **kwargs)
+ self.populated = not self.SCHEMA
+
+ def populate_schema(self, **kwargs):
+ """Populate the object based on the SCHEMA and the source objects
+
+ :param kwargs: A dict containing the source objects at the keys defined
+ in the SCHEMA
+ """
+ for key, (obj, field) in self.SCHEMA.items():
+ source = kwargs[obj]
+ if source.obj_attr_is_set(field):
+ setattr(self, key, getattr(source, field))
+ self.populated = True
+
+
+@base.NovaObjectRegistry.register
+class NotificationPublisher(base.NovaObject):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'host': fields.StringField(nullable=False),
+ 'binary': fields.StringField(nullable=False),
+ }
+
+ @classmethod
+ def from_service_obj(cls, service):
+ return cls(host=service.host, binary=service.binary)
+
+
+# Note(gibi): It is explicitly not registered as this class shall not be used
+# directly; it is just a base class for notifications.
+@base.NovaObjectRegistry.register_if(False)
+class NotificationBase(base.NovaObject):
+ """Base class for versioned notifications.
+
+ Every subclass shall define a 'payload' field.
+ """
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'priority': fields.NotificationPriorityField(),
+ 'event_type': fields.ObjectField('EventType'),
+ 'publisher': fields.ObjectField('NotificationPublisher'),
+ }
+
+ def _emit(self, context, event_type, publisher_id, payload):
+ notifier = rpc.get_versioned_notifier(publisher_id)
+ notify = getattr(notifier, self.priority)
+ notify(context, event_type=event_type, payload=payload)
+
+ def emit(self, context):
+ """Send the notification."""
+ assert self.payload.populated
+
+ # Note(gibi): the notification payload will be a newly populated object,
+ # so every field of it will look changed; that change information carries
+ # nothing extra, so we drop it from the payload.
+ self.payload.obj_reset_changes(recursive=False)
+
+ self._emit(context,
+ event_type=
+ self.event_type.to_notification_event_type_field(),
+ publisher_id='%s:%s' %
+ (self.publisher.binary,
+ self.publisher.host),
+ payload=self.payload.obj_to_primitive())
+
+
+def notification_sample(sample):
+ """Class decorator to attach the notification sample information
+ to the notification object for documentation generation purposes.
+
+ :param sample: the path of the sample json file relative to the
+ doc/notification_samples/ directory in the nova repository
+ root.
+ """
+ def wrap(cls):
+ cls.sample = sample
+ return cls
+ return wrap
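The SCHEMA comment block above describes the whole payload-population mechanism; here is a compact end-to-end sketch of the intended flow, with an invented two-field payload and a dict standing in for the source object:

class PayloadSketch(object):
    """Mimics NotificationPayloadBase.populate_schema() from the diff."""

    # payload field -> (kwarg name given to populate_schema, source field)
    SCHEMA = {'host': ('service', 'host'),
              'binary': ('service', 'binary')}

    def __init__(self):
        self.populated = not self.SCHEMA

    def populate_schema(self, **kwargs):
        for key, (obj, field) in self.SCHEMA.items():
            source = kwargs[obj]
            if field in source:  # stands in for obj_attr_is_set()
                setattr(self, key, source[field])
        self.populated = True


payload = PayloadSketch()
payload.populate_schema(service={'host': 'compute1',
                                 'binary': 'nova-compute'})
assert payload.populated
print('%s %s' % (payload.host, payload.binary))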
diff --git a/nova/objects/pci_device.py b/nova/objects/pci_device.py
index bd4a5ccfbe..5dbc949be0 100644
--- a/nova/objects/pci_device.py
+++ b/nova/objects/pci_device.py
@@ -15,6 +15,7 @@
import copy
+from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import versionutils
@@ -25,6 +26,8 @@ from nova import objects
from nova.objects import base
from nova.objects import fields
+LOG = logging.getLogger(__name__)
+
def compare_pci_device_attributes(obj_a, obj_b):
pci_ignore_fields = base.NovaPersistentObject.fields.keys()
@@ -87,7 +90,8 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
# Version 1.2: added request_id field
# Version 1.3: Added field to represent PCI device NUMA node
# Version 1.4: Added parent_addr field
- VERSION = '1.4'
+ # Version 1.5: Added 2 new device statuses: UNCLAIMABLE and UNAVAILABLE
+ VERSION = '1.5'
fields = {
'id': fields.IntegerField(),
@@ -109,7 +113,7 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
}
@staticmethod
- def _migrate_parent_addr():
+ def should_migrate_data():
# NOTE(ndipanov): Only migrate parent_addr if all services are up to at
# least version 4 - this should only ever be called from save()
services = ('conductor', 'api')
@@ -129,6 +133,15 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
extra_info = primitive.get('extra_info', {})
extra_info['phys_function'] = primitive['parent_addr']
del primitive['parent_addr']
+ if target_version < (1, 5) and 'parent_addr' in primitive:
+ added_statuses = (fields.PciDeviceStatus.UNCLAIMABLE,
+ fields.PciDeviceStatus.UNAVAILABLE)
+ status = primitive['status']
+ if status in added_statuses:
+ raise exception.ObjectActionError(
+ action='obj_make_compatible',
+ reason='status=%s not supported in version %s' % (
+ status, target_version))
def update_device(self, dev_dict):
"""Sync the content from device dictionary to device object.
@@ -145,6 +158,9 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
map(lambda x: dev_dict.pop(x, None),
[key for key in no_changes])
+ # NOTE(ndipanov): This needs to be set as it's accessed when matching
+ dev_dict.setdefault('parent_addr')
+
for k, v in dev_dict.items():
if k in self.fields.keys():
setattr(self, k, v)
@@ -195,7 +211,7 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
return cls._from_db_object(context, cls(), db_dev)
@classmethod
- def create(cls, dev_dict):
+ def create(cls, context, dev_dict):
"""Create a PCI device based on hypervisor information.
As the device object is just created and is not synced with db yet
@@ -204,6 +220,7 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
pci_device = cls()
pci_device.update_device(dev_dict)
pci_device.status = fields.PciDeviceStatus.AVAILABLE
+ pci_device._context = context
return pci_device
@base.remotable
@@ -214,7 +231,7 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
self.address)
elif self.status != fields.PciDeviceStatus.DELETED:
updates = self.obj_get_changes()
- if not self._migrate_parent_addr():
+ if not self.should_migrate_data():
# NOTE(ndipanov): If we are not migrating data yet, make sure
# that any changes to parent_addr are also in the old location
# in extra_info
@@ -240,18 +257,73 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
self.address, updates)
self._from_db_object(self._context, self, db_pci)
+ @staticmethod
+ def _bulk_update_status(dev_list, status):
+ for dev in dev_list:
+ dev.status = status
+
def claim(self, instance):
if self.status != fields.PciDeviceStatus.AVAILABLE:
raise exception.PciDeviceInvalidStatus(
compute_node_id=self.compute_node_id,
address=self.address, status=self.status,
hopestatus=[fields.PciDeviceStatus.AVAILABLE])
+
+ if self.dev_type == fields.PciDeviceType.SRIOV_PF:
+ # Update PF status to CLAIMED if all of its dependants are free
+ # and set their status to UNCLAIMABLE
+ vfs_list = objects.PciDeviceList.get_by_parent_address(
+ self._context,
+ self.compute_node_id,
+ self.address)
+ if not all([vf.is_available() for vf in vfs_list]):
+ raise exception.PciDeviceVFInvalidStatus(
+ compute_node_id=self.compute_node_id,
+ address=self.address)
+ self._bulk_update_status(vfs_list,
+ fields.PciDeviceStatus.UNCLAIMABLE)
+
+ elif self.dev_type == fields.PciDeviceType.SRIOV_VF:
+ # Update VF status to CLAIMED if its parent has not been
+ # previously allocated or claimed
+ # When claiming/allocating a VF, its parent PF becomes
+ # unclaimable/unavailable. Therefore, it is expected to find the
+ # parent PF in an unclaimable/unavailable state for any following
+ # claims to a sibling VF
+
+ parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
+ fields.PciDeviceStatus.UNCLAIMABLE,
+ fields.PciDeviceStatus.UNAVAILABLE)
+ try:
+ parent = self.get_by_dev_addr(self._context,
+ self.compute_node_id,
+ self.parent_addr)
+ if parent.status not in parent_ok_statuses:
+ raise exception.PciDevicePFInvalidStatus(
+ compute_node_id=self.compute_node_id,
+ address=self.parent_addr, status=self.status,
+ vf_address=self.address,
+ hopestatus=parent_ok_statuses)
+ # Set PF status
+ if parent.status == fields.PciDeviceStatus.AVAILABLE:
+ parent.status = fields.PciDeviceStatus.UNCLAIMABLE
+ except exception.PciDeviceNotFound:
+ LOG.debug('Physical function addr: %(pf_addr)s parent of '
+ 'VF addr: %(vf_addr)s was not found',
+ {'pf_addr': self.parent_addr,
+ 'vf_addr': self.address})
+
self.status = fields.PciDeviceStatus.CLAIMED
self.instance_uuid = instance['uuid']
def allocate(self, instance):
ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
fields.PciDeviceStatus.CLAIMED)
+ parent_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
+ fields.PciDeviceStatus.UNCLAIMABLE,
+ fields.PciDeviceStatus.UNAVAILABLE)
+ dependants_ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
+ fields.PciDeviceStatus.UNCLAIMABLE)
if self.status not in ok_statuses:
raise exception.PciDeviceInvalidStatus(
compute_node_id=self.compute_node_id,
@@ -263,6 +335,37 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
compute_node_id=self.compute_node_id,
address=self.address, owner=self.instance_uuid,
hopeowner=instance['uuid'])
+ if self.dev_type == fields.PciDeviceType.SRIOV_PF:
+ vfs_list = objects.PciDeviceList.get_by_parent_address(
+ self._context,
+ self.compute_node_id,
+ self.address)
+ if not all([vf.status in dependants_ok_statuses for
+ vf in vfs_list]):
+ raise exception.PciDeviceVFInvalidStatus(
+ compute_node_id=self.compute_node_id,
+ address=self.address)
+ self._bulk_update_status(vfs_list,
+ fields.PciDeviceStatus.UNAVAILABLE)
+
+ elif (self.dev_type == fields.PciDeviceType.SRIOV_VF):
+ try:
+ parent = self.get_by_dev_addr(self._context,
+ self.compute_node_id,
+ self.parent_addr)
+ if parent.status not in parent_ok_statuses:
+ raise exception.PciDevicePFInvalidStatus(
+ compute_node_id=self.compute_node_id,
+ address=self.parent_addr, status=self.status,
+ vf_address=self.address,
+ hopestatus=parent_ok_statuses)
+ # Set PF status
+ parent.status = fields.PciDeviceStatus.UNAVAILABLE
+ except exception.PciDeviceNotFound:
+ LOG.debug('Physical function addr: %(pf_addr)s parent of '
+ 'VF addr: %(vf_addr)s was not found',
+ {'pf_addr': self.parent_addr,
+ 'vf_addr': self.address})
self.status = fields.PciDeviceStatus.ALLOCATED
self.instance_uuid = instance['uuid']
@@ -289,6 +392,7 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
def free(self, instance=None):
ok_statuses = (fields.PciDeviceStatus.ALLOCATED,
fields.PciDeviceStatus.CLAIMED)
+ free_devs = []
if self.status not in ok_statuses:
raise exception.PciDeviceInvalidStatus(
compute_node_id=self.compute_node_id,
@@ -299,8 +403,36 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
compute_node_id=self.compute_node_id,
address=self.address, owner=self.instance_uuid,
hopeowner=instance['uuid'])
+ if self.dev_type == fields.PciDeviceType.SRIOV_PF:
+ # Set all PF dependants' status to AVAILABLE
+ vfs_list = objects.PciDeviceList.get_by_parent_address(
+ self._context,
+ self.compute_node_id,
+ self.address)
+ self._bulk_update_status(vfs_list,
+ fields.PciDeviceStatus.AVAILABLE)
+ free_devs.extend(vfs_list)
+ if self.dev_type == fields.PciDeviceType.SRIOV_VF:
+ # Set PF status to AVAILABLE if all of its VFs are free
+ vfs_list = objects.PciDeviceList.get_by_parent_address(
+ self._context,
+ self.compute_node_id,
+ self.parent_addr)
+ if all([vf.is_available() for vf in vfs_list if vf.id != self.id]):
+ try:
+ parent = self.get_by_dev_addr(self._context,
+ self.compute_node_id,
+ self.parent_addr)
+ parent.status = fields.PciDeviceStatus.AVAILABLE
+ free_devs.append(parent)
+ except exception.PciDeviceNotFound:
+ LOG.debug('Physical function addr: %(pf_addr)s parent of '
+ 'VF addr: %(vf_addr)s was not found',
+ {'pf_addr': self.parent_addr,
+ 'vf_addr': self.address})
old_status = self.status
self.status = fields.PciDeviceStatus.AVAILABLE
+ free_devs.append(self)
self.instance_uuid = None
self.request_id = None
if old_status == fields.PciDeviceStatus.ALLOCATED and instance:
@@ -312,6 +444,7 @@ class PciDevice(base.NovaPersistentObject, base.NovaObject):
instance['pci_devices'].remove(existed)
else:
instance.pci_devices.objects.remove(existed)
+ return free_devs
def is_available(self):
return self.status == fields.PciDeviceStatus.AVAILABLE
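Taken together, the claim/allocate/free hunks above form a small parent/child state machine for SR-IOV devices. A rough summary of the transitions, as inferred from the code above:

# Rough transition summary, inferred from the claim()/allocate()/free()
# hunks above (device acted on -> effect on itself and related devices):
TRANSITIONS = {
    ('PF', 'claim'):    'PF -> CLAIMED; all free VFs -> UNCLAIMABLE',
    ('PF', 'allocate'): 'PF -> ALLOCATED; VFs -> UNAVAILABLE',
    ('VF', 'claim'):    'VF -> CLAIMED; available parent PF -> UNCLAIMABLE',
    ('VF', 'allocate'): 'VF -> ALLOCATED; parent PF -> UNAVAILABLE',
    ('PF', 'free'):     'PF -> AVAILABLE; all its VFs -> AVAILABLE',
    ('VF', 'free'):     'VF -> AVAILABLE; parent PF -> AVAILABLE once all '
                        'sibling VFs are free',
}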
diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py
index bfbc562f8c..3c9e92288f 100644
--- a/nova/objects/request_spec.py
+++ b/nova/objects/request_spec.py
@@ -184,6 +184,13 @@ class RequestSpec(base.NovaObject):
def from_primitives(cls, context, request_spec, filter_properties):
"""Returns a new RequestSpec object by hydrating it from legacy dicts.
+ Deprecated. A RequestSpec object is created early in the boot process
+ using the from_components method. That object will either be passed to
+ places that require it, or it can be looked up with
+ get_by_instance_uuid. This method can be removed when there are no
+ longer any callers. Because the method is not remotable it is not tied
+ to object versioning.
+
That helper is not intended to leave the legacy dicts kept in the nova
codebase, but is rather just for giving a temporary solution for
populating the Spec object until we get rid of scheduler_utils'
@@ -318,23 +325,67 @@ class RequestSpec(base.NovaObject):
hint) for hint in self.scheduler_hints}
return filt_props
+ @classmethod
+ def from_components(cls, context, instance_uuid, image, flavor,
+ numa_topology, pci_requests, filter_properties, instance_group,
+ availability_zone):
+ """Returns a new RequestSpec object hydrated by various components.
+
+ This helper is useful in creating the RequestSpec from the various
+ objects that are assembled early in the boot process. This method
+ creates a complete RequestSpec object with all properties set or
+ intentionally left blank.
+
+ :param context: a context object
+ :param instance_uuid: the uuid of the instance to schedule
+ :param image: a dict of properties for an image or volume
+ :param flavor: a flavor NovaObject
+ :param numa_topology: InstanceNUMATopology or None
+ :param pci_requests: InstancePCIRequests
+ :param filter_properties: a dict of properties for scheduling
+ :param instance_group: None or an instance group NovaObject
+ :param availability_zone: an availability_zone string
+ """
+ spec_obj = cls(context)
+ spec_obj.num_instances = 1
+ spec_obj.instance_uuid = instance_uuid
+ spec_obj.instance_group = instance_group
+ spec_obj.project_id = context.project_id
+ spec_obj._image_meta_from_image(image)
+ spec_obj._from_flavor(flavor)
+ spec_obj._from_instance_pci_requests(pci_requests)
+ spec_obj._from_instance_numa_topology(numa_topology)
+ spec_obj.ignore_hosts = filter_properties.get('ignore_hosts')
+ spec_obj.force_hosts = filter_properties.get('force_hosts')
+ spec_obj.force_nodes = filter_properties.get('force_nodes')
+ spec_obj._from_retry(filter_properties.get('retry', {}))
+ spec_obj._from_limits(filter_properties.get('limits', {}))
+ spec_obj._from_hints(filter_properties.get('scheduler_hints', {}))
+ spec_obj.availability_zone = availability_zone
+ return spec_obj
+
@staticmethod
def _from_db_object(context, spec, db_spec):
- spec = spec.obj_from_primitive(jsonutils.loads(db_spec['spec']))
+ spec_obj = spec.obj_from_primitive(jsonutils.loads(db_spec['spec']))
+ for key in spec.fields:
+ # Load these from the db model not the serialized object within,
+ # though they should match.
+ if key in ['id', 'instance_uuid']:
+ setattr(spec, key, db_spec[key])
+ else:
+ setattr(spec, key, getattr(spec_obj, key))
spec._context = context
spec.obj_reset_changes()
return spec
@staticmethod
+ @db.api_context_manager.reader
def _get_by_instance_uuid_from_db(context, instance_uuid):
- session = db.get_api_session()
-
- with session.begin():
- db_spec = session.query(api_models.RequestSpec).filter_by(
- instance_uuid=instance_uuid).first()
- if not db_spec:
- raise exception.RequestSpecNotFound(
- instance_uuid=instance_uuid)
+ db_spec = context.session.query(api_models.RequestSpec).filter_by(
+ instance_uuid=instance_uuid).first()
+ if not db_spec:
+ raise exception.RequestSpecNotFound(
+ instance_uuid=instance_uuid)
return db_spec
@base.remotable_classmethod
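
Since from_components() expects the pieces assembled early in boot, here is a hedged usage sketch (ctxt, instance, image_meta and flavor are assumed to exist; an empty filter_properties dict is valid because every lookup in the method uses .get()):

    spec = objects.RequestSpec.from_components(
        ctxt, instance.uuid, image_meta, flavor,
        numa_topology=None, pci_requests=None,
        filter_properties={}, instance_group=None,
        availability_zone='nova')
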
diff --git a/nova/objects/resource_provider.py b/nova/objects/resource_provider.py
new file mode 100644
index 0000000000..072cf5f4f5
--- /dev/null
+++ b/nova/objects/resource_provider.py
@@ -0,0 +1,197 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy.orm import joinedload
+
+from nova.db.sqlalchemy import api as db_api
+from nova.db.sqlalchemy import models
+from nova import exception
+from nova import objects
+from nova.objects import base
+from nova.objects import fields
+
+
+@base.NovaObjectRegistry.register
+class ResourceProvider(base.NovaObject):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'id': fields.IntegerField(read_only=True),
+ 'uuid': fields.UUIDField(nullable=False),
+ }
+
+ @base.remotable
+ def create(self):
+ if 'id' in self:
+ raise exception.ObjectActionError(action='create',
+ reason='already created')
+ if 'uuid' not in self:
+ raise exception.ObjectActionError(action='create',
+ reason='uuid is required')
+ updates = self.obj_get_changes()
+ db_rp = self._create_in_db(self._context, updates)
+ self._from_db_object(self._context, self, db_rp)
+
+ @base.remotable_classmethod
+ def get_by_uuid(cls, context, uuid):
+ db_resource_provider = cls._get_by_uuid_from_db(context, uuid)
+ return cls._from_db_object(context, cls(), db_resource_provider)
+
+ @staticmethod
+ @db_api.main_context_manager.writer
+ def _create_in_db(context, updates):
+ db_rp = models.ResourceProvider()
+ db_rp.update(updates)
+ context.session.add(db_rp)
+ return db_rp
+
+ @staticmethod
+ def _from_db_object(context, resource_provider, db_resource_provider):
+ for field in resource_provider.fields:
+ setattr(resource_provider, field, db_resource_provider[field])
+ resource_provider._context = context
+ resource_provider.obj_reset_changes()
+ return resource_provider
+
+ @staticmethod
+ @db_api.main_context_manager.reader
+ def _get_by_uuid_from_db(context, uuid):
+ result = context.session.query(models.ResourceProvider).filter_by(
+ uuid=uuid).first()
+ if not result:
+ raise exception.NotFound()
+ return result
+
+
+class _HasAResourceProvider(base.NovaObject):
+ """Code shared between Inventory and Allocation
+
+ Both contain a ResourceProvider.
+ """
+
+ @staticmethod
+ def _make_db(updates):
+ try:
+ resource_provider = updates.pop('resource_provider')
+ updates['resource_provider_id'] = resource_provider.id
+ except (KeyError, NotImplementedError):
+ raise exception.ObjectActionError(
+ action='create',
+ reason='resource_provider required')
+ try:
+ resource_class = updates.pop('resource_class')
+ except KeyError:
+ raise exception.ObjectActionError(
+ action='create',
+ reason='resource_class required')
+ updates['resource_class_id'] = fields.ResourceClass.index(
+ resource_class)
+ return updates
+
+ @staticmethod
+ def _from_db_object(context, target, source):
+ for field in target.fields:
+ if field not in ('resource_provider', 'resource_class'):
+ setattr(target, field, source[field])
+
+ if 'resource_class' not in target:
+ target.resource_class = (
+ target.fields['resource_class'].from_index(
+ source['resource_class_id']))
+ if ('resource_provider' not in target and
+ 'resource_provider' in source):
+ target.resource_provider = ResourceProvider()
+ ResourceProvider._from_db_object(
+ context,
+ target.resource_provider,
+ source['resource_provider'])
+
+ target._context = context
+ target.obj_reset_changes()
+ return target
+
+
+@base.NovaObjectRegistry.register
+class Inventory(_HasAResourceProvider):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'id': fields.IntegerField(read_only=True),
+ 'resource_provider': fields.ObjectField('ResourceProvider'),
+ 'resource_class': fields.ResourceClassField(read_only=True),
+ 'total': fields.IntegerField(),
+ 'reserved': fields.IntegerField(),
+ 'min_unit': fields.IntegerField(),
+ 'max_unit': fields.IntegerField(),
+ 'step_size': fields.IntegerField(),
+ 'allocation_ratio': fields.FloatField(),
+ }
+
+ @base.remotable
+ def create(self):
+ if 'id' in self:
+ raise exception.ObjectActionError(action='create',
+ reason='already created')
+ updates = self._make_db(self.obj_get_changes())
+ db_inventory = self._create_in_db(self._context, updates)
+ self._from_db_object(self._context, self, db_inventory)
+
+ @base.remotable
+ def save(self):
+ if 'id' not in self:
+ raise exception.ObjectActionError(action='save',
+ reason='not created')
+ updates = self.obj_get_changes()
+ updates.pop('id', None)
+ self._update_in_db(self._context, self.id, updates)
+
+ @staticmethod
+ @db_api.main_context_manager.writer
+ def _create_in_db(context, updates):
+ db_inventory = models.Inventory()
+ db_inventory.update(updates)
+ context.session.add(db_inventory)
+ return db_inventory
+
+ @staticmethod
+ @db_api.main_context_manager.writer
+ def _update_in_db(context, id_, updates):
+ result = context.session.query(
+ models.Inventory).filter_by(id=id_).update(updates)
+ if not result:
+ raise exception.NotFound()
+
+
+@base.NovaObjectRegistry.register
+class InventoryList(base.ObjectListBase, base.NovaObject):
+ # Version 1.0: Initial Version
+ VERSION = '1.0'
+
+ fields = {
+ 'objects': fields.ListOfObjectsField('Inventory'),
+ }
+
+ @staticmethod
+ @db_api.main_context_manager.reader
+ def _get_all_by_resource_provider(context, rp_uuid):
+ return context.session.query(models.Inventory).\
+ options(joinedload('resource_provider')).\
+ filter(models.ResourceProvider.uuid == rp_uuid).all()
+
+ @base.remotable_classmethod
+ def get_all_by_resource_provider_uuid(cls, context, rp_uuid):
+ db_inventory_list = cls._get_all_by_resource_provider(context,
+ rp_uuid)
+ return base.obj_make_list(context, cls(context), objects.Inventory,
+ db_inventory_list)
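
A hedged end-to-end sketch of the new objects (assumes an admin RequestContext named ctxt, that the classes are exported through nova.objects as usual, and that 'VCPU' is a valid ResourceClass value):

    import uuid

    rp = objects.ResourceProvider(context=ctxt, uuid=str(uuid.uuid4()))
    rp.create()
    inv = objects.Inventory(context=ctxt, resource_provider=rp,
                            resource_class='VCPU', total=8, reserved=0,
                            min_unit=1, max_unit=8, step_size=1,
                            allocation_ratio=16.0)
    inv.create()
    invs = objects.InventoryList.get_all_by_resource_provider_uuid(
        ctxt, rp.uuid)
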
diff --git a/nova/objects/security_group_rule.py b/nova/objects/security_group_rule.py
index f224025b76..eea7019fab 100644
--- a/nova/objects/security_group_rule.py
+++ b/nova/objects/security_group_rule.py
@@ -21,10 +21,8 @@ from nova.objects import fields
OPTIONAL_ATTRS = ['parent_group', 'grantee_group']
-# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
-class SecurityGroupRule(base.NovaPersistentObject, base.NovaObject,
- base.NovaObjectDictCompat):
+class SecurityGroupRule(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added create() and set id as read_only
VERSION = '1.1'
@@ -52,9 +50,10 @@ class SecurityGroupRule(base.NovaPersistentObject, base.NovaObject,
expected_attrs = []
for field in rule.fields:
if field in expected_attrs:
- rule[field] = rule._from_db_subgroup(context, db_rule[field])
+ setattr(rule, field,
+ rule._from_db_subgroup(context, db_rule[field]))
elif field not in OPTIONAL_ATTRS:
- rule[field] = db_rule[field]
+ setattr(rule, field, db_rule[field])
rule._context = context
rule.obj_reset_changes()
return rule
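
Dropping NovaObjectDictCompat removes dict-style access from the object, which is why the hunk above switches to setattr(); callers see the same change (illustrative):

    rule = objects.SecurityGroupRule(context=ctxt)
    rule.protocol = 'tcp'    # attribute access still works
    # rule['protocol']       # dict access would now raise TypeError
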
diff --git a/nova/objects/service.py b/nova/objects/service.py
index 6ea5cc8f58..c7a5ee3ab9 100644
--- a/nova/objects/service.py
+++ b/nova/objects/service.py
@@ -22,13 +22,14 @@ from nova.i18n import _LW
from nova import objects
from nova.objects import base
from nova.objects import fields
+from nova.objects import notification
LOG = logging.getLogger(__name__)
# NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 4
+SERVICE_VERSION = 7
# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -60,6 +61,12 @@ SERVICE_VERSION_HISTORY = (
{'compute_rpc': '4.6'},
# Version 4: Add PciDevice.parent_addr (data migration needed)
{'compute_rpc': '4.6'},
+ # Version 5: Add attachment_id kwarg to detach_volume()
+ {'compute_rpc': '4.7'},
+ # Version 6: Compute RPC version 4.8
+ {'compute_rpc': '4.8'},
+ # Version 7: Add live_migration_force_complete in the compute_rpc
+ {'compute_rpc': '4.9'},
)
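
Each entry in SERVICE_VERSION_HISTORY pins a compute RPC version, so the minimum service version reported by a deployment translates directly into an RPC cap. A sketch based only on the table above:

    minimum = 5  # e.g. what Service.get_minimum_version() reported
    compute_rpc_cap = SERVICE_VERSION_HISTORY[minimum]['compute_rpc']
    assert compute_rpc_cap == '4.7'
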
@@ -221,9 +228,15 @@ class Service(base.NovaPersistentObject, base.NovaObject,
return
return cls._from_db_object(context, cls(), db_service)
+ @staticmethod
+ @db.select_db_reader_mode
+ def _db_service_get_by_compute_host(context, host, use_slave=False):
+ return db.service_get_by_compute_host(context, host)
+
@base.remotable_classmethod
def get_by_compute_host(cls, context, host, use_slave=False):
- db_service = db.service_get_by_compute_host(context, host)
+ db_service = cls._db_service_get_by_compute_host(context, host,
+ use_slave=use_slave)
return cls._from_db_object(context, cls(), db_service)
# NOTE(ndipanov): This is deprecated and should be removed on the next
@@ -280,6 +293,24 @@ class Service(base.NovaPersistentObject, base.NovaObject,
db_service = db.service_update(self._context, self.id, updates)
self._from_db_object(self._context, self, db_service)
+ self._send_status_update_notification(updates)
+
+ def _send_status_update_notification(self, updates):
+ # Note(gibi): We do not trigger a notification on the version field
+ # as it is always dirty, which would cause nova to send a
+ # notification on every other field change. See the comment in
+ # save() too.
+ if set(updates.keys()).intersection(
+ {'disabled', 'disabled_reason', 'forced_down'}):
+ payload = ServiceStatusPayload(self)
+ ServiceStatusNotification(
+ publisher=notification.NotificationPublisher.from_service_obj(
+ self),
+ event_type=notification.EventType(
+ object='service',
+ action=fields.NotificationAction.UPDATE),
+ priority=fields.NotificationPriority.INFO,
+ payload=payload).emit(self._context)
+
@base.remotable
def destroy(self):
db.service_destroy(self._context, self.id)
@@ -293,6 +324,11 @@ class Service(base.NovaPersistentObject, base.NovaObject,
def clear_min_version_cache(cls):
cls._MIN_VERSION_CACHE = {}
+ @staticmethod
+ @db.select_db_reader_mode
+ def _db_service_get_minimum_version(context, binary, use_slave=False):
+ return db.service_get_minimum_version(context, binary)
+
@base.remotable_classmethod
def get_minimum_version(cls, context, binary, use_slave=False):
if not binary.startswith('nova-'):
@@ -305,8 +341,8 @@ class Service(base.NovaPersistentObject, base.NovaObject,
cached_version = cls._MIN_VERSION_CACHE.get(binary)
if cached_version:
return cached_version
- version = db.service_get_minimum_version(context, binary,
- use_slave=use_slave)
+ version = cls._db_service_get_minimum_version(context, binary,
+ use_slave=use_slave)
if version is None:
return 0
# NOTE(danms): Since our return value is not controlled by object
@@ -370,3 +406,48 @@ class ServiceList(base.ObjectListBase, base.NovaObject):
context, db_services)
return base.obj_make_list(context, cls(context), objects.Service,
db_services)
+
+
+@notification.notification_sample('service-update.json')
+@base.NovaObjectRegistry.register
+class ServiceStatusNotification(notification.NotificationBase):
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+
+ fields = {
+ 'payload': fields.ObjectField('ServiceStatusPayload')
+ }
+
+
+@base.NovaObjectRegistry.register
+class ServiceStatusPayload(notification.NotificationPayloadBase):
+ SCHEMA = {
+ 'host': ('service', 'host'),
+ 'binary': ('service', 'binary'),
+ 'topic': ('service', 'topic'),
+ 'report_count': ('service', 'report_count'),
+ 'disabled': ('service', 'disabled'),
+ 'disabled_reason': ('service', 'disabled_reason'),
+ 'availability_zone': ('service', 'availability_zone'),
+ 'last_seen_up': ('service', 'last_seen_up'),
+ 'forced_down': ('service', 'forced_down'),
+ 'version': ('service', 'version')
+ }
+ # Version 1.0: Initial version
+ VERSION = '1.0'
+ fields = {
+ 'host': fields.StringField(nullable=True),
+ 'binary': fields.StringField(nullable=True),
+ 'topic': fields.StringField(nullable=True),
+ 'report_count': fields.IntegerField(),
+ 'disabled': fields.BooleanField(),
+ 'disabled_reason': fields.StringField(nullable=True),
+ 'availability_zone': fields.StringField(nullable=True),
+ 'last_seen_up': fields.DateTimeField(nullable=True),
+ 'forced_down': fields.BooleanField(),
+ 'version': fields.IntegerField(),
+ }
+
+ def __init__(self, service):
+ super(ServiceStatusPayload, self).__init__()
+ self.populate_schema(service=service)
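
SCHEMA maps each payload field to a (source object, source field) pair, and populate_schema() copies the mapped values from the keyword argument of the same name. A sketch of the resulting behaviour (assuming a hydrated Service object):

    payload = ServiceStatusPayload(service)
    assert payload.host == service.host
    assert payload.disabled == service.disabled
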
diff --git a/nova/objects/virtual_interface.py b/nova/objects/virtual_interface.py
index 93fea70026..a3fc9361a2 100644
--- a/nova/objects/virtual_interface.py
+++ b/nova/objects/virtual_interface.py
@@ -93,9 +93,15 @@ class VirtualInterfaceList(base.ObjectListBase, base.NovaObject):
return base.obj_make_list(context, cls(context),
objects.VirtualInterface, db_vifs)
+ @staticmethod
+ @db.select_db_reader_mode
+ def _db_virtual_interface_get_by_instance(context, instance_uuid,
+ use_slave=False):
+ return db.virtual_interface_get_by_instance(context, instance_uuid)
+
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
- db_vifs = db.virtual_interface_get_by_instance(context, instance_uuid,
- use_slave=use_slave)
+ db_vifs = cls._db_virtual_interface_get_by_instance(
+ context, instance_uuid, use_slave=use_slave)
return base.obj_make_list(context, cls(context),
objects.VirtualInterface, db_vifs)
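
The same pattern appears in service.py above: because @db.select_db_reader_mode keys off a use_slave keyword, the DB call moves into a decorated private staticmethod instead of decorating the remotable classmethod itself. The shape of the pattern, with hypothetical names:

    class FooList(base.ObjectListBase, base.NovaObject):
        @staticmethod
        @db.select_db_reader_mode
        def _db_foo_get(context, foo_id, use_slave=False):
            # the decorator routes to the slave or master reader
            # based on use_slave, then strips the kwarg
            return db.foo_get(context, foo_id)
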
diff --git a/nova/openstack/common/cliutils.py b/nova/openstack/common/cliutils.py
index ca617f6767..3d06069144 100644
--- a/nova/openstack/common/cliutils.py
+++ b/nova/openstack/common/cliutils.py
@@ -180,10 +180,10 @@ def print_list(objs, fields, formatters=None, sortby_index=0,
row.append(data)
pt.add_row(row)
- if six.PY3:
- print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
- else:
+ if six.PY2:
print(encodeutils.safe_encode(pt.get_string(**kwargs)))
+ else:
+ print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
def print_dict(dct, dict_property="Property", wrap=0, dict_value='Value'):
@@ -213,10 +213,10 @@ def print_dict(dct, dict_property="Property", wrap=0, dict_value='Value'):
else:
pt.add_row([k, v])
- if six.PY3:
- print(encodeutils.safe_encode(pt.get_string()).decode())
- else:
+ if six.PY2:
print(encodeutils.safe_encode(pt.get_string()))
+ else:
+ print(encodeutils.safe_encode(pt.get_string()).decode())
def get_password(max_password_prompts=3):
diff --git a/nova/openstack/common/memorycache.py b/nova/openstack/common/memorycache.py
deleted file mode 100644
index e72c26df13..0000000000
--- a/nova/openstack/common/memorycache.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Super simple fake memcache client."""
-
-import copy
-
-from oslo_config import cfg
-from oslo_utils import timeutils
-
-memcache_opts = [
- cfg.ListOpt('memcached_servers',
- help='Memcached servers or None for in process cache.'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(memcache_opts)
-
-
-def list_opts():
- """Entry point for oslo-config-generator."""
- return [(None, copy.deepcopy(memcache_opts))]
-
-
-def get_client(memcached_servers=None):
- client_cls = Client
-
- if not memcached_servers:
- memcached_servers = CONF.memcached_servers
- if memcached_servers:
- import memcache
- client_cls = memcache.Client
-
- return client_cls(memcached_servers, debug=0)
-
-
-class Client(object):
- """Replicates a tiny subset of memcached client interface."""
-
- def __init__(self, *args, **kwargs):
- """Ignores the passed in args."""
- self.cache = {}
-
- def get(self, key):
- """Retrieves the value for a key or None.
-
- This expunges expired keys during each get.
- """
-
- now = timeutils.utcnow_ts()
- for k in list(self.cache):
- (timeout, _value) = self.cache[k]
- if timeout and now >= timeout:
- del self.cache[k]
-
- return self.cache.get(key, (0, None))[1]
-
- def set(self, key, value, time=0, min_compress_len=0):
- """Sets the value for a key."""
- timeout = 0
- if time != 0:
- timeout = timeutils.utcnow_ts() + time
- self.cache[key] = (timeout, value)
- return True
-
- def add(self, key, value, time=0, min_compress_len=0):
- """Sets the value for a key if it doesn't exist."""
- if self.get(key) is not None:
- return False
- return self.set(key, value, time, min_compress_len)
-
- def incr(self, key, delta=1):
- """Increments the value for a key."""
- value = self.get(key)
- if value is None:
- return None
- new_value = int(value) + delta
- self.cache[key] = (self.cache[key][0], str(new_value))
- return new_value
-
- def delete(self, key, time=0):
- """Deletes the value associated with a key."""
- if key in self.cache:
- del self.cache[key]
diff --git a/nova/opts.py b/nova/opts.py
index 9d55498a8b..ff939d3b1f 100644
--- a/nova/opts.py
+++ b/nova/opts.py
@@ -12,15 +12,11 @@
import itertools
-import nova.availability_zones
import nova.baserpc
-import nova.cert.rpcapi
import nova.cloudpipe.pipelib
import nova.cmd.novnc
-import nova.cmd.novncproxy
import nova.cmd.serialproxy
import nova.cmd.spicehtml5proxy
-import nova.conductor.api
import nova.conductor.rpcapi
import nova.conductor.tasks.live_migrate
import nova.conf
@@ -46,20 +42,14 @@ import nova.netconf
import nova.notifications
import nova.objects.network
import nova.paths
-import nova.pci.request
-import nova.pci.whitelist
import nova.quota
import nova.rdp
import nova.service
import nova.servicegroup.api
-import nova.servicegroup.drivers.zk
import nova.spice
import nova.utils
-import nova.vnc
-import nova.vnc.xvp_proxy
import nova.volume
import nova.volume.cinder
-import nova.wsgi
def list_opts():
@@ -71,11 +61,8 @@ def list_opts():
[nova.db.base.db_driver_opt],
[nova.ipv6.api.ipv6_backend_opt],
[nova.servicegroup.api.servicegroup_driver_opt],
- nova.availability_zones.availability_zone_opts,
- nova.cert.rpcapi.rpcapi_opts,
nova.cloudpipe.pipelib.cloudpipe_opts,
nova.cmd.novnc.opts,
- nova.cmd.novncproxy.opts,
nova.cmd.spicehtml5proxy.opts,
nova.console.manager.console_manager_opts,
nova.console.rpcapi.rpcapi_opts,
@@ -89,20 +76,15 @@ def list_opts():
nova.notifications.notify_opts,
nova.objects.network.network_opts,
nova.paths.path_opts,
- nova.pci.request.pci_alias_opts,
- nova.pci.whitelist.pci_opts,
nova.quota.quota_opts,
nova.service.service_opts,
nova.utils.monkey_patch_opts,
nova.utils.utils_opts,
- nova.vnc.xvp_proxy.xvp_proxy_opts,
nova.volume._volume_opts,
- nova.wsgi.wsgi_opts,
)),
('barbican', nova.keymgr.barbican.barbican_opts),
('cinder', nova.volume.cinder.cinder_opts),
('api_database', nova.db.sqlalchemy.api.api_db_opts),
- ('conductor', nova.conductor.api.conductor_opts),
('database', nova.db.sqlalchemy.api.oslo_db_options.database_opts),
('glance', nova.image.glance.glance_opts),
('image_file_url', [nova.image.download.file.opt_group]),
@@ -120,12 +102,9 @@ def list_opts():
('upgrade_levels',
itertools.chain(
[nova.baserpc.rpcapi_cap_opt],
- [nova.cert.rpcapi.rpcapi_cap_opt],
[nova.conductor.rpcapi.rpcapi_cap_opt],
[nova.console.rpcapi.rpcapi_cap_opt],
[nova.consoleauth.rpcapi.rpcapi_cap_opt],
)),
- ('vnc', nova.vnc.vnc_opts),
('workarounds', nova.utils.workarounds_opts),
- ('zookeeper', nova.servicegroup.drivers.zk.zk_driver_opts)
]
diff --git a/nova/pci/devspec.py b/nova/pci/devspec.py
index e2189d83b6..512383ac86 100644
--- a/nova/pci/devspec.py
+++ b/nova/pci/devspec.py
@@ -15,6 +15,7 @@ import ast
import re
from nova import exception
+from nova.i18n import _
from nova.pci import utils
MAX_VENDOR_ID = 0xFFFF
@@ -38,7 +39,8 @@ def get_pci_dev_info(pci_obj, property, max, hex_value):
v = get_value(a)
if v > max:
raise exception.PciConfigInvalidWhitelist(
- reason = "invalid %s %s" % (property, a))
+ reason=_("invalid %(property)s %(attr)s") %
+ {'property': property, 'attr': a})
setattr(pci_obj, property, hex_value % v)
diff --git a/nova/pci/manager.py b/nova/pci/manager.py
index c3889e7584..45dcee32bd 100644
--- a/nova/pci/manager.py
+++ b/nova/pci/manager.py
@@ -57,6 +57,7 @@ class PciDevTracker(object):
self.node_id = node_id
self.stats = stats.PciDeviceStats()
self.dev_filter = whitelist.Whitelist(CONF.pci_passthrough_whitelist)
+ self._context = context
if node_id:
self.pci_devs = objects.PciDeviceList.get_by_compute_node(
context, node_id)
@@ -164,8 +165,7 @@ class PciDevTracker(object):
for dev in [dev for dev in devices if
dev['address'] in new_addrs - exist_addrs]:
dev['compute_node_id'] = self.node_id
- # NOTE(danms): These devices are created with no context
- dev_obj = objects.PciDevice.create(dev)
+ dev_obj = objects.PciDevice.create(self._context, dev)
self.pci_devs.objects.append(dev_obj)
self.stats.add_device(dev_obj)
@@ -215,11 +215,12 @@ class PciDevTracker(object):
return None
def _free_device(self, dev, instance=None):
- dev.free(instance)
+ freed_devs = dev.free(instance)
stale = self.stale.pop(dev.address, None)
if stale:
dev.update_device(stale)
- self.stats.add_device(dev)
+ for dev in freed_devs:
+ self.stats.add_device(dev)
def _free_instance(self, instance):
# Note(yjiang5): When an instance is resized, the devices in the
diff --git a/nova/pci/request.py b/nova/pci/request.py
index c5d1a854a6..cfbc8b178f 100644
--- a/nova/pci/request.py
+++ b/nova/pci/request.py
@@ -39,37 +39,19 @@
import copy
import jsonschema
-from oslo_config import cfg
from oslo_serialization import jsonutils
import six
+import nova.conf
from nova import exception
+from nova.i18n import _
from nova import objects
from nova.objects import fields as obj_fields
from nova.pci import utils
-pci_alias_opts = [
- cfg.MultiStrOpt('pci_alias',
- default=[],
- help='An alias for a PCI passthrough device requirement. '
- 'This allows users to specify the alias in the '
- 'extra_spec for a flavor, without needing to repeat '
- 'all the PCI property requirements. For example: '
- 'pci_alias = '
- '{ "name": "QuickAssist", '
- ' "product_id": "0443", '
- ' "vendor_id": "8086", '
- ' "device_type": "type-PCI" '
- '} '
- 'defines an alias for the Intel QuickAssist card. '
- '(multi valued)'
- )
-]
-
PCI_NET_TAG = 'physical_network'
-CONF = cfg.CONF
-CONF.register_opts(pci_alias_opts)
+CONF = nova.conf.CONF
_ALIAS_DEV_TYPE = [obj_fields.PciDeviceType.STANDARD,
@@ -124,7 +106,7 @@ def _get_alias_from_config():
if aliases[name][0]["dev_type"] == spec["dev_type"]:
aliases[name].append(spec)
else:
- reason = "Device type mismatch for alias '%s'" % name
+ reason = _("Device type mismatch for alias '%s'") % name
raise exception.PciInvalidAlias(reason=reason)
except exception.PciInvalidAlias:
diff --git a/nova/pci/utils.py b/nova/pci/utils.py
index 80b8c35f8b..e10a303156 100644
--- a/nova/pci/utils.py
+++ b/nova/pci/utils.py
@@ -69,6 +69,10 @@ def get_pci_address_fields(pci_addr):
return (domain, bus, slot, func)
+def get_pci_address(domain, bus, slot, func):
+ return '%s:%s:%s.%s' % (domain, bus, slot, func)
+
+
def get_function_by_ifname(ifname):
"""Given the device name, returns the PCI address of a device
and returns True if the address is a physical function.
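
The new helper is the inverse of the existing get_pci_address_fields(), so the two should round-trip (a small sanity sketch):

    addr = '0000:04:10.1'
    assert get_pci_address(*get_pci_address_fields(addr)) == addr
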
diff --git a/nova/pci/whitelist.py b/nova/pci/whitelist.py
index fdb4ec696e..9bc537dcdb 100644
--- a/nova/pci/whitelist.py
+++ b/nova/pci/whitelist.py
@@ -14,22 +14,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
from oslo_serialization import jsonutils
+import nova.conf
from nova import exception
from nova.i18n import _
from nova.pci import devspec
-pci_opts = [cfg.MultiStrOpt('pci_passthrough_whitelist',
- default=[],
- help='White list of PCI devices available to VMs. '
- 'For example: pci_passthrough_whitelist = '
- '[{"vendor_id": "8086", "product_id": "0443"}]'
- )
- ]
-CONF = cfg.CONF
-CONF.register_opts(pci_opts)
+CONF = nova.conf.CONF
class Whitelist(object):
diff --git a/nova/rpc.py b/nova/rpc.py
index 14a6962bb5..2527a661d0 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -33,8 +33,21 @@ from oslo_serialization import jsonutils
import nova.context
import nova.exception
+
CONF = cfg.CONF
+notification_opts = [
+ cfg.StrOpt('notification_format',
+ choices=['unversioned', 'versioned', 'both'],
+ default='both',
+ help='Specifies which notification format shall be used by '
+ 'nova.'),
+]
+
+CONF.register_opts(notification_opts)
+
TRANSPORT = None
+LEGACY_NOTIFIER = None
+NOTIFICATION_TRANSPORT = None
NOTIFIER = None
ALLOWED_EXMODS = [
@@ -56,21 +69,43 @@ TRANSPORT_ALIASES = {
def init(conf):
- global TRANSPORT, NOTIFIER
+ global TRANSPORT, NOTIFICATION_TRANSPORT, LEGACY_NOTIFIER, NOTIFIER
exmods = get_allowed_exmods()
TRANSPORT = messaging.get_transport(conf,
allowed_remote_exmods=exmods,
aliases=TRANSPORT_ALIASES)
+ NOTIFICATION_TRANSPORT = messaging.get_notification_transport(
+ conf, allowed_remote_exmods=exmods, aliases=TRANSPORT_ALIASES)
serializer = RequestContextSerializer(JsonPayloadSerializer())
- NOTIFIER = messaging.Notifier(TRANSPORT, serializer=serializer)
+ if conf.notification_format == 'unversioned':
+ LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
+ serializer=serializer)
+ NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
+ serializer=serializer, driver='noop')
+ elif conf.notification_format == 'both':
+ LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
+ serializer=serializer)
+ NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
+ serializer=serializer,
+ topic='versioned_notifications')
+ else:
+ LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
+ serializer=serializer,
+ driver='noop')
+ NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
+ serializer=serializer,
+ topic='versioned_notifications')
def cleanup():
- global TRANSPORT, NOTIFIER
+ global TRANSPORT, NOTIFICATION_TRANSPORT, LEGACY_NOTIFIER, NOTIFIER
assert TRANSPORT is not None
+ assert NOTIFICATION_TRANSPORT is not None
+ assert LEGACY_NOTIFIER is not None
assert NOTIFIER is not None
TRANSPORT.cleanup()
- TRANSPORT = NOTIFIER = None
+ NOTIFICATION_TRANSPORT.cleanup()
+ TRANSPORT = NOTIFICATION_TRANSPORT = LEGACY_NOTIFIER = NOTIFIER = None
def set_defaults(control_exchange):
@@ -141,7 +176,12 @@ def get_server(target, endpoints, serializer=None):
def get_notifier(service, host=None, publisher_id=None):
- assert NOTIFIER is not None
+ assert LEGACY_NOTIFIER is not None
if not publisher_id:
publisher_id = "%s.%s" % (service, host or CONF.host)
+ return LEGACY_NOTIFIER.prepare(publisher_id=publisher_id)
+
+
+def get_versioned_notifier(publisher_id):
+ assert NOTIFIER is not None
return NOTIFIER.prepare(publisher_id=publisher_id)
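
With notification_format=both (the default), legacy notifications keep flowing through get_notifier() while versioned payloads are emitted on the 'versioned_notifications' topic via the new helper. A hedged emit sketch using the standard oslo.messaging notifier interface (publisher id and payload_dict are illustrative):

    notifier = rpc.get_versioned_notifier('nova-compute:host1')
    notifier.info(ctxt, 'service.update', payload_dict)
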
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index fde07fee2f..a695fdcd1b 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -21,23 +21,49 @@ Scheduler base class that all Schedulers should inherit from
import abc
+from oslo_log import log as logging
from oslo_utils import importutils
import six
+from stevedore import driver
import nova.conf
+from nova.i18n import _, _LW
from nova import objects
from nova import servicegroup
CONF = nova.conf.CONF
+LOG = logging.getLogger(__name__)
+
@six.add_metaclass(abc.ABCMeta)
class Scheduler(object):
"""The base class that all Scheduler classes should inherit from."""
def __init__(self):
- self.host_manager = importutils.import_object(
- CONF.scheduler_host_manager)
+ try:
+ self.host_manager = driver.DriverManager(
+ "nova.scheduler.host_manager",
+ CONF.scheduler_host_manager,
+ invoke_on_load=True).driver
+ # TODO(Yingxin): Change to catch stevedore.exceptions.NoMatches
+ # after stevedore v1.9.0
+ except RuntimeError:
+ # NOTE(Yingxin): Loading full class path is deprecated and
+ # should be removed in the N release.
+ try:
+ self.host_manager = importutils.import_object(
+ CONF.scheduler_host_manager)
+ LOG.warning(_LW("DEPRECATED: scheduler_host_manager uses "
+ "classloader to load %(path)s. This legacy "
+ "loading style will be removed in the "
+ "N release."),
+ {'path': CONF.scheduler_host_manager})
+ except (ImportError, ValueError):
+ raise RuntimeError(
+ _("Cannot load host manager from configuration "
+ "scheduler_host_manager = %(conf)s."),
+ {'conf': CONF.scheduler_host_manager})
self.servicegroup_api = servicegroup.API()
def run_periodic_tasks(self, context):
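
The constructor now resolves scheduler_host_manager as a stevedore entry point first and only falls back to the deprecated class-path import. The lookup it performs, spelled out (the entry point name 'host_manager' is an assumed example; the real names come from nova's setup.cfg):

    from stevedore import driver

    host_manager = driver.DriverManager(
        'nova.scheduler.host_manager',  # namespace
        'host_manager',                 # entry point name from config
        invoke_on_load=True).driver
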
diff --git a/nova/scheduler/filters/availability_zone_filter.py b/nova/scheduler/filters/availability_zone_filter.py
index bbcf1eec35..58b0e9dd37 100644
--- a/nova/scheduler/filters/availability_zone_filter.py
+++ b/nova/scheduler/filters/availability_zone_filter.py
@@ -13,16 +13,15 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
from oslo_log import log as logging
+import nova.conf
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-CONF.import_opt('default_availability_zone', 'nova.availability_zones')
+CONF = nova.conf.CONF
class AvailabilityZoneFilter(filters.BaseHostFilter):
diff --git a/nova/scheduler/filters/compute_capabilities_filter.py b/nova/scheduler/filters/compute_capabilities_filter.py
index cfe0059346..e5de487064 100644
--- a/nova/scheduler/filters/compute_capabilities_filter.py
+++ b/nova/scheduler/filters/compute_capabilities_filter.py
@@ -66,7 +66,7 @@ class ComputeCapabilitiesFilter(filters.BaseHostFilter):
def _satisfies_extra_specs(self, host_state, instance_type):
"""Check that the host_state provided by the compute service
- satisfy the extra specs associated with the instance type.
+ satisfies the extra specs associated with the instance type.
"""
if 'extra_specs' not in instance_type:
return True
diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py
index acdccb56ad..ae124a94d7 100644
--- a/nova/scheduler/filters/compute_filter.py
+++ b/nova/scheduler/filters/compute_filter.py
@@ -13,15 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
from oslo_log import log as logging
from nova.i18n import _LW
from nova.scheduler import filters
from nova import servicegroup
-CONF = cfg.CONF
-
LOG = logging.getLogger(__name__)
diff --git a/nova/scheduler/filters/disk_filter.py b/nova/scheduler/filters/disk_filter.py
index dd2a255545..3476ace42b 100644
--- a/nova/scheduler/filters/disk_filter.py
+++ b/nova/scheduler/filters/disk_filter.py
@@ -29,7 +29,7 @@ class DiskFilter(filters.BaseHostFilter):
"""Disk Filter with over subscription flag."""
def _get_disk_allocation_ratio(self, host_state, spec_obj):
- return CONF.disk_allocation_ratio
+ return host_state.disk_allocation_ratio
def host_passes(self, host_state, spec_obj):
"""Filter based on disk usage."""
@@ -73,9 +73,10 @@ class AggregateDiskFilter(DiskFilter):
'disk_allocation_ratio')
try:
ratio = utils.validate_num_values(
- aggregate_vals, CONF.disk_allocation_ratio, cast_to=float)
+ aggregate_vals, host_state.disk_allocation_ratio,
+ cast_to=float)
except ValueError as e:
LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), e)
- ratio = CONF.disk_allocation_ratio
+ ratio = host_state.disk_allocation_ratio
return ratio
diff --git a/nova/scheduler/filters/exact_core_filter.py b/nova/scheduler/filters/exact_core_filter.py
index fac78df99a..dbb8e6dfce 100644
--- a/nova/scheduler/filters/exact_core_filter.py
+++ b/nova/scheduler/filters/exact_core_filter.py
@@ -44,4 +44,8 @@ class ExactCoreFilter(filters.BaseHostFilter):
'usable_vcpus': usable_vcpus})
return False
+ # NOTE(mgoddard): Setting the limit ensures that it is enforced in
+ # compute. This ensures that if multiple instances are scheduled to a
+ # single host, then all after the first will fail in the claim.
+ host_state.limits['vcpu'] = host_state.vcpus_total
return True
diff --git a/nova/scheduler/filters/exact_disk_filter.py b/nova/scheduler/filters/exact_disk_filter.py
index dfa15b148d..405e3b91b7 100644
--- a/nova/scheduler/filters/exact_disk_filter.py
+++ b/nova/scheduler/filters/exact_disk_filter.py
@@ -38,4 +38,8 @@ class ExactDiskFilter(filters.BaseHostFilter):
'usable_disk_mb': host_state.free_disk_mb})
return False
+ # NOTE(mgoddard): Setting the limit ensures that it is enforced in
+ # compute. This ensures that if multiple instances are scheduled to a
+ # single host, then all after the first will fail in the claim.
+ host_state.limits['disk_gb'] = host_state.total_usable_disk_gb
return True
diff --git a/nova/scheduler/filters/exact_ram_filter.py b/nova/scheduler/filters/exact_ram_filter.py
index 86d688e664..475bf31218 100644
--- a/nova/scheduler/filters/exact_ram_filter.py
+++ b/nova/scheduler/filters/exact_ram_filter.py
@@ -35,4 +35,8 @@ class ExactRamFilter(filters.BaseHostFilter):
'usable_ram': host_state.free_ram_mb})
return False
+ # NOTE(mgoddard): Setting the limit ensures that it is enforced in
+ # compute. This ensures that if multiple instances are scheduled to a
+ # single host, then all after the first will fail in the claim.
+ host_state.limits['memory_mb'] = host_state.total_usable_ram_mb
return True
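
Setting host_state.limits is what lets the compute-side claim reject a second exact-fit instance instead of oversubscribing. The check the claim effectively performs, in illustrative form (an assumption about the claim logic, not code from this change):

    def claim_fits(used_mb, requested_mb, limits):
        # mirrors the compute-side test enabled by the limit set above;
        # the first exact-fit instance passes, the next one fails
        return used_mb + requested_mb <= limits['memory_mb']
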
diff --git a/nova/scheduler/filters/retry_filter.py b/nova/scheduler/filters/retry_filter.py
index ee4070176a..4c2f0a1635 100644
--- a/nova/scheduler/filters/retry_filter.py
+++ b/nova/scheduler/filters/retry_filter.py
@@ -15,6 +15,7 @@
from oslo_log import log as logging
+from nova.i18n import _LI
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
@@ -43,8 +44,8 @@ class RetryFilter(filters.BaseHostFilter):
passes = host not in hosts
if not passes:
- LOG.debug("Host %(host)s fails. Previously tried hosts: "
- "%(hosts)s", {'host': host, 'hosts': hosts})
+ LOG.info(_LI("Host %(host)s fails. Previously tried hosts: "
+ "%(hosts)s"), {'host': host, 'hosts': hosts})
# Host passes if it's not in the list of previously attempted hosts:
return passes
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 7991520191..d5f476c9e1 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -150,6 +150,7 @@ class HostState(object):
# Allocation ratios for this host
self.ram_allocation_ratio = None
self.cpu_allocation_ratio = None
+ self.disk_allocation_ratio = None
self.updated = None
@@ -241,9 +242,10 @@ class HostState(object):
# update allocation ratios given by the ComputeNode object
self.cpu_allocation_ratio = compute.cpu_allocation_ratio
self.ram_allocation_ratio = compute.ram_allocation_ratio
+ self.disk_allocation_ratio = compute.disk_allocation_ratio
def consume_from_request(self, spec_obj):
- """Incrementally update host state from an RequestSpec object."""
+ """Incrementally update host state from a RequestSpec object."""
@utils.synchronized(self._lock_name)
@set_update_time_on_success
diff --git a/nova/scheduler/ironic_host_manager.py b/nova/scheduler/ironic_host_manager.py
index fb23b812f5..9ac2871661 100644
--- a/nova/scheduler/ironic_host_manager.py
+++ b/nova/scheduler/ironic_host_manager.py
@@ -59,6 +59,7 @@ class IronicNodeState(host_manager.HostState):
# update allocation ratios given by the ComputeNode object
self.cpu_allocation_ratio = compute.cpu_allocation_ratio
self.ram_allocation_ratio = compute.ram_allocation_ratio
+ self.disk_allocation_ratio = compute.disk_allocation_ratio
self.updated = compute.updated_at
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 7b246c8533..3c34034976 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -19,18 +19,23 @@
Scheduler Service
"""
+from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import importutils
+from stevedore import driver
import nova.conf
from nova import exception
+from nova.i18n import _, _LW
from nova import manager
from nova import objects
from nova import quota
+LOG = logging.getLogger(__name__)
+
CONF = nova.conf.CONF
QUOTAS = quota.QUOTAS
@@ -46,7 +51,28 @@ class SchedulerManager(manager.Manager):
def __init__(self, scheduler_driver=None, *args, **kwargs):
if not scheduler_driver:
scheduler_driver = CONF.scheduler_driver
- self.driver = importutils.import_object(scheduler_driver)
+ try:
+ self.driver = driver.DriverManager(
+ "nova.scheduler.driver",
+ scheduler_driver,
+ invoke_on_load=True).driver
+ # TODO(Yingxin): Change to catch stevedore.exceptions.NoMatches after
+ # stevedore v1.9.0
+ except RuntimeError:
+ # NOTE(Yingxin): Loading full class path is deprecated and should
+ # be removed in the N release.
+ try:
+ self.driver = importutils.import_object(scheduler_driver)
+ LOG.warning(_LW("DEPRECATED: scheduler_driver uses "
+ "classloader to load %(path)s. This legacy "
+ "loading style will be removed in the "
+ "N release."),
+ {'path': scheduler_driver})
+ except (ImportError, ValueError):
+ raise RuntimeError(
+ _("Cannot load scheduler driver from configuration "
+ "%(conf)s."),
+ {'conf': scheduler_driver})
super(SchedulerManager, self).__init__(service_name='scheduler',
*args, **kwargs)
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index 1a094df591..5e8b8c27d7 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -85,7 +85,7 @@ def build_request_spec(ctxt, image, instances, instance_type=None):
def set_vm_state_and_notify(context, instance_uuid, service, method, updates,
- ex, request_spec, db):
+ ex, request_spec):
"""changes VM state and notifies."""
LOG.warning(_LW("Failed to %(service)s_%(method)s: %(ex)s"),
{'service': service, 'method': method, 'ex': ex})
@@ -117,6 +117,20 @@ def set_vm_state_and_notify(context, instance_uuid, service, method, updates,
notifier.error(context, event_type, payload)
+def build_filter_properties(scheduler_hints, forced_host,
+ forced_node, instance_type):
+ """Build the filter_properties dict from data in the boot request."""
+ filter_properties = dict(scheduler_hints=scheduler_hints)
+ filter_properties['instance_type'] = instance_type
+ # TODO(alaski): It doesn't seem necessary that these are conditionally
+ # added. Let's just add empty lists if not forced_host/node.
+ if forced_host:
+ filter_properties['force_hosts'] = [forced_host]
+ if forced_node:
+ filter_properties['force_nodes'] = [forced_node]
+ return filter_properties
+
+
def populate_filter_properties(filter_properties, host_state):
"""Add additional information to the filter properties after a node has
been selected by the scheduling process.
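
A minimal usage sketch for the new helper (flavor is assumed to be a Flavor object; values are illustrative):

    props = build_filter_properties(
        scheduler_hints={'group': ['a-server-group-uuid']},
        forced_host='compute-1', forced_node=None,
        instance_type=flavor)
    # props == {'scheduler_hints': {...}, 'instance_type': flavor,
    #           'force_hosts': ['compute-1']}
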
diff --git a/nova/scheduler/weights/disk.py b/nova/scheduler/weights/disk.py
new file mode 100644
index 0000000000..50714494fa
--- /dev/null
+++ b/nova/scheduler/weights/disk.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Disk Weigher. Weigh hosts by their disk usage.
+
+The default is to spread instances across all hosts evenly. If you prefer
+stacking, you can set the 'disk_weight_multiplier' option to a negative
+number and the weighing has the opposite effect of the default.
+"""
+
+import nova.conf
+from nova.scheduler import weights
+
+CONF = nova.conf.CONF
+
+
+class DiskWeigher(weights.BaseHostWeigher):
+ minval = 0
+
+ def weight_multiplier(self):
+ """Override the weight multiplier."""
+ return CONF.disk_weight_multiplier
+
+ def _weigh_object(self, host_state, weight_properties):
+ """Higher weights win. We want spreading to be the default."""
+ return host_state.free_disk_mb
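
The weigher just multiplies free disk by disk_weight_multiplier, so a positive multiplier spreads and a negative one stacks. Standalone arithmetic illustrating the ordering (not the weights framework itself):

    free_disk_mb = {'host1': 512000, 'host2': 128000}
    multiplier = 1.0  # stand-in for CONF.disk_weight_multiplier
    ordered = sorted(free_disk_mb,
                     key=lambda h: multiplier * free_disk_mb[h],
                     reverse=True)
    assert ordered[0] == 'host1'  # flips to 'host2' if multiplier < 0
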
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index 0197b736e1..2a09baa42f 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -26,7 +26,6 @@ LOG = logging.getLogger(__name__)
_driver_name_class_mapping = {
'db': 'nova.servicegroup.drivers.db.DbDriver',
- 'zk': 'nova.servicegroup.drivers.zk.ZooKeeperDriver',
'mc': 'nova.servicegroup.drivers.mc.MemcachedDriver'
}
_default_driver = 'db'
diff --git a/nova/servicegroup/drivers/mc.py b/nova/servicegroup/drivers/mc.py
index 145354d5b2..e2ae1d4526 100644
--- a/nova/servicegroup/drivers/mc.py
+++ b/nova/servicegroup/drivers/mc.py
@@ -21,8 +21,8 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
+from nova import cache_utils
from nova.i18n import _, _LI, _LW
-from nova.openstack.common import memorycache
from nova.servicegroup import api
from nova.servicegroup.drivers import base
@@ -37,9 +37,8 @@ LOG = logging.getLogger(__name__)
class MemcachedDriver(base.Driver):
def __init__(self, *args, **kwargs):
- if not CONF.memcached_servers:
- raise RuntimeError(_('memcached_servers not defined'))
- self.mc = memorycache.get_client()
+ self.mc = cache_utils.get_memcached_client(
+ expiration_time=CONF.service_down_time)
def join(self, member_id, group_id, service=None):
"""Join the given service with its group."""
@@ -77,8 +76,7 @@ class MemcachedDriver(base.Driver):
# set(..., time=CONF.service_down_time) uses it and
# reduces key-deleting code.
self.mc.set(str(key),
- timeutils.utcnow(),
- time=CONF.service_down_time)
+ timeutils.utcnow())
# TODO(termie): make this pattern be more elegant.
if getattr(service, 'model_disconnected', False):
diff --git a/nova/servicegroup/drivers/zk.py b/nova/servicegroup/drivers/zk.py
deleted file mode 100644
index 6962831ecb..0000000000
--- a/nova/servicegroup/drivers/zk.py
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
-# Copyright 2012 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import eventlet
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_utils import importutils
-
-from nova import exception
-from nova.i18n import _LE, _LW
-from nova.servicegroup.drivers import base
-
-evzookeeper = importutils.try_import('evzookeeper')
-membership = importutils.try_import('evzookeeper.membership')
-zookeeper = importutils.try_import('zookeeper')
-
-zk_driver_opts = [
- cfg.StrOpt('address',
- help='The ZooKeeper addresses for servicegroup service in the '
- 'format of host1:port,host2:port,host3:port'),
- cfg.IntOpt('recv_timeout',
- default=4000,
- help='The recv_timeout parameter for the zk session'),
- cfg.StrOpt('sg_prefix',
- default="/servicegroups",
- help='The prefix used in ZooKeeper to store ephemeral nodes'),
- cfg.IntOpt('sg_retry_interval',
- default=5,
- help='Number of seconds to wait until retrying to join the '
- 'session'),
- ]
-
-CONF = cfg.CONF
-CONF.register_opts(zk_driver_opts, group="zookeeper")
-
-LOG = logging.getLogger(__name__)
-
-
-class ZooKeeperDriver(base.Driver):
- """ZooKeeper driver for the service group API."""
-
- def __init__(self, *args, **kwargs):
- """Create the zk session object."""
- if not all([evzookeeper, membership, zookeeper]):
- raise ImportError('zookeeper module not found')
- self._memberships = {}
- self._monitors = {}
- super(ZooKeeperDriver, self).__init__()
- self._cached_session = None
-
- @property
- def _session(self):
- """Creates zookeeper session in lazy manner.
-
- Session is created in lazy manner to mitigate lock problem
- in zookeeper.
-
- Lock happens when many processes try to use the same zk handle.
- Lazy creation allows to deffer initialization of session until
- is really required by worker (child process).
-
- :returns: ZKSession -- new or created earlier
- """
- if self._cached_session is None:
- self._cached_session = self._init_session()
- return self._cached_session
-
- def _init_session(self):
- """Initializes new session.
-
- Optionally creates required servicegroup prefix.
-
- :returns ZKSession - newly created session
- """
- with open(os.devnull, "w") as null:
- session = evzookeeper.ZKSession(
- CONF.zookeeper.address,
- recv_timeout=CONF.zookeeper.recv_timeout,
- zklog_fd=null)
- # Make sure the prefix exists
- try:
- session.create(CONF.zookeeper.sg_prefix, "",
- acl=[evzookeeper.ZOO_OPEN_ACL_UNSAFE])
- except zookeeper.NodeExistsException:
- pass
- # Log a warning about quality for this driver.
- LOG.warning(_LW('The ZooKeeper service group driver in Nova is not '
- 'tested by the OpenStack project and thus its quality '
- 'can not be ensured. This may change in the future, '
- 'but current deployers should be aware that the use '
- 'of it in production right now may be risky.'))
- return session
-
- def join(self, member, group, service=None):
- """Add a new member to a service group.
-
- :param member: the joined member ID/name
- :param group: the group ID/name, of the joined member
- :param service: a `nova.service.Service` object
- """
- process_id = str(os.getpid())
- LOG.debug('ZooKeeperDriver: join new member %(id)s(%(pid)s) to the '
- '%(gr)s group, service=%(sr)s',
- {'id': member, 'pid': process_id,
- 'gr': group, 'sr': service})
- member = self._memberships.get((group, member), None)
- if member is None:
- # the first time to join. Generate a new object
- path = "%s/%s/%s" % (CONF.zookeeper.sg_prefix, group, member)
- try:
- zk_member = membership.Membership(self._session, path,
- process_id)
- except RuntimeError:
- LOG.exception(_LE("Unable to join. It is possible that either"
- " another node exists with the same name, or"
- " this node just restarted. We will try "
- "again in a short while to make sure."))
- eventlet.sleep(CONF.zookeeper.sg_retry_interval)
- zk_member = membership.Membership(self._session, path, member)
- self._memberships[(group, member)] = zk_member
-
- def is_up(self, service_ref):
- group_id = service_ref['topic']
- member_id = service_ref['host']
- all_members = self._get_all(group_id)
- return member_id in all_members
-
- def _get_all(self, group_id):
- """Return all members in a list, or a ServiceGroupUnavailable
- exception.
- """
- monitor = self._monitors.get(group_id, None)
- if monitor is None:
- path = "%s/%s" % (CONF.zookeeper.sg_prefix, group_id)
-
- with open(os.devnull, "w") as null:
- local_session = evzookeeper.ZKSession(
- CONF.zookeeper.address,
- recv_timeout=CONF.zookeeper.recv_timeout,
- zklog_fd=null)
-
- monitor = membership.MembershipMonitor(local_session, path)
- self._monitors[group_id] = monitor
- # Note(maoy): When initialized for the first time, it takes a
- # while to retrieve all members from zookeeper. To prevent
- # None to be returned, we sleep 5 sec max to wait for data to
- # be ready.
- timeout = 5 # seconds
- interval = 0.1
- tries = int(timeout / interval)
- for _retry in range(tries):
- eventlet.sleep(interval)
- all_members = monitor.get_all()
- if all_members is not None:
- # Stop the tries once the cache is populated
- LOG.debug('got info about members in %r: %r',
- path, ', '.join(all_members))
- break
- else:
- # if all_members, weren't populated
- LOG.warning(_LW('Problem with acquiring the list of '
- 'children of %(path)r within a given '
- 'timeout=%(timeout)d seconds'),
- path, timeout)
- else:
- all_members = monitor.get_all()
-
- if all_members is None:
- raise exception.ServiceGroupUnavailable(driver="ZooKeeperDriver")
-
- def have_processes(member):
- """Predicate that given member has processes (subnode exists)."""
- value, stat = monitor.get_member_details(member)
- # only check nodes that are created by Membership class
- if value == 'ZKMembers':
- num_children = stat['numChildren']
- return num_children > 0
- else:
- # unknown type of node found - ignoring
- return False
-
- # filter only this members that have processes running
- all_members = filter(have_processes, all_members)
-
- return all_members
diff --git a/nova/test.py b/nova/test.py
index cffaed044e..2e3012ffcd 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -32,6 +32,7 @@ import mock
import os
import fixtures
+from oslo_cache import core as cache
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
@@ -60,16 +61,17 @@ CONF.import_opt('enabled', 'nova.api.openstack', group='osapi_v21')
logging.register_options(CONF)
CONF.set_override('use_stderr', False)
logging.setup(CONF, 'nova')
+cache.configure(CONF)
_TRUE_VALUES = ('True', 'true', '1', 'yes')
-if six.PY3:
+if six.PY2:
+ nested = contextlib.nested
+else:
@contextlib.contextmanager
def nested(*contexts):
with contextlib.ExitStack() as stack:
yield [stack.enter_context(c) for c in contexts]
-else:
- nested = contextlib.nested
class SampleNetworks(fixtures.Fixture):
@@ -210,6 +212,7 @@ class TestCase(testtools.TestCase):
if self.USES_DB:
self.useFixture(nova_fixtures.Database())
+ self.useFixture(nova_fixtures.Database(database='api'))
# NOTE(blk-u): WarningsFixture must be after the Database fixture
# because sqlalchemy-migrate messes with the warnings filters.
@@ -223,6 +226,8 @@ class TestCase(testtools.TestCase):
objects_base.NovaObjectRegistry._registry._obj_classes)
self.addCleanup(self._restore_obj_registry)
+ self.useFixture(nova_fixtures.StableObjectJsonFixture())
+
# NOTE(mnaser): All calls to utils.is_neutron() are cached in
# nova.utils._IS_NEUTRON. We set it to None to avoid any
# caching of that value.
diff --git a/nova/tests/fixtures.py b/nova/tests/fixtures.py
index 39467cccca..fd2f538e33 100644
--- a/nova/tests/fixtures.py
+++ b/nova/tests/fixtures.py
@@ -423,6 +423,8 @@ class SpawnIsSynchronousFixture(fixtures.Fixture):
super(SpawnIsSynchronousFixture, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.utils.spawn_n', lambda f, *a, **k: f(*a, **k)))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.utils.spawn', lambda f, *a, **k: f(*a, **k)))
class BannedDBSchemaOperations(fixtures.Fixture):
@@ -446,3 +448,31 @@ class BannedDBSchemaOperations(fixtures.Fixture):
self.useFixture(fixtures.MonkeyPatch(
'sqlalchemy.%s.alter' % thing,
lambda *a, **k: self._explode(thing, 'alter')))
+
+
+class StableObjectJsonFixture(fixtures.Fixture):
+ """Fixture that makes sure we get stable JSON object representations.
+
+ Since objects contain things like set(), which can't be converted to
+ JSON, we have some situations where the representation isn't fully
+ deterministic. This doesn't matter at all at runtime, but it does
+ matter to unit tests that try to assert things at a low level.
+
+ This fixture mocks the obj_to_primitive() call and makes sure to
+ sort the list of changed fields (which came from a set) before
+ returning it to the caller.
+ """
+ def __init__(self):
+ self._original_otp = obj_base.NovaObject.obj_to_primitive
+
+ def setUp(self):
+ super(StableObjectJsonFixture, self).setUp()
+
+ def _doit(obj, *args, **kwargs):
+ result = self._original_otp(obj, *args, **kwargs)
+ if 'nova_object.changes' in result:
+ result['nova_object.changes'].sort()
+ return result
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.objects.base.NovaObject.obj_to_primitive', _doit))
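
Typical use in a test, following the fixture pattern used elsewhere in this change:

    class MyObjectTest(testtools.TestCase):
        def setUp(self):
            super(MyObjectTest, self).setUp()
            self.useFixture(nova_fixtures.StableObjectJsonFixture())
            # obj_to_primitive() now returns 'nova_object.changes'
            # sorted, so JSON comparisons are deterministic
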
diff --git a/nova/tests/functional/api/client.py b/nova/tests/functional/api/client.py
index ca2b5d0d44..db4d58c66f 100644
--- a/nova/tests/functional/api/client.py
+++ b/nova/tests/functional/api/client.py
@@ -124,6 +124,7 @@ class TestOpenStackClient(object):
self.auth_key = auth_key
self.auth_uri = auth_uri
self.project_id = project_id
+ self.microversion = None
def request(self, url, method='GET', body=None, headers=None):
_headers = {'Content-Type': 'application/json'}
@@ -167,6 +168,8 @@ class TestOpenStackClient(object):
headers = kwargs.setdefault('headers', {})
headers['X-Auth-Token'] = auth_result['x-auth-token']
+ if self.microversion:
+ headers['X-OpenStack-Nova-API-Version'] = self.microversion
response = self.request(full_uri, **kwargs)
@@ -193,8 +196,11 @@ class TestOpenStackClient(object):
resp.body = jsonutils.loads(response.content)
return resp
- def api_get(self, relative_uri, **kwargs):
+ def api_get(self, relative_uri, api_version=None, **kwargs):
kwargs.setdefault('check_response_status', [200])
+ if api_version:
+ headers = kwargs.setdefault('headers', {})
+ headers['X-OpenStack-Nova-API-Version'] = api_version
return APIResponse(self.api_request(relative_uri, **kwargs))
def api_post(self, relative_uri, body, api_version=None, **kwargs):
@@ -210,13 +216,16 @@ class TestOpenStackClient(object):
kwargs.setdefault('check_response_status', [200, 202])
return APIResponse(self.api_request(relative_uri, **kwargs))
- def api_put(self, relative_uri, body, **kwargs):
+ def api_put(self, relative_uri, body, api_version=None, **kwargs):
kwargs['method'] = 'PUT'
if body:
headers = kwargs.setdefault('headers', {})
headers['Content-Type'] = 'application/json'
kwargs['body'] = jsonutils.dumps(body)
+ if api_version:
+ headers['X-OpenStack-Nova-API-Version'] = api_version
+
kwargs.setdefault('check_response_status', [200, 202, 204])
return APIResponse(self.api_request(relative_uri, **kwargs))
@@ -357,6 +366,6 @@ class TestOpenStackClient(object):
def delete_server_group(self, group_id):
self.api_delete('/os-server-groups/%s' % group_id)
- def get_instance_actions(self, server_id):
+ def get_instance_actions(self, server_id, api_version=None):
return self.api_get('/servers/%s/os-instance-actions' %
- (server_id)).body['instanceActions']
+ (server_id), api_version).body['instanceActions']
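
The client now supports both a client-wide microversion and a per-request override (illustrative):

    client.microversion = '2.12'   # sent with every request
    actions = client.get_instance_actions(server_id, api_version='2.21')
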
diff --git a/nova/tests/functional/api_paste_fixture.py b/nova/tests/functional/api_paste_fixture.py
index 709bbc067f..3dd4d16061 100644
--- a/nova/tests/functional/api_paste_fixture.py
+++ b/nova/tests/functional/api_paste_fixture.py
@@ -54,3 +54,14 @@ class ApiPasteLegacyV2Fixture(ApiPasteV21Fixture):
"/v2: openstack_compute_api_v21_legacy_v2_compatible",
"/v2: openstack_compute_api_legacy_v2")
target_file.write(line)
+
+
+class ApiPasteNoProjectId(ApiPasteV21Fixture):
+
+ def _replace_line(self, target_file, line):
+ line = line.replace(
+ "paste.filter_factory = nova.api.openstack.auth:"
+ "NoAuthMiddleware.factory",
+ "paste.filter_factory = nova.api.openstack.auth:"
+ "NoAuthMiddlewareV2_18.factory")
+ target_file.write(line)
diff --git a/nova/tests/functional/api_sample_tests/api_sample_base.py b/nova/tests/functional/api_sample_tests/api_sample_base.py
index 4d9b25c9c0..89149344a1 100644
--- a/nova/tests/functional/api_sample_tests/api_sample_base.py
+++ b/nova/tests/functional/api_sample_tests/api_sample_base.py
@@ -25,6 +25,9 @@ from nova.tests.unit import fake_network
from nova.tests.unit import fake_utils
CONF = cfg.CONF
+CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
+CONF.import_opt('osapi_compute_extension',
+ 'nova.api.openstack.compute.legacy_v2.extensions')
# API samples heavily use testscenarios. This allows us to use the
# same tests, with slight variations in configuration to ensure our
@@ -58,7 +61,7 @@ CONF = cfg.CONF
# microversions, then replace the ``scenarios`` class variable in that
# test class with something like:
#
-# [("v2_11", {'api_major_version': 'v2.1', 'microversion', '2.11'})]
+# [("v2_11", {'api_major_version': 'v2.1', 'microversion': '2.11'})]
class ApiSampleTestBaseV21(testscenarios.WithScenarios,
@@ -69,6 +72,7 @@ class ApiSampleTestBaseV21(testscenarios.WithScenarios,
sample_dir = None
extra_extensions_to_load = None
_legacy_v2_code = False
+ _project_id = True
scenarios = [
# test v2 with the v2.1 compatibility stack
@@ -82,7 +86,13 @@ class ApiSampleTestBaseV21(testscenarios.WithScenarios,
'api_major_version': 'v2',
'_legacy_v2_code': True,
'_additional_fixtures': [
- api_paste_fixture.ApiPasteLegacyV2Fixture]})
+ api_paste_fixture.ApiPasteLegacyV2Fixture]}),
+ # test v2.18 code without project id
+ ('v2_1_noproject_id', {
+ 'api_major_version': 'v2.1',
+ '_project_id': False,
+ '_additional_fixtures': [
+ api_paste_fixture.ApiPasteNoProjectId]})
]
def setUp(self):
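As the corrected comment above describes, a test class that only exists to exercise one microversion replaces the inherited scenarios wholesale instead of running all four. A minimal sketch, with the class name and sample_dir invented for illustration:

    class ServersSampleV211Test(ApiSampleTestBaseV21):
        sample_dir = 'servers'
        # Run only against the v2.1 stack, pinned to microversion 2.11.
        scenarios = [('v2_11', {'api_major_version': 'v2.1',
                                'microversion': '2.11'})]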
diff --git a/nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v21_comp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v21_comp.json.tpl
deleted file mode 100644
index 7743bf475b..0000000000
--- a/nova/tests/functional/api_sample_tests/api_samples/all_extensions/extensions-list-resp-v21_comp.json.tpl
+++ /dev/null
@@ -1,724 +0,0 @@
-{
- "extensions": [
- {
- "alias": "NMN",
- "description": "Multiple network support.",
- "links": [],
- "name": "Multinic",
- "namespace": "http://docs.openstack.org/compute/ext/multinic/api/v1.1",
- "updated": "2011-06-09T00:00:00Z"
- },
- {
- "alias": "OS-DCF",
- "description": "Disk Management Extension.",
- "links": [],
- "name": "DiskConfig",
- "namespace": "http://docs.openstack.org/compute/ext/disk_config/api/v1.1",
- "updated": "2011-09-27T00:00:00Z"
- },
- {
- "alias": "OS-EXT-AZ",
- "description": "Extended Availability Zone support.",
- "links": [],
- "name": "ExtendedAvailabilityZone",
- "namespace": "http://docs.openstack.org/compute/ext/extended_availability_zone/api/v2",
- "updated": "2013-01-30T00:00:00Z"
- },
- {
- "alias": "OS-EXT-IMG-SIZE",
- "description": "Adds image size to image listings.",
- "links": [],
- "name": "ImageSize",
- "namespace": "http://docs.openstack.org/compute/ext/image_size/api/v1.1",
- "updated": "2013-02-19T00:00:00Z"
- },
- {
- "alias": "OS-EXT-IPS",
- "description": "Adds type parameter to the ip list.",
- "links": [],
- "name": "ExtendedIps",
- "namespace": "http://docs.openstack.org/compute/ext/extended_ips/api/v1.1",
- "updated": "2013-01-06T00:00:00Z"
- },
- {
- "alias": "OS-EXT-IPS-MAC",
- "description": "Adds mac address parameter to the ip list.",
- "links": [],
- "name": "ExtendedIpsMac",
- "namespace": "http://docs.openstack.org/compute/ext/extended_ips_mac/api/v1.1",
- "updated": "2013-03-07T00:00:00Z"
- },
- {
- "alias": "OS-EXT-SRV-ATTR",
- "description": "Extended Server Attributes support.",
- "links": [],
- "name": "ExtendedServerAttributes",
- "namespace": "http://docs.openstack.org/compute/ext/extended_status/api/v1.1",
- "updated": "2011-11-03T00:00:00Z"
- },
- {
- "alias": "OS-EXT-STS",
- "description": "Extended Status support.",
- "links": [],
- "name": "ExtendedStatus",
- "namespace": "http://docs.openstack.org/compute/ext/extended_status/api/v1.1",
- "updated": "2011-11-03T00:00:00Z"
- },
- {
- "alias": "OS-EXT-VIF-NET",
- "description": "Adds network id parameter to the virtual interface list.",
- "links": [],
- "name": "ExtendedVIFNet",
- "namespace": "http://docs.openstack.org/compute/ext/extended-virtual-interfaces-net/api/v1.1",
- "updated": "2013-03-07T00:00:00Z"
- },
- {
- "alias": "OS-FLV-DISABLED",
- "description": "Support to show the disabled status of a flavor.",
- "links": [],
- "name": "FlavorDisabled",
- "namespace": "http://docs.openstack.org/compute/ext/flavor_disabled/api/v1.1",
- "updated": "2012-08-29T00:00:00Z"
- },
- {
- "alias": "OS-FLV-EXT-DATA",
- "description": "Provide additional data for flavors.",
- "links": [],
- "name": "FlavorExtraData",
- "namespace": "http://docs.openstack.org/compute/ext/flavor_extra_data/api/v1.1",
- "updated": "2011-09-14T00:00:00Z"
- },
- {
- "alias": "OS-SCH-HNT",
- "description": "Pass arbitrary key/value pairs to the scheduler.",
- "links": [],
- "name": "SchedulerHints",
- "namespace": "http://docs.openstack.org/compute/ext/scheduler-hints/api/v2",
- "updated": "2011-07-19T00:00:00Z"
- },
- {
- "alias": "OS-SRV-USG",
- "description": "Adds launched_at and terminated_at on Servers.",
- "links": [],
- "name": "ServerUsage",
- "namespace": "http://docs.openstack.org/compute/ext/server_usage/api/v1.1",
- "updated": "2013-04-29T00:00:00Z"
- },
- {
- "alias": "os-admin-actions",
- "description": "Enable admin-only server actions\n\n Actions include: pause, unpause, suspend, resume, migrate,\n resetNetwork, injectNetworkInfo, lock, unlock, createBackup\n ",
- "links": [],
- "name": "AdminActions",
- "namespace": "http://docs.openstack.org/compute/ext/admin-actions/api/v1.1",
- "updated": "2011-09-20T00:00:00Z"
- },
- {
- "alias": "os-agents",
- "description": "Agents support.",
- "links": [],
- "name": "Agents",
- "namespace": "http://docs.openstack.org/compute/ext/agents/api/v2",
- "updated": "2012-10-28T00:00:00Z"
- },
- {
- "alias": "os-aggregates",
- "description": "Admin-only aggregate administration.",
- "links": [],
- "name": "Aggregates",
- "namespace": "http://docs.openstack.org/compute/ext/aggregates/api/v1.1",
- "updated": "2012-01-12T00:00:00Z"
- },
- {
- "alias": "os-assisted-volume-snapshots",
- "description": "Assisted volume snapshots.",
- "links": [],
- "name": "AssistedVolumeSnapshots",
- "namespace": "http://docs.openstack.org/compute/ext/assisted-volume-snapshots/api/v2",
- "updated": "2013-08-29T00:00:00Z"
- },
- {
- "alias": "os-attach-interfaces",
- "description": "Attach interface support.",
- "links": [],
- "name": "AttachInterfaces",
- "namespace": "http://docs.openstack.org/compute/ext/interfaces/api/v1.1",
- "updated": "2012-07-22T00:00:00Z"
- },
- {
- "alias": "os-availability-zone",
- "description": "1. Add availability_zone to the Create Server v1.1 API.\n 2. Add availability zones describing.\n ",
- "links": [],
- "name": "AvailabilityZone",
- "namespace": "http://docs.openstack.org/compute/ext/availabilityzone/api/v1.1",
- "updated": "2012-12-21T00:00:00Z"
- },
- {
- "alias": "os-baremetal-ext-status",
- "description": "Add extended status in Baremetal Nodes v2 API.",
- "links": [],
- "name": "BareMetalExtStatus",
- "namespace": "http://docs.openstack.org/compute/ext/baremetal_ext_status/api/v2",
- "updated": "2013-08-27T00:00:00Z"
- },
- {
- "alias": "os-baremetal-nodes",
- "description": "Admin-only bare-metal node administration.",
- "links": [],
- "name": "BareMetalNodes",
- "namespace": "http://docs.openstack.org/compute/ext/baremetal_nodes/api/v2",
- "updated": "2013-01-04T00:00:00Z"
- },
- {
- "alias": "os-block-device-mapping-v2-boot",
- "description": "Allow boot with the new BDM data format.",
- "links": [],
- "name": "BlockDeviceMappingV2Boot",
- "namespace": "http://docs.openstack.org/compute/ext/block_device_mapping_v2_boot/api/v2",
- "updated": "2013-07-08T00:00:00Z"
- },
- {
- "alias": "os-cell-capacities",
- "description": "Adding functionality to get cell capacities.",
- "links": [],
- "name": "CellCapacities",
- "namespace": "http://docs.openstack.org/compute/ext/cell_capacities/api/v1.1",
- "updated": "2013-05-27T00:00:00Z"
- },
- {
- "alias": "os-cells",
- "description": "Enables cells-related functionality such as adding neighbor cells,\n listing neighbor cells, and getting the capabilities of the local cell.\n ",
- "links": [],
- "name": "Cells",
- "namespace": "http://docs.openstack.org/compute/ext/cells/api/v1.1",
- "updated": "2013-05-14T00:00:00Z"
- },
- {
- "alias": "os-certificates",
- "description": "Certificates support.",
- "links": [],
- "name": "Certificates",
- "namespace": "http://docs.openstack.org/compute/ext/certificates/api/v1.1",
- "updated": "2012-01-19T00:00:00Z"
- },
- {
- "alias": "os-cloudpipe",
- "description": "Adds actions to create cloudpipe instances.\n\n When running with the Vlan network mode, you need a mechanism to route\n from the public Internet to your vlans. This mechanism is known as a\n cloudpipe.\n\n At the time of creating this class, only OpenVPN is supported. Support for\n a SSH Bastion host is forthcoming.\n ",
- "links": [],
- "name": "Cloudpipe",
- "namespace": "http://docs.openstack.org/compute/ext/cloudpipe/api/v1.1",
- "updated": "2011-12-16T00:00:00Z"
- },
- {
- "alias": "os-cloudpipe-update",
- "description": "Adds the ability to set the vpn ip/port for cloudpipe instances.",
- "links": [],
- "name": "CloudpipeUpdate",
- "namespace": "http://docs.openstack.org/compute/ext/cloudpipe-update/api/v2",
- "updated": "2012-11-14T00:00:00Z"
- },
- {
- "alias": "os-config-drive",
- "description": "Config Drive Extension.",
- "links": [],
- "name": "ConfigDrive",
- "namespace": "http://docs.openstack.org/compute/ext/config_drive/api/v1.1",
- "updated": "2012-07-16T00:00:00Z"
- },
- {
- "alias": "os-console-auth-tokens",
- "description": "Console token authentication support.",
- "links": [],
- "name": "ConsoleAuthTokens",
- "namespace": "http://docs.openstack.org/compute/ext/consoles-auth-tokens/api/v2",
- "updated": "2013-08-13T00:00:00Z"
- },
- {
- "alias": "os-console-output",
- "description": "Console log output support, with tailing ability.",
- "links": [],
- "name": "ConsoleOutput",
- "namespace": "http://docs.openstack.org/compute/ext/os-console-output/api/v2",
- "updated": "2011-12-08T00:00:00Z"
- },
- {
- "alias": "os-consoles",
- "description": "Interactive Console support.",
- "links": [],
- "name": "Consoles",
- "namespace": "http://docs.openstack.org/compute/ext/os-consoles/api/v2",
- "updated": "2011-12-23T00:00:00Z"
- },
- {
- "alias": "os-create-server-ext",
- "description": "Extended support to the Create Server v1.1 API.",
- "links": [],
- "name": "Createserverext",
- "namespace": "http://docs.openstack.org/compute/ext/createserverext/api/v1.1",
- "updated": "2011-07-19T00:00:00Z"
- },
- {
- "alias": "os-deferred-delete",
- "description": "Instance deferred delete.",
- "links": [],
- "name": "DeferredDelete",
- "namespace": "http://docs.openstack.org/compute/ext/deferred-delete/api/v1.1",
- "updated": "2011-09-01T00:00:00Z"
- },
- {
- "alias": "os-evacuate",
- "description": "Enables server evacuation.",
- "links": [],
- "name": "Evacuate",
- "namespace": "http://docs.openstack.org/compute/ext/evacuate/api/v2",
- "updated": "2013-01-06T00:00:00Z"
- },
- {
- "alias": "os-extended-evacuate-find-host",
- "description": "Enables server evacuation without target host. Scheduler will select\n one to target.\n ",
- "links": [],
- "name": "ExtendedEvacuateFindHost",
- "namespace": "http://docs.openstack.org/compute/ext/extended_evacuate_find_host/api/v2",
- "updated": "2014-02-12T00:00:00Z"
- },
- {
- "alias": "os-extended-floating-ips",
- "description": "Adds optional fixed_address to the add floating IP command.",
- "links": [],
- "name": "ExtendedFloatingIps",
- "namespace": "http://docs.openstack.org/compute/ext/extended_floating_ips/api/v2",
- "updated": "2013-04-19T00:00:00Z"
- },
- {
- "alias": "os-extended-hypervisors",
- "description": "Extended hypervisors support.",
- "links": [],
- "name": "ExtendedHypervisors",
- "namespace": "http://docs.openstack.org/compute/ext/extended_hypervisors/api/v1.1",
- "updated": "2014-01-04T00:00:00Z"
- },
- {
- "alias": "os-extended-networks",
- "description": "Adds additional fields to networks.",
- "links": [],
- "name": "ExtendedNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/extended_networks/api/v2",
- "updated": "2014-05-09T00:00:00Z"
- },
- {
- "alias": "os-extended-quotas",
- "description": "Adds ability for admins to delete quota\n and optionally force the update Quota command.\n ",
- "links": [],
- "name": "ExtendedQuotas",
- "namespace": "http://docs.openstack.org/compute/ext/extended_quotas/api/v1.1",
- "updated": "2013-06-09T00:00:00Z"
- },
- {
- "alias": "os-extended-rescue-with-image",
- "description": "Allow the user to specify the image to use for rescue.",
- "links": [],
- "name": "ExtendedRescueWithImage",
- "namespace": "http://docs.openstack.org/compute/ext/extended_rescue_with_image/api/v2",
- "updated": "2014-01-04T00:00:00Z"
- },
- {
- "alias": "os-extended-services",
- "description": "Extended services support.",
- "links": [],
- "name": "ExtendedServices",
- "namespace": "http://docs.openstack.org/compute/ext/extended_services/api/v2",
- "updated": "2013-05-17T00:00:00Z"
- },
- {
- "alias": "os-extended-services-delete",
- "description": "Extended services deletion support.",
- "links": [],
- "name": "ExtendedServicesDelete",
- "namespace": "http://docs.openstack.org/compute/ext/extended_services_delete/api/v2",
- "updated": "2013-12-10T00:00:00Z"
- },
- {
- "alias": "os-extended-volumes",
- "description": "Extended Volumes support.",
- "links": [],
- "name": "ExtendedVolumes",
- "namespace": "http://docs.openstack.org/compute/ext/extended_volumes/api/v1.1",
- "updated": "2013-06-07T00:00:00Z"
- },
- {
- "alias": "os-fixed-ips",
- "description": "Fixed IPs support.",
- "links": [],
- "name": "FixedIPs",
- "namespace": "http://docs.openstack.org/compute/ext/fixed_ips/api/v2",
- "updated": "2012-10-18T19:25:27Z"
- },
- {
- "alias": "os-flavor-access",
- "description": "Flavor access support.",
- "links": [],
- "name": "FlavorAccess",
- "namespace": "http://docs.openstack.org/compute/ext/flavor_access/api/v2",
- "updated": "2012-08-01T00:00:00Z"
- },
- {
- "alias": "os-flavor-extra-specs",
- "description": "Instance type (flavor) extra specs.",
- "links": [],
- "name": "FlavorExtraSpecs",
- "namespace": "http://docs.openstack.org/compute/ext/flavor_extra_specs/api/v1.1",
- "updated": "2011-06-23T00:00:00Z"
- },
- {
- "alias": "os-flavor-manage",
- "description": "Flavor create/delete API support.",
- "links": [],
- "name": "FlavorManage",
- "namespace": "http://docs.openstack.org/compute/ext/flavor_manage/api/v1.1",
- "updated": "2012-01-19T00:00:00Z"
- },
- {
- "alias": "os-flavor-rxtx",
- "description": "Support to show the rxtx status of a flavor.",
- "links": [],
- "name": "FlavorRxtx",
- "namespace": "http://docs.openstack.org/compute/ext/flavor_rxtx/api/v1.1",
- "updated": "2012-08-29T00:00:00Z"
- },
- {
- "alias": "os-flavor-swap",
- "description": "Support to show the swap status of a flavor.",
- "links": [],
- "name": "FlavorSwap",
- "namespace": "http://docs.openstack.org/compute/ext/flavor_swap/api/v1.1",
- "updated": "2012-08-29T00:00:00Z"
- },
- {
- "alias": "os-floating-ip-dns",
- "description": "Floating IP DNS support.",
- "links": [],
- "name": "FloatingIpDns",
- "namespace": "http://docs.openstack.org/ext/floating_ip_dns/api/v1.1",
- "updated": "2011-12-23T00:00:00Z"
- },
- {
- "alias": "os-floating-ip-pools",
- "description": "Floating IPs support.",
- "links": [],
- "name": "FloatingIpPools",
- "namespace": "http://docs.openstack.org/compute/ext/floating_ip_pools/api/v1.1",
- "updated": "2012-01-04T00:00:00Z"
- },
- {
- "alias": "os-floating-ips",
- "description": "Floating IPs support.",
- "links": [],
- "name": "FloatingIps",
- "namespace": "http://docs.openstack.org/compute/ext/floating_ips/api/v1.1",
- "updated": "2011-06-16T00:00:00Z"
- },
- {
- "alias": "os-floating-ips-bulk",
- "description": "Bulk handling of Floating IPs.",
- "links": [],
- "name": "FloatingIpsBulk",
- "namespace": "http://docs.openstack.org/compute/ext/floating_ips_bulk/api/v2",
- "updated": "2012-10-29T19:25:27Z"
- },
- {
- "alias": "os-fping",
- "description": "Fping Management Extension.",
- "links": [],
- "name": "Fping",
- "namespace": "http://docs.openstack.org/compute/ext/fping/api/v1.1",
- "updated": "2012-07-06T00:00:00Z"
- },
- {
- "alias": "os-hide-server-addresses",
- "description": "Support hiding server addresses in certain states.",
- "links": [],
- "name": "HideServerAddresses",
- "namespace": "http://docs.openstack.org/compute/ext/hide_server_addresses/api/v1.1",
- "updated": "2012-12-11T00:00:00Z"
- },
- {
- "alias": "os-hosts",
- "description": "Admin-only host administration.",
- "links": [],
- "name": "Hosts",
- "namespace": "http://docs.openstack.org/compute/ext/hosts/api/v1.1",
- "updated": "2011-06-29T00:00:00Z"
- },
- {
- "alias": "os-hypervisor-status",
- "description": "Show hypervisor status.",
- "links": [],
- "name": "HypervisorStatus",
- "namespace": "http://docs.openstack.org/compute/ext/hypervisor_status/api/v1.1",
- "updated": "2014-04-17T00:00:00Z"
- },
- {
- "alias": "os-hypervisors",
- "description": "Admin-only hypervisor administration.",
- "links": [],
- "name": "Hypervisors",
- "namespace": "http://docs.openstack.org/compute/ext/hypervisors/api/v1.1",
- "updated": "2012-06-21T00:00:00Z"
- },
- {
- "alias": "os-instance-actions",
- "description": "View a log of actions and events taken on an instance.",
- "links": [],
- "name": "InstanceActions",
- "namespace": "http://docs.openstack.org/compute/ext/instance-actions/api/v1.1",
- "updated": "2013-02-08T00:00:00Z"
- },
- {
- "alias": "os-instance_usage_audit_log",
- "description": "Admin-only Task Log Monitoring.",
- "links": [],
- "name": "OSInstanceUsageAuditLog",
- "namespace": "http://docs.openstack.org/ext/services/api/v1.1",
- "updated": "2012-07-06T01:00:00Z"
- },
- {
- "alias": "os-keypairs",
- "description": "Keypair Support.",
- "links": [],
- "name": "Keypairs",
- "namespace": "http://docs.openstack.org/compute/ext/keypairs/api/v1.1",
- "updated": "2011-08-08T00:00:00Z"
- },
- {
- "alias": "os-migrations",
- "description": "Provide data on migrations.",
- "links": [],
- "name": "Migrations",
- "namespace": "http://docs.openstack.org/compute/ext/migrations/api/v2.0",
- "updated": "2013-05-30T00:00:00Z"
- },
- {
- "alias": "os-multiple-create",
- "description": "Allow multiple create in the Create Server v1.1 API.",
- "links": [],
- "name": "MultipleCreate",
- "namespace": "http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1",
- "updated": "2012-08-07T00:00:00Z"
- },
- {
- "alias": "os-networks",
- "description": "Admin-only Network Management Extension.",
- "links": [],
- "name": "Networks",
- "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
- "updated": "2011-12-23T00:00:00Z"
- },
- {
- "alias": "os-networks-associate",
- "description": "Network association support.",
- "links": [],
- "name": "NetworkAssociationSupport",
- "namespace": "http://docs.openstack.org/compute/ext/networks_associate/api/v2",
- "updated": "2012-11-19T00:00:00Z"
- },
- {
- "alias": "os-preserve-ephemeral-rebuild",
- "description": "Allow preservation of the ephemeral partition on rebuild.",
- "links": [],
- "name": "PreserveEphemeralOnRebuild",
- "namespace": "http://docs.openstack.org/compute/ext/preserve_ephemeral_rebuild/api/v2",
- "updated": "2013-12-17T00:00:00Z"
- },
- {
- "alias": "os-quota-class-sets",
- "description": "Quota classes management support.",
- "links": [],
- "name": "QuotaClasses",
- "namespace": "http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1",
- "updated": "2012-03-12T00:00:00Z"
- },
- {
- "alias": "os-quota-sets",
- "description": "Quotas management support.",
- "links": [],
- "name": "Quotas",
- "namespace": "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1",
- "updated": "2011-08-08T00:00:00Z"
- },
- {
- "alias": "os-rescue",
- "description": "Instance rescue mode.",
- "links": [],
- "name": "Rescue",
- "namespace": "http://docs.openstack.org/compute/ext/rescue/api/v1.1",
- "updated": "2011-08-18T00:00:00Z"
- },
- {
- "alias": "os-security-group-default-rules",
- "description": "Default rules for security group support.",
- "links": [],
- "name": "SecurityGroupDefaultRules",
- "namespace": "http://docs.openstack.org/compute/ext/securitygroupdefaultrules/api/v1.1",
- "updated": "2013-02-05T00:00:00Z"
- },
- {
- "alias": "os-security-groups",
- "description": "Security group support.",
- "links": [],
- "name": "SecurityGroups",
- "namespace": "http://docs.openstack.org/compute/ext/securitygroups/api/v1.1",
- "updated": "2013-05-28T00:00:00Z"
- },
- {
- "alias": "os-server-diagnostics",
- "description": "Allow Admins to view server diagnostics through server action.",
- "links": [],
- "name": "ServerDiagnostics",
- "namespace": "http://docs.openstack.org/compute/ext/server-diagnostics/api/v1.1",
- "updated": "2011-12-21T00:00:00Z"
- },
- {
- "alias": "os-server-external-events",
- "description": "Server External Event Triggers.",
- "links": [],
- "name": "ServerExternalEvents",
- "namespace": "http://docs.openstack.org/compute/ext/server-external-events/api/v2",
- "updated": "2014-02-18T00:00:00Z"
- },
- {
- "alias": "os-server-group-quotas",
- "description": "Adds quota support to server groups.",
- "links": [],
- "name": "ServerGroupQuotas",
- "namespace": "http://docs.openstack.org/compute/ext/server-group-quotas/api/v2",
- "updated": "2014-07-25T00:00:00Z"
- },
- {
- "alias": "os-server-groups",
- "description": "Server group support.",
- "links": [],
- "name": "ServerGroups",
- "namespace": "http://docs.openstack.org/compute/ext/servergroups/api/v2",
- "updated": "2013-06-20T00:00:00Z"
- },
- {
- "alias": "os-server-list-multi-status",
- "description": "Allow to specify multiple status values concurrently in the servers\n list API..\n ",
- "links": [],
- "name": "ServerListMultiStatus",
- "namespace": "http://docs.openstack.org/compute/ext/os-server-list-multi-status/api/v2",
- "updated": "2014-05-11T00:00:00Z"
- },
- {
- "alias": "os-server-password",
- "description": "Server password support.",
- "links": [],
- "name": "ServerPassword",
- "namespace": "http://docs.openstack.org/compute/ext/server-password/api/v2",
- "updated": "2012-11-29T00:00:00Z"
- },
- {
- "alias": "os-server-sort-keys",
- "description": "Add sort keys and directions to the Server GET v2 API.",
- "links": [],
- "name": "ServerSortKeys",
- "namespace": "http://docs.openstack.org/compute/ext/server_sort_keys/api/v2",
- "updated": "2014-05-22T00:00:00Z"
- },
- {
- "alias": "os-server-start-stop",
- "description": "Start/Stop instance compute API support.",
- "links": [],
- "name": "ServerStartStop",
- "namespace": "http://docs.openstack.org/compute/ext/servers/api/v1.1",
- "updated": "2012-01-23T00:00:00Z"
- },
- {
- "alias": "os-services",
- "description": "Services support.",
- "links": [],
- "name": "Services",
- "namespace": "http://docs.openstack.org/compute/ext/services/api/v2",
- "updated": "2012-10-28T00:00:00Z"
- },
- {
- "alias": "os-shelve",
- "description": "Instance shelve mode.",
- "links": [],
- "name": "Shelve",
- "namespace": "http://docs.openstack.org/compute/ext/shelve/api/v1.1",
- "updated": "2013-04-06T00:00:00Z"
- },
- {
- "alias": "os-simple-tenant-usage",
- "description": "Simple tenant usage extension.",
- "links": [],
- "name": "SimpleTenantUsage",
- "namespace": "http://docs.openstack.org/compute/ext/os-simple-tenant-usage/api/v1.1",
- "updated": "2011-08-19T00:00:00Z"
- },
- {
- "alias": "os-tenant-networks",
- "description": "Tenant-based Network Management Extension.",
- "links": [],
- "name": "OSTenantNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
- "updated": "2012-03-07T14:46:43Z"
- },
- {
- "alias": "os-used-limits",
- "description": "Provide data on limited resources that are being used.",
- "links": [],
- "name": "UsedLimits",
- "namespace": "http://docs.openstack.org/compute/ext/used_limits/api/v1.1",
- "updated": "2012-07-13T00:00:00Z"
- },
- {
- "alias": "os-used-limits-for-admin",
- "description": "Provide data to admin on limited resources used by other tenants.",
- "links": [],
- "name": "UsedLimitsForAdmin",
- "namespace": "http://docs.openstack.org/compute/ext/used_limits_for_admin/api/v1.1",
- "updated": "2013-05-02T00:00:00Z"
- },
- {
- "alias": "os-user-data",
- "description": "Add user_data to the Create Server v1.1 API.",
- "links": [],
- "name": "UserData",
- "namespace": "http://docs.openstack.org/compute/ext/userdata/api/v1.1",
- "updated": "2012-08-07T00:00:00Z"
- },
- {
- "alias": "os-user-quotas",
- "description": "Project user quota support.",
- "links": [],
- "name": "UserQuotas",
- "namespace": "http://docs.openstack.org/compute/ext/user_quotas/api/v1.1",
- "updated": "2013-07-18T00:00:00Z"
- },
- {
- "alias": "os-virtual-interfaces",
- "description": "Virtual interface support.",
- "links": [],
- "name": "VirtualInterfaces",
- "namespace": "http://docs.openstack.org/compute/ext/virtual_interfaces/api/v1.1",
- "updated": "2011-08-17T00:00:00Z"
- },
- {
- "alias": "os-volume-attachment-update",
- "description": "Support for updating a volume attachment.",
- "links": [],
- "name": "VolumeAttachmentUpdate",
- "namespace": "http://docs.openstack.org/compute/ext/os-volume-attachment-update/api/v2",
- "updated": "2013-06-20T00:00:00Z"
- },
- {
- "alias": "os-volumes",
- "description": "Volumes support.",
- "links": [],
- "name": "Volumes",
- "namespace": "http://docs.openstack.org/compute/ext/volumes/api/v1.1",
- "updated": "2011-03-25T00:00:00Z"
- }
- ]
-} \ No newline at end of file
diff --git a/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-post-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-req.json.tpl
index 0562dcb136..0562dcb136 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-post-req.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-req.json.tpl
diff --git a/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-post-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-resp.json.tpl
index 4b30e0cfbd..4b30e0cfbd 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-post-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/all_extensions/server-create-resp.json.tpl
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-put-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-put-req.json.tpl
deleted file mode 100644
index d38d967042..0000000000
--- a/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-put-req.json.tpl
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "server": {
- "accessIPv4": "%(access_ip_v4)s",
- "accessIPv6": "%(access_ip_v6)s"
- }
-}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-update-put-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-update-put-req.json.tpl
deleted file mode 100644
index 4ac22820df..0000000000
--- a/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-update-put-req.json.tpl
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "server": {
- "OS-DCF:diskConfig": "AUTO"
- }
-}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json.tpl
new file mode 100644
index 0000000000..5c1bd9f124
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/server-get-resp.json.tpl
@@ -0,0 +1,69 @@
+{
+ "server": {
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:reservation_id": "%(reservation_id)s",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:kernel_id": null,
+ "OS-EXT-SRV-ATTR:ramdisk_id": null,
+ "OS-EXT-SRV-ATTR:user_data": null,
+ "locked": false,
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "host_status": "UP",
+ "tenant_id": "openstack",
+ "user_id": "fake",
+ "key_name": null
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json.tpl
new file mode 100644
index 0000000000..4916d4f413
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-extended-server-attributes/v2.16/servers-detail-resp.json.tpl
@@ -0,0 +1,71 @@
+{
+ "servers": [
+ {
+ "OS-EXT-SRV-ATTR:host": "%(compute_host)s",
+ "OS-EXT-SRV-ATTR:hypervisor_hostname": "%(hypervisor_hostname)s",
+ "OS-EXT-SRV-ATTR:instance_name": "%(instance_name)s",
+ "OS-EXT-SRV-ATTR:hostname": "new-server-test",
+ "OS-EXT-SRV-ATTR:launch_index": 0,
+ "OS-EXT-SRV-ATTR:reservation_id": "%(reservation_id)s",
+ "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda",
+ "OS-EXT-SRV-ATTR:kernel_id": null,
+ "OS-EXT-SRV-ATTR:ramdisk_id": null,
+ "OS-EXT-SRV-ATTR:user_data": null,
+ "locked": false,
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "updated": "%(isotime)s",
+ "created": "%(isotime)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4,
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed"
+ }
+ ]
+ },
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "progress": 0,
+ "status": "ACTIVE",
+ "host_status": "UP",
+ "tenant_id": "openstack",
+ "user_id": "fake",
+ "key_name": null
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json.tpl
new file mode 100644
index 0000000000..7cd5325239
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-action-get-resp.json.tpl
@@ -0,0 +1,27 @@
+{
+ "instanceAction": {
+ "action": "%(action)s",
+ "instance_uuid": "%(instance_uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(strtime)s",
+ "message": "",
+ "events": [
+ {
+ "event": "%(event)s",
+ "start_time": "%(strtime)s",
+ "finish_time": "%(strtime)s",
+ "result": "%(result)s",
+ "traceback": ""
+ },
+ {
+ "event": "%(event)s",
+ "start_time": "%(strtime)s",
+ "finish_time": "%(strtime)s",
+ "result": "%(result)s",
+ "traceback": ""
+ }
+ ]
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json.tpl
new file mode 100644
index 0000000000..0fdc33916a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-instance-actions/v2.21/instance-actions-list-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "instanceActions": [
+ {
+ "action": "%(action)s",
+ "instance_uuid": "%(uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(strtime)s",
+ "message": ""
+ },
+ {
+ "action": "%(action)s",
+ "instance_uuid": "%(uuid)s",
+ "request_id": "%(request_id)s",
+ "user_id": "%(integer_id)s",
+ "project_id": "%(integer_id)s",
+ "start_time": "%(strtime)s",
+ "message": ""
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-get-resp.json.tpl
index 1bdfe9e1b7..d72a5418a6 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-get-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-get-resp.json.tpl
@@ -5,7 +5,7 @@
"policies": ["anti-affinity"],
"members": [],
"metadata": {},
- "project_id": "c7c9f4f175e247acb56c108fd724d667",
+ "project_id": "openstack",
"user_id": "fake"
}
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-list-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-list-resp.json.tpl
index 417b9a1a1b..8312769412 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-list-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-list-resp.json.tpl
@@ -6,7 +6,7 @@
"policies": ["anti-affinity"],
"members": [],
"metadata": {},
- "project_id": "c7c9f4f175e247acb56c108fd724d667",
+ "project_id": "openstack",
"user_id": "fake"
}
]
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-post-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-post-resp.json.tpl
index 1bdfe9e1b7..d72a5418a6 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-post-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/os-server-groups/v2.13/server-groups-post-resp.json.tpl
@@ -5,7 +5,7 @@
"policies": ["anti-affinity"],
"members": [],
"metadata": {},
- "project_id": "c7c9f4f175e247acb56c108fd724d667",
+ "project_id": "openstack",
"user_id": "fake"
}
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/server-migrations/force_complete.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/server-migrations/force_complete.json.tpl
new file mode 100644
index 0000000000..e2adb7b5a0
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/server-migrations/force_complete.json.tpl
@@ -0,0 +1,3 @@
+{
+ "force_complete": null
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/server-migrations/live-migrate-server.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/server-migrations/live-migrate-server.json.tpl
new file mode 100644
index 0000000000..4800d4aa11
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/server-migrations/live-migrate-server.json.tpl
@@ -0,0 +1,7 @@
+{
+ "os-migrateLive": {
+ "host": "%(hostname)s",
+ "block_migration": false,
+ "disk_over_commit": false
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/server-create-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/server-create-req.json.tpl
new file mode 100644
index 0000000000..25b6415890
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/server-create-req.json.tpl
@@ -0,0 +1,12 @@
+{
+ "server" : {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "name" : "new-server-test",
+ "imageRef" : "%(image_id)s",
+ "flavorRef" : "1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/server-create-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/server-create-resp.json.tpl
new file mode 100644
index 0000000000..4b30e0cfbd
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/server-create-resp.json.tpl
@@ -0,0 +1,22 @@
+{
+ "server": {
+ "OS-DCF:diskConfig": "AUTO",
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "security_groups": [
+ {
+ "name": "default"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/server-update-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/server-update-req.json.tpl
new file mode 100644
index 0000000000..e34896621d
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/server-update-req.json.tpl
@@ -0,0 +1,8 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "OS-DCF:diskConfig": "AUTO",
+ "name" : "new-server-test"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-update-put-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/server-update-resp.json.tpl
index 047d9be049..7261c4bae5 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/os-disk-config/server-update-put-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/server-update-resp.json.tpl
@@ -1,8 +1,8 @@
{
"server": {
"OS-DCF:diskConfig": "AUTO",
- "accessIPv4": "",
- "accessIPv6": "",
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
"addresses": {
"private": [
{
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.17/server-action-trigger-crash-dump.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.17/server-action-trigger-crash-dump.json.tpl
new file mode 100644
index 0000000000..968c7c05e8
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.17/server-action-trigger-crash-dump.json.tpl
@@ -0,0 +1,3 @@
+{
+ "trigger_crash_dump": null
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild-resp.json.tpl
new file mode 100644
index 0000000000..e863676e7b
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild-resp.json.tpl
@@ -0,0 +1,57 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "version": 4
+ }
+ ]
+ },
+ "adminPass": "%(password)s",
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(uuid)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "locked": false,
+ "metadata": {
+ "meta_var": "meta_val"
+ },
+ "name": "%(name)s",
+ "description": "%(description)s",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild.json.tpl
new file mode 100644
index 0000000000..8e6a86aa2b
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-action-rebuild.json.tpl
@@ -0,0 +1,13 @@
+{
+ "rebuild" : {
+ "accessIPv4" : "%(access_ip_v4)s",
+ "accessIPv6" : "%(access_ip_v6)s",
+ "imageRef" : "%(uuid)s",
+ "name" : "%(name)s",
+ "description" : "%(description)s",
+ "adminPass" : "%(pass)s",
+ "metadata" : {
+ "meta_var" : "meta_val"
+ }
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-get-resp.json.tpl
new file mode 100644
index 0000000000..e9830f6142
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-get-resp.json.tpl
@@ -0,0 +1,59 @@
+{
+ "server": {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "description": "new-server-description",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "locked": false
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-req.json.tpl
new file mode 100644
index 0000000000..b1306fea57
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-req.json.tpl
@@ -0,0 +1,13 @@
+{
+ "server" : {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "name" : "new-server-test",
+ "description" : "new-server-description",
+ "imageRef" : "%(glance_host)s/images/%(image_id)s",
+ "flavorRef" : "%(host)s/flavors/1",
+ "metadata" : {
+ "My Server Name" : "Apache1"
+ }
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-resp.json.tpl
new file mode 100644
index 0000000000..5358868400
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-post-resp.json.tpl
@@ -0,0 +1,16 @@
+{
+ "server": {
+ "adminPass": "%(password)s",
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-req.json.tpl
new file mode 100644
index 0000000000..cf6ceef7b5
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-req.json.tpl
@@ -0,0 +1,6 @@
+{
+ "server" : {
+ "name" : "updated-server-test",
+ "description" : "updated-server-description"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-put-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-resp.json.tpl
index 6df0507c34..cd3aaed4b4 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/os-access-ips/server-put-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/server-put-resp.json.tpl
@@ -44,11 +44,13 @@
"metadata": {
"My Server Name": "Apache1"
},
- "name": "new-server-test",
+ "name": "updated-server-test",
+ "description": "updated-server-description",
"progress": 0,
"status": "ACTIVE",
"tenant_id": "openstack",
"updated": "%(isotime)s",
- "user_id": "fake"
+ "user_id": "fake",
+ "locked": false
}
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-details-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-details-resp.json.tpl
new file mode 100644
index 0000000000..0594f7c81e
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-details-resp.json.tpl
@@ -0,0 +1,61 @@
+{
+ "servers": [
+ {
+ "accessIPv4": "%(access_ip_v4)s",
+ "accessIPv6": "%(access_ip_v6)s",
+ "addresses": {
+ "private": [
+ {
+ "addr": "%(ip)s",
+ "OS-EXT-IPS-MAC:mac_addr": "aa:bb:cc:dd:ee:ff",
+ "OS-EXT-IPS:type": "fixed",
+ "version": 4
+ }
+ ]
+ },
+ "created": "%(isotime)s",
+ "flavor": {
+ "id": "1",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/flavors/1",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "hostId": "%(hostid)s",
+ "id": "%(id)s",
+ "image": {
+ "id": "%(uuid)s",
+ "links": [
+ {
+ "href": "%(compute_endpoint)s/images/%(uuid)s",
+ "rel": "bookmark"
+ }
+ ]
+ },
+ "key_name": null,
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(uuid)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "metadata": {
+ "My Server Name": "Apache1"
+ },
+ "name": "new-server-test",
+ "description": "new-server-description",
+ "progress": 0,
+ "status": "ACTIVE",
+ "tenant_id": "openstack",
+ "updated": "%(isotime)s",
+ "user_id": "fake",
+ "locked": false
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-list-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-list-resp.json.tpl
new file mode 100644
index 0000000000..f78d963d5d
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/servers/v2.19/servers-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "servers": [
+ {
+ "id": "%(id)s",
+ "links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/servers/%(id)s",
+ "rel": "self"
+ },
+ {
+ "href": "%(compute_endpoint)s/servers/%(id)s",
+ "rel": "bookmark"
+ }
+ ],
+ "name": "new-server-test"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/versions/v21-version-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/versions/v21-version-get-resp.json.tpl
index 6284331582..afdaee2a49 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/versions/v21-version-get-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/versions/v21-version-get-resp.json.tpl
@@ -19,7 +19,7 @@
}
],
"status": "CURRENT",
- "version": "2.15",
+ "version": "%(max_api_version)s",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/versions/versions-get-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/versions/versions-get-resp.json.tpl
index 7d84250a08..3761edcba1 100644
--- a/nova/tests/functional/api_sample_tests/api_samples/versions/versions-get-resp.json.tpl
+++ b/nova/tests/functional/api_sample_tests/api_samples/versions/versions-get-resp.json.tpl
@@ -22,7 +22,7 @@
}
],
"status": "CURRENT",
- "version": "2.15",
+ "version": "%(max_api_version)s",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
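Both version templates now interpolate the advertised maximum instead of hard-coding 2.15, so future microversion bumps no longer have to touch these samples. The %(...)s placeholders are ordinary Python named %-substitutions filled in by the sample test harness; roughly:

    # Rough illustration only; the real substitution dict is built by
    # the api sample test machinery.
    line = '"version": "%(max_api_version)s"'
    print(line % {'max_api_version': '2.21'})  # "version": "2.21"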
diff --git a/nova/tests/functional/api_sample_tests/test_access_ips.py b/nova/tests/functional/api_sample_tests/test_access_ips.py
index 86d2a3a36c..d89fa63f55 100644
--- a/nova/tests/functional/api_sample_tests/test_access_ips.py
+++ b/nova/tests/functional/api_sample_tests/test_access_ips.py
@@ -93,18 +93,3 @@ class AccessIPsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
subs['id'] = uuid
self._verify_response('server-action-rebuild-resp',
subs, response, 202)
-
- def test_servers_update(self):
- subs = {
- 'image_id': fake.get_valid_image_id(),
- 'compute_endpoint': self._get_compute_endpoint(),
- 'access_ip_v4': '1.2.3.4',
- 'access_ip_v6': 'fe80::'
- }
- uuid = self._servers_post(subs)
- subs['access_ip_v4'] = "4.3.2.1"
- subs['access_ip_v6'] = '80fe::'
- response = self._do_put('servers/%s' % uuid, 'server-put-req', subs)
- subs['hostid'] = '[a-f0-9]+'
- subs['id'] = uuid
- self._verify_response('server-put-resp', subs, response, 200)
diff --git a/nova/tests/functional/api_sample_tests/test_cloudpipe.py b/nova/tests/functional/api_sample_tests/test_cloudpipe.py
index 7abf0411f9..481d89bd79 100644
--- a/nova/tests/functional/api_sample_tests/test_cloudpipe.py
+++ b/nova/tests/functional/api_sample_tests/test_cloudpipe.py
@@ -79,7 +79,7 @@ class CloudPipeSampleTest(api_sample_base.ApiSampleTestBaseV21):
def test_cloud_pipe_update(self):
subs = {'vpn_ip': '192.168.1.1',
- 'vpn_port': 2000}
+ 'vpn_port': '2000'}
response = self._do_put('os-cloudpipe/configure-project',
'cloud-pipe-update-req',
subs)
diff --git a/nova/tests/functional/api_sample_tests/test_disk_config.py b/nova/tests/functional/api_sample_tests/test_disk_config.py
index 425379caaa..c745e8b6a1 100644
--- a/nova/tests/functional/api_sample_tests/test_disk_config.py
+++ b/nova/tests/functional/api_sample_tests/test_disk_config.py
@@ -61,16 +61,6 @@ class DiskConfigJsonTest(test_servers.ServersSampleBase):
subs['access_ip_v6'] = ''
self._verify_response('server-get-resp', subs, response, 200)
- def test_update_server(self):
- uuid = self._post_server(use_common_server_api_samples=False)
- response = self._do_put('servers/%s' % uuid,
- 'server-update-put-req', {})
- subs = {}
- subs['hostid'] = '[a-f0-9]+'
- subs['access_ip_v4'] = ''
- subs['access_ip_v6'] = ''
- self._verify_response('server-update-put-resp', subs, response, 200)
-
def test_resize_server(self):
self.flags(allow_resize_to_same_host=True)
uuid = self._post_server(use_common_server_api_samples=False)
diff --git a/nova/tests/functional/api_sample_tests/test_evacuate.py b/nova/tests/functional/api_sample_tests/test_evacuate.py
index 93d866fe1d..da8569ab68 100644
--- a/nova/tests/functional/api_sample_tests/test_evacuate.py
+++ b/nova/tests/functional/api_sample_tests/test_evacuate.py
@@ -93,7 +93,7 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host='testHost')
+ host='testHost', request_spec=mock.ANY)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -109,7 +109,7 @@ class EvacuateJsonTest(test_servers.ServersSampleBase):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=False, preserve_ephemeral=mock.ANY,
- host=None)
+ host=None, request_spec=mock.ANY)
class EvacuateJsonTestV214(EvacuateJsonTest):
@@ -130,7 +130,7 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host='testHost')
+ host='testHost', request_spec=mock.ANY)
@mock.patch('nova.conductor.manager.ComputeTaskManager.rebuild_instance')
def test_server_evacuate_find_host(self, rebuild_mock):
@@ -145,4 +145,4 @@ class EvacuateJsonTestV214(EvacuateJsonTest):
injected_files=mock.ANY, new_pass="MySecretPass",
orig_sys_metadata=mock.ANY, bdms=mock.ANY, recreate=mock.ANY,
on_shared_storage=None, preserve_ephemeral=mock.ANY,
- host=None)
+ host=None, request_spec=mock.ANY)
diff --git a/nova/tests/functional/api_sample_tests/test_extended_server_attributes.py b/nova/tests/functional/api_sample_tests/test_extended_server_attributes.py
index c6d35d12b0..a749c00a11 100644
--- a/nova/tests/functional/api_sample_tests/test_extended_server_attributes.py
+++ b/nova/tests/functional/api_sample_tests/test_extended_server_attributes.py
@@ -65,3 +65,34 @@ class ExtendedServerAttributesJsonTest(test_servers.ServersSampleBase):
subs['access_ip_v4'] = '1.2.3.4'
subs['access_ip_v6'] = '80fe::'
self._verify_response('servers-detail-resp', subs, response, 200)
+
+
+class ExtendedServerAttributesJsonTestV216(ExtendedServerAttributesJsonTest):
+ microversion = '2.16'
+ scenarios = [('v2_16', {'api_major_version': 'v2.1'})]
+
+ def test_show(self):
+ uuid = self._post_server()
+
+ response = self._do_get('servers/%s' % uuid)
+ subs = {}
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['instance_name'] = r'instance-\d{8}'
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ subs['access_ip_v4'] = '1.2.3.4'
+ subs['access_ip_v6'] = '80fe::'
+ self._verify_response('server-get-resp', subs, response, 200)
+
+ def test_detail(self):
+ uuid = self._post_server()
+
+ response = self._do_get('servers/detail')
+ subs = {}
+ subs['hostid'] = '[a-f0-9]+'
+ subs['id'] = uuid
+ subs['instance_name'] = r'instance-\d{8}'
+ subs['hypervisor_hostname'] = r'[\w\.\-]+'
+ subs['access_ip_v4'] = '1.2.3.4'
+ subs['access_ip_v6'] = '80fe::'
+ self._verify_response('servers-detail-resp', subs, response, 200)
diff --git a/nova/tests/functional/api_sample_tests/test_extension_info.py b/nova/tests/functional/api_sample_tests/test_extension_info.py
index 7c84b5be27..895bcd8700 100644
--- a/nova/tests/functional/api_sample_tests/test_extension_info.py
+++ b/nova/tests/functional/api_sample_tests/test_extension_info.py
@@ -42,9 +42,10 @@ class ExtensionInfoAllSamplesJsonTest(api_sample_base.ApiSampleTestBaseV21):
# stack. We default to the v2.1 case.
template = 'extensions-list-resp'
if self.api_major_version == 'v2':
- template = 'extensions-list-resp-v21-compatible'
- if self.api_major_version == 'v2' and self._legacy_v2_code:
- template = 'extensions-list-resp-v2'
+ if self._legacy_v2_code:
+ template = 'extensions-list-resp-v2'
+ else:
+ template = 'extensions-list-resp-v21-compatible'
self._verify_response(template, {}, response, 200)
diff --git a/nova/tests/functional/api_sample_tests/test_fixed_ips.py b/nova/tests/functional/api_sample_tests/test_fixed_ips.py
index 2cf6b10bd2..1ad6b7fc44 100644
--- a/nova/tests/functional/api_sample_tests/test_fixed_ips.py
+++ b/nova/tests/functional/api_sample_tests/test_fixed_ips.py
@@ -38,7 +38,7 @@ class FixedIpTest(test_servers.ServersSampleBase):
def setUp(self):
super(FixedIpTest, self).setUp()
-
+ self.api.microversion = self.microversion
instance = dict(test_utils.get_test_instance(),
hostname='openstack', host='host')
fake_fixed_ips = [{'id': 1,
@@ -95,15 +95,13 @@ class FixedIpTest(test_servers.ServersSampleBase):
def test_fixed_ip_reserve(self):
# Reserve a Fixed IP.
response = self._do_post('os-fixed-ips/192.168.1.1/action',
- 'fixedip-post-req', {},
- api_version=self.microversion)
+ 'fixedip-post-req', {})
self.assertEqual(202, response.status_code)
self.assertEqual("", response.content)
def _test_get_fixed_ip(self, **kwargs):
# Return data about the given fixed ip.
- response = self._do_get('os-fixed-ips/192.168.1.1',
- api_version=self.microversion)
+ response = self._do_get('os-fixed-ips/192.168.1.1')
project = {'cidr': '192.168.1.0/24',
'hostname': 'openstack',
'host': 'host',
@@ -123,4 +121,4 @@ class FixedIpV24Test(FixedIpTest):
scenarios = [('v2_4', {'api_major_version': 'v2.1'})]
def test_get_fixed_ip(self):
- self._test_get_fixed_ip(reserved=False)
+ self._test_get_fixed_ip(reserved='False')
diff --git a/nova/tests/functional/api_sample_tests/test_flavor_access.py b/nova/tests/functional/api_sample_tests/test_flavor_access.py
index 53521ed87f..e4ad651f50 100644
--- a/nova/tests/functional/api_sample_tests/test_flavor_access.py
+++ b/nova/tests/functional/api_sample_tests/test_flavor_access.py
@@ -49,7 +49,7 @@ class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
def _add_tenant(self):
subs = {
'tenant_id': 'fake_tenant',
- 'flavor_id': 10,
+ 'flavor_id': '10',
}
response = self._do_post('flavors/10/action',
'flavor-access-add-tenant-req',
@@ -59,7 +59,7 @@ class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
def _create_flavor(self):
subs = {
- 'flavor_id': 10,
+ 'flavor_id': '10',
'flavor_name': 'test_flavor'
}
response = self._do_post("flavors",
@@ -78,7 +78,7 @@ class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
def test_flavor_access_list(self):
self._create_flavor()
self._add_tenant()
- flavor_id = 10
+ flavor_id = '10'
response = self._do_get('flavors/%s/os-flavor-access' % flavor_id)
subs = {
'flavor_id': flavor_id,
@@ -87,7 +87,7 @@ class FlavorAccessSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
self._verify_response('flavor-access-list-resp', subs, response, 200)
def test_flavor_access_show(self):
- flavor_id = 1
+ flavor_id = '1'
response = self._do_get('flavors/%s' % flavor_id)
subs = {
'flavor_id': flavor_id
diff --git a/nova/tests/functional/api_sample_tests/test_flavor_manage.py b/nova/tests/functional/api_sample_tests/test_flavor_manage.py
index c54519c699..e50c77343b 100644
--- a/nova/tests/functional/api_sample_tests/test_flavor_manage.py
+++ b/nova/tests/functional/api_sample_tests/test_flavor_manage.py
@@ -49,7 +49,7 @@ class FlavorManageSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
def _create_flavor(self):
"""Create a flavor."""
subs = {
- 'flavor_id': 10,
+ 'flavor_id': '10',
'flavor_name': "test_flavor"
}
response = self._do_post("flavors",
diff --git a/nova/tests/functional/api_sample_tests/test_flavor_rxtx.py b/nova/tests/functional/api_sample_tests/test_flavor_rxtx.py
index ac21c604ee..9af03a883d 100644
--- a/nova/tests/functional/api_sample_tests/test_flavor_rxtx.py
+++ b/nova/tests/functional/api_sample_tests/test_flavor_rxtx.py
@@ -50,7 +50,7 @@ class FlavorRxtxJsonTest(api_sample_base.ApiSampleTestBaseV21):
return f
def test_flavor_rxtx_get(self):
- flavor_id = 1
+ flavor_id = '1'
response = self._do_get('flavors/%s' % flavor_id)
subs = {
'flavor_id': flavor_id,
@@ -64,7 +64,7 @@ class FlavorRxtxJsonTest(api_sample_base.ApiSampleTestBaseV21):
def test_flavors_rxtx_create(self):
subs = {
- 'flavor_id': 100,
+ 'flavor_id': '100',
'flavor_name': 'flavortest'
}
response = self._do_post('flavors',
diff --git a/nova/tests/functional/api_sample_tests/test_hypervisors.py b/nova/tests/functional/api_sample_tests/test_hypervisors.py
index 81ddfd11fd..bd33cc0f73 100644
--- a/nova/tests/functional/api_sample_tests/test_hypervisors.py
+++ b/nova/tests/functional/api_sample_tests/test_hypervisors.py
@@ -75,7 +75,7 @@ class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
response, 200)
def test_hypervisors_detail(self):
- hypervisor_id = 1
+ hypervisor_id = '1'
subs = {
'hypervisor_id': hypervisor_id
}
@@ -83,7 +83,7 @@ class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
self._verify_response('hypervisors-detail-resp', subs, response, 200)
def test_hypervisors_show(self):
- hypervisor_id = 1
+ hypervisor_id = '1'
subs = {
'hypervisor_id': hypervisor_id
}
@@ -101,7 +101,7 @@ class HypervisorsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
self.stub_out('nova.compute.api.HostAPI.get_host_uptime',
fake_get_host_uptime)
- hypervisor_id = 1
+ hypervisor_id = '1'
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
@@ -157,5 +157,5 @@ class HypervisorsCellsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
hypervisor_id = fake_hypervisor['id']
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
- subs = {'hypervisor_id': hypervisor_id}
+ subs = {'hypervisor_id': str(hypervisor_id)}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
diff --git a/nova/tests/functional/api_sample_tests/test_instance_actions.py b/nova/tests/functional/api_sample_tests/test_instance_actions.py
index 63f2fea4d5..f825bb9f2d 100644
--- a/nova/tests/functional/api_sample_tests/test_instance_actions.py
+++ b/nova/tests/functional/api_sample_tests/test_instance_actions.py
@@ -29,6 +29,7 @@ CONF.import_opt('osapi_compute_extension',
class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
+ microversion = None
ADMIN_API = True
extension_name = 'os-instance-actions'
@@ -39,8 +40,14 @@ class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
'contrib.instance_actions.Instance_actions')
return f
+ def _fake_get(self, context, instance_uuid, expected_attrs=None,
+ want_objects=True):
+ return fake_instance.fake_instance_obj(
+ None, **{'uuid': instance_uuid})
+
def setUp(self):
super(ServerActionsSampleJsonTest, self).setUp()
+ self.api.microversion = self.microversion
self.actions = fake_server_actions.FAKE_ACTIONS
self.events = fake_server_actions.FAKE_EVENTS
self.instance = test_utils.get_test_instance(obj=True)
@@ -58,11 +65,6 @@ class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
def fake_instance_get_by_uuid(context, instance_id):
return self.instance
- def fake_get(self, context, instance_uuid, expected_attrs=None,
- want_objects=True):
- return fake_instance.fake_instance_obj(
- None, **{'uuid': instance_uuid})
-
self.stub_out('nova.db.action_get_by_request_id',
fake_instance_action_get_by_request_id)
self.stub_out('nova.db.actions_get', fake_server_actions_get)
@@ -70,7 +72,7 @@ class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
fake_instance_action_events_get)
self.stub_out('nova.db.instance_get_by_uuid',
fake_instance_get_by_uuid)
- self.stub_out('nova.compute.api.API.get', fake_get)
+ self.stub_out('nova.compute.api.API.get', self._fake_get)
def test_instance_action_get(self):
fake_uuid = fake_server_actions.FAKE_UUID
@@ -81,10 +83,10 @@ class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
(fake_uuid, fake_request_id))
subs = {}
subs['action'] = '(reboot)|(resize)'
- subs['instance_uuid'] = fake_uuid
+ subs['instance_uuid'] = str(fake_uuid)
subs['integer_id'] = '[0-9]+'
- subs['request_id'] = fake_action['request_id']
- subs['start_time'] = fake_action['start_time']
+ subs['request_id'] = str(fake_action['request_id'])
+ subs['start_time'] = str(fake_action['start_time'])
subs['result'] = '(Success)|(Error)'
subs['event'] = '(schedule)|(compute_create)'
self._verify_response('instance-action-get-resp', subs, response, 200)
@@ -99,3 +101,14 @@ class ServerActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
'-[0-9a-f]{4}-[0-9a-f]{12}')
self._verify_response('instance-actions-list-resp', subs,
response, 200)
+
+
+class ServerActionsV221SampleJsonTest(ServerActionsSampleJsonTest):
+ microversion = '2.21'
+ scenarios = [('v2_21', {'api_major_version': 'v2.1'})]
+
+ def _fake_get(self, context, instance_uuid, expected_attrs=None,
+ want_objects=True):
+ self.assertEqual('yes', context.read_deleted)
+ return fake_instance.fake_instance_obj(
+ None, **{'uuid': instance_uuid})
diff --git a/nova/tests/functional/api_sample_tests/test_keypairs.py b/nova/tests/functional/api_sample_tests/test_keypairs.py
index f0312639e6..3065153d8e 100644
--- a/nova/tests/functional/api_sample_tests/test_keypairs.py
+++ b/nova/tests/functional/api_sample_tests/test_keypairs.py
@@ -39,6 +39,10 @@ class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
'nova.api.openstack.compute.contrib.keypairs.Keypairs')
return f
+ def setUp(self):
+ super(KeyPairsSampleJsonTest, self).setUp()
+ self.api.microversion = self.microversion
+
# TODO(sdague): this is only needed because we randomly choose the
# uuid each time.
def generalize_subs(self, subs, vanilla_regexes):
@@ -52,8 +56,7 @@ class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
"""Get api sample of key pairs post request."""
key_name = 'keypair-' + str(uuid.uuid4())
subs = dict(keypair_name=key_name, **kwargs)
- response = self._do_post('os-keypairs', 'keypairs-post-req', subs,
- api_version=self.microversion)
+ response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
subs = {'keypair_name': key_name}
self._verify_response('keypairs-post-resp', subs, response,
@@ -77,31 +80,28 @@ class KeyPairsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
params['public_key'] = public_key
params.update(**kwargs)
response = self._do_post('os-keypairs', 'keypairs-import-post-req',
- params, api_version=self.microversion)
+ params)
self._verify_response('keypairs-import-post-resp', subs, response,
self.expected_post_status_code)
def test_keypairs_list(self):
# Get api sample of key pairs list request.
key_name = self.test_keypairs_post()
- response = self._do_get('os-keypairs',
- api_version=self.microversion)
+ response = self._do_get('os-keypairs')
subs = {'keypair_name': key_name}
self._verify_response('keypairs-list-resp', subs, response, 200)
def test_keypairs_get(self):
# Get api sample of key pairs get request.
key_name = self.test_keypairs_post()
- response = self._do_get('os-keypairs/%s' % key_name,
- api_version=self.microversion)
+ response = self._do_get('os-keypairs/%s' % key_name)
subs = {'keypair_name': key_name}
self._verify_response('keypairs-get-resp', subs, response, 200)
def test_keypairs_delete(self):
# Get api sample of key pairs delete request.
key_name = self.test_keypairs_post()
- response = self._do_delete('os-keypairs/%s' % key_name,
- api_version=self.microversion)
+ response = self._do_delete('os-keypairs/%s' % key_name)
self.assertEqual(self.expected_delete_status_code,
response.status_code)
@@ -128,8 +128,7 @@ class KeyPairsV22SampleJsonTest(KeyPairsSampleJsonTest):
def test_keypairs_post_invalid(self):
key_name = 'keypair-' + str(uuid.uuid4())
subs = dict(keypair_name=key_name, keypair_type='fakey_type')
- response = self._do_post('os-keypairs', 'keypairs-post-req', subs,
- api_version=self.microversion)
+ response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
self.assertEqual(400, response.status_code)
@@ -154,7 +153,7 @@ class KeyPairsV22SampleJsonTest(KeyPairsSampleJsonTest):
'public_key': fake_crypto.get_ssh_public_key()
}
response = self._do_post('os-keypairs', 'keypairs-import-post-req',
- subs, api_version=self.microversion)
+ subs)
self.assertEqual(400, response.status_code)
@@ -203,8 +202,7 @@ class KeyPairsV210SampleJsonTest(KeyPairsSampleJsonTest):
'user_id': "fake"
}
key_name = self._check_keypairs_post(**subs)
- response = self._do_delete('os-keypairs/%s?user_id=fake' % key_name,
- api_version=self.microversion)
+ response = self._do_delete('os-keypairs/%s?user_id=fake' % key_name)
self.assertEqual(self.expected_delete_status_code,
response.status_code)
@@ -222,8 +220,6 @@ class KeyPairsV210SampleJsonTestNotAdmin(KeyPairsV210SampleJsonTest):
subs = dict(keypair_name=key_name,
keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
user_id='fake1')
- response = self._do_post('os-keypairs', 'keypairs-post-req', subs,
- api_version=self.microversion,
- )
+ response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
self.assertEqual(403, response.status_code)
diff --git a/nova/tests/functional/api_sample_tests/test_migrate_server.py b/nova/tests/functional/api_sample_tests/test_migrate_server.py
index 5a2c6854a0..3cb32894d8 100644
--- a/nova/tests/functional/api_sample_tests/test_migrate_server.py
+++ b/nova/tests/functional/api_sample_tests/test_migrate_server.py
@@ -53,7 +53,7 @@ class MigrateServerSamplesJsonTest(test_servers.ServersSampleBase):
def test_post_live_migrate_server(self):
# Get api samples to server live migrate request.
def fake_live_migrate(_self, context, instance, scheduler_hint,
- block_migration, disk_over_commit):
+ block_migration, disk_over_commit, request_spec):
self.assertEqual(self.uuid, instance["uuid"])
host = scheduler_hint["host"]
self.assertEqual(self.compute.host, host)
diff --git a/nova/tests/functional/api_sample_tests/test_multinic.py b/nova/tests/functional/api_sample_tests/test_multinic.py
index 5abd3d879e..8fa72ae325 100644
--- a/nova/tests/functional/api_sample_tests/test_multinic.py
+++ b/nova/tests/functional/api_sample_tests/test_multinic.py
@@ -46,7 +46,7 @@ class MultinicSampleJsonTest(test_servers.ServersSampleBase):
self.uuid = self._post_server()
def _add_fixed_ip(self):
- subs = {"networkId": 1}
+ subs = {"networkId": '1'}
response = self._do_post('servers/%s/action' % (self.uuid),
'multinic-add-fixed-ip-req', subs)
self.assertEqual(202, response.status_code)
diff --git a/nova/tests/functional/api_sample_tests/test_remote_consoles.py b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
index 8a2bbfaf9f..83a93a4644 100644
--- a/nova/tests/functional/api_sample_tests/test_remote_consoles.py
+++ b/nova/tests/functional/api_sample_tests/test_remote_consoles.py
@@ -23,6 +23,7 @@ CONF.import_opt('osapi_compute_extension',
class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
+ microversion = None
extension_name = "os-remote-consoles"
def _get_flags(self):
@@ -34,6 +35,7 @@ class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
def setUp(self):
super(ConsolesSampleJsonTests, self).setUp()
+ self.api.microversion = self.microversion
self.flags(enabled=True, group='vnc')
self.flags(enabled=True, group='spice')
self.flags(enabled=True, group='rdp')
@@ -96,8 +98,7 @@ class ConsolesV26SampleJsonTests(test_servers.ServersSampleBase):
body = {'protocol': 'vnc', 'type': 'novnc'}
response = self._do_post('servers/%s/remote-consoles' % uuid,
- 'create-vnc-console-req', body,
- api_version='2.6')
+ 'create-vnc-console-req', body)
subs = {"url": self.http_regex}
self._verify_response('create-vnc-console-resp', subs, response, 200)
@@ -117,7 +118,6 @@ class ConsolesV28SampleJsonTests(test_servers.ServersSampleBase):
body = {'protocol': 'mks', 'type': 'webmks'}
response = self._do_post('servers/%s/remote-consoles' % uuid,
- 'create-mks-console-req', body,
- api_version='2.8')
+ 'create-mks-console-req', body)
subs = {"url": self.http_regex}
self._verify_response('create-mks-console-resp', subs, response, 200)
diff --git a/nova/tests/functional/api_sample_tests/test_server_groups.py b/nova/tests/functional/api_sample_tests/test_server_groups.py
index 1cb256881b..66f4db6b5f 100644
--- a/nova/tests/functional/api_sample_tests/test_server_groups.py
+++ b/nova/tests/functional/api_sample_tests/test_server_groups.py
@@ -78,7 +78,7 @@ class ServerGroupsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
self.assertEqual(204, response.status_code)
-class ServerGroupsV213SampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
- extension_name = "os-server-groups"
- request_api_version = '2.13'
- scenarios = [('v2_13', {})]
+class ServerGroupsV213SampleJsonTest(ServerGroupsSampleJsonTest):
+ scenarios = [
+ ("v2_13", {'api_major_version': 'v2.1', 'microversion': '2.13'})
+ ]
diff --git a/nova/tests/functional/api_sample_tests/test_server_migrations.py b/nova/tests/functional/api_sample_tests/test_server_migrations.py
new file mode 100644
index 0000000000..0b063f5843
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/test_server_migrations.py
@@ -0,0 +1,52 @@
+# Copyright 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.conductor import manager as conductor_manager
+from nova import db
+from nova import objects
+from nova.tests.functional.api_sample_tests import test_servers
+
+
+class ServerMigrationsSampleJsonTest(test_servers.ServersSampleBase):
+ extension_name = 'server-migrations'
+ scenarios = [('v2_22', {'api_major_version': 'v2.1'})]
+ extra_extensions_to_load = ["os-migrate-server", "os-access-ips"]
+
+ def setUp(self):
+ """setUp method for server usage."""
+ super(ServerMigrationsSampleJsonTest, self).setUp()
+ self.uuid = self._post_server()
+ self.api.microversion = '2.22'
+
+ @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate')
+ @mock.patch.object(db, 'service_get_by_compute_host')
+ @mock.patch.object(objects.Migration, 'get_by_id_and_instance')
+ @mock.patch('nova.compute.manager.ComputeManager.'
+ 'live_migration_force_complete')
+ def test_live_migrate_force_complete(self, live_migration_pause_instance,
+ get_by_id_and_instance,
+ service_get_by_compute_host,
+ _live_migrate):
+ migration = objects.Migration()
+ migration.id = 1
+ migration.status = 'running'
+ get_by_id_and_instance.return_value = migration
+ self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server',
+ {'hostname': self.compute.host})
+ response = self._do_post('servers/%s/migrations/%s/action'
+ % (self.uuid, '3'), 'force_complete', {})
+ self.assertEqual(202, response.status_code)
diff --git a/nova/tests/functional/api_sample_tests/test_servers.py b/nova/tests/functional/api_sample_tests/test_servers.py
index 7d4288944e..3497679f4b 100644
--- a/nova/tests/functional/api_sample_tests/test_servers.py
+++ b/nova/tests/functional/api_sample_tests/test_servers.py
@@ -42,13 +42,22 @@ class ServersSampleBase(api_sample_base.ApiSampleTestBaseV21):
'access_ip_v4': '1.2.3.4',
'access_ip_v6': '80fe::'
}
+ # TODO(gmann): Remove this hack once all tests using this common
+ # _post_server method are enabled with all extensions.
+ # This is added to avoid having to update all tests together.
+ post_req_template = 'server-post-req'
+ post_resp_template = 'server-post-resp'
+ if self.all_extensions and use_common_server_api_samples:
+ post_req_template = 'server-create-req'
+ post_resp_template = 'server-create-resp'
+
orig_value = self.__class__._use_common_server_api_samples
orig_sample_dir = self.__class__.sample_dir
try:
self.__class__._use_common_server_api_samples = (
use_common_server_api_samples)
- response = self._do_post('servers', 'server-post-req', subs)
- status = self._verify_response('server-post-resp', subs,
+ response = self._do_post('servers', post_req_template, subs)
+ status = self._verify_response(post_resp_template, subs,
response, 202)
return status
finally:
@@ -60,6 +69,10 @@ class ServersSampleJsonTest(ServersSampleBase):
sample_dir = 'servers'
microversion = None
+ def setUp(self):
+ super(ServersSampleJsonTest, self).setUp()
+ self.api.microversion = self.microversion
+
def _get_flags(self):
f = super(ServersSampleBase, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
@@ -77,8 +90,7 @@ class ServersSampleJsonTest(ServersSampleBase):
def test_servers_get(self):
uuid = self.test_servers_post()
- response = self._do_get('servers/%s' % uuid,
- api_version=self.microversion)
+ response = self._do_get('servers/%s' % uuid)
subs = {}
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
@@ -90,15 +102,13 @@ class ServersSampleJsonTest(ServersSampleBase):
def test_servers_list(self):
uuid = self._post_server()
- response = self._do_get('servers',
- api_version=self.microversion)
+ response = self._do_get('servers')
subs = {'id': uuid}
self._verify_response('servers-list-resp', subs, response, 200)
def test_servers_details(self):
- uuid = self._post_server()
- response = self._do_get('servers/detail',
- api_version=self.microversion)
+ uuid = self.test_servers_post()
+ response = self._do_get('servers/detail')
subs = {}
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
@@ -117,6 +127,45 @@ class ServersSampleJson29Test(ServersSampleJsonTest):
scenarios = [('v2_9', {'api_major_version': 'v2.1'})]
+class ServersSampleJson219Test(ServersSampleJsonTest):
+ microversion = '2.19'
+ sample_dir = 'servers'
+ scenarios = [('v2_19', {'api_major_version': 'v2.1'})]
+
+ def test_servers_post(self):
+ return self._post_server(False)
+
+ def test_servers_put(self):
+ uuid = self.test_servers_post()
+ response = self._do_put('servers/%s' % uuid, 'server-put-req', {})
+ subs = {
+ 'image_id': fake.get_valid_image_id(),
+ 'hostid': '[a-f0-9]+',
+ 'glance_host': self._get_glance_host(),
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': '80fe::'
+ }
+ self._verify_response('server-put-resp', subs, response, 200)
+
+
+class ServersUpdateSampleJsonTest(ServersSampleBase):
+ sample_dir = 'servers'
+
+ # TODO(gmann): This will be removed once all API tests run with
+ # all extensions enabled.
+ all_extensions = True
+
+ def test_update_server(self):
+ uuid = self._post_server()
+ subs = {}
+ subs['hostid'] = '[a-f0-9]+'
+ subs['access_ip_v4'] = '1.2.3.4'
+ subs['access_ip_v6'] = '80fe::'
+ response = self._do_put('servers/%s' % uuid,
+ 'server-update-req', subs)
+ self._verify_response('server-update-resp', subs, response, 200)
+
+
class ServerSortKeysJsonTests(ServersSampleBase):
sample_dir = 'servers-sort'
@@ -173,9 +222,6 @@ class ServersActionsJsonTest(ServersSampleBase):
uuid = self._post_server()
image = fake.get_valid_image_id()
params = {
- 'host': self._get_host(),
- 'compute_endpoint': self._get_compute_endpoint(),
- 'versioned_compute_endpoint': self._get_vers_compute_endpoint(),
'uuid': image,
'name': 'foobar',
'pass': 'seekr3t',
@@ -195,7 +241,7 @@ class ServersActionsJsonTest(ServersSampleBase):
uuid = self._post_server()
self._test_server_action(uuid, "resize",
'server-action-resize',
- {"id": 2,
+ {"id": '2',
"host": self._get_host()})
return uuid
@@ -217,6 +263,31 @@ class ServersActionsJsonTest(ServersSampleBase):
{'name': 'foo-image'})
+class ServersActionsJson219Test(ServersSampleBase):
+ microversion = '2.19'
+ sample_dir = 'servers'
+ scenarios = [('v2_19', {'api_major_version': 'v2.1'})]
+
+ def test_server_rebuild(self):
+ uuid = self._post_server()
+ image = fake.get_valid_image_id()
+ params = {
+ 'uuid': image,
+ 'name': 'foobar',
+ 'description': 'description of foobar',
+ 'pass': 'seekr3t',
+ 'hostid': '[a-f0-9]+',
+ 'access_ip_v4': '1.2.3.4',
+ 'access_ip_v6': '80fe::',
+ }
+
+ resp = self._do_post('servers/%s/action' % uuid,
+ 'server-action-rebuild', params)
+ subs = params.copy()
+ del subs['uuid']
+ self._verify_response('server-action-rebuild-resp', subs, resp, 202)
+
+
class ServersActionsAllJsonTest(ServersActionsJsonTest):
all_extensions = True
sample_dir = None
@@ -267,3 +338,18 @@ class ServersSampleMultiStatusJsonTest(ServersSampleBase):
response = self._do_get('servers?status=active&status=error')
subs = {'id': uuid}
self._verify_response('servers-list-resp', subs, response, 200)
+
+
+class ServerTriggerCrashDumpJsonTest(ServersSampleBase):
+ sample_dir = 'servers'
+ microversion = '2.17'
+ scenarios = [('v2_17', {'api_major_version': 'v2.1'})]
+
+ def test_trigger_crash_dump(self):
+ uuid = self._post_server()
+
+ response = self._do_post('servers/%s/action' % uuid,
+ 'server-action-trigger-crash-dump',
+ {})
+ self.assertEqual(202, response.status_code)
+ self.assertEqual("", response.content)
diff --git a/nova/tests/functional/api_sample_tests/test_services.py b/nova/tests/functional/api_sample_tests/test_services.py
index c51ebc226a..fef17f8dfa 100644
--- a/nova/tests/functional/api_sample_tests/test_services.py
+++ b/nova/tests/functional/api_sample_tests/test_services.py
@@ -19,6 +19,7 @@ from oslo_utils import fixture as utils_fixture
from nova.tests.functional.api_sample_tests import api_sample_base
from nova.tests.unit.api.openstack.compute import test_services
+
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
@@ -43,6 +44,7 @@ class ServicesJsonTest(api_sample_base.ApiSampleTestBaseV21):
def setUp(self):
super(ServicesJsonTest, self).setUp()
+ self.api.microversion = self.microversion
self.stub_out("nova.db.service_get_all",
test_services.fake_db_api_service_get_all)
self.stub_out("nova.db.service_get_by_host_and_binary",
@@ -53,8 +55,7 @@ class ServicesJsonTest(api_sample_base.ApiSampleTestBaseV21):
def test_services_list(self):
"""Return a list of all agent builds."""
- response = self._do_get('os-services',
- api_version=self.microversion)
+ response = self._do_get('os-services')
subs = {'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
@@ -67,8 +68,7 @@ class ServicesJsonTest(api_sample_base.ApiSampleTestBaseV21):
subs = {"host": "host1",
'binary': 'nova-compute'}
response = self._do_put('os-services/enable',
- 'service-enable-put-req', subs,
- api_version=self.microversion)
+ 'service-enable-put-req', subs)
self._verify_response('service-enable-put-resp', subs, response, 200)
def test_service_disable(self):
@@ -76,8 +76,7 @@ class ServicesJsonTest(api_sample_base.ApiSampleTestBaseV21):
subs = {"host": "host1",
'binary': 'nova-compute'}
response = self._do_put('os-services/disable',
- 'service-disable-put-req', subs,
- api_version=self.microversion)
+ 'service-disable-put-req', subs)
self._verify_response('service-disable-put-resp', subs, response, 200)
def test_service_disable_log_reason(self):
@@ -86,15 +85,13 @@ class ServicesJsonTest(api_sample_base.ApiSampleTestBaseV21):
'binary': 'nova-compute',
'disabled_reason': 'test2'}
response = self._do_put('os-services/disable-log-reason',
- 'service-disable-log-put-req', subs,
- api_version=self.microversion)
+ 'service-disable-log-put-req', subs)
self._verify_response('service-disable-log-put-resp',
subs, response, 200)
def test_service_delete(self):
"""Delete an existing service."""
- response = self._do_delete('os-services/1',
- api_version=self.microversion)
+ response = self._do_delete('os-services/1')
self.assertEqual(204, response.status_code)
self.assertEqual("", response.content)
@@ -107,8 +104,7 @@ class ServicesV211JsonTest(ServicesJsonTest):
def test_services_list(self):
"""Return a list of all agent builds."""
- response = self._do_get('os-services',
- api_version=self.microversion)
+ response = self._do_get('os-services')
subs = {'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
@@ -123,7 +119,6 @@ class ServicesV211JsonTest(ServicesJsonTest):
'binary': 'nova-compute',
'forced_down': 'true'}
response = self._do_put('os-services/force-down',
- 'service-force-down-put-req', subs,
- api_version=self.microversion)
+ 'service-force-down-put-req', subs)
self._verify_response('service-force-down-put-resp', subs,
response, 200)
diff --git a/nova/tests/functional/api_sample_tests/test_versions.py b/nova/tests/functional/api_sample_tests/test_versions.py
index 00a29fe2b9..96222b572f 100644
--- a/nova/tests/functional/api_sample_tests/test_versions.py
+++ b/nova/tests/functional/api_sample_tests/test_versions.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova.api.openstack import api_version_request as avr
from nova.tests.functional.api_sample_tests import api_sample_base
@@ -22,10 +23,12 @@ class VersionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
# as those do not send requests to a particular endpoint, and running
# its tests alone is enough.
scenarios = []
+ max_api_version = avr.max_api_version().get_string()
def test_versions_get(self):
response = self._do_get('', strip_version=True)
- self._verify_response('versions-get-resp', {},
+ self._verify_response('versions-get-resp',
+ {'max_api_version': self.max_api_version},
response, 200, update_links=False)
def test_versions_get_v2(self):
@@ -35,5 +38,6 @@ class VersionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
def test_versions_get_v21(self):
response = self._do_get('/v2.1', strip_version=True)
- self._verify_response('v21-version-get-resp', {},
+ self._verify_response('v21-version-get-resp',
+ {'max_api_version': self.max_api_version},
response, 200, update_links=False)
diff --git a/nova/tests/functional/api_sample_tests/test_volumes.py b/nova/tests/functional/api_sample_tests/test_volumes.py
index 26a33db1f7..8eba7b6439 100644
--- a/nova/tests/functional/api_sample_tests/test_volumes.py
+++ b/nova/tests/functional/api_sample_tests/test_volumes.py
@@ -111,8 +111,6 @@ class VolumesSampleJsonTest(test_servers.ServersSampleBase):
'id': id,
'size': size,
'availability_zone': 'zone1:host1',
- 'instance_uuid': '3912f2b4-c5ba-4aec-9165-872876fe202e',
- 'mountpoint': '/',
'status': 'in-use',
'attach_status': 'attached',
'name': 'vol name',
@@ -122,7 +120,14 @@ class VolumesSampleJsonTest(test_servers.ServersSampleBase):
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
- 'volume_type': {'name': 'Backup'}
+ 'volume_type': {'name': 'Backup'},
+ 'multiattach': False,
+ 'attachments': {'3912f2b4-c5ba-4aec-9165-872876fe202e':
+ {'mountpoint': '/',
+ 'attachment_id':
+ 'a26887c6-c47b-4654-abb5-dfadf7d3f803'
+ }
+ }
}
return volume
diff --git a/nova/tests/functional/api_samples_test_base.py b/nova/tests/functional/api_samples_test_base.py
index 68eb36551e..867859d036 100644
--- a/nova/tests/functional/api_samples_test_base.py
+++ b/nova/tests/functional/api_samples_test_base.py
@@ -67,6 +67,27 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
microversion = None
_use_common_server_api_samples = False
+ def __init__(self, *args, **kwargs):
+ super(ApiSampleTestBase, self).__init__(*args, **kwargs)
+ self.subs = {} # TODO(auggy): subs should really be a class
+
+ @property
+ def subs(self):
+ return self._subs
+
+ @subs.setter
+ def subs(self, value):
+ non_strings = {k: v for k, v in value.items() if
+ k != 'compute_host' and
+ not isinstance(v, six.string_types)}
+ if non_strings:
+ raise TypeError("subs can't contain non-string values:"
+ "\n%(non_strings)s" %
+ {'non_strings': non_strings})
+ else:
+ self._subs = value
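+
+ # The setter above means substitution values must be strings: e.g.
+ # subs = {'flavor_id': '10'} is accepted, while subs = {'flavor_id': 10}
+ # raises TypeError, which is why the sample tests above now pass
+ # ids as strings.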
+
@classmethod
def _get_sample_path(cls, name, dirname, suffix='', api_version=None):
parts = [dirname]
@@ -125,9 +146,20 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
name, self.microversion), 'w') as outf:
outf.write(data)
- def _compare_result(self, subs, expected, result, result_str):
+ def _compare_result(self, expected, result, result_str):
+
matched_value = None
- if isinstance(expected, dict):
+ # None
+ if expected is None:
+ if result is None:
+ pass
+ elif result == u'':
+ pass # TODO(auggy): known issue Bug#1544720
+ else:
+ raise NoMatch('%(result_str)s: Expected None, got %(result)s.'
+ % {'result_str': result_str, 'result': result})
+ # dictionary
+ elif isinstance(expected, dict):
if not isinstance(result, dict):
raise NoMatch('%(result_str)s: %(result)s is not a dict.'
% {'result_str': result_str, 'result': result})
@@ -149,9 +181,11 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
{'ex_delta': ex_delta, 'result_str': result_str,
'res_delta': res_delta})
for key in ex_keys:
- res = self._compare_result(subs, expected[key], result[key],
+ # TODO(auggy): pass key name along as well for error reporting
+ res = self._compare_result(expected[key], result[key],
result_str)
matched_value = res or matched_value
+ # list
elif isinstance(expected, list):
if not isinstance(result, list):
raise NoMatch(
@@ -163,7 +197,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
for res_obj in result:
for i, ex_obj in enumerate(expected):
try:
- matched_value = self._compare_result(subs, ex_obj,
+ matched_value = self._compare_result(ex_obj,
res_obj,
result_str)
del expected[i]
@@ -185,6 +219,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
if error:
raise NoMatch('\n'.join(error))
+ # template string
elif isinstance(expected, six.string_types) and '%' in expected:
# NOTE(vish): escape stuff for regex
for char in '[]<>?':
@@ -195,7 +230,8 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
if expected.startswith("%(int:"):
result = str(result)
expected = expected.replace('int:', '')
- expected = expected % subs
+
+ expected = expected % self.subs
expected = '^%s$' % expected
match = re.match(expected, result)
if not match:
@@ -209,12 +245,14 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
except IndexError:
if match.groups():
matched_value = match.groups()[0]
- else:
- if isinstance(expected, six.string_types):
- # NOTE(danms): Ignore whitespace in this comparison
- expected = expected.strip()
- if isinstance(result, six.string_types):
- result = result.strip()
+ # string
+ elif isinstance(expected, six.string_types):
+
+ # NOTE(danms): Ignore whitespace in this comparison
+ expected = expected.strip()
+ if isinstance(result, six.string_types):
+ result = result.strip()
+
if expected != result:
# NOTE(tdurakov):this attempt to parse string as JSON
# is needed for correct comparison of hypervisor.cpu_info,
@@ -225,7 +263,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
try:
expected = objectify(expected)
result = objectify(result)
- return self._compare_result(subs, expected, result,
+ return self._compare_result(expected, result,
result_str)
except ValueError:
pass
@@ -235,6 +273,21 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
'%(result)s' % {'expected': expected,
'result_str': result_str,
'result': result})
+ # int
+ elif isinstance(expected, (six.integer_types, float)):
+ if expected != result:
+ raise NoMatch(
+ 'Values do not match:\n'
+ 'Template: %(expected)s\n%(result_str)s: '
+ '%(result)s' % {'expected': expected,
+ 'result_str': result_str,
+ 'result': result})
+
+ else:
+ raise ValueError(
+ 'Unexpected type %(expected_type)s'
+ % {'expected_type': type(expected)})
+
return matched_value
def generalize_subs(self, subs, vanilla_regexes):
@@ -250,13 +303,24 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
def _update_links(self, sample_data):
"""Process sample data and update version specific links."""
- url_re = self._get_host() + "/v(2\.1|2)"
+ # replace version urls
+ url_re = self._get_host() + "/v(2|2\.1)/openstack"
new_url = self._get_host() + "/" + self.api_major_version
+ if self._project_id:
+ new_url += "/openstack"
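+ # e.g. a "/v2.1/openstack" prefix becomes "/v2.1" when the tests
+ # run without a project id in the URL.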
updated_data = re.sub(url_re, new_url, sample_data)
+
+ # replace unversioned urls
+ url_re = self._get_host() + "/openstack"
+ new_url = self._get_host()
+ if self._project_id:
+ new_url += "/openstack"
+ updated_data = re.sub(url_re, new_url, updated_data)
return updated_data
def _verify_response(self, name, subs, response, exp_code,
update_links=True):
+
# Always also include the laundry list of base regular
# expressions for possible key values in our templates. Test
# specific patterns (the value of ``subs``) can override
@@ -264,6 +328,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
regexes = self._get_regexes()
regexes.update(subs)
subs = regexes
+ self.subs = subs
self.assertEqual(exp_code, response.status_code)
response_data = response.content
response_data = pretty_data(response_data)
@@ -280,7 +345,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
self._write_sample(name, response_data)
sample_data = response_data
else:
- with file(self._get_sample(name,
+ with open(self._get_sample(name,
self.microversion)) as sample:
sample_data = sample.read()
if update_links:
@@ -289,7 +354,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
try:
template_data = objectify(template_data)
response_data = objectify(response_data)
- response_result = self._compare_result(subs, template_data,
+ response_result = self._compare_result(template_data,
response_data, "Response")
# NOTE(danms): replace some of the subs with patterns for the
# doc/api_samples check, which won't have things like the
@@ -301,8 +366,9 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
subs['uuid'] = vanilla_regexes['uuid']
subs['image_id'] = vanilla_regexes['uuid']
subs = self.generalize_subs(subs, vanilla_regexes)
+ self.subs = subs
sample_data = objectify(sample_data)
- self._compare_result(subs, template_data, sample_data, "Sample")
+ self._compare_result(template_data, sample_data, "Sample")
return response_result
except NoMatch:
raise
@@ -360,13 +426,19 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
def _get_compute_endpoint(self):
# NOTE(sdague): "openstack" is a stand-in for project_id; it
# should be more generic in the future.
- return '%s/%s' % (self._get_host(), 'openstack')
+ if self._project_id:
+ return '%s/%s' % (self._get_host(), 'openstack')
+ else:
+ return self._get_host()
def _get_vers_compute_endpoint(self):
# NOTE(sdague): "openstack" is a stand-in for project_id; it
# should be more generic in the future.
- return '%s/%s/%s' % (self._get_host(), self.api_major_version,
- 'openstack')
+ if self._project_id:
+ return '%s/%s/%s' % (self._get_host(), self.api_major_version,
+ 'openstack')
+ else:
+ return '%s/%s' % (self._get_host(), self.api_major_version)
def _get_response(self, url, method, body=None, strip_version=False,
api_version=None, headers=None):
@@ -394,7 +466,8 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
def _do_post(self, url, name, subs, method='POST', api_version=None,
headers=None):
- body = self._read_template(name) % subs
+ self.subs = subs
+ body = self._read_template(name) % self.subs
sample = self._get_sample(name, self.microversion)
if self.generate_samples and not os.path.exists(sample):
self._write_sample(name, body)
diff --git a/nova/tests/functional/db/api/test_migrations.py b/nova/tests/functional/db/api/test_migrations.py
index 67d5e11aa9..8541cf2576 100644
--- a/nova/tests/functional/db/api/test_migrations.py
+++ b/nova/tests/functional/db/api/test_migrations.py
@@ -33,7 +33,6 @@ import os
from migrate.versioning import repository
import mock
-from oslo_config import cfg
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as db_utils
@@ -47,9 +46,6 @@ from nova.db.sqlalchemy import migration as sa_migration
from nova import test
-CONF = cfg.CONF
-
-
class NovaAPIModelsSync(test_migrations.ModelsMigrationsSync):
"""Test that the models match the database after migrations are run."""
@@ -198,6 +194,50 @@ class NovaAPIMigrationsWalk(test_migrations.WalkVersionsMixin):
self.assertIndexExists(engine, 'request_specs',
'request_spec_instance_uuid_idx')
+ def _check_005(self, engine, data):
+ # flavors
+ for column in ['created_at', 'updated_at', 'name', 'id', 'memory_mb',
+ 'vcpus', 'swap', 'vcpu_weight', 'flavorid', 'rxtx_factor',
+ 'root_gb', 'ephemeral_gb', 'disabled', 'is_public']:
+ self.assertColumnExists(engine, 'flavors', column)
+ self.assertUniqueConstraintExists(engine, 'flavors',
+ ['flavorid'])
+ self.assertUniqueConstraintExists(engine, 'flavors',
+ ['name'])
+
+ # flavor_extra_specs
+ for column in ['created_at', 'updated_at', 'id', 'flavor_id', 'key',
+ 'value']:
+ self.assertColumnExists(engine, 'flavor_extra_specs', column)
+
+ if engine.name != 'ibm_db_sa':
+ self.assertIndexExists(engine, 'flavor_extra_specs',
+ 'flavor_extra_specs_flavor_id_key_idx')
+ self.assertUniqueConstraintExists(engine, 'flavor_extra_specs',
+ ['flavor_id', 'key'])
+
+ inspector = reflection.Inspector.from_engine(engine)
+ # There should only be one foreign key here
+ fk = inspector.get_foreign_keys('flavor_extra_specs')[0]
+ self.assertEqual('flavors', fk['referred_table'])
+ self.assertEqual(['id'], fk['referred_columns'])
+ self.assertEqual(['flavor_id'], fk['constrained_columns'])
+
+ # flavor_projects
+ for column in ['created_at', 'updated_at', 'id', 'flavor_id',
+ 'project_id']:
+ self.assertColumnExists(engine, 'flavor_projects', column)
+
+ self.assertUniqueConstraintExists(engine, 'flavor_projects',
+ ['flavor_id', 'project_id'])
+
+ inspector = reflection.Inspector.from_engine(engine)
+ # There should only be one foreign key here
+ fk = inspector.get_foreign_keys('flavor_projects')[0]
+ self.assertEqual('flavors', fk['referred_table'])
+ self.assertEqual(['id'], fk['referred_columns'])
+ self.assertEqual(['flavor_id'], fk['constrained_columns'])
+
class TestNovaAPIMigrationsWalkSQLite(NovaAPIMigrationsWalk,
test_base.DbTestCase,
diff --git a/nova/tests/functional/db/test_flavor_model.py b/nova/tests/functional/db/test_flavor_model.py
new file mode 100644
index 0000000000..5cf484c054
--- /dev/null
+++ b/nova/tests/functional/db/test_flavor_model.py
@@ -0,0 +1,61 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.db.sqlalchemy import api_models
+from nova.db.sqlalchemy import models
+from nova import test
+
+
+class FlavorTablesCompareTestCase(test.NoDBTestCase):
+ def _get_columns_list(self, model):
+ columns_list = [m.key for m in model.__table__.columns]
+ return columns_list
+
+ def _check_column_list(self, columns_new, columns_old):
+ columns_old.remove('deleted_at')
+ columns_old.remove('deleted')
+ intersect = set(columns_new).intersection(set(columns_old))
+ if intersect != set(columns_new) or intersect != set(columns_old):
+ return False
+ return True
+
+ def test_tables_flavors_instance_types(self):
+ flavors = api_models.Flavors()
+ instance_types = models.InstanceTypes()
+ columns_flavors = self._get_columns_list(flavors)
+ columns_instance_types = self._get_columns_list(instance_types)
+ self.assertTrue(self._check_column_list(columns_flavors,
+ columns_instance_types))
+
+ def test_tables_flavor_instance_type_extra_specs(self):
+ flavor_extra_specs = api_models.FlavorExtraSpecs()
+ instance_type_extra_specs = models.InstanceTypeExtraSpecs()
+ columns_flavor_extra_specs = self._get_columns_list(flavor_extra_specs)
+ columns_instance_type_extra_specs = self._get_columns_list(
+ instance_type_extra_specs)
+ columns_flavor_extra_specs.remove('flavor_id')
+ columns_instance_type_extra_specs.remove('instance_type_id')
+ self.assertTrue(self._check_column_list(
+ columns_flavor_extra_specs,
+ columns_instance_type_extra_specs))
+
+ def test_tables_flavor_instance_type_projects(self):
+ flavor_projects = api_models.FlavorProjects()
+ instance_types_projects = models.InstanceTypeProjects()
+ columns_flavor_projects = self._get_columns_list(flavor_projects)
+ columns_instance_type_projects = self._get_columns_list(
+ instance_types_projects)
+ columns_flavor_projects.remove('flavor_id')
+ columns_instance_type_projects.remove('instance_type_id')
+ self.assertTrue(self._check_column_list(
+ columns_flavor_projects,
+ columns_instance_type_projects))
diff --git a/nova/tests/functional/db/test_request_spec.py b/nova/tests/functional/db/test_request_spec.py
index 95dfa976b0..01e6aa725a 100644
--- a/nova/tests/functional/db/test_request_spec.py
+++ b/nova/tests/functional/db/test_request_spec.py
@@ -31,10 +31,9 @@ class RequestSpecTestCase(test.NoDBTestCase):
args = fake_request_spec.fake_db_spec()
args.pop('id', None)
self.instance_uuid = args['instance_uuid']
- spec = request_spec.RequestSpec._from_db_object(self.context,
- self.spec_obj,
+ request_spec.RequestSpec._from_db_object(self.context, self.spec_obj,
self.spec_obj._create_in_db(self.context, args))
- return spec
+ return self.spec_obj
def test_get_by_instance_uuid_not_found(self):
self.assertRaises(exception.RequestSpecNotFound,
diff --git a/nova/tests/functional/db/test_resource_provider.py b/nova/tests/functional/db/test_resource_provider.py
new file mode 100644
index 0000000000..e51b31b5af
--- /dev/null
+++ b/nova/tests/functional/db/test_resource_provider.py
@@ -0,0 +1,112 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import context
+from nova import exception
+from nova import objects
+from nova.objects import fields
+from nova import test
+from nova.tests import fixtures
+from nova.tests import uuidsentinel
+
+DISK_INVENTORY = dict(
+ total=200,
+ reserved=10,
+ min_unit=2,
+ max_unit=5,
+ step_size=1,
+ allocation_ratio=1.0
+)
+
+
+class ResourceProviderTestCase(test.NoDBTestCase):
+ """Test resource-provider objects' lifecycles."""
+
+ def setUp(self):
+ super(ResourceProviderTestCase, self).setUp()
+ self.useFixture(fixtures.Database())
+ self.context = context.RequestContext('fake-user', 'fake-project')
+
+ def test_create_resource_provider_requires_uuid(self):
+ resource_provider = objects.ResourceProvider(
+ context=self.context)
+ self.assertRaises(exception.ObjectActionError,
+ resource_provider.create)
+
+ def test_create_resource_provider(self):
+ created_resource_provider = objects.ResourceProvider(
+ context=self.context,
+ uuid=uuidsentinel.fake_resource_provider
+ )
+ created_resource_provider.create()
+ self.assertIsInstance(created_resource_provider.id, int)
+
+ retrieved_resource_provider = objects.ResourceProvider.get_by_uuid(
+ self.context,
+ uuidsentinel.fake_resource_provider
+ )
+ self.assertEqual(retrieved_resource_provider.id,
+ created_resource_provider.id)
+
+ def test_create_inventory_with_uncreated_provider(self):
+ resource_provider = objects.ResourceProvider(
+ context=self.context,
+ uuid=uuidsentinel.inventory_resource_provider
+ )
+ resource_class = fields.ResourceClass.DISK_GB
+ disk_inventory = objects.Inventory(
+ context=self.context,
+ resource_provider=resource_provider,
+ resource_class=resource_class,
+ **DISK_INVENTORY
+ )
+ self.assertRaises(exception.ObjectActionError,
+ disk_inventory.create)
+
+ def test_create_and_update_inventory(self):
+ resource_provider = objects.ResourceProvider(
+ context=self.context,
+ uuid=uuidsentinel.inventory_resource_provider
+ )
+ resource_provider.create()
+ resource_class = fields.ResourceClass.DISK_GB
+ disk_inventory = objects.Inventory(
+ context=self.context,
+ resource_provider=resource_provider,
+ resource_class=resource_class,
+ **DISK_INVENTORY
+ )
+ disk_inventory.create()
+
+ self.assertEqual(resource_class, disk_inventory.resource_class)
+ self.assertEqual(resource_provider,
+ disk_inventory.resource_provider)
+ self.assertEqual(DISK_INVENTORY['allocation_ratio'],
+ disk_inventory.allocation_ratio)
+ self.assertEqual(DISK_INVENTORY['total'],
+ disk_inventory.total)
+
+ disk_inventory.total = 32
+ disk_inventory.save()
+
+ inventories = objects.InventoryList.get_all_by_resource_provider_uuid(
+ self.context, resource_provider.uuid)
+
+ self.assertEqual(1, len(inventories))
+ self.assertEqual(32, inventories[0].total)
+
+ inventories[0].total = 33
+ inventories[0].save()
+ reloaded_inventories = (
+ objects.InventoryList.get_all_by_resource_provider_uuid(
+ self.context, resource_provider.uuid))
+ self.assertEqual(33, reloaded_inventories[0].total)
diff --git a/nova/tests/functional/integrated_helpers.py b/nova/tests/functional/integrated_helpers.py
index c5e25c6d92..bb890d86ad 100644
--- a/nova/tests/functional/integrated_helpers.py
+++ b/nova/tests/functional/integrated_helpers.py
@@ -92,8 +92,7 @@ class _IntegratedTestBase(test.TestCase):
return self.start_service('compute')
def _setup_scheduler_service(self):
- self.flags(scheduler_driver='nova.scheduler.'
- 'chance.ChanceScheduler')
+ self.flags(scheduler_driver='chance_scheduler')
return self.start_service('scheduler')
def _setup_services(self):
diff --git a/nova/tests/functional/libvirt/test_numa_servers.py b/nova/tests/functional/libvirt/test_numa_servers.py
index cd7e0e6a77..52d1a56915 100644
--- a/nova/tests/functional/libvirt/test_numa_servers.py
+++ b/nova/tests/functional/libvirt/test_numa_servers.py
@@ -79,8 +79,7 @@ class NUMAServersTest(ServersTestBase):
def _setup_scheduler_service(self):
self.flags(compute_driver='nova.virt.libvirt.LibvirtDriver')
- self.flags(scheduler_driver='nova.scheduler.'
- 'filter_scheduler.FilterScheduler')
+ self.flags(scheduler_driver='filter_scheduler')
self.flags(scheduler_default_filters=CONF.scheduler_default_filters
+ ['NUMATopologyFilter'])
return self.start_service('scheduler')
diff --git a/nova/tests/functional/libvirt/test_rt_servers.py b/nova/tests/functional/libvirt/test_rt_servers.py
index f7c9f51e14..98f1e89475 100644
--- a/nova/tests/functional/libvirt/test_rt_servers.py
+++ b/nova/tests/functional/libvirt/test_rt_servers.py
@@ -16,7 +16,6 @@
import mock
import fixtures
-from oslo_config import cfg
from oslo_log import log as logging
from nova.tests.functional.api import client
@@ -26,7 +25,6 @@ from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
-CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -85,6 +83,16 @@ class RealTimeServersTest(ServersTestBase):
client.OpenStackApiException,
self.api.post_server, {'server': server})
+ def test_no_realtime_mask(self):
+ flavor = self._create_flavor(extra_spec={
+ 'hw:cpu_realtime': 'yes', 'hw:cpu_policy': 'dedicated'})
+ server = self._build_server(flavor)
+
+ # Cannot set a realtime policy if no vcpus mask is defined
+ self.assertRaises(
+ client.OpenStackApiException,
+ self.api.post_server, {'server': server})
+
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
def test_invalid_libvirt_version(self, img_mock):
host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
@@ -99,7 +107,8 @@ class RealTimeServersTest(ServersTestBase):
fake_network.set_stub_network_methods(self)
flavor = self._create_flavor(extra_spec={
- 'hw:cpu_realtime': 'yes', 'hw:cpu_policy': 'dedicated'})
+ 'hw:cpu_realtime': 'yes', 'hw:cpu_policy': 'dedicated',
+ 'hw:cpu_realtime_mask': '^1'})
server = self._build_server(flavor)
created = self.api.post_server({'server': server})
diff --git a/nova/tests/functional/notification_sample_tests/__init__.py b/nova/tests/functional/notification_sample_tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/functional/notification_sample_tests/__init__.py
diff --git a/nova/tests/functional/notification_sample_tests/notification_sample_base.py b/nova/tests/functional/notification_sample_tests/notification_sample_base.py
new file mode 100644
index 0000000000..56724e4fee
--- /dev/null
+++ b/nova/tests/functional/notification_sample_tests/notification_sample_base.py
@@ -0,0 +1,92 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from oslo_serialization import jsonutils
+
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.unit import fake_notifier
+
+
+class NotificationSampleTestBase(test.TestCase):
+ """Base class for notification sample testing.
+
+ To add tests for a versioned notification, store a sample file under the
+ doc/notification_samples directory. In the test method of the subclass,
+ trigger a change in the system that is expected to generate the
+ notification, then use the _verify_notification function to assert that
+ the stored sample matches the generated one.
+
+ If the notification payload content depends on the state change you
+ triggered, the replacements parameter of the _verify_notification
+ function can be used to override values coming from the sample file.
+
+ See nova.functional.notification_sample_tests.test_service_update for an
+ example.
+ """
+
+ def setUp(self):
+ super(NotificationSampleTestBase, self).setUp()
+
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+
+ self.api = api_fixture.api
+ self.admin_api = api_fixture.admin_api
+
+ fake_notifier.stub_notifier(self.stubs)
+ self.addCleanup(fake_notifier.reset)
+
+ def _get_notification_sample(self, sample):
+ sample_dir = os.path.dirname(os.path.abspath(__file__))
+ sample_dir = os.path.normpath(os.path.join(
+ sample_dir,
+ "../../../../doc/notification_samples"))
+ return sample_dir + '/' + sample + '.json'
+
+ def _apply_replacements(self, replacements, sample_obj):
+ replacements = replacements or {}
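+ # A replacement key like 'x.y' walks payload -> nova_object.data ->
+ # x -> nova_object.data -> y and overrides that final value.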
+ for key, value in replacements.items():
+ obj = sample_obj['payload']
+ for sub_key in key.split('.')[:-1]:
+ obj = obj['nova_object.data'][sub_key]
+ obj['nova_object.data'][key.split('.')[-1]] = value
+
+ def _verify_notification(self, sample_file_name, replacements=None):
+ """Assert if the generated notification matches with the stored sample
+
+ :param sample_file_name: The name of the sample file to match relative
+ to doc/notification_samples
+ :param replacements: A dict of key value pairs that is used to update
+ the payload field of the sample data before it is
+ matched against the generated notification.
+ The 'x.y':'new-value' key-value pair selects the
+ ["payload"]["nova_object.data"]["x"]
+ ["nova_object.data"]["y"] value from the sample
+ data and overrides it with 'new-value'.
+ """
+
+ self.assertEqual(1, len(fake_notifier.VERSIONED_NOTIFICATIONS))
+ notification = fake_notifier.VERSIONED_NOTIFICATIONS[0]
+
+ with open(self._get_notification_sample(sample_file_name)) as sample:
+ sample_data = sample.read()
+
+ sample_obj = jsonutils.loads(sample_data)
+ self._apply_replacements(replacements, sample_obj)
+
+ self.assertJsonEqual(sample_obj, notification)
diff --git a/nova/tests/functional/notification_sample_tests/test_service_update.py b/nova/tests/functional/notification_sample_tests/test_service_update.py
new file mode 100644
index 0000000000..f8858c4baa
--- /dev/null
+++ b/nova/tests/functional/notification_sample_tests/test_service_update.py
@@ -0,0 +1,64 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import fixture as utils_fixture
+
+from nova.tests.functional.notification_sample_tests \
+ import notification_sample_base
+from nova.tests.unit.api.openstack.compute import test_services
+
+
+class TestServiceUpdateNotificationSample(
+ notification_sample_base.NotificationSampleTestBase):
+
+ def setUp(self):
+ super(TestServiceUpdateNotificationSample, self).setUp()
+ self.stub_out("nova.db.service_get_by_host_and_binary",
+ test_services.fake_service_get_by_host_binary)
+ self.stub_out("nova.db.service_update",
+ test_services.fake_service_update)
+ self.useFixture(utils_fixture.TimeFixture(test_services.fake_utcnow()))
+
+ def test_service_enable(self):
+ body = {'host': 'host1',
+ 'binary': 'nova-compute'}
+ self.admin_api.api_put('os-services/enable', body)
+ self._verify_notification('service-update')
+
+ def test_service_disabled(self):
+ body = {'host': 'host1',
+ 'binary': 'nova-compute'}
+ self.admin_api.api_put('os-services/disable', body)
+ self._verify_notification('service-update',
+ replacements={'disabled': True})
+
+ def test_service_disabled_log_reason(self):
+ body = {'host': 'host1',
+ 'binary': 'nova-compute',
+ 'disabled_reason': 'test2'}
+ self.admin_api.api_put('os-services/disable-log-reason', body)
+ self._verify_notification('service-update',
+ replacements={'disabled': True,
+ 'disabled_reason': 'test2'})
+
+ def test_service_force_down(self):
+ body = {'host': 'host1',
+ 'binary': 'nova-compute',
+ 'forced_down': True}
+ self.admin_api.microversion = '2.12'
+ self.admin_api.api_put('os-services/force-down', body)
+ self._verify_notification('service-update',
+ replacements={'forced_down': True,
+ 'disabled': True,
+ 'disabled_reason': 'test2'})
diff --git a/nova/tests/functional/regressions/README.rst b/nova/tests/functional/regressions/README.rst
new file mode 100644
index 0000000000..9c0ff7c9f6
--- /dev/null
+++ b/nova/tests/functional/regressions/README.rst
@@ -0,0 +1,24 @@
+================================
+ Tests for Specific Regressions
+================================
+
+When an end user reports a bug that we can reproduce with a full
+stack test, we should write that test and keep it in our tree as a
+regression test for the bug. It can be deleted at some future date if
+needed, but it should largely remain unchanged.
+
+Writing Regression Tests
+========================
+
+- These should be full stack tests that inherit directly from
+ nova.test.TestCase (this prevents coupling with other tests).
+
+- They should set up a full stack cloud in their setUp via fixtures.
+
+- Each test should live in a file named test_bug_######.py (see the
+ sketch below).
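+
+A minimal sketch (the bug number and assertions below are hypothetical)::
+
+ # nova/tests/functional/regressions/test_bug_0000000.py
+ from nova import test
+
+
+ class TestBug0000000(test.TestCase):
+
+ def setUp(self):
+ super(TestBug0000000, self).setUp()
+ # build the full stack cloud via fixtures here
+
+ def test_reproduce(self):
+ # exercise the API path that triggered the bug and assert
+ # the fixed behaviour
+ pass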
+
+Writing Tests Before the Bug is Fixed
+=====================================
+
+TODO: describe writing and landing tests as reproducers before the
+bug is fixed.
diff --git a/nova/tests/functional/regressions/__init__.py b/nova/tests/functional/regressions/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/functional/regressions/__init__.py
diff --git a/nova/tests/functional/regressions/test_bug_1522536.py b/nova/tests/functional/regressions/test_bug_1522536.py
new file mode 100644
index 0000000000..3c1c9b8a34
--- /dev/null
+++ b/nova/tests/functional/regressions/test_bug_1522536.py
@@ -0,0 +1,70 @@
+# Copyright 2016 HPE, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_config import cfg
+
+import nova.scheduler.utils
+import nova.servicegroup
+from nova import test
+from nova.tests import fixtures as nova_fixtures
+from nova.tests.functional.api import client
+from nova.tests.unit import cast_as_call
+import nova.tests.unit.image.fake
+from nova.tests.unit import policy_fixture
+
+CONF = cfg.CONF
+
+
+class TestServerGet(test.TestCase):
+ REQUIRES_LOCKING = True
+
+ def setUp(self):
+ super(TestServerGet, self).setUp()
+ self.useFixture(policy_fixture.RealPolicyFixture())
+ api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
+ api_version='v2.1'))
+
+ self.api = api_fixture.api
+
+ # the image fake backend needed for image discovery
+ nova.tests.unit.image.fake.stub_out_image_service(self)
+
+ self.start_service('conductor', manager=CONF.conductor.manager)
+ self.flags(scheduler_driver='chance_scheduler')
+ self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.compute = self.start_service('compute')
+
+ self.useFixture(cast_as_call.CastAsCall(self.stubs))
+ self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
+
+ self.image_id = self.api.get_images()[0]['id']
+ self.flavor_id = self.api.get_flavors()[0]['id']
+
+ def test_id_overlap(self):
+ """Regression test for bug #1522536.
+
+ Before this bug was fixed, a GET with a numeric id caused a 500
+ error: the numeric value was treated as the db index, the server
+ was fetched, but processing of extensions then blew up.
+
+ Now that the bug is fixed it returns a 404, which is expected.
+ In the future a 400 might be more appropriate.
+ """
+ server = dict(name='server1',
+ imageRef=self.image_id,
+ flavorRef=self.flavor_id)
+ self.api.post_server({'server': server})
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.get_server, 1)
diff --git a/nova/tests/functional/test_instance_actions.py b/nova/tests/functional/test_instance_actions.py
new file mode 100644
index 0000000000..82c515374d
--- /dev/null
+++ b/nova/tests/functional/test_instance_actions.py
@@ -0,0 +1,79 @@
+# Copyright 2016 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.tests.functional.api import client
+from nova.tests.functional import test_servers
+from nova.tests.unit import fake_network
+
+
+class InstanceActionsTestV2(test_servers.ServersTestBase):
+ """Tests Instance Actions API"""
+
+ def _create_server(self):
+ """Creates a minimal test server via the compute API
+
+ Ensures the server is created and can be retrieved from the compute API
+ and waits for it to be ACTIVE.
+
+ :returns: created server (dict)
+ """
+ # TODO(mriedem): We should pull this up into the parent class so we
+ # don't have so much copy/paste in these functional tests.
+ fake_network.set_stub_network_methods(self)
+
+ # Create a server
+ server = self._build_minimal_create_server_request()
+ created_server = self.api.post_server({'server': server})
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Check it's there
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+
+ found_server = self._wait_for_state_change(found_server, 'BUILD')
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+ return found_server
+
+ def test_get_instance_actions(self):
+ server = self._create_server()
+ actions = self.api.get_instance_actions(server['id'])
+ self.assertEqual('create', actions[0]['action'])
+
+ def test_get_instance_actions_deleted(self):
+ server = self._create_server()
+ self._delete_server(server['id'])
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.get_instance_actions,
+ server['id'])
+
+
+class InstanceActionsTestV21(InstanceActionsTestV2):
+ api_major_version = 'v2.1'
+
+
+class InstanceActionsTestV221(InstanceActionsTestV21):
+ microversion = '2.21'
+
+ def setUp(self):
+ super(InstanceActionsTestV221, self).setUp()
+ self.api.microversion = self.microversion
+
+ def test_get_instance_actions_deleted(self):
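+ # Microversion 2.21 allows reading instance actions for a deleted
+ # instance, so this overrides the parent test that expects a 404.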
+ server = self._create_server()
+ self._delete_server(server['id'])
+ actions = self.api.get_instance_actions(server['id'])
+ self.assertEqual('delete', actions[0]['action'])
+ self.assertEqual('create', actions[1]['action'])
diff --git a/nova/tests/functional/test_servers.py b/nova/tests/functional/test_servers.py
index 9780c0651c..4f55da6c97 100644
--- a/nova/tests/functional/test_servers.py
+++ b/nova/tests/functional/test_servers.py
@@ -510,3 +510,231 @@ class ServersTest(ServersTestBase):
class ServersTestV21(ServersTest):
api_major_version = 'v2.1'
+
+
+class ServersTestV219(ServersTestBase):
+ api_major_version = 'v2.1'
+
+ def _create_server(self, set_desc=True, desc=None):
+ server = self._build_minimal_create_server_request()
+ if set_desc:
+ server['description'] = desc
+ post = {'server': server}
+ response = self.api.api_post('/servers', post,
+ headers=self._headers).body
+ return (server, response['server'])
+
+ def _update_server(self, server_id, set_desc=True, desc=None):
+ new_name = integrated_helpers.generate_random_alphanumeric(8)
+ server = {'server': {'name': new_name}}
+ if set_desc:
+ server['server']['description'] = desc
+ self.api.api_put('/servers/%s' % server_id, server,
+ headers=self._headers)
+
+ def _rebuild_server(self, server_id, set_desc=True, desc=None):
+ new_name = integrated_helpers.generate_random_alphanumeric(8)
+ post = {}
+ post['rebuild'] = {
+ "name": new_name,
+ self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
+ self._access_ipv4_parameter: "172.19.0.2",
+ self._access_ipv6_parameter: "fe80::2",
+ "metadata": {'some': 'thing'},
+ }
+ post['rebuild'].update(self._get_access_ips_params())
+ if set_desc:
+ post['rebuild']['description'] = desc
+ self.api.api_post('/servers/%s/action' % server_id, post,
+ headers=self._headers)
+
+ def _create_server_and_verify(self, set_desc=True, expected_desc=None):
+ # Creates a server with a description and verifies it is
+ # in the GET responses.
+ created_server_id = self._create_server(set_desc,
+ expected_desc)[1]['id']
+ self._verify_server_description(created_server_id, expected_desc)
+ self._delete_server(created_server_id)
+
+ def _update_server_and_verify(self, server_id, set_desc=True,
+ expected_desc=None):
+ # Updates a server with a description and verifies it is
+ # in the GET responses.
+ self._update_server(server_id, set_desc, expected_desc)
+ self._verify_server_description(server_id, expected_desc)
+
+ def _rebuild_server_and_verify(self, server_id, set_desc=True,
+ expected_desc=None):
+ # Rebuilds a server with a description and verifies it is
+ # in the GET responses.
+ self._rebuild_server(server_id, set_desc, expected_desc)
+ self._verify_server_description(server_id, expected_desc)
+
+ def _verify_server_description(self, server_id, expected_desc=None,
+ desc_in_resp=True):
+ # Calls GET on the servers and verifies that the description
+ # is set as expected in the response, or not set at all.
+ response = self.api.api_get('/servers/%s' % server_id,
+ headers=self._headers)
+ found_server = response.body['server']
+ self.assertEqual(server_id, found_server['id'])
+ if desc_in_resp:
+ # Verify the description is set as expected (can be None)
+ self.assertEqual(expected_desc, found_server.get('description'))
+ else:
+ # Verify the description is not included in the response.
+ self.assertNotIn('description', found_server)
+
+ servers = self.api.api_get('/servers/detail',
+ headers=self._headers).body['servers']
+ server_map = {server['id']: server for server in servers}
+ found_server = server_map.get(server_id)
+ self.assertTrue(found_server)
+ if desc_in_resp:
+ # Verify the description is set as expected (can be None)
+ self.assertEqual(expected_desc, found_server.get('description'))
+ else:
+ # Verify the description is not included in the response.
+ self.assertNotIn('description', found_server)
+
+ def _create_assertRaisesRegex(self, desc):
+ # Verifies that a 400 error is thrown on create server
+ with self.assertRaisesRegex(client.OpenStackApiException,
+ ".*Unexpected status code.*") as cm:
+ self._create_server(True, desc)
+ self.assertEqual(400, cm.exception.response.status_code)
+
+ def _update_assertRaisesRegex(self, server_id, desc):
+ # Verifies that a 400 error is thrown on update server
+ with self.assertRaisesRegex(client.OpenStackApiException,
+ ".*Unexpected status code.*") as cm:
+ self._update_server(server_id, True, desc)
+ self.assertEqual(400, cm.exception.response.status_code)
+
+ def _rebuild_assertRaisesRegex(self, server_id, desc):
+ # Verifies that a 400 error is thrown on rebuild server
+ with self.assertRaisesRegex(client.OpenStackApiException,
+ ".*Unexpected status code.*") as cm:
+ self._rebuild_server(server_id, True, desc)
+ self.assertEqual(400, cm.exception.response.status_code)
+
+ def test_create_server_with_description(self):
+ fake_network.set_stub_network_methods(self)
+
+ self._headers = {}
+ self._headers['X-OpenStack-Nova-API-Version'] = '2.19'
+
+ # Create and get a server with a description
+ self._create_server_and_verify(True, 'test description')
+ # Create and get a server with an empty description
+ self._create_server_and_verify(True, '')
+ # Create and get a server with description set to None
+ self._create_server_and_verify()
+ # Create and get a server without setting the description
+ self._create_server_and_verify(False)
+
+ def test_update_server_with_description(self):
+ fake_network.set_stub_network_methods(self)
+
+ self._headers = {}
+ self._headers['X-OpenStack-Nova-API-Version'] = '2.19'
+
+ # Create a server with an initial description
+ server_id = self._create_server(True, 'test desc 1')[1]['id']
+
+ # Update and get the server with a description
+ self._update_server_and_verify(server_id, True, 'updated desc')
+ # Update and get the server name without changing the description
+ self._update_server_and_verify(server_id, False, 'updated desc')
+ # Update and get the server with an empty description
+ self._update_server_and_verify(server_id, True, '')
+ # Update and get the server by removing the description (set to None)
+ self._update_server_and_verify(server_id)
+ # Update and get the server with a 2nd new description
+ self._update_server_and_verify(server_id, True, 'updated desc2')
+
+ # Cleanup
+ self._delete_server(server_id)
+
+ def test_rebuild_server_with_description(self):
+ fake_network.set_stub_network_methods(self)
+
+ self._headers = {}
+ self._headers['X-OpenStack-Nova-API-Version'] = '2.19'
+
+ # Create a server with an initial description
+ server = self._create_server(True, 'test desc 1')[1]
+ server_id = server['id']
+ self._wait_for_state_change(server, 'BUILD')
+
+ # Rebuild and get the server with a description
+ self._rebuild_server_and_verify(server_id, True, 'updated desc')
+ # Rebuild and get the server name without changing the description
+ self._rebuild_server_and_verify(server_id, False, 'updated desc')
+ # Rebuild and get the server with an empty description
+ self._rebuild_server_and_verify(server_id, True, '')
+ # Rebuild and get the server by removing the description (set to None)
+ self._rebuild_server_and_verify(server_id)
+ # Rebuild and get the server with a 2nd new description
+ self._rebuild_server_and_verify(server_id, True, 'updated desc2')
+
+ # Cleanup
+ self._delete_server(server_id)
+
+ def test_version_compatibility(self):
+ fake_network.set_stub_network_methods(self)
+
+ # Create a server with microversion v2.19 and a description.
+ self._headers = {}
+ self._headers['X-OpenStack-Nova-API-Version'] = '2.19'
+ server_id = self._create_server(True, 'test desc 1')[1]['id']
+ # Verify that the description is not included on V2.18 GETs
+ self._headers['X-OpenStack-Nova-API-Version'] = '2.18'
+ self._verify_server_description(server_id, desc_in_resp=False)
+ # Verify that updating the server with description on V2.18
+ # results in a 400 error
+ self._update_assertRaisesRegex(server_id, 'test update 2.18')
+ # Verify that rebuilding the server with description on V2.18
+ # results in a 400 error
+ self._rebuild_assertRaisesRegex(server_id, 'test rebuild 2.18')
+
+ # Cleanup
+ self._delete_server(server_id)
+
+ # Create a server on V2.18 and verify that the description
+ # defaults to the name on a V2.19 GET
+ self._headers['X-OpenStack-Nova-API-Version'] = '2.18'
+ server_req, response = self._create_server(False)
+ server_id = response['id']
+ self._headers['X-OpenStack-Nova-API-Version'] = '2.19'
+ self._verify_server_description(server_id, server_req['name'])
+
+ # Cleanup
+ self._delete_server(server_id)
+
+ # Verify that creating a server with description on V2.18
+ # results in a 400 error
+ self._headers['X-OpenStack-Nova-API-Version'] = '2.18'
+ self._create_assertRaisesRegex('test create 2.18')
+
+ def test_description_errors(self):
+ fake_network.set_stub_network_methods(self)
+
+ self._headers = {}
+ self._headers['X-OpenStack-Nova-API-Version'] = '2.19'
+
+ # Create servers with invalid descriptions. These throw 400.
+ # Invalid unicode with non-printable control char
+ self._create_assertRaisesRegex(u'invalid\0dstring')
+ # Description is longer than 255 chars
+ self._create_assertRaisesRegex('x' * 256)
+
+ # Update and rebuild servers with invalid descriptions.
+ # These throw 400.
+ server_id = self._create_server(True, "desc")[1]['id']
+ # Invalid unicode with non-printable control char
+ self._update_assertRaisesRegex(server_id, u'invalid\u0604string')
+ self._rebuild_assertRaisesRegex(server_id, u'invalid\u0604string')
+ # Description is longer than 255 chars
+ self._update_assertRaisesRegex(server_id, 'x' * 256)
+ self._rebuild_assertRaisesRegex(server_id, 'x' * 256)
diff --git a/nova/tests/live_migration/hooks/ceph.sh b/nova/tests/live_migration/hooks/ceph.sh
new file mode 100755
index 0000000000..2ee752812e
--- /dev/null
+++ b/nova/tests/live_migration/hooks/ceph.sh
@@ -0,0 +1,317 @@
+#!/bin/bash
+
+CEPH_REPLICAS=2
+
+function setup_ceph_cluster {
+ install_ceph_full
+ configure_ceph_local
+
+ echo "copy ceph.conf and admin keyring to compute only nodes"
+ ls -la /etc/ceph
+ sudo cp /etc/ceph/ceph.conf /tmp/ceph.conf
+ sudo chown ${STACK_USER}:${STACK_USER} /tmp/ceph.conf
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.conf dest=/etc/ceph/ceph.conf owner=root group=root"
+ sudo rm -f /tmp/ceph.conf
+ sudo cp /etc/ceph/ceph.client.admin.keyring /tmp/ceph.client.admin.keyring
+ sudo chown ${STACK_USER}:${STACK_USER} /tmp/ceph.client.admin.keyring
+ sudo chmod 644 /tmp/ceph.client.admin.keyring
+ ls -la /tmp
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.admin.keyring dest=/etc/ceph/ceph.client.admin.keyring owner=root group=root"
+ sudo rm -f /tmp/ceph.client.admin.keyring
+ echo "check result of copying files"
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ls -la /etc/ceph"
+
+
+ echo "start ceph-mon"
+ sudo initctl emit ceph-mon id=$(hostname)
+ echo "start ceph-osd"
+ sudo start ceph-osd id=${OSD_ID}
+ echo "check ceph-osd before second node addition"
+ wait_for_ceph_up
+
+ configure_ceph_remote
+
+ echo "check ceph-osd tree"
+ wait_for_ceph_up
+}
+
+function install_ceph_full {
+ if uses_debs; then
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m apt \
+ -a "name=ceph state=present"
+ elif is_fedora; then
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m yum \
+ -a "name=ceph state=present"
+ fi
+}
+
+function configure_ceph_local {
+ sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}
+
+ # create ceph monitor initial key and directory
+ sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) \
+ --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) \
+ --cap mon 'allow *'
+ sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
+
+ # create a default ceph configuration file
+ sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
+[global]
+fsid = ${CEPH_FSID}
+mon_initial_members = $(hostname)
+mon_host = ${SERVICE_HOST}
+auth_cluster_required = cephx
+auth_service_required = cephx
+auth_client_required = cephx
+filestore_xattr_use_omap = true
+osd crush chooseleaf type = 0
+osd journal size = 100
+EOF
+
+ # bootstrap the ceph monitor
+ sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) \
+ --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)
+
+ if is_ubuntu; then
+ sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
+ sudo initctl emit ceph-mon id=$(hostname)
+ else
+ sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
+ sudo service ceph start mon.$(hostname)
+ fi
+
+ # wait for the admin key to come up; otherwise we will not be able
+ # to run the commands below
+ count=0
+ until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
+ echo_summary "Waiting for the Ceph admin key to be ready..."
+
+ count=$(($count + 1))
+ if [ $count -eq 3 ]; then
+ die $LINENO "Maximum of 3 retries reached"
+ fi
+ sleep 5
+ done
+
+ # the default 'data' and 'metadata' pools were removed in the Giant
+ # release, so we apply different commands depending on the version
+ ceph_version=$(get_ceph_version)
+ # change pool replica size according to the CEPH_REPLICAS set by the user
+ if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
+ else
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
+ fi
+
+ # create a simple rule to take OSDs instead of host with CRUSH
+ # then apply this rules to the default pool
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
+ RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
+ fi
+
+ OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
+ sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
+ sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
+ sudo ceph auth get-or-create osd.${OSD_ID} \
+ mon 'allow profile osd' osd 'allow *' | \
+ sudo tee /var/lib/ceph/osd/ceph-${OSD_ID}/keyring
+
+ # ceph's init script parses ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ looking
+ # for an 'upstart' or 'sysvinit' file; these touches let us control the
+ # OSD daemons from the init script.
+ if is_ubuntu; then
+ sudo touch /var/lib/ceph/osd/ceph-${OSD_ID}/upstart
+ else
+ sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
+ fi
+
+}
+
+function configure_ceph_remote {
+ echo "boot osd on compute only node"
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a 'CEPH_CONF_FILE\=/etc/ceph/ceph.conf
+ CEPH_DATA_DIR\=/var/lib/ceph/
+ OSD_ID\=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
+ sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
+ sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} \
+ mon "allow profile osd" osd "allow *" | \
+ sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
+ sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
+ sudo start ceph-osd id\=${OSD_ID}
+ '
+}
+
+function wait_for_ceph_up {
+ for i in $(seq 1 3); do
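+ # 'ceph osd stat' is assumed to print a line like
+ # "osdmap e12: 2 osds: 2 up, 2 in"; parse the total/up/in counts from it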
+ ceph_osd_stat=$(sudo ceph osd stat)
+ total_osd_amount=$(echo "$ceph_osd_stat" | awk -F ":" '{ print $2}' | sed 's/[^0-9]*//g')
+ up_osd_amount=$(echo "$ceph_osd_stat" | awk -F ":" '{ print $3}' | awk -F "," '{print $1}'| sed 's/[^0-9]*//g')
+ in_cluster_osd_amount=$(echo "$ceph_osd_stat" | awk -F ":" '{ print $3}' | awk -F "," '{print $2}'| sed 's/[^0-9]*//g')
+ if [ "$total_osd_amount" -eq "$up_osd_amount" -a "$up_osd_amount" -eq "$in_cluster_osd_amount" ]
+ then
+ echo "All OSDs are up and ready"
+ return
+ fi
+ sleep 3
+ done
+ die $LINENO "Maximum of 3 retries reached. Failed to start osds properly"
+}
+
+function _ceph_configure_glance {
+ GLANCE_API_CONF=${GLANCE_API_CONF:-/etc/glance/glance-api.conf}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} \
+ mon "allow r" \
+ osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | \
+ sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
+ sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
+
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=DEFAULT option=show_image_direct_url value=True"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=default_store value=rbd"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=stores value='file, http, rbd'"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_ceph_conf value=$CEPH_CONF_FILE"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_user value=$GLANCE_CEPH_USER"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${GLANCE_API_CONF} section=glance_store option=rbd_store_pool value=$GLANCE_CEPH_POOL"
+
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
+ fi
+
+ # copy the glance keyring to the compute-only nodes
+ sudo cp /etc/ceph/ceph.client.glance.keyring /tmp/ceph.client.glance.keyring
+ sudo chown $STACK_USER:$STACK_USER /tmp/ceph.client.glance.keyring
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.glance.keyring dest=/etc/ceph/ceph.client.glance.keyring"
+ sudo rm -f /tmp/ceph.client.glance.keyring
+}
+
+function configure_and_start_glance {
+ _ceph_configure_glance
+ echo 'check processes before glance-api stop'
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep glance-api"
+
+ #stop g-api
+ stop 'primary' 'g-api'
+
+ echo 'check processes after glance-api stop'
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep glance-api"
+
+ # restart glance
+ sudo -H -u $STACK_USER bash -c "/tmp/start_process.sh g-api '/usr/local/bin/glance-api --config-file=/etc/glance/glance-api.conf'"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep glance-api"
+}
+
+function _ceph_configure_nova {
+ # set up ceph for nova; we don't reuse configure_ceph_nova because we
+ # need to emulate the case where cinder is not configured for ceph
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
+ NOVA_CONF=${NOVA_CONF:-/etc/nova/nova.conf}
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=rbd_user value=${CINDER_CEPH_USER}"
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=rbd_secret_uuid value=${CINDER_CEPH_UUID}"
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=inject_key value=false"
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=inject_partition value=-2"
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=disk_cachemodes value='network=writeback'"
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_type value=rbd"
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_rbd_pool value=${NOVA_CEPH_POOL}"
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=${NOVA_CONF} section=libvirt option=images_rbd_ceph_conf value=${CEPH_CONF_FILE}"
+
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
+ mon "allow r" \
+ osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \
+ sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
+ sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
+
+ # copy the cinder keyring to the compute-only nodes
+ sudo cp /etc/ceph/ceph.client.cinder.keyring /tmp/ceph.client.cinder.keyring
+ sudo chown ${STACK_USER}:${STACK_USER} /tmp/ceph.client.cinder.keyring
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/ceph.client.cinder.keyring dest=/etc/ceph/ceph.client.cinder.keyring"
+ sudo rm -f /tmp/ceph.client.cinder.keyring
+
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
+ fi
+}
+
+function configure_and_start_nova {
+ _ceph_configure_nova
+ # import the secret into libvirt
+ _populate_libvirt_secret
+ echo 'check compute processes before restart'
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute"
+
+ #stop nova-compute
+ stop 'all' 'n-cpu'
+
+ echo 'check processes after compute stop'
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute"
+
+ # restart local nova-compute
+ sudo -H -u $STACK_USER bash -c "/tmp/start_process.sh n-cpu '/usr/local/bin/nova-compute --config-file /etc/nova/nova.conf' libvirtd"
+
+ # restart remote nova-compute
+ for SUBNODE in $SUBNODES ; do
+ ssh $SUBNODE "sudo -H -u $STACK_USER bash -c '/tmp/start_process.sh n-cpu \"/usr/local/bin/nova-compute --config-file /etc/nova/nova.conf\" libvirtd'"
+ done
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ps aux | grep compute"
+
+}
+
+function _ceph_configure_cinder {
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
+ fi
+
+ CINDER_CONF=${CINDER_CONF:-/etc/cinder/cinder.conf}
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_backend_name value=ceph"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=volume_driver value=cinder.volume.drivers.rbd.RBDDriver"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_ceph_conf value=$CEPH_CONF_FILE"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_pool value=$CINDER_CEPH_POOL"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_user value=$CINDER_CEPH_USER"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_uuid value=$CINDER_CEPH_UUID"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_flatten_volume_from_snapshot value=False"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=ceph option=rbd_max_clone_depth value=5"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=glance_api_version value=2"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=default_volume_type value=ceph"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$CINDER_CONF section=DEFAULT option=enabled_backends value=ceph"
+
+}
+
+function configure_and_start_cinder {
+ _ceph_configure_cinder
+ stop 'primary' 'c-vol'
+
+ sudo -H -u $STACK_USER bash -c "/tmp/start_process.sh c-vol '/usr/local/bin/cinder-volume --config-file /etc/cinder/cinder.conf'"
+ source $BASE/new/devstack/openrc
+
+ export OS_USERNAME=admin
+ export OS_PROJECT_NAME=admin
+ lvm_type=$(cinder type-list | awk -F "|" 'NR==4{ print $2}')
+ cinder type-delete $lvm_type
+ openstack volume type create --os-volume-api-version 1 --property volume_backend_name="ceph" ceph
+}
+
+function _populate_libvirt_secret {
+ cat > /tmp/secret.xml <<EOF
+<secret ephemeral='no' private='no'>
+ <uuid>${CINDER_CEPH_UUID}</uuid>
+ <usage type='ceph'>
+ <name>client.${CINDER_CEPH_USER} secret</name>
+ </usage>
+</secret>
+EOF
+
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/secret.xml dest=/tmp/secret.xml"
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-define --file /tmp/secret.xml"
+ local secret=$(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
+ # TODO(tdurakov): remove this escaping as https://github.com/ansible/ansible/issues/13862 fixed
+ secret=${secret//=/'\='}
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $secret"
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/tmp/secret.xml state=absent"
+
+}
diff --git a/nova/tests/live_migration/hooks/nfs.sh b/nova/tests/live_migration/hooks/nfs.sh
new file mode 100755
index 0000000000..d6f177894a
--- /dev/null
+++ b/nova/tests/live_migration/hooks/nfs.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+function nfs_setup {
+ if uses_debs; then
+ module=apt
+ elif is_fedora; then
+ module=yum
+ fi
+ $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m $module \
+ -a "name=nfs-common state=present"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m $module \
+ -a "name=nfs-kernel-server state=present"
+
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-User value=nova"
+
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-Group value=nova"
+
+ for SUBNODE in $SUBNODES ; do
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m lineinfile -a "dest=/etc/exports line='/opt/stack/data/nova/instances $SUBNODE(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash)'"
+ done
+
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "exportfs -a"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=restarted"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m service -a "name=idmapd state=restarted"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 111 -j ACCEPT"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 111 -j ACCEPT"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 2049 -j ACCEPT"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 2049 -j ACCEPT"
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "mount -t nfs4 -o proto\=tcp,port\=2049 $primary_node:/ /opt/stack/data/nova/instances/"
+}
+
+function nfs_configure_tempest {
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$BASE/new/tempest/etc/tempest.conf section=compute-feature-enabled option=block_migration_for_live_migration value=False"
+}
+
+function nfs_verify_setup {
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/opt/stack/data/nova/instances/test_file state=touch"
+ if [ ! -e '/opt/stack/data/nova/instances/test_file' ]; then
+ die $LINENO "NFS configuration failure"
+ fi
+}
+
+function nfs_teardown {
+ #teardown nfs shared storage
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "umount -t nfs4 /opt/stack/data/nova/instances/"
+ $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=stopped"
+} \ No newline at end of file
diff --git a/nova/tests/live_migration/hooks/run_tests.sh b/nova/tests/live_migration/hooks/run_tests.sh
index f8da559444..e5918ca674 100755
--- a/nova/tests/live_migration/hooks/run_tests.sh
+++ b/nova/tests/live_migration/hooks/run_tests.sh
@@ -3,60 +3,53 @@
# environments based on underlying storage, used for ephemerals.
# This hook allows injecting environment-reconfiguration logic into the CI job.
# Base scenario for this would be:
-# - run live-migration on env without shared storage
-# - set up ceph for ephemerals, and reconfigure nova, tempest for that
-# - run live-migration tests
-# - remove ceph and set up nfs for ephemerals, make appropriate change in nova
-# and tempest config
-# - run live-migration tests
-
-set -x
+#
+# 1. test with all local storage (use default for volumes)
+# 2. test with NFS for root + ephemeral disks
+# 3. test with Ceph for root + ephemeral disks
+# 4. test with Ceph for volumes and root + ephemeral disk
+
+set -xe
cd $BASE/new/tempest
-sudo -H -u tempest tox -eall -- --concurrency=$TEMPEST_CONCURRENCY live_migration
-
-#nfs preparation
-echo "subnode info:"
-cat /etc/nodepool/sub_nodes_private
-echo "inventory:"
-cat $WORKSPACE/inventory
-echo "process info:"
-ps aux | grep nova-compute
+
+source $BASE/new/devstack/functions
+source $BASE/new/devstack/functions-common
source $WORKSPACE/devstack-gate/functions.sh
+source $BASE/new/nova/nova/tests/live_migration/hooks/utils.sh
+source $BASE/new/nova/nova/tests/live_migration/hooks/nfs.sh
+source $BASE/new/nova/nova/tests/live_migration/hooks/ceph.sh
+primary_node=$(cat /etc/nodepool/primary_node_private)
+SUBNODES=$(cat /etc/nodepool/sub_nodes_private)
+SERVICE_HOST=$primary_node
+STACK_USER=${STACK_USER:-stack}
-if uses_debs; then
- $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m apt \
- -a "name=nfs-common state=present"
- $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m apt \
- -a "name=nfs-kernel-server state=present"
-elif is_fedora; then
- $ANSIBLE all --sudo -f 5 -i "$WORKSPACE/inventory" -m yum \
- -a "name=nfs-common state=present"
- $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m yum \
- -a "name=nfs-kernel-server state=present"
-fi
+populate_start_script
-$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-User value=nova"
+echo '1. test with all local storage (use default for volumes)'
-$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=/etc/idmapd.conf section=Mapping option=Nobody-Group value=nova"
+run_tempest "block migration test"
-SUBNODES=$(cat /etc/nodepool/sub_nodes_private)
-for SUBNODE in $SUBNODES ; do
- $ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m lineinfile -a "dest=/etc/exports line='/opt/stack/data/nova/instances $SUBNODE(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash)'"
-done
-
-$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "exportfs -a"
-$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m service -a "name=nfs-kernel-server state=restarted"
-$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m service -a "name=idmapd state=restarted"
-$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 111 -j ACCEPT"
-$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 111 -j ACCEPT"
-$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p tcp --dport 2049 -j ACCEPT"
-$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "iptables -A INPUT -p udp --dport 2049 -j ACCEPT"
-primary_node=$(cat /etc/nodepool/primary_node_private)
-$ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "mount -t nfs4 -o proto\=tcp,port\=2049 $primary_node:/ /opt/stack/data/nova/instances/"
-$ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m file -a "path=/opt/stack/data/nova/instances/test_file state=touch"
-echo "check whether NFS shared storage works or not:"
-ls -la /opt/stack/data/nova/instances
-SCREEN_NAME=${SCREEN_NAME:-stack}
-$ANSIBLE primary --sudo -f 5 -i "$WORKSPACE/inventory" -m ini_file -a "dest=$BASE/new/tempest/etc/tempest.conf section=compute-feature-enabled option=block_migration_for_live_migration value=False"
-
-sudo -H -u tempest tox -eall -- --concurrency=$TEMPEST_CONCURRENCY live_migration
+echo '2. test with NFS for root + ephemeral disks'
+
+nfs_setup
+nfs_configure_tempest
+nfs_verify_setup
+run_tempest "NFS shared storage test"
+nfs_teardown
+
+echo '3. test with Ceph for root + ephemeral disks'
+
+source $BASE/new/devstack/lib/ceph
+
+# re-enable tracing and exit-on-error after sourcing the devstack libs
+set -xe
+
+setup_ceph_cluster
+configure_and_start_glance
+configure_and_start_nova
+run_tempest "Ceph nova&glance test"
+
+echo '4. test with Ceph for volumes and root + ephemeral disk'
+
+configure_and_start_cinder
+run_tempest "Ceph nova&glance&cinder test" \ No newline at end of file
diff --git a/nova/tests/live_migration/hooks/utils.sh b/nova/tests/live_migration/hooks/utils.sh
new file mode 100755
index 0000000000..591a1feb1c
--- /dev/null
+++ b/nova/tests/live_migration/hooks/utils.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+function run_tempest {
+ local message=$1
+ sudo -H -u tempest tox -eall -- --concurrency=$TEMPEST_CONCURRENCY live_migration
+ exitcode=$?
+ if [[ $exitcode -ne 0 ]]; then
+ die $LINENO "$message failure"
+ fi
+}
+
+function populate_start_script {
+ SCREEN_NAME=${SCREEN_NAME:-stack}
+ DEST=${DEST:-/opt/stack}
+ SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+ ENABLED_SERVICES=${ENABLED_SERVICES:-n-cpu,g-api,c-vol}
+ LIBVIRT_GROUP=${LIBVIRT_GROUP:-libvirtd}
+ TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
+ LOGDAYS=${LOGDAYS:-7}
+ CURRENT_LOG_TIME=$(date "+$TIMESTAMP_FORMAT")
+
+ # Creates a script for starting a process without screen and copies
+ # it to all nodes.
+ #
+ # args of the generated script:
+ # $1 - service name to start
+ # $2 - command to execute
+ # $3 - group to run under
+ cat > /tmp/start_process.sh <<EOF
+set -x
+service=\$1
+command=\$2
+sg=\$3
+ENABLED_SERVICES=$ENABLED_SERVICES
+SCREEN_NAME=$SCREEN_NAME
+DEST=$DEST
+SERVICE_DIR=$SERVICE_DIR
+LOGDIR=$DEST/logs
+TIMESTAMP_FORMAT=$TIMESTAMP_FORMAT
+LOGDAYS=$LOGDAYS
+CURRENT_LOG_TIME=\$(date "+$TIMESTAMP_FORMAT")
+REAL_LOG_FILE="\$LOGDIR/\$service.log.\$CURRENT_LOG_TIME"
+if [[ -n "\$LOGDIR" ]]; then
+ exec 1>&"\$REAL_LOG_FILE" 2>&1
+ ln -sf "\$REAL_LOG_FILE" \$LOGDIR/\$service.log
+ export PYTHONUNBUFFERED=1
+fi
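+# setsid detaches the service from the controlling terminal; when a group
+# is passed (e.g. the libvirt group), sg runs the command under it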
+if [[ -n "\$sg" ]]; then
+ setsid sg \$sg -c "\$command" & echo \$! >\$SERVICE_DIR/\$SCREEN_NAME/\$service.pid
+else
+ setsid \$command & echo \$! >\$SERVICE_DIR/\$SCREEN_NAME/\$service.pid
+fi
+exit 0
+EOF
+ chmod +x /tmp/start_process.sh
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m copy -a "src=/tmp/start_process.sh dest=/tmp/start_process.sh owner=$STACK_USER group=$STACK_USER mode=0777"
+ $ANSIBLE subnodes --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "ls -la /tmp/start_process.sh"
+}
+
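+# Stops a devstack service on the given ansible target group using
+# devstack's stop_process, e.g. "stop 'primary' 'g-api'" or
+# "stop 'all' 'n-cpu'".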
+function stop {
+ local target=$1
+ local service=$2
+ $ANSIBLE $target --sudo -f 5 -i "$WORKSPACE/inventory" -m shell -a "
+executable=/bin/bash
+BASE\=$BASE
+source $BASE/new/devstack/functions-common
+ENABLED_SERVICES\=$ENABLED_SERVICES
+SCREEN_NAME\=$SCREEN_NAME
+SERVICE_DIR\=$SERVICE_DIR
+stop_process $service
+"
+}
diff --git a/nova/tests/unit/api/openstack/compute/legacy_v2/test_servers.py b/nova/tests/unit/api/openstack/compute/legacy_v2/test_servers.py
index d208908e33..5e3b82119e 100644
--- a/nova/tests/unit/api/openstack/compute/legacy_v2/test_servers.py
+++ b/nova/tests/unit/api/openstack/compute/legacy_v2/test_servers.py
@@ -76,10 +76,6 @@ INSTANCE_IDS = {FAKE_UUID: 1}
FIELDS = instance_obj.INSTANCE_DEFAULT_FIELDS
-def fake_gen_uuid():
- return FAKE_UUID
-
-
def return_servers_empty(context, *args, **kwargs):
return objects.InstanceList(objects=[])
@@ -177,10 +173,10 @@ class ControllerTest(test.TestCase):
lambda api, *a, **k: return_servers(*a, **k))
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: return_server(*a, **k))
- self.stubs.Set(db, 'instance_add_security_group',
- return_security_group)
- self.stubs.Set(db, 'instance_update_and_get_original',
- instance_update_and_get_original)
+ self.stub_out('nova.db.instance_add_security_group',
+ return_security_group)
+ self.stub_out('nova.db.instance_update_and_get_original',
+ instance_update_and_get_original)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
@@ -1352,6 +1348,12 @@ class ServersControllerUpdateTest(ControllerTest):
req.body = jsonutils.dump_as_bytes(body)
return req
+ @property
+ def wsgi_app(self):
+ with mock.patch.object(extensions.ExtensionManager, 'load_extension'):
+ # patch load_extension because it's expensive in fakes.wsgi_app
+ return fakes.wsgi_app(init_only=('servers',))
+
def test_update_server_all_attributes(self):
body = {'server': {
'name': 'server_test',
@@ -1374,7 +1376,7 @@ class ServersControllerUpdateTest(ControllerTest):
xmlns="http://docs.openstack.org/compute/api/v1.1"
key="Label"></meta>"""
req = self._get_request(body, content_type='xml')
- res = req.get_response(fakes.wsgi_app())
+ res = req.get_response(self.wsgi_app)
self.assertEqual(400, res.status_int)
def test_update_server_invalid_xml_raises_expat(self):
@@ -1383,7 +1385,7 @@ class ServersControllerUpdateTest(ControllerTest):
xmlns="http://docs.openstack.org/compute/api/v1.1"
key="Label"></meta>"""
req = self._get_request(body, content_type='xml')
- res = req.get_response(fakes.wsgi_app())
+ res = req.get_response(self.wsgi_app)
self.assertEqual(400, res.status_int)
def test_update_server_name(self):
@@ -1441,9 +1443,9 @@ class ServersControllerUpdateTest(ControllerTest):
filtered_dict['uuid'] = id
return filtered_dict
- self.stubs.Set(db, 'instance_update', server_update)
+ self.stub_out('nova.db.instance_update', server_update)
# FIXME (comstud)
- # self.stubs.Set(db, 'instance_get',
+ # self.stub_out('nova.db.instance_get',
# return_server_with_attributes(name='server_test'))
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
@@ -1469,7 +1471,7 @@ class ServersControllerUpdateTest(ControllerTest):
def fake_update(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
- self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
+ self.stub_out('nova.db.instance_update_and_get_original', fake_update)
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
@@ -1559,7 +1561,7 @@ class ServersControllerDeleteTest(ControllerTest):
def test_delete_server_instance_while_deleting_host_down(self):
fake_network.stub_out_network_cleanup(self)
req = self._create_delete_request(FAKE_UUID)
- self.stubs.Set(db, 'instance_get_by_uuid',
+ self.stub_out('nova.db.instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.DELETING,
host='fake_host'))
@@ -1579,7 +1581,7 @@ class ServersControllerDeleteTest(ControllerTest):
def test_delete_server_instance_while_resize(self):
req = self._create_delete_request(FAKE_UUID)
- self.stubs.Set(db, 'instance_get_by_uuid',
+ self.stub_out('nova.db.instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.RESIZE_PREP))
@@ -1603,7 +1605,7 @@ class ServersControllerDeleteTest(ControllerTest):
self.server_delete_called = True
deleted_at = timeutils.utcnow()
return fake_instance.fake_db_instance(deleted_at=deleted_at)
- self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
+ self.stub_out('nova.db.instance_destroy', instance_destroy_mock)
self.controller.delete(req, FAKE_UUID)
# delete() should be called for instance which has never been active,
@@ -1886,7 +1888,7 @@ class ServersControllerCreateTest(test.TestCase):
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
- 'uuid': FAKE_UUID,
+ 'uuid': inst['uuid'],
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
@@ -1935,17 +1937,14 @@ class ServersControllerCreateTest(test.TestCase):
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self)
- self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
- self.stubs.Set(db, 'instance_add_security_group',
- return_security_group)
- self.stubs.Set(db, 'project_get_networks',
- project_get_networks)
- self.stubs.Set(db, 'instance_create', instance_create)
- self.stubs.Set(db, 'instance_system_metadata_update',
- fake_method)
- self.stubs.Set(db, 'instance_get', instance_get)
- self.stubs.Set(db, 'instance_update', instance_update)
- self.stubs.Set(db, 'instance_update_and_get_original',
+ self.stub_out('nova.db.instance_add_security_group',
+ return_security_group)
+ self.stub_out('nova.db.project_get_networks', project_get_networks)
+ self.stub_out('nova.db.instance_create', instance_create)
+ self.stub_out('nova.db.instance_system_metadata_update', fake_method)
+ self.stub_out('nova.db.instance_get', instance_get)
+ self.stub_out('nova.db.instance_update', instance_update)
+ self.stub_out('nova.db.instance_update_and_get_original',
server_update_and_get_original)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
@@ -1992,7 +1991,8 @@ class ServersControllerCreateTest(test.TestCase):
self.req.body = jsonutils.dump_as_bytes(self.body)
server = self.controller.create(self.req, self.body).obj['server']
self._check_admin_pass_len(server)
- self.assertEqual(FAKE_UUID, server['id'])
+ instance = self.instance_cache_by_uuid.values()[0]
+ self.assertEqual(instance['uuid'], server['id'])
return server
def test_create_instance_private_flavor(self):
@@ -2074,7 +2074,8 @@ class ServersControllerCreateTest(test.TestCase):
self.ext_mgr.extensions = {'os-multiple-create': 'fake'}
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, self.body).obj
- self.assertEqual(FAKE_UUID, res["server"]["id"])
+ instance_uuids = self.instance_cache_by_uuid.keys()
+ self.assertIn(res["server"]["id"], instance_uuids)
self._check_admin_pass_len(res["server"])
def test_create_multiple_instances_pass_disabled(self):
@@ -2085,7 +2086,8 @@ class ServersControllerCreateTest(test.TestCase):
self.flags(enable_instance_password=False)
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, self.body).obj
- self.assertEqual(FAKE_UUID, res["server"]["id"])
+ instance_uuids = self.instance_cache_by_uuid.keys()
+ self.assertIn(res["server"]["id"], instance_uuids)
self._check_admin_pass_missing(res["server"])
def test_create_multiple_instances_resv_id_return(self):
@@ -2131,7 +2133,8 @@ class ServersControllerCreateTest(test.TestCase):
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, self.body).obj
server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
+ instance = self.instance_cache_by_uuid.values()[0]
+ self.assertEqual(instance['uuid'], server['id'])
def test_create_instance_image_ref_is_invalid(self):
image_uuid = 'this_is_not_a_valid_uuid'
@@ -2170,7 +2173,7 @@ class ServersControllerCreateTest(test.TestCase):
self.assertEqual(kwargs['security_group'], [group])
return old_create(*args, **kwargs)
- self.stubs.Set(db, 'security_group_get_by_name', sec_group_get)
+ self.stub_out('nova.db.security_group_get_by_name', sec_group_get)
# negative test
self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra,
@@ -2299,7 +2302,8 @@ class ServersControllerCreateTest(test.TestCase):
server = res['server']
self._check_admin_pass_len(server)
- self.assertEqual(FAKE_UUID, server['id'])
+ instance = self.instance_cache_by_uuid.values()[0]
+ self.assertEqual(instance['uuid'], server['id'])
def test_create_instance_pass_disabled(self):
self.flags(enable_instance_password=False)
@@ -2308,7 +2312,8 @@ class ServersControllerCreateTest(test.TestCase):
server = res['server']
self._check_admin_pass_missing(server)
- self.assertEqual(FAKE_UUID, server['id'])
+ instance = self.instance_cache_by_uuid.values()[0]
+ self.assertEqual(instance['uuid'], server['id'])
@mock.patch('nova.virt.hardware.numa_get_constraints')
def test_create_instance_numa_topology_wrong(self, numa_constraints_mock):
@@ -2399,7 +2404,8 @@ class ServersControllerCreateTest(test.TestCase):
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, self.body).obj
- self.assertEqual(FAKE_UUID, res["server"]["id"])
+ instance = self.instance_cache_by_uuid.values()[0]
+ self.assertEqual(instance['uuid'], res["server"]["id"])
self._check_admin_pass_len(res["server"])
def test_create_instance_invalid_flavor_href(self):
@@ -2436,7 +2442,8 @@ class ServersControllerCreateTest(test.TestCase):
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, self.body).obj
server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
+ instance = self.instance_cache_by_uuid.values()[0]
+ self.assertEqual(instance['uuid'], server['id'])
def test_create_instance_with_bad_config_drive(self):
self.ext_mgr.extensions = {'os-config-drive': 'fake'}
@@ -2451,7 +2458,8 @@ class ServersControllerCreateTest(test.TestCase):
self.req.body = jsonutils.dump_as_bytes(self.body)
res = self.controller.create(self.req, self.body).obj
server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
+ instance = self.instance_cache_by_uuid.values()[0]
+ self.assertEqual(instance['uuid'], server['id'])
def test_create_instance_with_config_drive_disabled(self):
config_drive = [{'config_drive': 'foo'}]
@@ -2478,7 +2486,8 @@ class ServersControllerCreateTest(test.TestCase):
res = self.controller.create(self.req, self.body).obj
server = res['server']
- self.assertEqual(FAKE_UUID, server['id'])
+ instance = self.instance_cache_by_uuid.values()[0]
+ self.assertEqual(instance['uuid'], server['id'])
def test_create_instance_admin_pass(self):
self.body['server']['flavorRef'] = 3,
@@ -2645,7 +2654,7 @@ class ServersControllerCreateTest(test.TestCase):
self.assertEqual(key_name, kwargs['key_name'])
return old_create(*args, **kwargs)
- self.stubs.Set(db, 'key_pair_get', key_pair_get)
+ self.stub_out('nova.db.key_pair_get', key_pair_get)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
@@ -2842,11 +2851,12 @@ class ServersControllerCreateTest(test.TestCase):
self.controller.create, self.req, self.body)
def test_create_location(self):
- selfhref = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
image_href = 'http://localhost/v2/images/%s' % self.image_uuid
self.body['server']['imageRef'] = image_href
self.req.body = jsonutils.dump_as_bytes(self.body)
robj = self.controller.create(self.req, self.body)
+ instance = self.instance_cache_by_uuid.values()[0]
+ selfhref = 'http://localhost/v2/fake/servers/%s' % instance['uuid']
self.assertEqual(selfhref, robj['Location'])
def _do_test_create_instance_above_quota(self, resource, allowed, quota,
@@ -2898,7 +2908,7 @@ class ServersControllerCreateTest(test.TestCase):
self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
- self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
+ self.stub_out('nova.db.instance_destroy', fake_instance_destroy)
self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake',
'os-server-group-quotas': 'fake'}
self.body['server']['scheduler_hints'] = {'group': fake_group.uuid}
@@ -2922,7 +2932,7 @@ class ServersControllerCreateTest(test.TestCase):
def fake_instance_destroy(context, uuid, constraint):
return fakes.stub_instance(1)
- self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
+ self.stub_out('nova.db.instance_destroy', fake_instance_destroy)
self.ext_mgr.extensions = {'OS-SCH-HNT': 'fake',
'os-server-group-quotas': 'fake'}
self.body['server']['scheduler_hints'] = {'group': test_group.uuid}
diff --git a/nova/tests/unit/api/openstack/compute/test_access_ips.py b/nova/tests/unit/api/openstack/compute/test_access_ips.py
index b4910a6964..0e98e991cc 100644
--- a/nova/tests/unit/api/openstack/compute/test_access_ips.py
+++ b/nova/tests/unit/api/openstack/compute/test_access_ips.py
@@ -21,7 +21,6 @@ from nova.api.openstack.compute import servers as servers_v21
from nova.api.openstack import extensions as extensions_v20
from nova.api.openstack import wsgi
from nova.compute import api as compute_api
-from nova import db
from nova import exception
from nova.objects import instance as instance_obj
from nova import test
@@ -171,7 +170,8 @@ class AccessIPsExtAPIValidationTestV21(test.TestCase):
self._set_up_controller()
fake.stub_out_image_service(self)
- self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fakes.fake_instance_get())
self.stubs.Set(instance_obj.Instance, 'save', fake_save)
self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
diff --git a/nova/tests/unit/api/openstack/compute/test_agents.py b/nova/tests/unit/api/openstack/compute/test_agents.py
index 297a895acc..89d8637e3d 100644
--- a/nova/tests/unit/api/openstack/compute/test_agents.py
+++ b/nova/tests/unit/api/openstack/compute/test_agents.py
@@ -83,14 +83,10 @@ class AgentsTestV21(test.NoDBTestCase):
def setUp(self):
super(AgentsTestV21, self).setUp()
- self.stubs.Set(db, "agent_build_get_all",
- fake_agent_build_get_all)
- self.stubs.Set(db, "agent_build_update",
- fake_agent_build_update)
- self.stubs.Set(db, "agent_build_destroy",
- fake_agent_build_destroy)
- self.stubs.Set(db, "agent_build_create",
- fake_agent_build_create)
+ self.stub_out("nova.db.agent_build_get_all", fake_agent_build_get_all)
+ self.stub_out("nova.db.agent_build_update", fake_agent_build_update)
+ self.stub_out("nova.db.agent_build_destroy", fake_agent_build_destroy)
+ self.stub_out("nova.db.agent_build_create", fake_agent_build_create)
self.req = self._get_http_request()
def _get_http_request(self):
@@ -156,8 +152,8 @@ class AgentsTestV21(test.NoDBTestCase):
def fake_agent_build_create_with_exited_agent(context, values):
raise exception.AgentBuildExists(**values)
- self.stubs.Set(db, 'agent_build_create',
- fake_agent_build_create_with_exited_agent)
+ self.stub_out('nova.db.agent_build_create',
+ fake_agent_build_create_with_exited_agent)
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
diff --git a/nova/tests/unit/api/openstack/compute/test_aggregates.py b/nova/tests/unit/api/openstack/compute/test_aggregates.py
index d49cdb8f40..d3d2534e55 100644
--- a/nova/tests/unit/api/openstack/compute/test_aggregates.py
+++ b/nova/tests/unit/api/openstack/compute/test_aggregates.py
@@ -28,6 +28,7 @@ from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
+from nova.tests import uuidsentinel
def _make_agg_obj(agg_dict):
@@ -699,7 +700,7 @@ class AggregateTestCaseV21(test.NoDBTestCase):
# We would expect the dictionary that comes out is the same one
# that we pump into the aggregate object in the first place
agg = {'name': 'aggregate1',
- 'id': 1,
+ 'id': 1, 'uuid': uuidsentinel.aggregate,
'metadata': {'foo': 'bar', 'availability_zone': 'nova'},
'hosts': ['host1', 'host2']}
agg_obj = _make_agg_obj(agg)
@@ -708,6 +709,7 @@ class AggregateTestCaseV21(test.NoDBTestCase):
# _marshall_aggregate() puts all fields and obj_extra_fields in the
# top-level dict, so we need to put availability_zone at the top also
agg['availability_zone'] = 'nova'
+ del agg['uuid']
self.assertEqual(agg, marshalled_agg['aggregate'])
def _assert_agg_data(self, expected, actual):
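
The new uuidsentinel import above hands out a stable, unique UUID per attribute name, which suits fields like the aggregate's new uuid that only need to be consistent within a test. Roughly, and as an assumption about the real module's behavior:

    import uuid


    class UUIDSentinels(object):
        """First access of an attribute mints a UUID; later accesses
        return the same one."""

        def __init__(self):
            self._sentinels = {}

        def __getattr__(self, name):
            if name.startswith('_'):
                raise AttributeError(name)
            if name not in self._sentinels:
                self._sentinels[name] = str(uuid.uuid4())
            return self._sentinels[name]


    uuids = UUIDSentinels()
    assert uuids.aggregate == uuids.aggregate   # stable per name
    assert uuids.aggregate != uuids.server      # distinct across names
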
diff --git a/nova/tests/unit/api/openstack/compute/test_api.py b/nova/tests/unit/api/openstack/compute/test_api.py
index 045d62d362..da98a18b29 100644
--- a/nova/tests/unit/api/openstack/compute/test_api.py
+++ b/nova/tests/unit/api/openstack/compute/test_api.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
from oslo_serialization import jsonutils
import six
import webob
@@ -20,6 +21,7 @@ import webob.dec
import webob.exc
from nova.api import openstack as openstack_api
+from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova import test
@@ -30,7 +32,12 @@ class APITest(test.NoDBTestCase):
def setUp(self):
super(APITest, self).setUp()
- self.wsgi_app = fakes.wsgi_app()
+
+ @property
+ def wsgi_app(self):
+ with mock.patch.object(extensions.ExtensionManager, 'load_extension'):
+ # patch load_extension because it's expensive in fakes.wsgi_app
+ return fakes.wsgi_app(init_only=('versions',))
def _wsgi_app(self, inner_app):
# simpler version of the app than fakes.wsgi_app
@@ -169,9 +176,9 @@ class APITest(test.NoDBTestCase):
class APITestV21(APITest):
- def setUp(self):
- super(APITestV21, self).setUp()
- self.wsgi_app = fakes.wsgi_app_v21()
+ @property
+ def wsgi_app(self):
+ return fakes.wsgi_app_v21(init_only=('versions',))
# TODO(alex_xu): Get rid of the case translate NovaException to
# HTTPException after V2 api code removed. Because V2.1 API required raise
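
The wsgi_app change here, repeated for several flavor test classes further down, stops materializing the app in setUp() and exposes it as a property instead: tests that never issue a request skip the expensive construction entirely. A self-contained sketch of the cost difference, with expensive_app standing in for fakes.wsgi_app():

    import unittest


    def expensive_app():
        # Stand-in for building a full WSGI app; count the calls.
        expensive_app.calls += 1
        return object()


    expensive_app.calls = 0


    class EagerTest(unittest.TestCase):
        def setUp(self):
            self.app = expensive_app()      # every test pays for this


    class LazyTest(unittest.TestCase):
        @property
        def app(self):
            return expensive_app()          # only paid when dereferenced

        def test_no_requests_made(self):
            pass                            # never builds the app
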
diff --git a/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py b/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
index ddb7d00147..db138cf20d 100644
--- a/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
+++ b/nova/tests/unit/api/openstack/compute/test_attach_interfaces.py
@@ -14,7 +14,6 @@
# under the License.
import mock
-from oslo_config import cfg
from nova.api.openstack.compute import attach_interfaces \
as attach_interfaces_v21
@@ -31,8 +30,6 @@ from nova.tests.unit import fake_network_cache_model
from webob import exc
-CONF = cfg.CONF
-
FAKE_UUID1 = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
FAKE_UUID2 = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'
diff --git a/nova/tests/unit/api/openstack/compute/test_auth.py b/nova/tests/unit/api/openstack/compute/test_auth.py
index 83e495918c..ae5c6552c4 100644
--- a/nova/tests/unit/api/openstack/compute/test_auth.py
+++ b/nova/tests/unit/api/openstack/compute/test_auth.py
@@ -17,6 +17,8 @@
import webob
import webob.dec
+import testscenarios
+
from nova.api import openstack as openstack_api
from nova.api.openstack import auth
from nova.api.openstack import compute
@@ -25,15 +27,29 @@ from nova import test
from nova.tests.unit.api.openstack import fakes
-class TestNoAuthMiddlewareV21(test.NoDBTestCase):
+class TestNoAuthMiddleware(testscenarios.WithScenarios, test.NoDBTestCase):
+
+ scenarios = [
+ ('project_id', {
+ 'expected_url': 'http://localhost/v2.1/user1_project',
+ 'auth_middleware': auth.NoAuthMiddleware}),
+ ('no_project_id', {
+ 'expected_url': 'http://localhost/v2.1',
+ 'auth_middleware': auth.NoAuthMiddlewareV2_18}),
+ ]
def setUp(self):
- super(TestNoAuthMiddlewareV21, self).setUp()
+ super(TestNoAuthMiddleware, self).setUp()
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_networking(self)
- self.wsgi_app = fakes.wsgi_app_v21(use_no_auth=True)
- self.req_url = '/v2'
- self.expected_url = "http://localhost/v2/user1_project"
+ api_v21 = openstack_api.FaultWrapper(
+ self.auth_middleware(
+ compute.APIRouterV21()
+ )
+ )
+ self.wsgi_app = urlmap.URLMap()
+ self.wsgi_app['/v2.1'] = api_v21
+ self.req_url = '/v2.1'
def test_authorize_user(self):
req = webob.Request.blank(self.req_url)
@@ -66,16 +82,3 @@ class TestNoAuthMiddlewareV21(test.NoDBTestCase):
self.assertEqual(result.status, '204 No Content')
self.assertNotIn('X-CDN-Management-Url', result.headers)
self.assertNotIn('X-Storage-Url', result.headers)
-
-
-class TestNoAuthMiddlewareV3(TestNoAuthMiddlewareV21):
-
- def setUp(self):
- super(TestNoAuthMiddlewareV3, self).setUp()
- api_router = compute.APIRouterV3()
- api_v3 = openstack_api.FaultWrapper(auth.NoAuthMiddlewareV3(
- api_router))
- self.wsgi_app = urlmap.URLMap()
- self.wsgi_app['/v3'] = api_v3
- self.req_url = '/v3'
- self.expected_url = "http://localhost/v3"
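
The V2.1/V3 class pair above collapses into one class via testscenarios: each (name, attrs) tuple in scenarios produces a separate run of every test method with those attrs set on the instance. A self-contained sketch:

    import testscenarios
    import testtools


    class MiddlewareURLTest(testscenarios.WithScenarios, testtools.TestCase):

        scenarios = [
            ('project_id',
             {'expected_url': 'http://localhost/v2.1/user1_project'}),
            ('no_project_id',
             {'expected_url': 'http://localhost/v2.1'}),
        ]

        def test_url_prefix(self):
            # Runs once per scenario, with expected_url swapped in.
            self.assertTrue(
                self.expected_url.startswith('http://localhost/v2.1'))
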
diff --git a/nova/tests/unit/api/openstack/compute/test_availability_zone.py b/nova/tests/unit/api/openstack/compute/test_availability_zone.py
index 317b8fe1b2..fda85ba11e 100644
--- a/nova/tests/unit/api/openstack/compute/test_availability_zone.py
+++ b/nova/tests/unit/api/openstack/compute/test_availability_zone.py
@@ -101,7 +101,7 @@ class AvailabilityZoneApiTestV21(test.NoDBTestCase):
def setUp(self):
super(AvailabilityZoneApiTestV21, self).setUp()
availability_zones.reset_cache()
- self.stubs.Set(db, 'service_get_all', fake_service_get_all)
+ self.stub_out('nova.db.service_get_all', fake_service_get_all)
self.stubs.Set(availability_zones, 'set_availability_zones',
fake_set_availability_zones)
self.stubs.Set(servicegroup.API, 'service_is_up', fake_service_is_up)
@@ -231,7 +231,7 @@ class ServersControllerCreateTestV21(test.TestCase):
return instance
fake.stub_out_image_service(self)
- self.stubs.Set(db, 'instance_create', instance_create)
+ self.stub_out('nova.db.instance_create', instance_create)
self.req = fakes.HTTPRequest.blank('')
diff --git a/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py b/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
index c5775653d1..69080f8c29 100644
--- a/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
+++ b/nova/tests/unit/api/openstack/compute/test_baremetal_nodes.py
@@ -13,12 +13,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+
+from ironicclient import exc as ironic_exc
import mock
import six
from webob import exc
-from ironicclient import exc as ironic_exc
-
from nova.api.openstack.compute import baremetal_nodes \
as b_nodes_v21
from nova.api.openstack.compute.legacy_v2.contrib import baremetal_nodes \
diff --git a/nova/tests/unit/api/openstack/compute/test_cloudpipe_update.py b/nova/tests/unit/api/openstack/compute/test_cloudpipe_update.py
index c28ed6fe27..b1592c4148 100644
--- a/nova/tests/unit/api/openstack/compute/test_cloudpipe_update.py
+++ b/nova/tests/unit/api/openstack/compute/test_cloudpipe_update.py
@@ -17,7 +17,6 @@ import webob
from nova.api.openstack.compute import cloudpipe as clup_v21
from nova.api.openstack.compute.legacy_v2.contrib import cloudpipe_update \
as clup_v2
-from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -44,8 +43,9 @@ class CloudpipeUpdateTestV21(test.NoDBTestCase):
def setUp(self):
super(CloudpipeUpdateTestV21, self).setUp()
- self.stubs.Set(db, "project_get_networks", fake_project_get_networks)
- self.stubs.Set(db, "network_update", fake_network_update)
+ self.stub_out("nova.db.project_get_networks",
+ fake_project_get_networks)
+ self.stub_out("nova.db.network_update", fake_network_update)
self._setup()
self.req = fakes.HTTPRequest.blank('')
diff --git a/nova/tests/unit/api/openstack/compute/test_config_drive.py b/nova/tests/unit/api/openstack/compute/test_config_drive.py
index e49bd9eba0..92704bb6d6 100644
--- a/nova/tests/unit/api/openstack/compute/test_config_drive.py
+++ b/nova/tests/unit/api/openstack/compute/test_config_drive.py
@@ -26,14 +26,13 @@ from nova.api.openstack.compute import servers as servers_v21
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import flavors
-from nova import db
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
-
+from nova.tests import uuidsentinel as uuids
CONF = cfg.CONF
@@ -52,11 +51,11 @@ class ConfigDriveTestV21(test.TestCase):
self._setup_wsgi()
def test_show(self):
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get())
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get())
- req = webob.Request.blank(self.base_url + '1')
+ self.stub_out('nova.db.instance_get',
+ fakes.fake_instance_get())
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fakes.fake_instance_get())
+ req = webob.Request.blank(self.base_url + uuids.sentinel)
req.headers['Content-Type'] = 'application/json'
response = req.get_response(self.app)
self.assertEqual(response.status_int, 200)
@@ -143,7 +142,7 @@ class ServersControllerCreateTestV21(test.TestCase):
return instance
fake.stub_out_image_service(self)
- self.stubs.Set(db, 'instance_create', instance_create)
+ self.stub_out('nova.db.instance_create', instance_create)
def _test_create_extra(self, params, override_controller):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
diff --git a/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py b/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
index c6333aac2e..79f207c232 100644
--- a/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
+++ b/nova/tests/unit/api/openstack/compute/test_console_auth_tokens.py
@@ -15,7 +15,6 @@
import copy
-from oslo_config import cfg
import webob
from nova.api.openstack.compute import console_auth_tokens \
@@ -26,8 +25,6 @@ from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import test
from nova.tests.unit.api.openstack import fakes
-CONF = cfg.CONF
-
_FAKE_CONNECT_INFO = {'instance_uuid': 'fake_instance_uuid',
'host': 'fake_host',
'port': 'fake_port',
diff --git a/nova/tests/unit/api/openstack/compute/test_consoles.py b/nova/tests/unit/api/openstack/compute/test_consoles.py
index 2efa34a257..d17aa17097 100644
--- a/nova/tests/unit/api/openstack/compute/test_consoles.py
+++ b/nova/tests/unit/api/openstack/compute/test_consoles.py
@@ -25,7 +25,6 @@ from nova.api.openstack.compute import consoles as consoles_v21
from nova.api.openstack.compute.legacy_v2 import consoles as consoles_v2
from nova.compute import vm_states
from nova import console
-from nova import db
from nova import exception
from nova import policy
from nova import test
@@ -128,10 +127,10 @@ class ConsolesControllerTestV21(test.NoDBTestCase):
super(ConsolesControllerTestV21, self).setUp()
self.flags(verbose=True)
self.instance_db = FakeInstanceDB()
- self.stubs.Set(db, 'instance_get',
- self.instance_db.return_server_by_id)
- self.stubs.Set(db, 'instance_get_by_uuid',
- self.instance_db.return_server_by_uuid)
+ self.stub_out('nova.db.instance_get',
+ self.instance_db.return_server_by_id)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ self.instance_db.return_server_by_uuid)
self.uuid = str(stdlib_uuid.uuid4())
self.url = '/v2/fake/servers/%s/consoles' % self.uuid
self._set_up_controller()
diff --git a/nova/tests/unit/api/openstack/compute/test_createserverext.py b/nova/tests/unit/api/openstack/compute/test_createserverext.py
index b187c3b950..14e66ef137 100644
--- a/nova/tests/unit/api/openstack/compute/test_createserverext.py
+++ b/nova/tests/unit/api/openstack/compute/test_createserverext.py
@@ -19,10 +19,10 @@ from oslo_serialization import jsonutils
import webob
from nova.compute import api as compute_api
-from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
+from nova.tests import uuidsentinel as uuids
FAKE_UUID = fakes.FAKE_UUID
@@ -238,10 +238,10 @@ class CreateserverextTest(test.TestCase):
def test_create_instance_with_security_group_json(self):
security_groups = ['test', 'test1']
- self.stubs.Set(db, 'security_group_get_by_name',
- return_security_group_get_by_name)
- self.stubs.Set(db, 'instance_add_security_group',
- return_instance_add_security_group)
+ self.stub_out('nova.db.security_group_get_by_name',
+ return_security_group_get_by_name)
+ self.stub_out('nova.db.instance_add_security_group',
+ return_instance_add_security_group)
body_dict = self._create_security_group_request_dict(security_groups)
request = self._get_create_request_json(body_dict)
response = request.get_response(fakes.wsgi_app(
@@ -250,9 +250,11 @@ class CreateserverextTest(test.TestCase):
self.assertJsonEqual(self.security_group, security_groups)
def test_get_server_by_id_verify_security_groups_json(self):
- self.stubs.Set(db, 'instance_get', fakes.fake_instance_get())
- self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get())
- req = webob.Request.blank('/v2/fake/os-create-server-ext/1')
+ self.stub_out('nova.db.instance_get', fakes.fake_instance_get())
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fakes.fake_instance_get())
+ req = webob.Request.blank('/v2/fake/os-create-server-ext/' +
+ uuids.server)
req.headers['Content-Type'] = 'application/json'
response = req.get_response(fakes.wsgi_app(
init_only=('os-create-server-ext', 'servers')))
diff --git a/nova/tests/unit/api/openstack/compute/test_disk_config.py b/nova/tests/unit/api/openstack/compute/test_disk_config.py
index 159f835061..8b49f26b3b 100644
--- a/nova/tests/unit/api/openstack/compute/test_disk_config.py
+++ b/nova/tests/unit/api/openstack/compute/test_disk_config.py
@@ -20,7 +20,6 @@ from oslo_serialization import jsonutils
from nova.api.openstack import compute
from nova.compute import api as compute_api
from nova.compute import flavors
-from nova import db
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -63,7 +62,7 @@ class DiskConfigTestCaseV21(test.TestCase):
if id_ == instance['id']:
return instance
- self.stubs.Set(db, 'instance_get', fake_instance_get)
+ self.stub_out('nova.db.instance_get', fake_instance_get)
def fake_instance_get_by_uuid(context, uuid,
columns_to_join=None, use_slave=False):
@@ -71,15 +70,15 @@ class DiskConfigTestCaseV21(test.TestCase):
if uuid == instance['uuid']:
return instance
- self.stubs.Set(db, 'instance_get_by_uuid',
- fake_instance_get_by_uuid)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid)
def fake_instance_get_all(context, *args, **kwargs):
return FAKE_INSTANCES
- self.stubs.Set(db, 'instance_get_all', fake_instance_get_all)
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_instance_get_all)
+ self.stub_out('nova.db.instance_get_all', fake_instance_get_all)
+ self.stub_out('nova.db.instance_get_all_by_filters',
+ fake_instance_get_all)
self.stubs.Set(objects.Instance, 'save',
lambda *args, **kwargs: None)
@@ -107,27 +106,26 @@ class DiskConfigTestCaseV21(test.TestCase):
def fake_instance_get_for_create(context, id_, *args, **kwargs):
return (inst, inst)
- self.stubs.Set(db, 'instance_update_and_get_original',
+ self.stub_out('nova.db.instance_update_and_get_original',
fake_instance_get_for_create)
def fake_instance_get_all_for_create(context, *args, **kwargs):
return [inst]
- self.stubs.Set(db, 'instance_get_all',
+ self.stub_out('nova.db.instance_get_all',
fake_instance_get_all_for_create)
- self.stubs.Set(db, 'instance_get_all_by_filters',
+ self.stub_out('nova.db.instance_get_all_by_filters',
fake_instance_get_all_for_create)
def fake_instance_add_security_group(context, instance_id,
security_group_id):
pass
- self.stubs.Set(db,
- 'instance_add_security_group',
- fake_instance_add_security_group)
+ self.stub_out('nova.db.instance_add_security_group',
+ fake_instance_add_security_group)
return inst
- self.stubs.Set(db, 'instance_create', fake_instance_create)
+ self.stub_out('nova.db.instance_create', fake_instance_create)
def _set_up_app(self):
self.app = compute.APIRouterV21(init_only=('servers', 'images',
diff --git a/nova/tests/unit/api/openstack/compute/test_extended_availability_zone.py b/nova/tests/unit/api/openstack/compute/test_extended_availability_zone.py
index d685658b1a..0f9c6fae15 100644
--- a/nova/tests/unit/api/openstack/compute/test_extended_availability_zone.py
+++ b/nova/tests/unit/api/openstack/compute/test_extended_availability_zone.py
@@ -19,7 +19,6 @@ import webob
from nova import availability_zones
from nova import compute
from nova.compute import vm_states
-from nova import db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
@@ -86,7 +85,7 @@ class ExtendedAvailabilityZoneTestV21(test.TestCase):
self.stubs.Set(availability_zones, 'get_host_availability_zone',
fake_get_host_availability_zone)
return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
def _make_request(self, url):
req = webob.Request.blank(url)
diff --git a/nova/tests/unit/api/openstack/compute/test_extended_server_attributes.py b/nova/tests/unit/api/openstack/compute/test_extended_server_attributes.py
index 6ca36ec23b..834044fbcd 100644
--- a/nova/tests/unit/api/openstack/compute/test_extended_server_attributes.py
+++ b/nova/tests/unit/api/openstack/compute/test_extended_server_attributes.py
@@ -19,7 +19,6 @@ import webob
from nova.api.openstack import wsgi as os_wsgi
from nova import compute
-from nova import db
from nova import exception
from nova import objects
from nova import test
@@ -34,6 +33,12 @@ UUID4 = '00000000-0000-0000-0000-000000000004'
UUID5 = '00000000-0000-0000-0000-000000000005'
+def fake_services(host):
+ service_list = [objects.Service(id=0, host=host, forced_down=True,
+ binary='nova-compute')]
+ return objects.ServiceList(objects=service_list)
+
+
def fake_compute_get(*args, **kwargs):
return fakes.stub_instance_obj(
None, 1, uuid=UUID3, host="host-fake",
@@ -42,7 +47,8 @@ def fake_compute_get(*args, **kwargs):
kernel_id=UUID4, ramdisk_id=UUID5,
display_name="hostname-1",
root_device_name="/dev/vda",
- user_data="userdata")
+ user_data="userdata",
+ services=fake_services("host-fake"))
def fake_compute_get_all(*args, **kwargs):
@@ -53,14 +59,16 @@ def fake_compute_get_all(*args, **kwargs):
kernel_id=UUID4, ramdisk_id=UUID5,
display_name="hostname-1",
root_device_name="/dev/vda",
- user_data="userdata"),
+ user_data="userdata",
+ services=fake_services("host-1")),
fakes.stub_instance_obj(
None, 2, uuid=UUID2, host="host-2", node="node-2",
reservation_id="r-2", launch_index=1,
kernel_id=UUID4, ramdisk_id=UUID5,
display_name="hostname-2",
root_device_name="/dev/vda",
- user_data="userdata"),
+ user_data="userdata",
+ services=fake_services("host-2")),
]
return objects.InstanceList(objects=inst_list)
@@ -76,7 +84,7 @@ class ExtendedServerAttributesTestV21(test.TestCase):
fakes.stub_out_nw_api(self)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- self.stubs.Set(db, 'instance_get_by_uuid', fake_compute_get)
+ self.stub_out('nova.db.instance_get_by_uuid', fake_compute_get)
def _make_request(self, url):
req = fakes.HTTPRequest.blank(url)
@@ -208,3 +216,36 @@ class ExtendedServerAttributesTestV23(ExtendedServerAttributesTestV21):
hostname="hostname-%s" % (i + 1),
root_device_name="/dev/vda",
user_data="userdata")
+
+
+class ExtendedServerAttributesTestV216(ExtendedServerAttributesTestV21):
+ wsgi_api_version = '2.16'
+
+ def assertServerAttributes(self, server, host, node, instance_name,
+ host_status):
+ super(ExtendedServerAttributesTestV216, self).assertServerAttributes(
+ server, host, node, instance_name)
+ self.assertEqual(server.get('host_status'), host_status)
+
+ def test_show(self):
+ url = self.fake_url + '/servers/%s' % UUID3
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertServerAttributes(self._get_server(res.body),
+ host='host-fake',
+ node='node-fake',
+ instance_name=NAME_FMT % 1,
+ host_status="DOWN")
+
+ def test_detail(self):
+ url = self.fake_url + '/servers/detail'
+ res = self._make_request(url)
+
+ self.assertEqual(res.status_int, 200)
+ for i, server in enumerate(self._get_servers(res.body)):
+ self.assertServerAttributes(server,
+ host='host-%s' % (i + 1),
+ node='node-%s' % (i + 1),
+ instance_name=NAME_FMT % (i + 1),
+ host_status="DOWN")
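
These 2.16 tests expect host_status of "DOWN" because every fake service is built with forced_down=True. As an assumed simplification of the derivation (the real API also reports MAINTENANCE and UNKNOWN states):

    def host_status_for(forced_down, heartbeat_fresh):
        """Assumed simplification: forced_down wins over liveness."""
        if forced_down:
            return 'DOWN'
        return 'UP' if heartbeat_fresh else 'DOWN'


    assert host_status_for(forced_down=True, heartbeat_fresh=True) == 'DOWN'
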
diff --git a/nova/tests/unit/api/openstack/compute/test_extended_status.py b/nova/tests/unit/api/openstack/compute/test_extended_status.py
index 776ab1294c..b5888941ad 100644
--- a/nova/tests/unit/api/openstack/compute/test_extended_status.py
+++ b/nova/tests/unit/api/openstack/compute/test_extended_status.py
@@ -17,7 +17,6 @@ from oslo_serialization import jsonutils
import webob
from nova import compute
-from nova import db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
@@ -73,7 +72,7 @@ class ExtendedStatusTestV21(test.TestCase):
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
self._set_flags()
return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
def _get_server(self, body):
return jsonutils.loads(body).get('server')
diff --git a/nova/tests/unit/api/openstack/compute/test_extended_volumes.py b/nova/tests/unit/api/openstack/compute/test_extended_volumes.py
index dd7ead0498..9f34a7a257 100644
--- a/nova/tests/unit/api/openstack/compute/test_extended_volumes.py
+++ b/nova/tests/unit/api/openstack/compute/test_extended_volumes.py
@@ -21,7 +21,6 @@ from nova.api.openstack.compute import (extended_volumes
as extended_volumes_v21)
from nova.api.openstack import wsgi as os_wsgi
from nova import compute
-from nova import db
from nova import objects
from nova.objects import instance as instance_obj
from nova import test
@@ -99,12 +98,12 @@ class ExtendedVolumesTestV21(test.TestCase):
fakes.stub_out_nw_api(self)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance_uuids',
+ self.stub_out('nova.db.block_device_mapping_get_all_by_instance_uuids',
fake_bdms_get_all_by_instance_uuids)
self._setUp()
self.app = self._setup_app()
return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
def _setup_app(self):
return fakes.wsgi_app_v21(init_only=('os-extended-volumes',
diff --git a/nova/tests/unit/api/openstack/compute/test_fixed_ips.py b/nova/tests/unit/api/openstack/compute/test_fixed_ips.py
index b285c19577..9f93923125 100644
--- a/nova/tests/unit/api/openstack/compute/test_fixed_ips.py
+++ b/nova/tests/unit/api/openstack/compute/test_fixed_ips.py
@@ -20,7 +20,6 @@ from nova.api.openstack.compute.legacy_v2.contrib import fixed_ips \
as fixed_ips_v2
from nova.api.openstack import wsgi as os_wsgi
from nova import context
-from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -127,9 +126,9 @@ class FixedIpTestV21(test.NoDBTestCase):
def setUp(self):
super(FixedIpTestV21, self).setUp()
- self.stubs.Set(db, "fixed_ip_get_by_address",
- fake_fixed_ip_get_by_address)
- self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
+ self.stub_out("nova.db.fixed_ip_get_by_address",
+ fake_fixed_ip_get_by_address)
+ self.stub_out("nova.db.fixed_ip_update", fake_fixed_ip_update)
self.context = context.get_admin_context()
self.controller = self.fixed_ips.FixedIPController()
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_access.py b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
index e463dc64cb..e906fe18fc 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_access.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_access.py
@@ -26,7 +26,6 @@ from nova.api.openstack.compute.legacy_v2.contrib import flavor_access \
as flavor_access_v2
from nova.api.openstack.compute.legacy_v2 import flavors as flavors_api
from nova import context
-from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -139,12 +138,12 @@ class FlavorAccessTestV21(test.NoDBTestCase):
self.req = FakeRequest()
self.req.environ = {"nova.context": context.RequestContext('fake_user',
'fake')}
- self.stubs.Set(db, 'flavor_get_by_flavor_id',
- fake_get_flavor_by_flavor_id)
- self.stubs.Set(db, 'flavor_get_all',
- fake_get_all_flavors_sorted_list)
- self.stubs.Set(db, 'flavor_access_get_by_flavor_id',
- fake_get_flavor_access_by_flavor_id)
+ self.stub_out('nova.db.flavor_get_by_flavor_id',
+ fake_get_flavor_by_flavor_id)
+ self.stub_out('nova.db.flavor_get_all',
+ fake_get_all_flavors_sorted_list)
+ self.stub_out('nova.db.flavor_access_get_by_flavor_id',
+ fake_get_flavor_access_by_flavor_id)
self.flavor_access_controller = self.FlavorAccessController()
self.flavor_action_controller = self.FlavorActionController()
@@ -288,8 +287,8 @@ class FlavorAccessTestV21(test.NoDBTestCase):
def stub_add_flavor_access(context, flavorid, projectid):
self.assertEqual('3', flavorid, "flavorid")
self.assertEqual("proj2", projectid, "projectid")
- self.stubs.Set(db, 'flavor_access_add',
- stub_add_flavor_access)
+ self.stub_out('nova.db.flavor_access_add',
+ stub_add_flavor_access)
expected = {'flavor_access':
[{'flavor_id': '3', 'tenant_id': 'proj3'}]}
body = {'addTenantAccess': {'tenant': 'proj2'}}
@@ -325,8 +324,8 @@ class FlavorAccessTestV21(test.NoDBTestCase):
def stub_add_flavor_access(context, flavorid, projectid):
raise exception.FlavorAccessExists(flavor_id=flavorid,
project_id=projectid)
- self.stubs.Set(db, 'flavor_access_add',
- stub_add_flavor_access)
+ self.stub_out('nova.db.flavor_access_add',
+ stub_add_flavor_access)
body = {'addTenantAccess': {'tenant': 'proj2'}}
add_access = self._get_add_access()
self.assertRaises(exc.HTTPConflict,
@@ -336,8 +335,8 @@ class FlavorAccessTestV21(test.NoDBTestCase):
def stub_remove_flavor_access(context, flavorid, projectid):
raise exception.FlavorAccessNotFound(flavor_id=flavorid,
project_id=projectid)
- self.stubs.Set(db, 'flavor_access_remove',
- stub_remove_flavor_access)
+ self.stub_out('nova.db.flavor_access_remove',
+ stub_remove_flavor_access)
body = {'removeTenantAccess': {'tenant': 'proj2'}}
remove_access = self._get_remove_access()
self.assertRaises(exc.HTTPNotFound,
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_disabled.py b/nova/tests/unit/api/openstack/compute/test_flavor_disabled.py
index 59b769c66d..4bf70bdc97 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_disabled.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_disabled.py
@@ -76,7 +76,7 @@ class FlavorDisabledTestV21(test.NoDBTestCase):
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app_v21(init_only=('flavors')))
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('flavors',)))
return res
def _get_flavor(self, body):
@@ -110,5 +110,5 @@ class FlavorDisabledTestV2(FlavorDisabledTestV21):
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app())
+ res = req.get_response(fakes.wsgi_app(init_only=('flavors',)))
return res
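
The one-character fix above is worth spelling out: ('flavors') is just a parenthesized string, so init_only membership checks silently degrade to substring matching, while ('flavors',) is the intended one-element tuple. For example:

    assert ('flavors') == 'flavors'     # parens alone do not make a tuple
    assert 'avo' in ('flavors')         # substring match -- usually a bug
    assert 'avo' not in ('flavors',)    # tuple membership -- what was meant
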
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_manage.py b/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
index 8b0f5329b2..96ec213198 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_manage.py
@@ -27,7 +27,6 @@ from nova.api.openstack.compute.legacy_v2.contrib import flavor_access \
from nova.api.openstack.compute.legacy_v2.contrib import flavormanage \
as flavormanage_v2
from nova.compute import flavors
-from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -104,8 +103,7 @@ class FlavorManageTestV21(test.NoDBTestCase):
"get_flavor_by_flavor_id",
fake_get_flavor_by_flavor_id)
self.stubs.Set(flavors, "destroy", fake_destroy)
- self.stubs.Set(db, "flavor_create", fake_create)
- self.app = self._setup_app()
+ self.stub_out("nova.db.flavor_create", fake_create)
self.request_body = {
"flavor": {
@@ -125,7 +123,8 @@ class FlavorManageTestV21(test.NoDBTestCase):
def _get_http_request(self, url=''):
return fakes.HTTPRequest.blank(url)
- def _setup_app(self):
+ @property
+ def app(self):
return fakes.wsgi_app_v21(init_only=('os-flavor-manage',
'os-flavor-rxtx',
'os-flavor-access', 'flavors',
@@ -379,7 +378,6 @@ class PrivateFlavorManageTestV21(test.TestCase):
super(PrivateFlavorManageTestV21, self).setUp()
self.flavor_access_controller = (flavor_access_v21.
FlavorAccessController())
- self.app = self._setup_app()
self.expected = {
"flavor": {
"name": "test",
@@ -392,7 +390,8 @@ class PrivateFlavorManageTestV21(test.TestCase):
}
}
- def _setup_app(self):
+ @property
+ def app(self):
return fakes.wsgi_app_v21(init_only=('os-flavor-manage',
'os-flavor-access',
'os-flavor-rxtx', 'flavors',
@@ -447,7 +446,8 @@ class FlavorManageTestV2(FlavorManageTestV21):
osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])
- def _setup_app(self):
+ @property
+ def app(self):
return fakes.wsgi_app(init_only=('flavors',),
fake_auth_context=self._get_http_request().
environ['nova.context'])
@@ -478,7 +478,8 @@ class PrivateFlavorManageTestV2(PrivateFlavorManageTestV21):
self.flavor_access_controller = (flavor_access_v2.
FlavorAccessController())
- def _setup_app(self):
+ @property
+ def app(self):
return fakes.wsgi_app(init_only=('flavors',),
fake_auth_context=self._get_http_request().
environ['nova.context'])
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_rxtx.py b/nova/tests/unit/api/openstack/compute/test_flavor_rxtx.py
index 96fb09c18d..87663907c4 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_rxtx.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_rxtx.py
@@ -113,4 +113,4 @@ class FlavorRxtxTestV21(test.NoDBTestCase):
class FlavorRxtxTestV20(FlavorRxtxTestV21):
def _get_app(self):
- return fakes.wsgi_app()
+ return fakes.wsgi_app(init_only=('flavors',))
diff --git a/nova/tests/unit/api/openstack/compute/test_flavor_swap.py b/nova/tests/unit/api/openstack/compute/test_flavor_swap.py
index 809b464d8b..8fb0859329 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavor_swap.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavor_swap.py
@@ -77,7 +77,7 @@ class FlavorSwapTestV21(test.NoDBTestCase):
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app_v21(init_only=('flavors')))
+ res = req.get_response(fakes.wsgi_app_v21(init_only=('flavors',)))
return res
def _get_flavor(self, body):
@@ -111,5 +111,5 @@ class FlavorSwapTestV2(FlavorSwapTestV21):
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
- res = req.get_response(fakes.wsgi_app())
+ res = req.get_response(fakes.wsgi_app(init_only=('flavors',)))
return res
diff --git a/nova/tests/unit/api/openstack/compute/test_flavorextradata.py b/nova/tests/unit/api/openstack/compute/test_flavorextradata.py
index f5d9164fdd..b584e4b315 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavorextradata.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavorextradata.py
@@ -64,10 +64,10 @@ class FlavorExtraDataTestV21(test.NoDBTestCase):
fake_get_flavor_by_flavor_id)
self.stubs.Set(flavors, 'get_all_flavors_sorted_list',
fake_get_all_flavors_sorted_list)
- self._setup_app()
- def _setup_app(self):
- self.app = fakes.wsgi_app_v21(init_only=('flavors'))
+ @property
+ def app(self):
+ return fakes.wsgi_app_v21(init_only=('flavors',))
def _verify_flavor_response(self, flavor, expected):
for key in expected:
@@ -123,5 +123,6 @@ class FlavorExtraDataTestV21(test.NoDBTestCase):
class FlavorExtraDataTestV2(FlavorExtraDataTestV21):
- def _setup_app(self):
- self.app = fakes.wsgi_app(init_only=('flavors',))
+ @property
+ def app(self):
+ return fakes.wsgi_app(init_only=('flavors',))
diff --git a/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py b/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
index 783b3af289..80e3d54f74 100644
--- a/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
+++ b/nova/tests/unit/api/openstack/compute/test_flavors_extra_specs.py
@@ -20,7 +20,6 @@ from nova.api.openstack.compute import flavors_extraspecs \
as flavorextraspecs_v21
from nova.api.openstack.compute.legacy_v2.contrib import flavorextraspecs \
as flavorextraspecs_v2
-import nova.db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -83,14 +82,22 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
self.assertEqual('value1', res_dict['extra_specs']['key1'])
def test_index_no_data(self):
- self.stubs.Set(nova.db, 'flavor_extra_specs_get',
- return_empty_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_get',
+ return_empty_flavor_extra_specs)
req = self._get_request('1/os-extra_specs')
res_dict = self.controller.index(req, 1)
self.assertEqual(0, len(res_dict['extra_specs']))
+ def test_index_flavor_not_found(self):
+ req = self._get_request('1/os-extra_specs',
+ use_admin_context=True)
+ with mock.patch('nova.db.flavor_get_by_flavor_id') as mock_get:
+ mock_get.side_effect = exception.FlavorNotFound(flavor_id='1')
+ self.assertRaises(webob.exc.HTTPNotFound, self.controller.index,
+ req, 1)
+
def test_show(self):
flavor = dict(test_flavor.fake_flavor,
extra_specs={'key5': 'value5'})
@@ -102,8 +109,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
self.assertEqual('value5', res_dict['key5'])
def test_show_spec_not_found(self):
- self.stubs.Set(nova.db, 'flavor_extra_specs_get',
- return_empty_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_get',
+ return_empty_flavor_extra_specs)
req = self._get_request('1/os-extra_specs/key6')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
@@ -130,8 +137,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
def test_delete(self):
flavor = dict(test_flavor.fake_flavor,
extra_specs={'key5': 'value5'})
- self.stubs.Set(nova.db, 'flavor_extra_specs_delete',
- delete_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_delete',
+ delete_flavor_extra_specs)
req = self._get_request('1/os-extra_specs/key5',
use_admin_context=True)
@@ -140,8 +147,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
self.controller.delete(req, 1, 'key5')
def test_delete_no_admin(self):
- self.stubs.Set(nova.db, 'flavor_extra_specs_delete',
- delete_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_delete',
+ delete_flavor_extra_specs)
req = self._get_request('1/os-extra_specs/key5')
self.assertRaises(exception.Forbidden, self.controller.delete,
@@ -154,9 +161,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
req, 1, 'key6')
def test_create(self):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
body = {"extra_specs": {"key1": "value1", "key2": 0.5, "key3": 5}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
@@ -167,9 +173,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
self.assertEqual(5, res_dict['extra_specs']['key3'])
def test_create_no_admin(self):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
body = {"extra_specs": {"key1": "value1"}}
req = self._get_request('1/os-extra_specs')
@@ -180,9 +185,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
raise exception.FlavorNotFound(flavor_id='')
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- fake_instance_type_extra_specs_update_or_create)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
body = {"extra_specs": {"key1": "value1"}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
@@ -192,18 +196,16 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
raise exception.FlavorExtraSpecUpdateCreateFailed(id=1, retries=5)
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- fake_instance_type_extra_specs_update_or_create)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
body = {"extra_specs": {"key1": "value1"}}
req = self._get_request('1/os-extra_specs', use_admin_context=True)
self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
req, 1, body=body)
def _test_create_bad_request(self, body):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
req = self._get_request('1/os-extra_specs', use_admin_context=True)
self.assertRaises(self.bad_request, self.controller.create,
@@ -264,9 +266,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
self.assertEqual('value1', res_dict['extra_specs'][key])
def test_update_item(self):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1',
@@ -276,9 +277,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
self.assertEqual('value1', res_dict['key1'])
def test_update_item_no_admin(self):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1')
@@ -286,9 +286,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
req, 1, 'key1', body=body)
def _test_update_item_bad_request(self, body):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
@@ -323,9 +322,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
self._test_update_item_bad_request({"key1": value})
def test_update_item_body_uri_mismatch(self):
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/bad', use_admin_context=True)
@@ -336,9 +334,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
raise exception.FlavorNotFound(flavor_id='')
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- fake_instance_type_extra_specs_update_or_create)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1',
@@ -350,9 +347,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
def fake_instance_type_extra_specs_update_or_create(*args, **kwargs):
raise exception.FlavorExtraSpecUpdateCreateFailed(id=1, retries=5)
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- fake_instance_type_extra_specs_update_or_create)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ fake_instance_type_extra_specs_update_or_create)
body = {"key1": "value1"}
req = self._get_request('1/os-extra_specs/key1',
@@ -362,9 +358,8 @@ class FlavorsExtraSpecsTestV21(test.TestCase):
def test_update_really_long_integer_value(self):
value = 10 ** 1000
- self.stubs.Set(nova.db,
- 'flavor_extra_specs_update_or_create',
- return_create_flavor_extra_specs)
+ self.stub_out('nova.db.flavor_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
req = self._get_request('1/os-extra_specs/key1',
use_admin_context=True)
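
Alongside the stub_out migration, the new test_index_flavor_not_found uses mock.patch as a context manager with a side_effect exception, which scopes the patch to a few lines instead of the whole test. The idiom, self-contained (patching os.path.exists purely for illustration):

    import mock  # on Python 3: from unittest import mock

    import os.path

    with mock.patch('os.path.exists') as mock_exists:
        mock_exists.side_effect = OSError('boom')
        try:
            os.path.exists('/anything')
        except OSError:
            pass
        # The call is recorded even though side_effect raised.
        mock_exists.assert_called_once_with('/anything')

    assert os.path.exists('/') is True  # patch is undone on exit
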
diff --git a/nova/tests/unit/api/openstack/compute/test_floating_ips.py b/nova/tests/unit/api/openstack/compute/test_floating_ips.py
index 4cb36b583e..4cc434d102 100644
--- a/nova/tests/unit/api/openstack/compute/test_floating_ips.py
+++ b/nova/tests/unit/api/openstack/compute/test_floating_ips.py
@@ -216,8 +216,8 @@ class FloatingIpTestV21(test.TestCase):
stub_nw_info(self))
fake_network.stub_out_nw_api_get_instance_nw_info(self)
- self.stubs.Set(db, 'instance_get',
- fake_instance_get)
+ self.stub_out('nova.db.instance_get',
+ fake_instance_get)
self.context = context.get_admin_context()
self._create_floating_ips()
@@ -791,8 +791,8 @@ class ExtendedFloatingIpTestV21(test.TestCase):
stub_nw_info(self))
fake_network.stub_out_nw_api_get_instance_nw_info(self)
- self.stubs.Set(db, 'instance_get',
- fake_instance_get)
+ self.stub_out('nova.db.instance_get',
+ fake_instance_get)
self.context = context.get_admin_context()
self._create_floating_ips()
diff --git a/nova/tests/unit/api/openstack/compute/test_fping.py b/nova/tests/unit/api/openstack/compute/test_fping.py
index e30c8b0adf..817312c264 100644
--- a/nova/tests/unit/api/openstack/compute/test_fping.py
+++ b/nova/tests/unit/api/openstack/compute/test_fping.py
@@ -40,10 +40,10 @@ class FpingTestV21(test.TestCase):
self.flags(verbose=True, use_ipv6=False)
return_server = fakes.fake_instance_get()
return_servers = fakes.fake_instance_get_all_by_filters()
- self.stubs.Set(nova.db, "instance_get_all_by_filters",
- return_servers)
- self.stubs.Set(nova.db, "instance_get_by_uuid",
- return_server)
+ self.stub_out("nova.db.instance_get_all_by_filters",
+ return_servers)
+ self.stub_out("nova.db.instance_get_by_uuid",
+ return_server)
self.stubs.Set(nova.utils, "execute",
execute)
self.stubs.Set(self.controller_cls, "check_fping",
diff --git a/nova/tests/unit/api/openstack/compute/test_hide_server_addresses.py b/nova/tests/unit/api/openstack/compute/test_hide_server_addresses.py
index 855f938fbf..fd7a0d45a3 100644
--- a/nova/tests/unit/api/openstack/compute/test_hide_server_addresses.py
+++ b/nova/tests/unit/api/openstack/compute/test_hide_server_addresses.py
@@ -19,7 +19,6 @@ import webob
from nova import compute
from nova.compute import vm_states
-from nova import db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
@@ -50,7 +49,7 @@ class HideServerAddressesTestV21(test.TestCase):
super(HideServerAddressesTestV21, self).setUp()
fakes.stub_out_nw_api(self)
return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
self._setup_wsgi()
def _make_request(self, url):
diff --git a/nova/tests/unit/api/openstack/compute/test_hosts.py b/nova/tests/unit/api/openstack/compute/test_hosts.py
index 948cbe3a2e..a4d63fb1ea 100644
--- a/nova/tests/unit/api/openstack/compute/test_hosts.py
+++ b/nova/tests/unit/api/openstack/compute/test_hosts.py
@@ -139,11 +139,11 @@ class HostTestCaseV21(test.TestCase):
def _setup_stubs(self):
# Pretend we have fake_hosts.HOST_LIST in the DB
- self.stubs.Set(db, 'service_get_all',
- stub_service_get_all)
+ self.stub_out('nova.db.service_get_all',
+ stub_service_get_all)
# Only hosts in our fake DB exist
- self.stubs.Set(db, 'service_get_by_host_and_binary',
- stub_service_get_by_host_and_binary)
+ self.stub_out('nova.db.service_get_by_host_and_binary',
+ stub_service_get_by_host_and_binary)
# 'host_c1' always succeeds, and 'host_c2'
self.stubs.Set(self.hosts_api, 'set_host_enabled',
stub_set_host_enabled)
@@ -193,8 +193,8 @@ class HostTestCaseV21(test.TestCase):
def stub_service_get_all_notimpl(self, req):
return [{'host': 'notimplemented', 'topic': None,
'availability_zone': None}]
- self.stubs.Set(db, 'service_get_all',
- stub_service_get_all_notimpl)
+ self.stub_out('nova.db.service_get_all',
+ stub_service_get_all_notimpl)
body = {key: val}
self.assertRaises(webob.exc.HTTPNotImplemented,
self.controller.update,
diff --git a/nova/tests/unit/api/openstack/compute/test_hypervisors.py b/nova/tests/unit/api/openstack/compute/test_hypervisors.py
index 0961f42ab4..115e0a7a53 100644
--- a/nova/tests/unit/api/openstack/compute/test_hypervisors.py
+++ b/nova/tests/unit/api/openstack/compute/test_hypervisors.py
@@ -25,7 +25,6 @@ from nova.api.openstack.compute.legacy_v2.contrib import hypervisors \
from nova.api.openstack import extensions
from nova.cells import utils as cells_utils
from nova import context
-from nova import db
from nova import exception
from nova import objects
from nova import test
@@ -208,8 +207,8 @@ class HypervisorsTestV21(test.NoDBTestCase):
fake_compute_node_search_by_hypervisor)
self.stubs.Set(self.controller.host_api, 'compute_node_get',
fake_compute_node_get)
- self.stubs.Set(db, 'compute_node_statistics',
- fake_compute_node_statistics)
+ self.stub_out('nova.db.compute_node_statistics',
+ fake_compute_node_statistics)
def test_view_hypervisor_nodetail_noservers(self):
result = self.controller._view_hypervisor(
diff --git a/nova/tests/unit/api/openstack/compute/test_image_size.py b/nova/tests/unit/api/openstack/compute/test_image_size.py
index 5f75aae496..48b92d8d41 100644
--- a/nova/tests/unit/api/openstack/compute/test_image_size.py
+++ b/nova/tests/unit/api/openstack/compute/test_image_size.py
@@ -88,7 +88,7 @@ class ImageSizeTestV21(test.NoDBTestCase):
return res
def _get_app(self):
- return fakes.wsgi_app_v21()
+ return fakes.wsgi_app_v21(init_only=('images', 'image-size'))
def _get_image(self, body):
return jsonutils.loads(body).get('image')
@@ -119,4 +119,4 @@ class ImageSizeTestV21(test.NoDBTestCase):
class ImageSizeTestV2(ImageSizeTestV21):
def _get_app(self):
- return fakes.wsgi_app()
+ return fakes.wsgi_app(init_only=('images',))
diff --git a/nova/tests/unit/api/openstack/compute/test_instance_actions.py b/nova/tests/unit/api/openstack/compute/test_instance_actions.py
index d9ac0f9f4b..220de4cec6 100644
--- a/nova/tests/unit/api/openstack/compute/test_instance_actions.py
+++ b/nova/tests/unit/api/openstack/compute/test_instance_actions.py
@@ -23,8 +23,8 @@ from webob import exc
from nova.api.openstack.compute import instance_actions as instance_actions_v21
from nova.api.openstack.compute.legacy_v2.contrib import instance_actions \
as instance_actions_v2
+from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
-from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import objects
@@ -96,7 +96,8 @@ class InstanceActionsPolicyTestV21(test.NoDBTestCase):
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid)
req = self._get_http_req('os-instance-actions')
self.assertRaises(exception.Forbidden, self.controller.index, req,
str(uuid.uuid4()))
@@ -111,7 +112,8 @@ class InstanceActionsPolicyTestV21(test.NoDBTestCase):
**{'name': 'fake', 'project_id': '%s_unequal' %
context.project_id})
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid)
req = self._get_http_req('os-instance-actions/1')
self.assertRaises(exception.Forbidden, self.controller.show, req,
str(uuid.uuid4()), '1')
@@ -129,6 +131,11 @@ class InstanceActionsPolicyTestV2(InstanceActionsPolicyTestV21):
class InstanceActionsTestV21(test.NoDBTestCase):
instance_actions = instance_actions_v21
+ wsgi_api_version = os_wsgi.DEFAULT_API_VERSION
+
+ def fake_get(self, context, instance_uuid, expected_attrs=None,
+ want_objects=False):
+ return objects.Instance(uuid=instance_uuid)
def setUp(self):
super(InstanceActionsTestV21, self).setUp()
@@ -136,21 +143,19 @@ class InstanceActionsTestV21(test.NoDBTestCase):
self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS)
self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS)
- def fake_get(self, context, instance_uuid, expected_attrs=None,
- want_objects=False):
- return objects.Instance(uuid=instance_uuid)
-
def fake_instance_get_by_uuid(context, instance_id, use_slave=False):
return fake_instance.fake_instance_obj(None,
**{'name': 'fake', 'project_id': context.project_id})
- self.stubs.Set(compute_api.API, 'get', fake_get)
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stubs.Set(compute_api.API, 'get', self.fake_get)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid)
def _get_http_req(self, action, use_admin_context=False):
fake_url = '/123/servers/12/%s' % action
return fakes.HTTPRequest.blank(fake_url,
- use_admin_context=use_admin_context)
+ use_admin_context=use_admin_context,
+ version=self.wsgi_api_version)
def _set_policy_rules(self):
rules = {'compute:get': '',
@@ -167,7 +172,7 @@ class InstanceActionsTestV21(test.NoDBTestCase):
actions.append(action)
return actions
- self.stubs.Set(db, 'actions_get', fake_get_actions)
+ self.stub_out('nova.db.actions_get', fake_get_actions)
req = self._get_http_req('os-instance-actions')
res_dict = self.controller.index(req, FAKE_UUID)
for res in res_dict['instanceActions']:
@@ -188,8 +193,8 @@ class InstanceActionsTestV21(test.NoDBTestCase):
events.append(event)
return events
- self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
- self.stubs.Set(db, 'action_events_get', fake_get_events)
+ self.stub_out('nova.db.action_get_by_request_id', fake_get_action)
+ self.stub_out('nova.db.action_events_get', fake_get_events)
req = self._get_http_req('os-instance-actions/1',
use_admin_context=True)
res_dict = self.controller.show(req, FAKE_UUID, FAKE_REQUEST_ID)
@@ -206,8 +211,8 @@ class InstanceActionsTestV21(test.NoDBTestCase):
def fake_get_events(context, action_id):
return self.fake_events[action_id]
- self.stubs.Set(db, 'action_get_by_request_id', fake_get_action)
- self.stubs.Set(db, 'action_events_get', fake_get_events)
+ self.stub_out('nova.db.action_get_by_request_id', fake_get_action)
+ self.stub_out('nova.db.action_events_get', fake_get_events)
self._set_policy_rules()
req = self._get_http_req('os-instance-actions/1')
@@ -220,7 +225,7 @@ class InstanceActionsTestV21(test.NoDBTestCase):
def fake_no_action(context, uuid, action_id):
return None
- self.stubs.Set(db, 'action_get_by_request_id', fake_no_action)
+ self.stub_out('nova.db.action_get_by_request_id', fake_no_action)
req = self._get_http_req('os-instance-actions/1')
self.assertRaises(exc.HTTPNotFound, self.controller.show, req,
FAKE_UUID, FAKE_REQUEST_ID)
@@ -244,6 +249,15 @@ class InstanceActionsTestV21(test.NoDBTestCase):
FAKE_UUID, 'fake')
+class InstanceActionsTestV221(InstanceActionsTestV21):
+ wsgi_api_version = "2.21"
+
+ def fake_get(self, context, instance_uuid, expected_attrs=None,
+ want_objects=False):
+ self.assertEqual('yes', context.read_deleted)
+ return objects.Instance(uuid=instance_uuid)
+
+
class InstanceActionsTestV2(InstanceActionsTestV21):
instance_actions = instance_actions_v2
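
Promoting fake_get from a setUp-local closure to a method is what lets the new 2.21 subclass reuse the whole test class while tightening one assertion: microversion 2.21 lists actions for deleted instances, so its lookups should read deleted rows. Condensed from the hunks above:

    class InstanceActionsTestV21(test.NoDBTestCase):
        def fake_get(self, context, instance_uuid, expected_attrs=None,
                     want_objects=False):
            return objects.Instance(uuid=instance_uuid)


    class InstanceActionsTestV221(InstanceActionsTestV21):
        wsgi_api_version = '2.21'

        def fake_get(self, context, instance_uuid, expected_attrs=None,
                     want_objects=False):
            # 2.21 can show actions of deleted servers, so every lookup
            # is expected to carry read_deleted='yes'.
            self.assertEqual('yes', context.read_deleted)
            return objects.Instance(uuid=instance_uuid)
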
diff --git a/nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py b/nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py
index 884f7d5e76..4ecdb553c5 100644
--- a/nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py
+++ b/nova/tests/unit/api/openstack/compute/test_instance_usage_audit_log.py
@@ -21,7 +21,6 @@ from nova.api.openstack.compute import instance_usage_audit_log as v21_ial
from nova.api.openstack.compute.legacy_v2.contrib \
import instance_usage_audit_log as ial
from nova import context
-from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -126,10 +125,8 @@ class InstanceUsageAuditLogTestV21(test.NoDBTestCase):
self.stubs.Set(utils, 'last_completed_audit_period',
fake_last_completed_audit_period)
- self.stubs.Set(db, 'service_get_all',
- fake_service_get_all)
- self.stubs.Set(db, 'task_log_get_all',
- fake_task_log_get_all)
+ self.stub_out('nova.db.service_get_all', fake_service_get_all)
+ self.stub_out('nova.db.task_log_get_all', fake_task_log_get_all)
self.req = fakes.HTTPRequest.blank('')
diff --git a/nova/tests/unit/api/openstack/compute/test_keypairs.py b/nova/tests/unit/api/openstack/compute/test_keypairs.py
index 884401233f..b9e05709b7 100644
--- a/nova/tests/unit/api/openstack/compute/test_keypairs.py
+++ b/nova/tests/unit/api/openstack/compute/test_keypairs.py
@@ -24,7 +24,6 @@ from nova.api.openstack.compute.legacy_v2.contrib import keypairs \
as keypairs_v2
from nova.api.openstack import wsgi as os_wsgi
from nova.compute import api as compute_api
-from nova import db
from nova import exception
from nova import objects
from nova import policy
@@ -32,6 +31,7 @@ from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.objects import test_keypair
+from nova.tests import uuidsentinel as uuids
QUOTAS = quota.QUOTAS
@@ -82,12 +82,12 @@ class KeypairsTestV21(test.TestCase):
fakes.stub_out_networking(self)
fakes.stub_out_rate_limiting(self.stubs)
- self.stubs.Set(db, "key_pair_get_all_by_user",
- db_key_pair_get_all_by_user)
- self.stubs.Set(db, "key_pair_create",
- db_key_pair_create)
- self.stubs.Set(db, "key_pair_destroy",
- db_key_pair_destroy)
+ self.stub_out("nova.db.key_pair_get_all_by_user",
+ db_key_pair_get_all_by_user)
+ self.stub_out("nova.db.key_pair_create",
+ db_key_pair_create)
+ self.stub_out("nova.db.key_pair_destroy",
+ db_key_pair_destroy)
self.flags(
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
@@ -233,7 +233,7 @@ class KeypairsTestV21(test.TestCase):
self.assertIn('Quota exceeded, too many key pairs.', ex.explanation)
def test_keypair_create_duplicate(self):
- self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
+ self.stub_out("nova.db.key_pair_create", db_key_pair_create_duplicate)
body = {'keypair': {'name': 'create_duplicate'}}
ex = self.assertRaises(webob.exc.HTTPConflict,
self.controller.create, self.req, body=body)
@@ -252,8 +252,8 @@ class KeypairsTestV21(test.TestCase):
def db_key_pair_get_not_found(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
- self.stubs.Set(db, "key_pair_destroy",
- db_key_pair_get_not_found)
+ self.stub_out("nova.db.key_pair_destroy",
+ db_key_pair_get_not_found)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, self.req, 'FAKE')
@@ -264,7 +264,7 @@ class KeypairsTestV21(test.TestCase):
name='foo', public_key='XXX', fingerprint='YYY',
type='ssh')
- self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
+ self.stub_out("nova.db.key_pair_get", _db_key_pair_get)
res_dict = self.controller.show(self.req, 'FAKE')
self.assertEqual('foo', res_dict['keypair']['name'])
@@ -277,17 +277,17 @@ class KeypairsTestV21(test.TestCase):
def _db_key_pair_get(context, user_id, name):
raise exception.KeypairNotFound(user_id=user_id, name=name)
- self.stubs.Set(db, "key_pair_get", _db_key_pair_get)
+ self.stub_out("nova.db.key_pair_get", _db_key_pair_get)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, self.req, 'FAKE')
def test_show_server(self):
- self.stubs.Set(db, 'instance_get',
- fakes.fake_instance_get())
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get())
- req = webob.Request.blank(self.base_url + '/servers/1')
+ self.stub_out('nova.db.instance_get',
+ fakes.fake_instance_get())
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fakes.fake_instance_get())
+ req = webob.Request.blank(self.base_url + '/servers/' + uuids.server)
req.headers['Content-Type'] = 'application/json'
response = req.get_response(self.app_server)
self.assertEqual(response.status_int, 200)
@@ -358,12 +358,10 @@ class KeypairPolicyTestV21(test.NoDBTestCase):
name='foo', public_key='XXX', fingerprint='YYY',
type='ssh')
- self.stubs.Set(db, "key_pair_get",
- _db_key_pair_get)
- self.stubs.Set(db, "key_pair_get_all_by_user",
- db_key_pair_get_all_by_user)
- self.stubs.Set(db, "key_pair_destroy",
- db_key_pair_destroy)
+ self.stub_out("nova.db.key_pair_get", _db_key_pair_get)
+ self.stub_out("nova.db.key_pair_get_all_by_user",
+ db_key_pair_get_all_by_user)
+ self.stub_out("nova.db.key_pair_destroy", db_key_pair_destroy)
self.req = fakes.HTTPRequest.blank('')
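
The uuids.server identifier used above comes from nova.tests.uuidsentinel: attribute access lazily generates one UUID string per name and returns the same value for the rest of the process, giving tests readable, collision-free IDs. A sketch of the idea (simplified, not the module's exact code):

import sys
import uuid


class UUIDSentinels(object):
    def __init__(self):
        self._sentinels = {}

    def __getattr__(self, name):
        # Only invoked when normal lookup fails, i.e. for sentinel names.
        if name.startswith('_'):
            raise AttributeError(name)
        if name not in self._sentinels:
            self._sentinels[name] = str(uuid.uuid4())
        return self._sentinels[name]


# Installing an instance in sys.modules lets callers write
# 'from nova.tests import uuidsentinel as uuids' and then 'uuids.server'.
sys.modules[__name__] = UUIDSentinels()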
diff --git a/nova/tests/unit/api/openstack/compute/test_microversions.py b/nova/tests/unit/api/openstack/compute/test_microversions.py
index 94c994f824..bea9e2c8d2 100644
--- a/nova/tests/unit/api/openstack/compute/test_microversions.py
+++ b/nova/tests/unit/api/openstack/compute/test_microversions.py
@@ -13,15 +13,12 @@
# under the License.
import mock
-from oslo_config import cfg
from oslo_serialization import jsonutils
from nova.api.openstack import api_version_request as api_version
from nova import test
from nova.tests.unit.api.openstack import fakes
-CONF = cfg.CONF
-
class MicroversionsTest(test.NoDBTestCase):
diff --git a/nova/tests/unit/api/openstack/compute/test_migrations.py b/nova/tests/unit/api/openstack/compute/test_migrations.py
index 9de4e7e38a..07f1d92127 100644
--- a/nova/tests/unit/api/openstack/compute/test_migrations.py
+++ b/nova/tests/unit/api/openstack/compute/test_migrations.py
@@ -41,6 +41,12 @@ fake_migrations = [
'new_instance_type_id': 2,
'migration_type': 'resize',
'hidden': False,
+ 'memory_total': 123456,
+ 'memory_processed': 12345,
+ 'memory_remaining': 120000,
+ 'disk_total': 234567,
+ 'disk_processed': 23456,
+ 'disk_remaining': 230000,
'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'deleted_at': None,
@@ -59,6 +65,12 @@ fake_migrations = [
'new_instance_type_id': 6,
'migration_type': 'resize',
'hidden': False,
+ 'memory_total': 456789,
+ 'memory_processed': 56789,
+ 'memory_remaining': 45000,
+ 'disk_total': 96789,
+ 'disk_processed': 6789,
+ 'disk_remaining': 96000,
'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
'deleted_at': None,
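
The six new fields in each fake row mirror the live-migration progress counters now carried on migrations: bytes processed and remaining out of a total, for memory and for disk. A hypothetical helper showing what the numbers encode (not nova code, just arithmetic over the fake rows above):

def migration_progress(migration):
    # Percent complete per transfer stream; a zero total is treated as
    # unknown progress rather than a division error.
    def pct(processed, total):
        return 100.0 * processed / total if total else 0.0
    return {'memory': pct(migration['memory_processed'],
                          migration['memory_total']),
            'disk': pct(migration['disk_processed'],
                        migration['disk_total'])}

fake = {'memory_total': 123456, 'memory_processed': 12345,
        'disk_total': 234567, 'disk_processed': 23456}
print(migration_progress(fake))  # both streams roughly 10% complete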
diff --git a/nova/tests/unit/api/openstack/compute/test_multiple_create.py b/nova/tests/unit/api/openstack/compute/test_multiple_create.py
index 44e9efbe48..0424c1381e 100644
--- a/nova/tests/unit/api/openstack/compute/test_multiple_create.py
+++ b/nova/tests/unit/api/openstack/compute/test_multiple_create.py
@@ -14,7 +14,6 @@
# under the License.
import datetime
-import uuid
from oslo_config import cfg
import webob
@@ -28,7 +27,6 @@ from nova.api.openstack.compute import servers as servers_v21
from nova.api.openstack import extensions as extensions_v20
from nova.compute import api as compute_api
from nova.compute import flavors
-from nova import db
from nova import exception
from nova.network import manager
from nova import test
@@ -37,11 +35,6 @@ from nova.tests.unit import fake_instance
from nova.tests.unit.image import fake
CONF = cfg.CONF
-FAKE_UUID = fakes.FAKE_UUID
-
-
-def fake_gen_uuid():
- return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
@@ -77,7 +70,7 @@ class MultiCreateExtensionTestV21(test.TestCase):
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
- 'uuid': FAKE_UUID,
+ 'uuid': inst['uuid'],
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
@@ -125,18 +118,15 @@ class MultiCreateExtensionTestV21(test.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self)
fakes.stub_out_nw_api(self)
- self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
- self.stubs.Set(db, 'instance_add_security_group',
- return_security_group)
- self.stubs.Set(db, 'project_get_networks',
- project_get_networks)
- self.stubs.Set(db, 'instance_create', instance_create)
- self.stubs.Set(db, 'instance_system_metadata_update',
- fake_method)
- self.stubs.Set(db, 'instance_get', instance_get)
- self.stubs.Set(db, 'instance_update', instance_update)
- self.stubs.Set(db, 'instance_update_and_get_original',
- server_update)
+ self.stub_out('nova.db.instance_add_security_group',
+ return_security_group)
+ self.stub_out('nova.db.project_get_networks', project_get_networks)
+ self.stub_out('nova.db.instance_create', instance_create)
+ self.stub_out('nova.db.instance_system_metadata_update', fake_method)
+ self.stub_out('nova.db.instance_get', instance_get)
+ self.stub_out('nova.db.instance_update', instance_update)
+ self.stub_out('nova.db.instance_update_and_get_original',
+ server_update)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
self.req = fakes.HTTPRequest.blank('')
@@ -357,7 +347,8 @@ class MultiCreateExtensionTestV21(test.TestCase):
res = self.controller.create(self.req, body=body).obj
- self.assertEqual(FAKE_UUID, res["server"]["id"])
+ instance_uuids = self.instance_cache_by_uuid.keys()
+ self.assertIn(res["server"]["id"], instance_uuids)
self._check_admin_password_len(res["server"])
def test_create_multiple_instances_pass_disabled(self):
@@ -380,7 +371,8 @@ class MultiCreateExtensionTestV21(test.TestCase):
res = self.controller.create(self.req, body=body).obj
- self.assertEqual(FAKE_UUID, res["server"]["id"])
+ instance_uuids = self.instance_cache_by_uuid.keys()
+ self.assertIn(res["server"]["id"], instance_uuids)
self._check_admin_password_missing(res["server"])
def _check_admin_password_len(self, server_dict):
@@ -545,7 +537,7 @@ class MultiCreateExtensionTestV2(MultiCreateExtensionTestV21):
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
- 'uuid': FAKE_UUID,
+ 'uuid': inst['uuid'],
'instance_type': inst_type,
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
@@ -577,9 +569,8 @@ class MultiCreateExtensionTestV2(MultiCreateExtensionTestV21):
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self)
- self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
- self.stubs.Set(db, 'instance_create', instance_create)
- self.stubs.Set(db, 'instance_get', instance_get)
+ self.stub_out('nova.db.instance_create', instance_create)
+ self.stub_out('nova.db.instance_get', instance_get)
def _check_multiple_create_extension_disabled(self, **kwargs):
self.assertEqual(kwargs['min_count'], 1)
diff --git a/nova/tests/unit/api/openstack/compute/test_neutron_security_groups.py b/nova/tests/unit/api/openstack/compute/test_neutron_security_groups.py
index c713eb4156..e9ee58cdeb 100644
--- a/nova/tests/unit/api/openstack/compute/test_neutron_security_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_neutron_security_groups.py
@@ -35,6 +35,9 @@ from nova.objects import instance as instance_obj
from nova import test
from nova.tests.unit.api.openstack.compute import test_security_groups
from nova.tests.unit.api.openstack import fakes
+from nova.tests import uuidsentinel as uuids
+
+UUID_SERVER = uuids.server
class TestNeutronSecurityGroupsTestCase(test.TestCase):
@@ -147,8 +150,8 @@ class TestNeutronSecurityGroupsV21(
device_id=test_security_groups.FAKE_UUID1)
expected = [{'rules': [], 'tenant_id': 'fake', 'id': sg['id'],
'name': 'test', 'description': 'test-description'}]
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- test_security_groups.return_server_by_uuid)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ test_security_groups.return_server_by_uuid)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/os-security-groups'
% test_security_groups.FAKE_UUID1)
res_dict = self.server_controller.index(
@@ -212,14 +215,15 @@ class TestNeutronSecurityGroupsV21(
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
- device_id=test_security_groups.FAKE_UUID1)
+ device_id=UUID_SERVER)
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.manager._addSecurityGroup(req, '1', body)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
+ UUID_SERVER)
+ self.manager._addSecurityGroup(req, UUID_SERVER, body)
def test_associate_duplicate_names(self):
sg1 = self._create_security_group(name='sg1',
@@ -229,15 +233,17 @@ class TestNeutronSecurityGroupsV21(
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg1['id']],
- device_id=test_security_groups.FAKE_UUID1)
+ device_id=UUID_SERVER)
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="sg1"))
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
+ UUID_SERVER)
self.assertRaises(webob.exc.HTTPConflict,
- self.manager._addSecurityGroup, req, '1', body)
+ self.manager._addSecurityGroup,
+ req, UUID_SERVER, body)
def test_associate_port_security_enabled_true(self):
sg = self._create_sg_template().get('security_group')
@@ -245,39 +251,43 @@ class TestNeutronSecurityGroupsV21(
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
port_security_enabled=True,
- device_id=test_security_groups.FAKE_UUID1)
+ device_id=UUID_SERVER)
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.manager._addSecurityGroup(req, '1', body)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
+ UUID_SERVER)
+ self.manager._addSecurityGroup(req, UUID_SERVER, body)
def test_associate_port_security_enabled_false(self):
self._create_sg_template().get('security_group')
net = self._create_network()
self._create_port(
network_id=net['network']['id'], port_security_enabled=False,
- device_id=test_security_groups.FAKE_UUID1)
+ device_id=UUID_SERVER)
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ test_security_groups.return_server)
body = dict(addSecurityGroup=dict(name="test"))
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
+ UUID_SERVER)
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup,
- req, '1', body)
+ req, UUID_SERVER, body)
def test_disassociate_by_non_existing_security_group_name(self):
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ test_security_groups.return_server)
body = dict(removeSecurityGroup=dict(name='non-existing'))
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
+ UUID_SERVER)
self.assertRaises(webob.exc.HTTPNotFound,
- self.manager._removeSecurityGroup, req, '1', body)
+ self.manager._removeSecurityGroup,
+ req, UUID_SERVER, body)
def test_disassociate_non_running_instance(self):
# Neutron does not care if the instance is running or not. When the
@@ -296,14 +306,15 @@ class TestNeutronSecurityGroupsV21(
net = self._create_network()
self._create_port(
network_id=net['network']['id'], security_groups=[sg['id']],
- device_id=test_security_groups.FAKE_UUID1)
+ device_id=UUID_SERVER)
- self.stubs.Set(nova.db, 'instance_get',
- test_security_groups.return_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ test_security_groups.return_server)
body = dict(removeSecurityGroup=dict(name="test"))
- req = fakes.HTTPRequest.blank('/v2/fake/servers/1/action')
- self.manager._removeSecurityGroup(req, '1', body)
+ req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/action' %
+ UUID_SERVER)
+ self.manager._removeSecurityGroup(req, UUID_SERVER, body)
def test_get_raises_no_unique_match_error(self):
diff --git a/nova/tests/unit/api/openstack/compute/test_pci.py b/nova/tests/unit/api/openstack/compute/test_pci.py
index 60fc7f29c6..97c9d98a51 100644
--- a/nova/tests/unit/api/openstack/compute/test_pci.py
+++ b/nova/tests/unit/api/openstack/compute/test_pci.py
@@ -18,7 +18,6 @@ from webob import exc
from nova.api.openstack.compute import pci
from nova.api.openstack import wsgi
from nova import context
-from nova import db
from nova import exception
from nova import objects
from nova.objects import fields
@@ -74,8 +73,8 @@ class PciServerControllerTestV21(test.NoDBTestCase):
return test_pci_device.fake_db_dev
ctxt = context.get_admin_context()
- self.stubs.Set(db, 'pci_device_get_by_addr',
- fake_pci_device_get_by_addr)
+ self.stub_out('nova.db.pci_device_get_by_addr',
+ fake_pci_device_get_by_addr)
self.pci_device = objects.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
def test_show(self):
@@ -155,7 +154,8 @@ class PciControlletestV21(test.NoDBTestCase):
def fake_pci_device_get_by_id(context, id):
return test_pci_device.fake_db_dev
- self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
+ self.stub_out('nova.db.pci_device_get_by_id',
+ fake_pci_device_get_by_id)
req = fakes.HTTPRequest.blank('/os-pci/1', use_admin_context=True)
result = self.controller.show(req, '1')
dist = {'pci_device': {'address': 'a',
@@ -175,7 +175,8 @@ class PciControlletestV21(test.NoDBTestCase):
def fake_pci_device_get_by_id(context, id):
raise exception.PciDeviceNotFoundById(id=id)
- self.stubs.Set(db, 'pci_device_get_by_id', fake_pci_device_get_by_id)
+ self.stub_out('nova.db.pci_device_get_by_id',
+ fake_pci_device_get_by_id)
req = fakes.HTTPRequest.blank('/os-pci/0', use_admin_context=True)
self.assertRaises(exc.HTTPNotFound, self.controller.show, req, '0')
@@ -192,8 +193,8 @@ class PciControlletestV21(test.NoDBTestCase):
def test_index(self):
self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
self._fake_compute_node_get_all)
- self.stubs.Set(db, 'pci_device_get_all_by_node',
- self._fake_pci_device_get_all_by_node)
+ self.stub_out('nova.db.pci_device_get_all_by_node',
+ self._fake_pci_device_get_all_by_node)
req = fakes.HTTPRequest.blank('/os-pci', use_admin_context=True)
result = self.controller.index(req)
@@ -212,8 +213,8 @@ class PciControlletestV21(test.NoDBTestCase):
def test_detail(self):
self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
self._fake_compute_node_get_all)
- self.stubs.Set(db, 'pci_device_get_all_by_node',
- self._fake_pci_device_get_all_by_node)
+ self.stub_out('nova.db.pci_device_get_all_by_node',
+ self._fake_pci_device_get_all_by_node)
req = fakes.HTTPRequest.blank('/os-pci/detail',
use_admin_context=True)
result = self.controller.detail(req)
diff --git a/nova/tests/unit/api/openstack/compute/test_plugin_framework.py b/nova/tests/unit/api/openstack/compute/test_plugin_framework.py
index 0482aa994e..0bf6a1952e 100644
--- a/nova/tests/unit/api/openstack/compute/test_plugin_framework.py
+++ b/nova/tests/unit/api/openstack/compute/test_plugin_framework.py
@@ -13,14 +13,11 @@
# under the License.
import mock
-from oslo_config import cfg
from oslo_serialization import jsonutils
from nova import test
from nova.tests.unit.api.openstack import fakes
-CONF = cfg.CONF
-
class PluginTest(test.NoDBTestCase):
diff --git a/nova/tests/unit/api/openstack/compute/test_scheduler_hints.py b/nova/tests/unit/api/openstack/compute/test_scheduler_hints.py
index fb63d1ef20..e138cf368a 100644
--- a/nova/tests/unit/api/openstack/compute/test_scheduler_hints.py
+++ b/nova/tests/unit/api/openstack/compute/test_scheduler_hints.py
@@ -15,6 +15,7 @@
import datetime
+import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
@@ -25,7 +26,7 @@ from nova.api.openstack.compute import servers as servers_v21
from nova.api.openstack import extensions
import nova.compute.api
from nova.compute import flavors
-from nova import db
+from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
@@ -98,7 +99,11 @@ class SchedulerHintsTestCaseV21(test.TestCase):
self.assertEqual(202, res.status_int)
def test_create_server_with_group_hint(self):
- self._test_create_server_with_hint({'group': 'foo'})
+ self._test_create_server_with_hint({'group': UUID})
+
+ def test_create_server_with_non_uuid_group_hint(self):
+ self._create_server_with_scheduler_hints_bad_request(
+ {'group': 'non-uuid'})
def test_create_server_with_different_host_hint(self):
self._test_create_server_with_hint(
@@ -161,6 +166,13 @@ class SchedulerHintsTestCaseV2(SchedulerHintsTestCaseV21):
# We skip this test for v2.0.
pass
+ @mock.patch(
+ 'nova.api.openstack.compute.legacy_v2.servers.Controller.create')
+ def test_create_server_with_non_uuid_group_hint(self, mock_create):
+ mock_create.side_effect = exception.InvalidInput(reason='')
+ self._create_server_with_scheduler_hints_bad_request(
+ {'group': 'non-uuid'})
+
class ServersControllerCreateTestV21(test.TestCase):
@@ -199,7 +211,7 @@ class ServersControllerCreateTestV21(test.TestCase):
return instance
fake.stub_out_image_service(self)
- self.stubs.Set(db, 'instance_create', instance_create)
+ self.stub_out('nova.db.instance_create', instance_create)
def _set_up_controller(self):
ext_info = extension_info.LoadedExtensionInfo()
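
The new test pair above pins down that a 'group' scheduler hint must be a UUID: the v2.1 tests expect a bad-request error for a non-UUID value, and the legacy v2 variant surfaces the same failure as InvalidInput from the controller. A hypothetical validator equivalent to what the tests assert, not nova's actual schema code:

import uuid


def validate_group_hint(hints):
    # Absent hint is fine; a non-UUID value is a client error.
    group = hints.get('group')
    if group is None:
        return
    try:
        uuid.UUID(group)
    except (TypeError, ValueError):
        raise ValueError(
            "scheduler hint 'group' must be a UUID, got %r" % group)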
diff --git a/nova/tests/unit/api/openstack/compute/test_security_group_default_rules.py b/nova/tests/unit/api/openstack/compute/test_security_group_default_rules.py
index f0bad162f0..3f254892d5 100644
--- a/nova/tests/unit/api/openstack/compute/test_security_group_default_rules.py
+++ b/nova/tests/unit/api/openstack/compute/test_security_group_default_rules.py
@@ -13,7 +13,6 @@
# under the License.
import mock
-from oslo_config import cfg
import webob
from nova.api.openstack.compute.legacy_v2.contrib import \
@@ -27,9 +26,6 @@ from nova import test
from nova.tests.unit.api.openstack import fakes
-CONF = cfg.CONF
-
-
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
@@ -284,10 +280,10 @@ class TestSecurityGroupDefaultRulesV21(test.TestCase):
self.assertEqual(sgr['id'], id)
return security_group_default_rule_db(sgr)
- self.stubs.Set(nova.db, 'security_group_default_rule_destroy',
- security_group_default_rule_destroy)
- self.stubs.Set(nova.db, 'security_group_default_rule_get',
- return_security_group_default_rule)
+ self.stub_out('nova.db.security_group_default_rule_destroy',
+ security_group_default_rule_destroy)
+ self.stub_out('nova.db.security_group_default_rule_get',
+ return_security_group_default_rule)
self.controller.delete(self.req, '1')
diff --git a/nova/tests/unit/api/openstack/compute/test_security_groups.py b/nova/tests/unit/api/openstack/compute/test_security_groups.py
index 7fcaf94e67..215a93b318 100644
--- a/nova/tests/unit/api/openstack/compute/test_security_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_security_groups.py
@@ -24,6 +24,7 @@ from nova.api.openstack.compute.legacy_v2.contrib import security_groups as \
secgroups_v2
from nova.api.openstack.compute import security_groups as \
secgroups_v21
+from nova.api.openstack import wsgi
from nova import compute
from nova.compute import power_state
from nova import context as context_maker
@@ -34,10 +35,12 @@ from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
+from nova.tests import uuidsentinel as uuids
CONF = cfg.CONF
FAKE_UUID1 = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16'
FAKE_UUID2 = 'c6e6430a-6563-4efa-9542-5e93c9e97d18'
+UUID_SERVER = uuids.server
class AttrDict(dict):
@@ -90,10 +93,10 @@ def security_group_rule_db(rule, id=None):
def return_server(context, server_id,
columns_to_join=None, use_slave=False):
return fake_instance.fake_db_instance(
- **{'id': int(server_id),
+ **{'id': 1,
'power_state': 0x01,
'host': "localhost",
- 'uuid': FAKE_UUID1,
+ 'uuid': server_id,
'name': 'asdf'})
@@ -110,13 +113,13 @@ def return_server_by_uuid(context, server_uuid,
def return_non_running_server(context, server_id, columns_to_join=None):
return fake_instance.fake_db_instance(
- **{'id': server_id, 'power_state': power_state.SHUTDOWN,
- 'uuid': FAKE_UUID1, 'host': "localhost", 'name': 'asdf'})
+ **{'id': 1, 'power_state': power_state.SHUTDOWN,
+ 'uuid': server_id, 'host': "localhost", 'name': 'asdf'})
def return_security_group_by_name(context, project_id, group_name):
return {'id': 1, 'name': group_name,
- "instances": [{'id': 1, 'uuid': FAKE_UUID1}]}
+ "instances": [{'id': 1, 'uuid': UUID_SERVER}]}
def return_security_group_without_instances(context, project_id, group_name):
@@ -328,8 +331,8 @@ class TestSecurityGroupsV21(test.TestCase):
def return_security_groups(context, project_id):
return [security_group_db(sg) for sg in groups]
- self.stubs.Set(nova.db, 'security_group_get_by_project',
- return_security_groups)
+ self.stub_out('nova.db.security_group_get_by_project',
+ return_security_groups)
res_dict = self.controller.index(self.req)
@@ -394,14 +397,14 @@ class TestSecurityGroupsV21(test.TestCase):
def return_all_security_groups(context):
return [security_group_db(sg) for sg in all_groups]
- self.stubs.Set(nova.db, 'security_group_get_all',
- return_all_security_groups)
+ self.stub_out('nova.db.security_group_get_all',
+ return_all_security_groups)
def return_tenant_security_groups(context, project_id):
return [security_group_db(sg) for sg in tenant_groups]
- self.stubs.Set(nova.db, 'security_group_get_by_project',
- return_tenant_security_groups)
+ self.stub_out('nova.db.security_group_get_by_project',
+ return_tenant_security_groups)
path = '/v2/fake/os-security-groups'
@@ -429,15 +432,15 @@ class TestSecurityGroupsV21(test.TestCase):
self.assertEqual(server_id, FAKE_UUID1)
return return_server_by_uuid(context, server_id)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_instance)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_instance)
def return_security_groups(context, instance_uuid):
self.assertEqual(instance_uuid, FAKE_UUID1)
return [security_group_db(sg) for sg in groups]
- self.stubs.Set(nova.db, 'security_group_get_by_instance',
- return_security_groups)
+ self.stub_out('nova.db.security_group_get_by_instance',
+ return_security_groups)
res_dict = self.server_controller.index(self.req, FAKE_UUID1)
@@ -460,9 +463,9 @@ class TestSecurityGroupsV21(test.TestCase):
self.req.environ['nova.context'], FAKE_UUID1)
def test_get_security_group_by_instance_non_existing(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_nonexistent)
+ self.stub_out('nova.db.instance_get', return_server_nonexistent)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_server_nonexistent)
self.assertRaises(webob.exc.HTTPNotFound,
self.server_controller.index, self.req, '1')
@@ -477,8 +480,8 @@ class TestSecurityGroupsV21(test.TestCase):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
+ self.stub_out('nova.db.security_group_get',
+ return_security_group)
res_dict = self.controller.show(self.req, '2')
@@ -509,10 +512,10 @@ class TestSecurityGroupsV21(test.TestCase):
self.assertEqual(sg_update['description'], values['description'])
return security_group_db(sg_update)
- self.stubs.Set(nova.db, 'security_group_update',
- return_update_security_group)
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
+ self.stub_out('nova.db.security_group_update',
+ return_update_security_group)
+ self.stub_out('nova.db.security_group_get',
+ return_security_group)
res_dict = self.controller.update(self.req, '2',
{'security_group': sg_update})
@@ -527,8 +530,8 @@ class TestSecurityGroupsV21(test.TestCase):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
+ self.stub_out('nova.db.security_group_get',
+ return_security_group)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, '2', {'security_group': sg})
@@ -552,10 +555,10 @@ class TestSecurityGroupsV21(test.TestCase):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
- self.stubs.Set(nova.db, 'security_group_destroy',
- security_group_destroy)
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
+ self.stub_out('nova.db.security_group_destroy',
+ security_group_destroy)
+ self.stub_out('nova.db.security_group_get',
+ return_security_group)
self.controller.delete(self.req, '1')
@@ -596,16 +599,16 @@ class TestSecurityGroupsV21(test.TestCase):
self.assertEqual(sg['id'], group_id)
return security_group_db(sg)
- self.stubs.Set(nova.db, 'security_group_in_use',
- security_group_in_use)
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
+ self.stub_out('nova.db.security_group_in_use',
+ security_group_in_use)
+ self.stub_out('nova.db.security_group_get',
+ return_security_group)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
self.req, '1')
def test_associate_by_non_existing_security_group_name(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stub_out('nova.db.instance_get', return_server)
self.assertEqual(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(addSecurityGroup=dict(name='non-existing'))
@@ -621,29 +624,29 @@ class TestSecurityGroupsV21(test.TestCase):
'invalid', body)
def test_associate_without_body(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stub_out('nova.db.instance_get', return_server)
body = dict(addSecurityGroup=None)
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, self.req, '1', body)
def test_associate_no_security_group_name(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stub_out('nova.db.instance_get', return_server)
body = dict(addSecurityGroup=dict())
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, self.req, '1', body)
def test_associate_security_group_name_with_whitespaces(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stub_out('nova.db.instance_get', return_server)
body = dict(addSecurityGroup=dict(name=" "))
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, self.req, '1', body)
def test_associate_non_existing_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ self.stub_out('nova.db.instance_get', return_server_nonexistent)
+ self.stub_out('nova.db.instance_get_by_uuid',
return_server_nonexistent)
body = dict(addSecurityGroup=dict(name="test"))
@@ -651,56 +654,56 @@ class TestSecurityGroupsV21(test.TestCase):
self.manager._addSecurityGroup, self.req, '1', body)
def test_associate_non_running_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_non_running_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stub_out('nova.db.instance_get', return_non_running_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_non_running_server)
+ self.stub_out('nova.db.security_group_get_by_name',
return_security_group_without_instances)
body = dict(addSecurityGroup=dict(name="test"))
- self.manager._addSecurityGroup(self.req, '1', body)
+ self.manager._addSecurityGroup(self.req, UUID_SERVER, body)
def test_associate_already_associated_security_group_to_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_by_uuid)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stub_out('nova.db.instance_get', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_server_by_uuid)
+ self.stub_out('nova.db.security_group_get_by_name',
return_security_group_by_name)
body = dict(addSecurityGroup=dict(name="test"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._addSecurityGroup, self.req,
- '1', body)
+ UUID_SERVER, body)
def test_associate(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ self.stub_out('nova.db.instance_get', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
return_server_by_uuid)
self.mox.StubOutWithMock(nova.db, 'instance_add_security_group')
nova.db.instance_add_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stub_out('nova.db.security_group_get_by_name',
return_security_group_without_instances)
self.mox.ReplayAll()
body = dict(addSecurityGroup=dict(name="test"))
- self.manager._addSecurityGroup(self.req, '1', body)
+ self.manager._addSecurityGroup(self.req, UUID_SERVER, body)
def test_disassociate_by_non_existing_security_group_name(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stub_out('nova.db.instance_get', return_server)
self.assertEqual(return_server(None, '1'),
nova.db.instance_get(None, '1'))
body = dict(removeSecurityGroup=dict(name='non-existing'))
self.assertRaises(webob.exc.HTTPNotFound,
self.manager._removeSecurityGroup, self.req,
- '1', body)
+ UUID_SERVER, body)
def test_disassociate_by_invalid_server_id(self):
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_by_name)
+ self.stub_out('nova.db.security_group_get_by_name',
+ return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name='test'))
self.assertRaises(webob.exc.HTTPNotFound,
@@ -708,7 +711,7 @@ class TestSecurityGroupsV21(test.TestCase):
'invalid', body)
def test_disassociate_without_body(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stub_out('nova.db.instance_get', return_server)
body = dict(removeSecurityGroup=None)
self.assertRaises(webob.exc.HTTPBadRequest,
@@ -716,7 +719,7 @@ class TestSecurityGroupsV21(test.TestCase):
'1', body)
def test_disassociate_no_security_group_name(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stub_out('nova.db.instance_get', return_server)
body = dict(removeSecurityGroup=dict())
self.assertRaises(webob.exc.HTTPBadRequest,
@@ -724,7 +727,7 @@ class TestSecurityGroupsV21(test.TestCase):
'1', body)
def test_disassociate_security_group_name_with_whitespaces(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
+ self.stub_out('nova.db.instance_get', return_server)
body = dict(removeSecurityGroup=dict(name=" "))
self.assertRaises(webob.exc.HTTPBadRequest,
@@ -732,9 +735,9 @@ class TestSecurityGroupsV21(test.TestCase):
'1', body)
def test_disassociate_non_existing_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_by_name)
+ self.stub_out('nova.db.instance_get', return_server_nonexistent)
+ self.stub_out('nova.db.security_group_get_by_name',
+ return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name="test"))
self.assertRaises(webob.exc.HTTPNotFound,
@@ -742,42 +745,42 @@ class TestSecurityGroupsV21(test.TestCase):
self.req, '1', body)
def test_disassociate_non_running_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_non_running_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_non_running_server)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stub_out('nova.db.instance_get', return_non_running_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_non_running_server)
+ self.stub_out('nova.db.security_group_get_by_name',
return_security_group_by_name)
body = dict(removeSecurityGroup=dict(name="test"))
- self.manager._removeSecurityGroup(self.req, '1', body)
+ self.manager._removeSecurityGroup(self.req, UUID_SERVER, body)
def test_disassociate_already_associated_security_group_to_instance(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_by_uuid)
- self.stubs.Set(nova.db, 'security_group_get_by_name',
+ self.stub_out('nova.db.instance_get', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_server_by_uuid)
+ self.stub_out('nova.db.security_group_get_by_name',
return_security_group_without_instances)
body = dict(removeSecurityGroup=dict(name="test"))
self.assertRaises(webob.exc.HTTPBadRequest,
self.manager._removeSecurityGroup, self.req,
- '1', body)
+ UUID_SERVER, body)
def test_disassociate(self):
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_by_uuid)
+ self.stub_out('nova.db.instance_get', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_server_by_uuid)
self.mox.StubOutWithMock(nova.db, 'instance_remove_security_group')
nova.db.instance_remove_security_group(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
- self.stubs.Set(nova.db, 'security_group_get_by_name',
- return_security_group_by_name)
+ self.stub_out('nova.db.security_group_get_by_name',
+ return_security_group_by_name)
self.mox.ReplayAll()
body = dict(removeSecurityGroup=dict(name="test"))
- self.manager._removeSecurityGroup(self.req, '1', body)
+ self.manager._removeSecurityGroup(self.req, UUID_SERVER, body)
class TestSecurityGroupsV2(TestSecurityGroupsV21):
@@ -817,8 +820,8 @@ class TestSecurityGroupRulesV21(test.TestCase):
return db2
raise exception.SecurityGroupNotFound(security_group_id=group_id)
- self.stubs.Set(nova.db, 'security_group_get',
- return_security_group)
+ self.stub_out('nova.db.security_group_get',
+ return_security_group)
self.parent_security_group = db2
self.req = fakes.HTTPRequest.blank('')
@@ -1182,10 +1185,10 @@ class TestSecurityGroupRulesV21(test.TestCase):
def security_group_rule_destroy(context, id):
pass
- self.stubs.Set(nova.db, 'security_group_rule_get',
- security_group_rule_get)
- self.stubs.Set(nova.db, 'security_group_rule_destroy',
- security_group_rule_destroy)
+ self.stub_out('nova.db.security_group_rule_get',
+ security_group_rule_get)
+ self.stub_out('nova.db.security_group_rule_destroy',
+ security_group_rule_destroy)
self.controller.delete(self.req, self.sg2['id'])
@@ -1411,15 +1414,45 @@ class SecurityGroupsOutputPolicyEnforcementV21(test.NoDBTestCase):
self.rule_name = "os_compute_api:os-security-groups"
self.rule = {self.rule_name: "project:non_fake"}
self.policy.set_rules(self.rule)
-
- def test_show_policy_failed(self):
- self.controller.show(self.req, None, FAKE_UUID1)
-
- def test_create_policy_failed(self):
- self.controller.create(self.req, None, {})
-
- def test_detail_policy_failed(self):
- self.controller.detail(self.req, None)
+ self.fake_res = wsgi.ResponseObject({
+ 'server': {'id': '0'},
+ 'servers': [{'id': '0'}, {'id': '2'}]})
+
+ @mock.patch.object(secgroups_v21, "softauth")
+ def test_show_policy_softauth_is_called(self, mock_softauth):
+ mock_softauth.return_value = False
+ self.controller.show(self.req, self.fake_res, FAKE_UUID1)
+ self.assertTrue(mock_softauth.called)
+
+ @mock.patch.object(nova.network.security_group.openstack_driver,
+ "is_neutron_security_groups")
+ def test_show_policy_failed(self, is_neutron_security_groups):
+ self.controller.show(self.req, self.fake_res, FAKE_UUID1)
+ self.assertFalse(is_neutron_security_groups.called)
+
+ @mock.patch.object(secgroups_v21, "softauth")
+ def test_create_policy_softauth_is_called(self, mock_softauth):
+ mock_softauth.return_value = False
+ self.controller.create(self.req, self.fake_res, {})
+ self.assertTrue(mock_softauth.called)
+
+ @mock.patch.object(nova.network.security_group.openstack_driver,
+ "is_neutron_security_groups")
+ def test_create_policy_failed(self, is_neutron_security_groups):
+ self.controller.create(self.req, self.fake_res, {})
+ self.assertFalse(is_neutron_security_groups.called)
+
+ @mock.patch.object(secgroups_v21, "softauth")
+ def test_detail_policy_softauth_is_called(self, mock_softauth):
+ mock_softauth.return_value = False
+ self.controller.detail(self.req, self.fake_res)
+ self.assertTrue(mock_softauth.called)
+
+ @mock.patch.object(nova.network.security_group.openstack_driver,
+ "is_neutron_security_groups")
+ def test_detail_policy_failed(self, is_neutron_security_groups):
+ self.controller.detail(self.req, self.fake_res)
+ self.assertFalse(is_neutron_security_groups.called)
class PolicyEnforcementV21(test.NoDBTestCase):
diff --git a/nova/tests/unit/api/openstack/compute/test_server_actions.py b/nova/tests/unit/api/openstack/compute/test_server_actions.py
index b31be443d7..d6a7817e57 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_actions.py
@@ -29,7 +29,6 @@ from nova.api.openstack.compute import servers as servers_v21
from nova.compute import api as compute_api
from nova.compute import task_states
from nova.compute import vm_states
-from nova import db
from nova import exception
from nova.image import glance
from nova import objects
@@ -86,11 +85,11 @@ class ServerActionsControllerTestV21(test.TestCase):
super(ServerActionsControllerTestV21, self).setUp()
CONF.set_override('host', 'localhost', group='glance')
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
host='fake_host'))
- self.stubs.Set(db, 'instance_update_and_get_original',
- instance_update_and_get_original)
+ self.stub_out('nova.db.instance_update_and_get_original',
+ instance_update_and_get_original)
fakes.stub_out_nw_api(self)
fakes.stub_out_compute_api_snapshot(self.stubs)
@@ -129,7 +128,8 @@ class ServerActionsControllerTestV21(test.TestCase):
self.context, objects.Instance(), instance)
self.compute_api.get(self.context, uuid,
- expected_attrs=['flavor', 'pci_devices'],
+ expected_attrs=['flavor', 'pci_devices',
+ 'numa_topology'],
want_objects=True).AndReturn(instance)
return instance
@@ -220,8 +220,8 @@ class ServerActionsControllerTestV21(test.TestCase):
self.req, FAKE_UUID, body=body)
def test_reboot_not_found(self):
- self.stubs.Set(db, 'instance_get_by_uuid',
- return_server_not_found)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_server_not_found)
body = dict(reboot=dict(type="HARD"))
self.assertRaises(webob.exc.HTTPNotFound,
@@ -244,8 +244,8 @@ class ServerActionsControllerTestV21(test.TestCase):
def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
body = dict(reboot=dict(type="SOFT"))
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
@@ -253,22 +253,22 @@ class ServerActionsControllerTestV21(test.TestCase):
def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
body = dict(reboot=dict(type="HARD"))
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_hard_with_hard_in_progress(self):
body = dict(reboot=dict(type="HARD"))
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING_HARD))
self.controller._action_reboot(self.req, FAKE_UUID, body=body)
def test_reboot_soft_with_hard_in_progress_raises_conflict(self):
body = dict(reboot=dict(type="SOFT"))
- self.stubs.Set(db, 'instance_get_by_uuid',
- fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING_HARD))
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
@@ -279,7 +279,7 @@ class ServerActionsControllerTestV21(test.TestCase):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE,
host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
body = {
"rebuild": {
@@ -309,7 +309,7 @@ class ServerActionsControllerTestV21(test.TestCase):
def test_rebuild_accepted_minimum(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/servers/%s' % FAKE_UUID
body = {
@@ -333,7 +333,7 @@ class ServerActionsControllerTestV21(test.TestCase):
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
- self.stubs.Set(db, 'instance_get',
+ self.stub_out('nova.db.instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(compute_api.API, 'rebuild', rebuild)
@@ -353,7 +353,7 @@ class ServerActionsControllerTestV21(test.TestCase):
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
- self.stubs.Set(db, 'instance_get',
+ self.stub_out('nova.db.instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(compute_api.API, 'rebuild', rebuild)
@@ -374,7 +374,7 @@ class ServerActionsControllerTestV21(test.TestCase):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
self_href = 'http://localhost/v2/servers/%s' % FAKE_UUID
body = {
@@ -414,7 +414,7 @@ class ServerActionsControllerTestV21(test.TestCase):
return_server = fakes.fake_instance_get(metadata=metadata,
vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
body = {
"rebuild": {
@@ -468,7 +468,7 @@ class ServerActionsControllerTestV21(test.TestCase):
def test_rebuild_admin_pass(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
body = {
"rebuild": {
@@ -490,7 +490,7 @@ class ServerActionsControllerTestV21(test.TestCase):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
body = {
"rebuild": {
@@ -509,7 +509,7 @@ class ServerActionsControllerTestV21(test.TestCase):
def server_not_found(self, instance_id,
columns_to_join=None, use_slave=False):
raise exception.InstanceNotFound(instance_id=instance_id)
- self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
+ self.stub_out('nova.db.instance_get_by_uuid', server_not_found)
body = {
"rebuild": {
@@ -963,8 +963,8 @@ class ServerActionsControllerTestV21(test.TestCase):
'delete_on_termination': False,
'no_device': None})]
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_block_device_mapping_get_all_by_instance)
+ self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
system_metadata = dict(image_kernel_id=_fake_id('b'),
image_ramdisk_id=_fake_id('c'),
@@ -975,7 +975,7 @@ class ServerActionsControllerTestV21(test.TestCase):
vm_state=vm_states.ACTIVE,
root_device_name='/dev/vda',
system_metadata=system_metadata)
- self.stubs.Set(db, 'instance_get_by_uuid', instance)
+ self.stub_out('nova.db.instance_get_by_uuid', instance)
self.mox.StubOutWithMock(self.controller.compute_api.compute_rpcapi,
'quiesce_instance')
@@ -1055,8 +1055,8 @@ class ServerActionsControllerTestV21(test.TestCase):
'delete_on_termination': False,
'no_device': None})]
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_block_device_mapping_get_all_by_instance)
+ self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
+ fake_block_device_mapping_get_all_by_instance)
instance = fakes.fake_instance_get(
image_ref='',
@@ -1064,7 +1064,7 @@ class ServerActionsControllerTestV21(test.TestCase):
root_device_name='/dev/vda',
system_metadata={'image_test_key1': 'test_value1',
'image_test_key2': 'test_value2'})
- self.stubs.Set(db, 'instance_get_by_uuid', instance)
+ self.stub_out('nova.db.instance_get_by_uuid', instance)
self.mox.StubOutWithMock(self.controller.compute_api.compute_rpcapi,
'quiesce_instance')
@@ -1297,7 +1297,7 @@ class ServerActionsControllerTestV2(ServerActionsControllerTestV21):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE,
host='fake_host')
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
body = {
"rebuild": {
diff --git a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
index 5197116dde..0d96bb71ed 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_group_quotas.py
@@ -21,7 +21,6 @@ from nova.api.openstack.compute.legacy_v2.contrib import server_groups
from nova.api.openstack.compute import server_groups as sg_v21
from nova.api.openstack import extensions
from nova import context
-import nova.db
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
@@ -146,10 +145,10 @@ class ServerGroupQuotasTestV21(test.TestCase):
self.assertEqual(sg['id'], group_id)
return server_group_db(sg)
- self.stubs.Set(nova.db, 'instance_group_delete',
- server_group_delete)
- self.stubs.Set(nova.db, 'instance_group_get',
- return_server_group)
+ self.stub_out('nova.db.instance_group_delete',
+ server_group_delete)
+ self.stub_out('nova.db.instance_group_get',
+ return_server_group)
resp = self.controller.delete(self.req, '123')
self.assertTrue(self.called)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_groups.py b/nova/tests/unit/api/openstack/compute/test_server_groups.py
index 3566f40b66..50375b759a 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_groups.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_groups.py
@@ -395,10 +395,10 @@ class ServerGroupTestV21(test.TestCase):
self.assertEqual(sg['id'], group_id)
return server_group_db(sg)
- self.stubs.Set(nova.db, 'instance_group_delete',
- server_group_delete)
- self.stubs.Set(nova.db, 'instance_group_get',
- return_server_group)
+ self.stub_out('nova.db.instance_group_delete',
+ server_group_delete)
+ self.stub_out('nova.db.instance_group_get',
+ return_server_group)
resp = self.controller.delete(self.req, '123')
self.assertTrue(self.called)
diff --git a/nova/tests/unit/api/openstack/compute/test_server_metadata.py b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
index c4344fd77f..114f659588 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_metadata.py
@@ -121,12 +121,12 @@ class ServerMetaDataTestV21(test.TestCase):
def setUp(self):
super(ServerMetaDataTestV21, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
- self.stubs.Set(nova.db, 'instance_get', return_server)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_by_uuid)
+ self.stub_out('nova.db.instance_get', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_server_by_uuid)
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_server_metadata)
+ self.stub_out('nova.db.instance_metadata_get',
+ return_server_metadata)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
@@ -154,14 +154,14 @@ class ServerMetaDataTestV21(test.TestCase):
self.assertEqual(expected, res_dict)
def test_index_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
+ self.stub_out('nova.db.instance_metadata_get',
return_server_nonexistent)
req = self._get_request()
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
+ self.stub_out('nova.db.instance_metadata_get',
return_empty_server_metadata)
req = self._get_request()
res_dict = self.controller.index(req, self.uuid)
@@ -175,24 +175,24 @@ class ServerMetaDataTestV21(test.TestCase):
self.assertEqual(expected, res_dict)
def test_show_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_server_nonexistent)
+ self.stub_out('nova.db.instance_metadata_get',
+ return_server_nonexistent)
req = self._get_request('/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key2')
def test_show_meta_not_found(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_empty_server_metadata)
+ self.stub_out('nova.db.instance_metadata_get',
+ return_empty_server_metadata)
req = self._get_request('/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key6')
def test_delete(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_server_metadata)
- self.stubs.Set(nova.db, 'instance_metadata_delete',
- delete_server_metadata)
+ self.stub_out('nova.db.instance_metadata_get',
+ return_server_metadata)
+ self.stub_out('nova.db.instance_metadata_delete',
+ delete_server_metadata)
req = self._get_request('/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.uuid, 'key2')
@@ -200,16 +200,16 @@ class ServerMetaDataTestV21(test.TestCase):
self.assertIsNone(res)
def test_delete_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_nonexistent)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_server_nonexistent)
req = self._get_request('/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.uuid, 'key1')
def test_delete_meta_not_found(self):
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_empty_server_metadata)
+ self.stub_out('nova.db.instance_metadata_get',
+ return_empty_server_metadata)
req = self._get_request('/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
@@ -232,8 +232,8 @@ class ServerMetaDataTestV21(test.TestCase):
self.assertEqual(body, res_dict)
def test_create_empty_body(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.headers["content-type"] = "application/json"
@@ -242,8 +242,8 @@ class ServerMetaDataTestV21(test.TestCase):
self.controller.create, req, self.uuid, body=None)
def test_create_item_empty_key(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": {"": "value1"}}
@@ -254,8 +254,8 @@ class ServerMetaDataTestV21(test.TestCase):
self.controller.create, req, self.uuid, body=body)
def test_create_item_non_dict(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": None}
@@ -266,8 +266,8 @@ class ServerMetaDataTestV21(test.TestCase):
self.controller.create, req, self.uuid, body=body)
def test_create_item_key_too_long(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": {("a" * 260): "value1"}}
@@ -279,8 +279,8 @@ class ServerMetaDataTestV21(test.TestCase):
req, self.uuid, body=body)
def test_create_malformed_container(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {}}
@@ -291,8 +291,8 @@ class ServerMetaDataTestV21(test.TestCase):
self.controller.create, req, self.uuid, body=body)
def test_create_malformed_data(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"metadata": ['asdf']}
@@ -303,8 +303,8 @@ class ServerMetaDataTestV21(test.TestCase):
self.controller.create, req, self.uuid, body=body)
def test_create_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_nonexistent)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_server_nonexistent)
req = self._get_request()
req.method = 'POST'
body = {"metadata": {"key1": "value1"}}
@@ -357,8 +357,8 @@ class ServerMetaDataTestV21(test.TestCase):
self.assertEqual(expected, res_dict)
def test_update_all_empty_body_item(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
@@ -368,8 +368,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=None)
def test_update_all_with_non_dict_item(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"metadata": None}
@@ -381,8 +381,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=body)
def test_update_all_malformed_container(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
@@ -394,8 +394,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=expected)
def test_update_all_malformed_data(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
@@ -407,7 +407,7 @@ class ServerMetaDataTestV21(test.TestCase):
body=expected)
def test_update_all_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_get', return_server_nonexistent)
+ self.stub_out('nova.db.instance_get', return_server_nonexistent)
req = self._get_request()
req.method = 'PUT'
req.content_type = "application/json"
@@ -418,8 +418,8 @@ class ServerMetaDataTestV21(test.TestCase):
self.controller.update_all, req, '100', body=body)
def test_update_all_non_dict(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request()
req.method = 'PUT'
body = {"metadata": None}
@@ -441,8 +441,8 @@ class ServerMetaDataTestV21(test.TestCase):
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_server(self):
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- return_server_nonexistent)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ return_server_nonexistent)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
@@ -454,8 +454,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=body)
def test_update_item_empty_body(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
@@ -465,8 +465,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=None)
def test_update_malformed_container(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'meta': {}}
@@ -478,8 +478,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=expected)
def test_update_malformed_data(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'metadata': ['asdf']}
@@ -491,8 +491,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=expected)
def test_update_item_empty_key(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
@@ -504,8 +504,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=body)
def test_update_item_key_too_long(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
@@ -517,8 +517,8 @@ class ServerMetaDataTestV21(test.TestCase):
req, self.uuid, ("a" * 260), body=body)
def test_update_item_value_too_long(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
@@ -530,8 +530,8 @@ class ServerMetaDataTestV21(test.TestCase):
req, self.uuid, "key1", body=body)
def test_update_item_too_many_keys(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
@@ -543,8 +543,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=body)
def test_update_item_body_uri_mismatch(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request('/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
@@ -556,8 +556,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=body)
def test_update_item_non_dict(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request('/bad')
req.method = 'PUT'
body = {"meta": None}
@@ -569,8 +569,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=body)
def test_update_empty_container(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
expected = {'metadata': {}}
@@ -582,8 +582,8 @@ class ServerMetaDataTestV21(test.TestCase):
body=expected)
def test_too_many_metadata_items_on_create(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
@@ -596,8 +596,8 @@ class ServerMetaDataTestV21(test.TestCase):
self.controller.create, req, self.uuid, body=data)
def test_invalid_metadata_items_on_create(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.headers["content-type"] = "application/json"
@@ -621,8 +621,8 @@ class ServerMetaDataTestV21(test.TestCase):
self.controller.create, req, self.uuid, body=data)
def test_too_many_metadata_items_on_update_item(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
@@ -635,10 +635,8 @@ class ServerMetaDataTestV21(test.TestCase):
req, self.uuid, body=data)
def test_invalid_metadata_items_on_update_item(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
data = {"metadata": {}}
for num in range(CONF.quota_metadata_items + 1):
data['metadata']['key%i' % num] = "blah"
@@ -687,15 +687,15 @@ class BadStateServerMetaDataTestV21(test.TestCase):
def setUp(self):
super(BadStateServerMetaDataTestV21, self).setUp()
fakes.stub_out_key_pair_funcs(self.stubs)
- self.stubs.Set(nova.db, 'instance_metadata_get',
- return_server_metadata)
+ self.stub_out('nova.db.instance_metadata_get',
+ return_server_metadata)
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
- self.stubs.Set(nova.db, 'instance_get', self._return_server_in_build)
- self.stubs.Set(nova.db, 'instance_get_by_uuid',
- self._return_server_in_build_by_uuid)
- self.stubs.Set(nova.db, 'instance_metadata_delete',
- delete_server_metadata)
+ self.stub_out('nova.db.instance_get', self._return_server_in_build)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ self._return_server_in_build_by_uuid)
+ self.stub_out('nova.db.instance_metadata_delete',
+ delete_server_metadata)
self._set_up_resources()
def _set_up_resources(self):
@@ -713,8 +713,8 @@ class BadStateServerMetaDataTestV21(test.TestCase):
req, self.uuid, 'key2')
def test_invalid_state_on_update_metadata(self):
- self.stubs.Set(nova.db, 'instance_metadata_update',
- return_create_instance_metadata)
+ self.stub_out('nova.db.instance_metadata_update',
+ return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.content_type = 'application/json'
diff --git a/nova/tests/unit/api/openstack/compute/test_server_migrations.py b/nova/tests/unit/api/openstack/compute/test_server_migrations.py
new file mode 100644
index 0000000000..0d9037fbe2
--- /dev/null
+++ b/nova/tests/unit/api/openstack/compute/test_server_migrations.py
@@ -0,0 +1,108 @@
+# Copyright 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+import webob
+
+from nova.api.openstack.compute import server_migrations
+from nova import exception
+from nova import test
+from nova.tests.unit.api.openstack import fakes
+
+
+class ServerMigrationsTestsV21(test.NoDBTestCase):
+ wsgi_api_version = '2.22'
+
+ def setUp(self):
+ super(ServerMigrationsTestsV21, self).setUp()
+ self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+ self.context = self.req.environ['nova.context']
+ self.controller = server_migrations.ServerMigrationsController()
+ self.compute_api = self.controller.compute_api
+
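+ # Note: the mock.patch decorators are applied to the nested _do_test
+ # function so the compute API is only patched for the duration of the
+ # actual call under test.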
+ def test_force_complete_succeeded(self):
+ @mock.patch.object(self.compute_api, 'live_migrate_force_complete')
+ @mock.patch.object(self.compute_api, 'get')
+ def _do_test(compute_api_get, live_migrate_force_complete):
+ self.controller._force_complete(self.req, '1', '1',
+ body={'force_complete': None})
+ live_migrate_force_complete.assert_called_once_with(
+ self.context, compute_api_get(), '1')
+ _do_test()
+
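+ # Helper: make live_migrate_force_complete raise fake_exc and assert
+ # the controller maps it to the expected webob HTTP error.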
+ def _test_force_complete_failed_with_exception(self, fake_exc,
+ expected_exc):
+ @mock.patch.object(self.compute_api, 'live_migrate_force_complete',
+ side_effect=fake_exc)
+ @mock.patch.object(self.compute_api, 'get')
+ def _do_test(compute_api_get, live_migrate_force_complete):
+ self.assertRaises(expected_exc,
+ self.controller._force_complete,
+ self.req, '1', '1',
+ body={'force_complete': None})
+ _do_test()
+
+ def test_force_complete_instance_not_migrating(self):
+ self._test_force_complete_failed_with_exception(
+ exception.InstanceInvalidState(instance_uuid='', state='',
+ attr='', method=''),
+ webob.exc.HTTPConflict)
+
+ def test_force_complete_migration_not_found(self):
+ self._test_force_complete_failed_with_exception(
+ exception.MigrationNotFoundByStatus(instance_id='', status=''),
+ webob.exc.HTTPBadRequest)
+
+ def test_force_complete_instance_is_locked(self):
+ self._test_force_complete_failed_with_exception(
+ exception.InstanceIsLocked(instance_uuid=''),
+ webob.exc.HTTPConflict)
+
+ def test_force_complete_invalid_migration_state(self):
+ self._test_force_complete_failed_with_exception(
+ exception.InvalidMigrationState(migration_id='', instance_uuid='',
+ state='', method=''),
+ webob.exc.HTTPBadRequest)
+
+ def test_force_complete_instance_not_found(self):
+ self._test_force_complete_failed_with_exception(
+ exception.InstanceNotFound(instance_id=''),
+ webob.exc.HTTPNotFound)
+
+ def test_force_complete_unexpected_error(self):
+ self._test_force_complete_failed_with_exception(
+ exception.NovaException(), webob.exc.HTTPInternalServerError)
+
+
+class ServerMigrationsPolicyEnforcementV21(test.NoDBTestCase):
+ wsgi_api_version = '2.22'
+
+ def setUp(self):
+ super(ServerMigrationsPolicyEnforcementV21, self).setUp()
+ self.controller = server_migrations.ServerMigrationsController()
+ self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
+
+ def test_force_complete_policy_failed(self):
+ rule_name = "os_compute_api:servers:migrations:force_complete"
+ self.policy.set_rules({rule_name: "project:non_fake"})
+ body_args = {'force_complete': None}
+ exc = self.assertRaises(
+ exception.PolicyNotAuthorized,
+ self.controller._force_complete, self.req,
+ fakes.FAKE_UUID, fakes.FAKE_UUID,
+ body=body_args)
+ self.assertEqual(
+ "Policy doesn't allow %s to be performed." % rule_name,
+ exc.format_message())
diff --git a/nova/tests/unit/api/openstack/compute/test_server_start_stop.py b/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
index 5427a74e6b..85f5608448 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_start_stop.py
@@ -24,7 +24,6 @@ from nova.api.openstack.compute.legacy_v2.contrib import server_start_stop \
from nova.api.openstack.compute import servers \
as server_v21
from nova.compute import api as compute_api
-from nova import db
from nova import exception
from nova import policy
from nova import test
@@ -70,7 +69,7 @@ class ServerStartStopTestV21(test.TestCase):
extension_info=ext_info)
def test_start(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get)
self.mox.StubOutWithMock(compute_api.API, 'start')
compute_api.API.start(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
@@ -83,7 +82,7 @@ class ServerStartStopTestV21(test.TestCase):
self.start_policy: "project_id:non_fake"
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get)
body = dict(start="")
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller._start_server,
@@ -91,21 +90,21 @@ class ServerStartStopTestV21(test.TestCase):
self.assertIn(self.start_policy, exc.format_message())
def test_start_not_ready(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get)
self.stubs.Set(compute_api.API, 'start', fake_start_stop_not_ready)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, self.req, 'test_inst', body)
def test_start_locked_server(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get)
self.stubs.Set(compute_api.API, 'start', fake_start_stop_locked_server)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._start_server, self.req, 'test_inst', body)
def test_start_invalid_state(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get)
self.stubs.Set(compute_api.API, 'start', fake_start_stop_invalid_state)
body = dict(start="")
ex = self.assertRaises(webob.exc.HTTPConflict,
@@ -113,7 +112,7 @@ class ServerStartStopTestV21(test.TestCase):
self.assertIn('is locked', six.text_type(ex))
def test_stop(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get)
self.mox.StubOutWithMock(compute_api.API, 'stop')
compute_api.API.stop(mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
@@ -126,7 +125,7 @@ class ServerStartStopTestV21(test.TestCase):
self.stop_policy: "project_id:non_fake"
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get)
body = dict(stop="")
exc = self.assertRaises(exception.PolicyNotAuthorized,
self.controller._stop_server,
@@ -134,14 +133,14 @@ class ServerStartStopTestV21(test.TestCase):
self.assertIn(self.stop_policy, exc.format_message())
def test_stop_not_ready(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get)
self.stubs.Set(compute_api.API, 'stop', fake_start_stop_not_ready)
body = dict(stop="")
self.assertRaises(webob.exc.HTTPConflict,
self.controller._stop_server, self.req, 'test_inst', body)
def test_stop_locked_server(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get)
self.stubs.Set(compute_api.API, 'stop', fake_start_stop_locked_server)
body = dict(stop="")
ex = self.assertRaises(webob.exc.HTTPConflict,
@@ -149,7 +148,7 @@ class ServerStartStopTestV21(test.TestCase):
self.assertIn('is locked', six.text_type(ex))
def test_stop_invalid_state(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get)
+ self.stub_out('nova.db.instance_get_by_uuid', fake_instance_get)
self.stubs.Set(compute_api.API, 'stop', fake_start_stop_invalid_state)
body = dict(start="")
self.assertRaises(webob.exc.HTTPConflict,
diff --git a/nova/tests/unit/api/openstack/compute/test_server_usage.py b/nova/tests/unit/api/openstack/compute/test_server_usage.py
index ffe0ec95f5..34a090763b 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_usage.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_usage.py
@@ -20,7 +20,6 @@ from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
from nova import compute
-from nova import db
from nova import exception
from nova import objects
from nova.objects import instance as instance_obj
@@ -71,7 +70,7 @@ class ServerUsageTestV21(test.TestCase):
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Server_usage'])
return_server = fakes.fake_instance_get()
- self.stubs.Set(db, 'instance_get_by_uuid', return_server)
+ self.stub_out('nova.db.instance_get_by_uuid', return_server)
def _make_request(self, url):
req = fakes.HTTPRequest.blank(url)
diff --git a/nova/tests/unit/api/openstack/compute/test_serversV21.py b/nova/tests/unit/api/openstack/compute/test_serversV21.py
index e3a0e96e7f..f4318588cd 100644
--- a/nova/tests/unit/api/openstack/compute/test_serversV21.py
+++ b/nova/tests/unit/api/openstack/compute/test_serversV21.py
@@ -132,6 +132,18 @@ def fake_instance_get_all_with_locked(context, list_locked, **kwargs):
return objects.InstanceList(objects=obj_list)
+def fake_instance_get_all_with_description(context, list_desc, **kwargs):
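+ # Each fake server gets a uuid derived from its description, so the
+ # tests below can map returned servers back to their inputs.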
+ obj_list = []
+ s_id = 0
+ for desc in list_desc:
+ uuid = fakes.get_fake_uuid(desc)
+ s_id = s_id + 1
+ kwargs['display_description'] = desc
+ server = fakes.stub_instance_obj(context, id=s_id, uuid=uuid, **kwargs)
+ obj_list.append(server)
+ return objects.InstanceList(objects=obj_list)
+
+
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
@@ -199,8 +211,8 @@ class ControllerTest(test.TestCase):
lambda api, *a, **k: return_servers(*a, **k))
self.stubs.Set(compute_api.API, 'get',
lambda api, *a, **k: return_server(*a, **k))
- self.stubs.Set(db, 'instance_update_and_get_original',
- instance_update_and_get_original)
+ self.stub_out('nova.db.instance_update_and_get_original',
+ instance_update_and_get_original)
ext_info = extension_info.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
@@ -316,7 +328,7 @@ class ServersControllerTest(ControllerTest):
def fake_get(_self, *args, **kwargs):
expected_attrs = kwargs['expected_attrs']
self.assertEqual(['flavor', 'info_cache', 'metadata',
- 'pci_devices'], expected_attrs)
+ 'numa_topology', 'pci_devices'], expected_attrs)
ctxt = context.RequestContext('fake', 'fake')
return fake_instance.fake_instance_obj(
ctxt, expected_attrs=expected_attrs)
@@ -1409,6 +1421,66 @@ class ServersControllerTestV29(ServersControllerTest):
self.assertNotIn(key, search_opts)
+class ServersControllerTestV219(ServersControllerTest):
+ wsgi_api_version = '2.19'
+
+ def _get_server_data_dict(self, uuid, image_bookmark, flavor_bookmark,
+ status="ACTIVE", progress=100, description=None):
+ server_dict = super(ServersControllerTestV219,
+ self)._get_server_data_dict(uuid,
+ image_bookmark,
+ flavor_bookmark,
+ status,
+ progress)
+ server_dict['server']['locked'] = False
+ server_dict['server']['description'] = description
+ return server_dict
+
+ @mock.patch.object(compute_api.API, 'get')
+ def _test_get_server_with_description(self, description, get_mock):
+ image_bookmark = "http://localhost/fake/images/10"
+ flavor_bookmark = "http://localhost/fake/flavors/2"
+ uuid = FAKE_UUID
+ get_mock.side_effect = fakes.fake_compute_get(id=2,
+ display_description=description,
+ uuid=uuid)
+
+ req = self.req('/fake/servers/%s' % uuid)
+ res_dict = self.controller.show(req, uuid)
+
+ expected_server = self._get_server_data_dict(uuid,
+ image_bookmark,
+ flavor_bookmark,
+ status="BUILD",
+ progress=0,
+ description=description)
+ self.assertThat(res_dict, matchers.DictMatches(expected_server))
+ return res_dict
+
+ @mock.patch.object(compute_api.API, 'get_all')
+ def _test_list_server_detail_with_descriptions(self,
+ s1_desc,
+ s2_desc,
+ get_all_mock):
+ get_all_mock.return_value = fake_instance_get_all_with_description(
+ context, [s1_desc, s2_desc])
+ req = self.req('/fake/servers/detail')
+ servers_list = self.controller.detail(req)
+ # Check that each returned server carries the same 'description' and
+ # 'id' values it was created with.
+ for desc in [s1_desc, s2_desc]:
+ server = next(server for server in servers_list['servers']
+ if (server['id'] == fakes.get_fake_uuid(desc)))
+ expected = desc
+ self.assertEqual(expected, server['description'])
+
+ def test_get_server_with_description(self):
+ self._test_get_server_with_description('test desc')
+
+ def test_list_server_detail_with_descriptions(self):
+ self._test_list_server_detail_with_descriptions('desc1', 'desc2')
+
+
class ServersControllerDeleteTest(ControllerTest):
def setUp(self):
@@ -1485,7 +1557,7 @@ class ServersControllerDeleteTest(ControllerTest):
self.server_delete_called = True
deleted_at = timeutils.utcnow()
return fake_instance.fake_db_instance(deleted_at=deleted_at)
- self.stubs.Set(db, 'instance_destroy', instance_destroy_mock)
+ self.stub_out('nova.db.instance_destroy', instance_destroy_mock)
self.controller.delete(req, FAKE_UUID)
# delete() should be called for instance which has never been active,
@@ -1790,22 +1862,71 @@ class ServersControllerRebuildInstanceTest(ControllerTest):
self.controller._stop_server, req, FAKE_UUID, body)
def test_start_with_bogus_id(self):
- self.stubs.Set(db, 'instance_get_by_uuid',
- fake_instance_get_by_uuid_not_found)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid_not_found)
req = fakes.HTTPRequestV21.blank('/fake/servers/test_inst/action')
body = dict(start="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._start_server, req, 'test_inst', body)
def test_stop_with_bogus_id(self):
- self.stubs.Set(db, 'instance_get_by_uuid',
- fake_instance_get_by_uuid_not_found)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid_not_found)
req = fakes.HTTPRequestV21.blank('/fake/servers/test_inst/action')
body = dict(stop="")
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._stop_server, req, 'test_inst', body)
+class ServersControllerRebuildTestV219(ServersControllerRebuildInstanceTest):
+
+ def setUp(self):
+ super(ServersControllerRebuildTestV219, self).setUp()
+ self.req.api_version_request = \
+ api_version_request.APIVersionRequest('2.19')
+
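+ # Helper: rebuild the server, optionally sending a 'description' in
+ # the request body, and verify the response carries the expected
+ # description.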
+ def _rebuild_server(self, set_desc, desc):
+ fake_get = fakes.fake_compute_get(vm_state=vm_states.ACTIVE,
+ display_description=desc)
+ self.stubs.Set(compute_api.API, 'get',
+ lambda api, *a, **k: fake_get(*a, **k))
+
+ if set_desc:
+ self.body['rebuild']['description'] = desc
+ self.req.body = jsonutils.dump_as_bytes(self.body)
+ server = self.controller._action_rebuild(self.req, FAKE_UUID,
+ body=self.body).obj['server']
+ self.assertEqual(server['id'], FAKE_UUID)
+ self.assertEqual(server['description'], desc)
+
+ def test_rebuild_server_with_description(self):
+ self._rebuild_server(True, 'server desc')
+
+ def test_rebuild_server_empty_description(self):
+ self._rebuild_server(True, '')
+
+ def test_rebuild_server_without_description(self):
+ self._rebuild_server(False, '')
+
+ def test_rebuild_server_remove_description(self):
+ self._rebuild_server(True, None)
+
+ def test_rebuild_server_description_too_long(self):
+ self.body['rebuild']['description'] = 'x' * 256
+ self.req.body = jsonutils.dump_as_bytes(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, body=self.body)
+
+ def test_rebuild_server_description_invalid(self):
+ # Invalid non-printable control char in the desc.
+ self.body['rebuild']['description'] = "123\0d456"
+ self.req.body = jsonutils.dump_as_bytes(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_rebuild,
+ self.req, FAKE_UUID, body=self.body)
+
+
class ServersControllerUpdateTest(ControllerTest):
def _get_request(self, body=None, options=None):
@@ -1844,7 +1965,7 @@ class ServersControllerUpdateTest(ControllerTest):
req, FAKE_UUID, body=body)
def test_update_server_name_all_blank_spaces(self):
- self.stubs.Set(db, 'instance_get',
+ self.stub_out('nova.db.instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
@@ -1855,7 +1976,7 @@ class ServersControllerUpdateTest(ControllerTest):
req, FAKE_UUID, body=body)
def test_update_server_name_with_spaces_in_the_middle(self):
- self.stubs.Set(db, 'instance_get',
+ self.stub_out('nova.db.instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
@@ -1865,7 +1986,7 @@ class ServersControllerUpdateTest(ControllerTest):
self.controller.update(req, FAKE_UUID, body=body)
def test_update_server_name_with_leading_trailing_spaces(self):
- self.stubs.Set(db, 'instance_get',
+ self.stub_out('nova.db.instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
@@ -1876,7 +1997,7 @@ class ServersControllerUpdateTest(ControllerTest):
self.controller.update, req, FAKE_UUID, body=body)
def test_update_server_name_with_leading_trailing_spaces_compat_mode(self):
- self.stubs.Set(db, 'instance_get',
+ self.stub_out('nova.db.instance_get',
fakes.fake_instance_get(name='server_test'))
req = fakes.HTTPRequest.blank('/fake/servers/%s' % FAKE_UUID)
req.method = 'PUT'
@@ -1922,7 +2043,7 @@ class ServersControllerUpdateTest(ControllerTest):
def fake_update(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
- self.stubs.Set(db, 'instance_update_and_get_original', fake_update)
+ self.stub_out('nova.db.instance_update_and_get_original', fake_update)
body = {'server': {'name': 'server_test'}}
req = self._get_request(body)
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
@@ -1937,6 +2058,153 @@ class ServersControllerUpdateTest(ControllerTest):
self.controller.update, req, FAKE_UUID, body=body)
+class ServersControllerTriggerCrashDumpTest(ControllerTest):
+
+ def setUp(self):
+ super(ServersControllerTriggerCrashDumpTest, self).setUp()
+
+ self.instance = fakes.stub_instance_obj(None,
+ vm_state=vm_states.ACTIVE)
+
+ def fake_get(ctrl, ctxt, uuid):
+ if uuid != FAKE_UUID:
+ raise webob.exc.HTTPNotFound(explanation='fakeout')
+ return self.instance
+
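+ # Patch ServersController._get_instance so any UUID other than
+ # FAKE_UUID behaves like a missing server (HTTP 404).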
+ self.useFixture(
+ fixtures.MonkeyPatch('nova.api.openstack.compute.servers.'
+ 'ServersController._get_instance',
+ fake_get))
+
+ self.req = fakes.HTTPRequest.blank('/servers/%s/action' % FAKE_UUID)
+ self.req.api_version_request =\
+ api_version_request.APIVersionRequest('2.17')
+ self.body = dict(trigger_crash_dump=None)
+
+ @mock.patch.object(compute_api.API, 'trigger_crash_dump')
+ def test_trigger_crash_dump(self, mock_trigger_crash_dump):
+ ctxt = self.req.environ['nova.context']
+ self.controller._action_trigger_crash_dump(self.req, FAKE_UUID,
+ body=self.body)
+ mock_trigger_crash_dump.assert_called_with(ctxt, self.instance)
+
+ def test_trigger_crash_dump_policy_failed(self):
+ rule_name = "os_compute_api:servers:trigger_crash_dump"
+ self.policy.set_rules({rule_name: "project_id:non_fake"})
+ exc = self.assertRaises(exception.PolicyNotAuthorized,
+ self.controller._action_trigger_crash_dump,
+ self.req, FAKE_UUID, body=self.body)
+ self.assertIn("os_compute_api:servers:trigger_crash_dump",
+ exc.format_message())
+
+ @mock.patch.object(compute_api.API, 'trigger_crash_dump',
+ fake_start_stop_not_ready)
+ def test_trigger_crash_dump_not_ready(self):
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_trigger_crash_dump,
+ self.req, FAKE_UUID, body=self.body)
+
+ @mock.patch.object(compute_api.API, 'trigger_crash_dump',
+ fakes.fake_actions_to_locked_server)
+ def test_trigger_crash_dump_locked_server(self):
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_trigger_crash_dump,
+ self.req, FAKE_UUID, body=self.body)
+
+ @mock.patch.object(compute_api.API, 'trigger_crash_dump',
+ fake_start_stop_invalid_state)
+ def test_trigger_crash_dump_invalid_state(self):
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_trigger_crash_dump,
+ self.req, FAKE_UUID, body=self.body)
+
+ def test_trigger_crash_dump_with_bogus_id(self):
+ self.assertRaises(webob.exc.HTTPNotFound,
+ self.controller._action_trigger_crash_dump,
+ self.req, 'test_inst', body=self.body)
+
+ def test_trigger_crash_dump_schema_invalid_type(self):
+ self.body['trigger_crash_dump'] = 'not null'
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_trigger_crash_dump,
+ self.req, FAKE_UUID, body=self.body)
+
+ def test_trigger_crash_dump_schema_extra_property(self):
+ self.body['extra_property'] = 'extra'
+ self.assertRaises(exception.ValidationError,
+ self.controller._action_trigger_crash_dump,
+ self.req, FAKE_UUID, body=self.body)
+
+ @mock.patch.object(compute_api.API, 'trigger_crash_dump',
+ side_effect=exception.NMINotSupported)
+ def test_trigger_crash_dump_not_supported(self, mock_trigger_crash_dump):
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller._action_trigger_crash_dump,
+ self.req, FAKE_UUID, body=self.body)
+
+
+class ServersControllerUpdateTestV219(ServersControllerUpdateTest):
+ def _get_request(self, body=None, options=None):
+ req = super(ServersControllerUpdateTestV219, self)._get_request(
+ body=body,
+ options=options)
+ req.api_version_request = api_version_request.APIVersionRequest('2.19')
+ return req
+
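+ # Helper: PUT a server update that optionally includes a
+ # 'description' field and return the response body for assertions.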
+ def _update_server_desc(self, set_desc, desc=None):
+ body = {'server': {}}
+ if set_desc:
+ body['server']['description'] = desc
+ req = self._get_request()
+ res_dict = self.controller.update(req, FAKE_UUID, body=body)
+ return res_dict
+
+ def test_update_server_description(self):
+ res_dict = self._update_server_desc(True, 'server_desc')
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['description'], 'server_desc')
+
+ def test_update_server_empty_description(self):
+ res_dict = self._update_server_desc(True, '')
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['description'], '')
+
+ def test_update_server_without_description(self):
+ res_dict = self._update_server_desc(False)
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertIsNone(res_dict['server']['description'])
+
+ def test_update_server_remove_description(self):
+ res_dict = self._update_server_desc(True)
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertIsNone(res_dict['server']['description'])
+
+ def test_update_server_all_attributes(self):
+ body = {'server': {
+ 'name': 'server_test',
+ 'description': 'server_desc'
+ }}
+ req = self._get_request(body, {'name': 'server_test'})
+ res_dict = self.controller.update(req, FAKE_UUID, body=body)
+
+ self.assertEqual(res_dict['server']['id'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server_test')
+ self.assertEqual(res_dict['server']['description'], 'server_desc')
+
+ def test_update_server_description_too_long(self):
+ body = {'server': {'description': 'x' * 256}}
+ req = self._get_request(body, {'name': 'server_test'})
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ req, FAKE_UUID, body=body)
+
+ def test_update_server_description_invalid(self):
+ # Invalid non-printable control char in the desc.
+ body = {'server': {'description': "123\0d456"}}
+ req = self._get_request(body, {'name': 'server_test'})
+ self.assertRaises(exception.ValidationError, self.controller.update,
+ req, FAKE_UUID, body=body)
+
+
class ServerStatusTest(test.TestCase):
def setUp(self):
@@ -1947,7 +2215,7 @@ class ServerStatusTest(test.TestCase):
self.controller = servers.ServersController(extension_info=ext_info)
def _get_with_state(self, vm_state, task_state=None):
- self.stubs.Set(db, 'instance_get_by_uuid',
+ self.stub_out('nova.db.instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_state,
task_state=task_state))
@@ -2065,6 +2333,7 @@ class ServersControllerCreateTest(test.TestCase):
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
+ 'display_description': inst['display_description'] or '',
'uuid': FAKE_UUID,
'instance_type': inst_type,
'image_ref': inst.get('image_ref', def_image_ref),
@@ -2112,14 +2381,12 @@ class ServersControllerCreateTest(test.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
- self.stubs.Set(db, 'project_get_networks',
- project_get_networks)
- self.stubs.Set(db, 'instance_create', instance_create)
- self.stubs.Set(db, 'instance_system_metadata_update',
- fake_method)
- self.stubs.Set(db, 'instance_get', instance_get)
- self.stubs.Set(db, 'instance_update', instance_update)
- self.stubs.Set(db, 'instance_update_and_get_original',
+ self.stub_out('nova.db.project_get_networks', project_get_networks)
+ self.stub_out('nova.db.instance_create', instance_create)
+ self.stub_out('nova.db.instance_system_metadata_update', fake_method)
+ self.stub_out('nova.db.instance_get', instance_get)
+ self.stub_out('nova.db.instance_update', instance_update)
+ self.stub_out('nova.db.instance_update_and_get_original',
server_update_and_get_original)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
@@ -2305,7 +2572,7 @@ class ServersControllerCreateTest(test.TestCase):
# self.assertEqual(kwargs['key_name'], key_name)
# return old_create(*args, **kwargs)
#
- # self.stubs.Set(db, 'key_pair_get', key_pair_get)
+ # self.stub_out('nova.db.key_pair_get', key_pair_get)
# self.stubs.Set(compute_api.API, 'create', create)
# self._test_create_extra(params)
#
@@ -2819,7 +3086,7 @@ class ServersControllerCreateTest(test.TestCase):
self.stubs.Set(fakes.QUOTAS, 'count', fake_count)
self.stubs.Set(fakes.QUOTAS, 'limit_check', fake_limit_check)
- self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
+ self.stub_out('nova.db.instance_destroy', fake_instance_destroy)
self.body['os:scheduler_hints'] = {'group': fake_group.uuid}
self.req.body = jsonutils.dump_as_bytes(self.body)
expected_msg = "Quota exceeded, too many servers in group"
@@ -2840,7 +3107,7 @@ class ServersControllerCreateTest(test.TestCase):
def fake_instance_destroy(context, uuid, constraint):
return fakes.stub_instance(1)
- self.stubs.Set(db, 'instance_destroy', fake_instance_destroy)
+ self.stub_out('nova.db.instance_destroy', fake_instance_destroy)
self.body['os:scheduler_hints'] = {'group': test_group.uuid}
self.req.body = jsonutils.dump_as_bytes(self.body)
server = self.controller.create(self.req, body=self.body).obj['server']
@@ -2848,6 +3115,24 @@ class ServersControllerCreateTest(test.TestCase):
test_group = objects.InstanceGroup.get_by_uuid(ctxt, test_group.uuid)
self.assertIn(server['id'], test_group.members)
+ def test_create_instance_with_group_hint_group_not_found(self):
+ def fake_instance_destroy(context, uuid, constraint):
+ return fakes.stub_instance(1)
+
+ self.stub_out('nova.db.instance_destroy', fake_instance_destroy)
+ self.body['os:scheduler_hints'] = {
+ 'group': '5b674f73-c8cf-40ef-9965-3b6fe4b304b1'}
+ self.req.body = jsonutils.dump_as_bytes(self.body)
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create, self.req, body=self.body)
+
+ def test_create_instance_with_group_hint_wrong_uuid_format(self):
+ self.body['os:scheduler_hints'] = {
+ 'group': 'non-uuid'}
+ self.req.body = jsonutils.dump_as_bytes(self.body)
+ self.assertRaises(exception.ValidationError,
+ self.controller.create, self.req, body=self.body)
+
def test_create_instance_with_neutronv2_port_in_use(self):
network = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
port = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
@@ -2976,6 +3261,13 @@ class ServersControllerCreateTest(test.TestCase):
self.req, body=self.body)
@mock.patch.object(compute_api.API, 'create',
+ side_effect=exception.InvalidBDM)
+ def test_create_instance_raise_invalid_bdm(self, mock_create):
+ self.assertRaises(webob.exc.HTTPBadRequest,
+ self.controller.create,
+ self.req, body=self.body)
+
+ @mock.patch.object(compute_api.API, 'create',
side_effect=exception.ImageBadRequest(
image_id='dummy', response='dummy'))
def test_create_instance_raise_image_bad_request(self, mock_create):
@@ -3027,6 +3319,48 @@ class ServersControllerCreateTest(test.TestCase):
self.req, body=self.body)
+class ServersControllerCreateTestV219(ServersControllerCreateTest):
+ def _create_instance_req(self, set_desc, desc=None):
+ # proper local hrefs must start with 'http://localhost/v2/'
+ image_href = 'http://localhost/v2/images/%s' % self.image_uuid
+ self.body['server']['imageRef'] = image_href
+ if set_desc:
+ self.body['server']['description'] = desc
+ self.req.body = jsonutils.dump_as_bytes(self.body)
+ self.req.api_version_request = \
+ api_version_request.APIVersionRequest('2.19')
+
+ def test_create_instance_with_description(self):
+ self._create_instance_req(True, 'server_desc')
+ # The fact that the action doesn't raise is enough validation
+ self.controller.create(self.req, body=self.body).obj
+
+ def test_create_instance_with_none_description(self):
+ self._create_instance_req(True)
+ # The fact that the action doesn't raise is enough validation
+ self.controller.create(self.req, body=self.body).obj
+
+ def test_create_instance_with_empty_description(self):
+ self._create_instance_req(True, '')
+ # The fact that the action doesn't raise is enough validation
+ self.controller.create(self.req, body=self.body).obj
+
+ def test_create_instance_without_description(self):
+ self._create_instance_req(False)
+ # The fact that the action doesn't raise is enough validation
+ self.controller.create(self.req, body=self.body).obj
+
+ def test_create_instance_description_too_long(self):
+ self._create_instance_req(True, 'X' * 256)
+ self.assertRaises(exception.ValidationError, self.controller.create,
+ self.req, body=self.body)
+
+ def test_create_instance_description_invalid(self):
+ self._create_instance_req(True, "abc\0ddef")
+ self.assertRaises(exception.ValidationError, self.controller.create,
+ self.req, body=self.body)
+
+
class ServersControllerCreateTestWithMock(test.TestCase):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
@@ -3704,7 +4038,7 @@ class FakeExt(extensions.V21APIExtensionBase):
pass
def fake_schema_extension_point(self, version):
- if version == '2.1':
+ if version == '2.1' or version == '2.19':
return self.fake_schema
elif version == '2.0':
return {}
@@ -3876,6 +4210,17 @@ class ServersPolicyEnforcementV21(test.NoDBTestCase):
rule, rule_name, self.controller._stop_server,
self.req, FAKE_UUID, body={})
+ @mock.patch.object(servers.ServersController, '_get_instance')
+ def test_trigger_crash_dump_policy_failed(self, _get_instance_mock):
+ _get_instance_mock.return_value = None
+ rule_name = "os_compute_api:servers:trigger_crash_dump"
+ rule = {rule_name: "project:non_fake"}
+ self.req.api_version_request =\
+ api_version_request.APIVersionRequest('2.17')
+ self._common_policy_check(
+ rule, rule_name, self.controller._action_trigger_crash_dump,
+ self.req, FAKE_UUID, body={'trigger_crash_dump': None})
+
def test_index_policy_failed(self):
rule_name = "os_compute_api:servers:index"
rule = {rule_name: "project:non_fake"}
diff --git a/nova/tests/unit/api/openstack/compute/test_services.py b/nova/tests/unit/api/openstack/compute/test_services.py
index f01a8ec4f9..d15d6989dd 100644
--- a/nova/tests/unit/api/openstack/compute/test_services.py
+++ b/nova/tests/unit/api/openstack/compute/test_services.py
@@ -13,6 +13,7 @@
# under the License.
+import copy
import datetime
import iso8601
@@ -47,6 +48,7 @@ fake_services_list = [
topic='scheduler',
updated_at=datetime.datetime(2012, 10, 29, 13, 42, 2),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
+ last_seen_up=datetime.datetime(2012, 10, 29, 13, 42, 2),
forced_down=False,
disabled_reason='test1'),
dict(test_service.fake_service,
@@ -57,6 +59,7 @@ fake_services_list = [
topic='compute',
updated_at=datetime.datetime(2012, 10, 29, 13, 42, 5),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 27),
+ last_seen_up=datetime.datetime(2012, 10, 29, 13, 42, 5),
forced_down=False,
disabled_reason='test2'),
dict(test_service.fake_service,
@@ -67,6 +70,7 @@ fake_services_list = [
topic='scheduler',
updated_at=datetime.datetime(2012, 9, 19, 6, 55, 34),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
+ last_seen_up=datetime.datetime(2012, 9, 19, 6, 55, 34),
forced_down=False,
disabled_reason=None),
dict(test_service.fake_service,
@@ -77,8 +81,32 @@ fake_services_list = [
topic='compute',
updated_at=datetime.datetime(2012, 9, 18, 8, 3, 38),
created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
+ last_seen_up=datetime.datetime(2012, 9, 18, 8, 3, 38),
forced_down=False,
disabled_reason='test4'),
+ # NOTE(rpodolyaka): API services are a special case and must be filtered out
+ dict(test_service.fake_service,
+ binary='nova-osapi_compute',
+ host='host2',
+ id=5,
+ disabled=False,
+ topic=None,
+ updated_at=None,
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
+ last_seen_up=None,
+ forced_down=False,
+ disabled_reason=None),
+ dict(test_service.fake_service,
+ binary='nova-metadata',
+ host='host2',
+ id=6,
+ disabled=False,
+ topic=None,
+ updated_at=None,
+ created_at=datetime.datetime(2012, 9, 18, 2, 46, 28),
+ last_seen_up=None,
+ forced_down=False,
+ disabled_reason=None),
]
@@ -142,6 +170,8 @@ def fake_db_service_update(services):
service = _service_get_by_id(services, service_id)
if service is None:
raise exception.ServiceNotFound(service_id=service_id)
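+ # Update a deep copy so the shared fake_services_list entries are
+ # not mutated across tests.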
+ service = copy.deepcopy(service)
+ service.update(values)
return service
return service_update
diff --git a/nova/tests/unit/api/openstack/compute/test_shelve.py b/nova/tests/unit/api/openstack/compute/test_shelve.py
index 2c9df3f74a..c25c64ff2c 100644
--- a/nova/tests/unit/api/openstack/compute/test_shelve.py
+++ b/nova/tests/unit/api/openstack/compute/test_shelve.py
@@ -20,7 +20,6 @@ import webob
from nova.api.openstack.compute.legacy_v2.contrib import shelve as shelve_v2
from nova.api.openstack.compute import shelve as shelve_v21
from nova.compute import api as compute_api
-from nova import db
from nova import exception
from nova import policy
from nova import test
@@ -52,7 +51,8 @@ class ShelvePolicyTestV21(test.NoDBTestCase):
self.req, str(uuid.uuid4()), {})
def test_shelve_locked_server(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid)
self.stubs.Set(compute_api.API, 'shelve',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict, self.controller._shelve,
@@ -66,7 +66,8 @@ class ShelvePolicyTestV21(test.NoDBTestCase):
self.req, str(uuid.uuid4()), {})
def test_unshelve_locked_server(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid)
self.stubs.Set(compute_api.API, 'unshelve',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict, self.controller._unshelve,
@@ -82,7 +83,8 @@ class ShelvePolicyTestV21(test.NoDBTestCase):
str(uuid.uuid4()), {})
def test_shelve_offload_locked_server(self):
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid)
self.stubs.Set(compute_api.API, 'shelve_offload',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict,
@@ -100,7 +102,8 @@ class ShelvePolicyTestV2(ShelvePolicyTestV21):
rules = {'compute:get': '',
'compute_extension:%sshelve' % self.prefix: ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid)
self.assertRaises(exception.Forbidden, self.controller._shelve,
self.req, str(uuid.uuid4()), {})
@@ -109,7 +112,8 @@ class ShelvePolicyTestV2(ShelvePolicyTestV21):
'compute_extension:%sunshelve' % self.prefix: ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid)
self.assertRaises(exception.Forbidden, self.controller._unshelve,
self.req, str(uuid.uuid4()), {})
@@ -118,7 +122,8 @@ class ShelvePolicyTestV2(ShelvePolicyTestV21):
'compute_extension:%s%s' % (self.prefix, self.offload): ''}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
- self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
+ self.stub_out('nova.db.instance_get_by_uuid',
+ fake_instance_get_by_uuid)
self.assertRaises(exception.Forbidden,
self.controller._shelve_offload,
self.req,
diff --git a/nova/tests/unit/api/openstack/compute/test_user_data.py b/nova/tests/unit/api/openstack/compute/test_user_data.py
index d498e75ec6..48e9c9c6f9 100644
--- a/nova/tests/unit/api/openstack/compute/test_user_data.py
+++ b/nova/tests/unit/api/openstack/compute/test_user_data.py
@@ -26,7 +26,6 @@ from nova.api.openstack.compute import servers
from nova.api.openstack.compute import user_data
from nova.compute import api as compute_api
from nova.compute import flavors
-from nova import db
from nova import exception
from nova.network import manager
from nova import test
@@ -123,17 +122,15 @@ class ServersControllerCreateTest(test.TestCase):
fake.stub_out_image_service(self)
fakes.stub_out_nw_api(self)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
- self.stubs.Set(db, 'instance_add_security_group',
- return_security_group)
- self.stubs.Set(db, 'project_get_networks',
- project_get_networks)
- self.stubs.Set(db, 'instance_create', instance_create)
- self.stubs.Set(db, 'instance_system_metadata_update',
- fake_method)
- self.stubs.Set(db, 'instance_get', instance_get)
- self.stubs.Set(db, 'instance_update', instance_update)
- self.stubs.Set(db, 'instance_update_and_get_original',
- server_update)
+ self.stub_out('nova.db.instance_add_security_group',
+ return_security_group)
+ self.stub_out('nova.db.project_get_networks', project_get_networks)
+ self.stub_out('nova.db.instance_create', instance_create)
+ self.stub_out('nova.db.instance_system_metadata_update', fake_method)
+ self.stub_out('nova.db.instance_get', instance_get)
+ self.stub_out('nova.db.instance_update', instance_update)
+ self.stub_out('nova.db.instance_update_and_get_original',
+ server_update)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
diff --git a/nova/tests/unit/api/openstack/compute/test_versions.py b/nova/tests/unit/api/openstack/compute/test_versions.py
index f3369d46bb..1a6ce6f636 100644
--- a/nova/tests/unit/api/openstack/compute/test_versions.py
+++ b/nova/tests/unit/api/openstack/compute/test_versions.py
@@ -16,10 +16,13 @@
import copy
import uuid as stdlib_uuid
+import mock
from oslo_serialization import jsonutils
import webob
+from nova.api.openstack import api_version_request as avr
from nova.api.openstack.compute import views
+from nova.api.openstack import extensions
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import matchers
@@ -31,6 +34,7 @@ NS = {
'ns': 'http://docs.openstack.org/common/api/v1.0'
}
+MAX_API_VERSION = avr.max_api_version().get_string()
EXP_LINKS = {
'v2.0': {
@@ -66,7 +70,7 @@ EXP_VERSIONS = {
"v2.1": {
"id": "v2.1",
"status": "CURRENT",
- "version": "2.15",
+ "version": MAX_API_VERSION,
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
@@ -101,9 +105,11 @@ def _get_self_href(response):
class VersionsTestV20(test.NoDBTestCase):
- def setUp(self):
- super(VersionsTestV20, self).setUp()
- self.wsgi_app = fakes.wsgi_app()
+ @property
+ def wsgi_app(self):
+ with mock.patch.object(extensions.ExtensionManager, 'load_extension'):
+ # patch load_extension because it's expensive in fakes.wsgi_app
+ return fakes.wsgi_app(init_only=('servers', 'images', 'versions'))
def test_get_version_list(self):
req = webob.Request.blank('/')
@@ -128,7 +134,7 @@ class VersionsTestV20(test.NoDBTestCase):
{
"id": "v2.1",
"status": "CURRENT",
- "version": "2.15",
+ "version": MAX_API_VERSION,
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z",
"links": [
@@ -194,7 +200,7 @@ class VersionsTestV20(test.NoDBTestCase):
self._test_get_version_2_detail('/', accept=accept)
def test_get_version_2_versions_invalid(self):
- req = webob.Request.blank('/v2/versions/1234')
+ req = webob.Request.blank('/v2/versions/1234/foo')
req.accept = "application/json"
res = req.get_response(self.wsgi_app)
self.assertEqual(404, res.status_int)
@@ -444,10 +450,14 @@ class VersionsTestV21(test.NoDBTestCase):
{'href': 'http://localhost/v2.1/', 'rel': 'self'},
)
+ @property
+ def wsgi_app(self):
+ return fakes.wsgi_app_v21(init_only=('versions',))
+
def test_get_version_list_302(self):
req = webob.Request.blank('/v2.1')
req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21())
+ res = req.get_response(self.wsgi_app)
self.assertEqual(302, res.status_int)
redirect_req = webob.Request.blank('/v2.1/')
self.assertEqual(redirect_req.url, res.location)
@@ -455,7 +465,7 @@ class VersionsTestV21(test.NoDBTestCase):
def test_get_version_21_detail(self):
req = webob.Request.blank('/v2.1/')
req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21())
+ res = req.get_response(self.wsgi_app)
self.assertEqual(200, res.status_int)
self.assertEqual("application/json", res.content_type)
version = jsonutils.loads(res.body)
@@ -465,7 +475,7 @@ class VersionsTestV21(test.NoDBTestCase):
def test_get_version_21_versions_v21_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.1')
req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21())
+ res = req.get_response(self.wsgi_app)
self.assertEqual(200, res.status_int)
self.assertEqual("application/json", res.content_type)
version = jsonutils.loads(res.body)
@@ -475,7 +485,7 @@ class VersionsTestV21(test.NoDBTestCase):
def test_get_version_21_versions_v20_detail(self):
req = webob.Request.blank('/v2.1/fake/versions/v2.0')
req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21())
+ res = req.get_response(self.wsgi_app)
self.assertEqual(200, res.status_int)
self.assertEqual("application/json", res.content_type)
version = jsonutils.loads(res.body)
@@ -483,15 +493,15 @@ class VersionsTestV21(test.NoDBTestCase):
self.assertEqual(expected, version)
def test_get_version_21_versions_invalid(self):
- req = webob.Request.blank('/v2.1/versions/1234')
+ req = webob.Request.blank('/v2.1/versions/1234/foo')
req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app_v21())
+ res = req.get_response(self.wsgi_app)
self.assertEqual(404, res.status_int)
def test_get_version_21_detail_content_type(self):
req = webob.Request.blank('/')
req.accept = "application/json;version=2.1"
- res = req.get_response(fakes.wsgi_app_v21())
+ res = req.get_response(self.wsgi_app)
self.assertEqual(200, res.status_int)
self.assertEqual("application/json", res.content_type)
version = jsonutils.loads(res.body)
@@ -504,10 +514,16 @@ class VersionBehindSslTestCase(test.NoDBTestCase):
super(VersionBehindSslTestCase, self).setUp()
self.flags(secure_proxy_ssl_header='HTTP_X_FORWARDED_PROTO')
+ @property
+ def wsgi_app(self):
+ with mock.patch.object(extensions.ExtensionManager, 'load_extension'):
+ # patch load_extension because it's expensive in fakes.wsgi_app
+ return fakes.wsgi_app(init_only=('versions',))
+
def test_versions_without_headers(self):
req = wsgi.Request.blank('/')
req.accept = "application/json"
- res = req.get_response(fakes.wsgi_app())
+ res = req.get_response(self.wsgi_app)
self.assertEqual(200, res.status_int)
href = _get_self_href(res)
self.assertTrue(href.startswith('http://'))
@@ -516,7 +532,7 @@ class VersionBehindSslTestCase(test.NoDBTestCase):
req = wsgi.Request.blank('/')
req.accept = "application/json"
req.headers['X-Forwarded-Proto'] = 'https'
- res = req.get_response(fakes.wsgi_app())
+ res = req.get_response(self.wsgi_app)
self.assertEqual(200, res.status_int)
href = _get_self_href(res)
self.assertTrue(href.startswith('https://'))
@@ -524,6 +540,6 @@ class VersionBehindSslTestCase(test.NoDBTestCase):
class VersionsTestV21WithV2CompatibleWrapper(VersionsTestV20):
- def setUp(self):
- super(VersionsTestV21WithV2CompatibleWrapper, self).setUp()
- self.wsgi_app = fakes.wsgi_app_v21(v2_compatible=True)
+ @property
+ def wsgi_app(self):
+ return fakes.wsgi_app_v21(v2_compatible=True, init_only=('versions',))
diff --git a/nova/tests/unit/api/openstack/compute/test_volumes.py b/nova/tests/unit/api/openstack/compute/test_volumes.py
index 19da82b324..42745aa100 100644
--- a/nova/tests/unit/api/openstack/compute/test_volumes.py
+++ b/nova/tests/unit/api/openstack/compute/test_volumes.py
@@ -23,6 +23,7 @@ from six.moves import urllib
import webob
from webob import exc
+from nova.api.openstack import common
from nova.api.openstack.compute import assisted_volume_snapshots \
as assisted_snaps_v21
from nova.api.openstack.compute.legacy_v2.contrib import \
@@ -32,8 +33,8 @@ from nova.api.openstack.compute import volumes as volumes_v21
from nova.api.openstack import extensions
from nova.compute import api as compute_api
from nova.compute import flavors
+from nova.compute import vm_states
from nova import context
-from nova import db
from nova import exception
from nova import objects
from nova import test
@@ -60,7 +61,10 @@ def fake_get_instance(self, context, instance_id, want_objects=False,
def fake_get_volume(self, context, id):
- return {'id': 'woot'}
+ return {'id': FAKE_UUID_A,
+ 'status': 'available',
+ 'attach_status': 'detached'
+ }
def fake_attach_volume(self, context, instance, volume_id, device):
@@ -236,10 +240,10 @@ class VolumeApiTestV21(test.NoDBTestCase):
osapi_compute_ext_list=['Volumes'])
self.context = context.get_admin_context()
- self.app = self._get_app()
- def _get_app(self):
- return fakes.wsgi_app_v21()
+ @property
+ def app(self):
+ return fakes.wsgi_app_v21(init_only=('os-volumes', 'servers'))
def test_volume_create(self):
self.stubs.Set(cinder.API, "create", fakes.stub_volume_create)
@@ -344,18 +348,9 @@ class VolumeApiTestV21(test.NoDBTestCase):
class VolumeApiTestV2(VolumeApiTestV21):
- def setUp(self):
- super(VolumeApiTestV2, self).setUp()
- self.flags(
- osapi_compute_extension=[
- 'nova.api.openstack.compute.contrib.select_extensions'],
- osapi_compute_ext_list=['Volumes'])
-
- self.context = context.get_admin_context()
- self.app = self._get_app()
-
- def _get_app(self):
- return fakes.wsgi_app()
+ @property
+ def app(self):
+ return fakes.wsgi_app(init_only=('os-volumes', 'servers'))
class VolumeAttachTestsV21(test.NoDBTestCase):
@@ -363,8 +358,8 @@ class VolumeAttachTestsV21(test.NoDBTestCase):
def setUp(self):
super(VolumeAttachTestsV21, self).setUp()
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_bdms_get_all_by_instance)
+ self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
+ fake_bdms_get_all_by_instance)
self.stubs.Set(compute_api.API, 'get', fake_get_instance)
self.stubs.Set(cinder.API, 'get', fake_get_volume)
self.context = context.get_admin_context()
@@ -458,6 +453,40 @@ class VolumeAttachTestsV21(test.NoDBTestCase):
status_int = result.status_int
self.assertEqual(202, status_int)
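+ # NOTE: microversion 2.20 began allowing volume attach/detach on
+ # shelved servers; the tests below exercise both sides: 2.19 still
+ # returns 409 Conflict while 2.20 succeeds.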
+ @mock.patch.object(common, 'get_instance')
+ def test_detach_vol_shelved_not_supported(self, mock_get_instance):
+ inst = fake_instance.fake_instance_obj(self.context,
+ **{'uuid': FAKE_UUID})
+ inst.vm_state = vm_states.SHELVED
+ mock_get_instance.return_value = inst
+ req = fakes.HTTPRequest.blank(
+ '/v2/servers/id/os-volume_attachments/uuid', version='2.19')
+ req.method = 'DELETE'
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.attachments.delete,
+ req,
+ FAKE_UUID,
+ FAKE_UUID_A)
+
+ @mock.patch.object(compute_api.API, 'detach_volume')
+ @mock.patch.object(common, 'get_instance')
+ def test_detach_vol_shelved_supported(self,
+ mock_get_instance,
+ mock_detach):
+ inst = fake_instance.fake_instance_obj(self.context,
+ **{'uuid': FAKE_UUID})
+ inst.vm_state = vm_states.SHELVED
+ mock_get_instance.return_value = inst
+ req = fakes.HTTPRequest.blank(
+ '/v2/servers/id/os-volume_attachments/uuid', version='2.20')
+ req.method = 'DELETE'
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.attachments.delete(req, FAKE_UUID, FAKE_UUID_A)
+ self.assertTrue(mock_detach.called)
+
def test_detach_vol_not_found(self):
self.stubs.Set(compute_api.API,
'detach_volume',
@@ -521,6 +550,51 @@ class VolumeAttachTestsV21(test.NoDBTestCase):
self.assertEqual('00000000-aaaa-aaaa-aaaa-000000000000',
result['volumeAttachment']['id'])
+ @mock.patch.object(common, 'get_instance')
+ def test_attach_vol_shelved_not_supported(self, mock_get_instance):
+ body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
+ 'device': '/dev/fake'}}
+
+ inst = fake_instance.fake_instance_obj(self.context,
+ **{'uuid': FAKE_UUID})
+ inst.vm_state = vm_states.SHELVED
+ mock_get_instance.return_value = inst
+ req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments',
+ version='2.19')
+ req.method = 'POST'
+ req.body = jsonutils.dump_as_bytes({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.attachments.create,
+ req,
+ FAKE_UUID,
+ body=body)
+
+ @mock.patch.object(compute_api.API, 'attach_volume',
+ return_value='/dev/myfake')
+ @mock.patch.object(common, 'get_instance')
+ def test_attach_vol_shelved_supported(self,
+ mock_get_instance,
+ mock_attach):
+ body = {'volumeAttachment': {'volumeId': FAKE_UUID_A,
+ 'device': '/dev/fake'}}
+
+ inst = fake_instance.fake_instance_obj(self.context,
+ **{'uuid': FAKE_UUID})
+ inst.vm_state = vm_states.SHELVED
+ mock_get_instance.return_value = inst
+ req = fakes.HTTPRequest.blank('/v2/servers/id/os-volume_attachments',
+ version='2.20')
+ req.method = 'POST'
+ req.body = jsonutils.dump_as_bytes({})
+ req.headers['content-type'] = 'application/json'
+ req.environ['nova.context'] = self.context
+ result = self.attachments.create(req, FAKE_UUID, body=body)
+ self.assertEqual('00000000-aaaa-aaaa-aaaa-000000000000',
+ result['volumeAttachment']['id'])
+ self.assertEqual('/dev/myfake', result['volumeAttachment']['device'])
+
@mock.patch.object(compute_api.API, 'attach_volume',
return_value='/dev/myfake')
def test_attach_volume_with_auto_device(self, mock_attach):
diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py
index 4367b5d158..3b4e33928d 100644
--- a/nova/tests/unit/api/openstack/fakes.py
+++ b/nova/tests/unit/api/openstack/fakes.py
@@ -431,6 +431,7 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0,
auto_disk_config=False, display_name=None,
+ display_description=None,
include_fake_metadata=True, config_drive=None,
power_state=None, nw_cache=None, metadata=None,
security_groups=None, root_device_name=None,
@@ -440,7 +441,8 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
availability_zone='', locked_by=None, cleaned=False,
memory_mb=0, vcpus=0, root_gb=0, ephemeral_gb=0,
instance_type=None, launch_index=0, kernel_id="",
- ramdisk_id="", user_data=None, system_metadata=None):
+ ramdisk_id="", user_data=None, system_metadata=None,
+ services=None):
if user_id is None:
user_id = 'fake_user'
if project_id is None:
@@ -521,7 +523,7 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
"terminated_at": terminated_at,
"availability_zone": availability_zone,
"display_name": display_name or server_name,
- "display_description": "",
+ "display_description": display_description,
"locked": locked_by is not None,
"locked_by": locked_by,
"metadata": metadata,
@@ -548,7 +550,8 @@ def stub_instance(id=1, user_id=None, project_id=None, host=None,
"pci_requests": None,
"flavor": flavorinfo,
},
- "cleaned": cleaned}
+ "cleaned": cleaned,
+ "services": services}
instance.update(info_cache)
instance['info_cache']['instance_uuid'] = instance['uuid']
@@ -564,6 +567,10 @@ def stub_instance_obj(ctxt, *args, **kwargs):
db_inst,
expected_attrs=expected)
inst.fault = None
+ if db_inst["services"] is not None:
+ # Ensure the services attribute is set when the caller provided one
+ inst.services = db_inst["services"]
+
return inst
@@ -575,8 +582,6 @@ def stub_volume(id, **kwargs):
'host': 'fakehost',
'size': 1,
'availability_zone': 'fakeaz',
- 'instance_uuid': 'fakeuuid',
- 'mountpoint': '/',
'status': 'fakestatus',
'attach_status': 'attached',
'name': 'vol name',
@@ -586,7 +591,12 @@ def stub_volume(id, **kwargs):
'snapshot_id': None,
'volume_type_id': 'fakevoltype',
'volume_metadata': [],
- 'volume_type': {'name': 'vol_type_name'}}
+ 'volume_type': {'name': 'vol_type_name'},
+ 'multiattach': True,
+ 'attachments': {'fakeuuid': {'mountpoint': '/'},
+ 'fakeuuid2': {'mountpoint': '/dev/sdb'}
+ }
+ }
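+ # NOTE: with multiattach enabled, 'attachments' maps each attached
+ # instance uuid to its per-instance details, e.g.
+ # volume['attachments']['fakeuuid']['mountpoint'] == '/'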
volume.update(kwargs)
return volume
diff --git a/nova/tests/unit/api/openstack/test_wsgi.py b/nova/tests/unit/api/openstack/test_wsgi.py
index f3f2fbe8a3..b71f27549c 100644
--- a/nova/tests/unit/api/openstack/test_wsgi.py
+++ b/nova/tests/unit/api/openstack/test_wsgi.py
@@ -206,12 +206,6 @@ class ActionDispatcherTest(test.NoDBTestCase):
self.assertEqual(serializer.dispatch({}, action='update'), 'trousers')
-class DictSerializerTest(test.NoDBTestCase):
- def test_dispatch_default(self):
- serializer = wsgi.DictSerializer()
- self.assertEqual(serializer.serialize({}, 'update'), '')
-
-
class JSONDictSerializerTest(test.NoDBTestCase):
def test_json(self):
input_dict = dict(servers=dict(a=(2, 3)))
@@ -222,12 +216,6 @@ class JSONDictSerializerTest(test.NoDBTestCase):
self.assertEqual(result, expected_json)
-class TextDeserializerTest(test.NoDBTestCase):
- def test_dispatch_default(self):
- deserializer = wsgi.TextDeserializer()
- self.assertEqual(deserializer.deserialize({}, 'update'), {})
-
-
class JSONDeserializerTest(test.NoDBTestCase):
def test_json(self):
data = """{"a": {
@@ -416,7 +404,9 @@ class ResourceTest(test.NoDBTestCase):
response = req.get_response(app)
self.assertEqual(response.status_int, 200)
# verify no content_type is contained in the request
- req.content_type = None
+ req = webob.Request.blank('/tests/test_id', method="PUT",
+ content_type='application/xml')
+ req.content_type = 'application/xml'
req.body = b'{"body": {"key": "value"}}'
response = req.get_response(app)
expected_unsupported_type_body = {'badRequest':
@@ -555,9 +545,8 @@ class ResourceTest(test.NoDBTestCase):
request.headers['Content-Type'] = 'application/none'
request.body = b'foo'
- content_type, body = resource.get_body(request)
- self.assertIsNone(content_type)
- self.assertEqual(b'', body)
+ self.assertRaises(exception.InvalidContentType,
+ resource.get_body, request)
def test_get_body_no_content_type(self):
class Controller(object):
@@ -646,31 +635,16 @@ class ResourceTest(test.NoDBTestCase):
self.assertEqual(b'', response.body)
self.assertEqual(response.status_int, 200)
- def test_deserialize_badtype(self):
- class Controller(object):
- def index(self, req, pants=None):
- return pants
-
- controller = Controller()
- resource = wsgi.Resource(controller)
- self.assertRaises(exception.InvalidContentType,
- resource.deserialize,
- controller.index, 'application/none', 'foo')
-
def test_deserialize_default(self):
- class JSONDeserializer(object):
- def deserialize(self, body):
- return 'json'
-
class Controller(object):
def index(self, req, pants=None):
return pants
controller = Controller()
- resource = wsgi.Resource(controller, json=JSONDeserializer)
+ resource = wsgi.Resource(controller)
- obj = resource.deserialize(controller.index, 'application/json', 'foo')
- self.assertEqual(obj, 'json')
+ obj = resource.deserialize('["foo"]')
+ self.assertEqual(obj, {'body': ['foo']})
def test_register_actions(self):
class Controller(object):
@@ -1084,62 +1058,6 @@ class ResponseObjectTest(test.NoDBTestCase):
hdrs['hEADER'] = 'bar'
self.assertEqual(robj['hEADER'], 'foo')
- def test_default_serializers(self):
- robj = wsgi.ResponseObject({})
- self.assertEqual(robj.serializers, {})
-
- def test_bind_serializers(self):
- robj = wsgi.ResponseObject({}, json='foo')
- robj._bind_method_serializers(dict(xml='bar', json='baz'))
- self.assertEqual(robj.serializers, dict(xml='bar', json='foo'))
-
- def test_get_serializer(self):
- robj = wsgi.ResponseObject({}, json='json', xml='xml', atom='atom')
- for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
- _mtype, serializer = robj.get_serializer(content_type)
- self.assertEqual(serializer, mtype)
-
- def test_get_serializer_defaults(self):
- robj = wsgi.ResponseObject({})
- default_serializers = dict(json='json', xml='xml', atom='atom')
- for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
- self.assertRaises(exception.InvalidContentType,
- robj.get_serializer, content_type)
- _mtype, serializer = robj.get_serializer(content_type,
- default_serializers)
- self.assertEqual(serializer, mtype)
-
- def test_serialize(self):
- class JSONSerializer(object):
- def serialize(self, obj):
- return 'json'
-
- class AtomSerializer(object):
- def serialize(self, obj):
- return 'atom'
-
- robj = wsgi.ResponseObject({}, code=202,
- json=JSONSerializer,
- atom=AtomSerializer)
- robj['X-header1'] = 'header1'
- robj['X-header2'] = 'header2'
- robj['X-header3'] = 3
- robj['X-header-unicode'] = u'header-unicode'
-
- for content_type, mtype in wsgi._MEDIA_TYPE_MAP.items():
- request = wsgi.Request.blank('/tests/123')
- response = robj.serialize(request, content_type)
- self.assertEqual(content_type.encode("utf-8"),
- response.headers['Content-Type'])
- for hdr, val in six.iteritems(response.headers):
- # All headers must be utf8
- self.assertThat(val, matchers.EncodedByUTF8())
- self.assertEqual(b'header1', response.headers['X-header1'])
- self.assertEqual(b'header2', response.headers['X-header2'])
- self.assertEqual(b'3', response.headers['X-header3'])
- self.assertEqual(response.status_int, 202)
- self.assertEqual(mtype.encode("utf-8"), response.body)
-
class ValidBodyTest(test.NoDBTestCase):
diff --git a/nova/tests/unit/api_samples_test_base/__init__.py b/nova/tests/unit/api_samples_test_base/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/nova/tests/unit/api_samples_test_base/__init__.py
diff --git a/nova/tests/unit/api_samples_test_base/test_compare_result.py b/nova/tests/unit/api_samples_test_base/test_compare_result.py
new file mode 100644
index 0000000000..c1ab0b8b54
--- /dev/null
+++ b/nova/tests/unit/api_samples_test_base/test_compare_result.py
@@ -0,0 +1,470 @@
+# Copyright 2015 HPE, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import mock
+import testtools
+
+from nova import test
+from nova.tests.functional import api_samples_test_base
+
+
+class TestCompareResult(test.NoDBTestCase):
+ """Provide test coverage for result comparison logic in functional tests.
+
+ _compare_result performs two types of comparisons: template data and
+ sample data.
+
+ Template data means the response is checked against a regex that is
+ referenced by the template name. The template name is specified in
+ the format %(name)s.
+
+ Sample data is a normal value comparison.
+ """
+
+ def getApiSampleTestBaseHelper(self):
+ """Build an instance without running any unwanted test methods"""
+
+ # NOTE(auggy): TestCase takes a "test" method name to run in __init__
+ # calling this way prevents additional test methods from running
+ ast_instance = api_samples_test_base.ApiSampleTestBase('setUp')
+
+ # required by ApiSampleTestBase
+ ast_instance.api_major_version = 'v2'
+ ast_instance._project_id = 'True'
+
+ # use a MagicMock for the compute attribute normally set up by
+ # the test classes
+ ast_instance.compute = mock.MagicMock()
+
+ ast_instance.subs = ast_instance._get_regexes()
+
+ return ast_instance
+
+ def setUp(self):
+ super(TestCompareResult, self).setUp()
+ self.ast = self.getApiSampleTestBaseHelper()
+
+ def test_bare_strings_match(self):
+ """compare 2 bare strings that match"""
+ sample_data = u'foo'
+ response_data = u'foo'
+ result = self.ast._compare_result(
+ expected=sample_data,
+ result=response_data,
+ result_str="Test")
+
+ # NOTE(auggy): _compare_result will not return a matched value in the
+ # case of bare strings. If they don't match it will throw an exception,
+ # otherwise it returns "None".
+ self.assertEqual(
+ expected=None,
+ observed=result,
+ message='Check _compare_result of 2 bare strings')
+
+ def test_bare_strings_no_match(self):
+ """check 2 bare strings that don't match"""
+ sample_data = u'foo'
+ response_data = u'bar'
+
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=sample_data,
+ result=response_data,
+ result_str="Test")
+
+ def test_template_strings_match(self):
+ """compare 2 template strings (contain %) that match"""
+ template_data = u'%(id)s'
+ response_data = u'858f295a-8543-45fa-804a-08f8356d616d'
+ result = self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
+
+ self.assertEqual(
+ expected=response_data,
+ observed=result,
+ message='Check _compare_result of 2 template strings')
+
+ def test_template_strings_no_match(self):
+ """check 2 template strings (contain %) that don't match"""
+ template_data = u'%(id)s'
+ response_data = u'$58f295a-8543-45fa-804a-08f8356d616d'
+
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
+
+ # TODO(auggy): _compare_result needs a consistent return value
+ # In some cases it returns the value if it matched, in others it returns
+ # None. In all cases, it throws an exception if there's no match.
+ def test_bare_int_match(self):
+ """check 2 bare ints that match"""
+ sample_data = 42
+ response_data = 42
+ result = self.ast._compare_result(
+ expected=sample_data,
+ result=response_data,
+ result_str="Test")
+ self.assertEqual(
+ expected=None,
+ observed=result,
+ message='Check _compare_result of 2 bare ints')
+
+ def test_bare_int_no_match(self):
+ """check 2 bare ints that don't match"""
+ sample_data = 42
+ response_data = 43
+
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=sample_data,
+ result=response_data,
+ result_str="Test")
+
+ # TODO(auggy): _compare_result needs a consistent return value
+ def test_template_int_match(self):
+ """check template int against string containing digits"""
+ template_data = u'%(int)s'
+ response_data = u'42'
+
+ result = self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
+
+ self.assertEqual(
+ expected=None,
+ observed=result,
+ message='Check _compare_result of template ints')
+
+ def test_template_int_no_match(self):
+ """check template int against a string containing no digits"""
+ template_data = u'%(int)s'
+ response_data = u'foo'
+
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
+
+ def test_template_int_value(self):
+ """check an int value of a template int throws exception"""
+
+ # template_data = u'%(int_test)'
+ # response_data = 42
+
+ # use an int instead of a string as the subs value
+ local_subs = copy.deepcopy(self.ast.subs)
+ local_subs.update({'int_test': 42})
+
+ with testtools.ExpectedException(TypeError):
+ self.ast.subs = local_subs
+
+ # TODO(auggy): _compare_result needs a consistent return value
+ def test_dict_match(self):
+ """check 2 matching dictionaries"""
+ template_data = {
+ u'server': {
+ u'id': u'%(id)s',
+ u'adminPass': u'%(password)s'
+ }
+ }
+ response_data = {
+ u'server': {
+ u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
+ u'adminPass': u'4ZQ3bb6WYbC2'}
+ }
+
+ result = self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
+
+ self.assertEqual(
+ expected=u'858f295a-8543-45fa-804a-08f8356d616d',
+ observed=result,
+ message='Check _compare_result of 2 dictionaries')
+
+ def test_dict_no_match_value(self):
+ """check 2 dictionaries where one has a different value"""
+ sample_data = {
+ u'server': {
+ u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
+ u'adminPass': u'foo'
+ }
+ }
+ response_data = {
+ u'server': {
+ u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
+ u'adminPass': u'4ZQ3bb6WYbC2'}
+ }
+
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=sample_data,
+ result=response_data,
+ result_str="Test")
+
+ def test_dict_no_match_extra_key(self):
+ """check 2 dictionaries where one has an extra key"""
+ template_data = {
+ u'server': {
+ u'id': u'%(id)s',
+ u'adminPass': u'%(password)s',
+ u'foo': u'foo'
+ }
+ }
+ response_data = {
+ u'server': {
+ u'id': u'858f295a-8543-45fa-804a-08f8356d616d',
+ u'adminPass': u'4ZQ3bb6WYbC2'}
+ }
+
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
+
+ def test_dict_result_type_mismatch(self):
+ """check expected is a dictionary and result is not a dictionary"""
+
+ template_data = {
+ u'server': {
+ u'id': u'%(id)s',
+ u'adminPass': u'%(password)s',
+ }
+ }
+ response_data = u'foo'
+
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
+
+ # TODO(auggy): _compare_result needs a consistent return value
+ def test_list_match(self):
+ """check 2 matching lists"""
+ template_data = {
+ u'links':
+ [
+ {
+ u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s',
+ u'rel': u'self'
+ },
+ {
+ u'href': u'%(compute_endpoint)s/servers/%(uuid)s',
+ u'rel': u'bookmark'
+ }
+ ]
+ }
+ response_data = {
+ u'links':
+ [
+ {
+ u'href':
+ (u'http://openstack.example.com/v2/openstack/server/'
+ '858f295a-8543-45fa-804a-08f8356d616d'),
+ u'rel': u'self'
+ },
+ {
+ u'href':
+ (u'http://openstack.example.com/openstack/servers/'
+ '858f295a-8543-45fa-804a-08f8356d616d'),
+ u'rel': u'bookmark'
+ }
+ ]
+ }
+
+ result = self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
+
+ self.assertEqual(
+ expected=None,
+ observed=result,
+ message='Check _compare_result of 2 lists')
+
+ def test_list_match_extra_item_result(self):
+ """check extra list items in result """
+ template_data = {
+ u'links':
+ [
+ {
+ u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s',
+ u'rel': u'self'
+ },
+ {
+ u'href': u'%(compute_endpoint)s/servers/%(uuid)s',
+ u'rel': u'bookmark'
+ }
+ ]
+ }
+ response_data = {
+ u'links':
+ [
+ {
+ u'href':
+ (u'http://openstack.example.com/v2/openstack/server/'
+ '858f295a-8543-45fa-804a-08f8356d616d'),
+ u'rel': u'self'
+ },
+ {
+ u'href':
+ (u'http://openstack.example.com/openstack/servers/'
+ '858f295a-8543-45fa-804a-08f8356d616d'),
+ u'rel': u'bookmark'
+ },
+ u'foo'
+ ]
+ }
+
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
+
+ def test_list_match_extra_item_template(self):
+ """check extra list items in template """
+ template_data = {
+ u'links':
+ [
+ {
+ u'href': u'%(versioned_compute_endpoint)s/server/%(uuid)s',
+ u'rel': u'self'
+ },
+ {
+ u'href': u'%(compute_endpoint)s/servers/%(uuid)s',
+ u'rel': u'bookmark'
+ },
+ u'foo' # extra field
+ ]
+ }
+ response_data = {
+ u'links':
+ [
+ {
+ u'href':
+ (u'http://openstack.example.com/v2/openstack/server/'
+ '858f295a-8543-45fa-804a-08f8356d616d'),
+ u'rel': u'self'
+ },
+ {
+ u'href':
+ (u'http://openstack.example.com/openstack/servers/'
+ '858f295a-8543-45fa-804a-08f8356d616d'),
+ u'rel': u'bookmark'
+ }
+ ]
+ }
+
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
+
+ def test_list_no_match(self):
+ """check 2 matching lists"""
+ template_data = {
+ u'things':
+ [
+ {
+ u'foo': u'bar',
+ u'baz': 0
+ },
+ {
+ u'foo': u'zod',
+ u'baz': 1
+ }
+ ]
+ }
+ response_data = {
+ u'things':
+ [
+ {
+ u'foo': u'bar',
+ u'baz': u'0'
+ },
+ {
+ u'foo': u'zod',
+ u'baz': 1
+ }
+ ]
+ }
+
+ # TODO(auggy): This error returns "extra list items"
+ # it should show the item/s in the list that didn't match
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
+
+ def test_none_match(self):
+ """check that None matches"""
+ sample_data = None
+ response_data = None
+ result = self.ast._compare_result(
+ expected=sample_data,
+ result=response_data,
+ result_str="Test")
+
+ # NOTE(auggy): _compare_result will not return a matched value when
+ # comparing None values. If they don't match it will throw an
+ # exception, otherwise it returns "None".
+ self.assertEqual(
+ expected=None,
+ observed=result,
+ message='Check _compare_result of None')
+
+ def test_none_no_match(self):
+ """check expected none and non-None response don't match"""
+ sample_data = None
+ response_data = u'bar'
+
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=sample_data,
+ result=response_data,
+ result_str="Test")
+
+ def test_none_result_no_match(self):
+ """check result none and expected non-None response don't match"""
+ sample_data = u'foo'
+ response_data = None
+
+ with testtools.ExpectedException(api_samples_test_base.NoMatch):
+ self.ast._compare_result(
+ expected=sample_data,
+ result=response_data,
+ result_str="Test")
+
+ def test_template_no_subs_key(self):
+ """check an int value of a template int throws exception"""
+ template_data = u'%(foo)'
+ response_data = 'bar'
+
+ with testtools.ExpectedException(KeyError):
+ self.ast._compare_result(
+ expected=template_data,
+ result=response_data,
+ result_str="Test")
diff --git a/nova/tests/unit/cells/test_cells_messaging.py b/nova/tests/unit/cells/test_cells_messaging.py
index af207f4506..c4f8900865 100644
--- a/nova/tests/unit/cells/test_cells_messaging.py
+++ b/nova/tests/unit/cells/test_cells_messaging.py
@@ -48,7 +48,7 @@ CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
-class CellsMessageClassesTestCase(test.TestCase):
+class CellsMessageClassesTestCase(test.NoDBTestCase):
"""Test case for the main Cells Message classes."""
def setUp(self):
super(CellsMessageClassesTestCase, self).setUp()
@@ -647,7 +647,33 @@ class CellsMessageClassesTestCase(test.TestCase):
self.assertRaises(test.TestingException, response.value_or_raise)
-class CellsTargetedMethodsTestCase(test.TestCase):
+class CellsTargetedMethodsWithDatabaseTestCase(test.TestCase):
+ """These tests access the database unlike the others."""
+
+ def setUp(self):
+ super(CellsTargetedMethodsWithDatabaseTestCase, self).setUp()
+ fakes.init(self)
+ self.ctxt = context.RequestContext('fake', 'fake')
+ self._setup_attrs('api-cell', 'api-cell!child-cell2')
+
+ def _setup_attrs(self, source_cell, target_cell):
+ self.tgt_cell_name = target_cell
+ self.src_msg_runner = fakes.get_message_runner(source_cell)
+
+ def test_service_delete(self):
+ fake_service = dict(id=42, host='fake_host', binary='nova-compute',
+ topic='compute')
+
+ ctxt = self.ctxt.elevated()
+ db.service_create(ctxt, fake_service)
+
+ self.src_msg_runner.service_delete(
+ ctxt, self.tgt_cell_name, fake_service['id'])
+ self.assertRaises(exception.ServiceNotFound,
+ db.service_get, ctxt, fake_service['id'])
+
+
+class CellsTargetedMethodsTestCase(test.NoDBTestCase):
"""Test case for _TargetedMessageMethods class. Most of these
tests actually test the full path from the MessageRunner through
to the functionality of the message method. Hits 2 birds with 1
@@ -864,18 +890,6 @@ class CellsTargetedMethodsTestCase(test.TestCase):
self.assertEqual(jsonutils.to_primitive(fake_service),
jsonutils.to_primitive(result))
- def test_service_delete(self):
- fake_service = dict(id=42, host='fake_host', binary='nova-compute',
- topic='compute')
-
- ctxt = self.ctxt.elevated()
- db.service_create(ctxt, fake_service)
-
- self.src_msg_runner.service_delete(
- ctxt, self.tgt_cell_name, fake_service['id'])
- self.assertRaises(exception.ServiceNotFound,
- db.service_get, ctxt, fake_service['id'])
-
def test_proxy_rpc_to_manager_call(self):
fake_topic = 'fake-topic'
fake_rpc_message = {'method': 'fake_rpc_method', 'args': {}}
@@ -1408,7 +1422,7 @@ class CellsTargetedMethodsTestCase(test.TestCase):
{}, False)
-class CellsBroadcastMethodsTestCase(test.TestCase):
+class CellsBroadcastMethodsTestCase(test.NoDBTestCase):
"""Test case for _BroadcastMessageMethods class. Most of these
tests actually test the full path from the MessageRunner through
to the functionality of the message method. Hits 2 birds with 1
@@ -2121,7 +2135,7 @@ class CellsBroadcastMethodsTestCase(test.TestCase):
self.assertEqual(fake_process.return_value, responses)
-class CellsPublicInterfacesTestCase(test.TestCase):
+class CellsPublicInterfacesTestCase(test.NoDBTestCase):
"""Test case for the public interfaces into cells messaging."""
def setUp(self):
super(CellsPublicInterfacesTestCase, self).setUp()
diff --git a/nova/tests/unit/cells/test_cells_state_manager.py b/nova/tests/unit/cells/test_cells_state_manager.py
index f8630612b8..7f72a3d9f4 100644
--- a/nova/tests/unit/cells/test_cells_state_manager.py
+++ b/nova/tests/unit/cells/test_cells_state_manager.py
@@ -16,15 +16,16 @@
Tests For CellStateManager
"""
+import datetime
import time
import mock
from oslo_config import cfg
from oslo_db import exception as db_exc
+from oslo_utils import timeutils
import six
from nova.cells import state
-from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import objects
@@ -45,6 +46,13 @@ FAKE_COMPUTES_N_TO_ONE = [
('host2', 1024, 100, 300, 30),
]
+FAKE_SERVICES = [
+ ('host1', 0),
+ ('host2', 0),
+ ('host3', 0),
+ ('host4', 3600),
+]
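+# NOTE: each tuple is (host, seconds since the service last reported
+# up); host4's 3600s gap should make it count as down in the
+# node-down capacity tests below.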
+
# NOTE(alaski): It's important to have multiple types that end up having the
# same memory and disk requirements. So two types need the same first value,
# and two need the second and third values to add up to the same thing.
@@ -67,12 +75,29 @@ def _create_fake_node(host, total_mem, total_disk, free_mem, free_disk):
@classmethod
def _fake_service_get_all_by_binary(cls, context, binary):
def _node(host, total_mem, total_disk, free_mem, free_disk):
- return objects.Service(host=host, disabled=False)
+ now = timeutils.utcnow()
+ return objects.Service(host=host,
+ disabled=False,
+ forced_down=False,
+ last_seen_up=now)
return [_node(*fake) for fake in FAKE_COMPUTES]
@classmethod
+def _fake_service_get_all_by_binary_nodedown(cls, context, binary):
+ def _service(host, noupdate_sec):
+ now = timeutils.utcnow()
+ last_seen = now - datetime.timedelta(seconds=noupdate_sec)
+ return objects.Service(host=host,
+ disabled=False,
+ forced_down=False,
+ last_seen_up=last_seen)
+
+ return [_service(*fake) for fake in FAKE_SERVICES]
+
+
+@classmethod
def _fake_compute_node_get_all(cls, context):
return [_create_fake_node(*fake) for fake in FAKE_COMPUTES]
@@ -104,8 +129,8 @@ class TestCellsStateManager(test.NoDBTestCase):
_fake_compute_node_get_all)
self.stubs.Set(objects.ServiceList, 'get_by_binary',
_fake_service_get_all_by_binary)
- self.stubs.Set(db, 'flavor_get_all', _fake_instance_type_all)
- self.stubs.Set(db, 'cell_get_all', _fake_cell_get_all)
+ self.stub_out('nova.db.flavor_get_all', _fake_instance_type_all)
+ self.stub_out('nova.db.cell_get_all', _fake_cell_get_all)
def test_cells_config_not_found(self):
self.flags(cells_config='no_such_file_exists.conf', group='cells')
@@ -117,7 +142,7 @@ class TestCellsStateManager(test.NoDBTestCase):
@mock.patch.object(utils, 'read_cached_file')
def test_filemanager_returned(self, mock_read_cached_file, mock_find_file):
mock_find_file.return_value = "/etc/nova/cells.json"
- mock_read_cached_file.return_value = (False, six.StringIO({}))
+ mock_read_cached_file.return_value = (False, six.StringIO('{}'))
self.flags(cells_config='cells.json', group='cells')
manager = state.CellStateManager()
self.assertIsInstance(manager,
@@ -146,7 +171,7 @@ class TestCellsStateManager(test.NoDBTestCase):
self.assertEqual(0, cap['ram_free']['units_by_mb']['0'])
self.assertEqual(0, cap['disk_free']['units_by_mb']['0'])
- units = cell_free_ram / 50
+ units = cell_free_ram // 50
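+ # floor division: plain '/' would produce a float under Python 3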
self.assertEqual(units, cap['ram_free']['units_by_mb']['50'])
sz = 25 * 1024
@@ -229,6 +254,37 @@ class TestCellsStateManagerNToOne(TestCellsStateManager):
self.assertEqual(units, cap['disk_free']['units_by_mb'][str(sz)])
+class TestCellsStateManagerNodeDown(test.NoDBTestCase):
+ def setUp(self):
+ super(TestCellsStateManagerNodeDown, self).setUp()
+
+ self.stub_out('nova.objects.ComputeNodeList.get_all',
+ _fake_compute_node_get_all)
+ self.stub_out('nova.objects.ServiceList.get_by_binary',
+ _fake_service_get_all_by_binary_nodedown)
+ self.stub_out('nova.db.flavor_get_all', _fake_instance_type_all)
+ self.stub_out('nova.db.cell_get_all', _fake_cell_get_all)
+
+ def test_capacity_no_reserve_nodedown(self):
+ cap = self._capacity(0.0)
+
+ cell_free_ram = sum(compute[3] for compute in FAKE_COMPUTES[:-1])
+ self.assertEqual(cell_free_ram, cap['ram_free']['total_mb'])
+
+ free_disk = sum(compute[4] for compute in FAKE_COMPUTES[:-1])
+ cell_free_disk = 1024 * free_disk
+ self.assertEqual(cell_free_disk, cap['disk_free']['total_mb'])
+
+ def _get_state_manager(self, reserve_percent=0.0):
+ self.flags(reserve_percent=reserve_percent, group='cells')
+ return state.CellStateManager()
+
+ def _capacity(self, reserve_percent):
+ state_manager = self._get_state_manager(reserve_percent)
+ my_state = state_manager.get_my_state()
+ return my_state.capacities
+
+
class TestCellStateManagerException(test.NoDBTestCase):
@mock.patch.object(time, 'sleep')
def test_init_db_error(self, mock_sleep):
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 642f48def9..c256765a7c 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -29,7 +29,6 @@ from eventlet import greenthread
import mock
from mox3 import mox
from neutronclient.common import exceptions as neutron_exceptions
-from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
@@ -56,6 +55,7 @@ from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import manager as conductor_manager
+import nova.conf
from nova.console import type as ctype
from nova import context
from nova import db
@@ -98,11 +98,10 @@ from nova.volume import cinder
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
-CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
FAKE_IMAGE_REF = uuids.image_ref
@@ -172,6 +171,7 @@ class BaseTestCase(test.TestCase):
def fake_get_compute_nodes_in_db(context, use_slave=False):
fake_compute_nodes = [{'local_gb': 259,
+ 'uuid': uuids.fake_compute_node,
'vcpus_used': 0,
'deleted': 0,
'hypervisor_type': 'powervm',
@@ -198,6 +198,7 @@ class BaseTestCase(test.TestCase):
'host': 'fake_phyp1',
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5,
+ 'disk_allocation_ratio': 1.0,
'host_ip': '127.0.0.1'}]
return [objects.ComputeNode._from_db_object(
context, objects.ComputeNode(), cn)
@@ -208,7 +209,7 @@ class BaseTestCase(test.TestCase):
self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
fake_get_compute_nodes_in_db)
- self.stubs.Set(db, 'compute_node_delete',
+ self.stub_out('nova.db.compute_node_delete',
fake_compute_node_delete)
self.compute.update_available_resource(
@@ -281,7 +282,7 @@ class BaseTestCase(test.TestCase):
inst.reservation_id = 'r-fakeres'
inst.user_id = self.user_id
inst.project_id = self.project_id
- inst.host = 'fake_host'
+ inst.host = self.compute.host
inst.node = NODENAME
inst.instance_type_id = flavor.id
inst.ami_launch_index = 0
@@ -367,7 +368,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.context, objects.Instance(),
fake_instance.fake_db_instance())
self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
- {'id': 'fake', 'size': 4,
+ {'id': uuids.volume_id, 'size': 4,
'attach_status': 'detached'})
self.stubs.Set(self.compute.driver, 'get_volume_connector',
lambda *a, **kw: None)
@@ -388,8 +389,8 @@ class ComputeVolumeTestCase(BaseTestCase):
self.cinfo = jsonutils.loads(args[-1].get('connection_info'))
return self.fake_volume
- self.stubs.Set(db, 'block_device_mapping_create', store_cinfo)
- self.stubs.Set(db, 'block_device_mapping_update', store_cinfo)
+ self.stub_out('nova.db.block_device_mapping_create', store_cinfo)
+ self.stub_out('nova.db.block_device_mapping_update', store_cinfo)
def test_attach_volume_serial(self):
fake_bdm = objects.BlockDeviceMapping(context=self.context,
@@ -436,7 +437,7 @@ class ComputeVolumeTestCase(BaseTestCase):
mock_get.return_value = fake_bdm
self.assertRaises(
test.TestingException, self.compute.detach_volume,
- self.context, 'fake', instance)
+ self.context, 'fake', instance, 'fake_id')
mock_internal_detach.assert_called_once_with(self.context,
instance,
fake_bdm)
@@ -748,6 +749,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute.driver, 'block_stats')
self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms')
self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage')
+ self.mox.StubOutWithMock(self.compute.driver, 'instance_exists')
# The following methods will be called
objects.BlockDeviceMapping.get_by_volume_and_instance(
@@ -767,6 +769,8 @@ class ComputeVolumeTestCase(BaseTestCase):
'wr_bytes': 5,
'instance': instance}])
+ self.compute.driver.instance_exists(mox.IgnoreArg()).AndReturn(True)
+
self.mox.ReplayAll()
def fake_get_volume_encryption_metadata(self, context, volume_id):
@@ -1057,7 +1061,7 @@ class ComputeVolumeTestCase(BaseTestCase):
def test_validate_bdm_media_service_exceptions(self):
instance_type = {'swap': 1, 'ephemeral_gb': 1}
- all_mappings = [fake_block_device.FakeDbBlockDeviceDict({
+ bdms = [fake_block_device.FakeDbBlockDeviceDict({
'id': 1,
'no_device': None,
'source_type': 'volume',
@@ -1067,8 +1071,8 @@ class ComputeVolumeTestCase(BaseTestCase):
'device_name': 'vda',
'boot_index': 0,
'delete_on_termination': False}, anon=True)]
- all_mappings = block_device_obj.block_device_make_list_from_dicts(
- self.context, all_mappings)
+ bdms = block_device_obj.block_device_make_list_from_dicts(
+ self.context, bdms)
# First we test a list of invalid status values that should result
# in an InvalidVolume exception being raised.
@@ -1081,15 +1085,24 @@ class ComputeVolumeTestCase(BaseTestCase):
)
for status, attach_status in status_values:
- def fake_volume_get(self, ctxt, volume_id):
- return {'id': volume_id,
- 'status': status,
- 'attach_status': attach_status}
+ if attach_status == 'attached':
+ def fake_volume_get(self, ctxt, volume_id):
+ return {'id': volume_id,
+ 'status': status,
+ 'attach_status': attach_status,
+ 'multiattach': False,
+ 'attachments': {}}
+ else:
+ def fake_volume_get(self, ctxt, volume_id):
+ return {'id': volume_id,
+ 'status': status,
+ 'attach_status': attach_status,
+ 'multiattach': False}
self.stubs.Set(cinder.API, 'get', fake_volume_get)
self.assertRaises(exception.InvalidVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
- instance_type, all_mappings)
+ instance_type, bdms)
# Now we test a 404 case that results in InvalidBDMVolume.
def fake_volume_get_not_found(self, context, volume_id):
@@ -1099,18 +1112,19 @@ class ComputeVolumeTestCase(BaseTestCase):
self.assertRaises(exception.InvalidBDMVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
- instance_type, all_mappings)
+ instance_type, bdms)
# Check that the volume status is 'available' and attach_status is
# 'detached' and accept the request if so
def fake_volume_get_ok(self, context, volume_id):
return {'id': volume_id,
'status': 'available',
- 'attach_status': 'detached'}
+ 'attach_status': 'detached',
+ 'multiattach': False}
self.stubs.Set(cinder.API, 'get', fake_volume_get_ok)
self.compute_api._validate_bdm(self.context, self.instance,
- instance_type, all_mappings)
+ instance_type, bdms)
def test_volume_snapshot_create(self):
self.assertRaises(messaging.ExpectedException,
@@ -1873,12 +1887,18 @@ class ComputeTestCase(BaseTestCase):
pass
def fake_volume_get(self, context, volume_id):
- return {'id': volume_id}
+ return {'id': volume_id,
+ 'attach_status': 'attached',
+ 'attachments': {instance.uuid: {
+ 'attachment_id': 'abc123'
+ }
+ }
+ }
def fake_terminate_connection(self, context, volume_id, connector):
pass
- def fake_detach(self, context, volume_id):
+ def fake_detach(self, context, volume_id, instance_uuid):
pass
bdms = []
@@ -2216,7 +2236,8 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(nova.virt.fake.FakeDriver, 'rescue')
self.compute._get_rescue_image(
- mox.IgnoreArg(), inst_obj, mox.IgnoreArg()).AndReturn({})
+ mox.IgnoreArg(), inst_obj, mox.IgnoreArg()).AndReturn(
+ objects.ImageMeta.from_dict({}))
nova.virt.fake.FakeDriver.rescue(
mox.IgnoreArg(), inst_obj, [], mox.IgnoreArg(), 'password'
).AndRaise(RuntimeError("Try again later"))
@@ -2256,7 +2277,8 @@ class ComputeTestCase(BaseTestCase):
mock_image_get.assert_called_with(ctxt, image_ref)
mock_rescue.assert_called_with(ctxt, instance, [],
- rescue_image_meta, 'password')
+ test.MatchType(objects.ImageMeta),
+ 'password')
self.compute.terminate_instance(ctxt, instance, [], [])
@mock.patch.object(image_api.API, "get")
@@ -2284,7 +2306,8 @@ class ComputeTestCase(BaseTestCase):
mock_image_get.assert_called_with(ctxt, image_ref)
mock_rescue.assert_called_with(ctxt, instance, [],
- rescue_image_meta, 'password')
+ test.MatchType(objects.ImageMeta),
+ 'password')
self.compute.terminate_instance(self.context, instance, [], [])
def test_power_on(self):
@@ -3978,19 +4001,18 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(self.compute.network_api,
"allocate_for_instance")
self.compute.network_api.allocate_for_instance(
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- requested_networks=None,
- vpn=False, macs=macs,
- security_groups=[], dhcp_options=None).AndReturn(
+ self.context, instance, vpn=False,
+ requested_networks=None, macs=macs,
+ security_groups=[], dhcp_options=None,
+ bind_host_id=self.compute.host).AndReturn(
fake_network.fake_get_instance_nw_info(self, 1, 1))
self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
self.compute.driver.macs_for_instance(
mox.IsA(instance_obj.Instance)).AndReturn(macs)
self.mox.ReplayAll()
- self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
- block_device_mapping=[])
+ self.compute._build_networks_for_instance(self.context, instance,
+ requested_networks=None, security_groups=None)
def _create_server_group(self, policies, instance_host):
group_instance = self._create_fake_instance_obj(
@@ -4018,35 +4040,21 @@ class ComputeTestCase(BaseTestCase):
def test_instance_set_to_error_on_uncaught_exception(self):
# Test that instance is set to error state when exception is raised.
instance = self._create_fake_instance_obj()
-
- self.mox.StubOutWithMock(self.compute.network_api,
- "allocate_for_instance")
- self.mox.StubOutWithMock(self.compute.network_api,
- "deallocate_for_instance")
- self.compute.network_api.allocate_for_instance(
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- requested_networks=None,
- vpn=False, macs=None,
- security_groups=[], dhcp_options=None
- ).AndRaise(messaging.RemoteError())
- self.compute.network_api.deallocate_for_instance(
- mox.IgnoreArg(),
- mox.IgnoreArg(),
- requested_networks=None).MultipleTimes()
-
fake_network.unset_stub_network_methods(self)
- self.mox.ReplayAll()
+ @mock.patch.object(self.compute.network_api, 'allocate_for_instance',
+ side_effect=messaging.RemoteError())
+ @mock.patch.object(self.compute.network_api, 'deallocate_for_instance')
+ def _do_test(mock_deallocate, mock_allocate):
+ self.compute.build_and_run_instance(self.context, instance, {},
+ {}, {}, block_device_mapping=[])
- self.compute.build_and_run_instance(
- self.context, instance, {}, {}, {},
- block_device_mapping=[])
+ instance.refresh()
+ self.assertEqual(vm_states.ERROR, instance.vm_state)
- instance.refresh()
- self.assertEqual(vm_states.ERROR, instance.vm_state)
+ self.compute.terminate_instance(self.context, instance, [], [])
- self.compute.terminate_instance(self.context, instance, [], [])
+ _do_test()
def test_delete_instance_keeps_net_on_power_off_fail(self):
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
@@ -4467,7 +4475,7 @@ class ComputeTestCase(BaseTestCase):
vm_state = vm_states.STOPPED
params = {'vm_state': vm_state}
instance = self._create_fake_instance_obj(params)
- image = 'fake-image'
+ image = {}
disk_info = 'fake-disk-info'
instance_type = flavors.get_default_flavor()
@@ -4564,7 +4572,8 @@ class ComputeTestCase(BaseTestCase):
self.compute.driver.finish_migration(self.context, migration,
instance, disk_info,
'fake-nwinfo1',
- image, resize_instance,
+ mox.IsA(objects.ImageMeta),
+ resize_instance,
'fake-bdminfo', power_on)
# Ensure instance status updates is after the migration finish
migration.save().WithSideEffects(_mig_save)
@@ -5537,6 +5546,7 @@ class ComputeTestCase(BaseTestCase):
'source_type': 'volume',
'destination_type': 'volume'}))
]
+ migrate_data = migrate_data_obj.LiveMigrateData()
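+ # NOTE: live migration now passes a versioned LiveMigrateData object
+ # rather than a bare dict.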
# creating mocks
self.mox.StubOutWithMock(self.compute.driver,
@@ -5560,18 +5570,19 @@ class ComputeTestCase(BaseTestCase):
block_device_info=block_device_info).AndReturn('fake_disk')
self.compute.compute_rpcapi.pre_live_migration(c,
instance, True, 'fake_disk', dest_host,
- {}).AndRaise(test.TestingException())
+ migrate_data).AndRaise(test.TestingException())
self.compute.network_api.setup_networks_on_host(c,
instance, self.compute.host)
objects.BlockDeviceMappingList.get_by_instance_uuid(c,
instance.uuid).MultipleTimes().AndReturn(fake_bdms)
self.compute.compute_rpcapi.remove_volume_connection(
- c, instance, uuids.volume_id_1, dest_host)
+ c, uuids.volume_id_1, instance, dest_host)
self.compute.compute_rpcapi.remove_volume_connection(
- c, instance, uuids.volume_id_2, dest_host)
+ c, uuids.volume_id_2, instance, dest_host)
self.compute.compute_rpcapi.rollback_live_migration_at_destination(
- c, instance, dest_host, destroy_disks=True, migrate_data={})
+ c, instance, dest_host, destroy_disks=True,
+ migrate_data=mox.IsA(migrate_data_obj.LiveMigrateData))
# start test
self.mox.ReplayAll()
@@ -5580,7 +5591,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.live_migration,
c, dest=dest_host, block_migration=True,
instance=instance, migration=migration,
- migrate_data={})
+ migrate_data=migrate_data)
instance.refresh()
self.assertEqual('src_host', instance.host)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
@@ -5597,12 +5608,15 @@ class ComputeTestCase(BaseTestCase):
instance.host = self.compute.host
dest = 'desthost'
- migrate_data = {'is_shared_instance_path': False}
+ migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
+ is_shared_instance_path=False,
+ is_shared_block_storage=False)
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'pre_live_migration')
self.compute.compute_rpcapi.pre_live_migration(
- c, instance, False, None, dest, migrate_data)
+ c, instance, False, None, dest, migrate_data).AndReturn(
+ migrate_data)
self.mox.StubOutWithMock(self.compute.network_api,
'migrate_instance_start')
@@ -5626,13 +5640,11 @@ class ComputeTestCase(BaseTestCase):
migration = objects.Migration()
- with mock.patch.object(self.compute, '_get_migrate_data_obj') as gmdo:
- gmdo.return_value = migrate_data_obj.LiveMigrateData()
- ret = self.compute.live_migration(c, dest=dest,
- instance=instance,
- block_migration=False,
- migration=migration,
- migrate_data=migrate_data)
+ ret = self.compute.live_migration(c, dest=dest,
+ instance=instance,
+ block_migration=False,
+ migration=migration,
+ migrate_data=migrate_data)
self.assertIsNone(ret)
event_mock.assert_called_with(
@@ -5814,84 +5826,6 @@ class ComputeTestCase(BaseTestCase):
terminate_connection.assert_called_once_with(
c, uuids.volume_id, 'fake-connector')
- def _begin_post_live_migration_at_destination(self):
- self.mox.StubOutWithMock(self.compute.network_api,
- 'setup_networks_on_host')
- self.mox.StubOutWithMock(self.compute.network_api,
- 'migrate_instance_finish')
- self.mox.StubOutWithMock(self.compute, '_get_power_state')
- self.mox.StubOutWithMock(self.compute, '_get_compute_info')
-
- params = {'task_state': task_states.MIGRATING,
- 'power_state': power_state.PAUSED, }
- self.instance = self._create_fake_instance_obj(params)
-
- self.admin_ctxt = context.get_admin_context()
-
- self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
- self.instance,
- self.compute.host)
- migration = {'source_compute': self.instance['host'],
- 'dest_compute': self.compute.host, }
- self.compute.network_api.migrate_instance_finish(
- self.admin_ctxt, self.instance, migration)
- fake_net_info = []
- fake_block_dev_info = {'foo': 'bar'}
- self.compute.driver.post_live_migration_at_destination(self.admin_ctxt,
- self.instance,
- fake_net_info,
- False,
- fake_block_dev_info)
- self.compute._get_power_state(self.admin_ctxt,
- self.instance).AndReturn(10001)
-
- def _finish_post_live_migration_at_destination(self):
- self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
- mox.IgnoreArg(), mox.IgnoreArg(), teardown=True)
- self.compute.network_api.setup_networks_on_host(self.admin_ctxt,
- mox.IgnoreArg(), self.compute.host)
-
- fake_notifier.NOTIFICATIONS = []
- self.mox.ReplayAll()
-
- self.compute.post_live_migration_at_destination(self.admin_ctxt,
- self.instance, False)
-
- self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
- msg = fake_notifier.NOTIFICATIONS[0]
- self.assertEqual(msg.event_type,
- 'compute.instance.live_migration.post.dest.start')
- msg = fake_notifier.NOTIFICATIONS[1]
- self.assertEqual(msg.event_type,
- 'compute.instance.live_migration.post.dest.end')
-
- return objects.Instance.get_by_uuid(self.admin_ctxt,
- self.instance['uuid'])
-
- def test_post_live_migration_at_destination_with_compute_info(self):
- """The instance's node property should be updated correctly."""
- self._begin_post_live_migration_at_destination()
- hypervisor_hostname = 'fake_hypervisor_hostname'
- fake_compute_info = objects.ComputeNode(
- hypervisor_hostname=hypervisor_hostname)
- self.compute._get_compute_info(mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(
- fake_compute_info)
- updated = self._finish_post_live_migration_at_destination()
- self.assertEqual(updated['node'], hypervisor_hostname)
-
- def test_post_live_migration_at_destination_without_compute_info(self):
- """The instance's node property should be set to None if we fail to
- get compute_info.
- """
- self._begin_post_live_migration_at_destination()
- self.compute._get_compute_info(
- mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(
- exception.ComputeHostNotFound(host='fake-host'))
- updated = self._finish_post_live_migration_at_destination()
- self.assertIsNone(updated['node'])
-
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
def test_rollback_live_migration(self, mock_bdms):
c = context.get_admin_context()
@@ -5954,16 +5888,19 @@ class ComputeTestCase(BaseTestCase):
side_effect=test.TestingException)
@mock.patch('nova.virt.driver.ComputeDriver.'
'rollback_live_migration_at_destination')
+ @mock.patch('nova.objects.migrate_data.LiveMigrateData.'
+ 'detect_implementation')
def test_rollback_live_migration_at_destination_network_fails(
- self, mock_rollback, net_mock):
+ self, mock_detect, mock_rollback, net_mock):
c = context.get_admin_context()
instance = self._create_fake_instance_obj()
self.assertRaises(test.TestingException,
self.compute.rollback_live_migration_at_destination,
c, instance, destroy_disks=True, migrate_data={})
- mock_rollback.assert_called_once_with(c, instance, mock.ANY, mock.ANY,
- destroy_disks=True,
- migrate_data={})
+ mock_rollback.assert_called_once_with(
+ c, instance, mock.ANY, mock.ANY,
+ destroy_disks=True,
+ migrate_data=mock_detect.return_value)
def test_run_kill_vm(self):
# Detect when a vm is terminated behind the scenes.
@@ -6020,7 +5957,7 @@ class ComputeTestCase(BaseTestCase):
except NotImplementedError:
exc_info = sys.exc_info()
- self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+ self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
@@ -6055,7 +5992,7 @@ class ComputeTestCase(BaseTestCase):
raised_exc = exc
exc_info = sys.exc_info()
- self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+ self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
@@ -6084,7 +6021,7 @@ class ComputeTestCase(BaseTestCase):
except exception.Invalid:
exc_info = sys.exc_info()
- self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+ self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
@@ -6104,7 +6041,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(expected, values)
return self._fill_fault(expected)
- self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+ self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
@@ -6127,7 +6064,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(expected, values)
return self._fill_fault(expected)
- self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+ self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
@@ -6156,7 +6093,7 @@ class ComputeTestCase(BaseTestCase):
except NotImplementedError:
exc_info = sys.exc_info()
- self.stubs.Set(nova.db, 'instance_fault_create', fake_db_fault_create)
+ self.stub_out('nova.db.instance_fault_create', fake_db_fault_create)
ctxt = context.get_admin_context()
compute_utils.add_instance_fault_from_exc(ctxt,
@@ -6315,7 +6252,8 @@ class ComputeTestCase(BaseTestCase):
if instance_uuid not in instance_map:
raise exception.InstanceNotFound(instance_id=instance_uuid)
call_info['get_by_uuid'] += 1
- self.assertEqual(['system_metadata', 'info_cache'],
+ self.assertEqual(['system_metadata', 'info_cache', 'extra',
+ 'extra.flavor'],
columns_to_join)
return instance_map[instance_uuid]
@@ -6334,9 +6272,9 @@ class ComputeTestCase(BaseTestCase):
raise exception.InstanceInfoCacheNotFound(
instance_uuid=instance['uuid'])
- self.stubs.Set(db, 'instance_get_all_by_host',
+ self.stub_out('nova.db.instance_get_all_by_host',
fake_instance_get_all_by_host)
- self.stubs.Set(db, 'instance_get_by_uuid',
+ self.stub_out('nova.db.instance_get_by_uuid',
fake_instance_get_by_uuid)
self.stubs.Set(self.compute.network_api, 'get_instance_nw_info',
fake_get_instance_nw_info)
@@ -6554,11 +6492,11 @@ class ComputeTestCase(BaseTestCase):
migration['instance_uuid']):
migration2['status'] = 'confirmed'
- self.stubs.Set(db, 'instance_get_by_uuid',
+ self.stub_out('nova.db.instance_get_by_uuid',
fake_instance_get_by_uuid)
- self.stubs.Set(db, 'migration_get_unconfirmed_by_dest_compute',
+ self.stub_out('nova.db.migration_get_unconfirmed_by_dest_compute',
fake_migration_get_unconfirmed_by_dest_compute)
- self.stubs.Set(db, 'migration_update', fake_migration_update)
+ self.stub_out('nova.db.migration_update', fake_migration_update)
self.stubs.Set(self.compute.compute_api, 'confirm_resize',
fake_confirm_resize)
@@ -6621,7 +6559,6 @@ class ComputeTestCase(BaseTestCase):
sort_dir,
marker=None,
columns_to_join=[],
- use_slave=True,
limit=None)
self.assertThat(conductor_instance_update.mock_calls,
testtools_matchers.HasLength(len(old_instances)))
@@ -6843,8 +6780,8 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(instance, 'destroy', fake_destroy)
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- lambda *a, **k: None)
+ self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
+ lambda *a, **k: None)
self.stubs.Set(self.compute,
'_complete_deletion',
@@ -7575,9 +7512,9 @@ class ComputeAPITestCase(BaseTestCase):
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
self.context, instance_uuid)
- ephemeral = filter(block_device.new_format_is_ephemeral, bdms)
+ ephemeral = list(filter(block_device.new_format_is_ephemeral, bdms))
self.assertEqual(1, len(ephemeral))
- swap = filter(block_device.new_format_is_swap, bdms)
+ swap = list(filter(block_device.new_format_is_swap, bdms))
self.assertEqual(1, len(swap))
self.assertEqual(1024, swap[0].volume_size)
@@ -7701,7 +7638,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertRaises(exception.InstanceUserDataTooLarge,
self.compute_api.create, self.context, inst_type,
- self.fake_image['id'], user_data=('1' * 65536))
+ self.fake_image['id'], user_data=(b'1' * 65536))
def test_create_with_malformed_user_data(self):
# Test an instance type with malformed user data.
@@ -7713,7 +7650,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertRaises(exception.InstanceUserDataMalformed,
self.compute_api.create, self.context, inst_type,
- self.fake_image['id'], user_data='banana')
+ self.fake_image['id'], user_data=b'banana')
def test_create_with_base64_user_data(self):
# Test an instance type with ok much user data.
@@ -7727,11 +7664,12 @@ class ComputeAPITestCase(BaseTestCase):
# base64
(refs, resv_id) = self.compute_api.create(
self.context, inst_type, self.fake_image['id'],
- user_data=base64.encodestring('1' * 48510))
+ user_data=base64.encodestring(b'1' * 48510))
def test_populate_instance_for_create(self):
base_options = {'image_ref': self.fake_image['id'],
- 'system_metadata': {'fake': 'value'}}
+ 'system_metadata': {'fake': 'value'},
+ 'uuid': uuids.instance}
instance = objects.Instance()
instance.update(base_options)
inst_type = flavors.get_flavor_by_name("m1.tiny")
@@ -7796,9 +7734,27 @@ class ComputeAPITestCase(BaseTestCase):
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
inst_type = flavors.get_default_flavor()
- self.assertRaises(exception.InvalidInput, self.compute_api.create,
- self.context, inst_type, self.fake_image['id'],
- scheduler_hints={'group': 'groupname'})
+ self.assertRaises(
+ exception.InvalidInput,
+ self.compute_api.create,
+ self.context,
+ inst_type,
+ self.fake_image['id'],
+ scheduler_hints={'group': 'non-uuid'})
+
+ def test_instance_create_with_group_uuid_fails_group_not_exist(self):
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
+
+ inst_type = flavors.get_default_flavor()
+ self.assertRaises(
+ exception.InstanceGroupNotFound,
+ self.compute_api.create,
+ self.context,
+ inst_type,
+ self.fake_image['id'],
+ scheduler_hints={'group':
+ '5b674f73-c8cf-40ef-9965-3b6fe4b304b1'})
def test_destroy_instance_disassociates_security_groups(self):
# Make sure destroying disassociates security groups.
@@ -7811,7 +7767,8 @@ class ComputeAPITestCase(BaseTestCase):
security_group=['testgroup'])
db.instance_destroy(self.context, ref[0]['uuid'])
- group = db.security_group_get(self.context, group['id'])
+ group = db.security_group_get(self.context, group['id'],
+ columns_to_join=['instances'])
self.assertEqual(0, len(group['instances']))
def test_destroy_security_group_disassociates_instances(self):
@@ -7827,7 +7784,8 @@ class ComputeAPITestCase(BaseTestCase):
db.security_group_destroy(self.context, group['id'])
admin_deleted_context = context.get_admin_context(
read_deleted="only")
- group = db.security_group_get(admin_deleted_context, group['id'])
+ group = db.security_group_get(admin_deleted_context, group['id'],
+ columns_to_join=['instances'])
self.assertEqual(0, len(group['instances']))
def _test_rebuild(self, vm_state):
@@ -8129,13 +8087,6 @@ class ComputeAPITestCase(BaseTestCase):
want_objects=True)
self.assertEqual(exp_instance.id, instance.id)
- def test_get_with_integer_id(self):
- # Test get instance with an integer id.
- exp_instance = self._create_fake_instance_obj()
- instance = self.compute_api.get(self.context, exp_instance['id'],
- want_objects=True)
- self.assertEqual(exp_instance.id, instance.id)
-
def test_get_all_by_name_regexp(self):
# Test searching instances by name (display_name).
c = context.get_admin_context()
@@ -9167,6 +9118,42 @@ class ComputeAPITestCase(BaseTestCase):
None,
'/invalid')
+ def test_check_dev_name_assign_dev_name(self):
+ instance = self._create_fake_instance_obj()
+ bdms = [objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {
+ 'instance_uuid': instance.uuid,
+ 'volume_id': 'vol-id',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': None,
+ 'boot_index': None,
+ 'disk_bus': None,
+ 'device_type': None
+ }))]
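+ # _check_dev_name should assign a device name to a BDM that lacks one.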
+ self.compute._check_dev_name(bdms, instance)
+ self.assertIsNotNone(bdms[0].device_name)
+
+ @mock.patch.object(compute_manager.ComputeManager,
+ '_get_device_name_for_instance')
+ def test_check_dev_name_skip_bdms_with_dev_name(self, mock_get_dev_name):
+ instance = self._create_fake_instance_obj()
+ bdms = [objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {
+ 'instance_uuid': instance.uuid,
+ 'volume_id': 'vol-id',
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': '/dev/vda',
+ 'boot_index': None,
+ 'disk_bus': None,
+ 'device_type': None
+ }))]
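+ # A BDM that already has a device name must be left alone, so the
+ # device-name helper should never be called.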
+ self.compute._check_dev_name(bdms, instance)
+ self.assertFalse(mock_get_dev_name.called)
+
def test_no_attach_volume_in_rescue_state(self):
def fake(*args, **kwargs):
pass
@@ -9206,7 +9193,7 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance_obj(params=params)
volume = {'id': 1, 'attach_status': 'attached',
- 'instance_uuid': instance['uuid']}
+ 'instance_uuid': instance['uuid']}
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.detach_volume,
@@ -9619,6 +9606,25 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(a[2].device_name, '/dev/vdb')
self.assertEqual(a[2].volume_id, uuids.volume_id)
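+ # Attaching to a shelved-offloaded instance should only reserve the
+ # volume and record the attachment via the cinder API; there is no
+ # running compute host to contact.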
+ def test_attach_volume_shelved_offloaded(self):
+ instance = self._create_fake_instance_obj()
+ with test.nested(
+ mock.patch.object(compute_api.API,
+ '_check_attach_and_reserve_volume'),
+ mock.patch.object(cinder.API, 'attach')
+ ) as (mock_attach_and_reserve, mock_attach):
+ self.compute_api._attach_volume_shelved_offloaded(
+ self.context, instance, 'fake-volume-id',
+ '/dev/vdb', 'ide', 'cdrom')
+ mock_attach_and_reserve.assert_called_once_with(self.context,
+ 'fake-volume-id',
+ instance)
+ mock_attach.assert_called_once_with(self.context,
+ 'fake-volume-id',
+ instance.uuid,
+ '/dev/vdb')
+ self.assertTrue(mock_attach.called)
+
def test_attach_volume_no_device(self):
called = {}
@@ -9666,8 +9672,7 @@ class ComputeAPITestCase(BaseTestCase):
called = {}
instance = self._create_fake_instance_obj()
# Set attach_status to 'fake' as nothing is reading the value.
- volume = {'id': 1, 'attach_status': 'fake',
- 'instance_uuid': instance['uuid']}
+ volume = {'id': 1, 'attach_status': 'fake'}
def fake_check_detach(*args, **kwargs):
called['fake_check_detach'] = True
@@ -9689,6 +9694,26 @@ class ComputeAPITestCase(BaseTestCase):
self.assertTrue(called.get('fake_begin_detaching'))
self.assertTrue(called.get('fake_rpc_detach_volume'))
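+ # Detaching from a shelved-offloaded instance should begin the detach
+ # and clean up the BDMs locally instead of calling out to a compute
+ # host.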
+ @mock.patch.object(compute_api.API, '_check_and_begin_detach')
+ @mock.patch.object(compute_api.API, '_local_cleanup_bdm_volumes')
+ @mock.patch.object(objects.BlockDeviceMapping, 'get_by_volume_id')
+ def test_detach_volume_shelved_offloaded(self,
+ mock_block_dev,
+ mock_local_cleanup,
+ mock_check_begin_detach):
+
+ mock_block_dev.return_value = [block_device_obj.BlockDeviceMapping(
+ context=context)]
+ instance = self._create_fake_instance_obj()
+ volume = {'id': 1, 'attach_status': 'fake'}
+ self.compute_api._detach_volume_shelved_offloaded(self.context,
+ instance,
+ volume)
+ mock_check_begin_detach.assert_called_once_with(self.context,
+ volume,
+ instance)
+ self.assertTrue(mock_local_cleanup.called)
+
def test_detach_invalid_volume(self):
# Ensure exception is raised while detaching an un-attached volume
fake_instance = self._fake_instance({
@@ -9697,7 +9722,7 @@ class ComputeAPITestCase(BaseTestCase):
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE,
'task_state': None})
- volume = {'id': 1, 'attach_status': 'detached'}
+ volume = {'id': 1, 'attach_status': 'detached', 'status': 'available'}
self.assertRaises(exception.InvalidVolume,
self.compute_api.detach_volume, self.context,
@@ -9712,8 +9737,8 @@ class ComputeAPITestCase(BaseTestCase):
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE,
'task_state': None})
- volume = {'id': 1, 'attach_status': 'attached',
- 'instance_uuid': 'f7000000-0000-0000-0000-000000000002'}
+ volume = {'id': 1, 'attach_status': 'attached', 'status': 'in-use',
+ 'attachments': {'fake_uuid': {'attachment_id': 'fakeid'}}}
self.assertRaises(exception.VolumeUnattached,
self.compute_api.detach_volume, self.context,
@@ -9785,6 +9810,12 @@ class ComputeAPITestCase(BaseTestCase):
'connection_info': '{"test": "test"}'})
bdm = objects.BlockDeviceMapping(context=self.context, **fake_bdm)
+ # Stub out the cinder API get with fake_volume_get so it does not
+ # raise an exception and the manager gets to call bdm.destroy().
+ def fake_volume_get(self, context, volume_id):
+ return {'id': volume_id}
+ self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
+
with test.nested(
mock.patch.object(self.compute.driver, 'detach_volume',
side_effect=exception.DiskNotFound('sdb')),
@@ -9804,7 +9835,8 @@ class ComputeAPITestCase(BaseTestCase):
'fake-id',
'fake-connector')
mock_destroy.assert_called_once_with()
- mock_detach.assert_called_once_with(mock.ANY, 'fake-id')
+ mock_detach.assert_called_once_with(mock.ANY, 'fake-id',
+ instance.uuid, None)
def test_terminate_with_volumes(self):
# Make sure that volumes get detached during instance termination.
@@ -9827,7 +9859,7 @@ class ComputeAPITestCase(BaseTestCase):
# Stub out and record whether it gets detached
result = {"detached": False}
- def fake_detach(self, context, volume_id_param):
+ def fake_detach(self, context, volume_id_param, instance_uuid):
result["detached"] = volume_id_param == volume_id
self.stubs.Set(cinder.API, "detach", fake_detach)
@@ -9868,7 +9900,14 @@ class ComputeAPITestCase(BaseTestCase):
bdm_obj.create()
bdms.append(bdm_obj)
- self.stubs.Set(self.compute, 'volume_api', mox.MockAnything())
+ self.stub_out('nova.volume.cinder.API.terminate_connection',
+ mox.MockAnything())
+ self.stub_out('nova.volume.cinder.API.detach', mox.MockAnything())
+
+ def fake_volume_get(self, context, volume_id):
+ return {'id': volume_id}
+ self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
+
self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything())
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
@@ -9992,21 +10031,29 @@ class ComputeAPITestCase(BaseTestCase):
instance, instance_uuid = self._run_instance()
rpcapi = self.compute_api.compute_task_api
- self.mox.StubOutWithMock(self.compute_api, '_record_action_start')
- self.mox.StubOutWithMock(rpcapi, 'live_migrate_instance')
- self.compute_api._record_action_start(self.context, instance,
- 'live-migration')
- rpcapi.live_migrate_instance(self.context, instance, 'fake_dest_host',
- block_migration=True,
- disk_over_commit=True)
-
- self.mox.ReplayAll()
-
- self.compute_api.live_migrate(self.context, instance,
- block_migration=True,
- disk_over_commit=True,
- host_name='fake_dest_host')
-
+ fake_spec = objects.RequestSpec()
+
+ @mock.patch.object(rpcapi, 'live_migrate_instance')
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ @mock.patch.object(self.compute_api, '_record_action_start')
+ def do_test(record_action_start, get_by_instance_uuid,
+ live_migrate_instance):
+ get_by_instance_uuid.return_value = fake_spec
+
+ self.compute_api.live_migrate(self.context, instance,
+ block_migration=True,
+ disk_over_commit=True,
+ host_name='fake_dest_host')
+
+ record_action_start.assert_called_once_with(self.context, instance,
+ 'live-migration')
+ live_migrate_instance.assert_called_once_with(
+ self.context, instance, 'fake_dest_host',
+ block_migration=True,
+ disk_over_commit=True,
+ request_spec=fake_spec)
+
+ do_test()
instance.refresh()
self.assertEqual(instance['task_state'], task_states.MIGRATING)
@@ -10014,22 +10061,44 @@ class ComputeAPITestCase(BaseTestCase):
instance = self._create_fake_instance_obj(services=True)
self.assertIsNone(instance.task_state)
- def fake_service_is_up(*args, **kwargs):
- return False
+ ctxt = self.context.elevated()
+
+ fake_spec = objects.RequestSpec()
def fake_rebuild_instance(*args, **kwargs):
instance.host = kwargs['host']
instance.save()
- self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
- fake_service_is_up)
- self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
- fake_rebuild_instance)
- self.compute_api.evacuate(self.context.elevated(),
- instance,
- host='fake_dest_host',
- on_shared_storage=True,
- admin_password=None)
+ @mock.patch.object(self.compute_api.compute_task_api,
+ 'rebuild_instance')
+ @mock.patch.object(objects.RequestSpec,
+ 'get_by_instance_uuid')
+ @mock.patch.object(self.compute_api.servicegroup_api, 'service_is_up')
+ def do_test(service_is_up, get_by_instance_uuid, rebuild_instance):
+ service_is_up.return_value = False
+ get_by_instance_uuid.return_value = fake_spec
+ rebuild_instance.side_effect = fake_rebuild_instance
+
+ self.compute_api.evacuate(ctxt,
+ instance,
+ host='fake_dest_host',
+ on_shared_storage=True,
+ admin_password=None)
+
+ rebuild_instance.assert_called_once_with(
+ ctxt,
+ instance=instance,
+ new_pass=None,
+ injected_files=None,
+ image_ref=None,
+ orig_image_ref=None,
+ orig_sys_metadata=None,
+ bdms=None,
+ recreate=True,
+ on_shared_storage=True,
+ request_spec=fake_spec,
+ host='fake_dest_host')
+ do_test()
instance.refresh()
self.assertEqual(instance.task_state, task_states.REBUILDING)
@@ -10037,7 +10106,7 @@ class ComputeAPITestCase(BaseTestCase):
migs = objects.MigrationList.get_by_filters(
self.context, {'source_host': 'fake_host'})
self.assertEqual(1, len(migs))
- self.assertEqual('fake_host', migs[0].source_compute)
+ self.assertEqual(self.compute.host, migs[0].source_compute)
self.assertEqual('accepted', migs[0].status)
self.assertEqual('compute.instance.evacuate',
fake_notifier.NOTIFICATIONS[0].event_type)
@@ -10098,8 +10167,7 @@ class ComputeAPITestCase(BaseTestCase):
admin_password=None)
def test_get_migrations(self):
- migration = test_migration.fake_db_migration(
- uuid=uuids.migration_instance)
+ migration = test_migration.fake_db_migration()
filters = {'host': 'host1'}
self.mox.StubOutWithMock(db, "migration_get_all_by_filters")
db.migration_get_all_by_filters(self.context,
@@ -10979,9 +11047,11 @@ class ComputePolicyTestCase(BaseTestCase):
"network:validate_networks": []}
self.policy.set_rules(rules)
- self.compute_api.create(self.context, None,
- image_href=uuids.host_instance,
- availability_zone='1', forced_host='1')
+ self.compute_api.create(self.context,
+ objects.Flavor(id=1, disabled=False, memory_mb=256, vcpus=1,
+ root_gb=1, ephemeral_gb=1, swap=0),
+ image_href=uuids.host_instance, availability_zone='1',
+ forced_host='1')
class DisabledInstanceTypesTestCase(BaseTestCase):
@@ -11276,8 +11346,6 @@ class EvacuateHostTestCase(BaseTestCase):
send_node=False):
network_api = self.compute.network_api
ctxt = context.get_admin_context()
- mock_context = mock.Mock()
- mock_context.elevated.return_value = ctxt
node = limits = None
if send_node:
@@ -11286,7 +11354,8 @@ class EvacuateHostTestCase(BaseTestCase):
@mock.patch.object(network_api, 'setup_networks_on_host')
@mock.patch.object(network_api, 'setup_instance_network_on_host')
- def _test_rebuild(mock_setup_instance_network_on_host,
+ @mock.patch('nova.context.RequestContext.elevated', return_value=ctxt)
+ def _test_rebuild(mock_context, mock_setup_instance_network_on_host,
mock_setup_networks_on_host):
orig_image_ref = None
image_ref = None
@@ -11294,7 +11363,7 @@ class EvacuateHostTestCase(BaseTestCase):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, self.inst.uuid)
self.compute.rebuild_instance(
- mock_context, self.inst, orig_image_ref,
+ ctxt, self.inst, orig_image_ref,
image_ref, injected_files, 'newpass', {}, bdms, recreate=True,
on_shared_storage=on_shared_storage, migration=migration,
scheduled_node=node, limits=limits)
@@ -11405,7 +11474,7 @@ class EvacuateHostTestCase(BaseTestCase):
# Stub out and record whether it gets detached
result = {"detached": False}
- def fake_detach(self, context, volume):
+ def fake_detach(self, context, volume, instance_uuid, attachment_id):
result["detached"] = volume["id"] == 'fake_volume_id'
self.stubs.Set(cinder.API, "detach", fake_detach)
@@ -11421,7 +11490,8 @@ class EvacuateHostTestCase(BaseTestCase):
# make sure the volume attach and detach calls are made
self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
- self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg())
+ self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg(),
+ mox.IgnoreArg(), None)
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.compute._prep_block_device(mox.IsA(self.context),
@@ -11445,7 +11515,9 @@ class EvacuateHostTestCase(BaseTestCase):
"""Confirm evacuate scenario on shared storage."""
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.compute.driver.spawn(mox.IsA(self.context),
- mox.IsA(objects.Instance), {}, mox.IgnoreArg(), 'newpass',
+ mox.IsA(objects.Instance),
+ mox.IsA(objects.ImageMeta),
+ mox.IgnoreArg(), 'newpass',
network_info=mox.IgnoreArg(),
block_device_info=mox.IgnoreArg())
@@ -11458,14 +11530,11 @@ class EvacuateHostTestCase(BaseTestCase):
"""Confirm evacuate scenario without shared storage
(rebuild from image)
"""
- fake_image = {'id': 1,
- 'name': 'fake_name',
- 'properties': {'kernel_id': 'fake_kernel_id',
- 'ramdisk_id': 'fake_ramdisk_id'}}
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.compute.driver.spawn(mox.IsA(self.context),
- mox.IsA(objects.Instance), mox.IsA(fake_image),
+ mox.IsA(objects.Instance),
+ mox.IsA(objects.ImageMeta),
mox.IgnoreArg(), mox.IsA('newpass'),
network_info=mox.IgnoreArg(),
block_device_info=mox.IgnoreArg())
@@ -11496,14 +11565,10 @@ class EvacuateHostTestCase(BaseTestCase):
lambda: self._rebuild(on_shared_storage=True))
def test_on_shared_storage_not_provided_host_without_shared_storage(self):
- fake_image = {'id': 1,
- 'name': 'fake_name',
- 'properties': {'kernel_id': 'fake_kernel_id',
- 'ramdisk_id': 'fake_ramdisk_id'}}
-
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.compute.driver.spawn(mox.IsA(self.context),
- mox.IsA(objects.Instance), mox.IsA(fake_image),
+ mox.IsA(objects.Instance),
+ mox.IsA(objects.ImageMeta),
mox.IgnoreArg(), mox.IsA('newpass'),
network_info=mox.IgnoreArg(),
block_device_info=mox.IgnoreArg())
@@ -11517,7 +11582,9 @@ class EvacuateHostTestCase(BaseTestCase):
def test_on_shared_storage_not_provided_host_with_shared_storage(self):
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.compute.driver.spawn(mox.IsA(self.context),
- mox.IsA(objects.Instance), {}, mox.IgnoreArg(), 'newpass',
+ mox.IsA(objects.Instance),
+ mox.IsA(objects.ImageMeta),
+ mox.IgnoreArg(), 'newpass',
network_info=mox.IgnoreArg(),
block_device_info=mox.IgnoreArg())
@@ -11638,8 +11705,8 @@ class ComputeInjectedFilesTestCase(BaseTestCase):
def test_injected_success(self):
# test with valid b64 encoded content.
injected_files = [
- ('/a/b/c', base64.b64encode('foobarbaz')),
- ('/d/e/f', base64.b64encode('seespotrun')),
+ ('/a/b/c', base64.b64encode(b'foobarbaz')),
+ ('/d/e/f', base64.b64encode(b'seespotrun')),
]
decoded_files = [
@@ -11651,7 +11718,7 @@ class ComputeInjectedFilesTestCase(BaseTestCase):
def test_injected_invalid(self):
# test with invalid b64 encoded content
injected_files = [
- ('/a/b/c', base64.b64encode('foobarbaz')),
+ ('/a/b/c', base64.b64encode(b'foobarbaz')),
('/d/e/f', 'seespotrun'),
]
diff --git a/nova/tests/unit/compute/test_compute_api.py b/nova/tests/unit/compute/test_compute_api.py
index 3936efebaf..701dcca359 100644
--- a/nova/tests/unit/compute/test_compute_api.py
+++ b/nova/tests/unit/compute/test_compute_api.py
@@ -41,6 +41,7 @@ from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
+from nova.objects import fields as fields_obj
from nova.objects import quotas as quotas_obj
from nova import policy
from nova import quota
@@ -70,6 +71,7 @@ class _ComputeAPIUnitTestMixIn(object):
super(_ComputeAPIUnitTestMixIn, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
+ self.compute_api = compute_api.API()
self.context = context.RequestContext(self.user_id,
self.project_id)
@@ -155,6 +157,13 @@ class _ComputeAPIUnitTestMixIn(object):
instance.obj_reset_changes()
return instance
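+ # Helper that wraps a single object in the given object list; used
+ # by the host-status tests below.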
+ def _obj_to_list_obj(self, list_obj, obj):
+ list_obj.objects = []
+ list_obj.objects.append(obj)
+ list_obj._context = self.context
+ list_obj.obj_reset_changes()
+ return list_obj
+
def test_create_quota_exceeded_messages(self):
image_href = "image_href"
image_id = 0
@@ -243,6 +252,57 @@ class _ComputeAPIUnitTestMixIn(object):
self._test_specified_ip_and_multiple_instances_helper(
requested_networks)
+ @mock.patch.object(compute_rpcapi.ComputeAPI, 'reserve_block_device_name')
+ def test_create_volume_bdm_call_reserve_dev_name(self, mock_reserve):
+ bdm = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {
+ 'id': 1,
+ 'volume_id': 1,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': 'vda',
+ 'boot_index': 1,
+ }))
+ mock_reserve.return_value = bdm
+ instance = self._create_instance_obj()
+ result = self.compute_api._create_volume_bdm(self.context,
+ instance,
+ 'vda',
+ '1',
+ None,
+ None)
+ self.assertTrue(mock_reserve.called)
+ self.assertEqual(result, bdm)
+
+ @mock.patch.object(objects.BlockDeviceMapping, 'create')
+ def test_create_volume_bdm_local_creation(self, bdm_create):
+ instance = self._create_instance_obj()
+ volume_id = 'fake-vol-id'
+ bdm = objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {
+ 'instance_uuid': instance.uuid,
+ 'volume_id': volume_id,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': 'vda',
+ 'boot_index': None,
+ 'disk_bus': None,
+ 'device_type': None
+ }))
+ result = self.compute_api._create_volume_bdm(self.context,
+ instance,
+ '/dev/vda',
+ volume_id,
+ None,
+ None,
+ is_local_creation=True)
+ self.assertEqual(result.instance_uuid, bdm.instance_uuid)
+ self.assertIsNone(result.device_name)
+ self.assertEqual(result.volume_id, bdm.volume_id)
+ self.assertTrue(bdm_create.called)
+
def test_suspend(self):
# Ensure instance can be suspended.
instance = self._create_instance_obj()
@@ -781,6 +841,7 @@ class _ComputeAPIUnitTestMixIn(object):
updates.update({'deleted_at': delete_time,
'deleted': True})
fake_inst = fake_instance.fake_db_instance(**updates)
+ self.compute_api._local_cleanup_bdm_volumes([], inst, self.context)
db.instance_destroy(self.context, inst.uuid,
constraint=None).AndReturn(fake_inst)
compute_utils.notify_about_instance_usage(
@@ -1002,8 +1063,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.mox.StubOutWithMock(rpcapi, 'terminate_instance')
db.block_device_mapping_get_all_by_instance(self.context,
- inst.uuid,
- use_slave=False).AndReturn([])
+ inst.uuid).AndReturn([])
inst.save()
self.compute_api._create_reservations(self.context,
inst, inst.task_state,
@@ -1138,7 +1198,7 @@ class _ComputeAPIUnitTestMixIn(object):
self.useFixture(utils_fixture.TimeFixture(delete_time))
db.block_device_mapping_get_all_by_instance(
- self.context, inst.uuid, use_slave=False).AndReturn([])
+ self.context, inst.uuid).AndReturn([])
inst.save().AndRaise(test.TestingException)
self.mox.ReplayAll()
@@ -1728,9 +1788,10 @@ class _ComputeAPIUnitTestMixIn(object):
instance = self._create_instance_obj(params=paused_state)
self._live_migrate_instance(instance)
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceAction, 'action_start')
- def _live_migrate_instance(self, instance, _save, _action):
+ def _live_migrate_instance(self, instance, _save, _action, get_spec):
# TODO(gilliard): This logic is upside-down (different
# behaviour depending on which class this method is mixed into). Once
# we have cellsv2 we can remove this kind of logic from this test
@@ -1738,6 +1799,8 @@ class _ComputeAPIUnitTestMixIn(object):
api = self.compute_api.cells_rpcapi
else:
api = conductor.api.ComputeTaskAPI
+ fake_spec = objects.RequestSpec()
+ get_spec.return_value = fake_spec
with mock.patch.object(api, 'live_migrate_instance') as task:
self.compute_api.live_migrate(self.context, instance,
block_migration=True,
@@ -1747,7 +1810,8 @@ class _ComputeAPIUnitTestMixIn(object):
task.assert_called_once_with(self.context, instance,
'fake_dest_host',
block_migration=True,
- disk_over_commit=True)
+ disk_over_commit=True,
+ request_spec=fake_spec)
def test_swap_volume_volume_api_usage(self):
# This test ensures that volume_id arguments are passed to volume_api
@@ -1787,16 +1851,21 @@ class _ComputeAPIUnitTestMixIn(object):
volumes[old_volume_id] = {'id': old_volume_id,
'display_name': 'old_volume',
'attach_status': 'attached',
- 'instance_uuid': uuids.vol_instance,
'size': 5,
- 'status': 'in-use'}
+ 'status': 'in-use',
+ 'multiattach': False,
+ 'attachments': {uuids.vol_instance: {
+ 'attachment_id': 'fakeid'
+ }
+ }
+ }
new_volume_id = uuidutils.generate_uuid()
volumes[new_volume_id] = {'id': new_volume_id,
'display_name': 'new_volume',
'attach_status': 'detached',
- 'instance_uuid': None,
'size': 5,
- 'status': 'available'}
+ 'status': 'available',
+ 'multiattach': False}
self.assertRaises(exception.InstanceInvalidState,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
@@ -1813,13 +1882,15 @@ class _ComputeAPIUnitTestMixIn(object):
volumes[old_volume_id]['attach_status'] = 'attached'
# Should fail if the old volume is attached to a different instance
- volumes[old_volume_id]['instance_uuid'] = uuids.vol_instance_2
+ volumes[old_volume_id]['attachments'] = {uuids.vol_instance_2:
+ {'attachment_id': 'fakeid'}}
self.assertRaises(exception.InvalidVolume,
self.compute_api.swap_volume, self.context, instance,
volumes[old_volume_id], volumes[new_volume_id])
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
- volumes[old_volume_id]['instance_uuid'] = uuids.vol_instance
+ volumes[old_volume_id]['attachments'] = {uuids.vol_instance:
+ {'attachment_id': 'fakeid'}}
# Should fail if new volume is attached
volumes[new_volume_id]['attach_status'] = 'attached'
@@ -2059,17 +2130,19 @@ class _ComputeAPIUnitTestMixIn(object):
def _test_snapshot_volume_backed(self, quiesce_required, quiesce_fails,
vm_state=vm_states.ACTIVE):
+ fake_sys_meta = {'image_min_ram': '11',
+ 'image_min_disk': '22',
+ 'image_container_format': 'ami',
+ 'image_disk_format': 'ami',
+ 'image_ram_disk': 'fake_ram_disk_id',
+ 'image_bdm_v2': 'True',
+ 'image_block_device_mapping': '[]',
+ 'image_mappings': '[]',
+ 'image_cache_in_nova': 'True'}
+ if quiesce_required:
+ fake_sys_meta['image_os_require_quiesce'] = 'yes'
params = dict(locked=True, vm_state=vm_state,
- system_metadata={'image_min_ram': '11',
- 'image_min_disk': '22',
- 'image_container_format': 'ami',
- 'image_disk_format': 'ami',
- 'image_ram_disk': 'fake_ram_disk_id',
- 'image_bdm_v2': 'True',
- 'image_block_device_mapping': '[]',
- 'image_mappings': '[]',
- 'image_cache_in_nova': 'True',
- })
+ system_metadata=fake_sys_meta)
instance = self._create_instance_obj(params=params)
instance['root_device_name'] = 'vda'
@@ -2084,14 +2157,12 @@ class _ComputeAPIUnitTestMixIn(object):
'is_public': False,
'min_ram': '11',
}
+ if quiesce_required:
+ expect_meta['properties']['os_require_quiesce'] = 'yes'
quiesced = [False, False]
quiesce_expected = not quiesce_fails and vm_state == vm_states.ACTIVE
- if quiesce_required:
- instance.system_metadata['image_os_require_quiesce'] = 'yes'
- expect_meta['properties']['os_require_quiesce'] = 'yes'
-
def fake_get_all_by_instance(context, instance, use_slave=False):
return copy.deepcopy(instance_bdms)
@@ -2113,8 +2184,8 @@ class _ComputeAPIUnitTestMixIn(object):
def fake_unquiesce_instance(context, instance, mapping=None):
quiesced[1] = True
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- fake_get_all_by_instance)
+ self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
+ fake_get_all_by_instance)
self.stubs.Set(self.compute_api.image_api, 'create',
fake_image_create)
self.stubs.Set(self.compute_api.volume_api, 'get',
@@ -2125,6 +2196,7 @@ class _ComputeAPIUnitTestMixIn(object):
fake_quiesce_instance)
self.stubs.Set(self.compute_api.compute_rpcapi, 'unquiesce_instance',
fake_unquiesce_instance)
+ fake_image.stub_out_image_service(self)
# No block devices defined
self.compute_api.snapshot_volume_backed(
@@ -2787,6 +2859,68 @@ class _ComputeAPIUnitTestMixIn(object):
self._test_create_db_entry_for_new_instance_with_cinder_error(
expected_exception=exception.InvalidVolume)
+ def test_provision_instances_creates_request_spec(self):
+ @mock.patch.object(self.compute_api, '_check_num_instances_quota')
+ @mock.patch.object(objects.Instance, 'create')
+ @mock.patch.object(self.compute_api.security_group_api,
+ 'ensure_default')
+ @mock.patch.object(self.compute_api, '_validate_bdm')
+ @mock.patch.object(self.compute_api, '_create_block_device_mapping')
+ @mock.patch.object(objects.RequestSpec, 'from_components')
+ def do_test(mock_from_components, _mock_create_bdm, _mock_validate_bdm,
+ _mock_ensure_default, _mock_create, mock_check_num_inst_quota):
+ quota_mock = mock.MagicMock()
+ req_spec_mock = mock.MagicMock()
+
+ mock_check_num_inst_quota.return_value = (1, quota_mock)
+ mock_from_components.return_value = req_spec_mock
+
+ ctxt = context.RequestContext('fake-user', 'fake-project')
+ flavor = self._create_flavor()
+ min_count = max_count = 1
+ boot_meta = {
+ 'id': 'fake-image-id',
+ 'properties': {'mappings': []},
+ 'status': 'fake-status',
+ 'location': 'far-away'}
+ base_options = {'image_ref': 'fake-ref',
+ 'display_name': 'fake-name',
+ 'project_id': 'fake-project',
+ 'availability_zone': None,
+ 'numa_topology': None,
+ 'pci_requests': None}
+ security_groups = {}
+ block_device_mapping = [objects.BlockDeviceMapping(
+ **fake_block_device.FakeDbBlockDeviceDict(
+ {
+ 'id': 1,
+ 'volume_id': 1,
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_name': 'vda',
+ 'boot_index': 0,
+ }))]
+ shutdown_terminate = True
+ instance_group = None
+ check_server_group_quota = False
+ filter_properties = {'scheduler_hints': None,
+ 'instance_type': flavor}
+
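+ # _provision_instances should build a RequestSpec from these
+ # components and persist it for the instance being created.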
+ instances = self.compute_api._provision_instances(ctxt, flavor,
+ min_count, max_count, base_options, boot_meta,
+ security_groups, block_device_mapping, shutdown_terminate,
+ instance_group, check_server_group_quota,
+ filter_properties)
+ self.assertTrue(uuidutils.is_uuid_like(instances[0].uuid))
+
+ mock_from_components.assert_called_once_with(ctxt, mock.ANY,
+ boot_meta, flavor, base_options['numa_topology'],
+ base_options['pci_requests'], filter_properties,
+ instance_group, base_options['availability_zone'])
+ req_spec_mock.create.assert_called_once_with()
+
+ do_test()
+
def _test_rescue(self, vm_state=vm_states.ACTIVE, rescue_password=None,
rescue_image=None, clean_shutdown=True):
instance = self._create_instance_obj(params={'vm_state': vm_state})
@@ -2991,6 +3125,20 @@ class _ComputeAPIUnitTestMixIn(object):
filters = mock_get.call_args_list[0][0][1]
self.assertEqual({'project_id': 'foo'}, filters)
+ def test_metadata_invalid_return_empty_object(self):
+ api = compute_api.API()
+ ret = api.get_all(self.context, want_objects=True,
+ search_opts={'metadata': 'foo'})
+ self.assertIsInstance(ret, objects.InstanceList)
+ self.assertEqual(0, len(ret))
+
+ def test_metadata_invalid_return_empty_list(self):
+ api = compute_api.API()
+ ret = api.get_all(self.context, want_objects=False,
+ search_opts={'metadata': 'foo'})
+ self.assertIsInstance(ret, list)
+ self.assertEqual(0, len(ret))
+
def test_populate_instance_names_host_name(self):
params = dict(display_name="vm1")
instance = self._create_instance_obj(params=params)
@@ -3019,6 +3167,105 @@ class _ComputeAPIUnitTestMixIn(object):
instance, 1)
self.assertEqual('Server-%s' % instance.uuid, instance.hostname)
+ def test_host_statuses(self):
+ instances = [
+ objects.Instance(uuid=uuids.instance_1, host='host1', services=
+ self._obj_to_list_obj(objects.ServiceList(
+ self.context), objects.Service(id=0, host='host1',
+ disabled=True, forced_down=True,
+ binary='nova-compute'))),
+ objects.Instance(uuid=uuids.instance_2, host='host2', services=
+ self._obj_to_list_obj(objects.ServiceList(
+ self.context), objects.Service(id=0, host='host2',
+ disabled=True, forced_down=False,
+ binary='nova-compute'))),
+ objects.Instance(uuid=uuids.instance_3, host='host3', services=
+ self._obj_to_list_obj(objects.ServiceList(
+ self.context), objects.Service(id=0, host='host3',
+ disabled=False, last_seen_up=timeutils.utcnow()
+ - datetime.timedelta(minutes=5),
+ forced_down=False, binary='nova-compute'))),
+ objects.Instance(uuid=uuids.instance_4, host='host4', services=
+ self._obj_to_list_obj(objects.ServiceList(
+ self.context), objects.Service(id=0, host='host4',
+ disabled=False, last_seen_up=timeutils.utcnow(),
+ forced_down=False, binary='nova-compute'))),
+ objects.Instance(uuid=uuids.instance_5, host='host5', services=
+ objects.ServiceList()),
+ objects.Instance(uuid=uuids.instance_6, host=None, services=
+ self._obj_to_list_obj(objects.ServiceList(
+ self.context), objects.Service(id=0, host='host6',
+ disabled=True, forced_down=False,
+ binary='nova-compute'))),
+ objects.Instance(uuid=uuids.instance_7, host='host2', services=
+ self._obj_to_list_obj(objects.ServiceList(
+ self.context), objects.Service(id=0, host='host2',
+ disabled=True, forced_down=False,
+ binary='nova-compute')))
+ ]
+
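+ # Expected mapping: forced_down -> DOWN, disabled -> MAINTENANCE,
+ # stale heartbeat -> UNKNOWN, recent heartbeat -> UP, and no service
+ # record or no host -> NONE.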
+ host_statuses = self.compute_api.get_instances_host_statuses(
+ instances)
+ expect_statuses = {uuids.instance_1: fields_obj.HostStatus.DOWN,
+ uuids.instance_2: fields_obj.HostStatus.MAINTENANCE,
+ uuids.instance_3: fields_obj.HostStatus.UNKNOWN,
+ uuids.instance_4: fields_obj.HostStatus.UP,
+ uuids.instance_5: fields_obj.HostStatus.NONE,
+ uuids.instance_6: fields_obj.HostStatus.NONE,
+ uuids.instance_7: fields_obj.HostStatus.MAINTENANCE}
+ for instance in instances:
+ self.assertEqual(expect_statuses[instance.uuid],
+ host_statuses[instance.uuid])
+
+ @mock.patch.object(objects.Migration, 'get_by_id_and_instance')
+ def test_live_migrate_force_complete_succeeded(
+ self, get_by_id_and_instance):
+
+ if self.cell_type == 'api':
+ # live_migrate_force_complete is not implemented for the cells api.
+ return
+ rpcapi = self.compute_api.compute_rpcapi
+
+ instance = self._create_instance_obj()
+ instance.task_state = task_states.MIGRATING
+
+ migration = objects.Migration()
+ migration.id = 0
+ migration.status = 'running'
+ get_by_id_and_instance.return_value = migration
+
+ with mock.patch.object(
+ rpcapi, 'live_migration_force_complete') as lm_force_complete:
+ self.compute_api.live_migrate_force_complete(
+ self.context, instance, migration.id)
+
+ lm_force_complete.assert_called_once_with(self.context,
+ instance,
+ 0)
+
+ @mock.patch.object(objects.Migration, 'get_by_id_and_instance')
+ def test_live_migrate_force_complete_invalid_migration_state(
+ self, get_by_id_and_instance):
+ instance = self._create_instance_obj()
+ instance.task_state = task_states.MIGRATING
+
+ migration = objects.Migration()
+ migration.id = 0
+ migration.status = 'error'
+ get_by_id_and_instance.return_value = migration
+
+ self.assertRaises(exception.InvalidMigrationState,
+ self.compute_api.live_migrate_force_complete,
+ self.context, instance, migration.id)
+
+ def test_live_migrate_force_complete_invalid_vm_state(self):
+ instance = self._create_instance_obj()
+ instance.task_state = None
+
+ self.assertRaises(exception.InstanceInvalidState,
+ self.compute_api.live_migrate_force_complete,
+ self.context, instance, '1')
+
class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
def setUp(self):
@@ -3043,6 +3290,20 @@ class ComputeAPIAPICellUnitTestCase(_ComputeAPIUnitTestMixIn,
self.assertRaises(exception.CannotResizeToSameFlavor,
self._test_resize, same_flavor=True)
+ @mock.patch.object(compute_cells_api, 'ComputeRPCAPIRedirect')
+ def test_create_volume_bdm_call_reserve_dev_name(self, mock_reserve):
+ instance = self._create_instance_obj()
+ # The cells rpcapi does not implement reserve_block_device_name,
+ # so _create_volume_bdm returns None.
+ result = self.compute_api._create_volume_bdm(self.context,
+ instance,
+ 'vda',
+ '1',
+ None,
+ None)
+ self.assertIsNone(result)
+
class ComputeAPIComputeCellUnitTestCase(_ComputeAPIUnitTestMixIn,
test.NoDBTestCase):
@@ -3106,6 +3367,13 @@ class SecurityGroupAPITest(test.NoDBTestCase):
mock_get.return_value = groups
names = self.secgroup_api.get_instance_security_groups(self.context,
uuids.instance)
- self.assertEqual([{'name': 'bar'}, {'name': 'foo'}], sorted(names))
+ self.assertEqual(sorted([{'name': 'bar'}, {'name': 'foo'}], key=str),
+ sorted(names, key=str))
self.assertEqual(1, mock_get.call_count)
self.assertEqual(uuids.instance, mock_get.call_args_list[0][0][1].uuid)
+
+ @mock.patch('nova.objects.security_group.make_secgroup_list')
+ def test_populate_security_groups(self, mock_msl):
+ r = self.secgroup_api.populate_security_groups([mock.sentinel.group])
+ mock_msl.assert_called_once_with([mock.sentinel.group])
+ self.assertEqual(r, mock_msl.return_value)
diff --git a/nova/tests/unit/compute/test_compute_cells.py b/nova/tests/unit/compute/test_compute_cells.py
index e4376cce7a..7d98576af4 100644
--- a/nova/tests/unit/compute/test_compute_cells.py
+++ b/nova/tests/unit/compute/test_compute_cells.py
@@ -370,9 +370,10 @@ class CellsConductorAPIRPCRedirect(test.NoDBTestCase):
self.compute_api.resize(self.context, instance)
self.assertTrue(self.cells_rpcapi.resize_instance.called)
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
@mock.patch.object(compute_api.API, '_record_action_start')
@mock.patch.object(objects.Instance, 'save')
- def test_live_migrate_instance(self, instance_save, _record):
+ def test_live_migrate_instance(self, instance_save, _record, _get_spec):
orig_system_metadata = {}
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE, cell_name='fake-cell',
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index dbf6486dde..63a217cb02 100644
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -12,6 +12,7 @@
"""Unit tests for ComputeManager()."""
+import datetime
import time
import uuid
@@ -47,6 +48,7 @@ from nova import test
from nova.tests import fixtures
from nova.tests.unit.compute import fake_resource_tracker
from nova.tests.unit import fake_block_device
+from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_network_cache_model
@@ -202,8 +204,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
get_rt.side_effect = _get_rt_side_effect
self.compute.update_available_resource(ctxt)
get_db_nodes.assert_called_once_with(ctxt, use_slave=True)
- self.assertEqual([mock.call(node) for node in avail_nodes],
- get_rt.call_args_list)
+ self.assertEqual(sorted([mock.call(node) for node in avail_nodes]),
+ sorted(get_rt.call_args_list))
for rt in rts:
rt.update_available_resource.assert_called_once_with(ctxt)
self.assertEqual(expected_rt_dict,
@@ -416,8 +418,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
context.get_admin_context().AndReturn(self.context)
db.instance_get_all_by_host(
self.context, our_host,
- columns_to_join=['info_cache', 'metadata'],
- use_slave=False
+ columns_to_join=['info_cache', 'metadata']
).AndReturn(startup_instances)
if defer_iptables_apply:
self.compute.driver.filter_defer_apply_on()
@@ -515,8 +516,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.compute.driver.init_host(host=our_host)
context.get_admin_context().AndReturn(self.context)
db.instance_get_all_by_host(self.context, our_host,
- columns_to_join=['info_cache', 'metadata'],
- use_slave=False
+ columns_to_join=['info_cache', 'metadata']
).AndReturn([])
self.compute.init_virt_events()
@@ -1079,7 +1079,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
@mock.patch(
'nova.compute.manager.ComputeManager._get_instance_block_device_info')
@mock.patch('nova.virt.driver.ComputeDriver.destroy')
- @mock.patch('nova.virt.driver.ComputeDriver.get_volume_connector')
+ @mock.patch('nova.virt.fake.FakeDriver.get_volume_connector')
def _test_shutdown_instance_exception(self, exc, mock_connector,
mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
mock_connector.side_effect = exc
@@ -1099,15 +1099,15 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_client_exception(self):
- exc = cinder_exception.ClientException
+ exc = cinder_exception.ClientException(code=9001)
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_volume_not_found(self):
- exc = exception.VolumeNotFound
+ exc = exception.VolumeNotFound(volume_id=42)
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_disk_not_found(self):
- exc = exception.DiskNotFound
+ exc = exception.DiskNotFound(location="not\\here")
self._test_shutdown_instance_exception(exc)
def _test_init_instance_retries_reboot(self, instance, reboot_type,
@@ -1284,9 +1284,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
{'uuid': [inst['uuid'] for
inst in driver_instances]},
'created_at', 'desc', columns_to_join=None,
- limit=None, marker=None,
- use_slave=True).AndReturn(
- driver_instances)
+ limit=None, marker=None).AndReturn(driver_instances)
self.mox.ReplayAll()
@@ -1335,8 +1333,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
db.instance_get_all_by_filters(
self.context, filters,
'created_at', 'desc', columns_to_join=None,
- limit=None, marker=None,
- use_slave=True).AndReturn(all_instances)
+ limit=None, marker=None).AndReturn(all_instances)
self.mox.ReplayAll()
@@ -1722,20 +1719,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
fake_vol_unreserve)
self.stubs.Set(self.compute.volume_api, 'terminate_connection',
fake_vol_api_func)
- self.stubs.Set(db,
- 'block_device_mapping_get_by_instance_and_volume_id',
- lambda x, y, z, v: fake_bdm)
+ self.stub_out('nova.db.'
+ 'block_device_mapping_get_by_instance_and_volume_id',
+ lambda x, y, z, v: fake_bdm)
self.stubs.Set(self.compute.driver, 'get_volume_connector',
lambda x: {})
self.stubs.Set(self.compute.driver, 'swap_volume',
fake_swap_volume)
self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion',
fake_vol_migrate_volume_completion)
- self.stubs.Set(db, 'block_device_mapping_update',
- fake_block_device_mapping_update)
- self.stubs.Set(db,
- 'instance_fault_create',
- lambda x, y:
+ self.stub_out('nova.db.block_device_mapping_update',
+ fake_block_device_mapping_update)
+ self.stub_out('nova.db.instance_fault_create',
+ lambda x, y:
test_instance_fault.fake_faults['fake-uuid'][0])
self.stubs.Set(self.compute, '_instance_update',
lambda c, u, **k: {})
@@ -1771,12 +1767,10 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
@mock.patch.object(compute_utils, 'EventReporter')
def test_check_can_live_migrate_source(self, event_mock):
is_volume_backed = 'volume_backed'
- dest_check_data = dict(foo='bar')
+ dest_check_data = migrate_data_obj.LiveMigrateData()
db_instance = fake_instance.fake_db_instance()
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), db_instance)
- expected_dest_check_data = dict(dest_check_data,
- is_volume_backed=is_volume_backed)
self.mox.StubOutWithMock(self.compute.compute_api,
'is_volume_backed_instance')
@@ -1791,7 +1785,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.context, instance, refresh_conn_info=True
).AndReturn({'block_device_mapping': 'fake'})
self.compute.driver.check_can_live_migrate_source(
- self.context, instance, expected_dest_check_data,
+ self.context, instance, dest_check_data,
{'block_device_mapping': 'fake'})
self.mox.ReplayAll()
@@ -1802,11 +1796,11 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
event_mock.assert_called_once_with(
self.context, 'compute_check_can_live_migrate_source',
instance.uuid)
+ self.assertTrue(dest_check_data.is_volume_backed)
@mock.patch.object(compute_utils, 'EventReporter')
def _test_check_can_live_migrate_destination(self, event_mock,
- do_raise=False,
- has_mig_data=False):
+ do_raise=False):
db_instance = fake_instance.fake_db_instance(host='fake-host')
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), db_instance)
@@ -1817,10 +1811,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
dest_info = 'dest_info'
dest_check_data = dict(foo='bar')
mig_data = dict(cow='moo')
- expected_result = dict(mig_data)
- if has_mig_data:
- dest_check_data['migrate_data'] = dict(cat='meow')
- expected_result.update(cat='meow')
self.mox.StubOutWithMock(self.compute, '_get_compute_info')
self.mox.StubOutWithMock(self.compute.driver,
@@ -1857,7 +1847,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.context, instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
- self.assertEqual(expected_result, result)
+ self.assertEqual(mig_data, result)
event_mock.assert_called_once_with(
self.context, 'compute_check_can_live_migrate_destination',
instance.uuid)
@@ -1865,9 +1855,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
def test_check_can_live_migrate_destination_success(self):
self._test_check_can_live_migrate_destination()
- def test_check_can_live_migrate_destination_success_w_mig_data(self):
- self._test_check_can_live_migrate_destination(has_mig_data=True)
-
def test_check_can_live_migrate_destination_fail(self):
self.assertRaises(
test.TestingException,
@@ -2201,9 +2188,10 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
'_notify_about_instance_usage')
def _test_detach_volume(self, notify_inst_usage, detach,
bdm_get, destroy_bdm=True):
- volume_id = '123'
+ volume_id = uuids.volume
inst_obj = mock.Mock()
- inst_obj.uuid = 'uuid'
+ inst_obj.uuid = uuids.instance
+ attachment_id = uuids.attachment
bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
bdm.device_name = 'vdb'
@@ -2216,13 +2204,16 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.compute._detach_volume(self.context, volume_id,
inst_obj,
- destroy_bdm=destroy_bdm)
+ destroy_bdm=destroy_bdm,
+ attachment_id=attachment_id)
detach.assert_called_once_with(self.context, inst_obj, bdm)
driver.get_volume_connector.assert_called_once_with(inst_obj)
volume_api.terminate_connection.assert_called_once_with(
self.context, volume_id, connector_sentinel)
- volume_api.detach.assert_called_once_with(mock.ANY, volume_id)
+ volume_api.detach.assert_called_once_with(mock.ANY, volume_id,
+ inst_obj.uuid,
+ attachment_id)
notify_inst_usage.assert_called_once_with(
self.context, inst_obj, "volume.detach",
extra_usage_info={'volume_id': volume_id}
@@ -2237,7 +2228,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.ACTIVE)
fake_nw_info = network_model.NetworkInfo()
- rescue_image_meta = {'id': 'fake', 'name': 'fake'}
+ rescue_image_meta = objects.ImageMeta.from_dict(
+ {'id': 'fake', 'name': 'fake'})
with test.nested(
mock.patch.object(self.context, 'elevated',
return_value=self.context),
@@ -2672,6 +2664,32 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
do_test()
+ def _test_rebuild_ex(self, instance, ex):
+ # Test that we do not raise on certain exceptions
+ with test.nested(
+ mock.patch.object(self.compute, '_get_compute_info'),
+ mock.patch.object(self.compute, '_do_rebuild_instance_with_claim',
+ side_effect=ex),
+ mock.patch.object(self.compute, '_set_migration_status'),
+ mock.patch.object(self.compute, '_notify_about_instance_usage')
+ ) as (mock_get, mock_rebuild, mock_set, mock_notify):
+ self.compute.rebuild_instance(self.context, instance, None, None,
+ None, None, None, None, None)
+ mock_set.assert_called_once_with(None, 'failed')
+ mock_notify.assert_called_once_with(mock.ANY, instance,
+ 'rebuild.error', fault=ex)
+
+ def test_rebuild_deleting(self):
+ instance = objects.Instance(uuid='fake-uuid')
+ ex = exception.UnexpectedDeletingTaskStateError(
+ instance_uuid=instance.uuid, expected='expected', actual='actual')
+ self._test_rebuild_ex(instance, ex)
+
+ def test_rebuild_notfound(self):
+ instance = objects.Instance(uuid='fake-uuid')
+ ex = exception.InstanceNotFound(instance_id=instance.uuid)
+ self._test_rebuild_ex(instance, ex)
+
def test_rebuild_default_impl(self):
def _detach(context, bdms):
# NOTE(rpodolyaka): check that instance has been powered off by
@@ -2758,7 +2776,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
uuids.instance, 'fake-mac',
start_period=0, use_slave=True)
# NOTE(sdague): bw_usage_update happens at some time in
- # the future, so what last_refreshed is is irrelevant.
+ # the future, so the value of last_refreshed is irrelevant.
bw_usage_update.assert_called_once_with(self.context,
uuids.instance,
'fake-mac', 0, 4, 6, 1, 2,
@@ -2768,7 +2786,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
def test_reverts_task_state_instance_not_found(self):
# Tests that the reverts_task_state decorator in the compute manager
# will not trace when an InstanceNotFound is raised.
- instance = objects.Instance(uuid=uuids.instance)
+ instance = objects.Instance(uuid=uuids.instance, task_state="FAKE")
instance_update_mock = mock.Mock(
side_effect=exception.InstanceNotFound(instance_id=instance.uuid))
self.compute._instance_update = instance_update_mock
@@ -3165,7 +3183,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
try_deallocate_networks=False)
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
- self.compute.driver.spawn(self.context, self.instance, self.image,
+ self.compute.driver.spawn(self.context, self.instance,
+ mox.IsA(objects.ImageMeta),
self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info).AndRaise(exc)
@@ -3438,7 +3457,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
try_deallocate_networks=False)
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
- self.compute.driver.spawn(self.context, self.instance, self.image,
+ self.compute.driver.spawn(self.context, self.instance,
+ mox.IsA(objects.ImageMeta),
self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info).AndRaise(exc)
@@ -3473,7 +3493,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
exc = test.TestingException()
- self.compute.driver.spawn(self.context, self.instance, self.image,
+ self.compute.driver.spawn(self.context, self.instance,
+ mox.IsA(objects.ImageMeta),
self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info).AndRaise(exc)
@@ -3576,7 +3597,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
expected_task_state=task_states.BLOCK_DEVICE_MAPPING)])
spawn.assert_has_calls([mock.call(self.context, self.instance,
- self.image, self.injected_files, self.admin_pass,
+ test.MatchType(objects.ImageMeta),
+ self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info)])
@@ -3633,7 +3655,8 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups, self.image,
+ self.requested_networks, self.security_groups,
+ mox.IsA(objects.ImageMeta),
self.block_device_mapping).AndRaise(exc)
self._notify_about_instance_usage('create.error',
fault=exc, stub=False)
@@ -4265,7 +4288,7 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
def test_check_migrate_source_converts_object(self):
# NOTE(danms): Make sure that we legacy-ify any data objects
- # the drivers give us back, until we're ready for them
+ # the drivers give us back, if we were passed a non-object
data = migrate_data_obj.LiveMigrateData(is_volume_backed=False)
compute = manager.ComputeManager()
@@ -4278,28 +4301,273 @@ class ComputeManagerMigrationTestCase(test.NoDBTestCase):
compute.check_can_live_migrate_source(
self.context, {'uuid': uuids.instance}, {}),
dict)
+ self.assertIsInstance(mock_cclms.call_args_list[0][0][2],
+ migrate_data_obj.LiveMigrateData)
_test()
- def test_check_migrate_destination_converts_object(self):
- # NOTE(danms): Make sure that we legacy-ify any data objects
- # the drivers give us back, until we're ready for them
- data = migrate_data_obj.LiveMigrateData(is_volume_backed=False)
- inst = objects.Instance(id=1, uuid=uuids.instance, host='bar')
+ def test_pre_live_migration_handles_dict(self):
compute = manager.ComputeManager()
- @mock.patch.object(compute.driver,
- 'check_can_live_migrate_destination')
- @mock.patch.object(compute.compute_rpcapi,
- 'check_can_live_migrate_source')
- @mock.patch.object(compute, '_get_compute_info')
- def _test(mock_gci, mock_cclms, mock_cclmd):
- mock_gci.return_value = inst
- mock_cclmd.return_value = data
- mock_cclms.return_value = {}
- result = compute.check_can_live_migrate_destination(
- self.context, inst, False, False)
- self.assertIsInstance(mock_cclms.call_args_list[0][0][2], dict)
- self.assertIsInstance(result, dict)
+ @mock.patch.object(compute, '_notify_about_instance_usage')
+ @mock.patch.object(compute, 'network_api')
+ @mock.patch.object(compute.driver, 'pre_live_migration')
+ @mock.patch.object(compute, '_get_instance_block_device_info')
+ @mock.patch.object(compute.compute_api, 'is_volume_backed_instance')
+ def _test(mock_ivbi, mock_gibdi, mock_plm, mock_nwapi, mock_notify):
+ migrate_data = migrate_data_obj.LiveMigrateData()
+ mock_plm.return_value = migrate_data
+ r = compute.pre_live_migration(self.context, {'uuid': 'foo'},
+ False, {}, {})
+ self.assertIsInstance(r, dict)
+ self.assertIsInstance(mock_plm.call_args_list[0][0][5],
+ migrate_data_obj.LiveMigrateData)
_test()
+
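+ # As above, a legacy dict coming in over RPC should be converted to
+ # a LiveMigrateData object before it reaches pre_live_migration.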
+ def test_live_migration_handles_dict(self):
+ compute = manager.ComputeManager()
+
+ @mock.patch.object(compute, 'compute_rpcapi')
+ @mock.patch.object(compute, 'driver')
+ def _test(mock_driver, mock_rpc):
+ migrate_data = migrate_data_obj.LiveMigrateData()
+ migration = objects.Migration()
+ migration.save = mock.MagicMock()
+ mock_rpc.pre_live_migration.return_value = migrate_data
+ compute._do_live_migration(self.context, 'foo', {'uuid': 'foo'},
+ False, migration, {})
+ self.assertIsInstance(
+ mock_rpc.pre_live_migration.call_args_list[0][0][5],
+ migrate_data_obj.LiveMigrateData)
+
+ _test()
+
+ def test_rollback_live_migration_handles_dict(self):
+ compute = manager.ComputeManager()
+
+ @mock.patch.object(compute, 'network_api')
+ @mock.patch.object(compute, '_notify_about_instance_usage')
+ @mock.patch.object(compute, '_live_migration_cleanup_flags')
+ @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
+ def _test(mock_bdm, mock_lmcf, mock_notify, mock_nwapi):
+ mock_bdm.return_value = []
+ mock_lmcf.return_value = False, False
+ compute._rollback_live_migration(self.context,
+ mock.MagicMock(),
+ 'foo', False, {})
+ self.assertIsInstance(mock_lmcf.call_args_list[0][0][1],
+ migrate_data_obj.LiveMigrateData)
+
+ _test()
+
+ def test_live_migration_force_complete_succeeded(self):
+
+ instance = objects.Instance(uuid=str(uuid.uuid4()))
+ migration = objects.Migration()
+ migration.status = 'running'
+ migration.id = 0
+
+ @mock.patch.object(self.compute, '_notify_about_instance_usage')
+ @mock.patch.object(objects.Migration, 'get_by_id',
+ return_value=migration)
+ @mock.patch.object(self.compute.driver,
+ 'live_migration_force_complete')
+ def _do_test(force_complete, get_by_id, _notify_about_instance_usage):
+ self.compute.live_migration_force_complete(
+ self.context, instance, migration.id)
+
+ force_complete.assert_called_once_with(instance)
+
+ _notify_usage_calls = [
+ mock.call(self.context, instance,
+ 'live.migration.force.complete.start'),
+ mock.call(self.context, instance,
+ 'live.migration.force.complete.end')
+ ]
+
+ _notify_about_instance_usage.assert_has_calls(_notify_usage_calls)
+
+ _do_test()
+
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ def test_live_migration_pause_vm_invalid_migration_state(
+ self, add_instance_fault_from_exc):
+
+ instance = objects.Instance(id=1234, uuid=str(uuid.uuid4()))
+ migration = objects.Migration()
+ migration.status = 'aborted'
+ migration.id = 0
+
+ @mock.patch.object(objects.Migration, 'get_by_id',
+ return_value=migration)
+ def _do_test(get_by_id):
+ self.assertRaises(exception.InvalidMigrationState,
+ self.compute.live_migration_force_complete,
+ self.context, instance, migration.id)
+
+ _do_test()
+
+ def test_post_live_migration_at_destination_success(self):
+
+ @mock.patch.object(self.instance, 'save')
+ @mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
+ return_value='test_network')
+ @mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
+ @mock.patch.object(self.compute.network_api, 'migrate_instance_finish')
+ @mock.patch.object(self.compute, '_notify_about_instance_usage')
+ @mock.patch.object(self.compute, '_get_instance_block_device_info')
+ @mock.patch.object(self.compute, '_get_power_state', return_value=1)
+ @mock.patch.object(self.compute, '_get_compute_info')
+ @mock.patch.object(self.compute.driver,
+ 'post_live_migration_at_destination')
+ def _do_test(post_live_migration_at_destination, _get_compute_info,
+ _get_power_state, _get_instance_block_device_info,
+ _notify_about_instance_usage, migrate_instance_finish,
+ setup_networks_on_host, get_instance_nw_info, save):
+
+ cn = mock.Mock(spec_set=['hypervisor_hostname'])
+ cn.hypervisor_hostname = 'test_host'
+ _get_compute_info.return_value = cn
+ cn_old = self.instance.host
+ instance_old = self.instance
+
+ self.compute.post_live_migration_at_destination(
+ self.context, self.instance, False)
+
+ setup_networks_calls = [
+ mock.call(self.context, self.instance, self.compute.host),
+ mock.call(self.context, self.instance, cn_old, teardown=True),
+ mock.call(self.context, self.instance, self.compute.host)
+ ]
+ setup_networks_on_host.assert_has_calls(setup_networks_calls)
+
+ notify_usage_calls = [
+ mock.call(self.context, instance_old,
+ "live_migration.post.dest.start",
+ network_info='test_network'),
+ mock.call(self.context, self.instance,
+ "live_migration.post.dest.end",
+ network_info='test_network')
+ ]
+ _notify_about_instance_usage.assert_has_calls(notify_usage_calls)
+
+ migrate_instance_finish.assert_called_once_with(
+ self.context, self.instance,
+ {'source_compute': cn_old,
+ 'dest_compute': self.compute.host})
+ _get_instance_block_device_info.assert_called_once_with(
+ self.context, self.instance
+ )
+ get_instance_nw_info.assert_called_once_with(self.context,
+ self.instance)
+ _get_power_state.assert_called_once_with(self.context,
+ self.instance)
+ _get_compute_info.assert_called_once_with(self.context,
+ self.compute.host)
+
+ self.assertEqual(self.compute.host, self.instance.host)
+ self.assertEqual('test_host', self.instance.node)
+ self.assertEqual(1, self.instance.power_state)
+ self.assertIsNone(self.instance.task_state)
+ save.assert_called_once_with(
+ expected_task_state=task_states.MIGRATING)
+
+ _do_test()
+
+ def test_post_live_migration_at_destination_compute_not_found(self):
+
+ @mock.patch.object(self.instance, 'save')
+ @mock.patch.object(self.compute, 'network_api')
+ @mock.patch.object(self.compute, '_notify_about_instance_usage')
+ @mock.patch.object(self.compute, '_get_instance_block_device_info')
+ @mock.patch.object(self.compute, '_get_power_state', return_value=1)
+ @mock.patch.object(self.compute, '_get_compute_info',
+ side_effect=exception.ComputeHostNotFound(
+ host='fake'))
+ @mock.patch.object(self.compute.driver,
+ 'post_live_migration_at_destination')
+ def _do_test(post_live_migration_at_destination, _get_compute_info,
+ _get_power_state, _get_instance_block_device_info,
+ _notify_about_instance_usage, network_api, save):
+ cn = mock.Mock(spec_set=['hypervisor_hostname'])
+ cn.hypervisor_hostname = 'test_host'
+ _get_compute_info.return_value = cn
+
+ self.compute.post_live_migration_at_destination(
+ self.context, self.instance, False)
+ self.assertIsNone(self.instance.node)
+
+ _do_test()
+
+ def test_post_live_migration_at_destination_unexpected_exception(self):
+
+ @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+ @mock.patch.object(self.instance, 'save')
+ @mock.patch.object(self.compute, 'network_api')
+ @mock.patch.object(self.compute, '_notify_about_instance_usage')
+ @mock.patch.object(self.compute, '_get_instance_block_device_info')
+ @mock.patch.object(self.compute, '_get_power_state', return_value=1)
+ @mock.patch.object(self.compute, '_get_compute_info')
+ @mock.patch.object(self.compute.driver,
+ 'post_live_migration_at_destination',
+ side_effect=exception.NovaException)
+ def _do_test(post_live_migration_at_destination, _get_compute_info,
+ _get_power_state, _get_instance_block_device_info,
+ _notify_about_instance_usage, network_api, save,
+ add_instance_fault_from_exc):
+ cn = mock.Mock(spec_set=['hypervisor_hostname'])
+ cn.hypervisor_hostname = 'test_host'
+ _get_compute_info.return_value = cn
+
+ self.assertRaises(exception.NovaException,
+ self.compute.post_live_migration_at_destination,
+ self.context, self.instance, False)
+ self.assertEqual(vm_states.ERROR, self.instance.vm_state)
+
+ _do_test()
+
+
+class ComputeManagerInstanceUsageAuditTestCase(test.TestCase):
+ def setUp(self):
+ super(ComputeManagerInstanceUsageAuditTestCase, self).setUp()
+ self.flags(use_local=True, group='conductor')
+ self.flags(instance_usage_audit=True)
+
+ @mock.patch('nova.objects.TaskLog')
+ def test_deleted_instance(self, mock_task_log):
+ mock_task_log.get.return_value = None
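+ # No pre-existing TaskLog entry for the period, so the audit run
+ # proceeds instead of being skipped as already recorded.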
+
+ compute = importutils.import_object(CONF.compute_manager)
+ admin_context = context.get_admin_context()
+
+ fake_db_flavor = fake_flavor.fake_db_flavor()
+ flavor = objects.Flavor(admin_context, **fake_db_flavor)
+
+ updates = {'host': compute.host, 'flavor': flavor, 'root_gb': 0,
+ 'ephemeral_gb': 0}
+
+ # fudge the beginning and ending times by a second (backwards and
+ # forwards, respectively) so that, after sub-second truncation, they
+ # still differ from the instance's launch and termination times and
+ # those times fall inside the audit period
+ one_second = datetime.timedelta(seconds=1)
+
+ begin = timeutils.utcnow() - one_second
+ instance = objects.Instance(admin_context, **updates)
+ instance.create()
+ instance.launched_at = timeutils.utcnow()
+ instance.save()
+ instance.destroy()
+ end = timeutils.utcnow() + one_second
+
+ def fake_last_completed_audit_period():
+ return (begin, end)
+
+ self.stub_out('nova.utils.last_completed_audit_period',
+ fake_last_completed_audit_period)
+
+ compute._instance_usage_audit(admin_context)
+
+ self.assertEqual(1, mock_task_log().task_items,
+ 'the deleted test instance was not found in the audit'
+ ' period')
+ self.assertEqual(0, mock_task_log().errors,
+ 'an error was encountered processing the deleted test'
+ ' instance')
diff --git a/nova/tests/unit/compute/test_compute_utils.py b/nova/tests/unit/compute/test_compute_utils.py
index cac60775be..429b831227 100644
--- a/nova/tests/unit/compute/test_compute_utils.py
+++ b/nova/tests/unit/compute/test_compute_utils.py
@@ -21,7 +21,6 @@ import string
import uuid
import mock
-from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
@@ -30,8 +29,8 @@ from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
+import nova.conf
from nova import context
-from nova import db
from nova import exception
from nova.image import glance
from nova.network import api as network_api
@@ -51,9 +50,8 @@ from nova.tests.unit.objects import test_migration
from nova.tests import uuidsentinel as uuids
from nova.virt import driver
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('compute_driver', 'nova.virt.driver')
def create_instance(context, user_id='fake', project_id='fake', params=None):
@@ -103,8 +101,8 @@ class ComputeValidateDeviceTestCase(test.NoDBTestCase):
self.data = []
- self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
- lambda context, instance, use_slave=False: self.data)
+ self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
+ lambda context, instance: self.data)
def _validate_device(self, device=None):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
diff --git a/nova/tests/unit/compute/test_compute_xen.py b/nova/tests/unit/compute/test_compute_xen.py
index 5b036ce399..36d57bb6a3 100644
--- a/nova/tests/unit/compute/test_compute_xen.py
+++ b/nova/tests/unit/compute/test_compute_xen.py
@@ -12,10 +12,10 @@
"""Tests for expectations of behaviour from the Xen driver."""
-from oslo_config import cfg
from oslo_utils import importutils
from nova.compute import power_state
+import nova.conf
from nova import context
from nova import objects
from nova.objects import instance as instance_obj
@@ -24,9 +24,8 @@ from nova.tests.unit import fake_instance
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import vm_utils
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('compute_driver', 'nova.virt.driver')
class ComputeXenTestCase(stubs.XenAPITestBaseNoDB):
diff --git a/nova/tests/unit/compute/test_keypairs.py b/nova/tests/unit/compute/test_keypairs.py
index 58204f2602..f85825a163 100644
--- a/nova/tests/unit/compute/test_keypairs.py
+++ b/nova/tests/unit/compute/test_keypairs.py
@@ -20,7 +20,6 @@ import six
from nova.compute import api as compute_api
from nova import context
-from nova import db
from nova import exception
from nova.objects import keypair as keypair_obj
from nova import quota
@@ -76,14 +75,11 @@ class KeypairAPITestCase(test_compute.BaseTestCase):
else:
raise exception.KeypairNotFound(user_id=user_id, name=name)
- self.stubs.Set(db, "key_pair_get_all_by_user",
+ self.stub_out("nova.db.key_pair_get_all_by_user",
db_key_pair_get_all_by_user)
- self.stubs.Set(db, "key_pair_create",
- db_key_pair_create)
- self.stubs.Set(db, "key_pair_destroy",
- db_key_pair_destroy)
- self.stubs.Set(db, "key_pair_get",
- db_key_pair_get)
+ self.stub_out("nova.db.key_pair_create", db_key_pair_create)
+ self.stub_out("nova.db.key_pair_destroy", db_key_pair_destroy)
+ self.stub_out("nova.db.key_pair_get", db_key_pair_get)
def _check_notifications(self, action='create', key_name='foo'):
self.assertEqual(2, len(fake_notifier.NOTIFICATIONS))
@@ -146,7 +142,7 @@ class CreateImportSharedTestMixIn(object):
def db_key_pair_create_duplicate(context, keypair):
raise exception.KeyPairExists(key_name=keypair.get('name', ''))
- self.stubs.Set(db, "key_pair_create", db_key_pair_create_duplicate)
+ self.stub_out("nova.db.key_pair_create", db_key_pair_create_duplicate)
msg = ("Key pair '%(key_name)s' already exists." %
{'key_name': self.existing_key_name})
diff --git a/nova/tests/unit/compute/test_multiple_nodes.py b/nova/tests/unit/compute/test_multiple_nodes.py
index 1f4b2c57ed..4e9aa1a634 100644
--- a/nova/tests/unit/compute/test_multiple_nodes.py
+++ b/nova/tests/unit/compute/test_multiple_nodes.py
@@ -14,19 +14,18 @@
# under the License.
"""Tests for compute service with multiple compute nodes."""
-from oslo_config import cfg
from oslo_utils import importutils
+import nova.conf
from nova import context
-from nova import db
from nova import objects
from nova import test
+from nova.tests import uuidsentinel
from nova.virt import fake
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('compute_driver', 'nova.virt.driver')
class BaseTestCase(test.TestCase):
@@ -74,6 +73,7 @@ class MultiNodeComputeTestCase(BaseTestCase):
def fake_get_compute_nodes_in_db(context, use_slave=False):
fake_compute_nodes = [{'local_gb': 259,
+ 'uuid': uuidsentinel.fake_compute,
'vcpus_used': 0,
'deleted': 0,
'hypervisor_type': 'powervm',
@@ -100,6 +100,7 @@ class MultiNodeComputeTestCase(BaseTestCase):
'host': 'fake_phyp1',
'cpu_allocation_ratio': None,
'ram_allocation_ratio': None,
+ 'disk_allocation_ratio': None,
'host_ip': '127.0.0.1'}]
return [objects.ComputeNode._from_db_object(
context, objects.ComputeNode(), cn)
@@ -110,7 +111,7 @@ class MultiNodeComputeTestCase(BaseTestCase):
self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
fake_get_compute_nodes_in_db)
- self.stubs.Set(db, 'compute_node_delete',
+ self.stub_out('nova.db.compute_node_delete',
fake_compute_node_delete)
def test_update_available_resource_add_remove_node(self):
@@ -152,7 +153,7 @@ class MultiNodeComputeTestCase(BaseTestCase):
self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
fake_get_compute_nodes_in_db)
- self.stubs.Set(db, 'compute_node_delete',
+ self.stub_out('nova.db.compute_node_delete',
fake_compute_node_delete)
self.compute.update_available_resource(ctx)
diff --git a/nova/tests/unit/compute/test_resource_tracker.py b/nova/tests/unit/compute/test_resource_tracker.py
index 3137a56ad2..c2a0ab65d9 100644
--- a/nova/tests/unit/compute/test_resource_tracker.py
+++ b/nova/tests/unit/compute/test_resource_tracker.py
@@ -17,13 +17,13 @@
import copy
import datetime
-import six
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
+import six
from nova.compute.monitors import base as monitor_base
from nova.compute import resource_tracker
@@ -31,7 +31,6 @@ from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
-from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
@@ -40,6 +39,7 @@ from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.pci import fakes as pci_fakes
+from nova.tests import uuidsentinel
from nova.virt import driver
@@ -130,7 +130,6 @@ class FakeVirtDriver(driver.ComputeDriver):
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
- 'parent_addr': None,
},
{
'label': 'label_8086_0123',
@@ -142,7 +141,6 @@ class FakeVirtDriver(driver.ComputeDriver):
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
- 'parent_addr': None,
},
{
'label': 'label_8086_7891',
@@ -254,6 +252,7 @@ class BaseTestCase(test.TestCase):
# This creates a db representation of a compute_node.
compute = {
"id": 1,
+ "uuid": uuidsentinel.fake_compute_node,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
@@ -281,6 +280,7 @@ class BaseTestCase(test.TestCase):
'deleted': False,
'cpu_allocation_ratio': None,
'ram_allocation_ratio': None,
+ 'disk_allocation_ratio': None,
}
if values:
compute.update(values)
@@ -524,11 +524,11 @@ class MissingComputeNodeTestCase(BaseTestCase):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
- self.stubs.Set(db, 'service_get_by_compute_host',
+ self.stub_out('nova.db.service_get_by_compute_host',
self._fake_service_get_by_compute_host)
- self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
+ self.stub_out('nova.db.compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
- self.stubs.Set(db, 'compute_node_create',
+ self.stub_out('nova.db.compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
@@ -565,17 +565,17 @@ class BaseTrackerTestCase(BaseTestCase):
self.tracker = self._tracker()
self._migrations = {}
- self.stubs.Set(db, 'service_get_by_compute_host',
+ self.stub_out('nova.db.service_get_by_compute_host',
self._fake_service_get_by_compute_host)
- self.stubs.Set(db, 'compute_node_get_by_host_and_nodename',
+ self.stub_out('nova.db.compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
- self.stubs.Set(db, 'compute_node_update',
+ self.stub_out('nova.db.compute_node_update',
self._fake_compute_node_update)
- self.stubs.Set(db, 'compute_node_delete',
+ self.stub_out('nova.db.compute_node_delete',
self._fake_compute_node_delete)
- self.stubs.Set(db, 'migration_update',
+ self.stub_out('nova.db.migration_update',
self._fake_migration_update)
- self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
+ self.stub_out('nova.db.migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
@@ -1008,19 +1008,46 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
"fakenode")
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
- def test_instances_with_live_migrations(self, mock_migration_list):
+ @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
+ def test_instances_with_live_migrations(self, mock_instance_list,
+ mock_migration_list):
instance = self._fake_instance_obj()
migration = objects.Migration(context=self.context,
migration_type='live-migration',
instance_uuid=instance.uuid)
mock_migration_list.return_value = [migration]
- self.tracker.update_available_resource(self.context)
- self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
- self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
+ mock_instance_list.return_value = [instance]
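+ # With a live migration in progress, the instance's memory and disk
+ # must still be accounted against this host (asserted below against
+ # the fake virt driver's values).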
+ with mock.patch.object(self.tracker, '_pair_instances_to_migrations'
+ ) as mock_pair:
+ self.tracker.update_available_resource(self.context)
+ self.assertTrue(mock_pair.called)
+ self.assertEqual(
+ instance.uuid,
+ mock_pair.call_args_list[0][0][0][0].instance_uuid)
+ self.assertEqual(instance.uuid,
+ mock_pair.call_args_list[0][0][1][0].uuid)
+ self.assertEqual(
+ ['system_metadata', 'numa_topology', 'flavor',
+ 'migration_context'],
+ mock_instance_list.call_args_list[0][1]['expected_attrs'])
+ self.assertEqual(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD,
+ self.tracker.compute_node['memory_mb_used'])
+ self.assertEqual(ROOT_GB + EPHEMERAL_GB,
+ self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
+ def test_pair_instances_to_migrations(self):
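+ # The instance list is deliberately out of order relative to the
+ # migrations, so the pairing must match on uuid rather than position.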
+ migrations = [objects.Migration(instance_uuid=uuidsentinel.instance1),
+ objects.Migration(instance_uuid=uuidsentinel.instance2)]
+ instances = [objects.Instance(uuid=uuidsentinel.instance2),
+ objects.Instance(uuid=uuidsentinel.instance1)]
+ self.tracker._pair_instances_to_migrations(migrations, instances)
+ order = [uuidsentinel.instance1, uuidsentinel.instance2]
+ for i, migration in enumerate(migrations):
+ self.assertEqual(order[i], migration.instance.uuid)
+
@mock.patch('nova.compute.claims.Claim')
@mock.patch('nova.objects.Instance.save')
def test_claim_saves_numa_topology(self, mock_save, mock_claim):
diff --git a/nova/tests/unit/compute/test_resources.py b/nova/tests/unit/compute/test_resources.py
index 4381e8314e..6e333c3f1a 100644
--- a/nova/tests/unit/compute/test_resources.py
+++ b/nova/tests/unit/compute/test_resources.py
@@ -240,8 +240,8 @@ class BaseTestCase(test.NoDBTestCase):
self._initialize_used_res_counter()
result = self.r_handler.test_resources(flavor, limits)
expected = ['Free 4 < requested 5 ', None]
- self.assertEqual(sorted(expected),
- sorted(result))
+ self.assertEqual(sorted(expected, key=str),
+ sorted(result, key=str))
def test_empty_resource_handler(self):
"""An empty resource handler has no resource extensions,
diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py
index 3b9bf97320..8396adef92 100644
--- a/nova/tests/unit/compute/test_rpcapi.py
+++ b/nova/tests/unit/compute/test_rpcapi.py
@@ -24,6 +24,7 @@ from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova import exception
from nova.objects import block_device as objects_block_dev
+from nova.objects import migrate_data as migrate_data_obj
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_flavor
@@ -134,9 +135,6 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
if '_return_value' in kwargs:
rpc_mock.return_value = kwargs.pop('_return_value')
del expected_kwargs['_return_value']
- elif 'return_bdm_object' in kwargs:
- del kwargs['return_bdm_object']
- rpc_mock.return_value = objects_block_dev.BlockDeviceMapping()
elif rpc_method == 'call':
rpc_mock.return_value = 'foo'
else:
@@ -198,7 +196,31 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
def test_detach_volume(self):
self._test_compute_api('detach_volume', 'cast',
instance=self.fake_instance_obj, volume_id='id',
- version='4.0')
+ attachment_id='fake_id', version='4.7')
+
+ def test_detach_volume_no_attachment_id(self):
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ instance = self.fake_instance_obj
+ rpcapi = compute_rpcapi.ComputeAPI()
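+ # Simulate an old compute service: can_send_version() returns False,
+ # so the client should pin to version 4.0 and drop attachment_id from
+ # the cast (asserted below).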
+ cast_mock = mock.Mock()
+ cctxt_mock = mock.Mock(cast=cast_mock)
+ with test.nested(
+ mock.patch.object(rpcapi.client, 'can_send_version',
+ return_value=False),
+ mock.patch.object(rpcapi.client, 'prepare',
+ return_value=cctxt_mock)
+ ) as (
+ can_send_mock, prepare_mock
+ ):
+ rpcapi.detach_volume(ctxt, instance=instance,
+ volume_id='id', attachment_id='fake_id')
+ # assert our mocks were called as expected
+ can_send_mock.assert_called_once_with('4.7')
+ prepare_mock.assert_called_once_with(server=instance['host'],
+ version='4.0')
+ cast_mock.assert_called_once_with(ctxt, 'detach_volume',
+ instance=instance,
+ volume_id='id')
def test_finish_resize(self):
self._test_compute_api('finish_resize', 'cast',
@@ -280,7 +302,12 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
instance=self.fake_instance_obj, dest='dest',
block_migration='blockity_block', host='tsoh',
migration='migration',
- migrate_data={}, version='4.2')
+ migrate_data={}, version='4.8')
+
+ def test_live_migration_force_complete(self):
+ self._test_compute_api('live_migration_force_complete', 'cast',
+ instance=self.fake_instance_obj,
+ migration_id='1', version='4.9')
def test_post_live_migration_at_destination(self):
self._test_compute_api('post_live_migration_at_destination', 'cast',
@@ -309,7 +336,7 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
self._test_compute_api('pre_live_migration', 'call',
instance=self.fake_instance_obj,
block_migration='block_migration', disk='disk', host='host',
- migrate_data=None, version='4.0')
+ migrate_data=None, version='4.8')
def test_prep_resize(self):
self._test_compute_api('prep_resize', 'cast',
@@ -361,10 +388,6 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
version='4.0',
_return_value=objects_block_dev.BlockDeviceMapping())
- def refresh_provider_fw_rules(self):
- self._test_compute_api('refresh_provider_fw_rules', 'cast',
- host='host')
-
def test_refresh_instance_security_rules(self):
expected_args = {'instance': self.fake_instance_obj}
self._test_compute_api('refresh_instance_security_rules', 'cast',
@@ -530,3 +553,111 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
self._test_compute_api,
'trigger_crash_dump', 'cast',
instance=self.fake_instance_obj, version='4.6')
+
+ def _test_simple_call(self, method, inargs, callargs, callret,
+ calltype='call', can_send=False):
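+ # Helper: drive an RPC API method through a fully mocked client and
+ # assert the underlying call/cast received exactly callargs; the
+ # method's result is returned so callers can verify conversions.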
+ rpc = compute_rpcapi.ComputeAPI()
+
+ @mock.patch.object(rpc, 'client')
+ @mock.patch.object(compute_rpcapi, '_compute_host')
+ def _test(mock_ch, mock_client):
+ mock_client.can_send_version.return_value = can_send
+ call = getattr(mock_client.prepare.return_value, calltype)
+ call.return_value = callret
+ ctxt = mock.MagicMock()
+ result = getattr(rpc, method)(ctxt, **inargs)
+ call.assert_called_once_with(ctxt, method, **callargs)
+ return result
+
+ return _test()
+
+ def test_check_can_live_migrate_source_converts_objects(self):
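+ # The RPC API serializes LiveMigrateData objects to legacy dicts on
+ # the wire, and converts dict return values back into objects.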
+ obj = migrate_data_obj.LiveMigrateData()
+ result = self._test_simple_call('check_can_live_migrate_source',
+ inargs={'instance': 'foo',
+ 'dest_check_data': obj},
+ callargs={'instance': 'foo',
+ 'dest_check_data': {}},
+ callret=obj)
+ self.assertEqual(obj, result)
+ result = self._test_simple_call('check_can_live_migrate_source',
+ inargs={'instance': 'foo',
+ 'dest_check_data': obj},
+ callargs={'instance': 'foo',
+ 'dest_check_data': {}},
+ callret={'foo': 'bar'})
+ self.assertIsInstance(result, migrate_data_obj.LiveMigrateData)
+
+ @mock.patch('nova.objects.migrate_data.LiveMigrateData.'
+ 'detect_implementation')
+ def test_check_can_live_migrate_destination_converts_dict(self,
+ mock_det):
+ result = self._test_simple_call('check_can_live_migrate_destination',
+ inargs={'instance': 'foo',
+ 'destination': 'bar',
+ 'block_migration': False,
+ 'disk_over_commit': False},
+ callargs={'instance': 'foo',
+ 'block_migration': False,
+ 'disk_over_commit': False},
+ callret={'foo': 'bar'})
+ self.assertEqual(mock_det.return_value, result)
+
+ def test_live_migration_converts_objects(self):
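+ # With can_send=False the object is flattened to its legacy dict form,
+ # which carries an empty pre_live_migration_result for old computes.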
+ obj = migrate_data_obj.LiveMigrateData()
+ self._test_simple_call('live_migration',
+ inargs={'instance': 'foo',
+ 'dest': 'foo',
+ 'block_migration': False,
+ 'host': 'foo',
+ 'migration': None,
+ 'migrate_data': obj},
+ callargs={'instance': 'foo',
+ 'dest': 'foo',
+ 'block_migration': False,
+ 'migration': None,
+ 'migrate_data': {
+ 'pre_live_migration_result': {}}},
+ callret=None,
+ calltype='cast')
+
+ def test_pre_live_migration_converts_objects(self):
+ obj = migrate_data_obj.LiveMigrateData()
+ result = self._test_simple_call('pre_live_migration',
+ inargs={'instance': 'foo',
+ 'block_migration': False,
+ 'disk': None,
+ 'host': 'foo',
+ 'migrate_data': obj},
+ callargs={'instance': 'foo',
+ 'block_migration': False,
+ 'disk': None,
+ 'migrate_data': {}},
+ callret=obj)
+ self.assertEqual(obj, result)
+ result = self._test_simple_call('pre_live_migration',
+ inargs={'instance': 'foo',
+ 'block_migration': False,
+ 'disk': None,
+ 'host': 'foo',
+ 'migrate_data': obj},
+ callargs={'instance': 'foo',
+ 'block_migration': False,
+ 'disk': None,
+ 'migrate_data': {}},
+ callret={'foo': 'bar'})
+ self.assertIsInstance(result, migrate_data_obj.LiveMigrateData)
+
+ def test_rollback_live_migration_at_destination_converts_objects(self):
+ obj = migrate_data_obj.LiveMigrateData()
+ method = 'rollback_live_migration_at_destination'
+ self._test_simple_call(method,
+ inargs={'instance': 'foo',
+ 'host': 'foo',
+ 'destroy_disks': False,
+ 'migrate_data': obj},
+ callargs={'instance': 'foo',
+ 'destroy_disks': False,
+ 'migrate_data': {}},
+ callret=None,
+ calltype='cast')
diff --git a/nova/tests/unit/compute/test_shelve.py b/nova/tests/unit/compute/test_shelve.py
index 8b8ffb5d14..de306c4057 100644
--- a/nova/tests/unit/compute/test_shelve.py
+++ b/nova/tests/unit/compute/test_shelve.py
@@ -148,6 +148,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute.network_api,
'cleanup_instance_network_on_host')
+ self.mox.StubOutWithMock(self.compute, '_update_resource_tracker')
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.start')
@@ -161,6 +162,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.context, instance, instance.host)
self.compute._get_power_state(self.context,
instance).AndReturn(123)
+ self.compute._update_resource_tracker(self.context, instance)
self.compute._notify_about_instance_usage(self.context, instance,
'shelve_offload.end')
self.mox.ReplayAll()
@@ -241,7 +243,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
self.compute.network_api.setup_instance_network_on_host(
self.context, instance, self.compute.host)
- self.compute.driver.spawn(self.context, instance, image,
+ self.compute.driver.spawn(self.context, instance,
+ mox.IsA(objects.ImageMeta),
injected_files=[], admin_password=None,
network_info=[],
block_device_info='fake_bdm')
@@ -317,7 +320,8 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
self.rt.instance_claim(self.context, instance, limits).AndReturn(
claims.Claim(self.context, instance, self.rt,
_fake_resources()))
- self.compute.driver.spawn(self.context, instance, image_meta,
+ self.compute.driver.spawn(self.context, instance,
+ mox.IsA(objects.ImageMeta),
injected_files=[], admin_password=None,
network_info=[],
block_device_info='fake_bdm')
@@ -474,7 +478,8 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
- def test_unshelve(self):
+ @mock.patch.object(objects.RequestSpec, 'get_by_instance_uuid')
+ def test_unshelve(self, get_by_instance_uuid):
# Ensure instance can be unshelved.
instance = self._create_fake_instance_obj()
@@ -486,7 +491,14 @@ class ShelveComputeAPITestCase(test_compute.BaseTestCase):
instance.vm_state = vm_states.SHELVED
instance.save()
- self.compute_api.unshelve(self.context, instance)
+ fake_spec = objects.RequestSpec()
+ get_by_instance_uuid.return_value = fake_spec
+ with mock.patch.object(self.compute_api.compute_task_api,
+ 'unshelve_instance') as unshelve:
+ self.compute_api.unshelve(self.context, instance)
+ get_by_instance_uuid.assert_called_once_with(self.context,
+ instance.uuid)
+ unshelve.assert_called_once_with(self.context, instance, fake_spec)
self.assertEqual(instance.task_state, task_states.UNSHELVING)
diff --git a/nova/tests/unit/compute/test_stats.py b/nova/tests/unit/compute/test_stats.py
index 0c9219983a..fffe59f0a7 100644
--- a/nova/tests/unit/compute/test_stats.py
+++ b/nova/tests/unit/compute/test_stats.py
@@ -20,6 +20,7 @@ from nova.compute import task_states
from nova.compute import vm_states
from nova import test
from nova.tests.unit import fake_instance
+from nova.tests import uuidsentinel as uuids
class StatsTestCase(test.NoDBTestCase):
@@ -37,7 +38,7 @@ class StatsTestCase(test.NoDBTestCase):
"task_state": None,
"vm_state": vm_states.BUILDING,
"vcpus": 1,
- "uuid": "12-34-56-78-90",
+ "uuid": uuids.stats_linux_instance_1,
}
if values:
instance.update(values)
@@ -79,7 +80,7 @@ class StatsTestCase(test.NoDBTestCase):
"task_state": None,
"vm_state": vm_states.BUILDING,
"vcpus": 3,
- "uuid": "12-34-56-78-90",
+ "uuid": uuids.stats_linux_instance_1,
}
self.stats.update_stats_for_instance(self._fake_object(instance))
@@ -89,7 +90,7 @@ class StatsTestCase(test.NoDBTestCase):
"task_state": task_states.SCHEDULING,
"vm_state": None,
"vcpus": 1,
- "uuid": "23-45-67-89-01",
+ "uuid": uuids.stats_freebsd_instance,
}
self.stats.update_stats_for_instance(self._fake_object(instance))
@@ -99,7 +100,7 @@ class StatsTestCase(test.NoDBTestCase):
"task_state": task_states.SCHEDULING,
"vm_state": vm_states.BUILDING,
"vcpus": 2,
- "uuid": "34-56-78-90-12",
+ "uuid": uuids.stats_linux_instance_2,
}
self.stats.update_stats_for_instance(self._fake_object(instance))
@@ -110,7 +111,7 @@ class StatsTestCase(test.NoDBTestCase):
"task_state": task_states.RESCUING,
"vm_state": vm_states.ACTIVE,
"vcpus": 2,
- "uuid": "34-56-78-90-13",
+ "uuid": uuids.stats_linux_instance_3,
}
self.stats.update_stats_for_instance(self._fake_object(instance))
@@ -121,7 +122,7 @@ class StatsTestCase(test.NoDBTestCase):
"task_state": task_states.UNSHELVING,
"vm_state": vm_states.ACTIVE,
"vcpus": 2,
- "uuid": "34-56-78-90-14",
+ "uuid": uuids.stats_linux_instance_4,
}
self.stats.update_stats_for_instance(self._fake_object(instance))
@@ -194,6 +195,19 @@ class StatsTestCase(test.NoDBTestCase):
self.assertEqual(0, self.stats.num_os_type("Linux"))
self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING])
+ def test_update_stats_for_instance_offloaded(self):
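+ # A shelved-offloaded instance no longer consumes host resources, so
+ # updating its stats should remove it from all tracked totals.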
+ instance = self._create_instance()
+ self.stats.update_stats_for_instance(instance)
+ self.assertEqual(1, self.stats["num_proj_1234"])
+
+ instance["vm_state"] = vm_states.SHELVED_OFFLOADED
+ self.stats.update_stats_for_instance(instance)
+
+ self.assertEqual(0, self.stats.num_instances)
+ self.assertEqual(0, self.stats.num_instances_for_project("1234"))
+ self.assertEqual(0, self.stats.num_os_type("Linux"))
+ self.assertEqual(0, self.stats["num_vm_" + vm_states.BUILDING])
+
def test_io_workload(self):
vms = [vm_states.ACTIVE, vm_states.BUILDING, vm_states.PAUSED]
tasks = [task_states.RESIZE_MIGRATING, task_states.REBUILDING,
diff --git a/nova/tests/unit/compute/test_tracker.py b/nova/tests/unit/compute/test_tracker.py
index ac6aa310ab..d989d4aabe 100644
--- a/nova/tests/unit/compute/test_tracker.py
+++ b/nova/tests/unit/compute/test_tracker.py
@@ -74,6 +74,7 @@ _COMPUTE_NODE_FIXTURES = [
numa_topology=None,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0,
),
]
@@ -452,7 +453,9 @@ class TestUpdateAvailableResources(BaseTestCase):
'fake-node',
expected_attrs=[
'system_metadata',
- 'numa_topology'])
+ 'numa_topology',
+ 'flavor',
+ 'migration_context'])
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
migr_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
@@ -979,6 +982,7 @@ class TestInitComputeNode(BaseTestCase):
get_mock.side_effect = exc.NotFound
cpu_alloc_ratio = 1.0
ram_alloc_ratio = 1.0
+ disk_alloc_ratio = 1.0
resources = {
'host_ip': '1.1.1.1',
@@ -1021,11 +1025,13 @@ class TestInitComputeNode(BaseTestCase):
# NOTE(sbauza): ResourceTracker adds CONF allocation ratios
ram_allocation_ratio=ram_alloc_ratio,
cpu_allocation_ratio=cpu_alloc_ratio,
+ disk_allocation_ratio=disk_alloc_ratio,
)
# Forcing the flags to the values we know
self.rt.ram_allocation_ratio = ram_alloc_ratio
self.rt.cpu_allocation_ratio = cpu_alloc_ratio
+ self.rt.disk_allocation_ratio = disk_alloc_ratio
self.rt._init_compute_node(mock.sentinel.ctx, resources)
@@ -1037,7 +1043,8 @@ class TestInitComputeNode(BaseTestCase):
self.rt.compute_node))
def test_copy_resources_adds_allocation_ratios(self):
- self.flags(cpu_allocation_ratio=4.0, ram_allocation_ratio=3.0)
+ self.flags(cpu_allocation_ratio=4.0, ram_allocation_ratio=3.0,
+ disk_allocation_ratio=2.0)
self._setup_rt()
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
@@ -1047,6 +1054,7 @@ class TestInitComputeNode(BaseTestCase):
self.rt._copy_resources(resources)
self.assertEqual(4.0, self.rt.compute_node.cpu_allocation_ratio)
self.assertEqual(3.0, self.rt.compute_node.ram_allocation_ratio)
+ self.assertEqual(2.0, self.rt.compute_node.disk_allocation_ratio)
class TestUpdateComputeNode(BaseTestCase):
@@ -1079,6 +1087,7 @@ class TestUpdateComputeNode(BaseTestCase):
running_vms=0,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0,
)
self.rt.compute_node = compute
self.rt._update(mock.sentinel.ctx)
@@ -1126,6 +1135,7 @@ class TestUpdateComputeNode(BaseTestCase):
running_vms=0,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0,
)
self.rt.compute_node = compute
self.rt._update(mock.sentinel.ctx)
@@ -1218,6 +1228,40 @@ class TestInstanceClaim(BaseTestCase):
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
+ def test_update_usage_removed(self, migr_mock, pci_mock):
+ # Test that RT.update_usage() drops the instance's usage once the
+ # instance has entered a removed state (SHELVED_OFFLOADED here)
+ pci_mock.return_value = objects.InstancePCIRequests(requests=[])
+
+ expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
+ disk_used = self.instance.root_gb + self.instance.ephemeral_gb
+ expected.update({
+ 'local_gb_used': disk_used,
+ 'memory_mb_used': self.instance.memory_mb,
+ 'free_disk_gb': expected['local_gb'] - disk_used,
+ "free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
+ 'running_vms': 1,
+ 'vcpus_used': 1,
+ 'pci_device_pools': objects.PciDevicePoolList(),
+ })
+ with mock.patch.object(self.rt, '_update') as update_mock:
+ with mock.patch.object(self.instance, 'save'):
+ self.rt.instance_claim(self.ctx, self.instance, None)
+ update_mock.assert_called_once_with(self.elevated)
+ self.assertTrue(obj_base.obj_equal_prims(expected,
+ self.rt.compute_node))
+
+ expected_updated = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
+ expected_updated['pci_device_pools'] = objects.PciDevicePoolList()
+
+ self.instance.vm_state = vm_states.SHELVED_OFFLOADED
+ with mock.patch.object(self.rt, '_update') as update_mock:
+ self.rt.update_usage(self.ctx, self.instance)
+ self.assertTrue(obj_base.obj_equal_prims(expected_updated,
+ self.rt.compute_node))
+
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
+ @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim(self, migr_mock, pci_mock):
self.assertFalse(self.rt.disabled)
diff --git a/nova/tests/unit/compute/test_virtapi.py b/nova/tests/unit/compute/test_virtapi.py
index 5a236760d4..28a4f4cdd6 100644
--- a/nova/tests/unit/compute/test_virtapi.py
+++ b/nova/tests/unit/compute/test_virtapi.py
@@ -42,9 +42,6 @@ class VirtAPIBaseTest(test.NoDBTestCase, test.APICoverage):
getattr(self.virtapi, method), self.context,
*args, **kwargs)
- def test_provider_fw_rule_get_all(self):
- self.assertExpected('provider_fw_rule_get_all')
-
def test_wait_for_instance_event(self):
self.assertExpected('wait_for_instance_event',
'instance', ['event'])
diff --git a/nova/tests/unit/conductor/tasks/test_live_migrate.py b/nova/tests/unit/conductor/tasks/test_live_migrate.py
index d3b3549635..ff48f98c42 100644
--- a/nova/tests/unit/conductor/tasks/test_live_migrate.py
+++ b/nova/tests/unit/conductor/tasks/test_live_migrate.py
@@ -11,11 +11,11 @@
# under the License.
import mock
-from mox3 import mox
import oslo_messaging as messaging
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
+from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova import exception
from nova import objects
@@ -38,6 +38,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
host=self.instance_host,
uuid=self.instance_uuid,
power_state=power_state.RUNNING,
+ vm_state=vm_states.ACTIVE,
memory_mb=512,
image_ref=self.instance_image)
self.instance = objects.Instance._from_db_object(
@@ -47,13 +48,15 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.block_migration = "bm"
self.disk_over_commit = "doc"
self.migration = objects.Migration()
+ self.fake_spec = objects.RequestSpec()
self._generate_task()
def _generate_task(self):
self.task = live_migrate.LiveMigrationTask(self.context,
self.instance, self.destination, self.block_migration,
self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(),
- servicegroup.API(), scheduler_client.SchedulerClient())
+ servicegroup.API(), scheduler_client.SchedulerClient(),
+ self.fake_spec)
def test_execute_with_destination(self):
self.mox.StubOutWithMock(self.task, '_check_host_is_up')
@@ -250,9 +253,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
def test_find_destination_works(self):
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
- self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
@@ -261,16 +262,11 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
- scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn({})
+ fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
- self.context, {}, {'ignore_hosts': [self.instance_host]})
- fake_spec = objects.RequestSpec()
- objects.RequestSpec.from_primitives(
- self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(fake_spec)
+ self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(
- self.context, fake_spec).AndReturn(
+ self.context, self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
@@ -278,30 +274,57 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.mox.ReplayAll()
self.assertEqual("host1", self.task._find_destination())
+ def test_find_destination_works_with_no_request_spec(self):
+ task = live_migrate.LiveMigrationTask(
+ self.context, self.instance, self.destination,
+ self.block_migration, self.disk_over_commit, self.migration,
+ compute_rpcapi.ComputeAPI(), servicegroup.API(),
+ scheduler_client.SchedulerClient(), request_spec=None)
+ another_spec = objects.RequestSpec()
+ self.instance.flavor = objects.Flavor()
+ self.instance.numa_topology = None
+ self.instance.pci_requests = None
+
+ @mock.patch.object(task, '_call_livem_checks_on_host')
+ @mock.patch.object(task, '_check_compatible_with_source_hypervisor')
+ @mock.patch.object(task.scheduler_client, 'select_destinations')
+ @mock.patch.object(objects.RequestSpec, 'from_components')
+ @mock.patch.object(scheduler_utils, 'setup_instance_group')
+ @mock.patch.object(utils, 'get_image_from_system_metadata')
+ def do_test(get_image, setup_ig, from_components, select_dest,
+ check_compat, call_livem_checks):
+ get_image.return_value = "image"
+ from_components.return_value = another_spec
+ select_dest.return_value = [{'host': 'host1'}]
+
+ self.assertEqual("host1", task._find_destination())
+
+ get_image.assert_called_once_with(self.instance.system_metadata)
+ fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
+ setup_ig.assert_called_once_with(
+ self.context, fake_props,
+ {'ignore_hosts': [self.instance_host]}
+ )
+ select_dest.assert_called_once_with(self.context, another_spec)
+ check_compat.assert_called_once_with("host1")
+ call_livem_checks.assert_called_once_with("host1")
+ do_test()
+
def test_find_destination_no_image_works(self):
self.instance['image_ref'] = ''
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
- self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
'_check_compatible_with_source_hypervisor')
self.mox.StubOutWithMock(self.task, '_call_livem_checks_on_host')
- scheduler_utils.build_request_spec(
- self.context,
- {'properties': {'hw_disk_bus': 'scsi'}},
- mox.IgnoreArg()).AndReturn({})
+ fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
- self.context, {}, {'ignore_hosts': [self.instance_host]})
- fake_spec = objects.RequestSpec()
- objects.RequestSpec.from_primitives(
- self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(fake_spec)
+ self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
- fake_spec).AndReturn(
+ self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
@@ -311,9 +334,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
def _test_find_destination_retry_hypervisor_raises(self, error):
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
- self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
@@ -322,25 +343,17 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
- scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn({})
+ fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
- self.context, {}, {'ignore_hosts': [self.instance_host]})
- fake_spec = objects.RequestSpec()
- objects.RequestSpec.from_primitives(
- self.context, {}, mox.IgnoreArg()).AndReturn(fake_spec)
+ self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
- fake_spec).AndReturn(
+ self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(error)
- scheduler_utils.setup_instance_group(
- self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
- objects.RequestSpec.from_primitives(
- self.context, {}, mox.IgnoreArg()).AndReturn(fake_spec)
self.task.scheduler_client.select_destinations(self.context,
- fake_spec).AndReturn(
+ self.fake_spec).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
@@ -359,9 +372,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
def test_find_destination_retry_with_invalid_livem_checks(self):
self.flags(migrate_max_retries=1)
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
- self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
@@ -370,28 +381,18 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
- scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn({})
+ fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
- self.context, {}, {'ignore_hosts': [self.instance_host]})
- fake_spec = objects.RequestSpec()
- objects.RequestSpec.from_primitives(
- self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(fake_spec)
+ self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
- fake_spec).AndReturn(
+ self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.Invalid)
- scheduler_utils.setup_instance_group(
- self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
- objects.RequestSpec.from_primitives(
- self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(fake_spec)
self.task.scheduler_client.select_destinations(self.context,
- fake_spec).AndReturn(
+ self.fake_spec).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
@@ -402,9 +403,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
def test_find_destination_retry_with_failed_migration_pre_checks(self):
self.flags(migrate_max_retries=1)
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
- self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
@@ -413,29 +412,18 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
- scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn({})
+ fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
- self.context, {}, {'ignore_hosts': [self.instance_host]})
- fake_spec = objects.RequestSpec()
- objects.RequestSpec.from_primitives(
- self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(fake_spec)
+ self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
- fake_spec).AndReturn(
+ self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.MigrationPreCheckError("reason"))
- scheduler_utils.setup_instance_group(
- self.context, {}, {'ignore_hosts': [self.instance_host, "host1"]})
- fake_spec = objects.RequestSpec()
- objects.RequestSpec.from_primitives(
- self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(fake_spec)
self.task.scheduler_client.select_destinations(self.context,
- fake_spec).AndReturn(
+ self.fake_spec).AndReturn(
[{'host': 'host2'}])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
@@ -446,9 +434,7 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
def test_find_destination_retry_exceeds_max(self):
self.flags(migrate_max_retries=0)
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
- self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
self.mox.StubOutWithMock(self.task,
@@ -456,16 +442,11 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
- scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn({})
+ fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
- self.context, {}, {'ignore_hosts': [self.instance_host]})
- fake_spec = objects.RequestSpec()
- objects.RequestSpec.from_primitives(
- self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn(fake_spec)
+ self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
- fake_spec).AndReturn(
+ self.fake_spec).AndReturn(
[{'host': 'host1'}])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(exception.DestinationHypervisorTooOld)
@@ -479,28 +460,39 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
def test_find_destination_when_runs_out_of_hosts(self):
self.mox.StubOutWithMock(utils, 'get_image_from_system_metadata')
- self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
- self.mox.StubOutWithMock(objects.RequestSpec, 'from_primitives')
self.mox.StubOutWithMock(self.task.scheduler_client,
'select_destinations')
utils.get_image_from_system_metadata(
self.instance.system_metadata).AndReturn("image")
- scheduler_utils.build_request_spec(self.context, mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn({})
+ fake_props = {'instance_properties': {'uuid': self.instance_uuid}}
scheduler_utils.setup_instance_group(
- self.context, {}, {'ignore_hosts': [self.instance_host]})
- fake_spec = objects.RequestSpec()
- objects.RequestSpec.from_primitives(
- self.context,
- mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(fake_spec)
+ self.context, fake_props, {'ignore_hosts': [self.instance_host]})
self.task.scheduler_client.select_destinations(self.context,
- fake_spec).AndRaise(
+ self.fake_spec).AndRaise(
exception.NoValidHost(reason=""))
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost, self.task._find_destination)
+ @mock.patch("nova.utils.get_image_from_system_metadata")
+ @mock.patch("nova.scheduler.utils.build_request_spec")
+ @mock.patch("nova.scheduler.utils.setup_instance_group")
+ @mock.patch("nova.objects.RequestSpec.from_primitives")
+ def test_find_destination_with_remoteError(self,
+ m_from_primitives, m_setup_instance_group,
+ m_build_request_spec, m_get_image_from_system_metadata):
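+ # A RemoteError from the scheduler RPC should be re-raised as the
+ # more specific MigrationSchedulerRPCError.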
+ m_get_image_from_system_metadata.return_value = {'properties': {}}
+ m_build_request_spec.return_value = {}
+ fake_spec = objects.RequestSpec()
+ m_from_primitives.return_value = fake_spec
+ with mock.patch.object(self.task.scheduler_client,
+ 'select_destinations') as m_select_destinations:
+ error = messaging.RemoteError()
+ m_select_destinations.side_effect = error
+ self.assertRaises(exception.MigrationSchedulerRPCError,
+ self.task._find_destination)
+
def test_call_livem_checks_on_host(self):
with mock.patch.object(self.task.compute_rpcapi,
'check_can_live_migrate_destination',
diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py
index 807f3baf85..71127179e7 100644
--- a/nova/tests/unit/conductor/test_conductor.py
+++ b/nova/tests/unit/conductor/test_conductor.py
@@ -50,6 +50,7 @@ from nova.tests.unit import cast_as_call
from nova.tests.unit.compute import test_compute
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_notifier
+from nova.tests.unit import fake_request_spec
from nova.tests.unit import fake_server_actions
from nova.tests.unit import fake_utils
from nova import utils
@@ -66,7 +67,6 @@ class FakeContext(context.RequestContext):
class _BaseTestCase(object):
def setUp(self):
super(_BaseTestCase, self).setUp()
- self.db = None
self.user_id = 'fake'
self.project_id = 'fake'
self.context = FakeContext(self.user_id, self.project_id)
@@ -84,14 +84,6 @@ class _BaseTestCase(object):
fake_utils.stub_out_utils_spawn_n(self.stubs)
- def test_provider_fw_rule_get_all(self):
- fake_rules = ['a', 'b', 'c']
- self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
- db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
- self.mox.ReplayAll()
- result = self.conductor.provider_fw_rule_get_all(self.context)
- self.assertEqual(result, fake_rules)
-
class ConductorTestCase(_BaseTestCase, test.TestCase):
"""Conductor Manager Tests."""
@@ -196,6 +188,10 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
self.conductor.reset()
mock_clear_cache.assert_called_once_with()
+ def test_provider_fw_rule_get_all(self):
+ result = self.conductor.provider_fw_rule_get_all(self.context)
+ self.assertEqual([], result)
+
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
"""Conductor RPC API Tests."""
@@ -215,7 +211,6 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
'conductor', manager='nova.conductor.manager.ConductorManager')
self.conductor = conductor_api.API()
self.conductor_manager = self.conductor_service.manager
- self.db = None
def test_wait_until_ready(self):
timeouts = []
@@ -255,14 +250,13 @@ class ConductorLocalAPITestCase(ConductorAPITestCase):
super(ConductorLocalAPITestCase, self).setUp()
self.conductor = conductor_api.LocalAPI()
self.conductor_manager = self.conductor._manager._target
- self.db = db
def test_wait_until_ready(self):
# Override test in ConductorAPITestCase
pass
-class ConductorImportTest(test.TestCase):
+class ConductorImportTest(test.NoDBTestCase):
def test_import_conductor_local(self):
self.flags(use_local=True, group='conductor')
self.assertIsInstance(conductor.API(), conductor_api.LocalAPI)
@@ -316,13 +310,18 @@ class _BaseTaskTestCase(object):
'recreate': False,
'on_shared_storage': False,
'preserve_ephemeral': False,
- 'host': 'compute-host'}
+ 'host': 'compute-host',
+ 'request_spec': None}
if update_args:
rebuild_args.update(update_args)
compute_rebuild_args = copy.deepcopy(rebuild_args)
compute_rebuild_args['migration'] = migration
compute_rebuild_args['node'] = node
compute_rebuild_args['limits'] = limits
+
+ # Args that are passed in to the method but don't get passed to RPC
+ compute_rebuild_args.pop('request_spec')
+
return rebuild_args, compute_rebuild_args
@mock.patch('nova.objects.Migration')
@@ -431,7 +430,7 @@ class _BaseTaskTestCase(object):
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
db.block_device_mapping_get_all_by_instance(self.context,
- instances[0].uuid, use_slave=False).AndReturn([])
+ instances[0].uuid).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
@@ -452,7 +451,7 @@ class _BaseTaskTestCase(object):
block_device_mapping=mox.IgnoreArg(),
node='node1', limits=[])
db.block_device_mapping_get_all_by_instance(self.context,
- instances[1].uuid, use_slave=False).AndReturn([])
+ instances[1].uuid).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context,
instance=mox.IgnoreArg(),
@@ -529,7 +528,7 @@ class _BaseTaskTestCase(object):
for instance in instances:
set_state_calls.append(mock.call(
self.context, instance.uuid, 'compute_task', 'build_instances',
- updates, exception, spec, self.conductor_manager.db))
+ updates, exception, spec))
cleanup_network_calls.append(mock.call(
self.context, mock.ANY, None))
state_mock.assert_has_calls(set_state_calls)
@@ -570,8 +569,7 @@ class _BaseTaskTestCase(object):
filter_properties, instances[0].uuid)
set_vm_state_and_notify.assert_called_once_with(
self.context, instances[0].uuid, 'compute_task',
- 'build_instances', updates, mock.ANY, {},
- self.conductor_manager.db)
+ 'build_instances', updates, mock.ANY, {})
cleanup_mock.assert_called_once_with(self.context, mock.ANY, None)
_test()
@@ -642,6 +640,51 @@ class _BaseTaskTestCase(object):
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
+ def test_unshelve_offload_instance_on_host_with_request_spec(self):
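+ # An offloaded instance has no host, so unshelving must schedule one,
+ # passing the legacy spec/filter properties derived from the object.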
+ instance = self._create_fake_instance_obj()
+ instance.vm_state = vm_states.SHELVED_OFFLOADED
+ instance.task_state = task_states.UNSHELVING
+ instance.save()
+ system_metadata = instance.system_metadata
+
+ system_metadata['shelved_at'] = timeutils.utcnow()
+ system_metadata['shelved_image_id'] = 'fake_image_id'
+ system_metadata['shelved_host'] = 'fake-mini'
+
+ fake_spec = fake_request_spec.fake_spec_obj()
+ # FIXME(sbauza): Modify the fake RequestSpec object to either add a
+ # non-empty SchedulerRetries object or nullify the field
+ fake_spec.retry = None
+ # FIXME(sbauza): Modify the fake RequestSpec object to either add a
+ # non-empty SchedulerLimits object or nullify the field
+ fake_spec.limits = None
+ # FIXME(sbauza): Modify the fake RequestSpec object to either add a
+ # non-empty InstanceGroup object or nullify the field
+ fake_spec.instance_group = None
+
+ filter_properties = fake_spec.to_legacy_filter_properties_dict()
+ request_spec = fake_spec.to_legacy_request_spec_dict()
+
+ host = {'host': 'host1', 'nodename': 'node1', 'limits': []}
+
+ @mock.patch.object(self.conductor_manager.compute_rpcapi,
+ 'unshelve_instance')
+ @mock.patch.object(self.conductor_manager, '_schedule_instances')
+ def do_test(sched_instances, unshelve_instance):
+ sched_instances.return_value = [host]
+ self.conductor_manager.unshelve_instance(self.context, instance,
+ fake_spec)
+ scheduler_utils.populate_retry(filter_properties, instance.uuid)
+ scheduler_utils.populate_filter_properties(filter_properties, host)
+ sched_instances.assert_called_once_with(self.context, request_spec,
+ filter_properties)
+ unshelve_instance.assert_called_once_with(
+ self.context, instance, host['host'], image=mock.ANY,
+ filter_properties=filter_properties, node=host['nodename']
+ )
+
+ do_test()
+
def test_unshelve_offloaded_instance_glance_image_not_found(self):
shelved_image_id = "image_not_found"
@@ -959,6 +1002,53 @@ class _BaseTaskTestCase(object):
instance=inst_obj,
**compute_args)
+ def test_rebuild_instance_with_request_spec(self):
+ inst_obj = self._create_fake_instance_obj()
+ inst_obj.host = 'noselect'
+ expected_host = 'thebesthost'
+ expected_node = 'thebestnode'
+ expected_limits = 'fake-limits'
+ request_spec = {}
+ filter_properties = {'ignore_hosts': [(inst_obj.host)]}
+ fake_spec = objects.RequestSpec(ignore_hosts=[])
+ augmented_spec = objects.RequestSpec(ignore_hosts=[inst_obj.host])
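+ # from_primitives is stubbed to return a spec whose ignore_hosts
+ # already includes the instance's current host, forcing the
+ # scheduler to pick a different destination.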
+ rebuild_args, compute_args = self._prepare_rebuild_args(
+ {'host': None, 'node': expected_node, 'limits': expected_limits,
+ 'request_spec': fake_spec})
+ with test.nested(
+ mock.patch.object(self.conductor_manager.compute_rpcapi,
+ 'rebuild_instance'),
+ mock.patch.object(scheduler_utils, 'setup_instance_group',
+ return_value=False),
+ mock.patch.object(objects.RequestSpec, 'from_primitives',
+ return_value=augmented_spec),
+ mock.patch.object(self.conductor_manager.scheduler_client,
+ 'select_destinations',
+ return_value=[{'host': expected_host,
+ 'nodename': expected_node,
+ 'limits': expected_limits}]),
+ mock.patch.object(fake_spec, 'to_legacy_request_spec_dict',
+ return_value=request_spec),
+ mock.patch.object(fake_spec, 'to_legacy_filter_properties_dict',
+ return_value=filter_properties),
+ ) as (rebuild_mock, sig_mock, fp_mock, select_dest_mock, to_reqspec,
+ to_filtprops):
+ self.conductor_manager.rebuild_instance(context=self.context,
+ instance=inst_obj,
+ **rebuild_args)
+ to_reqspec.assert_called_once_with()
+ to_filtprops.assert_called_once_with()
+ fp_mock.assert_called_once_with(self.context, request_spec,
+ filter_properties)
+ select_dest_mock.assert_called_once_with(self.context,
+ augmented_spec)
+ compute_args['host'] = expected_host
+ rebuild_mock.assert_called_once_with(self.context,
+ instance=inst_obj,
+ **compute_args)
+ self.assertEqual('compute.instance.rebuild.scheduled',
+ fake_notifier.NOTIFICATIONS[0].event_type)
+
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""ComputeTaskManager Tests."""
@@ -1012,8 +1102,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
{'vm_state': vm_states.ACTIVE,
'task_state': None,
'expected_task_state': task_states.MIGRATING},
- ex, self._build_request_spec(inst_obj),
- self.conductor_manager.db)
+ ex, self._build_request_spec(inst_obj))
def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
instance = fake_instance.fake_db_instance(uuid='uuid',
@@ -1037,8 +1126,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
{'vm_state': vm_states.ACTIVE,
'task_state': None,
'expected_task_state': task_states.MIGRATING},
- ex, self._build_request_spec(inst_obj),
- self.conductor_manager.db)
+ ex, self._build_request_spec(inst_obj))
self.mox.ReplayAll()
self.conductor = utils.ExceptionHelper(self.conductor)
@@ -1061,7 +1149,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
exc.InvalidHypervisorType(),
exc.InvalidCPUInfo(reason='dummy'),
exc.UnableToMigrateToSelf(instance_id='dummy', host='dummy'),
- exc.InvalidLocalStorage(path='dummy', reason='dummy')]
+ exc.InvalidLocalStorage(path='dummy', reason='dummy'),
+ exc.MigrationSchedulerRPCError(reason='dummy')]
for ex in exs:
self._test_migrate_server_deals_with_expected_exceptions(ex)
@@ -1087,7 +1176,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
dict(vm_state=vm_states.ERROR,
task_state=inst_obj.task_state,
expected_task_state=task_states.MIGRATING,),
- expected_ex, request_spec, self.conductor.db)
+ expected_ex, request_spec)
self.assertEqual(ex.kwargs['reason'], six.text_type(expected_ex))
def test_set_vm_state_and_notify(self):
@@ -1095,7 +1184,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
'set_vm_state_and_notify')
scheduler_utils.set_vm_state_and_notify(
self.context, 1, 'compute_task', 'method', 'updates',
- 'ex', 'request_spec', self.conductor.db)
+ 'ex', 'request_spec')
self.mox.ReplayAll()
diff --git a/nova/tests/unit/conf_fixture.py b/nova/tests/unit/conf_fixture.py
index d893bfee2a..c27aa54131 100644
--- a/nova/tests/unit/conf_fixture.py
+++ b/nova/tests/unit/conf_fixture.py
@@ -31,8 +31,6 @@ CONF.import_opt('network_size', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('floating_ip_dns_manager', 'nova.network.floating_ips')
CONF.import_opt('instance_dns_manager', 'nova.network.floating_ips')
-CONF.import_opt('compute_driver', 'nova.virt.driver')
-CONF.import_opt('api_paste_config', 'nova.wsgi')
class ConfFixture(config_fixture.Config):
@@ -55,7 +53,8 @@ class ConfFixture(config_fixture.Config):
self.conf.set_default('use_ipv6', True)
self.conf.set_default('vlan_interface', 'eth0')
self.conf.set_default('auth_strategy', 'noauth2')
- config.parse_args([], default_config_files=[], configure_db=False)
+ config.parse_args([], default_config_files=[], configure_db=False,
+ init_rpc=False)
self.conf.set_default('connection', "sqlite://", group='database')
self.conf.set_default('connection', "sqlite://", group='api_database')
self.conf.set_default('sqlite_synchronous', False, group='database')
@@ -63,6 +62,11 @@ class ConfFixture(config_fixture.Config):
group='api_database')
self.conf.set_default('fatal_exception_format_errors', True)
self.conf.set_default('enabled', True, 'osapi_v21')
+ # TODO(sdague): this makes our project_id match 'fake' and
+ # 'openstack' as well. We should fix the tests to use real
+ # UUIDs and then drop this workaround.
+ self.conf.set_default('project_id_regex',
+ '[0-9a-fopnstk\-]+', 'osapi_v21')
self.conf.set_default('force_dhcp_release', False)
self.conf.set_default('periodic_enable', False)
policy_opts.set_defaults(self.conf)
diff --git a/nova/tests/unit/console/test_console.py b/nova/tests/unit/console/test_console.py
index 381bbe47d3..6f04d9c927 100644
--- a/nova/tests/unit/console/test_console.py
+++ b/nova/tests/unit/console/test_console.py
@@ -127,7 +127,7 @@ class ConsoleTestCase(test.TestCase):
db.instance_destroy(self.context, instance['uuid'])
-class ConsoleAPITestCase(test.TestCase):
+class ConsoleAPITestCase(test.NoDBTestCase):
"""Test case for console API."""
def setUp(self):
super(ConsoleAPITestCase, self).setUp()
diff --git a/nova/tests/unit/console/test_websocketproxy.py b/nova/tests/unit/console/test_websocketproxy.py
index a6b20650e6..1e7e306dcf 100644
--- a/nova/tests/unit/console/test_websocketproxy.py
+++ b/nova/tests/unit/console/test_websocketproxy.py
@@ -20,9 +20,6 @@ import mock
from nova.console import websocketproxy
from nova import exception
from nova import test
-from oslo_config import cfg
-
-CONF = cfg.CONF
class NovaProxyRequestHandlerBaseTestCase(test.NoDBTestCase):
diff --git a/nova/tests/unit/consoleauth/test_consoleauth.py b/nova/tests/unit/consoleauth/test_consoleauth.py
index c0f6d11268..d79444eb99 100644
--- a/nova/tests/unit/consoleauth/test_consoleauth.py
+++ b/nova/tests/unit/consoleauth/test_consoleauth.py
@@ -58,9 +58,9 @@ class ConsoleauthTestCase(test.NoDBTestCase):
self.manager_api.authorize_console(self.context, token, 'novnc',
'127.0.0.1', '8080', 'host',
self.instance_uuid)
- self.assertTrue(self.manager_api.check_token(self.context, token))
+ self.assertIsNotNone(self.manager_api.check_token(self.context, token))
timeutils.advance_time_seconds(1)
- self.assertFalse(self.manager_api.check_token(self.context, token))
+ self.assertIsNone(self.manager_api.check_token(self.context, token))
def _stub_validate_console_port(self, result):
def fake_validate_console_port(ctxt, instance, port, console_type):
@@ -84,7 +84,8 @@ class ConsoleauthTestCase(test.NoDBTestCase):
self.instance_uuid)
for token in tokens:
- self.assertTrue(self.manager_api.check_token(self.context, token))
+ self.assertIsNotNone(
+ self.manager_api.check_token(self.context, token))
def test_delete_tokens_for_instance(self):
tokens = [u"token" + str(i) for i in range(10)]
@@ -100,7 +101,8 @@ class ConsoleauthTestCase(test.NoDBTestCase):
self.assertEqual(len(stored_tokens), 0)
for token in tokens:
- self.assertFalse(self.manager_api.check_token(self.context, token))
+ self.assertIsNone(
+ self.manager_api.check_token(self.context, token))
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_wrong_token_has_port(self, mock_get):
@@ -113,7 +115,7 @@ class ConsoleauthTestCase(test.NoDBTestCase):
self.manager_api.authorize_console(self.context, token, 'novnc',
'127.0.0.1', '8080', 'host',
instance_uuid=self.instance_uuid)
- self.assertFalse(self.manager_api.check_token(self.context, token))
+ self.assertIsNone(self.manager_api.check_token(self.context, token))
def test_delete_expired_tokens(self):
self.useFixture(test.TimeOverride())
@@ -126,7 +128,7 @@ class ConsoleauthTestCase(test.NoDBTestCase):
'127.0.0.1', '8080', 'host',
self.instance_uuid)
timeutils.advance_time_seconds(1)
- self.assertFalse(self.manager_api.check_token(self.context, token))
+ self.assertIsNone(self.manager_api.check_token(self.context, token))
token1 = u'mytok2'
self.manager_api.authorize_console(self.context, token1, 'novnc',
@@ -148,18 +150,31 @@ class ControlauthMemcacheEncodingTestCase(test.NoDBTestCase):
self.u_instance = u"instance"
def test_authorize_console_encoding(self):
- self.mox.StubOutWithMock(self.manager.mc, "set")
- self.mox.StubOutWithMock(self.manager.mc, "get")
- self.manager.mc.set(mox.IsA(str), mox.IgnoreArg(), mox.IgnoreArg()
- ).AndReturn(True)
- self.manager.mc.get(mox.IsA(str)).AndReturn(None)
- self.manager.mc.set(mox.IsA(str), mox.IgnoreArg()).AndReturn(True)
-
- self.mox.ReplayAll()
-
- self.manager.authorize_console(self.context, self.u_token, 'novnc',
- '127.0.0.1', '8080', 'host',
- self.u_instance)
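+ # The manager keeps separate memcache clients for tokens (mc) and
+ # instances (mc_instance), so both are patched here.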
+ with test.nested(
+ mock.patch.object(self.manager.mc_instance,
+ 'set', return_value=True),
+ mock.patch.object(self.manager.mc_instance,
+ 'get', return_value='["token"]'),
+ mock.patch.object(self.manager.mc,
+ 'set', return_value=True),
+ mock.patch.object(self.manager.mc,
+ 'get', return_value=None),
+ mock.patch.object(self.manager.mc,
+ 'get_multi', return_value=["token1"]),
+ ) as (
+ mock_instance_set,
+ mock_instance_get,
+ mock_set,
+ mock_get,
+ mock_get_multi):
+ self.manager.authorize_console(self.context, self.u_token,
+ 'novnc', '127.0.0.1', '8080',
+ 'host', self.u_instance)
+ mock_set.assert_has_calls([mock.call('token', mock.ANY)])
+ mock_instance_get.assert_has_calls([mock.call('instance')])
+ mock_get_multi.assert_has_calls([mock.call(['token'])])
+ mock_instance_set.assert_has_calls(
+ [mock.call('instance', mock.ANY)])
def test_check_token_encoding(self):
self.mox.StubOutWithMock(self.manager.mc, "get")
@@ -170,15 +185,25 @@ class ControlauthMemcacheEncodingTestCase(test.NoDBTestCase):
self.manager.check_token(self.context, self.u_token)
def test_delete_tokens_for_instance_encoding(self):
- self.mox.StubOutWithMock(self.manager.mc, "delete")
- self.mox.StubOutWithMock(self.manager.mc, "get")
- self.manager.mc.get(mox.IsA(str)).AndReturn('["token"]')
- self.manager.mc.delete(mox.IsA(str)).AndReturn(True)
- self.manager.mc.delete(mox.IsA(str)).AndReturn(True)
-
- self.mox.ReplayAll()
-
- self.manager.delete_tokens_for_instance(self.context, self.u_instance)
+ with test.nested(
+ mock.patch.object(self.manager.mc_instance,
+ 'get', return_value='["token"]'),
+ mock.patch.object(self.manager.mc_instance,
+ 'delete', return_value=True),
+ mock.patch.object(self.manager.mc,
+ 'get'),
+ mock.patch.object(self.manager.mc,
+ 'delete_multi', return_value=True),
+ ) as (
+ mock_instance_get,
+ mock_instance_delete,
+ mock_get,
+ mock_delete_multi):
+ self.manager.delete_tokens_for_instance(self.context,
+ self.u_instance)
+ mock_instance_get.assert_has_calls([mock.call('instance')])
+ mock_instance_delete.assert_has_calls([mock.call('instance')])
+ mock_delete_multi.assert_has_calls([mock.call(['token'])])
class CellsConsoleauthTestCase(ConsoleauthTestCase):
diff --git a/nova/tests/unit/db/fakes.py b/nova/tests/unit/db/fakes.py
index 2838a36634..65a9c8e25c 100644
--- a/nova/tests/unit/db/fakes.py
+++ b/nova/tests/unit/db/fakes.py
@@ -21,7 +21,6 @@ import datetime
import six
-from nova import db
from nova import exception
@@ -46,12 +45,12 @@ class FakeModel(object):
return self.values[name]
-def stub_out(stubs, funcs):
+def stub_out(test, funcs):
"""Set the stubs in mapping in the db api."""
for func in funcs:
func_name = '_'.join(func.__name__.split('_')[1:])
- stubs.Set(db, func_name, func)
- stubs.Set(db.api, func_name, func)
+ test.stub_out('nova.db.' + func_name, func)
+ test.stub_out('nova.db.api.' + func_name, func)
fixed_ip_fields = {'id': 0,
@@ -311,7 +310,7 @@ def fake_project_get_networks(context, project_id):
if n['project_id'] == project_id]
-def stub_out_db_network_api(stubs):
+def stub_out_db_network_api(test):
funcs = [fake_floating_ip_allocate_address,
fake_floating_ip_deallocate,
@@ -341,10 +340,10 @@ def stub_out_db_network_api(stubs):
fake_network_update,
fake_project_get_networks]
- stub_out(stubs, funcs)
+ stub_out(test, funcs)
-def stub_out_db_instance_api(stubs, injected=True):
+def stub_out_db_instance_api(test, injected=True):
"""Stubs out the db API for creating Instances."""
def _create_instance_type(**updates):
@@ -450,4 +449,4 @@ def stub_out_db_instance_api(stubs, injected=True):
fake_flavor_get_by_name,
fake_flavor_get,
fake_fixed_ip_get_by_instance]
- stub_out(stubs, funcs)
+ stub_out(test, funcs)
diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py
index ecdf1355ad..8e46b8f963 100644
--- a/nova/tests/unit/db/test_db_api.py
+++ b/nova/tests/unit/db/test_db_api.py
@@ -65,6 +65,7 @@ from nova.objects import fields
from nova import quota
from nova import test
from nova.tests.unit import matchers
+from nova.tests import uuidsentinel
from nova import utils
CONF = cfg.CONF
@@ -72,12 +73,13 @@ CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
get_engine = sqlalchemy_api.get_engine
-get_session = sqlalchemy_api.get_session
def _reservation_get(context, uuid):
- result = sqlalchemy_api.model_query(context, models.Reservation,
- read_deleted="no").filter_by(uuid=uuid).first()
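+ # model_query now expects an active session on the context, so run
+ # the query inside a reader transaction.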
+ with sqlalchemy_api.main_context_manager.reader.using(context):
+ result = sqlalchemy_api.model_query(
+ context, models.Reservation, read_deleted="no").filter_by(
+ uuid=uuid).first()
if not result:
raise exception.ReservationNotFound(uuid=uuid)
@@ -95,7 +97,7 @@ def _quota_reserve(context, project_id, user_id):
"""
def get_sync(resource, usage):
- def sync(elevated, project_id, user_id, session):
+ def sync(elevated, project_id, user_id):
return {resource: usage}
return sync
quotas = {}
@@ -519,46 +521,21 @@ class UnsupportedDbRegexpTestCase(DbTestCase):
class ModelQueryTestCase(DbTestCase):
def test_model_query_invalid_arguments(self):
- # read_deleted shouldn't accept invalid values
- self.assertRaises(ValueError, sqlalchemy_api.model_query,
- self.context, models.Instance, read_deleted=False)
- self.assertRaises(ValueError, sqlalchemy_api.model_query,
- self.context, models.Instance, read_deleted="foo")
-
- # Check model is a valid model
- self.assertRaises(TypeError, sqlalchemy_api.model_query,
- self.context, "")
-
- @mock.patch.object(sqlalchemy_api, 'get_session')
- def test_model_query_use_slave_false(self, mock_get_session):
- sqlalchemy_api.model_query(self.context, models.Instance,
- use_slave=False)
- mock_get_session.assert_called_once_with(use_slave=False)
-
- @mock.patch.object(sqlalchemy_api, 'get_session')
- def test_model_query_use_slave_no_slave_connection(self, mock_get_session):
- self.flags(slave_connection='', group='database')
- sqlalchemy_api.model_query(self.context, models.Instance,
- use_slave=True)
- mock_get_session.assert_called_once_with(use_slave=False)
-
- @mock.patch.object(sqlalchemy_api, 'get_session')
- def test_model_query_use_slave_true(self, mock_get_session):
- self.flags(slave_connection='foo://bar', group='database')
- sqlalchemy_api.model_query(self.context, models.Instance,
- use_slave=True)
- mock_get_session.assert_called_once_with(use_slave=True)
-
- @mock.patch.object(sqlalchemy_api, 'get_session')
- def test_model_query_lazy_session_default(self, mock_get_session):
- sqlalchemy_api.model_query(self.context, models.Instance,
- session=mock.MagicMock())
- self.assertFalse(mock_get_session.called)
-
- @mock.patch.object(sqlalchemy_api, 'get_session')
+ with sqlalchemy_api.main_context_manager.reader.using(self.context):
+ # read_deleted shouldn't accept invalid values
+ self.assertRaises(ValueError, sqlalchemy_api.model_query,
+ self.context, models.Instance,
+ read_deleted=False)
+ self.assertRaises(ValueError, sqlalchemy_api.model_query,
+ self.context, models.Instance,
+ read_deleted="foo")
+
+ # Check model is a valid model
+ self.assertRaises(TypeError, sqlalchemy_api.model_query,
+ self.context, "")
+
@mock.patch.object(sqlalchemyutils, 'model_query')
- def test_model_query_use_context_session(self, mock_model_query,
- mock_get_session):
+ def test_model_query_use_context_session(self, mock_model_query):
@sqlalchemy_api.main_context_manager.reader
def fake_method(context):
session = context.session
@@ -566,14 +543,12 @@ class ModelQueryTestCase(DbTestCase):
return session
session = fake_method(self.context)
- self.assertFalse(mock_get_session.called)
mock_model_query.assert_called_once_with(models.Instance, session,
None, deleted=False)
class EngineFacadeTestCase(DbTestCase):
- @mock.patch.object(sqlalchemy_api, 'get_session')
- def test_use_single_context_session_writer(self, mock_get_session):
+ def test_use_single_context_session_writer(self):
# Checks that the session in the context is not overwritten by the
# @sqlalchemy_api.main_context_manager.writer decorator when the
# decorator is applied twice.
@@ -590,11 +565,9 @@ class EngineFacadeTestCase(DbTestCase):
return session
parent_session, child_session = fake_parent_method(self.context)
- self.assertFalse(mock_get_session.called)
self.assertEqual(parent_session, child_session)
- @mock.patch.object(sqlalchemy_api, 'get_session')
- def test_use_single_context_session_reader(self, mock_get_session):
+ def test_use_single_context_session_reader(self):
# Checks that the session in the context is not overwritten by the
# @sqlalchemy_api.main_context_manager.reader decorator when the
# decorator is applied twice.
@@ -611,7 +584,6 @@ class EngineFacadeTestCase(DbTestCase):
return session
parent_session, child_session = fake_parent_method(self.context)
- self.assertFalse(mock_get_session.called)
self.assertEqual(parent_session, child_session)
@@ -904,7 +876,7 @@ class AggregateDBApiTestCase(test.TestCase):
result = _create_aggregate(context=ctxt, metadata=None)
def counted():
- def get_query(context, id, session, read_deleted):
+ def get_query(context, id, read_deleted):
get_query.counter += 1
raise db_exc.DBDuplicateEntry
get_query.counter = 0
@@ -1099,6 +1071,13 @@ class SqlAlchemyDbApiNoDbTestCase(test.NoDBTestCase):
mock_create_facade.assert_called_once_with()
mock_facade.get_engine.assert_called_once_with(use_slave=False)
+ def test_get_db_conf_with_connection(self):
+ mock_conf_group = mock.MagicMock()
+ mock_conf_group.connection = 'fakemain://'
+ db_conf = sqlalchemy_api._get_db_conf(mock_conf_group,
+ connection='fake://')
+ self.assertEqual('fake://', db_conf['connection'])
+
@mock.patch.object(sqlalchemy_api.api_context_manager._factory,
'get_legacy_facade')
def test_get_api_engine(self, mock_create_facade):
@@ -1109,26 +1088,6 @@ class SqlAlchemyDbApiNoDbTestCase(test.NoDBTestCase):
mock_create_facade.assert_called_once_with()
mock_facade.get_engine.assert_called_once_with()
- @mock.patch.object(sqlalchemy_api.main_context_manager._factory,
- 'get_legacy_facade')
- def test_get_session(self, mock_create_facade):
- mock_facade = mock.MagicMock()
- mock_create_facade.return_value = mock_facade
-
- sqlalchemy_api.get_session()
- mock_create_facade.assert_called_once_with()
- mock_facade.get_session.assert_called_once_with(use_slave=False)
-
- @mock.patch.object(sqlalchemy_api.api_context_manager._factory,
- 'get_legacy_facade')
- def test_get_api_session(self, mock_create_facade):
- mock_facade = mock.MagicMock()
- mock_create_facade.return_value = mock_facade
-
- sqlalchemy_api.get_api_session()
- mock_create_facade.assert_called_once_with()
- mock_facade.get_session.assert_called_once_with()
-
@mock.patch.object(sqlalchemy_api, '_instance_get_by_uuid')
@mock.patch.object(sqlalchemy_api, '_instances_fill_metadata')
@mock.patch('oslo_db.sqlalchemy.utils.paginate_query')
@@ -1137,8 +1096,7 @@ class SqlAlchemyDbApiNoDbTestCase(test.NoDBTestCase):
ctxt = mock.MagicMock()
ctxt.elevated.return_value = mock.sentinel.elevated
sqlalchemy_api.instance_get_all_by_filters_sort(ctxt, {}, marker='foo')
- mock_get.assert_called_once_with(mock.sentinel.elevated,
- 'foo', session=mock.ANY)
+ mock_get.assert_called_once_with(mock.sentinel.elevated, 'foo')
ctxt.elevated.assert_called_once_with(read_deleted='yes')
@@ -1149,7 +1107,9 @@ class SqlAlchemyDbApiTestCase(DbTestCase):
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
- result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
+ with sqlalchemy_api.main_context_manager.reader.using(ctxt):
+ result = sqlalchemy_api._instance_get_all_uuids_by_host(
+ ctxt, 'host1')
self.assertEqual(2, len(result))
def test_instance_get_all_uuids_by_host(self):
@@ -1157,7 +1117,9 @@ class SqlAlchemyDbApiTestCase(DbTestCase):
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
- result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
+ with sqlalchemy_api.main_context_manager.reader.using(ctxt):
+ result = sqlalchemy_api._instance_get_all_uuids_by_host(
+ ctxt, 'host1')
self.assertEqual(2, len(result))
self.assertEqual(six.text_type, type(result[0]))
@@ -1237,10 +1199,10 @@ class SqlAlchemyDbApiTestCase(DbTestCase):
ctxt = context.get_admin_context()
sqlalchemy_api.instance_get_all_by_filters(ctxt, {'foo': 'bar'},
'sort_key', 'sort_dir', limit=100, marker='uuid',
- columns_to_join='columns', use_slave=True)
+ columns_to_join='columns')
mock_get_all_filters_sort.assert_called_once_with(ctxt, {'foo': 'bar'},
limit=100, marker='uuid', columns_to_join='columns',
- use_slave=True, sort_keys=['sort_key'], sort_dirs=['sort_dir'])
+ sort_keys=['sort_key'], sort_dirs=['sort_dir'])
def test_instance_get_all_by_filters_sort_key_invalid(self):
'''InvalidSortKey is raised if an invalid sort key is given.'''
@@ -1371,6 +1333,7 @@ class MigrationTestCase(test.TestCase):
self._create(status='reverted')
self._create(status='confirmed')
self._create(status='error')
+ self._create(status='failed')
self._create(status='accepted')
self._create(source_compute='host2', source_node='b',
dest_compute='host1', dest_node='a')
@@ -1398,6 +1361,7 @@ class MigrationTestCase(test.TestCase):
self.assertNotEqual('confirmed', migration['status'])
self.assertNotEqual('reverted', migration['status'])
self.assertNotEqual('error', migration['status'])
+ self.assertNotEqual('failed', migration['status'])
self.assertNotEqual('accepted', migration['status'])
def test_migration_get_in_progress_joins(self):
@@ -1515,16 +1479,27 @@ class MigrationTestCase(test.TestCase):
self.assertRaises(exception.MigrationNotFound,
db.migration_update, self.ctxt, 42, {})
+ def test_get_migration_for_instance(self):
+ migrations = db.migration_get_all_by_filters(self.ctxt, [])
+ migration_id = migrations[0].id
+ instance_uuid = migrations[0].instance_uuid
+ instance_migration = db.migration_get_by_id_and_instance(
+ self.ctxt, migration_id, instance_uuid)
+ self.assertEqual(migration_id, instance_migration.id)
+ self.assertEqual(instance_uuid, instance_migration.instance_uuid)
+
+ def test_get_migration_for_instance_not_found(self):
+ self.assertRaises(exception.MigrationNotFoundForInstance,
+ db.migration_get_by_id_and_instance, self.ctxt,
+ '500', '501')
+
class ModelsObjectComparatorMixin(object):
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
- if isinstance(obj, dict):
- obj_items = obj.items()
- else:
- obj_items = obj.iteritems()
- return {k: v for k, v in obj_items
+
+ return {k: v for k, v in obj.items()
if k not in ignored_keys}
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
@@ -2263,18 +2238,22 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
def test_instance_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
- meta = sqlalchemy_api._instance_metadata_get_multi(self.ctxt, uuids)
+ with sqlalchemy_api.main_context_manager.reader.using(self.ctxt):
+ meta = sqlalchemy_api._instance_metadata_get_multi(
+ self.ctxt, uuids)
for row in meta:
self.assertIn(row['instance_uuid'], uuids)
def test_instance_metadata_get_multi_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
- sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])
+ with sqlalchemy_api.main_context_manager.reader.using(self.ctxt):
+ sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])
def test_instance_system_system_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
- sys_meta = sqlalchemy_api._instance_system_metadata_get_multi(
+ with sqlalchemy_api.main_context_manager.reader.using(self.ctxt):
+ sys_meta = sqlalchemy_api._instance_system_metadata_get_multi(
self.ctxt, uuids)
for row in sys_meta:
self.assertIn(row['instance_uuid'], uuids)
@@ -2589,6 +2568,33 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
ignored_keys=['deleted', 'deleted_at', 'metadata', 'extra',
'system_metadata', 'info_cache', 'pci_devices'])
+ def test_instance_get_all_by_filters_tags_and_project_id(self):
+ context1 = context.RequestContext('user1', 'p1')
+ context2 = context.RequestContext('user2', 'p2')
+
+ inst1 = self.create_instance_with_args(context=context1,
+ project_id='p1')
+ inst2 = self.create_instance_with_args(context=context1,
+ project_id='p1')
+ inst3 = self.create_instance_with_args(context=context2,
+ project_id='p2')
+ t1 = u'tag1'
+ t2 = u'tag2'
+ t3 = u'tag3'
+ t4 = u'tag4'
+
+ db.instance_tag_set(context1, inst1.uuid, [t1, t2])
+ db.instance_tag_set(context1, inst2.uuid, [t1, t2, t4])
+ db.instance_tag_set(context2, inst3.uuid, [t1, t2, t3, t4])
+
+ result = db.instance_get_all_by_filters(self.ctxt,
+ {'tags': [t1, t2],
+ 'tags-any': [t3, t4],
+ 'project_id': 'p1'})
+ self._assertEqualListsOfObjects([inst2], result,
+ ignored_keys=['deleted', 'deleted_at', 'metadata', 'extra',
+ 'system_metadata', 'info_cache', 'pci_devices'])
+
def test_instance_get_all_by_host_and_node_no_join(self):
instance = self.create_instance_with_args()
result = db.instance_get_all_by_host_and_node(self.ctxt, 'h1', 'n1')
@@ -2799,35 +2805,32 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
self.assertEqual(meta, {'mk1': 'mv3'})
def test_instance_update_and_get_original_no_conflict_on_session(self):
- session = get_session()
- # patch get_session so that we may inspect it outside of the
- # method; once enginefacade is implemented, this can be simplified
- with mock.patch("nova.db.sqlalchemy.api.get_session", lambda: session):
+ with sqlalchemy_api.main_context_manager.writer.using(self.ctxt):
instance = self.create_instance_with_args()
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
- # test some regular persisted fields
- self.assertEqual(old_ref.uuid, new_ref.uuid)
- self.assertEqual(old_ref.project_id, new_ref.project_id)
+ # test some regular persisted fields
+ self.assertEqual(old_ref.uuid, new_ref.uuid)
+ self.assertEqual(old_ref.project_id, new_ref.project_id)
- # after a copy operation, we can assert:
+ # after a copy operation, we can assert:
- # 1. the two states have their own InstanceState
- old_insp = inspect(old_ref)
- new_insp = inspect(new_ref)
- self.assertNotEqual(old_insp, new_insp)
+ # 1. the two states have their own InstanceState
+ old_insp = inspect(old_ref)
+ new_insp = inspect(new_ref)
+ self.assertNotEqual(old_insp, new_insp)
- # 2. only one of the objects is still in our Session
- self.assertIs(new_insp.session, session)
- self.assertIsNone(old_insp.session)
+ # 2. only one of the objects is still in our Session
+ self.assertIs(new_insp.session, self.ctxt.session)
+ self.assertIsNone(old_insp.session)
- # 3. The "new" object remains persistent and ready
- # for updates
- self.assertTrue(new_insp.persistent)
+ # 3. The "new" object remains persistent and ready
+ # for updates
+ self.assertTrue(new_insp.persistent)
- # 4. the "old" object is detached from this Session.
- self.assertTrue(old_insp.detached)
+ # 4. the "old" object is detached from this Session.
+ self.assertTrue(old_insp.detached)
def test_instance_update_and_get_original_conflict_race(self):
# Ensure that we retry if update_on_match fails for no discernible
@@ -3143,36 +3146,38 @@ class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
db.instance_destroy, ctxt, instance['uuid'])
def test_check_instance_exists(self):
- session = get_session()
instance = self.create_instance_with_args()
- self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project(
- self.ctxt, session, instance['uuid']))
+ with sqlalchemy_api.main_context_manager.reader.using(self.ctxt):
+ self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project(
+ self.ctxt, instance['uuid']))
def test_check_instance_exists_non_existing_instance(self):
- session = get_session()
- self.assertRaises(exception.InstanceNotFound,
- sqlalchemy_api._check_instance_exists_in_project,
- self.ctxt, session, '123')
+ with sqlalchemy_api.main_context_manager.reader.using(self.ctxt):
+ self.assertRaises(exception.InstanceNotFound,
+ sqlalchemy_api._check_instance_exists_in_project,
+ self.ctxt, '123')
def test_check_instance_exists_from_different_tenant(self):
context1 = context.RequestContext('user1', 'project1')
context2 = context.RequestContext('user2', 'project2')
- session = get_session()
instance = self.create_instance_with_args(context=context1)
- self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project(
- context1, session, instance['uuid']))
- self.assertRaises(exception.InstanceNotFound,
- sqlalchemy_api._check_instance_exists_in_project,
- context2, session, instance['uuid'])
+ with sqlalchemy_api.main_context_manager.reader.using(context1):
+ self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project(
+ context1, instance['uuid']))
+
+ with sqlalchemy_api.main_context_manager.reader.using(context2):
+ self.assertRaises(exception.InstanceNotFound,
+ sqlalchemy_api._check_instance_exists_in_project,
+ context2, instance['uuid'])
def test_check_instance_exists_admin_context(self):
- session = get_session()
some_context = context.RequestContext('some_user', 'some_project')
instance = self.create_instance_with_args(context=some_context)
- # Check that method works correctly with admin context
- self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project(
- self.ctxt, session, instance['uuid']))
+ with sqlalchemy_api.main_context_manager.reader.using(self.ctxt):
+ # Check that method works correctly with admin context
+ self.assertIsNone(sqlalchemy_api._check_instance_exists_in_project(
+ self.ctxt, instance['uuid']))
class InstanceMetadataTestCase(test.TestCase):
@@ -3233,7 +3238,8 @@ class InstanceExtraTestCase(test.TestCase):
self.assertEqual('changed', inst_extra.numa_topology)
def test_instance_extra_update_by_uuid_and_create(self):
- sqlalchemy_api.model_query(self.ctxt, models.InstanceExtra).\
+ with sqlalchemy_api.main_context_manager.writer.using(self.ctxt):
+ sqlalchemy_api.model_query(self.ctxt, models.InstanceExtra).\
filter_by(instance_uuid=self.instance['uuid']).\
delete()
inst_extra = db.instance_extra_get_by_instance_uuid(
@@ -3985,6 +3991,16 @@ class InstanceTypeTestCase(BaseInstanceTypeTestCase):
ignored_keys)
self._assertEqualObjects(extra_specs, flavor['extra_specs'])
+ @mock.patch('sqlalchemy.orm.query.Query.all', return_value=[])
+ def test_flavor_create_with_extra_specs_duplicate(self, mock_all):
+ extra_specs = dict(key='value')
+ flavorid = 'flavorid'
+ self._create_flavor({'flavorid': flavorid, 'extra_specs': extra_specs})
+
+ self.assertRaises(exception.FlavorExtraSpecUpdateCreateFailed,
+ db.flavor_extra_specs_update_or_create,
+ self.ctxt, flavorid, extra_specs)
+
def test_flavor_get_all(self):
# NOTE(boris-42): Remove base instance types
for it in db.flavor_get_all(self.ctxt):
@@ -4267,7 +4283,7 @@ class InstanceTypeExtraSpecsTestCase(BaseInstanceTypeTestCase):
def test_flavor_extra_specs_update_or_create_retry(self):
def counted():
- def get_id(context, flavorid, session):
+ def get_id(context, flavorid):
get_id.counter += 1
raise db_exc.DBDuplicateEntry
get_id.counter = 0
@@ -4770,6 +4786,27 @@ class FixedIPTestCase(BaseInstanceTypeTestCase):
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
+ def test_fixed_ip_associate_pool_order(self):
+ """Test that fixed_ip always uses oldest fixed_ip.
+
+ We should always be using the fixed ip with the oldest
+ updated_at.
+ """
+ instance_uuid = self._create_instance()
+ network = db.network_create_safe(self.ctxt, {})
+ self.addCleanup(timeutils.clear_time_override)
+ start = timeutils.utcnow()
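+ # Create fixed IPs with progressively older updated_at timestamps;
+ # the pool should hand back the oldest one.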
+ for i in range(1, 4):
+ now = start - datetime.timedelta(hours=i)
+ timeutils.set_time_override(now)
+ address = self.create_fixed_ip(
+ updated_at=now,
+ address='10.1.0.%d' % i,
+ network_id=network['id'])
+ db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
+ fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
+ self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
+
def test_fixed_ip_associate_pool_succeeds_fip_ref_network_id_is_none(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
@@ -7005,9 +7042,10 @@ class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
def test_get_project_user_quota_usages_in_order(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
- with mock.patch.object(query.Query, 'order_by') as order_mock:
- sqlalchemy_api._get_project_user_quota_usages(
- self.ctxt, None, 'p1', 'u1')
+ with sqlalchemy_api.main_context_manager.reader.using(self.ctxt):
+ with mock.patch.object(query.Query, 'order_by') as order_mock:
+ sqlalchemy_api._get_project_user_quota_usages(
+ self.ctxt, 'p1', 'u1')
self.assertTrue(order_mock.called)
def test_quota_usage_update_nonexistent(self):
@@ -7067,10 +7105,10 @@ class QuotaReserveNoDbTestCase(test.NoDBTestCase):
# Now test if the QuotaUsage was created with a user_id or not.
if per_project_quotas:
quc.assert_called_once_with(
- project_id, None, resource, 0, 0, None, session=session)
+ project_id, None, resource, 0, 0, None, session)
else:
quc.assert_called_once_with(
- project_id, user_id, resource, 0, 0, None, session=session)
+ project_id, user_id, resource, 0, 0, None, session)
def test_create_quota_usage_if_missing_created_per_project_quotas(self):
self._test_create_quota_usage_if_missing_created(True)
@@ -7277,6 +7315,7 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
disabled=False)
self.service = db.service_create(self.ctxt, self.service_dict)
self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
+ uuid=uuidsentinel.fake_compute_node,
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, free_ram_mb=1024,
free_disk_gb=2048, hypervisor_type="xen",
@@ -7293,6 +7332,7 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
extra_resources='',
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0,
stats='', numa_topology='')
# add some random stats
self.stats = dict(num_instances=3, num_proj_12345=2,
@@ -7358,7 +7398,7 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
service_data['host'] = 'host2'
service = db.service_create(self.ctxt, service_data)
- existing_node = dict(self.item.iteritems())
+ existing_node = dict(self.item.items())
expected = [existing_node]
for name in ['bm_node1', 'bm_node2']:
@@ -7390,9 +7430,9 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
node = db.compute_node_create(self.ctxt, compute_node_another_host)
- result = db.compute_node_get_all_by_host(self.ctxt, 'host1', False)
+ result = db.compute_node_get_all_by_host(self.ctxt, 'host1')
self._assertEqualListsOfObjects([self.item], result)
- result = db.compute_node_get_all_by_host(self.ctxt, 'host2', False)
+ result = db.compute_node_get_all_by_host(self.ctxt, 'host2')
self._assertEqualListsOfObjects([node], result)
def test_compute_node_get_all_by_host_with_same_host(self):
@@ -7405,7 +7445,7 @@ class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
expected = [self.item, node]
result = sorted(db.compute_node_get_all_by_host(
- self.ctxt, 'host1', False),
+ self.ctxt, 'host1'),
key=lambda n: n['hypervisor_hostname'])
self._assertEqualListsOfObjects(expected, result,
@@ -8316,7 +8356,11 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
# NOTE(snikitin): migration 266 introduced a new table 'tags',
# which has no shadow table, and that's
# completely OK, so we should skip it here
- if table_name == 'tags':
+ # NOTE(cdent): migration 314 introduced three new tables
+ # ('resource_providers', 'allocations' and 'inventories')
+ # with no shadow tables, and that's OK, so skip them here.
+ if table_name in ['tags', 'resource_providers', 'allocations',
+ 'inventories']:
continue
if table_name.startswith("shadow_"):
@@ -8393,6 +8437,11 @@ class ArchiveTestCase(test.TestCase, ModelsObjectComparatorMixin):
def _test_archive_deleted_rows_for_one_uuid_table(self, tablename):
""":returns: 0 on success, 1 if no uuid column, 2 if insert failed."""
+ # NOTE(cdent): migration 314 adds the resource_providers
+ # table with a uuid column that is never archived, so skip it.
+ skip_tables = ['resource_providers']
+ if tablename in skip_tables:
+ return 1
main_table = sqlalchemyutils.get_table(self.engine, tablename)
if not hasattr(main_table.c, "uuid"):
# Not a uuid table, so skip it.
diff --git a/nova/tests/unit/db/test_migrations.py b/nova/tests/unit/db/test_migrations.py
index d1286e7481..d46fcb30a9 100644
--- a/nova/tests/unit/db/test_migrations.py
+++ b/nova/tests/unit/db/test_migrations.py
@@ -819,6 +819,42 @@ class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
'ix_pci_devices_compute_node_id_parent_addr_deleted',
['compute_node_id', 'parent_addr', 'deleted'])
+ def _check_314(self, engine, data):
+ self.assertColumnExists(engine, 'inventories', 'resource_class_id')
+ self.assertColumnExists(engine, 'allocations', 'resource_class_id')
+
+ self.assertColumnExists(engine, 'resource_providers', 'id')
+ self.assertColumnExists(engine, 'resource_providers', 'uuid')
+
+ self.assertColumnExists(engine, 'compute_nodes', 'uuid')
+ self.assertColumnExists(engine, 'shadow_compute_nodes', 'uuid')
+
+ self.assertIndexMembers(engine, 'allocations',
+ 'allocations_resource_provider_class_id_idx',
+ ['resource_provider_id', 'resource_class_id'])
+
+ def _check_315(self, engine, data):
+ self.assertColumnExists(engine, 'migrations',
+ 'memory_total')
+ self.assertColumnExists(engine, 'migrations',
+ 'memory_processed')
+ self.assertColumnExists(engine, 'migrations',
+ 'memory_remaining')
+ self.assertColumnExists(engine, 'migrations',
+ 'disk_total')
+ self.assertColumnExists(engine, 'migrations',
+ 'disk_processed')
+ self.assertColumnExists(engine, 'migrations',
+ 'disk_remaining')
+
+ def _check_316(self, engine, data):
+ self.assertColumnExists(engine, 'compute_nodes',
+ 'disk_allocation_ratio')
+
+ def _check_317(self, engine, data):
+ self.assertColumnExists(engine, 'aggregates', 'uuid')
+ self.assertColumnExists(engine, 'shadow_aggregates', 'uuid')
+
class TestNovaMigrationsSQLite(NovaMigrationsCheckers,
test_base.DbTestCase,
diff --git a/nova/tests/unit/db/test_models.py b/nova/tests/unit/db/test_models.py
new file mode 100644
index 0000000000..fd85758950
--- /dev/null
+++ b/nova/tests/unit/db/test_models.py
@@ -0,0 +1,86 @@
+# Copyright 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.db.sqlalchemy import api_models
+from nova.db.sqlalchemy import models
+from nova import test
+
+
+class TestSoftDeletesDeprecated(test.NoDBTestCase):
+
+ def test_no_new_soft_deletes(self):
+ whitelist = [
+ 'agent_builds',
+ 'aggregate_hosts',
+ 'aggregate_metadata',
+ 'aggregates',
+ 'block_device_mapping',
+ 'bw_usage_cache',
+ 'cells',
+ 'certificates',
+ 'compute_nodes',
+ 'console_pools',
+ 'consoles',
+ 'dns_domains',
+ 'fixed_ips',
+ 'floating_ips',
+ 'instance_actions',
+ 'instance_actions_events',
+ 'instance_extra',
+ 'instance_faults',
+ 'instance_group_member',
+ 'instance_group_policy',
+ 'instance_groups',
+ 'instance_id_mappings',
+ 'instance_info_caches',
+ 'instance_metadata',
+ 'instance_system_metadata',
+ 'instance_type_extra_specs',
+ 'instance_type_projects',
+ 'instance_types',
+ 'instances',
+ 'key_pairs',
+ 'migrations',
+ 'networks',
+ 'pci_devices',
+ 'project_user_quotas',
+ 'provider_fw_rules',
+ 'quota_classes',
+ 'quota_usages',
+ 'quotas',
+ 'reservations',
+ 's3_images',
+ 'security_group_default_rules',
+ 'security_group_instance_association',
+ 'security_group_rules',
+ 'security_groups',
+ 'services',
+ 'snapshot_id_mappings',
+ 'snapshots',
+ 'task_log',
+ 'virtual_interfaces',
+ 'volume_id_mappings',
+ 'volume_usage_cache'
+ ]
+
+ # Soft deletes are deprecated. Whitelist the tables that currently
+ # allow soft deletes. No new tables should be added to this whitelist.
+ tables = []
+ for base in [models.BASE, api_models.API_BASE]:
+ for table_name, table in base.metadata.tables.items():
+ columns = [column.name for column in table.columns]
+ if 'deleted' in columns or 'deleted_at' in columns:
+ tables.append(table_name)
+ self.assertEqual(whitelist, sorted(tables))
diff --git a/nova/tests/unit/fake_notifier.py b/nova/tests/unit/fake_notifier.py
index 4c599adec1..59432872bc 100644
--- a/nova/tests/unit/fake_notifier.py
+++ b/nova/tests/unit/fake_notifier.py
@@ -21,10 +21,12 @@ from oslo_serialization import jsonutils
from nova import rpc
NOTIFICATIONS = []
+VERSIONED_NOTIFICATIONS = []
def reset():
del NOTIFICATIONS[:]
+ del VERSIONED_NOTIFICATIONS[:]
FakeMessage = collections.namedtuple('Message',
@@ -64,11 +66,27 @@ class FakeNotifier(object):
NOTIFICATIONS.append(msg)
+class FakeVersionedNotifier(FakeNotifier):
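+ # Collects versioned notifications in their own list so tests can
+ # assert on them separately from the legacy notifications.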
+ def _notify(self, priority, ctxt, event_type, payload):
+ payload = self._serializer.serialize_entity(ctxt, payload)
+ VERSIONED_NOTIFICATIONS.append({'publisher_id': self.publisher_id,
+ 'priority': priority,
+ 'event_type': event_type,
+ 'payload': payload})
+
+
def stub_notifier(stubs):
stubs.Set(messaging, 'Notifier', FakeNotifier)
- if rpc.NOTIFIER:
- stubs.Set(rpc, 'NOTIFIER',
- FakeNotifier(rpc.NOTIFIER.transport,
- rpc.NOTIFIER.publisher_id,
- serializer=getattr(rpc.NOTIFIER, '_serializer',
+ if rpc.LEGACY_NOTIFIER and rpc.NOTIFIER:
+ stubs.Set(rpc, 'LEGACY_NOTIFIER',
+ FakeNotifier(rpc.LEGACY_NOTIFIER.transport,
+ rpc.LEGACY_NOTIFIER.publisher_id,
+ serializer=getattr(rpc.LEGACY_NOTIFIER,
+ '_serializer',
None)))
+ stubs.Set(rpc, 'NOTIFIER',
+ FakeVersionedNotifier(rpc.NOTIFIER.transport,
+ rpc.NOTIFIER.publisher_id,
+ serializer=getattr(rpc.NOTIFIER,
+ '_serializer',
+ None)))
diff --git a/nova/tests/unit/fake_policy.py b/nova/tests/unit/fake_policy.py
index 7eec76f257..2444c2196b 100644
--- a/nova/tests/unit/fake_policy.py
+++ b/nova/tests/unit/fake_policy.py
@@ -118,11 +118,14 @@ policy_data = """
"os_compute_api:servers:resize": "",
"os_compute_api:servers:revert_resize": "",
"os_compute_api:servers:show": "",
+ "os_compute_api:servers:show:host_status": "",
"os_compute_api:servers:create_image": "",
"os_compute_api:servers:create_image:allow_volume_backed": "",
"os_compute_api:servers:update": "",
"os_compute_api:servers:start": "",
"os_compute_api:servers:stop": "",
+ "os_compute_api:servers:trigger_crash_dump": "",
+ "os_compute_api:servers:migrations:force_complete": "",
"os_compute_api:os-access-ips": "",
"compute_extension:accounts": "",
"compute_extension:admin_actions:pause": "",
diff --git a/nova/tests/unit/fake_volume.py b/nova/tests/unit/fake_volume.py
index f910b7b83b..d2e2d090b1 100644
--- a/nova/tests/unit/fake_volume.py
+++ b/nova/tests/unit/fake_volume.py
@@ -66,7 +66,8 @@ class fake_volume(object):
'display_description': description,
'provider_location': 'fake-location',
'provider_auth': 'fake-auth',
- 'volume_type_id': 99
+ 'volume_type_id': 99,
+ 'multiattach': False
}
def get(self, key, default=None):
@@ -193,31 +194,38 @@ class API(object):
msg = "Instance and volume not in same availability_zone"
raise exception.InvalidVolume(reason=msg)
- def check_detach(self, context, volume):
+ def check_detach(self, context, volume, instance=None):
if volume['status'] == "available":
msg = "already detached"
raise exception.InvalidVolume(reason=msg)
+ if volume['attach_status'] == 'detached':
+ msg = "Volume must be attached in order to detach."
+ raise exception.InvalidVolume(reason=msg)
+
+ if instance and not volume.get('attachments', {}).get(instance.uuid):
+ raise exception.VolumeUnattached(volume_id=volume['id'])
+
def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
LOG.info('attaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'in-use'
- volume['mountpoint'] = mountpoint
volume['attach_status'] = 'attached'
- volume['instance_uuid'] = instance_uuid
volume['attach_time'] = timeutils.utcnow()
+ volume['multiattach'] = True
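+ # Track the attachment per instance uuid so check_detach() can look
+ # up a specific instance's attachment.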
+ volume['attachments'] = {instance_uuid:
+ {'attachment_id': str(uuid.uuid4()),
+ 'mountpoint': mountpoint}}
def reset_fake_api(self, context):
del self.volume_list[:]
del self.snapshot_list[:]
- def detach(self, context, volume_id):
+ def detach(self, context, volume_id, instance_uuid, attachment_id=None):
LOG.info('detaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'available'
- volume['mountpoint'] = None
volume['attach_status'] = 'detached'
- volume['instance_uuid'] = None
def initialize_connection(self, context, volume_id, connector):
return {'driver_volume_type': 'iscsi', 'data': {}}
diff --git a/nova/tests/unit/image/test_glance.py b/nova/tests/unit/image/test_glance.py
index 10f4ae31ae..09aee7d552 100644
--- a/nova/tests/unit/image/test_glance.py
+++ b/nova/tests/unit/image/test_glance.py
@@ -17,6 +17,7 @@
import datetime
from six.moves import StringIO
+import cryptography
import glanceclient.exc
import mock
from oslo_config import cfg
@@ -673,6 +674,147 @@ class TestDownloadNoDirectUri(test.NoDBTestCase):
writer.close.assert_called_once_with()
+class TestDownloadSignatureVerification(test.NoDBTestCase):
+
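+ # Stub verifiers: MockVerifier always succeeds, while BadVerifier
+ # raises InvalidSignature the way the cryptography library would.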
+ class MockVerifier(object):
+ def update(self, data):
+ return
+
+ def verify(self):
+ return True
+
+ class BadVerifier(object):
+ def update(self, data):
+ return
+
+ def verify(self):
+ raise cryptography.exceptions.InvalidSignature(
+ 'Invalid signature.'
+ )
+
+ def setUp(self):
+ super(TestDownloadSignatureVerification, self).setUp()
+ self.flags(verify_glance_signatures=True, group='glance')
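+ # Image properties that signature verification requires.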
+ self.fake_img_props = {
+ 'properties': {
+ 'img_signature': 'signature',
+ 'img_signature_hash_method': 'SHA-224',
+ 'img_signature_certificate_uuid': 'uuid',
+ 'img_signature_key_type': 'RSA-PSS',
+ }
+ }
+ self.fake_img_data = ['A' * 256, 'B' * 256]
+ client = mock.MagicMock()
+ client.call.return_value = self.fake_img_data
+ self.service = glance.GlanceImageService(client)
+
+ @mock.patch('nova.image.glance.LOG')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ @mock.patch('nova.signature_utils.get_verifier')
+ def test_download_with_signature_verification(self,
+ mock_get_verifier,
+ mock_show,
+ mock_log):
+ mock_get_verifier.return_value = self.MockVerifier()
+ mock_show.return_value = self.fake_img_props
+ res = self.service.download(context=None, image_id=None,
+ data=None, dst_path=None)
+ self.assertEqual(self.fake_img_data, res)
+ mock_get_verifier.assert_called_once_with(None, 'uuid', 'SHA-224',
+ 'signature', 'RSA-PSS')
+ mock_log.info.assert_called_once_with(mock.ANY, mock.ANY)
+
+ @mock.patch.object(six.moves.builtins, 'open')
+ @mock.patch('nova.image.glance.LOG')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ @mock.patch('nova.signature_utils.get_verifier')
+ def test_download_dst_path_signature_verification(self,
+ mock_get_verifier,
+ mock_show,
+ mock_log,
+ mock_open):
+ mock_get_verifier.return_value = self.MockVerifier()
+ mock_show.return_value = self.fake_img_props
+ mock_dest = mock.MagicMock()
+ fake_path = 'FAKE_PATH'
+ mock_open.return_value = mock_dest
+ self.service.download(context=None, image_id=None,
+ data=None, dst_path=fake_path)
+ mock_get_verifier.assert_called_once_with(None, 'uuid', 'SHA-224',
+ 'signature', 'RSA-PSS')
+ mock_log.info.assert_called_once_with(mock.ANY, mock.ANY)
+ self.assertEqual(len(self.fake_img_data), mock_dest.write.call_count)
+ self.assertTrue(mock_dest.close.called)
+
+ @mock.patch('nova.image.glance.LOG')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ @mock.patch('nova.signature_utils.get_verifier')
+ def test_download_with_get_verifier_failure(self,
+ mock_get_verifier,
+ mock_show,
+ mock_log):
+ mock_get_verifier.side_effect = exception.SignatureVerificationError(
+ reason='Signature verification '
+ 'failed.'
+ )
+ mock_show.return_value = self.fake_img_props
+ self.assertRaises(exception.SignatureVerificationError,
+ self.service.download,
+ context=None, image_id=None,
+ data=None, dst_path=None)
+ mock_log.error.assert_called_once_with(mock.ANY, mock.ANY)
+
+ @mock.patch('nova.image.glance.LOG')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ @mock.patch('nova.signature_utils.get_verifier')
+ def test_download_with_invalid_signature(self,
+ mock_get_verifier,
+ mock_show,
+ mock_log):
+ mock_get_verifier.return_value = self.BadVerifier()
+ mock_show.return_value = self.fake_img_props
+ self.assertRaises(cryptography.exceptions.InvalidSignature,
+ self.service.download,
+ context=None, image_id=None,
+ data=None, dst_path=None)
+ mock_log.error.assert_called_once_with(mock.ANY, mock.ANY)
+
+ @mock.patch('nova.image.glance.LOG')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_missing_signature_metadata(self,
+ mock_show,
+ mock_log):
+ mock_show.return_value = {'properties': {}}
+ self.assertRaisesRegex(exception.SignatureVerificationError,
+ 'Required image properties for signature '
+ 'verification do not exist. Cannot verify '
+ 'signature. Missing property: .*',
+ self.service.download,
+ context=None, image_id=None,
+ data=None, dst_path=None)
+
+ @mock.patch.object(six.moves.builtins, 'open')
+ @mock.patch('nova.signature_utils.get_verifier')
+ @mock.patch('nova.image.glance.LOG')
+ @mock.patch('nova.image.glance.GlanceImageService.show')
+ def test_download_dst_path_signature_fail(self, mock_show,
+ mock_log, mock_get_verifier,
+ mock_open):
+ mock_get_verifier.return_value = self.BadVerifier()
+ mock_dest = mock.MagicMock()
+ fake_path = 'FAKE_PATH'
+ mock_open.return_value = mock_dest
+ mock_show.return_value = self.fake_img_props
+ self.assertRaises(cryptography.exceptions.InvalidSignature,
+ self.service.download,
+ context=None, image_id=None,
+ data=None, dst_path=fake_path)
+ mock_log.error.assert_called_once_with(mock.ANY, mock.ANY)
+ mock_open.assert_called_once_with(fake_path, 'wb')
+ mock_dest.truncate.assert_called_once_with(0)
+ self.assertTrue(mock_dest.close.called)
+
+
class TestIsImageAvailable(test.NoDBTestCase):
"""Tests the internal _is_image_available function."""
diff --git a/nova/tests/unit/image/test_transfer_modules.py b/nova/tests/unit/image/test_transfer_modules.py
index 34024065a8..af7bc96e90 100644
--- a/nova/tests/unit/image/test_transfer_modules.py
+++ b/nova/tests/unit/image/test_transfer_modules.py
@@ -12,9 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six.moves.urllib.parse as urlparse
import mock
+import six.moves.urllib.parse as urlparse
from nova import exception
from nova.image.download import file as tm_file
diff --git a/nova/tests/unit/keymgr/test_barbican.py b/nova/tests/unit/keymgr/test_barbican.py
index a1c32cfeca..0028acf40d 100644
--- a/nova/tests/unit/keymgr/test_barbican.py
+++ b/nova/tests/unit/keymgr/test_barbican.py
@@ -224,38 +224,33 @@ class BarbicanKeyManagerTestCase(test_key_mgr.KeyManagerTestCase):
self.assertRaises(exception.Forbidden,
self.key_mgr.store_key, None, None)
- @mock.patch('keystoneclient.session.Session')
+ @mock.patch('keystoneauth1.session.Session')
@mock.patch('barbicanclient.client.Client')
def test_get_barbican_client_new(self, mock_barbican, mock_keystone):
manager = self._create_key_manager()
manager._get_barbican_client(self.ctxt)
- self.assertEqual(mock_keystone.call_count, 1)
self.assertEqual(mock_barbican.call_count, 1)
- @mock.patch('keystoneclient.session.Session')
+ @mock.patch('keystoneauth1.session.Session')
@mock.patch('barbicanclient.client.Client')
def test_get_barbican_client_reused(self, mock_barbican, mock_keystone):
manager = self._create_key_manager()
manager._get_barbican_client(self.ctxt)
- self.assertEqual(mock_keystone.call_count, 1)
self.assertEqual(mock_barbican.call_count, 1)
manager._get_barbican_client(self.ctxt)
- self.assertEqual(mock_keystone.call_count, 1)
self.assertEqual(mock_barbican.call_count, 1)
- @mock.patch('keystoneclient.session.Session')
+ @mock.patch('keystoneauth1.session.Session')
@mock.patch('barbicanclient.client.Client')
def test_get_barbican_client_not_reused(self, mock_barbican,
mock_keystone):
manager = self._create_key_manager()
manager._get_barbican_client(self.ctxt)
- self.assertEqual(mock_keystone.call_count, 1)
self.assertEqual(mock_barbican.call_count, 1)
ctxt2 = mock.MagicMock()
ctxt2.auth_token = "fake_token2"
ctxt2.project = "fake_project2"
manager._get_barbican_client(ctxt2)
- self.assertEqual(mock_keystone.call_count, 2)
self.assertEqual(mock_barbican.call_count, 2)
def test_get_barbican_client_null_context(self):
diff --git a/nova/tests/unit/network/security_group/test_neutron_driver.py b/nova/tests/unit/network/security_group/test_neutron_driver.py
index 1d2bd92cdf..79bbea4414 100644
--- a/nova/tests/unit/network/security_group/test_neutron_driver.py
+++ b/nova/tests/unit/network/security_group/test_neutron_driver.py
@@ -25,6 +25,7 @@ from nova import exception
from nova.network.neutronv2 import api as neutronapi
from nova.network.security_group import neutron_driver
from nova.network.security_group import openstack_driver
+from nova import objects
from nova import test
@@ -414,6 +415,12 @@ class TestNeutronDriverWithoutMock(test.NoDBTestCase):
self.assertRaises(exception.Invalid, sg_api.validate_property,
None, 'name', None)
+ def test_populate_security_groups(self):
+ sg_api = neutron_driver.SecurityGroupAPI()
+ r = sg_api.populate_security_groups('ignore')
+ self.assertIsInstance(r, objects.SecurityGroupList)
+ self.assertEqual(0, len(r))
+
class TestGetter(test.NoDBTestCase):
@mock.patch('nova.network.security_group.openstack_driver.'
diff --git a/nova/tests/unit/network/test_api.py b/nova/tests/unit/network/test_api.py
index dadc6d7a35..c6a30be7dc 100644
--- a/nova/tests/unit/network/test_api.py
+++ b/nova/tests/unit/network/test_api.py
@@ -138,7 +138,7 @@ class ApiTestCase(test.TestCase):
self.assertEqual(123, vifs[0].network_id)
self.assertEqual(str(mock.sentinel.network_uuid), vifs[0].net_uuid)
mock_get_by_instance.assert_called_once_with(
- self.context, str(mock.sentinel.inst_uuid), use_slave=False)
+ self.context, str(mock.sentinel.inst_uuid))
mock_get_by_id.assert_called_once_with(self.context, 123,
project_only='allow_none')
@@ -196,6 +196,8 @@ class ApiTestCase(test.TestCase):
def fake_instance_get_by_uuid(context, instance_uuid,
columns_to_join=None,
use_slave=None):
+ if instance_uuid == orig_instance_uuid:
+ self.assertIn('extra.flavor', columns_to_join)
return fake_instance.fake_db_instance(uuid=instance_uuid)
self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
diff --git a/nova/tests/unit/network/test_l3.py b/nova/tests/unit/network/test_l3.py
new file mode 100644
index 0000000000..9df8f49e78
--- /dev/null
+++ b/nova/tests/unit/network/test_l3.py
@@ -0,0 +1,26 @@
+# Copyright 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.network import l3
+from nova import test
+
+
+class L3DriverTestCase(test.NoDBTestCase):
+
+ def test_linuxnetl3_driver_signatures(self):
+ self.assertPublicAPISignatures(l3.L3Driver, l3.LinuxNetL3)
+
+ def test_nulll3_driver_signatures(self):
+ self.assertPublicAPISignatures(l3.L3Driver, l3.NullL3)
diff --git a/nova/tests/unit/network/test_linux_net.py b/nova/tests/unit/network/test_linux_net.py
index f9c2ee607a..0e2b1d28a4 100644
--- a/nova/tests/unit/network/test_linux_net.py
+++ b/nova/tests/unit/network/test_linux_net.py
@@ -394,9 +394,10 @@ class LinuxNetworkTestCase(test.NoDBTestCase):
def get_instance(_context, instance_id):
return instances[instance_id]
- self.stubs.Set(db, 'virtual_interface_get_by_instance', get_vifs)
- self.stubs.Set(db, 'instance_get', get_instance)
- self.stubs.Set(db, 'network_get_associated_fixed_ips', get_associated)
+ self.stub_out('nova.db.virtual_interface_get_by_instance', get_vifs)
+ self.stub_out('nova.db.instance_get', get_instance)
+ self.stub_out('nova.db.network_get_associated_fixed_ips',
+ get_associated)
def _test_add_snat_rule(self, expected, is_external):
@@ -575,7 +576,7 @@ class LinuxNetworkTestCase(test.NoDBTestCase):
data = get_associated(self.context, 0, address=lease[2])[0]
self.assertTrue(data['allocated'])
self.assertTrue(data['leased'])
- self.assertTrue(lease[0] > seconds_since_epoch)
+ self.assertTrue(int(lease[0]) > seconds_since_epoch)
self.assertEqual(data['vif_address'], lease[1])
self.assertEqual(data['address'], lease[2])
self.assertEqual(data['instance_hostname'], lease[3])
@@ -592,7 +593,7 @@ class LinuxNetworkTestCase(test.NoDBTestCase):
lease = lease.split(' ')
data = get_associated(self.context, 1, address=lease[2])[0]
self.assertTrue(data['leased'])
- self.assertTrue(lease[0] > seconds_since_epoch)
+ self.assertTrue(int(lease[0]) > seconds_since_epoch)
self.assertEqual(data['vif_address'], lease[1])
self.assertEqual(data['address'], lease[2])
self.assertEqual(data['instance_hostname'], lease[3])
@@ -748,7 +749,7 @@ class LinuxNetworkTestCase(test.NoDBTestCase):
self.stubs.Set(linux_net, '_add_dhcp_mangle_rule',
fake_add_dhcp_mangle_rule)
- self.stubs.Set(os, 'chmod', lambda *a, **kw: None)
+ self.stub_out('os.chmod', lambda *a, **kw: None)
self.stubs.Set(linux_net, 'write_to_file', lambda *a, **kw: None)
self.stubs.Set(linux_net, '_dnsmasq_pid_for', lambda *a, **kw: None)
dev = 'br100'
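The stubs.Set-to-stub_out migration in this file swaps mox-style attribute stubbing for dotted-path monkey patching; a hedged sketch of how such a stub_out helper is commonly implemented in OpenStack test base classes follows (assumed to wrap fixtures.MonkeyPatch; nova's actual helper may differ). The int(lease[0]) casts in the same hunks are needed because dnsmasq lease lines are whitespace-split strings, and comparing a string against an integer raises TypeError on Python 3.

    import os

    import fixtures
    import testtools

    class ExampleTestCase(testtools.TestCase):
        def stub_out(self, old, new):
            """Replace the attribute at dotted path 'old' for this test."""
            self.useFixture(fixtures.MonkeyPatch(old, new))

        def test_chmod_stubbed(self):
            calls = []
            self.stub_out('os.chmod', lambda *a, **kw: calls.append(a))
            os.chmod('/does/not/exist', 0o600)  # hits the stub, not the OS
            self.assertEqual(1, len(calls))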
diff --git a/nova/tests/unit/network/test_manager.py b/nova/tests/unit/network/test_manager.py
index 6a85d83177..4274a8b550 100644
--- a/nova/tests/unit/network/test_manager.py
+++ b/nova/tests/unit/network/test_manager.py
@@ -884,7 +884,7 @@ class VlanNetworkTestCase(test.TestCase):
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(),
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -918,7 +918,7 @@ class VlanNetworkTestCase(test.TestCase):
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(),
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -1604,7 +1604,7 @@ class VlanNetworkTestCase(test.TestCase):
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.instance_get_by_uuid(mox.IgnoreArg(),
- mox.IgnoreArg(), use_slave=False,
+ mox.IgnoreArg(),
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
@@ -1688,7 +1688,7 @@ class VlanNetworkTestCase(test.TestCase):
def vif_get(_context, _vif_id):
return vifs[0]
- self.stubs.Set(db, 'virtual_interface_get', vif_get)
+ self.stub_out('nova.db.virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
@@ -1814,7 +1814,7 @@ class VlanNetworkTestCase(test.TestCase):
def vif_get(_context, _vif_id):
return None
- self.stubs.Set(db, 'virtual_interface_get', vif_get)
+ self.stub_out('nova.db.virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
@@ -1946,7 +1946,7 @@ class CommonNetworkTestCase(test.TestCase):
def dnsdomain_get(context, instance_domain):
return domains.get(instance_domain)
- self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
+ self.stub_out('nova.db.dnsdomain_get', dnsdomain_get)
fake_instance = {'uuid': FAKEUUID,
'availability_zone': az}
@@ -3207,7 +3207,7 @@ class FloatingIPTestCase(test.TestCase):
# SQLite doesn't seem to honor the uniqueness constraint on the
# address column, so fake the collision-avoidance here
- def fake_vif_save(vif):
+ def fake_vif_save(vif, session=None):
if vif.address == crash_test_dummy_vif['address']:
raise db_exc.DBError("If you're smart, you'll retry!")
# NOTE(russellb) The VirtualInterface object requires an ID to be
diff --git a/nova/tests/unit/network/test_neutronv2.py b/nova/tests/unit/network/test_neutronv2.py
index ad5080f302..85304c3ff5 100644
--- a/nova/tests/unit/network/test_neutronv2.py
+++ b/nova/tests/unit/network/test_neutronv2.py
@@ -18,8 +18,8 @@ import collections
import copy
import uuid
-from keystoneclient.auth import base as ksc_auth_base
-from keystoneclient.fixture import V2Token
+from keystoneauth1.fixture import V2Token
+from keystoneauth1 import loading as ks_loading
import mock
from mox3 import mox
from neutronclient.common import exceptions
@@ -192,6 +192,7 @@ class TestNeutronv2Base(test.TestCase):
self.instance = {'project_id': self.tenant_id,
'uuid': str(uuid.uuid4()),
'display_name': 'test_instance',
+ 'hostname': 'test-instance',
'availability_zone': 'nova',
'host': 'some_host',
'info_cache': {'network_info': []},
@@ -240,10 +241,16 @@ class TestNeutronv2Base(test.TestCase):
# A network that is both shared and external
self.nets10 = [{'id': 'net_id', 'name': 'net_name',
'router:external': True, 'shared': True}]
+ # A network with non-blank dns_domain to test _update_port_dns_name
+ self.nets11 = [{'id': 'my_netid1',
+ 'name': 'my_netname1',
+ 'subnets': ['mysubnid1'],
+ 'tenant_id': 'my_tenantid',
+ 'dns_domain': 'my-domain.org.'}]
self.nets = [self.nets1, self.nets2, self.nets3, self.nets4,
self.nets5, self.nets6, self.nets7, self.nets8,
- self.nets9, self.nets10]
+ self.nets9, self.nets10, self.nets11]
self.port_address = '10.0.1.2'
self.port_data1 = [{'network_id': 'my_netid1',
@@ -383,6 +390,11 @@ class TestNeutronv2Base(test.TestCase):
if dhcp_options is not None:
has_extra_dhcp_opts = True
+ has_dns_extension = False
+ if kwargs.get('dns_extension'):
+ has_dns_extension = True
+ api.extensions[constants.DNS_INTEGRATION] = 1
+
if kwargs.get('portbinding'):
has_portbinding = True
api.extensions[constants.PORTBINDING_EXT] = 1
@@ -398,6 +410,10 @@ class TestNeutronv2Base(test.TestCase):
api._has_port_binding_extension(mox.IgnoreArg(),
neutron=self.moxed_client,
refresh_cache=True).AndReturn(has_portbinding)
+ elif has_dns_extension:
+ self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
+ api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
+ neutron=self.moxed_client)
else:
self.mox.StubOutWithMock(api, '_refresh_neutron_extensions_cache')
api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
@@ -444,7 +460,9 @@ class TestNeutronv2Base(test.TestCase):
'mac_address': 'my_mac1',
'device_id': kwargs.get('_device') and
self.instance2.uuid or
- ''}})
+ '',
+ 'dns_name': kwargs.get('_dns_name') or
+ ''}})
ports[request.port_id] = self.port_data1[0]
request.network_id = 'my_netid1'
if macs is not None:
@@ -508,17 +526,23 @@ class TestNeutronv2Base(test.TestCase):
if has_portbinding:
port_req_body['port']['binding:host_id'] = (
self.instance.get('host'))
- if not has_portbinding:
+ if has_dns_extension and not network.get('dns_domain'):
+ port_req_body['port']['dns_name'] = self.instance.hostname
+ if not has_portbinding and not has_dns_extension:
api._populate_neutron_extension_values(mox.IgnoreArg(),
self.instance, mox.IgnoreArg(),
- mox.IgnoreArg(), neutron=self.moxed_client,
+ mox.IgnoreArg(), network=network,
+ neutron=self.moxed_client,
bind_host_id=None).AndReturn(None)
- else:
+ elif has_portbinding:
# since _populate_neutron_extension_values() will call
# _has_port_binding_extension()
api._has_port_binding_extension(mox.IgnoreArg(),
neutron=self.moxed_client).\
AndReturn(has_portbinding)
+ else:
+ api._refresh_neutron_extensions_cache(mox.IgnoreArg(),
+ neutron=self.moxed_client)
if request.port_id:
port = ports[request.port_id]
self.moxed_client.update_port(request.port_id,
@@ -551,6 +575,24 @@ class TestNeutronv2Base(test.TestCase):
MyComparator(port_req_body)).AndReturn(res_port)
ports_in_requested_net_order.append(res_port['port']['id'])
+ if has_portbinding and has_dns_extension:
+ api._has_port_binding_extension(mox.IgnoreArg()).\
+ AndReturn(has_portbinding)
+ if net_idx == 11:
+ port_req_body_dns = {
+ 'port': {
+ 'dns_name': self.instance.hostname
+ }
+ }
+ res_port_dns = {
+ 'port': {
+ 'id': ports_in_requested_net_order[-1]
+ }
+ }
+ self.moxed_client.update_port(
+ ports_in_requested_net_order[-1],
+ MyComparator(port_req_body_dns)
+ ).AndReturn(res_port_dns)
nets_in_requested_net_order.append(network)
api.get_instance_nw_info(mox.IgnoreArg(),
@@ -727,6 +769,21 @@ class TestNeutronv2(TestNeutronv2Base):
None,
None)
+ def test_get_instance_nw_info_ignores_neutron_ports_empty_cache(self):
+ # Tests that ports returned from neutron that match the same
+ # instance_id/device_id are ignored when the instance info cache is
+ # empty.
+ port_data2 = copy.copy(self.port_data2)
+
+ # set device_id on the ports to be the same.
+ port_data2[1]['device_id'] = port_data2[0]['device_id']
+ network_cache = {'info_cache': {'network_info': []}}
+
+ self._fake_get_instance_nw_info_helper(network_cache,
+ port_data2,
+ None,
+ None)
+
def _fake_get_instance_nw_info_helper(self, network_cache,
current_neutron_ports,
networks=None, port_ids=None):
@@ -766,8 +823,26 @@ class TestNeutronv2(TestNeutronv2Base):
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces]
if networks is None:
- self.moxed_client.list_networks(
- id=net_ids).AndReturn({'networks': nets})
+ if ifaces:
+ self.moxed_client.list_networks(
+ id=net_ids).AndReturn({'networks': nets})
+ else:
+ non_shared_nets = [
+ {'id': iface['network']['id'],
+ 'name': iface['network']['label'],
+ 'tenant_id': iface['network']['meta']['tenant_id']}
+ for iface in ifaces if not iface['shared']]
+ shared_nets = [
+ {'id': iface['network']['id'],
+ 'name': iface['network']['label'],
+ 'tenant_id': iface['network']['meta']['tenant_id']}
+ for iface in ifaces if iface['shared']]
+ self.moxed_client.list_networks(
+ shared=False,
+ tenant_id=self.instance['project_id']
+ ).AndReturn({'networks': non_shared_nets})
+ self.moxed_client.list_networks(
+ shared=True).AndReturn({'networks': shared_nets})
else:
networks = networks + [
dict(id=iface['network']['id'],
@@ -1107,7 +1182,7 @@ class TestNeutronv2(TestNeutronv2Base):
port = {'id': 'portid_' + network['id']}
api._populate_neutron_extension_values(self.context,
- self.instance, None, binding_port_req_body,
+ self.instance, None, binding_port_req_body, network=network,
neutron=self.moxed_client, bind_host_id=None).AndReturn(None)
if index == 0:
self.moxed_client.create_port(
@@ -1162,7 +1237,8 @@ class TestNeutronv2(TestNeutronv2Base):
}
api._populate_neutron_extension_values(self.context,
self.instance, None, binding_port_req_body,
- neutron=self.moxed_client, bind_host_id=None).AndReturn(None)
+ network=self.nets2[0], neutron=self.moxed_client,
+ bind_host_id=None).AndReturn(None)
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(
Exception("fail to create port"))
@@ -2483,6 +2559,38 @@ class TestNeutronv2(TestNeutronv2Base):
self.assertNotIn('should_create_bridge', net)
self.assertEqual('port-id', iid)
+ def _test_nw_info_build_custom_bridge(self, vif_type, extra_details=None):
+ fake_port = {
+ 'fixed_ips': [{'ip_address': '1.1.1.1'}],
+ 'id': 'port-id',
+ 'network_id': 'net-id',
+ 'binding:vif_type': vif_type,
+ 'binding:vif_details': {
+ model.VIF_DETAILS_BRIDGE_NAME: 'custom-bridge',
+ }
+ }
+ if extra_details:
+ fake_port['binding:vif_details'].update(extra_details)
+ fake_subnets = [model.Subnet(cidr='1.0.0.0/8')]
+ fake_nets = [{'id': 'net-id', 'name': 'foo', 'tenant_id': 'tenant'}]
+ api = neutronapi.API()
+ self.mox.ReplayAll()
+ neutronapi.get_client('fake')
+ net, iid = api._nw_info_build_network(fake_port, fake_nets,
+ fake_subnets)
+ self.assertNotEqual(CONF.neutron.ovs_bridge, net['bridge'])
+ self.assertEqual('custom-bridge', net['bridge'])
+
+ def test_nw_info_build_custom_ovs_bridge(self):
+ self._test_nw_info_build_custom_bridge(model.VIF_TYPE_OVS)
+
+ def test_nw_info_build_custom_ovs_bridge_vhostuser(self):
+ self._test_nw_info_build_custom_bridge(model.VIF_TYPE_VHOSTUSER,
+ {model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True})
+
+ def test_nw_info_build_custom_lb_bridge(self):
+ self._test_nw_info_build_custom_bridge(model.VIF_TYPE_BRIDGE)
+
def test_build_network_info_model(self):
api = neutronapi.API()
@@ -2668,6 +2776,8 @@ class TestNeutronv2(TestNeutronv2Base):
mock_nw_info_build_network,
mock_nw_info_get_ips,
mock_nw_info_get_subnets):
+ # An empty instance info network cache should not be populated from
+ # ports found in Neutron.
api = neutronapi.API()
fake_inst = objects.Instance()
@@ -2696,7 +2806,7 @@ class TestNeutronv2(TestNeutronv2Base):
tenant_id='fake', device_id='uuid').AndReturn(
{'ports': fake_ports})
- mock_gather_port_ids_and_networks.return_value = (None, None)
+ mock_gather_port_ids_and_networks.return_value = ([], [])
mock_get_preexisting_port_ids.return_value = []
mock_nw_info_build_network.return_value = (None, None)
mock_nw_info_get_ips.return_value = []
@@ -2707,7 +2817,7 @@ class TestNeutronv2(TestNeutronv2Base):
nw_infos = api._build_network_info_model(
self.context, fake_inst)
- self.assertEqual(1, len(nw_infos))
+ self.assertEqual(0, len(nw_infos))
def test_get_subnets_from_port(self):
api = neutronapi.API()
@@ -2850,6 +2960,58 @@ class TestNeutronv2WithMock(test.TestCase):
api.get_instance_nw_info, 'context', instance)
mock_lock.assert_called_once_with('refresh_cache-%s' % instance.uuid)
+ @mock.patch('nova.network.neutronv2.api.LOG')
+ def test_get_instance_nw_info_verify_duplicates_ignored(self, mock_log):
+        """Test that the networks and port_ids returned from
+        _gather_port_ids_and_networks don't contain any duplicates.
+
+ The test fakes an instance with two ports connected to two networks.
+        The _gather_port_ids_and_networks method is called with the
+        instance, a list of port ids in which one id is already configured
+        on the instance (== duplicate #1), and a list of networks that
+        already contains a network to which an instance port is connected
+        (== duplicate #2).
+
+        All in all, we expect the resulting port ids list to contain 3
+        items (["instance_port_1", "port_1", "port_2"]), the resulting
+        networks list to contain 3 items (["net_1", "net_2",
+        "instance_network_1"]), and the duplicate warning message to be
+        logged twice (once for "duplicate #1" and once for "duplicate #2").
+ """
+
+ networks = [model.Network(id="net_1"),
+ model.Network(id="net_2")]
+ port_ids = ["port_1", "port_2"]
+
+ instance_networks = [{"id": "instance_network_1",
+ "name": "fake_network",
+ "tenant_id": "fake_tenant_id"}]
+ instance_port_ids = ["instance_port_1"]
+
+ network_info = model.NetworkInfo(
+ [{'id': port_ids[0],
+ 'network': networks[0]},
+ {'id': instance_port_ids[0],
+ 'network': model.Network(
+ id=instance_networks[0]["id"],
+ label=instance_networks[0]["name"],
+ meta={"tenant_id": instance_networks[0]["tenant_id"]})}]
+ )
+
+ instance_uuid = uuid.uuid4()
+ instance = objects.Instance(uuid=instance_uuid,
+ info_cache=objects.InstanceInfoCache(
+ context=self.context,
+ instance_uuid=instance_uuid,
+ network_info=network_info))
+
+ new_networks, new_port_ids = self.api._gather_port_ids_and_networks(
+ self.context, instance, networks, port_ids)
+
+ self.assertEqual(new_networks, networks + instance_networks)
+ self.assertEqual(new_port_ids, instance_port_ids + port_ids)
+ self.assertEqual(2, mock_log.warning.call_count)
+
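A hypothetical sketch (not nova's actual implementation) of the de-duplication behavior the docstring above describes: entries already known from the instance's info cache are kept first, and requested entries already present are skipped with a warning instead of being appended twice.

    def gather_ids(cached_ids, requested_ids, warn):
        """Merge requested ids into cached ids, warning on duplicates."""
        result = list(cached_ids)
        for item in requested_ids:
            if item in result:
                warn('Duplicate entry %s ignored' % item)
                continue
            result.append(item)
        return result

    merged = gather_ids(['instance_port_1'],
                        ['port_1', 'instance_port_1', 'port_2'],
                        print)
    assert merged == ['instance_port_1', 'port_1', 'port_2']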
@mock.patch('oslo_concurrency.lockutils.lock')
@mock.patch.object(neutronapi.API, '_get_instance_nw_info')
@mock.patch('nova.network.base_api.update_instance_cache_with_nw_info')
@@ -3838,6 +4000,89 @@ class TestNeutronv2ExtraDhcpOpts(TestNeutronv2Base):
self._allocate_for_instance(1, dhcp_options=dhcp_opts)
+class TestNeutronv2NeutronHostnameDNS(TestNeutronv2Base):
+ def setUp(self):
+ super(TestNeutronv2NeutronHostnameDNS, self).setUp()
+ neutronapi.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
+ self.moxed_client)
+
+ def test_allocate_for_instance_create_port(self):
+ # The port's dns_name attribute should be set by the port create
+ # request in allocate_for_instance
+ self._allocate_for_instance(1, dns_extension=True)
+
+ def test_allocate_for_instance_with_requested_port(self):
+ # The port's dns_name attribute should be set by the port update
+ # request in allocate_for_instance
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='my_portid1')])
+ self._allocate_for_instance(net_idx=1, dns_extension=True,
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_port_dns_name_preset_equal_hostname(self):
+ # The port's dns_name attribute should be set by the port update
+ # request in allocate_for_instance. The port's dns_name was preset by
+ # the user with a value equal to the instance's hostname
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='my_portid1')])
+ self._allocate_for_instance(net_idx=1, dns_extension=True,
+ requested_networks=requested_networks,
+ _dns_name='test-instance')
+
+ def test_allocate_for_instance_port_dns_name_preset_noteq_hostname(self):
+ # If a pre-existing port has dns_name set, an exception should be
+ # raised if dns_name is not equal to the instance's hostname
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='my_portid1')])
+ api = self._stub_allocate_for_instance(
+ requested_networks=requested_networks,
+ dns_extension=True,
+ _break='pre_list_networks',
+ _dns_name='my-instance')
+ self.assertRaises(exception.PortNotUsableDNS,
+ api.allocate_for_instance, self.context,
+ self.instance, requested_networks=requested_networks)
+
+
+class TestNeutronv2NeutronHostnameDNSPortbinding(TestNeutronv2Base):
+
+ def test_allocate_for_instance_create_port(self):
+ # The port's dns_name attribute should be set by the port create
+ # request in allocate_for_instance
+ self._allocate_for_instance(1, portbinding=True, dns_extension=True,
+ bind_host_id=self.instance.get('host'))
+
+ def test_allocate_for_instance_with_requested_port(self):
+ # The port's dns_name attribute should be set by the port update
+ # request in allocate_for_instance
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='my_portid1')])
+ self._allocate_for_instance(net_idx=1, dns_extension=True,
+ portbinding=True,
+ bind_host_id=self.instance.get('host'),
+ requested_networks=requested_networks)
+
+ def test_allocate_for_instance_create_port_with_dns_domain(self):
+ # The port's dns_name attribute should be set by the port update
+ # request in _update_port_dns_name. This should happen only when the
+ # port binding extension is enabled and the port's network has a
+ # non-blank dns_domain attribute
+ self._allocate_for_instance(11, portbinding=True, dns_extension=True,
+ bind_host_id=self.instance.get('host'))
+
+ def test_allocate_for_instance_with_requested_port_with_dns_domain(self):
+ # The port's dns_name attribute should be set by the port update
+ # request in _update_port_dns_name. This should happen only when the
+ # port binding extension is enabled and the port's network has a
+ # non-blank dns_domain attribute
+ requested_networks = objects.NetworkRequestList(
+ objects=[objects.NetworkRequest(port_id='my_portid1')])
+ self._allocate_for_instance(net_idx=11, dns_extension=True,
+ portbinding=True,
+ bind_host_id=self.instance.get('host'),
+ requested_networks=requested_networks)
+
+
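A hedged sketch of the branching these two DNS test classes pin down (function and parameter names are illustrative; the real logic lives in nova.network.neutronv2.api): with the DNS extension enabled, dns_name is set in the original create/update request unless the network carries a dns_domain, in which case a second update_port call is expected after port binding.

    def apply_dns_name(port_req_body, network, hostname, update_port):
        """Illustrative: decide where the port's dns_name gets set."""
        if not network.get('dns_domain'):
            # No dns_domain: dns_name rides along in the original request.
            port_req_body['port']['dns_name'] = hostname
            return None
        # dns_domain present: issue a follow-up update carrying dns_name,
        # after the port binding extension has done its work.
        return update_port({'port': {'dns_name': hostname}})

    body = {'port': {}}
    apply_dns_name(body, {'id': 'my_netid1'}, 'test-instance',
                   lambda req: req)
    assert body['port']['dns_name'] == 'test-instance'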
class TestNeutronClientForAdminScenarios(test.NoDBTestCase):
def setUp(self):
@@ -3850,8 +4095,8 @@ class TestNeutronClientForAdminScenarios(test.NoDBTestCase):
# these are run (due to global conf object) and not be fully
# representative of a "clean" slate at the start of a test.
self.config_fixture = self.useFixture(config_fixture.Config())
- plugin_class = ksc_auth_base.get_plugin_class('v2password')
- plugin_class.register_conf_options(self.config_fixture, 'neutron')
+ oslo_opts = ks_loading.get_auth_plugin_conf_options('v2password')
+ self.config_fixture.register_opts(oslo_opts, 'neutron')
@requests_mock.mock()
def _test_get_client_for_admin(self, req_mock,
@@ -3862,7 +4107,7 @@ class TestNeutronClientForAdminScenarios(test.NoDBTestCase):
req_mock.post(auth_url + '/tokens', json=token_resp)
self.flags(url='http://anyhost/', group='neutron')
- self.flags(auth_plugin='v2password', group='neutron')
+ self.flags(auth_type='v2password', group='neutron')
self.flags(auth_url=auth_url, group='neutron')
self.flags(timeout=30, group='neutron')
if use_id:
@@ -3907,11 +4152,15 @@ class TestNeutronClientForAdminScenarios(test.NoDBTestCase):
self.assertIsNone(admin_auth.tenant_id)
self.assertIsNone(admin_auth.user_id)
- self.assertEqual(CONF.neutron.timeout, neutronapi._SESSION.timeout)
+ self.assertEqual(CONF.neutron.timeout,
+ neutronapi._SESSION.timeout)
- self.assertEqual(token_value, context_client.httpclient.auth.token)
- self.assertEqual(CONF.neutron.url,
- context_client.httpclient.auth.endpoint)
+ self.assertEqual(
+ token_value,
+ context_client.httpclient.auth.get_token(neutronapi._SESSION))
+ self.assertEqual(
+ CONF.neutron.url,
+ context_client.httpclient.get_endpoint())
def test_get_client_for_admin(self):
self._test_get_client_for_admin()
diff --git a/nova/tests/unit/network/test_rpcapi.py b/nova/tests/unit/network/test_rpcapi.py
index 6929488316..03d7363e91 100644
--- a/nova/tests/unit/network/test_rpcapi.py
+++ b/nova/tests/unit/network/test_rpcapi.py
@@ -94,6 +94,7 @@ class NetworkRpcAPITestCase(test.NoDBTestCase):
version_check = [
'deallocate_for_instance', 'deallocate_fixed_ip',
'allocate_for_instance', 'release_fixed_ip', 'set_network_host',
+ 'setup_networks_on_host'
]
if method in version_check:
rpcapi.client.can_send_version(mox.IgnoreArg()).AndReturn(True)
@@ -194,8 +195,36 @@ class NetworkRpcAPITestCase(test.NoDBTestCase):
domain='fake_domain', project='fake_project')
def test_setup_networks_on_host(self):
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ instance = fake_instance.fake_instance_obj(ctxt)
self._test_network_api('setup_networks_on_host', rpc_method='call',
- instance_id='fake_id', host='fake_host', teardown=False)
+ instance_id=instance.id, host='fake_host', teardown=False,
+ instance=instance, version='1.16')
+
+ def test_setup_networks_on_host_v1_0(self):
+ ctxt = context.RequestContext('fake_user', 'fake_project')
+ instance = fake_instance.fake_instance_obj(ctxt)
+ host = 'fake_host'
+ teardown = True
+ rpcapi = network_rpcapi.NetworkAPI()
+ call_mock = mock.Mock()
+ cctxt_mock = mock.Mock(call=call_mock)
+ with test.nested(
+ mock.patch.object(rpcapi.client, 'can_send_version',
+ return_value=False),
+ mock.patch.object(rpcapi.client, 'prepare',
+ return_value=cctxt_mock)
+ ) as (
+ can_send_mock, prepare_mock
+ ):
+ rpcapi.setup_networks_on_host(ctxt, instance.id, host, teardown,
+ instance)
+ # assert our mocks were called as expected
+ can_send_mock.assert_called_once_with('1.16')
+ prepare_mock.assert_called_once_with(version='1.0')
+ call_mock.assert_called_once_with(ctxt, 'setup_networks_on_host',
+ host=host, teardown=teardown,
+ instance_id=instance.id)
def test_lease_fixed_ip(self):
self._test_network_api('lease_fixed_ip', rpc_method='cast',
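The new test_setup_networks_on_host_v1_0 above pins the classic oslo.messaging version-negotiation pattern: try the newest version, and fall back to the 1.0 signature (instance id instead of instance object) when the peer can't accept it. A minimal sketch of that pattern, with an illustrative client object rather than nova's actual class:

    class ExampleNetworkRpcAPI(object):
        """Illustrative RPC facade; not nova's actual network rpcapi."""

        def __init__(self, client):
            self.client = client

        def setup_networks_on_host(self, ctxt, instance_id, host,
                                   teardown, instance):
            version = '1.16'
            kwargs = {'host': host, 'teardown': teardown,
                      'instance': instance}
            if not self.client.can_send_version(version):
                # Older peers only understand the 1.0 signature.
                version = '1.0'
                kwargs = {'host': host, 'teardown': teardown,
                          'instance_id': instance_id}
            cctxt = self.client.prepare(version=version)
            return cctxt.call(ctxt, 'setup_networks_on_host', **kwargs)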
diff --git a/nova/tests/unit/objects/test_aggregate.py b/nova/tests/unit/objects/test_aggregate.py
index 6e74395cee..70c0c3fa20 100644
--- a/nova/tests/unit/objects/test_aggregate.py
+++ b/nova/tests/unit/objects/test_aggregate.py
@@ -20,6 +20,7 @@ from nova import exception
from nova.objects import aggregate
from nova.tests.unit import fake_notifier
from nova.tests.unit.objects import test_objects
+from nova.tests import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
@@ -29,6 +30,7 @@ fake_aggregate = {
'deleted_at': None,
'deleted': False,
'id': 123,
+ 'uuid': uuidsentinel.fake_aggregate,
'name': 'fake-aggregate',
'hosts': ['foo', 'bar'],
'metadetails': {'this': 'that'},
@@ -45,25 +47,43 @@ class _TestAggregateObject(object):
agg = aggregate.Aggregate.get_by_id(self.context, 123)
self.compare_obj(agg, fake_aggregate, subs=SUBS)
+ @mock.patch('nova.objects.Aggregate.save')
+ @mock.patch('nova.db.aggregate_get')
+ def test_load_allocates_uuid(self, mock_get, mock_save):
+ fake_agg = dict(fake_aggregate)
+ del fake_agg['uuid']
+ mock_get.return_value = fake_agg
+ uuid = uuidsentinel.aggregate
+ with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_g:
+ mock_g.return_value = uuid
+ obj = aggregate.Aggregate.get_by_id(self.context, 123)
+ mock_g.assert_called_once_with()
+ self.assertEqual(uuid, obj.uuid)
+ mock_save.assert_called_once_with()
+
def test_create(self):
self.mox.StubOutWithMock(db, 'aggregate_create')
- db.aggregate_create(self.context, {'name': 'foo'},
+ db.aggregate_create(self.context, {'name': 'foo',
+ 'uuid': uuidsentinel.fake_agg},
metadata={'one': 'two'}).AndReturn(fake_aggregate)
self.mox.ReplayAll()
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
agg.metadata = {'one': 'two'}
+ agg.uuid = uuidsentinel.fake_agg
agg.create()
self.compare_obj(agg, fake_aggregate, subs=SUBS)
def test_recreate_fails(self):
self.mox.StubOutWithMock(db, 'aggregate_create')
- db.aggregate_create(self.context, {'name': 'foo'},
+ db.aggregate_create(self.context, {'name': 'foo',
+ 'uuid': uuidsentinel.fake_agg},
metadata={'one': 'two'}).AndReturn(fake_aggregate)
self.mox.ReplayAll()
agg = aggregate.Aggregate(context=self.context)
agg.name = 'foo'
agg.metadata = {'one': 'two'}
+ agg.uuid = uuidsentinel.fake_agg
agg.create()
self.assertRaises(exception.ObjectActionError, agg.create)
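test_load_allocates_uuid above captures an online-migration idiom: when a row loaded from the database predates the uuid column, the object backfills one and saves immediately. A self-contained sketch of the idea (illustrative, not nova's _from_db_object):

    from oslo_utils import uuidutils

    class FakeAggregate(object):
        uuid = None
        saved = False

        def save(self):
            self.saved = True

    def from_db_object(obj, db_row):
        obj.uuid = db_row.get('uuid')
        if not obj.uuid:
            # Backfill rows created before the uuid column existed.
            obj.uuid = uuidutils.generate_uuid()
            obj.save()
        return obj

    agg = from_db_object(FakeAggregate(), {'id': 123})
    assert agg.saved and uuidutils.is_uuid_like(agg.uuid)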
diff --git a/nova/tests/unit/objects/test_compute_node.py b/nova/tests/unit/objects/test_compute_node.py
index 7f549044ca..38dd0145d1 100644
--- a/nova/tests/unit/objects/test_compute_node.py
+++ b/nova/tests/unit/objects/test_compute_node.py
@@ -29,6 +29,7 @@ from nova.objects import hv_spec
from nova.objects import service
from nova.tests.unit import fake_pci_device_pools
from nova.tests.unit.objects import test_objects
+from nova.tests import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
fake_stats = {'num_foo': '10'}
@@ -61,6 +62,7 @@ fake_compute_node = {
'deleted_at': None,
'deleted': False,
'id': 123,
+ 'uuid': uuidsentinel.fake_compute_node,
'service_id': None,
'host': 'fake',
'vcpus': 4,
@@ -86,6 +88,7 @@ fake_compute_node = {
'pci_stats': fake_pci,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5,
+ 'disk_allocation_ratio': 1.0,
}
# FIXME(sbauza) : For compatibility checking, to be removed once we are sure
# that all computes are running latest DB version with host field in it.
@@ -154,6 +157,7 @@ class _TestComputeNodeObject(object):
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
+ self.assertNotIn('uuid', compute.obj_what_changed())
@mock.patch.object(objects.Service, 'get_by_id')
@mock.patch.object(db, 'compute_node_get')
@@ -194,77 +198,6 @@ class _TestComputeNodeObject(object):
subs=self.subs(),
comparators=self.comparators())
- @mock.patch('nova.objects.Service.get_by_id')
- @mock.patch('nova.db.compute_nodes_get_by_service_id')
- @mock.patch('nova.objects.Service.get_by_compute_host')
- @mock.patch.object(db, 'compute_node_get_by_host_and_nodename')
- def test_get_by_host_and_nodename_with_old_compute(self, cn_get_by_h_and_n,
- svc_get_by_ch,
- cn_get_by_svc_id,
- svc_get_by_id):
- cn_get_by_h_and_n.side_effect = exception.ComputeHostNotFound(
- host='fake')
- fake_service = service.Service(id=123)
- fake_service.host = 'fake'
- svc_get_by_ch.return_value = fake_service
- cn_get_by_svc_id.return_value = [fake_old_compute_node]
- svc_get_by_id.return_value = fake_service
-
- compute = compute_node.ComputeNode.get_by_host_and_nodename(
- self.context, 'fake', 'vm.danplanet.com')
- # NOTE(sbauza): Result is still converted to new style Compute
- self.compare_obj(compute, fake_compute_node,
- subs=self.subs(),
- comparators=self.comparators())
-
- @mock.patch('nova.objects.Service.get_by_id')
- @mock.patch('nova.db.compute_nodes_get_by_service_id')
- @mock.patch('nova.objects.Service.get_by_compute_host')
- @mock.patch.object(db, 'compute_node_get_by_host_and_nodename')
- def test_get_by_host_and_nodename_not_found(self, cn_get_by_h_and_n,
- svc_get_by_ch,
- cn_get_by_svc_id,
- svc_get_by_id):
- cn_get_by_h_and_n.side_effect = exception.ComputeHostNotFound(
- host='fake')
- fake_service = service.Service(id=123)
- fake_service.host = 'fake'
- another_node = fake_old_compute_node.copy()
- another_node['hypervisor_hostname'] = 'elsewhere'
- svc_get_by_ch.return_value = fake_service
- cn_get_by_svc_id.return_value = [another_node]
- svc_get_by_id.return_value = fake_service
-
- self.assertRaises(exception.ComputeHostNotFound,
- compute_node.ComputeNode.get_by_host_and_nodename,
- self.context, 'fake', 'vm.danplanet.com')
-
- @mock.patch('nova.objects.Service.get_by_id')
- @mock.patch('nova.db.compute_nodes_get_by_service_id')
- @mock.patch('nova.objects.Service.get_by_compute_host')
- @mock.patch.object(db, 'compute_node_get_by_host_and_nodename')
- def test_get_by_host_and_nodename_good_and_bad(self, cn_get_by_h_and_n,
- svc_get_by_ch,
- cn_get_by_svc_id,
- svc_get_by_id):
- cn_get_by_h_and_n.side_effect = exception.ComputeHostNotFound(
- host='fake')
- fake_service = service.Service(id=123)
- fake_service.host = 'fake'
- bad_node = fake_old_compute_node.copy()
- bad_node['hypervisor_hostname'] = 'elsewhere'
- good_node = fake_old_compute_node.copy()
- svc_get_by_ch.return_value = fake_service
- cn_get_by_svc_id.return_value = [bad_node, good_node]
- svc_get_by_id.return_value = fake_service
-
- compute = compute_node.ComputeNode.get_by_host_and_nodename(
- self.context, 'fake', 'vm.danplanet.com')
- # NOTE(sbauza): Result is still converted to new style Compute
- self.compare_obj(compute, good_node,
- subs=self.subs(),
- comparators=self.comparators())
-
@mock.patch('nova.db.compute_node_get_all_by_host')
def test_get_first_node_by_host_for_old_compat(
self, cn_get_all_by_host):
@@ -300,26 +233,44 @@ class _TestComputeNodeObject(object):
'stats': fake_stats_db_format,
'host_ip': fake_host_ip,
'supported_instances': fake_supported_hv_specs_db_format,
+ 'uuid': uuidsentinel.fake_compute_node,
}).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.service_id = 456
+ compute.uuid = uuidsentinel.fake_compute_node
compute.stats = fake_stats
# NOTE (pmurray): host_ip is coerced to an IPAddress
compute.host_ip = fake_host_ip
compute.supported_hv_specs = fake_supported_hv_specs
- compute.create()
+ with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_gu:
+ compute.create()
+ self.assertFalse(mock_gu.called)
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
+ @mock.patch('nova.db.compute_node_create')
+ @mock.patch('oslo_utils.uuidutils.generate_uuid')
+ def test_create_allocates_uuid(self, mock_gu, mock_create):
+ mock_create.return_value = fake_compute_node
+ mock_gu.return_value = fake_compute_node['uuid']
+ obj = objects.ComputeNode(context=self.context)
+ obj.create()
+ mock_gu.assert_called_once_with()
+ mock_create.assert_called_once_with(
+ self.context, {'uuid': fake_compute_node['uuid']})
+
def test_recreate_fails(self):
self.mox.StubOutWithMock(db, 'compute_node_create')
- db.compute_node_create(self.context, {'service_id': 456}).AndReturn(
+ db.compute_node_create(
+ self.context, {'service_id': 456,
+ 'uuid': uuidsentinel.fake_compute_node}).AndReturn(
fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.service_id = 456
+ compute.uuid = uuidsentinel.fake_compute_node
compute.create()
self.assertRaises(exception.ObjectActionError, compute.create)
@@ -332,12 +283,14 @@ class _TestComputeNodeObject(object):
'stats': fake_stats_db_format,
'host_ip': fake_host_ip,
'supported_instances': fake_supported_hv_specs_db_format,
+ 'uuid': uuidsentinel.fake_compute_node,
}).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.vcpus_used = 3
compute.stats = fake_stats
+ compute.uuid = uuidsentinel.fake_compute_node
# NOTE (pmurray): host_ip is coerced to an IPAddress
compute.host_ip = fake_host_ip
compute.supported_hv_specs = fake_supported_hv_specs
@@ -346,6 +299,21 @@ class _TestComputeNodeObject(object):
subs=self.subs(),
comparators=self.comparators())
+ def test_query_allocates_uuid(self):
+ fake = dict(fake_compute_node)
+ fake.pop('uuid')
+ db.compute_node_create(self.context, fake)
+ with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_gu:
+ mock_gu.return_value = uuidsentinel.fake_compute_node
+ obj = objects.ComputeNode.get_by_id(self.context, fake['id'])
+ mock_gu.assert_called_once_with()
+ self.assertEqual(uuidsentinel.fake_compute_node, obj.uuid)
+ self.assertNotIn('uuid', obj.obj_get_changes())
+ with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_gu:
+ obj = objects.ComputeNode.get_by_id(self.context, fake['id'])
+ self.assertEqual(uuidsentinel.fake_compute_node, obj.uuid)
+ self.assertFalse(mock_gu.called)
+
@mock.patch.object(db, 'compute_node_create',
return_value=fake_compute_node)
def test_set_id_failure(self, db_mock):
@@ -404,30 +372,6 @@ class _TestComputeNodeObject(object):
subs=self.subs(),
comparators=self.comparators())
- @mock.patch('nova.objects.Service.get_by_id')
- @mock.patch('nova.db.compute_nodes_get_by_service_id')
- @mock.patch('nova.objects.Service.get_by_compute_host')
- @mock.patch('nova.db.compute_node_get_all_by_host')
- def test_get_all_by_host_with_old_compute(self, cn_get_all_by_host,
- svc_get_by_ch,
- cn_get_by_svc_id,
- svc_get_by_id):
- cn_get_all_by_host.side_effect = exception.ComputeHostNotFound(
- host='fake')
- fake_service = service.Service(id=123)
- fake_service.host = 'fake'
- svc_get_by_ch.return_value = fake_service
- cn_get_by_svc_id.return_value = [fake_old_compute_node]
- svc_get_by_id.return_value = fake_service
-
- computes = compute_node.ComputeNodeList.get_all_by_host(self.context,
- 'fake')
- self.assertEqual(1, len(computes))
- # NOTE(sbauza): Result is still converted to new style Compute
- self.compare_obj(computes[0], fake_compute_node,
- subs=self.subs(),
- comparators=self.comparators())
-
def test_compat_numa_topology(self):
compute = compute_node.ComputeNode()
versions = ovo_base.obj_tree_get_versions('ComputeNode')
@@ -513,39 +457,51 @@ class _TestComputeNodeObject(object):
self.assertNotIn('cpu_allocation_ratio', primitive)
self.assertNotIn('ram_allocation_ratio', primitive)
+ def test_compat_disk_allocation_ratio(self):
+ compute = compute_node.ComputeNode()
+ primitive = compute.obj_to_primitive(target_version='1.15')
+ self.assertNotIn('disk_allocation_ratio', primitive)
+
def test_compat_allocation_ratios_old_compute(self):
- self.flags(cpu_allocation_ratio=2.0, ram_allocation_ratio=3.0)
+ self.flags(cpu_allocation_ratio=2.0, ram_allocation_ratio=3.0,
+ disk_allocation_ratio=0.9)
compute_dict = fake_compute_node.copy()
# old computes don't provide allocation ratios to the table
compute_dict['cpu_allocation_ratio'] = None
compute_dict['ram_allocation_ratio'] = None
+ compute_dict['disk_allocation_ratio'] = None
cls = objects.ComputeNode
compute = cls._from_db_object(self.context, cls(), compute_dict)
self.assertEqual(2.0, compute.cpu_allocation_ratio)
self.assertEqual(3.0, compute.ram_allocation_ratio)
+ self.assertEqual(0.9, compute.disk_allocation_ratio)
def test_compat_allocation_ratios_default_values(self):
compute_dict = fake_compute_node.copy()
# new computes provide allocation ratios defaulted to 0.0
compute_dict['cpu_allocation_ratio'] = 0.0
compute_dict['ram_allocation_ratio'] = 0.0
+ compute_dict['disk_allocation_ratio'] = 0.0
cls = objects.ComputeNode
compute = cls._from_db_object(self.context, cls(), compute_dict)
self.assertEqual(16.0, compute.cpu_allocation_ratio)
self.assertEqual(1.5, compute.ram_allocation_ratio)
+ self.assertEqual(1.0, compute.disk_allocation_ratio)
def test_compat_allocation_ratios_old_compute_default_values(self):
compute_dict = fake_compute_node.copy()
# old computes don't provide allocation ratios to the table
compute_dict['cpu_allocation_ratio'] = None
compute_dict['ram_allocation_ratio'] = None
+ compute_dict['disk_allocation_ratio'] = None
cls = objects.ComputeNode
compute = cls._from_db_object(self.context, cls(), compute_dict)
self.assertEqual(16.0, compute.cpu_allocation_ratio)
self.assertEqual(1.5, compute.ram_allocation_ratio)
+ self.assertEqual(1.0, compute.disk_allocation_ratio)
class TestComputeNodeObject(test_objects._LocalTest,
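The three allocation-ratio compat tests above encode one fallback rule per ratio. A hedged distillation consistent with the expected values (None means an old compute without the column, 0.0 is the new "unset" sentinel, and the hard defaults are 16.0 cpu / 1.5 ram / 1.0 disk):

    def resolve_ratio(db_value, conf_value, hard_default):
        """Illustrative fallback chain for an allocation ratio."""
        if db_value:          # a real, non-zero ratio from the DB wins
            return db_value
        if conf_value:        # else this node's configured ratio
            return conf_value
        return hard_default   # else the scheduler-wide default

    assert resolve_ratio(None, 2.0, 16.0) == 2.0   # old compute, flag set
    assert resolve_ratio(0.0, 0.0, 16.0) == 16.0   # new sentinel, no flag
    assert resolve_ratio(None, 0.9, 1.0) == 0.9    # old compute, disk flag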
diff --git a/nova/tests/unit/objects/test_fields.py b/nova/tests/unit/objects/test_fields.py
index 4187d1d798..7243491548 100644
--- a/nova/tests/unit/objects/test_fields.py
+++ b/nova/tests/unit/objects/test_fields.py
@@ -21,6 +21,7 @@ import six
from nova.network import model as network_model
from nova.objects import fields
+from nova import signature_utils
from nova import test
from nova import utils
@@ -422,6 +423,25 @@ class TestHVType(TestField):
self.assertRaises(ValueError, self.field.stringify, 'acme')
+class TestImageSignatureTypes(TestField):
+ # Ensure that the object definition is updated
+ # in step with the signature_utils module
+ def setUp(self):
+ super(TestImageSignatureTypes, self).setUp()
+ self.hash_field = fields.ImageSignatureHashType()
+ self.key_type_field = fields.ImageSignatureKeyType()
+
+ def test_hashes(self):
+ for hash_name in list(signature_utils.HASH_METHODS.keys()):
+ self.assertIn(hash_name, self.hash_field.hashes)
+
+ def test_key_types(self):
+ key_type_dict = signature_utils.SignatureKeyType._REGISTERED_TYPES
+ key_types = list(key_type_dict.keys())
+ for key_type in key_types:
+ self.assertIn(key_type, self.key_type_field.key_types)
+
+
class TestOSType(TestField):
def setUp(self):
super(TestOSType, self).setUp()
@@ -440,6 +460,60 @@ class TestOSType(TestField):
self.assertRaises(ValueError, self.field.stringify, 'acme')
+class TestResourceClass(TestField):
+ def setUp(self):
+ super(TestResourceClass, self).setUp()
+ self.field = fields.ResourceClassField()
+ self.coerce_good_values = [
+ ('VCPU', 'VCPU'),
+ ('MEMORY_MB', 'MEMORY_MB'),
+ ('DISK_GB', 'DISK_GB'),
+ ('PCI_DEVICE', 'PCI_DEVICE'),
+ ('SRIOV_NET_VF', 'SRIOV_NET_VF'),
+ ('NUMA_SOCKET', 'NUMA_SOCKET'),
+ ('NUMA_CORE', 'NUMA_CORE'),
+ ('NUMA_THREAD', 'NUMA_THREAD'),
+ ('NUMA_MEMORY_MB', 'NUMA_MEMORY_MB'),
+ ('IPV4_ADDRESS', 'IPV4_ADDRESS'),
+ ]
+ self.expected_indexes = [
+ ('VCPU', 0),
+ ('MEMORY_MB', 1),
+ ('DISK_GB', 2),
+ ('PCI_DEVICE', 3),
+ ('SRIOV_NET_VF', 4),
+ ('NUMA_SOCKET', 5),
+ ('NUMA_CORE', 6),
+ ('NUMA_THREAD', 7),
+ ('NUMA_MEMORY_MB', 8),
+ ('IPV4_ADDRESS', 9),
+ ]
+ self.coerce_bad_values = ['acme']
+ self.to_primitive_values = self.coerce_good_values[0:1]
+ self.from_primitive_values = self.coerce_good_values[0:1]
+
+ def test_stringify(self):
+ self.assertEqual("'VCPU'", self.field.stringify(
+ fields.ResourceClass.VCPU))
+
+ def test_stringify_invalid(self):
+ self.assertRaises(ValueError, self.field.stringify, 'cow')
+
+ def test_index(self):
+ for name, index in self.expected_indexes:
+ self.assertEqual(index, self.field.index(name))
+
+ def test_index_invalid(self):
+ self.assertRaises(ValueError, self.field.index, 'cow')
+
+ def test_from_index(self):
+ for name, index in self.expected_indexes:
+ self.assertEqual(name, self.field.from_index(index))
+
+ def test_from_index_invalid(self):
+ self.assertRaises(IndexError, self.field.from_index, 999)
+
+
class TestRNGModel(TestField):
def setUp(self):
super(TestRNGModel, self).setUp()
@@ -600,6 +674,31 @@ class TestMonitorMetricType(TestField):
self.assertRaises(ValueError, self.field.stringify, 'cpufrequency')
+class TestDiskFormat(TestField):
+ def setUp(self):
+ super(TestDiskFormat, self).setUp()
+ self.field = fields.DiskFormatField()
+ self.coerce_good_values = [('qcow2', 'qcow2'),
+ ('raw', 'raw'),
+ ('lvm', 'lvm'),
+ ('rbd', 'rbd'),
+ ('ploop', 'ploop'),
+ ('vhd', 'vhd'),
+ ('vmdk', 'vmdk'),
+ ('vdi', 'vdi'),
+ ('iso', 'iso')]
+
+ self.coerce_bad_values = ['acme']
+ self.to_primitive_values = self.coerce_good_values[0:1]
+ self.from_primitive_values = self.coerce_good_values[0:1]
+
+ def test_stringify(self):
+ self.assertEqual("'rbd'", self.field.stringify('rbd'))
+
+ def test_stringify_invalid(self):
+ self.assertRaises(ValueError, self.field.stringify, 'acme')
+
+
class TestInteger(TestField):
def setUp(self):
super(TestInteger, self).setUp()
@@ -916,3 +1015,58 @@ class TestIPV6Network(TestField):
for x in good]
self.from_primitive_values = [(x, netaddr.IPNetwork(x))
for x in good]
+
+
+class TestNotificationPriority(TestField):
+ def setUp(self):
+ super(TestNotificationPriority, self).setUp()
+ self.field = fields.NotificationPriorityField()
+ self.coerce_good_values = [('audit', 'audit'),
+ ('critical', 'critical'),
+ ('debug', 'debug'),
+ ('error', 'error'),
+ ('sample', 'sample'),
+ ('warn', 'warn')]
+ self.coerce_bad_values = ['warning']
+ self.to_primitive_values = self.coerce_good_values[0:1]
+ self.from_primitive_values = self.coerce_good_values[0:1]
+
+ def test_stringify(self):
+ self.assertEqual("'warn'", self.field.stringify('warn'))
+
+ def test_stringify_invalid(self):
+ self.assertRaises(ValueError, self.field.stringify, 'warning')
+
+
+class TestNotificationPhase(TestField):
+ def setUp(self):
+ super(TestNotificationPhase, self).setUp()
+ self.field = fields.NotificationPhaseField()
+ self.coerce_good_values = [('start', 'start'),
+ ('end', 'end'),
+ ('error', 'error')]
+ self.coerce_bad_values = ['begin']
+ self.to_primitive_values = self.coerce_good_values[0:1]
+ self.from_primitive_values = self.coerce_good_values[0:1]
+
+ def test_stringify(self):
+ self.assertEqual("'error'", self.field.stringify('error'))
+
+ def test_stringify_invalid(self):
+ self.assertRaises(ValueError, self.field.stringify, 'begin')
+
+
+class TestNotificationAction(TestField):
+ def setUp(self):
+ super(TestNotificationAction, self).setUp()
+ self.field = fields.NotificationActionField()
+ self.coerce_good_values = [('update', 'update')]
+ self.coerce_bad_values = ['magic']
+ self.to_primitive_values = self.coerce_good_values[0:1]
+ self.from_primitive_values = self.coerce_good_values[0:1]
+
+ def test_stringify(self):
+ self.assertEqual("'update'", self.field.stringify('update'))
+
+ def test_stringify_invalid(self):
+ self.assertRaises(ValueError, self.field.stringify, 'magic')
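The new field tests above all follow the oslo.versionedobjects enum recipe: an Enum type listing its valid values plus a BaseEnumField binding it. A minimal sketch of that recipe (nova's real definitions live in nova.objects.fields):

    from oslo_versionedobjects import fields as ovo_fields

    class NotificationPhase(ovo_fields.Enum):
        ALL = ('start', 'end', 'error')

        def __init__(self):
            super(NotificationPhase, self).__init__(valid_values=self.ALL)

    class NotificationPhaseField(ovo_fields.BaseEnumField):
        AUTO_TYPE = NotificationPhase()

    field = NotificationPhaseField()
    assert field.coerce(None, 'phase', 'start') == 'start'
    try:
        field.coerce(None, 'phase', 'begin')   # not a valid value
    except ValueError:
        pass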
diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py
index 579eb7873f..a8dbf1b0b2 100644
--- a/nova/tests/unit/objects/test_instance.py
+++ b/nova/tests/unit/objects/test_instance.py
@@ -72,6 +72,12 @@ class _TestInstanceObject(object):
db_inst['info_cache'] = dict(test_instance_info_cache.fake_info_cache,
instance_uuid=db_inst['uuid'])
+ db_inst['system_metadata'] = {
+ 'image_name': 'os2-warp',
+ 'image_min_ram': 100,
+ 'image_hw_disk_bus': 'ide',
+ 'image_hw_vif_model': 'ne2k_pci',
+ }
return db_inst
def test_datetime_deserialization(self):
@@ -115,8 +121,7 @@ class _TestInstanceObject(object):
def test_get_without_expected(self):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, 'uuid',
- columns_to_join=[],
- use_slave=False
+ columns_to_join=[]
).AndReturn(self.fake_instance)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, 'uuid',
@@ -172,9 +177,7 @@ class _TestInstanceObject(object):
})
db.instance_get_by_uuid(
self.context, 'uuid',
- columns_to_join=exp_cols,
- use_slave=False
- ).AndReturn(fake_instance)
+ columns_to_join=exp_cols).AndReturn(fake_instance)
fake_faults = test_instance_fault.fake_faults
db.instance_fault_get_by_instance_uuids(
self.context, [fake_instance['uuid']]
@@ -203,14 +206,12 @@ class _TestInstanceObject(object):
fake_uuid = self.fake_instance['uuid']
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(self.fake_instance)
fake_inst2 = dict(self.fake_instance,
metadata=[{'key': 'foo', 'value': 'bar'}])
db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['metadata'],
- use_slave=False
+ columns_to_join=['metadata']
).AndReturn(fake_inst2)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
@@ -233,8 +234,7 @@ class _TestInstanceObject(object):
fake_instance = self.fake_instance
db.instance_get_by_uuid(self.context, uuids.instance,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(fake_instance)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, uuids.instance)
@@ -251,14 +251,12 @@ class _TestInstanceObject(object):
fake_uuid = self.fake_instance['uuid']
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(dict(self.fake_instance,
host='orig-host'))
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(dict(self.fake_instance,
host='new-host'))
self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache,
@@ -325,8 +323,7 @@ class _TestInstanceObject(object):
self.mox.StubOutWithMock(notifications, 'send_update')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(old_ref)
db.instance_update_and_get_original(
self.context, fake_uuid, expected_updates,
@@ -402,8 +399,7 @@ class _TestInstanceObject(object):
self.mox.StubOutWithMock(notifications, 'send_update')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(old_ref)
db.instance_update_and_get_original(
self.context, fake_uuid, expected_updates,
@@ -613,8 +609,7 @@ class _TestInstanceObject(object):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
@@ -627,8 +622,7 @@ class _TestInstanceObject(object):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
@@ -641,8 +635,7 @@ class _TestInstanceObject(object):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
@@ -666,8 +659,7 @@ class _TestInstanceObject(object):
self.mox.StubOutWithMock(db, 'instance_info_cache_update')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(fake_inst)
db.instance_info_cache_update(self.context, fake_uuid,
{'network_info': nwinfo2_json}).AndReturn(fake_info_cache)
@@ -683,8 +675,7 @@ class _TestInstanceObject(object):
fake_uuid = fake_inst['uuid']
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['info_cache'],
- use_slave=False
+ columns_to_join=['info_cache']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
@@ -709,8 +700,7 @@ class _TestInstanceObject(object):
self.mox.StubOutWithMock(db, 'security_group_update')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(fake_inst)
db.security_group_update(self.context, 1, {'description': 'changed'}
).AndReturn(fake_inst['security_groups'][0])
@@ -734,8 +724,7 @@ class _TestInstanceObject(object):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
@@ -746,8 +735,7 @@ class _TestInstanceObject(object):
fake_uuid = fake_inst['uuid']
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['pci_devices'],
- use_slave=False
+ columns_to_join=['pci_devices']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
@@ -798,8 +786,7 @@ class _TestInstanceObject(object):
]
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=['pci_devices'],
- use_slave=False
+ columns_to_join=['pci_devices']
).AndReturn(fake_inst)
self.mox.ReplayAll()
inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
@@ -816,8 +803,7 @@ class _TestInstanceObject(object):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
db.instance_get_by_uuid(self.context, fake_uuid,
- columns_to_join=[],
- use_slave=False
+ columns_to_join=[]
).AndReturn(self.fake_instance)
db.instance_fault_get_by_instance_uuids(
self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults})
@@ -841,6 +827,21 @@ class _TestInstanceObject(object):
self.assertEqual(fake_ec2_ids.instance_id, inst.ec2_ids.instance_id)
+ @mock.patch('nova.db.instance_get_by_uuid')
+ def test_with_image_meta(self, mock_get):
+ fake_inst = dict(self.fake_instance)
+ mock_get.return_value = fake_inst
+
+ inst = instance.Instance.get_by_uuid(self.context,
+ fake_inst['uuid'],
+ expected_attrs=['image_meta'])
+
+ image_meta = inst.image_meta
+ self.assertIsInstance(image_meta, objects.ImageMeta)
+ self.assertEqual(100, image_meta.min_ram)
+ self.assertEqual('ide', image_meta.properties.hw_disk_bus)
+ self.assertEqual('ne2k_pci', image_meta.properties.hw_vif_model)
+
def test_iteritems_with_extra_attrs(self):
self.stubs.Set(objects.Instance, 'name', 'foo')
inst = objects.Instance(uuid=uuids.instance)
@@ -862,7 +863,11 @@ class _TestInstanceObject(object):
vals = {'host': 'foo-host',
'memory_mb': 128,
'system_metadata': {'foo': 'bar'},
- 'extra': {}}
+ 'extra': {
+ 'vcpu_model': None,
+ 'numa_topology': None,
+ 'pci_requests': None,
+ }}
fake_inst = fake_instance.fake_db_instance(**vals)
db.instance_create(self.context, vals).AndReturn(fake_inst)
self.mox.ReplayAll()
@@ -884,7 +889,11 @@ class _TestInstanceObject(object):
vals = {'host': 'foo-host',
'memory_mb': 128,
'system_metadata': {'foo': 'bar'},
- 'extra': {}}
+ 'extra': {
+ 'vcpu_model': None,
+ 'numa_topology': None,
+ 'pci_requests': None,
+ }}
fake_inst = fake_instance.fake_db_instance(**vals)
db.instance_create(self.context, vals).AndReturn(fake_inst)
self.mox.ReplayAll()
@@ -895,7 +904,10 @@ class _TestInstanceObject(object):
def test_create(self):
self.mox.StubOutWithMock(db, 'instance_create')
- db.instance_create(self.context, {'extra': {}}).AndReturn(
+ extras = {'vcpu_model': None,
+ 'numa_topology': None,
+ 'pci_requests': None}
+ db.instance_create(self.context, {'extra': extras}).AndReturn(
self.fake_instance)
self.mox.ReplayAll()
inst = objects.Instance(context=self.context)
@@ -953,7 +965,11 @@ class _TestInstanceObject(object):
{'host': 'foo-host',
'security_groups': ['foo', 'bar'],
'info_cache': {'network_info': '[]'},
- 'extra': {},
+ 'extra': {
+ 'vcpu_model': None,
+ 'numa_topology': None,
+ 'pci_requests': None,
+ },
}
).AndReturn(fake_inst)
self.mox.ReplayAll()
@@ -1240,6 +1256,20 @@ class _TestInstanceObject(object):
mock_get.assert_called_once_with(self.context, inst)
self.assertEqual(fake_ec2_ids, ec2_ids)
+ @mock.patch('nova.objects.SecurityGroupList.get_by_instance')
+ def test_load_security_groups(self, mock_get):
+ secgroups = []
+ for name in ('foo', 'bar'):
+ secgroup = security_group.SecurityGroup()
+ secgroup.name = name
+ secgroups.append(secgroup)
+ fake_secgroups = security_group.SecurityGroupList(objects=secgroups)
+ mock_get.return_value = fake_secgroups
+ inst = objects.Instance(context=self.context, uuid='fake')
+ secgroups = inst.security_groups
+ mock_get.assert_called_once_with(self.context, inst)
+ self.assertEqual(fake_secgroups, secgroups)
+
def test_get_with_extras(self):
pci_requests = objects.InstancePCIRequests(requests=[
objects.InstancePCIRequest(count=123, spec=[])])
@@ -1324,8 +1354,8 @@ class _TestInstanceListObject(object):
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid',
'asc', limit=None, marker=None,
- columns_to_join=['metadata'],
- use_slave=False).AndReturn(fakes)
+ columns_to_join=['metadata']
+ ).AndReturn(fakes)
self.mox.ReplayAll()
inst_list = objects.InstanceList.get_by_filters(
self.context, {'foo': 'bar'}, 'uuid', 'asc',
@@ -1341,7 +1371,6 @@ class _TestInstanceListObject(object):
db.instance_get_all_by_filters_sort(self.context, {'foo': 'bar'},
limit=None, marker=None,
columns_to_join=['metadata'],
- use_slave=False,
sort_keys=['uuid'],
sort_dirs=['asc']).AndReturn(fakes)
self.mox.ReplayAll()
@@ -1365,7 +1394,7 @@ class _TestInstanceListObject(object):
limit=100, marker='uuid', use_slave=True)
mock_get_by_filters.assert_called_once_with(
self.context, {'foo': 'bar'}, 'key', 'dir', limit=100,
- marker='uuid', columns_to_join=None, use_slave=True)
+ marker='uuid', columns_to_join=None)
self.assertEqual(0, mock_get_by_filters_sort.call_count)
@mock.patch.object(db, 'instance_get_all_by_filters_sort')
@@ -1381,7 +1410,7 @@ class _TestInstanceListObject(object):
sort_dirs=['dir1', 'dir2'])
mock_get_by_filters_sort.assert_called_once_with(
self.context, {'foo': 'bar'}, limit=100,
- marker='uuid', columns_to_join=None, use_slave=True,
+ marker='uuid', columns_to_join=None,
sort_keys=['key1', 'key2'], sort_dirs=['dir1', 'dir2'])
self.assertEqual(0, mock_get_by_filters.call_count)
@@ -1394,8 +1423,7 @@ class _TestInstanceListObject(object):
db.instance_get_all_by_filters(self.context,
{'deleted': True, 'cleaned': False},
'uuid', 'asc', limit=None, marker=None,
- columns_to_join=['metadata'],
- use_slave=False).AndReturn(
+ columns_to_join=['metadata']).AndReturn(
[fakes[1]])
self.mox.ReplayAll()
inst_list = objects.InstanceList.get_by_filters(
@@ -1411,8 +1439,7 @@ class _TestInstanceListObject(object):
self.fake_instance(2)]
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
db.instance_get_all_by_host(self.context, 'foo',
- columns_to_join=None,
- use_slave=False).AndReturn(fakes)
+ columns_to_join=None).AndReturn(fakes)
self.mox.ReplayAll()
inst_list = objects.InstanceList.get_by_host(self.context, 'foo')
for i in range(0, len(fakes)):
@@ -1513,9 +1540,7 @@ class _TestInstanceListObject(object):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
db.instance_get_all_by_host(self.context, 'host',
- columns_to_join=[],
- use_slave=False
- ).AndReturn(fake_insts)
+ columns_to_join=[]).AndReturn(fake_insts)
db.instance_fault_get_by_instance_uuids(
self.context, [x['uuid'] for x in fake_insts]
).AndReturn(fake_faults)
diff --git a/nova/tests/unit/objects/test_keypair.py b/nova/tests/unit/objects/test_keypair.py
index 3ca43b4aab..ad7d474781 100644
--- a/nova/tests/unit/objects/test_keypair.py
+++ b/nova/tests/unit/objects/test_keypair.py
@@ -12,9 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
from oslo_utils import timeutils
-from nova import db
from nova import exception
from nova.objects import keypair
from nova.tests.unit.objects import test_objects
@@ -35,68 +35,76 @@ fake_keypair = {
class _TestKeyPairObject(object):
- def test_get_by_name(self):
- self.mox.StubOutWithMock(db, 'key_pair_get')
- db.key_pair_get(self.context, 'fake-user', 'foo-keypair').AndReturn(
- fake_keypair)
- self.mox.ReplayAll()
+
+ @mock.patch('nova.db.key_pair_get')
+ def test_get_by_name(self, mock_kp_get):
+ mock_kp_get.return_value = fake_keypair
+
keypair_obj = keypair.KeyPair.get_by_name(self.context, 'fake-user',
'foo-keypair')
self.compare_obj(keypair_obj, fake_keypair)
- def test_create(self):
- self.mox.StubOutWithMock(db, 'key_pair_create')
- db.key_pair_create(self.context,
- {'name': 'foo-keypair',
- 'public_key': 'keydata'}).AndReturn(fake_keypair)
- self.mox.ReplayAll()
+ mock_kp_get.assert_called_once_with(self.context, 'fake-user',
+ 'foo-keypair')
+
+ @mock.patch('nova.db.key_pair_create')
+ def test_create(self, mock_kp_create):
+ mock_kp_create.return_value = fake_keypair
+
keypair_obj = keypair.KeyPair(context=self.context)
keypair_obj.name = 'foo-keypair'
keypair_obj.public_key = 'keydata'
keypair_obj.create()
self.compare_obj(keypair_obj, fake_keypair)
- def test_recreate_fails(self):
- self.mox.StubOutWithMock(db, 'key_pair_create')
- db.key_pair_create(self.context,
- {'name': 'foo-keypair',
- 'public_key': 'keydata'}).AndReturn(fake_keypair)
- self.mox.ReplayAll()
+ mock_kp_create.assert_called_once_with(self.context,
+ {'name': 'foo-keypair', 'public_key': 'keydata'})
+
+ @mock.patch('nova.db.key_pair_create')
+ def test_recreate_fails(self, mock_kp_create):
+ mock_kp_create.return_value = fake_keypair
+
keypair_obj = keypair.KeyPair(context=self.context)
keypair_obj.name = 'foo-keypair'
keypair_obj.public_key = 'keydata'
keypair_obj.create()
self.assertRaises(exception.ObjectActionError, keypair_obj.create)
- def test_destroy(self):
- self.mox.StubOutWithMock(db, 'key_pair_destroy')
- db.key_pair_destroy(self.context, 'fake-user', 'foo-keypair')
- self.mox.ReplayAll()
+ mock_kp_create.assert_called_once_with(self.context,
+ {'name': 'foo-keypair', 'public_key': 'keydata'})
+
+ @mock.patch('nova.db.key_pair_destroy')
+ def test_destroy(self, mock_kp_destroy):
keypair_obj = keypair.KeyPair(context=self.context)
keypair_obj.id = 123
keypair_obj.user_id = 'fake-user'
keypair_obj.name = 'foo-keypair'
keypair_obj.destroy()
- def test_destroy_by_name(self):
- self.mox.StubOutWithMock(db, 'key_pair_destroy')
- db.key_pair_destroy(self.context, 'fake-user', 'foo-keypair')
- self.mox.ReplayAll()
+ mock_kp_destroy.assert_called_once_with(
+ self.context, 'fake-user', 'foo-keypair')
+
+ @mock.patch('nova.db.key_pair_destroy')
+ def test_destroy_by_name(self, mock_kp_destroy):
keypair.KeyPair.destroy_by_name(self.context, 'fake-user',
'foo-keypair')
- def test_get_by_user(self):
- self.mox.StubOutWithMock(db, 'key_pair_get_all_by_user')
- self.mox.StubOutWithMock(db, 'key_pair_count_by_user')
- db.key_pair_get_all_by_user(self.context, 'fake-user').AndReturn(
- [fake_keypair])
- db.key_pair_count_by_user(self.context, 'fake-user').AndReturn(1)
- self.mox.ReplayAll()
+ mock_kp_destroy.assert_called_once_with(
+ self.context, 'fake-user', 'foo-keypair')
+
+ @mock.patch('nova.db.key_pair_get_all_by_user')
+ @mock.patch('nova.db.key_pair_count_by_user')
+ def test_get_by_user(self, mock_kp_count, mock_kp_get):
+ mock_kp_get.return_value = [fake_keypair]
+ mock_kp_count.return_value = 1
+
keypairs = keypair.KeyPairList.get_by_user(self.context, 'fake-user')
self.assertEqual(1, len(keypairs))
self.compare_obj(keypairs[0], fake_keypair)
self.assertEqual(1, keypair.KeyPairList.get_count_by_user(self.context,
'fake-user'))
+ mock_kp_get.assert_called_once_with(self.context, 'fake-user')
+ mock_kp_count.assert_called_once_with(self.context, 'fake-user')
def test_obj_make_compatible(self):
keypair_obj = keypair.KeyPair(context=self.context)
diff --git a/nova/tests/unit/objects/test_migrate_data.py b/nova/tests/unit/objects/test_migrate_data.py
index 68ceb454d8..ffc07773ea 100644
--- a/nova/tests/unit/objects/test_migrate_data.py
+++ b/nova/tests/unit/objects/test_migrate_data.py
@@ -42,6 +42,34 @@ class _TestLiveMigrateData(object):
'is_volume_backed': False},
obj.to_legacy_dict(pre_migration_result=True))
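+ # detect_implementation() picks the LiveMigrateData subclass by
+ # looking for driver-specific keys in the legacy dict, falling back
+ # to the base class when none are present.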
+ def test_detect_implementation_none(self):
+ legacy = migrate_data.LiveMigrateData().to_legacy_dict()
+ self.assertIsInstance(
+ migrate_data.LiveMigrateData.detect_implementation(legacy),
+ migrate_data.LiveMigrateData)
+
+ def test_detect_implementation_libvirt(self):
+ legacy = migrate_data.LibvirtLiveMigrateData(
+ instance_relative_path='foo').to_legacy_dict()
+ self.assertIsInstance(
+ migrate_data.LiveMigrateData.detect_implementation(legacy),
+ migrate_data.LibvirtLiveMigrateData)
+
+ def test_detect_implementation_libvirt_early(self):
+ legacy = migrate_data.LibvirtLiveMigrateData(
+ image_type='foo').to_legacy_dict()
+ self.assertIsInstance(
+ migrate_data.LiveMigrateData.detect_implementation(legacy),
+ migrate_data.LibvirtLiveMigrateData)
+
+ def test_detect_implementation_xenapi(self):
+ legacy = migrate_data.XenapiLiveMigrateData(
+ migrate_send_data={},
+ destination_sr_ref='foo').to_legacy_dict()
+ self.assertIsInstance(
+ migrate_data.LiveMigrateData.detect_implementation(legacy),
+ migrate_data.XenapiLiveMigrateData)
+
class TestLiveMigrateData(test_objects._LocalTest,
_TestLiveMigrateData):
@@ -137,6 +165,7 @@ class _TestLibvirtLiveMigrateData(object):
expected = {
'graphics_listen_addrs': {'vnc': '127.0.0.1',
'spice': None},
+ 'target_connect_addr': None,
'serial_listen_addr': '127.0.0.1',
'volume': {
'123': {
diff --git a/nova/tests/unit/objects/test_migration.py b/nova/tests/unit/objects/test_migration.py
index db18e9c35c..2c3c5ee51e 100644
--- a/nova/tests/unit/objects/test_migration.py
+++ b/nova/tests/unit/objects/test_migration.py
@@ -22,6 +22,7 @@ from nova import objects
from nova.objects import migration
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_objects
+from nova.tests import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
@@ -45,6 +46,12 @@ def fake_db_migration(**updates):
'status': 'migrating',
'migration_type': 'resize',
'hidden': False,
+ 'memory_total': 123456,
+ 'memory_processed': 12345,
+ 'memory_remaining': 120000,
+ 'disk_total': 234567,
+ 'disk_processed': 23456,
+ 'disk_remaining': 230000,
}
if updates:
@@ -133,8 +140,7 @@ class _TestMigrationObject(object):
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
db.instance_get_by_uuid(ctxt, fake_migration['instance_uuid'],
columns_to_join=['info_cache',
- 'security_groups'],
- use_slave=False
+ 'security_groups']
).AndReturn(fake_inst)
mig = migration.Migration._from_db_object(ctxt,
migration.Migration(),
@@ -143,6 +149,16 @@ class _TestMigrationObject(object):
self.mox.ReplayAll()
self.assertEqual(mig.instance.host, fake_inst['host'])
+ def test_instance_setter(self):
+ migration = objects.Migration(instance_uuid=uuidsentinel.instance)
+ inst = objects.Instance(uuid=uuidsentinel.instance)
+ with mock.patch('nova.objects.Instance.get_by_uuid') as mock_get:
+ migration.instance = inst
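+ # reading the property back must be served from _cached_instance
+ # without triggering a lazy-load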
+ migration.instance
+ self.assertFalse(mock_get.called)
+ self.assertEqual(inst, migration._cached_instance)
+ self.assertEqual(inst, migration.instance)
+
def test_get_unconfirmed_by_dest_compute(self):
ctxt = context.get_admin_context()
fake_migration = fake_db_migration()
@@ -219,6 +235,14 @@ class _TestMigrationObject(object):
self.assertEqual('migration', mig.migration_type)
self.assertTrue(mig.obj_attr_is_set('migration_type'))
+ @mock.patch('nova.db.migration_get_by_id_and_instance')
+ def test_get_by_id_and_instance(self, fake_get):
+ ctxt = context.get_admin_context()
+ fake_migration = fake_db_migration()
+ fake_get.return_value = fake_migration
+ migration = objects.Migration.get_by_id_and_instance(ctxt, '1', '1')
+ self.compare_obj(migration, fake_migration)
+
class TestMigrationObject(test_objects._LocalTest,
_TestMigrationObject):
diff --git a/nova/tests/unit/objects/test_notification.py b/nova/tests/unit/objects/test_notification.py
new file mode 100644
index 0000000000..ada34b80dd
--- /dev/null
+++ b/nova/tests/unit/objects/test_notification.py
@@ -0,0 +1,244 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+from oslo_utils import timeutils
+
+from nova import objects
+from nova.objects import base
+from nova.objects import fields
+from nova.objects import notification
+from nova import test
+
+
+class TestNotificationBase(test.NoDBTestCase):
+
+ @base.NovaObjectRegistry.register_if(False)
+ class TestObject(base.NovaObject):
+ VERSION = '1.0'
+ fields = {
+ 'field_1': fields.StringField(),
+ 'field_2': fields.IntegerField(),
+ 'not_important_field': fields.IntegerField(),
+ }
+
+ @base.NovaObjectRegistry.register_if(False)
+ class TestNotificationPayload(notification.NotificationPayloadBase):
+ VERSION = '1.0'
+
+ SCHEMA = {
+ 'field_1': ('source_field', 'field_1'),
+ 'field_2': ('source_field', 'field_2'),
+ }
+
+ fields = {
+ 'extra_field': fields.StringField(), # filled by ctor
+ 'field_1': fields.StringField(), # filled by the schema
+ 'field_2': fields.IntegerField(), # filled by the schema
+ }
+
+ def populate_schema(self, source_field):
+ super(TestNotificationBase.TestNotificationPayload,
+ self).populate_schema(source_field=source_field)
+
+ @base.NovaObjectRegistry.register_if(False)
+ class TestNotificationPayloadEmptySchema(
+ notification.NotificationPayloadBase):
+ VERSION = '1.0'
+
+ fields = {
+ 'extra_field': fields.StringField(), # filled by ctor
+ }
+
+ @base.NovaObjectRegistry.register_if(False)
+ class TestNotification(notification.NotificationBase):
+ VERSION = '1.0'
+ fields = {
+ 'payload': fields.ObjectField('TestNotificationPayload')
+ }
+
+ @base.NovaObjectRegistry.register_if(False)
+ class TestNotificationEmptySchema(notification.NotificationBase):
+ VERSION = '1.0'
+ fields = {
+ 'payload': fields.ObjectField('TestNotificationPayloadEmptySchema')
+ }
+
+ fake_service = {
+ 'created_at': timeutils.utcnow().replace(microsecond=0),
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': False,
+ 'id': 123,
+ 'host': 'fake-host',
+ 'binary': 'nova-fake',
+ 'topic': 'fake-service-topic',
+ 'report_count': 1,
+ 'forced_down': False,
+ 'disabled': False,
+ 'disabled_reason': None,
+ 'last_seen_up': None,
+ 'version': 1}
+
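+ # The emitted payload is the standard versioned-object primitive;
+ # 'not_important_field' is absent because the payload neither
+ # declares it as a field nor maps it in SCHEMA.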
+ expected_payload = {
+ 'nova_object.name': 'TestNotificationPayload',
+ 'nova_object.data': {
+ 'extra_field': 'test string',
+ 'field_1': 'test1',
+ 'field_2': 42},
+ 'nova_object.version': '1.0',
+ 'nova_object.namespace': 'nova'}
+
+ def setUp(self):
+ super(TestNotificationBase, self).setUp()
+ with mock.patch('nova.db.service_update') as mock_db_service_update:
+ self.service_obj = objects.Service(context=mock.sentinel.context,
+ id=self.fake_service['id'])
+ self.service_obj.obj_reset_changes(['version'])
+ mock_db_service_update.return_value = self.fake_service
+ self.service_obj.save()
+
+ self.my_obj = self.TestObject(field_1='test1',
+ field_2=42,
+ not_important_field=13)
+
+ self.payload = self.TestNotificationPayload(
+ extra_field='test string')
+ self.payload.populate_schema(source_field=self.my_obj)
+
+ self.notification = self.TestNotification(
+ event_type=notification.EventType(
+ object='test_object',
+ action=fields.NotificationAction.UPDATE,
+ phase=fields.NotificationPhase.START),
+ publisher=notification.NotificationPublisher.from_service_obj(
+ self.service_obj),
+ priority=fields.NotificationPriority.INFO,
+ payload=self.payload)
+
+ def _verify_notification(self, mock_notifier, mock_context,
+ expected_event_type,
+ expected_payload):
+ mock_notifier.prepare.assert_called_once_with(
+ publisher_id='nova-fake:fake-host')
+ mock_notify = mock_notifier.prepare.return_value.info
+ self.assertTrue(mock_notify.called)
+ self.assertEqual(mock_notify.call_args[0][0], mock_context)
+ self.assertEqual(mock_notify.call_args[1]['event_type'],
+ expected_event_type)
+ actual_payload = mock_notify.call_args[1]['payload']
+ self.assertJsonEqual(expected_payload, actual_payload)
+
+ @mock.patch('nova.rpc.LEGACY_NOTIFIER')
+ @mock.patch('nova.rpc.NOTIFIER')
+ def test_emit_notification(self, mock_notifier, mock_legacy):
+
+ mock_context = mock.Mock()
+ mock_context.to_dict.return_value = {}
+ self.notification.emit(mock_context)
+
+ self._verify_notification(
+ mock_notifier,
+ mock_context,
+ expected_event_type='test_object.update.start',
+ expected_payload=self.expected_payload)
+ self.assertFalse(mock_legacy.called)
+
+ @mock.patch('nova.rpc.NOTIFIER')
+ def test_emit_with_host_and_binary_as_publisher(self, mock_notifier):
+ noti = self.TestNotification(
+ event_type=notification.EventType(
+ object='test_object',
+ action=fields.NotificationAction.UPDATE),
+ publisher=notification.NotificationPublisher(host='fake-host',
+ binary='nova-fake'),
+ priority=fields.NotificationPriority.INFO,
+ payload=self.payload)
+
+ mock_context = mock.Mock()
+ mock_context.to_dict.return_value = {}
+ noti.emit(mock_context)
+
+ self._verify_notification(
+ mock_notifier,
+ mock_context,
+ expected_event_type='test_object.update',
+ expected_payload=self.expected_payload)
+
+ @mock.patch('nova.rpc.LEGACY_NOTIFIER')
+ @mock.patch('nova.rpc.NOTIFIER')
+ def test_emit_event_type_without_phase(self, mock_notifier, mock_legacy):
+ noti = self.TestNotification(
+ event_type=notification.EventType(
+ object='test_object',
+ action=fields.NotificationAction.UPDATE),
+ publisher=notification.NotificationPublisher.from_service_obj(
+ self.service_obj),
+ priority=fields.NotificationPriority.INFO,
+ payload=self.payload)
+
+ mock_context = mock.Mock()
+ mock_context.to_dict.return_value = {}
+ noti.emit(mock_context)
+
+ self._verify_notification(
+ mock_notifier,
+ mock_context,
+ expected_event_type='test_object.update',
+ expected_payload=self.expected_payload)
+ self.assertFalse(mock_legacy.called)
+
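+ # emitting a payload whose SCHEMA was never populated is a
+ # programming error and is rejected before anything hits the wire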
+ @mock.patch('nova.rpc.NOTIFIER')
+ def test_not_possible_to_emit_if_not_populated(self, mock_notifier):
+ non_populated_payload = self.TestNotificationPayload(
+ extra_field='test string')
+ noti = self.TestNotification(
+ event_type=notification.EventType(
+ object='test_object',
+ action=fields.NotificationAction.UPDATE),
+ publisher=notification.NotificationPublisher.from_service_obj(
+ self.service_obj),
+ priority=fields.NotificationPriority.INFO,
+ payload=non_populated_payload)
+
+ mock_context = mock.Mock()
+ self.assertRaises(AssertionError, noti.emit, mock_context)
+ self.assertFalse(mock_notifier.called)
+
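+ # a payload with an empty SCHEMA has nothing to populate, so it can
+ # be emitted without a populate_schema() call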
+ @mock.patch('nova.rpc.NOTIFIER')
+ def test_empty_schema(self, mock_notifier):
+ non_populated_payload = self.TestNotificationPayloadEmptySchema(
+ extra_field='test string')
+ noti = self.TestNotificationEmptySchema(
+ event_type=notification.EventType(
+ object='test_object',
+ action=fields.NotificationAction.UPDATE),
+ publisher=notification.NotificationPublisher.from_service_obj(
+ self.service_obj),
+ priority=fields.NotificationPriority.INFO,
+ payload=non_populated_payload)
+
+ mock_context = mock.Mock()
+ mock_context.to_dict.return_value = {}
+ noti.emit(mock_context)
+
+ self._verify_notification(
+ mock_notifier,
+ mock_context,
+ expected_event_type='test_object.update',
+ expected_payload={
+ 'nova_object.name': 'TestNotificationPayloadEmptySchema',
+ 'nova_object.data': {'extra_field': u'test string'},
+ 'nova_object.version': '1.0',
+ 'nova_object.namespace': 'nova'})
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index 958e102bdc..94691935b3 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -12,6 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import collections
import contextlib
import copy
import datetime
@@ -28,13 +29,13 @@ from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
from oslo_versionedobjects import fixture
import six
-from testtools import matchers
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
+from nova.objects import notification
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_notifier
@@ -256,17 +257,6 @@ class _BaseTestCase(test.TestCase):
"""
self.assertEqual(expected, str(obj_val))
- def assertNotIsInstance(self, obj, cls, msg=None):
- """Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
- try:
- f = super(_BaseTestCase, self).assertNotIsInstance
- except AttributeError:
- self.assertThat(obj,
- matchers.Not(matchers.IsInstance(cls)),
- message=msg or '')
- else:
- f(obj, cls, msg=msg)
-
class _LocalTest(_BaseTestCase):
def setUp(self):
@@ -1109,14 +1099,14 @@ class TestRegistry(test.NoDBTestCase):
object_data = {
'Agent': '1.0-c0c092abaceb6f51efe5d82175f15eba',
'AgentList': '1.0-5a7380d02c3aaf2a32fc8115ae7ca98c',
- 'Aggregate': '1.1-1ab35c4516f71de0bef7087026ab10d1',
+ 'Aggregate': '1.2-fe9d8c93feb37919753e9e44fe6818a7',
'AggregateList': '1.2-fb6e19f3c3a3186b04eceb98b5dadbfa',
'BandwidthUsage': '1.2-c6e4c779c7f40f2407e3d70022e3cd1c',
'BandwidthUsageList': '1.2-5fe7475ada6fe62413cbfcc06ec70746',
'BlockDeviceMapping': '1.16-12319f6f47f740a67a88a23f7c7ee6ef',
'BlockDeviceMappingList': '1.17-1e568eecb91d06d4112db9fd656de235',
'CellMapping': '1.0-7f1a7e85a22bbb7559fc730ab658b9bd',
- 'ComputeNode': '1.14-a396975707b66281c5f404a68fccd395',
+ 'ComputeNode': '1.16-2436e5b836fa0306a3c4e6d9e5ddacec',
'ComputeNodeList': '1.14-3b6f4f5ade621c40e70cb116db237844',
'DNSDomain': '1.0-7b0b2dab778454b6a7b6c66afe163a1a',
'DNSDomainList': '1.0-4ee0d9efdfd681fed822da88376e04d2',
@@ -1124,6 +1114,7 @@ object_data = {
'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f',
'EC2SnapshotMapping': '1.0-47e7ddabe1af966dce0cfd0ed6cd7cd1',
'EC2VolumeMapping': '1.0-5b713751d6f97bad620f3378a521020d',
+ 'EventType': '1.0-21dc35de314fc5fc0a7965211c0c00f7',
'FixedIP': '1.14-53e1c10b539f1a82fe83b1af4720efae',
'FixedIPList': '1.14-87a39361c8f08f059004d6b15103cdfd',
'Flavor': '1.1-b6bb7a730a79d720344accefafacf7ee',
@@ -1133,7 +1124,7 @@ object_data = {
'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac',
'HVSpec': '1.2-db672e73304da86139086d003f3977e7',
'ImageMeta': '1.8-642d1b2eb3e880a367f37d72dd76162d',
- 'ImageMetaProps': '1.11-96aa14a8ba226701bbd22e63557a63ea',
+ 'ImageMetaProps': '1.12-6a132dee47931447bf86c03c7006d96c',
'Instance': '2.1-416fdd0dfc33dfa12ff2cfdd8cc32e17',
'InstanceAction': '1.1-f9f293e526b66fca0d05c3b3a2d13914',
'InstanceActionEvent': '1.1-e56a64fa4710e43ef7af2ad9d6028b33',
@@ -1152,15 +1143,18 @@ object_data = {
'InstanceNUMATopology': '1.2-d944a7d6c21e1c773ffdf09c6d025954',
'InstancePCIRequest': '1.1-b1d75ebc716cb12906d9d513890092bf',
'InstancePCIRequests': '1.1-65e38083177726d806684cb1cc0136d2',
+ 'Inventory': '1.0-f4160797d47a533a58700e9ddcc9c5e2',
+ 'InventoryList': '1.0-de53f0fd078c27cc1d43400f4e8bcef8',
'LibvirtLiveMigrateBDMInfo': '1.0-252aabb723ca79d5469fa56f64b57811',
- 'LibvirtLiveMigrateData': '1.0-eb8b5f6c49ae3858213a7012558a2f3d',
+ 'LibvirtLiveMigrateData': '1.1-4ecf40aae7fee7bb37fc3b2123e760de',
'KeyPair': '1.3-bfaa2a8b148cdf11e0c72435d9dd097a',
'KeyPairList': '1.2-58b94f96e776bedaf1e192ddb2a24c4e',
- 'Migration': '1.2-8784125bedcea0a9227318511904e853',
+ 'Migration': '1.4-17979b9f2ae7f28d97043a220b2a8350',
'MigrationContext': '1.0-d8c2f10069e410f639c49082b5932c92',
'MigrationList': '1.2-02c0ec0c50b75ca86a2a74c5e8c911cc',
'MonitorMetric': '1.1-53b1db7c4ae2c531db79761e7acc52ba',
'MonitorMetricList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
+ 'NotificationPublisher': '1.0-bbbc1402fb0e443a3eb227cc52b61545',
'NUMACell': '1.2-74fc993ac5c83005e76e34e8487f1c05',
'NUMAPagesTopology': '1.0-c71d86317283266dc8364c149155e48e',
'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
@@ -1169,13 +1163,14 @@ object_data = {
'NetworkList': '1.2-69eca910d8fa035dfecd8ba10877ee59',
'NetworkRequest': '1.1-7a3e4ca2ce1e7b62d8400488f2f2b756',
'NetworkRequestList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
- 'PciDevice': '1.4-4f54e80054bbb6414e17eb9babc97a44',
+ 'PciDevice': '1.5-0d5abe5c91645b8469eb2a93fc53f932',
'PciDeviceList': '1.3-52ff14355491c8c580bdc0ba34c26210',
'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
'PciDevicePoolList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'Quotas': '1.2-1fe4cd50593aaf5d36a6dc5ab3f98fb3',
'QuotasNoOp': '1.2-e041ddeb7dc8188ca71706f78aad41c1',
'RequestSpec': '1.5-576a249869c161e17b7cd6d55f9d85f3',
+ 'ResourceProvider': '1.0-57a9a344b0faed9cf6d6811835b6deb6',
'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e',
'SchedulerLimits': '1.0-249c4bd8e62a9b327b7026b7f19cc641',
'SchedulerRetries': '1.1-3c9c8b16143ebbb6ad7030e999d14cc0',
@@ -1185,6 +1180,8 @@ object_data = {
'SecurityGroupRuleList': '1.2-0005c47fcd0fb78dd6d7fd32a1409f5b',
'Service': '1.19-8914320cbeb4ec29f252d72ce55d07e1',
'ServiceList': '1.17-b767102cba7cbed290e396114c3f86b3',
+ 'ServiceStatusNotification': '1.0-a73147b93b520ff0061865849d3dfa56',
+ 'ServiceStatusPayload': '1.0-a5e7b4fd6cc5581be45b31ff1f3a3f7f',
'TaskLog': '1.0-78b0534366f29aa3eebb01860fbe18fe',
'TaskLogList': '1.0-cc8cce1af8a283b9d28b55fcd682e777',
'Tag': '1.1-8b8d7d5b48887651a0e01241672e2963',
@@ -1200,55 +1197,13 @@ object_data = {
class TestObjectVersions(test.NoDBTestCase):
- @staticmethod
- def _is_method(thing):
- # NOTE(dims): In Python3, The concept of 'unbound methods' has
- # been removed from the language. When referencing a method
- # as a class attribute, you now get a plain function object.
- # so let's check for both
- return inspect.isfunction(thing) or inspect.ismethod(thing)
-
- def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
- """Follow a chain of remotable things down to the original function."""
- if isinstance(thing, classmethod):
- return self._find_remotable_method(cls, thing.__get__(None, cls))
- elif self._is_method(thing) and hasattr(thing, 'remotable'):
- return self._find_remotable_method(cls, thing.original_fn,
- parent_was_remotable=True)
- elif parent_was_remotable:
- # We must be the first non-remotable thing underneath a stack of
- # remotable things (i.e. the actual implementation method)
- return thing
- else:
- # This means the top-level thing never hit a remotable layer
- return None
-
- def _un_unicodify_enum_valid_values(self, _fields):
- for name, field in _fields:
- if not isinstance(field, (fields.BaseEnumField,
- fields.EnumField)):
- continue
- orig_type = type(field._type._valid_values)
- field._type._valid_values = orig_type(
- [x.encode('utf-8') for x in
- field._type._valid_values])
-
- def test_find_remotable_method(self):
- class MyObject(object):
- @base.remotable
- def my_method(self):
- return 'Hello World!'
- thing = self._find_remotable_method(MyObject,
- getattr(MyObject, 'my_method'))
- self.assertIsNotNone(thing)
-
def test_versions(self):
checker = fixture.ObjectVersionChecker(
- base.NovaObjectRegistry.obj_classes())
- fingerprints = checker.get_hashes()
+ base.NovaObjectRegistry.obj_classes())
+ fingerprints = checker.get_hashes(extra_data_func=get_extra_data)
if os.getenv('GENERATE_HASHES'):
- file('object_hashes.txt', 'w').write(
+ open('object_hashes.txt', 'w').write(
pprint.pformat(fingerprints))
raise test.TestingException(
'Generated hashes in object_hashes.txt')
@@ -1259,6 +1214,32 @@ class TestObjectVersions(test.NoDBTestCase):
'versions have been bumped, and then update their '
'hashes here.')
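+ # Changing a payload's SCHEMA must change its fingerprint even when
+ # the declared fields stay the same; get_extra_data() below mixes
+ # the SCHEMA into the hash for exactly this reason.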
+ def test_notification_payload_version_depends_on_the_schema(self):
+ @base.NovaObjectRegistry.register_if(False)
+ class TestNotificationPayload(notification.NotificationPayloadBase):
+ VERSION = '1.0'
+
+ SCHEMA = {
+ 'field_1': ('source_field', 'field_1'),
+ 'field_2': ('source_field', 'field_2'),
+ }
+
+ fields = {
+ 'extra_field': fields.StringField(), # filled by ctor
+ 'field_1': fields.StringField(), # filled by the schema
+ 'field_2': fields.IntegerField(), # filled by the schema
+ }
+
+ checker = fixture.ObjectVersionChecker(
+ {'TestNotificationPayload': (TestNotificationPayload,)})
+
+ old_hash = checker.get_hashes(extra_data_func=get_extra_data)
+ TestNotificationPayload.SCHEMA['field_3'] = ('source_field',
+ 'field_3')
+ new_hash = checker.get_hashes(extra_data_func=get_extra_data)
+
+ self.assertNotEqual(old_hash, new_hash)
+
def test_obj_make_compatible(self):
# Iterate all object classes and verify that we can run
# obj_make_compatible with every older version than current.
@@ -1374,3 +1355,17 @@ class TestObjMethodOverrides(test.NoDBTestCase):
obj_class = obj_classes[obj_name][0]
self.assertEqual(args,
inspect.getargspec(obj_class.obj_reset_changes))
+
+
+def get_extra_data(obj_class):
+ extra_data = tuple()
+
+ # Get the SCHEMA items to add to the fingerprint
+ # if we are looking at a notification
+ if issubclass(obj_class, notification.NotificationPayloadBase):
+ schema_data = collections.OrderedDict(
+ sorted(obj_class.SCHEMA.items()))
+
+ extra_data += (schema_data,)
+
+ return extra_data
diff --git a/nova/tests/unit/objects/test_pci_device.py b/nova/tests/unit/objects/test_pci_device.py
index 16b2870533..ea07e86613 100644
--- a/nova/tests/unit/objects/test_pci_device.py
+++ b/nova/tests/unit/objects/test_pci_device.py
@@ -25,6 +25,7 @@ from nova import objects
from nova.objects import fields
from nova.objects import instance
from nova.objects import pci_device
+from nova import test
from nova.tests.unit.objects import test_objects
dev_dict = {
@@ -33,6 +34,8 @@ dev_dict = {
'product_id': 'p',
'vendor_id': 'v',
'numa_node': 0,
+ 'dev_type': fields.PciDeviceType.STANDARD,
+ 'parent_addr': None,
'status': fields.PciDeviceStatus.AVAILABLE}
@@ -117,43 +120,44 @@ class _TestPciDeviceObject(object):
self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
def test_create_pci_device(self):
- self.pci_device = pci_device.PciDevice.create(dev_dict)
+ self.pci_device = pci_device.PciDevice.create(None, dev_dict)
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.obj_what_changed(),
set(['compute_node_id', 'product_id', 'vendor_id',
- 'numa_node', 'status', 'address', 'extra_info']))
+ 'numa_node', 'status', 'address', 'extra_info',
+ 'dev_type', 'parent_addr']))
def test_pci_device_extra_info(self):
self.dev_dict = copy.copy(dev_dict)
self.dev_dict['k1'] = 'v1'
self.dev_dict['k2'] = 'v2'
- self.pci_device = pci_device.PciDevice.create(self.dev_dict)
+ self.pci_device = pci_device.PciDevice.create(None, self.dev_dict)
extra_value = self.pci_device.extra_info
self.assertEqual(extra_value.get('k1'), 'v1')
self.assertEqual(set(extra_value.keys()), set(('k1', 'k2')))
self.assertEqual(self.pci_device.obj_what_changed(),
set(['compute_node_id', 'address', 'product_id',
'vendor_id', 'numa_node', 'status',
- 'extra_info']))
+ 'extra_info', 'dev_type', 'parent_addr']))
def test_update_device(self):
- self.pci_device = pci_device.PciDevice.create(dev_dict)
+ self.pci_device = pci_device.PciDevice.create(None, dev_dict)
self.pci_device.obj_reset_changes()
changes = {'product_id': 'p2', 'vendor_id': 'v2'}
self.pci_device.update_device(changes)
self.assertEqual(self.pci_device.vendor_id, 'v2')
self.assertEqual(self.pci_device.obj_what_changed(),
- set(['vendor_id', 'product_id']))
+ set(['vendor_id', 'product_id', 'parent_addr']))
def test_update_device_same_value(self):
- self.pci_device = pci_device.PciDevice.create(dev_dict)
+ self.pci_device = pci_device.PciDevice.create(None, dev_dict)
self.pci_device.obj_reset_changes()
changes = {'product_id': 'p', 'vendor_id': 'v2'}
self.pci_device.update_device(changes)
self.assertEqual(self.pci_device.product_id, 'p')
self.assertEqual(self.pci_device.vendor_id, 'v2')
self.assertEqual(self.pci_device.obj_what_changed(),
- set(['vendor_id', 'product_id']))
+ set(['vendor_id', 'product_id', 'parent_addr']))
def test_get_by_dev_addr(self):
ctxt = context.get_admin_context()
@@ -180,6 +184,15 @@ class _TestPciDeviceObject(object):
self.assertEqual('blah', dev.parent_addr)
self.assertEqual({'phys_function': 'blah'}, dev.extra_info)
+ def test_from_db_obj_pre_1_5_format(self):
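+ # the UNAVAILABLE status only exists in the 1.5 format, so
+ # backleveling a device carrying it to 1.4 must fail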
+ ctxt = context.get_admin_context()
+ fake_dev_pre_1_5 = copy.deepcopy(fake_db_dev_old)
+ fake_dev_pre_1_5['status'] = fields.PciDeviceStatus.UNAVAILABLE
+ dev = pci_device.PciDevice._from_db_object(
+ ctxt, pci_device.PciDevice(), fake_dev_pre_1_5)
+ self.assertRaises(exception.ObjectActionError,
+ dev.obj_to_primitive, '1.4')
+
def test_save_empty_parent_addr(self):
ctxt = context.get_admin_context()
dev = pci_device.PciDevice._from_db_object(
@@ -219,8 +232,8 @@ class _TestPciDeviceObject(object):
return return_dev
ctxt = context.get_admin_context()
- self.stubs.Set(db, 'pci_device_update', _fake_update)
- self.pci_device = pci_device.PciDevice.create(dev_dict)
+ self.stub_out('nova.db.pci_device_update', _fake_update)
+ self.pci_device = pci_device.PciDevice.create(None, dev_dict)
self.pci_device._context = ctxt
self.pci_device.save()
self.assertEqual(self.extra_info, '{}')
@@ -242,8 +255,8 @@ class _TestPciDeviceObject(object):
def _fake_update(ctxt, node_id, addr, updates):
self.called = True
- self.stubs.Set(db, 'pci_device_destroy', _fake_destroy)
- self.stubs.Set(db, 'pci_device_update', _fake_update)
+ self.stub_out('nova.db.pci_device_destroy', _fake_destroy)
+ self.stub_out('nova.db.pci_device_update', _fake_update)
self._create_fake_pci_device()
self.pci_device.status = fields.PciDeviceStatus.DELETED
self.called = False
@@ -294,48 +307,48 @@ class _TestPciDeviceObject(object):
update_mock.call_args[0][3]['extra_info'])
def test_update_numa_node(self):
- self.pci_device = pci_device.PciDevice.create(dev_dict)
+ self.pci_device = pci_device.PciDevice.create(None, dev_dict)
self.assertEqual(0, self.pci_device.numa_node)
self.dev_dict = copy.copy(dev_dict)
self.dev_dict['numa_node'] = '1'
- self.pci_device = pci_device.PciDevice.create(self.dev_dict)
+ self.pci_device = pci_device.PciDevice.create(None, self.dev_dict)
self.assertEqual(1, self.pci_device.numa_node)
def test_pci_device_equivalent(self):
- pci_device1 = pci_device.PciDevice.create(dev_dict)
- pci_device2 = pci_device.PciDevice.create(dev_dict)
+ pci_device1 = pci_device.PciDevice.create(None, dev_dict)
+ pci_device2 = pci_device.PciDevice.create(None, dev_dict)
self.assertEqual(pci_device1, pci_device2)
def test_pci_device_equivalent_with_ignore_field(self):
- pci_device1 = pci_device.PciDevice.create(dev_dict)
- pci_device2 = pci_device.PciDevice.create(dev_dict)
+ pci_device1 = pci_device.PciDevice.create(None, dev_dict)
+ pci_device2 = pci_device.PciDevice.create(None, dev_dict)
pci_device2.updated_at = timeutils.utcnow()
self.assertEqual(pci_device1, pci_device2)
def test_pci_device_not_equivalent1(self):
- pci_device1 = pci_device.PciDevice.create(dev_dict)
+ pci_device1 = pci_device.PciDevice.create(None, dev_dict)
dev_dict2 = copy.copy(dev_dict)
dev_dict2['address'] = 'b'
- pci_device2 = pci_device.PciDevice.create(dev_dict2)
+ pci_device2 = pci_device.PciDevice.create(None, dev_dict2)
self.assertNotEqual(pci_device1, pci_device2)
def test_pci_device_not_equivalent2(self):
- pci_device1 = pci_device.PciDevice.create(dev_dict)
- pci_device2 = pci_device.PciDevice.create(dev_dict)
+ pci_device1 = pci_device.PciDevice.create(None, dev_dict)
+ pci_device2 = pci_device.PciDevice.create(None, dev_dict)
delattr(pci_device2, 'address')
self.assertNotEqual(pci_device1, pci_device2)
def test_pci_device_not_equivalent_with_none(self):
- pci_device1 = pci_device.PciDevice.create(dev_dict)
- pci_device2 = pci_device.PciDevice.create(dev_dict)
+ pci_device1 = pci_device.PciDevice.create(None, dev_dict)
+ pci_device2 = pci_device.PciDevice.create(None, dev_dict)
pci_device1.instance_uuid = 'aaa'
pci_device2.instance_uuid = None
self.assertNotEqual(pci_device1, pci_device2)
def test_claim_device(self):
self._create_fake_instance()
- devobj = pci_device.PciDevice.create(dev_dict)
+ devobj = pci_device.PciDevice.create(None, dev_dict)
devobj.claim(self.inst)
self.assertEqual(devobj.status,
fields.PciDeviceStatus.CLAIMED)
@@ -345,14 +358,14 @@ class _TestPciDeviceObject(object):
def test_claim_device_fail(self):
self._create_fake_instance()
- devobj = pci_device.PciDevice.create(dev_dict)
+ devobj = pci_device.PciDevice.create(None, dev_dict)
devobj.status = fields.PciDeviceStatus.ALLOCATED
self.assertRaises(exception.PciDeviceInvalidStatus,
devobj.claim, self.inst)
def test_allocate_device(self):
self._create_fake_instance()
- devobj = pci_device.PciDevice.create(dev_dict)
+ devobj = pci_device.PciDevice.create(None, dev_dict)
devobj.claim(self.inst)
devobj.allocate(self.inst)
self.assertEqual(devobj.status,
@@ -366,7 +379,7 @@ class _TestPciDeviceObject(object):
def test_allocate_device_fail_status(self):
self._create_fake_instance()
- devobj = pci_device.PciDevice.create(dev_dict)
+ devobj = pci_device.PciDevice.create(None, dev_dict)
devobj.status = 'removed'
self.assertRaises(exception.PciDeviceInvalidStatus,
devobj.allocate, self.inst)
@@ -375,14 +388,14 @@ class _TestPciDeviceObject(object):
self._create_fake_instance()
inst_2 = instance.Instance()
inst_2.uuid = 'fake-inst-uuid-2'
- devobj = pci_device.PciDevice.create(dev_dict)
+ devobj = pci_device.PciDevice.create(None, dev_dict)
devobj.claim(self.inst)
self.assertRaises(exception.PciDeviceInvalidOwner,
devobj.allocate, inst_2)
def test_free_claimed_device(self):
self._create_fake_instance()
- devobj = pci_device.PciDevice.create(dev_dict)
+ devobj = pci_device.PciDevice.create(None, dev_dict)
devobj.claim(self.inst)
devobj.free(self.inst)
self.assertEqual(devobj.status,
@@ -405,20 +418,20 @@ class _TestPciDeviceObject(object):
def test_free_device_fail(self):
self._create_fake_instance()
- devobj = pci_device.PciDevice.create(dev_dict)
+ devobj = pci_device.PciDevice.create(None, dev_dict)
devobj.status = fields.PciDeviceStatus.REMOVED
self.assertRaises(exception.PciDeviceInvalidStatus, devobj.free)
def test_remove_device(self):
self._create_fake_instance()
- devobj = pci_device.PciDevice.create(dev_dict)
+ devobj = pci_device.PciDevice.create(None, dev_dict)
devobj.remove()
self.assertEqual(devobj.status, fields.PciDeviceStatus.REMOVED)
self.assertIsNone(devobj.instance_uuid)
def test_remove_device_fail(self):
self._create_fake_instance()
- devobj = pci_device.PciDevice.create(dev_dict)
+ devobj = pci_device.PciDevice.create(None, dev_dict)
devobj.claim(self.inst)
self.assertRaises(exception.PciDeviceInvalidStatus, devobj.remove)
@@ -475,3 +488,243 @@ class TestPciDeviceListObject(test_objects._LocalTest,
class TestPciDeviceListObjectRemote(test_objects._RemoteTest,
_TestPciDeviceListObject):
pass
+
+
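+# The SR-IOV tests below build two physical functions (PFs) with four
+# virtual functions (VFs) each: claiming or allocating a PF makes its
+# dependent VFs unusable, and claiming a VF ties up the parent PF.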
+class _TestSRIOVPciDeviceObject(object):
+ def _create_pci_devices(self, vf_product_id=1515, pf_product_id=1528,
+ num_pfs=2, num_vfs=8):
+ self.sriov_pf_devices = []
+ for dev in range(num_pfs):
+ pci_dev = {'compute_node_id': 1,
+ 'address': '0000:81:00.%d' % dev,
+ 'vendor_id': '8086',
+ 'product_id': '%d' % pf_product_id,
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_PF,
+ 'parent_addr': None,
+ 'numa_node': 0}
+ pci_dev_obj = objects.PciDevice.create(None, pci_dev)
+ pci_dev_obj.id = num_pfs + 81
+ self.sriov_pf_devices.append(pci_dev_obj)
+
+ self.sriov_vf_devices = []
+ for dev in range(num_vfs):
+ pci_dev = {'compute_node_id': 1,
+ 'address': '0000:81:10.%d' % dev,
+ 'vendor_id': '8086',
+ 'product_id': '%d' % vf_product_id,
+ 'status': 'available',
+ 'request_id': None,
+ 'dev_type': fields.PciDeviceType.SRIOV_VF,
+ 'parent_addr': '0000:81:00.%d' % int(dev / 4),
+ 'numa_node': 0}
+ pci_dev_obj = objects.PciDevice.create(None, pci_dev)
+ pci_dev_obj.id = num_vfs + 1
+ self.sriov_vf_devices.append(pci_dev_obj)
+
+ def _create_fake_instance(self):
+ self.inst = instance.Instance()
+ self.inst.uuid = 'fake-inst-uuid'
+ self.inst.pci_devices = pci_device.PciDeviceList()
+
+ def _create_fake_pci_device(self, ctxt=None):
+ if not ctxt:
+ ctxt = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'pci_device_get_by_addr')
+ db.pci_device_get_by_addr(ctxt, 1, 'a').AndReturn(fake_db_dev)
+ self.mox.ReplayAll()
+ self.pci_device = pci_device.PciDevice.get_by_dev_addr(ctxt, 1, 'a')
+
+ def _fake_get_by_parent_address(self, ctxt, node_id, addr):
+ vf_devs = []
+ for dev in self.sriov_vf_devices:
+ if dev.parent_addr == addr:
+ vf_devs.append(dev)
+ return vf_devs
+
+ def _fake_pci_device_get_by_addr(self, ctxt, id, addr):
+ for dev in self.sriov_pf_devices:
+ if dev.address == addr:
+ return dev
+
+ def test_claim_PF(self):
+ self._create_fake_instance()
+ with mock.patch.object(objects.PciDeviceList, 'get_by_parent_address',
+ side_effect=self._fake_get_by_parent_address):
+ self._create_pci_devices()
+ devobj = self.sriov_pf_devices[0]
+ devobj.claim(self.inst)
+ self.assertEqual(devobj.status,
+ fields.PciDeviceStatus.CLAIMED)
+ self.assertEqual(devobj.instance_uuid,
+ self.inst.uuid)
+ self.assertEqual(len(self.inst.pci_devices), 0)
+ # check that all the dependent VFs are UNCLAIMABLE
+ self.assertTrue(all(
+ [dev.status == fields.PciDeviceStatus.UNCLAIMABLE for
+ dev in self._fake_get_by_parent_address(None, None,
+ self.sriov_pf_devices[0].address)]))
+
+ def test_claim_VF(self):
+ self._create_fake_instance()
+ with mock.patch.object(objects.PciDevice, 'get_by_dev_addr',
+ side_effect=self._fake_pci_device_get_by_addr):
+ self._create_pci_devices()
+ devobj = self.sriov_vf_devices[0]
+ devobj.claim(self.inst)
+ self.assertEqual(devobj.status,
+ fields.PciDeviceStatus.CLAIMED)
+ self.assertEqual(devobj.instance_uuid,
+ self.inst.uuid)
+ self.assertEqual(len(self.inst.pci_devices), 0)
+
+ # check that the parent PF status has changed to UNCLAIMABLE
+ parent = self._fake_pci_device_get_by_addr(None, None,
+ devobj.parent_addr)
+ self.assertEqual(fields.PciDeviceStatus.UNCLAIMABLE, parent.status)
+
+ def test_allocate_PF(self):
+ self._create_fake_instance()
+ with mock.patch.object(objects.PciDeviceList, 'get_by_parent_address',
+ side_effect=self._fake_get_by_parent_address):
+ self._create_pci_devices()
+ devobj = self.sriov_pf_devices[0]
+ devobj.claim(self.inst)
+ devobj.allocate(self.inst)
+ self.assertEqual(devobj.status,
+ fields.PciDeviceStatus.ALLOCATED)
+ self.assertEqual(devobj.instance_uuid,
+ self.inst.uuid)
+ self.assertEqual(len(self.inst.pci_devices), 1)
+ # check that all the dependent VFs are UNAVAILABLE
+ self.assertTrue(all(
+ [dev.status == fields.PciDeviceStatus.UNAVAILABLE for
+ dev in self._fake_get_by_parent_address(None, None,
+ self.sriov_pf_devices[0].address)]))
+
+ def test_allocate_VF(self):
+ self._create_fake_instance()
+ with mock.patch.object(objects.PciDevice, 'get_by_dev_addr',
+ side_effect=self._fake_pci_device_get_by_addr):
+ self._create_pci_devices()
+ devobj = self.sriov_vf_devices[0]
+ devobj.claim(self.inst)
+ devobj.allocate(self.inst)
+ self.assertEqual(devobj.status,
+ fields.PciDeviceStatus.ALLOCATED)
+ self.assertEqual(devobj.instance_uuid,
+ self.inst.uuid)
+ self.assertEqual(len(self.inst.pci_devices), 1)
+
+ # check that the parent PF status has changed to UNAVAILABLE
+ parent = self._fake_pci_device_get_by_addr(None, None,
+ devobj.parent_addr)
+ self.assertEqual(fields.PciDeviceStatus.UNAVAILABLE, parent.status)
+
+ def test_claim_PF_fail(self):
+ self._create_fake_instance()
+ with mock.patch.object(objects.PciDeviceList, 'get_by_parent_address',
+ side_effect=self._fake_get_by_parent_address):
+ self._create_pci_devices()
+ devobj = self.sriov_pf_devices[0]
+ self.sriov_vf_devices[0].status = fields.PciDeviceStatus.CLAIMED
+
+ self.assertRaises(exception.PciDeviceVFInvalidStatus,
+ devobj.claim, self.inst)
+
+ def test_claim_VF_fail(self):
+ self._create_fake_instance()
+ with mock.patch.object(objects.PciDevice, 'get_by_dev_addr',
+ side_effect=self._fake_pci_device_get_by_addr):
+ self._create_pci_devices()
+ devobj = self.sriov_vf_devices[0]
+ parent = self._fake_pci_device_get_by_addr(None, None,
+ devobj.parent_addr)
+ parent.status = fields.PciDeviceStatus.CLAIMED
+
+ self.assertRaises(exception.PciDevicePFInvalidStatus,
+ devobj.claim, self.inst)
+
+ def test_allocate_PF_fail(self):
+ self._create_fake_instance()
+ with mock.patch.object(objects.PciDeviceList, 'get_by_parent_address',
+ side_effect=self._fake_get_by_parent_address):
+ self._create_pci_devices()
+ devobj = self.sriov_pf_devices[0]
+ self.sriov_vf_devices[0].status = fields.PciDeviceStatus.CLAIMED
+
+ self.assertRaises(exception.PciDeviceVFInvalidStatus,
+ devobj.allocate, self.inst)
+
+ def test_allocate_VF_fail(self):
+ self._create_fake_instance()
+ with mock.patch.object(objects.PciDevice, 'get_by_dev_addr',
+ side_effect=self._fake_pci_device_get_by_addr):
+ self._create_pci_devices()
+ devobj = self.sriov_vf_devices[0]
+ parent = self._fake_pci_device_get_by_addr(None, None,
+ devobj.parent_addr)
+ parent.status = fields.PciDeviceStatus.CLAIMED
+
+ self.assertRaises(exception.PciDevicePFInvalidStatus,
+ devobj.allocate, self.inst)
+
+ def test_free_allocated_PF(self):
+ self._create_fake_instance()
+ with mock.patch.object(objects.PciDeviceList, 'get_by_parent_address',
+ side_effect=self._fake_get_by_parent_address):
+ self._create_pci_devices()
+ devobj = self.sriov_pf_devices[0]
+ devobj.claim(self.inst)
+ devobj.allocate(self.inst)
+ devobj.free(self.inst)
+ self.assertEqual(devobj.status,
+ fields.PciDeviceStatus.AVAILABLE)
+ self.assertIsNone(devobj.instance_uuid)
+ # check that all the dependent VFs are AVAILABLE again
+ self.assertTrue(all(
+ [dev.status == fields.PciDeviceStatus.AVAILABLE for
+ dev in self._fake_get_by_parent_address(None, None,
+ self.sriov_pf_devices[0].address)]))
+
+ def test_free_allocated_VF(self):
+ self._create_fake_instance()
+ with test.nested(
+ mock.patch.object(objects.PciDevice, 'get_by_dev_addr',
+ side_effect=self._fake_pci_device_get_by_addr),
+ mock.patch.object(objects.PciDeviceList, 'get_by_parent_address',
+ side_effect=self._fake_get_by_parent_address)):
+ self._create_pci_devices()
+ vf = self.sriov_vf_devices[0]
+ dependents = self._fake_get_by_parent_address(None, None,
+ vf.parent_addr)
+ for devobj in dependents:
+ devobj.claim(self.inst)
+ devobj.allocate(self.inst)
+ self.assertEqual(devobj.status,
+ fields.PciDeviceStatus.ALLOCATED)
+ for devobj in dependents[:3]:
+ devobj.free(self.inst)
+ # while sibling VFs remain allocated the parent PF must stay
+ # UNAVAILABLE
+ parent = self._fake_pci_device_get_by_addr(None, None,
+ devobj.parent_addr)
+ self.assertEqual(fields.PciDeviceStatus.UNAVAILABLE,
+ parent.status)
+ for devobj in dependents[3:]:
+ devobj.free(self.inst)
+ # once every dependent VF is freed the parent PF becomes
+ # AVAILABLE again
+ parent = self._fake_pci_device_get_by_addr(None, None,
+ devobj.parent_addr)
+ self.assertEqual(fields.PciDeviceStatus.AVAILABLE,
+ parent.status)
+
+
+class TestSRIOVPciDeviceListObject(test_objects._LocalTest,
+ _TestSRIOVPciDeviceObject):
+ pass
+
+
+class TestSRIOVPciDeviceListObjectRemote(test_objects._RemoteTest,
+ _TestSRIOVPciDeviceObject):
+ pass
diff --git a/nova/tests/unit/objects/test_request_spec.py b/nova/tests/unit/objects/test_request_spec.py
index 2df7cdceb0..d68df0171f 100644
--- a/nova/tests/unit/objects/test_request_spec.py
+++ b/nova/tests/unit/objects/test_request_spec.py
@@ -21,6 +21,8 @@ from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import request_spec
+from nova.tests.unit import fake_flavor
+from nova.tests.unit import fake_instance
from nova.tests.unit import fake_request_spec
from nova.tests.unit.objects import test_objects
@@ -293,6 +295,25 @@ class _TestRequestSpecObject(object):
# just making sure that the context is set by the method
self.assertEqual(ctxt, spec._context)
+ def test_from_components(self):
+ ctxt = context.RequestContext('fake-user', 'fake-project')
+ instance = fake_instance.fake_instance_obj(ctxt)
+ image = {'id': 'fake-image-id', 'properties': {'mappings': []},
+ 'status': 'fake-status', 'location': 'far-away'}
+ flavor = fake_flavor.fake_flavor_obj(ctxt)
+ filter_properties = {}
+ instance_group = None
+
+ spec = objects.RequestSpec.from_components(ctxt, instance, image,
+ flavor, instance.numa_topology, instance.pci_requests,
+ filter_properties, instance_group, instance.availability_zone)
+ # Make sure that all fields are set using that helper method
+ for field in [f for f in spec.obj_fields if f != 'id']:
+ self.assertEqual(True, spec.obj_attr_is_set(field),
+ 'Field: %s is not set' % field)
+ # just making sure that the context is set by the method
+ self.assertEqual(ctxt, spec._context)
+
def test_get_scheduler_hint(self):
spec_obj = objects.RequestSpec(scheduler_hints={'foo_single': ['1'],
'foo_mul': ['1', '2']})
diff --git a/nova/tests/unit/objects/test_resource_provider.py b/nova/tests/unit/objects/test_resource_provider.py
new file mode 100644
index 0000000000..c918fada80
--- /dev/null
+++ b/nova/tests/unit/objects/test_resource_provider.py
@@ -0,0 +1,270 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova import objects
+from nova.tests.unit.objects import test_objects
+from nova.tests import uuidsentinel as uuids
+
+
+_RESOURCE_CLASS_NAME = 'DISK_GB'
+_RESOURCE_CLASS_ID = 2
+_RESOURCE_PROVIDER_ID = 1
+_RESOURCE_PROVIDER_UUID = uuids.resource_provider
+_RESOURCE_PROVIDER_DB = {
+ 'id': _RESOURCE_PROVIDER_ID,
+ 'uuid': _RESOURCE_PROVIDER_UUID,
+}
+_INVENTORY_ID = 2
+_INVENTORY_DB = {
+ 'id': _INVENTORY_ID,
+ 'resource_provider_id': _RESOURCE_PROVIDER_ID,
+ 'resource_class_id': _RESOURCE_CLASS_ID,
+ 'total': 16,
+ 'reserved': 2,
+ 'min_unit': 1,
+ 'max_unit': 8,
+ 'step_size': 1,
+ 'allocation_ratio': 1.0,
+}
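+# Shared fixtures: one resource provider row and one DISK_GB inventory
+# row against it, reused by both the mocked NoDB tests and the
+# DB-backed tests below.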
+
+
+class _TestResourceProviderNoDB(object):
+
+ @mock.patch('nova.objects.ResourceProvider._get_by_uuid_from_db',
+ return_value=_RESOURCE_PROVIDER_DB)
+ def test_object_get_by_uuid(self, mock_db_get):
+ resource_provider_object = objects.ResourceProvider.get_by_uuid(
+ mock.sentinel.ctx, _RESOURCE_PROVIDER_UUID)
+ self.assertEqual(_RESOURCE_PROVIDER_ID, resource_provider_object.id)
+ self.assertEqual(_RESOURCE_PROVIDER_UUID,
+ resource_provider_object.uuid)
+
+ @mock.patch('nova.objects.ResourceProvider._create_in_db',
+ return_value=_RESOURCE_PROVIDER_DB)
+ def test_create(self, mock_db_create):
+ obj = objects.ResourceProvider(context=self.context,
+ uuid=_RESOURCE_PROVIDER_UUID)
+ obj.create()
+ self.assertEqual(_RESOURCE_PROVIDER_UUID, obj.uuid)
+ self.assertIsInstance(obj.id, int)
+ mock_db_create.assert_called_once_with(
+ self.context, {'uuid': _RESOURCE_PROVIDER_UUID})
+
+ def test_create_id_fail(self):
+ obj = objects.ResourceProvider(context=self.context,
+ uuid=_RESOURCE_PROVIDER_UUID,
+ id=_RESOURCE_PROVIDER_ID)
+ self.assertRaises(exception.ObjectActionError,
+ obj.create)
+
+ def test_create_no_uuid_fail(self):
+ obj = objects.ResourceProvider(context=self.context)
+ self.assertRaises(exception.ObjectActionError,
+ obj.create)
+
+
+class TestResourceProviderNoDB(test_objects._LocalTest,
+ _TestResourceProviderNoDB):
+ USES_DB = False
+
+
+class TestRemoteResourceProviderNoDB(test_objects._RemoteTest,
+ _TestResourceProviderNoDB):
+ USES_DB = False
+
+
+class TestResourceProvider(test_objects._LocalTest):
+
+ def test_create_in_db(self):
+ updates = {'uuid': _RESOURCE_PROVIDER_UUID}
+ db_rp = objects.ResourceProvider._create_in_db(
+ self.context, updates)
+ self.assertIsInstance(db_rp.id, int)
+ self.assertEqual(_RESOURCE_PROVIDER_UUID, db_rp.uuid)
+
+ def test_get_by_uuid_from_db(self):
+ rp = objects.ResourceProvider(context=self.context,
+ uuid=_RESOURCE_PROVIDER_UUID)
+ rp.create()
+ retrieved_rp = objects.ResourceProvider._get_by_uuid_from_db(
+ self.context, _RESOURCE_PROVIDER_UUID)
+ self.assertEqual(rp.uuid, retrieved_rp.uuid)
+
+ self.assertRaises(exception.NotFound,
+ objects.ResourceProvider._get_by_uuid_from_db,
+ self.context,
+ uuids.missing)
+
+
+class _TestInventoryNoDB(object):
+ @mock.patch('nova.objects.Inventory._create_in_db',
+ return_value=_INVENTORY_DB)
+ def test_create(self, mock_db_create):
+ rp = objects.ResourceProvider(id=_RESOURCE_PROVIDER_ID,
+ uuid=_RESOURCE_PROVIDER_UUID)
+ obj = objects.Inventory(context=self.context,
+ resource_provider=rp,
+ resource_class=_RESOURCE_CLASS_NAME,
+ total=16,
+ reserved=2,
+ min_unit=1,
+ max_unit=8,
+ step_size=1,
+ allocation_ratio=1.0)
+ obj.create()
+ self.assertEqual(_INVENTORY_ID, obj.id)
+ expected = dict(_INVENTORY_DB)
+ expected.pop('id')
+ mock_db_create.assert_called_once_with(self.context, expected)
+
+ @mock.patch('nova.objects.Inventory._update_in_db',
+ return_value=_INVENTORY_DB)
+ def test_save(self, mock_db_save):
+ obj = objects.Inventory(context=self.context,
+ id=_INVENTORY_ID,
+ reserved=4)
+ obj.save()
+ mock_db_save.assert_called_once_with(self.context,
+ _INVENTORY_ID,
+ {'reserved': 4})
+
+ @mock.patch('nova.objects.InventoryList._get_all_by_resource_provider')
+ def test_get_all_by_resource_provider(self, mock_get):
+ expected = [dict(_INVENTORY_DB,
+ resource_provider=dict(_RESOURCE_PROVIDER_DB)),
+ dict(_INVENTORY_DB,
+ id=_INVENTORY_DB['id'] + 1,
+ resource_provider=dict(_RESOURCE_PROVIDER_DB))]
+ mock_get.return_value = expected
+ objs = objects.InventoryList.get_all_by_resource_provider_uuid(
+ self.context, _RESOURCE_PROVIDER_DB['uuid'])
+ self.assertEqual(2, len(objs))
+ self.assertEqual(_INVENTORY_DB['id'], objs[0].id)
+ self.assertEqual(_INVENTORY_DB['id'] + 1, objs[1].id)
+
+
+class TestInventoryNoDB(test_objects._LocalTest,
+ _TestInventoryNoDB):
+ USES_DB = False
+
+
+class TestRemoteInventoryNoDB(test_objects._RemoteTest,
+ _TestInventoryNoDB):
+ USES_DB = False
+
+
+class TestInventory(test_objects._LocalTest):
+
+ def _make_inventory(self):
+ db_rp = objects.ResourceProvider(
+ context=self.context, uuid=uuids.inventory_resource_provider)
+ db_rp.create()
+ updates = dict(_INVENTORY_DB,
+ resource_provider_id=db_rp.id)
+ updates.pop('id')
+ db_inventory = objects.Inventory._create_in_db(
+ self.context, updates)
+ return db_rp, db_inventory
+
+ def test_create_in_db(self):
+ updates = dict(_INVENTORY_DB)
+ updates.pop('id')
+ db_inventory = objects.Inventory._create_in_db(
+ self.context, updates)
+ self.assertEqual(_INVENTORY_DB['total'], db_inventory.total)
+
+ def test_update_in_db(self):
+ db_rp, db_inventory = self._make_inventory()
+ objects.Inventory._update_in_db(self.context,
+ db_inventory.id,
+ {'total': 32})
+ inventories = objects.InventoryList.get_all_by_resource_provider_uuid(
+ self.context, db_rp.uuid)
+ self.assertEqual(32, inventories[0].total)
+
+ def test_update_in_db_fails_bad_id(self):
+ db_rp, db_inventory = self._make_inventory()
+ self.assertRaises(exception.NotFound,
+ objects.Inventory._update_in_db,
+ self.context, 99, {'total': 32})
+
+ def test_get_all_by_resource_provider_uuid(self):
+ db_rp, db_inventory = self._make_inventory()
+
+ retrieved_inventories = (
+ objects.InventoryList._get_all_by_resource_provider(
+ self.context, db_rp.uuid)
+ )
+
+ self.assertEqual(1, len(retrieved_inventories))
+ self.assertEqual(db_inventory.id, retrieved_inventories[0].id)
+ self.assertEqual(db_inventory.total, retrieved_inventories[0].total)
+
+ retrieved_inventories = (
+ objects.InventoryList._get_all_by_resource_provider(
+ self.context, uuids.bad_rp_uuid)
+ )
+ self.assertEqual(0, len(retrieved_inventories))
+
+ def test_create_requires_resource_provider(self):
+ inventory_dict = dict(_INVENTORY_DB)
+ inventory_dict.pop('id')
+ inventory_dict.pop('resource_provider_id')
+ inventory_dict.pop('resource_class_id')
+ inventory_dict['resource_class'] = _RESOURCE_CLASS_NAME
+ inventory = objects.Inventory(context=self.context,
+ **inventory_dict)
+ error = self.assertRaises(exception.ObjectActionError,
+ inventory.create)
+ self.assertIn('resource_provider required', str(error))
+
+ def test_create_requires_created_resource_provider(self):
+ rp = objects.ResourceProvider(
+ context=self.context, uuid=uuids.inventory_resource_provider)
+ inventory_dict = dict(_INVENTORY_DB)
+ inventory_dict.pop('id')
+ inventory_dict.pop('resource_provider_id')
+ inventory_dict.pop('resource_class_id')
+ inventory_dict['resource_provider'] = rp
+ inventory = objects.Inventory(context=self.context,
+ **inventory_dict)
+ error = self.assertRaises(exception.ObjectActionError,
+ inventory.create)
+ self.assertIn('resource_provider required', str(error))
+
+ def test_create_requires_resource_class(self):
+ rp = objects.ResourceProvider(
+ context=self.context, uuid=uuids.inventory_resource_provider)
+ rp.create()
+ inventory_dict = dict(_INVENTORY_DB)
+ inventory_dict.pop('id')
+ inventory_dict.pop('resource_provider_id')
+ inventory_dict.pop('resource_class_id')
+ inventory_dict['resource_provider'] = rp
+ inventory = objects.Inventory(context=self.context,
+ **inventory_dict)
+ error = self.assertRaises(exception.ObjectActionError,
+ inventory.create)
+ self.assertIn('resource_class required', str(error))
+
+ def test_create_id_fails(self):
+ inventory = objects.Inventory(self.context, **_INVENTORY_DB)
+ self.assertRaises(exception.ObjectActionError, inventory.create)
+
+ def test_save_without_id_fails(self):
+ inventory_dict = dict(_INVENTORY_DB)
+ inventory_dict.pop('id')
+ inventory = objects.Inventory(self.context, **inventory_dict)
+ self.assertRaises(exception.ObjectActionError, inventory.save)
diff --git a/nova/tests/unit/objects/test_security_group_rule.py b/nova/tests/unit/objects/test_security_group_rule.py
index c6e5af1984..5fcd37b658 100644
--- a/nova/tests/unit/objects/test_security_group_rule.py
+++ b/nova/tests/unit/objects/test_security_group_rule.py
@@ -42,9 +42,10 @@ class _TestSecurityGroupRuleObject(object):
self.context, 1)
for field in fake_rule:
if field == 'cidr':
- self.assertEqual(fake_rule[field], str(rule[field]))
+ self.assertEqual(fake_rule[field], str(getattr(rule,
+ field)))
else:
- self.assertEqual(fake_rule[field], rule[field])
+ self.assertEqual(fake_rule[field], getattr(rule, field))
sgrg.assert_called_with(self.context, 1)
def test_get_by_security_group(self):
diff --git a/nova/tests/unit/objects/test_service.py b/nova/tests/unit/objects/test_service.py
index 2504f64a7c..40c9367c77 100644
--- a/nova/tests/unit/objects/test_service.py
+++ b/nova/tests/unit/objects/test_service.py
@@ -18,10 +18,12 @@ from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
from nova.compute import manager as compute_manager
+from nova import context
from nova import db
from nova import exception
from nova import objects
from nova.objects import aggregate
+from nova.objects import fields
from nova.objects import service
from nova import test
from nova.tests.unit.objects import test_compute_node
@@ -270,8 +272,7 @@ class _TestServiceObject(object):
self.assertEqual(0,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
- mock_get.assert_called_once_with(self.context, 'nova-compute',
- use_slave=False)
+ mock_get.assert_called_once_with(self.context, 'nova-compute')
@mock.patch('nova.db.service_get_minimum_version')
def test_get_minimum_version(self, mock_get):
@@ -279,8 +280,7 @@ class _TestServiceObject(object):
self.assertEqual(123,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
- mock_get.assert_called_once_with(self.context, 'nova-compute',
- use_slave=False)
+ mock_get.assert_called_once_with(self.context, 'nova-compute')
@mock.patch('nova.db.service_get_minimum_version')
@mock.patch('nova.objects.service.LOG')
@@ -308,8 +308,7 @@ class _TestServiceObject(object):
self.assertEqual(123,
objects.Service.get_minimum_version(self.context,
'nova-compute'))
- mock_get.assert_called_once_with(self.context, 'nova-compute',
- use_slave=False)
+ mock_get.assert_called_once_with(self.context, 'nova-compute')
objects.Service._SERVICE_VERSION_CACHING = False
objects.Service.clear_min_version_cache()
@@ -348,6 +347,10 @@ class TestRemoteServiceObject(test_objects._RemoteTest,
class TestServiceVersion(test.TestCase):
+ def setUp(self):
+ self.ctxt = context.get_admin_context()
+ super(TestServiceVersion, self).setUp()
+
def _collect_things(self):
data = {
'compute_rpc': compute_manager.ComputeManager.target.version,
@@ -365,7 +368,8 @@ class TestServiceVersion(test.TestCase):
self.assertEqual(
current, calculated,
'Changes detected that require a SERVICE_VERSION change. Please '
- 'increment nova.objects.service.SERVICE_VERSION')
+ 'increment nova.objects.service.SERVICE_VERSION, and make sure it '
+ 'is equal to nova.compute.manager.ComputeManager.target.version.')
def test_version_in_init(self):
self.assertRaises(exception.ObjectActionError,
@@ -381,12 +385,11 @@ class TestServiceVersion(test.TestCase):
fake_different_service = dict(fake_service)
fake_different_service['version'] = fake_version
obj = objects.Service()
- obj._from_db_object(mock.sentinel.context, obj, fake_different_service)
+ obj._from_db_object(self.ctxt, obj, fake_different_service)
self.assertEqual(fake_version, obj.version)
def test_save_noop_with_only_version(self):
- o = objects.Service(context=mock.sentinel.context,
- id=fake_service['id'])
+ o = objects.Service(context=self.ctxt, id=fake_service['id'])
o.obj_reset_changes(['id'])
self.assertEqual(set(['version']), o.obj_what_changed())
with mock.patch('nova.db.service_update') as mock_update:
@@ -397,6 +400,59 @@ class TestServiceVersion(test.TestCase):
mock_update.return_value = fake_service
o.save()
mock_update.assert_called_once_with(
- mock.sentinel.context, fake_service['id'],
+ self.ctxt, fake_service['id'],
{'version': service.SERVICE_VERSION,
'host': 'foo'})
+
+
+class TestServiceStatusNotification(test.TestCase):
+ def setUp(self):
+ self.ctxt = context.get_admin_context()
+ super(TestServiceStatusNotification, self).setUp()
+
+ @mock.patch('nova.objects.service.ServiceStatusNotification')
+ def _verify_notification(self, service_obj, mock_notification):
+ service_obj.save()
+
+ self.assertTrue(mock_notification.called)
+
+ event_type = mock_notification.call_args[1]['event_type']
+ priority = mock_notification.call_args[1]['priority']
+ publisher = mock_notification.call_args[1]['publisher']
+ payload = mock_notification.call_args[1]['payload']
+
+ self.assertEqual(service_obj.host, publisher.host)
+ self.assertEqual(service_obj.binary, publisher.binary)
+ self.assertEqual(fields.NotificationPriority.INFO, priority)
+ self.assertEqual('service', event_type.object)
+ self.assertEqual(fields.NotificationAction.UPDATE,
+ event_type.action)
+ for field in service.ServiceStatusPayload.SCHEMA:
+ if field in fake_service:
+ self.assertEqual(fake_service[field], getattr(payload, field))
+
+ mock_notification.return_value.emit.assert_called_once_with(self.ctxt)
+
+ @mock.patch('nova.db.service_update')
+ def test_service_update_with_notification(self, mock_db_service_update):
+ service_obj = objects.Service(context=self.ctxt, id=fake_service['id'])
+ mock_db_service_update.return_value = fake_service
+ for key, value in {'disabled': True,
+ 'disabled_reason': 'my reason',
+ 'forced_down': True}.items():
+ setattr(service_obj, key, value)
+ self._verify_notification(service_obj)
+
+ @mock.patch('nova.objects.service.ServiceStatusNotification')
+ @mock.patch('nova.db.service_update')
+ def test_service_update_without_notification(self,
+ mock_db_service_update,
+ mock_notification):
+ service_obj = objects.Service(context=self.ctxt, id=fake_service['id'])
+ mock_db_service_update.return_value = fake_service
+
+ for key, value in {'report_count': 13,
+ 'last_seen_up': timeutils.utcnow()}.items():
+ setattr(service_obj, key, value)
+ service_obj.save()
+ self.assertFalse(mock_notification.called)
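
_verify_notification above inspects the keyword arguments of the mocked notification through call_args[1]. For reference, a self-contained sketch of how mock's call_args indexing behaves:

    import mock  # on Python 3, unittest.mock behaves the same way

    notify = mock.Mock()
    notify(event_type='service.update', priority='INFO')

    args, kwargs = notify.call_args   # call_args unpacks to (args, kwargs)
    assert args == ()
    assert notify.call_args[1]['priority'] == 'INFO'
    assert kwargs['event_type'] == 'service.update'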
diff --git a/nova/tests/unit/pci/test_devspec.py b/nova/tests/unit/pci/test_devspec.py
index 1ed6b15516..420dfaadb2 100644
--- a/nova/tests/unit/pci/test_devspec.py
+++ b/nova/tests/unit/pci/test_devspec.py
@@ -173,5 +173,5 @@ class PciDevSpecTestCase(test.NoDBTestCase):
'extra_k1': 'v1',
}
- pci_obj = objects.PciDevice.create(pci_dev)
+ pci_obj = objects.PciDevice.create(None, pci_dev)
self.assertTrue(pci.match_pci_obj(pci_obj))
diff --git a/nova/tests/unit/pci/test_manager.py b/nova/tests/unit/pci/test_manager.py
index 0dc976a101..665d20920e 100644
--- a/nova/tests/unit/pci/test_manager.py
+++ b/nova/tests/unit/pci/test_manager.py
@@ -25,7 +25,6 @@ from nova import objects
from nova.objects import fields
from nova.pci import manager
from nova import test
-from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.pci import fakes as pci_fakes
@@ -275,7 +274,7 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
dev in self.tracker.pci_devs]),
set(['v', 'v1']))
- @mock.patch.object(objects.PciDevice, '_migrate_parent_addr',
+ @mock.patch.object(objects.PciDevice, 'should_migrate_data',
return_value=False)
def test_save(self, migrate_mock):
self.stub_out(
@@ -370,29 +369,33 @@ class PciDevTrackerTestCase(test.NoDBTestCase):
set([dev.address for dev in free_devs]),
set(['0000:00:00.1', '0000:00:00.2', '0000:00:00.3']))
+ @mock.patch('nova.objects.InstancePCIRequests.get_by_instance')
+ def test_free_devices(self, mock_get):
+ self._create_pci_requests_object(mock_get,
+ [{'count': 1, 'spec': [{'vendor_id': 'v'}]}])
+ self.tracker.claim_instance(None, self.inst)
+ self.tracker.update_pci_for_instance(None, self.inst, sign=1)
-class PciGetInstanceDevs(test.TestCase):
- def setUp(self):
- super(PciGetInstanceDevs, self).setUp()
- self.fake_context = context.get_admin_context()
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 2)
+
+ self.tracker.free_instance(None, self.inst)
+ free_devs = self.tracker.pci_stats.get_free_devs()
+ self.assertEqual(len(free_devs), 3)
+
+
+class PciGetInstanceDevs(test.NoDBTestCase):
- @mock.patch('nova.db.instance_get')
- def test_get_devs_object(self, mock_instance_get):
+ def test_get_devs_object(self):
def _fake_obj_load_attr(foo, attrname):
if attrname == 'pci_devices':
self.load_attr_called = True
foo.pci_devices = objects.PciDeviceList()
- inst = fakes.stub_instance(id='1')
- mock_instance_get.return_value = inst
- inst = objects.Instance.get_by_id(self.fake_context, '1',
- expected_attrs=[])
self.stub_out(
'nova.objects.Instance.obj_load_attr',
_fake_obj_load_attr)
self.load_attr_called = False
- manager.get_instance_pci_devs(inst)
+ manager.get_instance_pci_devs(objects.Instance())
self.assertTrue(self.load_attr_called)
- mock_instance_get.assert_called_with(self.fake_context, '1',
- columns_to_join=[])
diff --git a/nova/tests/unit/pci/test_stats.py b/nova/tests/unit/pci/test_stats.py
index f0ee494c5f..f81efbaf19 100644
--- a/nova/tests/unit/pci/test_stats.py
+++ b/nova/tests/unit/pci/test_stats.py
@@ -63,10 +63,10 @@ pci_requests_multiple = [objects.InstancePCIRequest(count=1,
class PciDeviceStatsTestCase(test.NoDBTestCase):
def _create_fake_devs(self):
- self.fake_dev_1 = objects.PciDevice.create(fake_pci_1)
- self.fake_dev_2 = objects.PciDevice.create(fake_pci_2)
- self.fake_dev_3 = objects.PciDevice.create(fake_pci_3)
- self.fake_dev_4 = objects.PciDevice.create(fake_pci_4)
+ self.fake_dev_1 = objects.PciDevice.create(None, fake_pci_1)
+ self.fake_dev_2 = objects.PciDevice.create(None, fake_pci_2)
+ self.fake_dev_3 = objects.PciDevice.create(None, fake_pci_3)
+ self.fake_dev_4 = objects.PciDevice.create(None, fake_pci_4)
map(self.pci_stats.add_device,
[self.fake_dev_1, self.fake_dev_2,
@@ -225,7 +225,8 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
'dev_type': 'type-PCI',
'parent_addr': None,
'numa_node': 0}
- self.pci_tagged_devices.append(objects.PciDevice.create(pci_dev))
+ self.pci_tagged_devices.append(objects.PciDevice.create(None,
+ pci_dev))
self.pci_untagged_devices = []
for dev in range(3):
@@ -238,7 +239,8 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
'dev_type': 'type-PCI',
'parent_addr': None,
'numa_node': 0}
- self.pci_untagged_devices.append(objects.PciDevice.create(pci_dev))
+ self.pci_untagged_devices.append(objects.PciDevice.create(None,
+ pci_dev))
map(self.pci_stats.add_device, self.pci_tagged_devices)
map(self.pci_stats.add_device, self.pci_untagged_devices)
@@ -296,7 +298,7 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
'status': 'available',
'parent_addr': None,
'request_id': None}
- pci_dev_obj = objects.PciDevice.create(pci_dev)
+ pci_dev_obj = objects.PciDevice.create(None, pci_dev)
self.pci_stats.add_device(pci_dev_obj)
# There should be no change
self.assertIsNone(
@@ -312,7 +314,7 @@ class PciDeviceStatsWithTagsTestCase(test.NoDBTestCase):
'status': 'available',
'parent_addr': None,
'request_id': None}
- pci_dev_obj = objects.PciDevice.create(pci_dev)
+ pci_dev_obj = objects.PciDevice.create(None, pci_dev)
self.pci_stats.remove_device(pci_dev_obj)
# There should be no change
self.assertIsNone(
@@ -349,7 +351,8 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
'dev_type': fields.PciDeviceType.SRIOV_PF,
'parent_addr': None,
'numa_node': 0}
- self.sriov_pf_devices.append(objects.PciDevice.create(pci_dev))
+ self.sriov_pf_devices.append(objects.PciDevice.create(None,
+ pci_dev))
self.sriov_vf_devices = []
for dev in range(8):
@@ -362,7 +365,8 @@ class PciDeviceVFPFStatsTestCase(test.NoDBTestCase):
'dev_type': fields.PciDeviceType.SRIOV_VF,
'parent_addr': '0000:81:00.%d' % int(dev / 4),
'numa_node': 0}
- self.sriov_vf_devices.append(objects.PciDevice.create(pci_dev))
+ self.sriov_vf_devices.append(objects.PciDevice.create(None,
+ pci_dev))
list(map(self.pci_stats.add_device, self.sriov_pf_devices))
list(map(self.pci_stats.add_device, self.sriov_vf_devices))
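
The list(map(...)) wrapping above matters because map() is lazy on Python 3: unless the iterator is consumed, the side-effecting add_device calls never run. A quick standalone demonstration:

    devices = []
    lazy = map(devices.append, [1, 2, 3])  # Python 3: nothing has run yet
    assert devices == []
    list(lazy)                             # consuming the iterator runs the calls
    assert devices == [1, 2, 3]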
diff --git a/nova/tests/unit/scheduler/fakes.py b/nova/tests/unit/scheduler/fakes.py
index 646e6cdb9c..8f39ed717a 100644
--- a/nova/tests/unit/scheduler/fakes.py
+++ b/nova/tests/unit/scheduler/fakes.py
@@ -52,7 +52,8 @@ COMPUTE_NODES = [
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0),
objects.ComputeNode(
id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
@@ -61,7 +62,8 @@ COMPUTE_NODES = [
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0),
objects.ComputeNode(
id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
@@ -70,7 +72,8 @@ COMPUTE_NODES = [
hypervisor_version=0, numa_topology=NUMA_TOPOLOGY._to_json(),
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0),
objects.ComputeNode(
id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
@@ -79,7 +82,8 @@ COMPUTE_NODES = [
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0),
# Broken entry
objects.ComputeNode(
id=5, local_gb=1024, memory_mb=1024, vcpus=1,
diff --git a/nova/tests/unit/scheduler/filters/test_disk_filters.py b/nova/tests/unit/scheduler/filters/test_disk_filters.py
index 8095c70d31..d25f39898b 100644
--- a/nova/tests/unit/scheduler/filters/test_disk_filters.py
+++ b/nova/tests/unit/scheduler/filters/test_disk_filters.py
@@ -24,58 +24,58 @@ class TestDiskFilter(test.NoDBTestCase):
super(TestDiskFilter, self).setUp()
def test_disk_filter_passes(self):
- self.flags(disk_allocation_ratio=1.0)
filt_cls = disk_filter.DiskFilter()
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(root_gb=1, ephemeral_gb=1, swap=512))
host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
+ 'disk_allocation_ratio': 1.0})
self.assertTrue(filt_cls.host_passes(host, spec_obj))
def test_disk_filter_fails(self):
- self.flags(disk_allocation_ratio=1.0)
filt_cls = disk_filter.DiskFilter()
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(
root_gb=10, ephemeral_gb=1, swap=1024))
host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13})
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 13,
+ 'disk_allocation_ratio': 1.0})
self.assertFalse(filt_cls.host_passes(host, spec_obj))
def test_disk_filter_oversubscribe(self):
- self.flags(disk_allocation_ratio=10.0)
filt_cls = disk_filter.DiskFilter()
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(
root_gb=100, ephemeral_gb=18, swap=1024))
# 1GB used... so 119GB allowed...
host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
+ 'disk_allocation_ratio': 10.0})
self.assertTrue(filt_cls.host_passes(host, spec_obj))
self.assertEqual(12 * 10.0, host.limits['disk_gb'])
def test_disk_filter_oversubscribe_fail(self):
- self.flags(disk_allocation_ratio=10.0)
filt_cls = disk_filter.DiskFilter()
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(
root_gb=100, ephemeral_gb=19, swap=1024))
# 1GB used... so 119GB allowed...
host = fakes.FakeHostState('host1', 'node1',
- {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12})
+ {'free_disk_mb': 11 * 1024, 'total_usable_disk_gb': 12,
+ 'disk_allocation_ratio': 10.0})
self.assertFalse(filt_cls.host_passes(host, spec_obj))
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
def test_aggregate_disk_filter_value_error(self, agg_mock):
filt_cls = disk_filter.AggregateDiskFilter()
- self.flags(disk_allocation_ratio=1.0)
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
flavor=objects.Flavor(
root_gb=1, ephemeral_gb=1, swap=1024))
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 3 * 1024,
- 'total_usable_disk_gb': 1})
+ 'total_usable_disk_gb': 1,
+ 'disk_allocation_ratio': 1.0})
agg_mock.return_value = set(['XXX'])
self.assertTrue(filt_cls.host_passes(host, spec_obj))
agg_mock.assert_called_once_with(host, 'disk_allocation_ratio')
@@ -83,14 +83,14 @@ class TestDiskFilter(test.NoDBTestCase):
@mock.patch('nova.scheduler.filters.utils.aggregate_values_from_key')
def test_aggregate_disk_filter_default_value(self, agg_mock):
filt_cls = disk_filter.AggregateDiskFilter()
- self.flags(disk_allocation_ratio=1.0)
spec_obj = objects.RequestSpec(
context=mock.sentinel.ctx,
flavor=objects.Flavor(
root_gb=2, ephemeral_gb=1, swap=1024))
host = fakes.FakeHostState('host1', 'node1',
{'free_disk_mb': 3 * 1024,
- 'total_usable_disk_gb': 1})
+ 'total_usable_disk_gb': 1,
+ 'disk_allocation_ratio': 1.0})
# Uses global conf.
agg_mock.return_value = set([])
self.assertFalse(filt_cls.host_passes(host, spec_obj))
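
These tests move disk_allocation_ratio from a config flag into per-host state, and the pass/fail values follow from simple arithmetic: allowed space is the total times the ratio, minus what is already used. A sketch of that arithmetic, inferred from the test values rather than copied from the filter:

    def allowed_disk_mb(free_disk_mb, total_usable_disk_gb, ratio):
        used_mb = total_usable_disk_gb * 1024 - free_disk_mb
        return total_usable_disk_gb * 1024 * ratio - used_mb

    # total 12 GB with 11 GB free -> 1 GB used; ratio 10.0 -> 119 GB allowed,
    # so a 100 + 18 + 1 GB request passes and a 100 + 19 + 1 GB request fails.
    assert allowed_disk_mb(11 * 1024, 12, 10.0) == 119 * 1024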
diff --git a/nova/tests/unit/scheduler/filters/test_exact_core_filter.py b/nova/tests/unit/scheduler/filters/test_exact_core_filter.py
index b7b7bcdb83..4a092f9136 100644
--- a/nova/tests/unit/scheduler/filters/test_exact_core_filter.py
+++ b/nova/tests/unit/scheduler/filters/test_exact_core_filter.py
@@ -25,20 +25,24 @@ class TestExactCoreFilter(test.NoDBTestCase):
def test_exact_core_filter_passes(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(vcpus=1))
- host = self._get_host({'vcpus_total': 3, 'vcpus_used': 2})
+ vcpus = 3
+ host = self._get_host({'vcpus_total': vcpus, 'vcpus_used': vcpus - 1})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+ self.assertEqual(host.limits.get('vcpu'), vcpus)
def test_exact_core_filter_fails(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(vcpus=2))
host = self._get_host({'vcpus_total': 3, 'vcpus_used': 2})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+ self.assertNotIn('vcpu', host.limits)
def test_exact_core_filter_fails_host_vcpus_not_set(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(vcpus=1))
host = self._get_host({})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+ self.assertNotIn('vcpu', host.limits)
def _get_host(self, host_attributes):
return fakes.FakeHostState('host1', 'node1', host_attributes)
diff --git a/nova/tests/unit/scheduler/filters/test_exact_disk_filter.py b/nova/tests/unit/scheduler/filters/test_exact_disk_filter.py
index f5e11da8b6..4f590ff73c 100644
--- a/nova/tests/unit/scheduler/filters/test_exact_disk_filter.py
+++ b/nova/tests/unit/scheduler/filters/test_exact_disk_filter.py
@@ -25,14 +25,18 @@ class TestExactDiskFilter(test.NoDBTestCase):
def test_exact_disk_filter_passes(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(root_gb=1, ephemeral_gb=1, swap=1024))
- host = self._get_host({'free_disk_mb': 3 * 1024})
+ disk_gb = 3
+ host = self._get_host({'free_disk_mb': disk_gb * 1024,
+ 'total_usable_disk_gb': disk_gb})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+ self.assertEqual(host.limits.get('disk_gb'), disk_gb)
def test_exact_disk_filter_fails(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(root_gb=1, ephemeral_gb=1, swap=1024))
host = self._get_host({'free_disk_mb': 2 * 1024})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+ self.assertNotIn('disk_gb', host.limits)
def _get_host(self, host_attributes):
return fakes.FakeHostState('host1', 'node1', host_attributes)
diff --git a/nova/tests/unit/scheduler/filters/test_exact_ram_filter.py b/nova/tests/unit/scheduler/filters/test_exact_ram_filter.py
index 556131f0ac..0e37b2f071 100644
--- a/nova/tests/unit/scheduler/filters/test_exact_ram_filter.py
+++ b/nova/tests/unit/scheduler/filters/test_exact_ram_filter.py
@@ -25,14 +25,18 @@ class TestRamFilter(test.NoDBTestCase):
def test_exact_ram_filter_passes(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(memory_mb=1024))
- host = self._get_host({'free_ram_mb': 1024})
+ ram_mb = 1024
+ host = self._get_host({'free_ram_mb': ram_mb,
+ 'total_usable_ram_mb': ram_mb})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
+ self.assertEqual(host.limits.get('memory_mb'), ram_mb)
def test_exact_ram_filter_fails(self):
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(memory_mb=512))
host = self._get_host({'free_ram_mb': 1024})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
+ self.assertNotIn('memory_mb', host.limits)
def _get_host(self, host_attributes):
return fakes.FakeHostState('host1', 'node1', host_attributes)
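
The new assertIn/assertNotIn checks across the three exact-filter tests pin down one contract: an exact filter records a host limit only when it passes. A minimal illustration of that contract for the RAM case, as illustrative code rather than ExactRamFilter itself:

    def exact_ram_host_passes(free_mb, total_mb, requested_mb, limits):
        if requested_mb != free_mb:
            return False                   # no match: limits stay untouched
        limits['memory_mb'] = total_mb     # match: record the limit
        return True

    limits = {}
    assert exact_ram_host_passes(1024, 1024, 1024, limits)
    assert limits == {'memory_mb': 1024}
    assert not exact_ram_host_passes(1024, 1024, 512, {})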
diff --git a/nova/tests/unit/scheduler/ironic_fakes.py b/nova/tests/unit/scheduler/ironic_fakes.py
index 07de2f27a5..b1e23427f7 100644
--- a/nova/tests/unit/scheduler/ironic_fakes.py
+++ b/nova/tests/unit/scheduler/ironic_fakes.py
@@ -33,7 +33,8 @@ COMPUTE_NODES = [
supported_hv_specs=[objects.HVSpec.from_list(
["i386", "baremetal", "baremetal"])],
free_disk_gb=10, free_ram_mb=1024,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0),
objects.ComputeNode(
id=2, local_gb=20, memory_mb=2048, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
@@ -47,7 +48,8 @@ COMPUTE_NODES = [
supported_hv_specs=[objects.HVSpec.from_list(
["i386", "baremetal", "baremetal"])],
free_disk_gb=20, free_ram_mb=2048,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0),
objects.ComputeNode(
id=3, local_gb=30, memory_mb=3072, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
@@ -61,7 +63,8 @@ COMPUTE_NODES = [
supported_hv_specs=[objects.HVSpec.from_list(
["i386", "baremetal", "baremetal"])],
free_disk_gb=30, free_ram_mb=3072,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0),
objects.ComputeNode(
id=4, local_gb=40, memory_mb=4096, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
@@ -75,7 +78,8 @@ COMPUTE_NODES = [
supported_hv_specs=[objects.HVSpec.from_list(
["i386", "baremetal", "baremetal"])],
free_disk_gb=40, free_ram_mb=4096,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5),
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0),
# Broken entry
objects.ComputeNode(
id=5, local_gb=50, memory_mb=5120, vcpus=1,
diff --git a/nova/tests/unit/scheduler/test_caching_scheduler.py b/nova/tests/unit/scheduler/test_caching_scheduler.py
index 4198706acf..489e51e45e 100644
--- a/nova/tests/unit/scheduler/test_caching_scheduler.py
+++ b/nova/tests/unit/scheduler/test_caching_scheduler.py
@@ -145,6 +145,7 @@ class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
}
host_state.cpu_allocation_ratio = 16.0
host_state.ram_allocation_ratio = 1.5
+ host_state.disk_allocation_ratio = 1.0
host_state.metrics = objects.MonitorMetricList(objects=[])
return host_state
diff --git a/nova/tests/unit/scheduler/test_chance_scheduler.py b/nova/tests/unit/scheduler/test_chance_scheduler.py
index 0a273d25ee..b26849b5a4 100644
--- a/nova/tests/unit/scheduler/test_chance_scheduler.py
+++ b/nova/tests/unit/scheduler/test_chance_scheduler.py
@@ -15,12 +15,8 @@
"""
Tests For Chance Scheduler.
"""
+import mock
-import random
-
-from mox3 import mox
-
-from nova import context
from nova import exception
from nova import objects
from nova.scheduler import chance
@@ -54,27 +50,20 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
filtered = self.driver._filter_hosts(hosts, spec_obj=spec_obj)
self.assertEqual(filtered, hosts)
- def test_select_destinations(self):
- ctxt = context.RequestContext('fake', 'fake', False)
- ctxt_elevated = 'fake-context-elevated'
- spec_obj = objects.RequestSpec(num_instances=2, ignore_hosts=None)
-
- self.mox.StubOutWithMock(ctxt, 'elevated')
- self.mox.StubOutWithMock(self.driver, 'hosts_up')
- self.mox.StubOutWithMock(random, 'choice')
+ @mock.patch('random.choice')
+ def test_select_destinations(self, mock_random_choice):
+ all_hosts = ['host1', 'host2', 'host3', 'host4']
- hosts_full = ['host1', 'host2', 'host3', 'host4']
+ def _return_hosts(*args, **kwargs):
+ return all_hosts
- ctxt.elevated().AndReturn(ctxt_elevated)
- self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
- random.choice(hosts_full).AndReturn('host3')
+ mock_random_choice.side_effect = ['host3', 'host2']
+ self.stub_out('nova.scheduler.chance.ChanceScheduler.hosts_up',
+ _return_hosts)
- ctxt.elevated().AndReturn(ctxt_elevated)
- self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
- random.choice(hosts_full).AndReturn('host2')
+ spec_obj = objects.RequestSpec(num_instances=2, ignore_hosts=None)
+ dests = self.driver.select_destinations(self.context, spec_obj)
- self.mox.ReplayAll()
- dests = self.driver.select_destinations(ctxt, spec_obj)
self.assertEqual(2, len(dests))
(host, node) = (dests[0]['host'], dests[0]['nodename'])
self.assertEqual('host3', host)
@@ -83,16 +72,21 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual('host2', host)
self.assertIsNone(node)
+ calls = [mock.call(all_hosts), mock.call(all_hosts)]
+ self.assertEqual(calls, mock_random_choice.call_args_list)
+
def test_select_destinations_no_valid_host(self):
+ def _return_hosts(*args, **kwargs):
+ return ['host1', 'host2']
+
def _return_no_host(*args, **kwargs):
return []
- self.mox.StubOutWithMock(self.driver, 'hosts_up')
- self.driver.hosts_up(mox.IgnoreArg(),
- mox.IgnoreArg()).AndReturn([1, 2])
- self.stubs.Set(self.driver, '_filter_hosts', _return_no_host)
- self.mox.ReplayAll()
+ self.stub_out('nova.scheduler.chance.ChanceScheduler.hosts_up',
+ _return_hosts)
+ self.stub_out('nova.scheduler.chance.ChanceScheduler._filter_hosts',
+ _return_no_host)
spec_obj = objects.RequestSpec(num_instances=1)
self.assertRaises(exception.NoValidHost,
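
The rewritten chance scheduler test relies on mock's side_effect accepting a list, which returns successive values on successive calls and so replaces mox's ordered AndReturn choreography. In isolation:

    import mock

    choice = mock.Mock(side_effect=['host3', 'host2'])
    assert choice(['h1', 'h2', 'h3', 'h4']) == 'host3'  # first call
    assert choice(['h1', 'h2', 'h3', 'h4']) == 'host2'  # second call
    assert choice.call_count == 2
    # a third call would raise StopIteration: the side_effect list is spent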
diff --git a/nova/tests/unit/scheduler/test_filter_scheduler.py b/nova/tests/unit/scheduler/test_filter_scheduler.py
index 2c93762361..7a064a4de5 100644
--- a/nova/tests/unit/scheduler/test_filter_scheduler.py
+++ b/nova/tests/unit/scheduler/test_filter_scheduler.py
@@ -59,9 +59,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state = hosts[0]
return [weights.WeighedHost(host_state, self.next_weight)]
- self.stubs.Set(self.driver.host_manager, 'get_filtered_hosts',
- fake_get_filtered_hosts)
- self.stubs.Set(weights.HostWeightHandler,
+ self.stub_out('nova.scheduler.weights.HostWeightHandler.'
'get_weighed_objects', _fake_weigh_objects)
spec_obj = objects.RequestSpec(
@@ -76,8 +74,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
pci_requests=None,
numa_topology=None,
instance_group=None)
- self.mox.ReplayAll()
- weighed_hosts = self.driver._schedule(self.context, spec_obj)
+
+ with mock.patch.object(self.driver.host_manager,
+ 'get_filtered_hosts') as mock_get_hosts:
+ mock_get_hosts.side_effect = fake_get_filtered_hosts
+ weighed_hosts = self.driver._schedule(self.context, spec_obj)
+
self.assertEqual(len(weighed_hosts), 10)
for weighed_host in weighed_hosts:
self.assertIsNotNone(weighed_host.obj)
@@ -122,8 +124,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Make sure the scheduler_host_subset_size property works properly."""
self.flags(scheduler_host_subset_size=2)
- self.stubs.Set(self.driver.host_manager, 'get_filtered_hosts',
- fake_get_filtered_hosts)
spec_obj = objects.RequestSpec(
num_instances=1,
@@ -137,8 +137,11 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
pci_requests=None,
numa_topology=None,
instance_group=None)
- self.mox.ReplayAll()
- hosts = self.driver._schedule(self.context, spec_obj)
+
+ with mock.patch.object(self.driver.host_manager,
+ 'get_filtered_hosts') as mock_get_hosts:
+ mock_get_hosts.side_effect = fake_get_filtered_hosts
+ hosts = self.driver._schedule(self.context, spec_obj)
# one host should be chosen
self.assertEqual(len(hosts), 1)
@@ -158,8 +161,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""
self.flags(scheduler_host_subset_size=20)
- self.stubs.Set(self.driver.host_manager, 'get_filtered_hosts',
- fake_get_filtered_hosts)
spec_obj = objects.RequestSpec(
num_instances=1,
@@ -173,10 +174,13 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
pci_requests=None,
numa_topology=None,
instance_group=None)
- self.mox.ReplayAll()
- hosts = self.driver._schedule(self.context, spec_obj)
- # one host should be chose
+ with mock.patch.object(self.driver.host_manager,
+ 'get_filtered_hosts') as mock_get_hosts:
+ mock_get_hosts.side_effect = fake_get_filtered_hosts
+ hosts = self.driver._schedule(self.context, spec_obj)
+
+ # one host should be chosen
self.assertEqual(len(hosts), 1)
@mock.patch('nova.scheduler.host_manager.HostManager._get_instance_info')
@@ -195,9 +199,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""
self.flags(scheduler_host_subset_size=1)
- self.stubs.Set(self.driver.host_manager, 'get_filtered_hosts',
- fake_get_filtered_hosts)
-
self.next_weight = 50
def _fake_weigh_objects(_self, functions, hosts, options):
@@ -206,6 +207,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
host_state = hosts[0]
return [weights.WeighedHost(host_state, this_weight)]
+ self.stub_out('nova.scheduler.weights.HostWeightHandler.'
+ 'get_weighed_objects', _fake_weigh_objects)
+
spec_obj = objects.RequestSpec(
num_instances=1,
project_id=1,
@@ -219,11 +223,10 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
numa_topology=None,
instance_group=None)
- self.stubs.Set(weights.HostWeightHandler,
- 'get_weighed_objects', _fake_weigh_objects)
-
- self.mox.ReplayAll()
- hosts = self.driver._schedule(self.context, spec_obj)
+ with mock.patch.object(self.driver.host_manager,
+ 'get_filtered_hosts') as mock_get_hosts:
+ mock_get_hosts.side_effect = fake_get_filtered_hosts
+ hosts = self.driver._schedule(self.context, spec_obj)
# one host should be chosen
self.assertEqual(1, len(hosts))
@@ -258,10 +261,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
selected_nodes.append(host_state.nodename)
return [weights.WeighedHost(host_state, self.next_weight)]
- self.stubs.Set(self.driver.host_manager, 'get_filtered_hosts',
- fake_get_filtered_hosts)
- self.stubs.Set(weights.HostWeightHandler,
- 'get_weighed_objects', _fake_weigh_objects)
+ self.stub_out('nova.scheduler.weights.HostWeightHandler.'
+ 'get_weighed_objects', _fake_weigh_objects)
spec_obj = objects.RequestSpec(
flavor=objects.Flavor(memory_mb=512,
@@ -275,8 +276,12 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
pci_requests=None,
numa_topology=None,
instance_group=None)
- self.mox.ReplayAll()
- dests = self.driver.select_destinations(self.context, spec_obj)
+
+ with mock.patch.object(self.driver.host_manager,
+ 'get_filtered_hosts') as mock_get_hosts:
+ mock_get_hosts.side_effect = fake_get_filtered_hosts
+ dests = self.driver.select_destinations(self.context, spec_obj)
+
(host, node) = (dests[0]['host'], dests[0]['nodename'])
self.assertEqual(host, selected_hosts[0])
self.assertEqual(node, selected_nodes[0])
@@ -302,12 +307,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
dict(request_spec=expected))]
self.assertEqual(expected, mock_info.call_args_list)
- def test_select_destinations_no_valid_host(self):
-
- def _return_no_host(*args, **kwargs):
- return []
-
- self.stubs.Set(self.driver, '_schedule', _return_no_host)
+ @mock.patch.object(filter_scheduler.FilterScheduler, '_schedule')
+ def test_select_destinations_no_valid_host(self, mock_schedule):
+ mock_schedule.return_value = []
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations, self.context,
objects.RequestSpec(num_instances=1))
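
The conversions above repeatedly use mock.patch.object as a context manager, which scopes the stub to the with block and restores the original on exit, with no ReplayAll/VerifyAll bookkeeping. A self-contained sketch with a stand-in class:

    import mock

    class HostManager(object):
        def get_filtered_hosts(self, hosts, spec_obj):
            return hosts

    hm = HostManager()
    with mock.patch.object(hm, 'get_filtered_hosts') as mock_get:
        mock_get.side_effect = lambda hosts, spec_obj: hosts[:1]
        assert hm.get_filtered_hosts(['h1', 'h2'], None) == ['h1']
    # the patch is undone on exit, so the real method is back
    assert hm.get_filtered_hosts(['h1', 'h2'], None) == ['h1', 'h2']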
diff --git a/nova/tests/unit/scheduler/test_filters.py b/nova/tests/unit/scheduler/test_filters.py
index 7f87b25d4b..c1977e16cc 100644
--- a/nova/tests/unit/scheduler/test_filters.py
+++ b/nova/tests/unit/scheduler/test_filters.py
@@ -16,7 +16,6 @@ Tests For Scheduler Host Filters.
"""
import inspect
-import sys
import mock
from six.moves import range
@@ -45,24 +44,19 @@ class FiltersTestCase(test.NoDBTestCase):
mock_load.return_value = None
self.filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
- def test_filter_all(self):
+ @mock.patch('nova.filters.BaseFilter._filter_one')
+ def test_filter_all(self, mock_filter_one):
+ mock_filter_one.side_effect = [True, False, True]
filter_obj_list = ['obj1', 'obj2', 'obj3']
spec_obj = objects.RequestSpec()
base_filter = filters.BaseFilter()
- self.mox.StubOutWithMock(base_filter, '_filter_one')
-
- base_filter._filter_one('obj1', spec_obj).AndReturn(True)
- base_filter._filter_one('obj2', spec_obj).AndReturn(False)
- base_filter._filter_one('obj3', spec_obj).AndReturn(True)
-
- self.mox.ReplayAll()
-
result = base_filter.filter_all(filter_obj_list, spec_obj)
self.assertTrue(inspect.isgenerator(result))
self.assertEqual(['obj1', 'obj3'], list(result))
- def test_filter_all_recursive_yields(self):
+ @mock.patch('nova.filters.BaseFilter._filter_one')
+ def test_filter_all_recursive_yields(self, mock_filter_one):
# Test filter_all() allows generators from previous filter_all()s.
# filter_all() yields results. We want to make sure that we can
# call filter_all() with generators returned from previous calls
@@ -71,10 +65,6 @@ class FiltersTestCase(test.NoDBTestCase):
spec_obj = objects.RequestSpec()
base_filter = filters.BaseFilter()
- self.mox.StubOutWithMock(base_filter, '_filter_one')
-
- total_iterations = 200
-
# The order that _filter_one is going to get called gets
# confusing because we will be recursively yielding things..
# We are going to simulate the first call to filter_all()
@@ -83,12 +73,14 @@ class FiltersTestCase(test.NoDBTestCase):
# call gets to processing 'obj2'. We then return 'False' for it.
# After that, 'obj3' gets yielded 'total_iterations' number of
# times.
+ mock_results = []
+ total_iterations = 200
for x in range(total_iterations):
- base_filter._filter_one('obj1', spec_obj).AndReturn(True)
- base_filter._filter_one('obj2', spec_obj).AndReturn(False)
+ mock_results.append(True)
+ mock_results.append(False)
for x in range(total_iterations):
- base_filter._filter_one('obj3', spec_obj).AndReturn(True)
- self.mox.ReplayAll()
+ mock_results.append(True)
+ mock_filter_one.side_effect = mock_results
objs = iter(filter_obj_list)
for x in range(total_iterations):
@@ -106,29 +98,15 @@ class FiltersTestCase(test.NoDBTestCase):
def _fake_base_loader_init(*args, **kwargs):
pass
- self.stubs.Set(loadables.BaseLoader, '__init__',
- _fake_base_loader_init)
-
- filt1_mock = self.mox.CreateMock(Filter1)
- filt2_mock = self.mox.CreateMock(Filter2)
-
- self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
- use_mock_anything=True)
- self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
- self.mox.StubOutWithMock(filt1_mock, 'filter_all')
- self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
- use_mock_anything=True)
- self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
- self.mox.StubOutWithMock(filt2_mock, 'filter_all')
+ self.stub_out('nova.loadables.BaseLoader.__init__',
+ _fake_base_loader_init)
- filt1_mock.run_filter_for_index(0).AndReturn(True)
- filt1_mock.filter_all(filter_objs_initial,
- spec_obj).AndReturn(filter_objs_second)
- filt2_mock.run_filter_for_index(0).AndReturn(True)
- filt2_mock.filter_all(filter_objs_second,
- spec_obj).AndReturn(filter_objs_last)
-
- self.mox.ReplayAll()
+ filt1_mock = mock.Mock(Filter1)
+ filt1_mock.run_filter_for_index.return_value = True
+ filt1_mock.filter_all.return_value = filter_objs_second
+ filt2_mock = mock.Mock(Filter2)
+ filt2_mock.run_filter_for_index.return_value = True
+ filt2_mock.filter_all.return_value = filter_objs_last
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_mocks = [filt1_mock, filt2_mock]
@@ -136,6 +114,10 @@ class FiltersTestCase(test.NoDBTestCase):
filter_objs_initial,
spec_obj)
self.assertEqual(filter_objs_last, result)
+ filt1_mock.filter_all.assert_called_once_with(filter_objs_initial,
+ spec_obj)
+ filt2_mock.filter_all.assert_called_once_with(filter_objs_second,
+ spec_obj)
def test_get_filtered_objects_for_index(self):
"""Test that we don't call a filter when its
@@ -148,34 +130,24 @@ class FiltersTestCase(test.NoDBTestCase):
def _fake_base_loader_init(*args, **kwargs):
pass
- self.stubs.Set(loadables.BaseLoader, '__init__',
- _fake_base_loader_init)
-
- filt1_mock = self.mox.CreateMock(Filter1)
- filt2_mock = self.mox.CreateMock(Filter2)
-
- self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
- use_mock_anything=True)
- self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
- self.mox.StubOutWithMock(filt1_mock, 'filter_all')
- self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
- use_mock_anything=True)
- self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
- self.mox.StubOutWithMock(filt2_mock, 'filter_all')
+ self.stub_out('nova.loadables.BaseLoader.__init__',
+ _fake_base_loader_init)
- filt1_mock.run_filter_for_index(0).AndReturn(True)
- filt1_mock.filter_all(filter_objs_initial,
- spec_obj).AndReturn(filter_objs_second)
- # return false so filter_all will not be called
- filt2_mock.run_filter_for_index(0).AndReturn(False)
-
- self.mox.ReplayAll()
+ filt1_mock = mock.Mock(Filter1)
+ filt1_mock.run_filter_for_index.return_value = True
+ filt1_mock.filter_all.return_value = filter_objs_second
+ filt2_mock = mock.Mock(Filter2)
+ filt2_mock.run_filter_for_index.return_value = False
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_mocks = [filt1_mock, filt2_mock]
- filter_handler.get_filtered_objects(filter_mocks,
- filter_objs_initial,
- spec_obj)
+ result = filter_handler.get_filtered_objects(filter_mocks,
+ filter_objs_initial,
+ spec_obj)
+ self.assertEqual(filter_objs_second, result)
+ filt1_mock.filter_all.assert_called_once_with(filter_objs_initial,
+ spec_obj)
+ filt2_mock.filter_all.assert_not_called()
def test_get_filtered_objects_none_response(self):
filter_objs_initial = ['initial', 'filter1', 'objects1']
@@ -184,25 +156,13 @@ class FiltersTestCase(test.NoDBTestCase):
def _fake_base_loader_init(*args, **kwargs):
pass
- self.stubs.Set(loadables.BaseLoader, '__init__',
- _fake_base_loader_init)
-
- filt1_mock = self.mox.CreateMock(Filter1)
- filt2_mock = self.mox.CreateMock(Filter2)
-
- self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
- use_mock_anything=True)
- self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
- self.mox.StubOutWithMock(filt1_mock, 'filter_all')
- # Shouldn't be called.
- self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
- use_mock_anything=True)
- self.mox.StubOutWithMock(filt2_mock, 'filter_all')
+ self.stub_out('nova.loadables.BaseLoader.__init__',
+ _fake_base_loader_init)
- filt1_mock.run_filter_for_index(0).AndReturn(True)
- filt1_mock.filter_all(filter_objs_initial,
- spec_obj).AndReturn(None)
- self.mox.ReplayAll()
+ filt1_mock = mock.Mock(Filter1)
+ filt1_mock.run_filter_for_index.return_value = True
+ filt1_mock.filter_all.return_value = None
+ filt2_mock = mock.Mock(Filter2)
filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
filter_mocks = [filt1_mock, filt2_mock]
@@ -210,6 +170,9 @@ class FiltersTestCase(test.NoDBTestCase):
filter_objs_initial,
spec_obj)
self.assertIsNone(result)
+ filt1_mock.filter_all.assert_called_once_with(filter_objs_initial,
+ spec_obj)
+ filt2_mock.filter_all.assert_not_called()
def test_get_filtered_objects_info_log_none_returned(self):
LOG = filters.LOG
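
Passing a class as the first argument to mock.Mock, as in mock.Mock(Filter1) above, specs the mock: attributes the class does not define raise AttributeError instead of silently returning a child mock. Combined with assert_not_called, this keeps the converted tests as strict as the mox versions. A small illustration with a stand-in Filter:

    import mock

    class Filter(object):
        def filter_all(self, objs, spec_obj):
            pass

    filt = mock.Mock(Filter)                 # the class acts as the spec
    filt.filter_all.return_value = ['obj1']
    assert filt.filter_all([], None) == ['obj1']
    filt.filter_all.assert_called_once_with([], None)

    unused = mock.Mock(Filter)
    unused.filter_all.assert_not_called()    # passes: never invoked
    # unused.no_such_method would raise AttributeError thanks to the spec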
diff --git a/nova/tests/unit/scheduler/test_host_manager.py b/nova/tests/unit/scheduler/test_host_manager.py
index 7554485d1b..96af7a36a7 100644
--- a/nova/tests/unit/scheduler/test_host_manager.py
+++ b/nova/tests/unit/scheduler/test_host_manager.py
@@ -198,7 +198,8 @@ class HostManagerTestCase(test.NoDBTestCase):
info['got_fprops'].append(filter_props)
return True
- self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
+ self.stub_out(__name__ + '.FakeFilterClass1._filter_one',
+ fake_filter_one)
def _verify_result(self, info, result, filters=True):
for x in info['got_fprops']:
@@ -405,32 +406,34 @@ class HostManagerTestCase(test.NoDBTestCase):
fake_properties)
self._verify_result(info, result, False)
- @mock.patch.object(nova.objects.InstanceList, 'get_by_host')
- def test_get_all_host_states(self, mock_get_by_host):
+ @mock.patch('nova.scheduler.host_manager.LOG')
+ @mock.patch('nova.objects.ServiceList.get_by_binary')
+ @mock.patch('nova.objects.ComputeNodeList.get_all')
+ @mock.patch('nova.objects.InstanceList.get_by_host')
+ def test_get_all_host_states(self, mock_get_by_host, mock_get_all,
+ mock_get_by_binary, mock_log):
mock_get_by_host.return_value = objects.InstanceList()
+ mock_get_all.return_value = fakes.COMPUTE_NODES
+ mock_get_by_binary.return_value = fakes.SERVICES
context = 'fake_context'
- self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
- self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
- self.mox.StubOutWithMock(host_manager.LOG, 'warning')
-
- objects.ServiceList.get_by_binary(
- context, 'nova-compute').AndReturn(fakes.SERVICES)
- objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES)
- # node 3 host physical disk space is greater than database
- host_manager.LOG.warning("Host %(hostname)s has more disk space "
- "than database expected (%(physical)s GB >"
- " %(database)s GB)",
- {'physical': 3333, 'database': 3072,
- 'hostname': 'node3'})
- # Invalid service
- host_manager.LOG.warning("No compute service record found for "
- "host %(host)s",
- {'host': 'fake'})
- self.mox.ReplayAll()
+
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
-
self.assertEqual(len(host_states_map), 4)
+
+ calls = [
+ mock.call(
+ "Host %(hostname)s has more disk space than database "
+ "expected (%(physical)s GB > %(database)s GB)",
+ {'physical': 3333, 'database': 3072, 'hostname': 'node3'}
+ ),
+ mock.call(
+ "No compute service record found for host %(host)s",
+ {'host': 'fake'}
+ )
+ ]
+ self.assertEqual(calls, mock_log.warning.call_args_list)
+
# Check that .service is set properly
for i in range(4):
compute_node = fakes.COMPUTE_NODES[i]
@@ -439,6 +442,7 @@ class HostManagerTestCase(test.NoDBTestCase):
state_key = (host, node)
self.assertEqual(host_states_map[state_key].service,
obj_base.obj_to_primitive(fakes.get_service_by_host(host)))
+
self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
512)
# 511GB
@@ -735,64 +739,61 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
host_manager.HostState('host4', 'node4')
]
+ @mock.patch('nova.objects.ServiceList.get_by_binary')
+ @mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.InstanceList.get_by_host')
- def test_get_all_host_states(self, mock_get_by_host):
+ def test_get_all_host_states(self, mock_get_by_host, mock_get_all,
+ mock_get_by_binary):
mock_get_by_host.return_value = objects.InstanceList()
+ mock_get_all.return_value = fakes.COMPUTE_NODES
+ mock_get_by_binary.return_value = fakes.SERVICES
context = 'fake_context'
- self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
- self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
- objects.ServiceList.get_by_binary(
- context, 'nova-compute').AndReturn(fakes.SERVICES)
- objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES)
- self.mox.ReplayAll()
-
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(len(host_states_map), 4)
+ @mock.patch('nova.objects.ServiceList.get_by_binary')
+ @mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.InstanceList.get_by_host')
- def test_get_all_host_states_after_delete_one(self, mock_get_by_host):
- mock_get_by_host.return_value = objects.InstanceList()
- context = 'fake_context'
-
- self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
- self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
- # all nodes active for first call
- objects.ServiceList.get_by_binary(
- context, 'nova-compute').AndReturn(fakes.SERVICES)
- objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES)
- # remove node4 for second call
+ def test_get_all_host_states_after_delete_one(self, mock_get_by_host,
+ mock_get_all,
+ mock_get_by_binary):
running_nodes = [n for n in fakes.COMPUTE_NODES
if n.get('hypervisor_hostname') != 'node4']
- objects.ServiceList.get_by_binary(
- context, 'nova-compute').AndReturn(fakes.SERVICES)
- objects.ComputeNodeList.get_all(context).AndReturn(running_nodes)
- self.mox.ReplayAll()
+ mock_get_by_host.return_value = objects.InstanceList()
+ mock_get_all.side_effect = [fakes.COMPUTE_NODES, running_nodes]
+ mock_get_by_binary.side_effect = [fakes.SERVICES, fakes.SERVICES]
+ context = 'fake_context'
+
+ # first call: all nodes
self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 4)
+
+ # second call: just running nodes
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(len(host_states_map), 3)
+ @mock.patch('nova.objects.ServiceList.get_by_binary')
+ @mock.patch('nova.objects.ComputeNodeList.get_all')
@mock.patch('nova.objects.InstanceList.get_by_host')
- def test_get_all_host_states_after_delete_all(self, mock_get_by_host):
+ def test_get_all_host_states_after_delete_all(self, mock_get_by_host,
+ mock_get_all,
+ mock_get_by_binary):
mock_get_by_host.return_value = objects.InstanceList()
+ mock_get_all.side_effect = [fakes.COMPUTE_NODES, []]
+ mock_get_by_binary.side_effect = [fakes.SERVICES, fakes.SERVICES]
context = 'fake_context'
- self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
- self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
- # all nodes active for first call
- objects.ServiceList.get_by_binary(
- context, 'nova-compute').AndReturn(fakes.SERVICES)
- objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES)
- # remove all nodes for second call
- objects.ServiceList.get_by_binary(
- context, 'nova-compute').AndReturn(fakes.SERVICES)
- objects.ComputeNodeList.get_all(context).AndReturn([])
- self.mox.ReplayAll()
-
+ # first call: all nodes
self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 4)
+
+ # second call: no nodes
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(len(host_states_map), 0)
@@ -831,7 +832,8 @@ class HostStateTestCase(test.NoDBTestCase):
supported_hv_specs=[],
hypervisor_version=hyper_ver_int, numa_topology=None,
pci_device_pools=None, metrics=None,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0)
host = host_manager.HostState("fakehost", "fakenode")
host.update(compute=compute)
@@ -873,7 +875,8 @@ class HostStateTestCase(test.NoDBTestCase):
supported_hv_specs=[],
hypervisor_version=hyper_ver_int, numa_topology=None,
pci_device_pools=None, metrics=None,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0)
host = host_manager.HostState("fakehost", "fakenode")
host.update(compute=compute)
@@ -905,7 +908,8 @@ class HostStateTestCase(test.NoDBTestCase):
supported_hv_specs=[],
hypervisor_version=hyper_ver_int, numa_topology=None,
pci_device_pools=None, metrics=None,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0)
host = host_manager.HostState("fakehost", "fakenode")
host.update(compute=compute)
@@ -1065,7 +1069,8 @@ class HostStateTestCase(test.NoDBTestCase):
hypervisor_version=hyper_ver_int,
numa_topology=fakes.NUMA_TOPOLOGY._to_json(),
stats=None, pci_device_pools=None,
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0)
host = host_manager.HostState("fakehost", "fakenode")
host.update(compute=compute)
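
Comparing mock_log.warning.call_args_list against a list of mock.call objects, as above, verifies both the content and the order of the logged warnings. A standalone sketch with illustrative messages:

    import mock

    log = mock.Mock()
    log.warning("disk mismatch on %(hostname)s", {'hostname': 'node3'})
    log.warning("no service record for %(host)s", {'host': 'fake'})

    expected = [
        mock.call("disk mismatch on %(hostname)s", {'hostname': 'node3'}),
        mock.call("no service record for %(host)s", {'host': 'fake'}),
    ]
    assert log.warning.call_args_list == expected  # order-sensitive check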
diff --git a/nova/tests/unit/scheduler/test_ironic_host_manager.py b/nova/tests/unit/scheduler/test_ironic_host_manager.py
index 0c1b30bd76..c5e8279bc3 100644
--- a/nova/tests/unit/scheduler/test_ironic_host_manager.py
+++ b/nova/tests/unit/scheduler/test_ironic_host_manager.py
@@ -19,7 +19,6 @@ Tests For IronicHostManager
import mock
-import nova
from nova import exception
from nova import objects
from nova.objects import base as obj_base
@@ -64,24 +63,20 @@ class IronicHostManagerTestCase(test.NoDBTestCase):
"dummy")
)
- @mock.patch.object(nova.objects.InstanceList, 'get_by_host')
- def test_get_all_host_states(self, mock_gbh):
- # Ensure .service is set and we have the values we expect to.
+ @mock.patch('nova.objects.ServiceList.get_by_binary')
+ @mock.patch('nova.objects.ComputeNodeList.get_all')
+ @mock.patch('nova.objects.InstanceList.get_by_host')
+ def test_get_all_host_states(self, mock_get_by_host, mock_get_all,
+ mock_get_by_binary):
+ mock_get_all.return_value = ironic_fakes.COMPUTE_NODES
+ mock_get_by_binary.return_value = ironic_fakes.SERVICES
context = 'fake_context'
- self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
- self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
- objects.ServiceList.get_by_binary(
- context, 'nova-compute').AndReturn(ironic_fakes.SERVICES)
- objects.ComputeNodeList.get_all(context).AndReturn(
- ironic_fakes.COMPUTE_NODES)
- self.mox.ReplayAll()
-
self.host_manager.get_all_host_states(context)
- self.assertEqual(0, mock_gbh.call_count)
+ self.assertEqual(0, mock_get_by_host.call_count)
host_states_map = self.host_manager.host_state_map
-
self.assertEqual(len(host_states_map), 4)
+
for i in range(4):
compute_node = ironic_fakes.COMPUTE_NODES[i]
host = compute_node.host
@@ -121,7 +116,8 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
hypervisor_type='ironic',
hypervisor_version=1,
hypervisor_hostname='fake_host',
- cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5)
+ cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
+ disk_allocation_ratio=1.0)
@mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
def test_create_ironic_node_state(self, init_mock):
@@ -139,51 +135,48 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
compute=compute)
self.assertIs(host_manager.HostState, type(host_state))
- def test_get_all_host_states_after_delete_one(self):
- context = 'fake_context'
-
- self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
- self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
- # all nodes active for first call
- objects.ServiceList.get_by_binary(
- context, 'nova-compute').AndReturn(ironic_fakes.SERVICES)
- objects.ComputeNodeList.get_all(context).AndReturn(
- ironic_fakes.COMPUTE_NODES)
- # remove node4 for second call
+ @mock.patch('nova.objects.ServiceList.get_by_binary')
+ @mock.patch('nova.objects.ComputeNodeList.get_all')
+ def test_get_all_host_states_after_delete_one(self, mock_get_all,
+ mock_get_by_binary):
running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
if n.get('hypervisor_hostname') != 'node4uuid']
- objects.ServiceList.get_by_binary(
- context, 'nova-compute').AndReturn(ironic_fakes.SERVICES)
- objects.ComputeNodeList.get_all(context).AndReturn(running_nodes)
- self.mox.ReplayAll()
-
- with mock.patch.object(nova.objects.InstanceList, 'get_by_host'):
- self.host_manager.get_all_host_states(context)
- self.host_manager.get_all_host_states(context)
+
+ mock_get_all.side_effect = [
+ ironic_fakes.COMPUTE_NODES, running_nodes]
+ mock_get_by_binary.side_effect = [
+ ironic_fakes.SERVICES, ironic_fakes.SERVICES]
+ context = 'fake_context'
+
+ # first call: all nodes
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(4, len(host_states_map))
+
+ # second call: just running nodes
+ self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
self.assertEqual(3, len(host_states_map))
- def test_get_all_host_states_after_delete_all(self):
+ @mock.patch('nova.objects.ServiceList.get_by_binary')
+ @mock.patch('nova.objects.ComputeNodeList.get_all')
+ def test_get_all_host_states_after_delete_all(self, mock_get_all,
+ mock_get_by_binary):
+ mock_get_all.side_effect = [
+ ironic_fakes.COMPUTE_NODES, []]
+ mock_get_by_binary.side_effect = [
+ ironic_fakes.SERVICES, ironic_fakes.SERVICES]
context = 'fake_context'
- self.mox.StubOutWithMock(objects.ServiceList, 'get_by_binary')
- self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
- # all nodes active for first call
- objects.ServiceList.get_by_binary(
- context, 'nova-compute').AndReturn(ironic_fakes.SERVICES)
- objects.ComputeNodeList.get_all(context).AndReturn(
- ironic_fakes.COMPUTE_NODES)
- # remove all nodes for second call
- objects.ServiceList.get_by_binary(
- context, 'nova-compute').AndReturn(ironic_fakes.SERVICES)
- objects.ComputeNodeList.get_all(context).AndReturn([])
- self.mox.ReplayAll()
-
- with mock.patch.object(nova.objects.InstanceList, 'get_by_host'):
- self.host_manager.get_all_host_states(context)
- self.host_manager.get_all_host_states(context)
+ # first call: all nodes
+ self.host_manager.get_all_host_states(context)
+ host_states_map = self.host_manager.host_state_map
+ self.assertEqual(len(host_states_map), 4)
+
+ # second call: no nodes
+ self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
- self.assertEqual(0, len(host_states_map))
+ self.assertEqual(len(host_states_map), 0)
def test_update_from_compute_node(self):
host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
@@ -319,7 +312,8 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
info['got_fprops'].append(filter_props)
return True
- self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
+ self.stub_out(__name__ + '.FakeFilterClass1._filter_one',
+ fake_filter_one)
def _verify_result(self, info, result, filters=True):
for x in info['got_fprops']:
diff --git a/nova/tests/unit/scheduler/test_scheduler.py b/nova/tests/unit/scheduler/test_scheduler.py
index a0b0ec9679..cbf7182ee3 100644
--- a/nova/tests/unit/scheduler/test_scheduler.py
+++ b/nova/tests/unit/scheduler/test_scheduler.py
@@ -21,7 +21,12 @@ import mock
from nova import context
from nova import objects
+from nova.scheduler import caching_scheduler
+from nova.scheduler import chance
+from nova.scheduler import driver
+from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
+from nova.scheduler import ironic_host_manager
from nova.scheduler import manager
from nova import servicegroup
from nova import test
@@ -29,18 +34,74 @@ from nova.tests.unit import fake_server_actions
from nova.tests.unit.scheduler import fakes
+class SchedulerManagerInitTestCase(test.NoDBTestCase):
+ """Test case for scheduler manager initiation."""
+ manager_cls = manager.SchedulerManager
+
+ @mock.patch.object(host_manager.HostManager, '_init_instance_info')
+ @mock.patch.object(host_manager.HostManager, '_init_aggregates')
+ def test_init_using_default_schedulerdriver(self,
+ mock_init_agg,
+ mock_init_inst):
+ driver = self.manager_cls().driver
+ self.assertIsInstance(driver, filter_scheduler.FilterScheduler)
+
+ @mock.patch.object(host_manager.HostManager, '_init_instance_info')
+ @mock.patch.object(host_manager.HostManager, '_init_aggregates')
+ def test_init_using_chance_schedulerdriver(self,
+ mock_init_agg,
+ mock_init_inst):
+ self.flags(scheduler_driver='chance_scheduler')
+ driver = self.manager_cls().driver
+ self.assertIsInstance(driver, chance.ChanceScheduler)
+
+ @mock.patch.object(host_manager.HostManager, '_init_instance_info')
+ @mock.patch.object(host_manager.HostManager, '_init_aggregates')
+ def test_init_using_caching_schedulerdriver(self,
+ mock_init_agg,
+ mock_init_inst):
+ self.flags(scheduler_driver='caching_scheduler')
+ driver = self.manager_cls().driver
+ self.assertIsInstance(driver, caching_scheduler.CachingScheduler)
+
+ @mock.patch.object(host_manager.HostManager, '_init_instance_info')
+ @mock.patch.object(host_manager.HostManager, '_init_aggregates')
+ def test_init_nonexist_schedulerdriver(self,
+ mock_init_agg,
+ mock_init_inst):
+ self.flags(scheduler_driver='nonexist_scheduler')
+ self.assertRaises(RuntimeError, self.manager_cls)
+
+ # NOTE(Yingxin): Loading full class path is deprecated and should be
+ # removed in the N release.
+ @mock.patch.object(manager.LOG, 'warning')
+ @mock.patch.object(host_manager.HostManager, '_init_instance_info')
+ @mock.patch.object(host_manager.HostManager, '_init_aggregates')
+ def test_init_using_classpath_to_schedulerdriver(self,
+ mock_init_agg,
+ mock_init_inst,
+ mock_warning):
+ self.flags(
+ scheduler_driver=
+ 'nova.scheduler.chance.ChanceScheduler')
+ driver = self.manager_cls().driver
+ self.assertIsInstance(driver, chance.ChanceScheduler)
+ warn_args, kwargs = mock_warning.call_args
+ self.assertIn("DEPRECATED", warn_args[0])
+
+
class SchedulerManagerTestCase(test.NoDBTestCase):
"""Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = fakes.FakeScheduler
- driver_cls_name = 'nova.tests.unit.scheduler.fakes.FakeScheduler'
+ driver_plugin_name = 'fake_scheduler'
@mock.patch.object(host_manager.HostManager, '_init_instance_info')
@mock.patch.object(host_manager.HostManager, '_init_aggregates')
def setUp(self, mock_init_agg, mock_init_inst):
super(SchedulerManagerTestCase, self).setUp()
- self.flags(scheduler_driver=self.driver_cls_name)
+ self.flags(scheduler_driver=self.driver_plugin_name)
with mock.patch.object(host_manager.HostManager, '_init_aggregates'):
self.manager = self.manager_cls()
self.context = context.RequestContext('fake_user', 'fake_project')
@@ -117,6 +178,54 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
mock.sentinel.instance_uuids)
+class SchedulerInitTestCase(test.NoDBTestCase):
+ """Test case for base scheduler driver initiation."""
+
+ driver_cls = fakes.FakeScheduler
+
+ @mock.patch.object(host_manager.HostManager, '_init_instance_info')
+ @mock.patch.object(host_manager.HostManager, '_init_aggregates')
+ def test_init_using_default_hostmanager(self,
+ mock_init_agg,
+ mock_init_inst):
+ manager = self.driver_cls().host_manager
+ self.assertIsInstance(manager, host_manager.HostManager)
+
+ @mock.patch.object(host_manager.HostManager, '_init_instance_info')
+ @mock.patch.object(host_manager.HostManager, '_init_aggregates')
+ def test_init_using_ironic_hostmanager(self,
+ mock_init_agg,
+ mock_init_inst):
+ self.flags(scheduler_host_manager='ironic_host_manager')
+ manager = self.driver_cls().host_manager
+ self.assertIsInstance(manager, ironic_host_manager.IronicHostManager)
+
+ @mock.patch.object(host_manager.HostManager, '_init_instance_info')
+ @mock.patch.object(host_manager.HostManager, '_init_aggregates')
+ def test_init_nonexist_hostmanager(self,
+ mock_init_agg,
+ mock_init_inst):
+ self.flags(scheduler_host_manager='nonexist_host_manager')
+ self.assertRaises(RuntimeError, self.driver_cls)
+
+ # NOTE(Yingxin): Loading full class path is deprecated and should be
+ # removed in the N release.
+ @mock.patch.object(driver.LOG, 'warning')
+ @mock.patch.object(host_manager.HostManager, '_init_instance_info')
+ @mock.patch.object(host_manager.HostManager, '_init_aggregates')
+ def test_init_using_classpath_to_hostmanager(self,
+ mock_init_agg,
+ mock_init_inst,
+ mock_warning):
+ self.flags(
+ scheduler_host_manager=
+ 'nova.scheduler.ironic_host_manager.IronicHostManager')
+ manager = self.driver_cls().host_manager
+ self.assertIsInstance(manager, ironic_host_manager.IronicHostManager)
+ warn_args, kwargs = mock_warning.call_args
+ self.assertIn("DEPRECATED", warn_args[0])
+
+
class SchedulerTestCase(test.NoDBTestCase):
"""Test case for base scheduler driver class."""
@@ -132,19 +241,19 @@ class SchedulerTestCase(test.NoDBTestCase):
self.topic = 'fake_topic'
self.servicegroup_api = servicegroup.API()
- def test_hosts_up(self):
+ @mock.patch('nova.objects.ServiceList.get_by_topic')
+ @mock.patch('nova.servicegroup.API.service_is_up')
+ def test_hosts_up(self, mock_service_is_up, mock_get_by_topic):
service1 = objects.Service(host='host1')
service2 = objects.Service(host='host2')
services = objects.ServiceList(objects=[service1, service2])
- self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic')
- self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
+ mock_get_by_topic.return_value = services
+ mock_service_is_up.side_effect = [False, True]
- objects.ServiceList.get_by_topic(self.context,
- self.topic).AndReturn(services)
- self.servicegroup_api.service_is_up(service1).AndReturn(False)
- self.servicegroup_api.service_is_up(service2).AndReturn(True)
-
- self.mox.ReplayAll()
result = self.driver.hosts_up(self.context, self.topic)
self.assertEqual(result, ['host2'])
+
+ mock_get_by_topic.assert_called_once_with(self.context, self.topic)
+ calls = [mock.call(service1), mock.call(service2)]
+ self.assertEqual(calls, mock_service_is_up.call_args_list)
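
The hosts_up conversion above is the recurring mox-to-mock pattern in this series: the record/replay pair becomes return_value/side_effect set up front, and the recorded expectations become explicit assertions after the call. A minimal, self-contained sketch of the same pattern (illustrative names, not Nova's API):

    import mock  # stdlib unittest.mock on Python 3

    class Services(object):
        def list(self):
            raise NotImplementedError

        def is_up(self, svc):
            raise NotImplementedError

    def hosts_up(services):
        # stand-in for the driver method under test
        return [svc for svc in services.list() if services.is_up(svc)]

    services = Services()
    with mock.patch.object(services, 'list') as mock_list, \
            mock.patch.object(services, 'is_up') as mock_is_up:
        mock_list.return_value = ['host1', 'host2']
        mock_is_up.side_effect = [False, True]  # consumed one call at a time
        assert hosts_up(services) == ['host2']
        mock_list.assert_called_once_with()
        assert mock_is_up.call_args_list == [mock.call('host1'),
                                             mock.call('host2')]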
diff --git a/nova/tests/unit/scheduler/test_scheduler_utils.py b/nova/tests/unit/scheduler/test_scheduler_utils.py
index 27ef76c546..23325f2a90 100644
--- a/nova/tests/unit/scheduler/test_scheduler_utils.py
+++ b/nova/tests/unit/scheduler/test_scheduler_utils.py
@@ -18,13 +18,10 @@ Tests For Scheduler Utils
import uuid
import mock
-from mox3 import mox
-from oslo_config import cfg
import six
from nova.compute import flavors
from nova.compute import utils as compute_utils
-from nova import db
from nova import exception
from nova import objects
from nova import rpc
@@ -33,8 +30,6 @@ from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_flavor
-CONF = cfg.CONF
-
class SchedulerUtilsTestCase(test.NoDBTestCase):
"""Test case for scheduler utils methods."""
@@ -42,20 +37,16 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
super(SchedulerUtilsTestCase, self).setUp()
self.context = 'fake-context'
- @mock.patch('nova.objects.Flavor.get_by_flavor_id')
- def test_build_request_spec_without_image(self, mock_get):
- image = None
+ def test_build_request_spec_without_image(self):
instance = {'uuid': 'fake-uuid'}
instance_type = objects.Flavor(**test_flavor.fake_flavor)
- mock_get.return_value = objects.Flavor(extra_specs={})
-
- self.mox.StubOutWithMock(flavors, 'extract_flavor')
- flavors.extract_flavor(mox.IgnoreArg()).AndReturn(instance_type)
- self.mox.ReplayAll()
-
- request_spec = scheduler_utils.build_request_spec(self.context, image,
- [instance])
+ with mock.patch.object(flavors, 'extract_flavor') as mock_extract:
+ mock_extract.return_value = instance_type
+ request_spec = scheduler_utils.build_request_spec(self.context,
+ None,
+ [instance])
+ mock_extract.assert_called_once_with({'uuid': 'fake-uuid'})
self.assertEqual({}, request_spec['image'])
def test_build_request_spec_with_object(self):
@@ -96,8 +87,7 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
method,
updates,
exc_info,
- request_spec,
- db)
+ request_spec)
mock_save.assert_called_once_with()
mock_add.assert_called_once_with(self.context, mock.ANY,
exc_info, mock.ANY)
@@ -107,6 +97,30 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
event_type,
payload)
+ def test_build_filter_properties(self):
+ sched_hints = {'hint': ['over-there']}
+ forced_host = 'forced-host1'
+ forced_node = 'forced-node1'
+ instance_type = objects.Flavor()
+ filt_props = scheduler_utils.build_filter_properties(sched_hints,
+ forced_host, forced_node, instance_type)
+ self.assertEqual(sched_hints, filt_props['scheduler_hints'])
+ self.assertEqual([forced_host], filt_props['force_hosts'])
+ self.assertEqual([forced_node], filt_props['force_nodes'])
+ self.assertEqual(instance_type, filt_props['instance_type'])
+
+ def test_build_filter_properties_no_forced_host_no_force_node(self):
+ sched_hints = {'hint': ['over-there']}
+ forced_host = None
+ forced_node = None
+ instance_type = objects.Flavor()
+ filt_props = scheduler_utils.build_filter_properties(sched_hints,
+ forced_host, forced_node, instance_type)
+ self.assertEqual(sched_hints, filt_props['scheduler_hints'])
+ self.assertEqual(instance_type, filt_props['instance_type'])
+ self.assertNotIn('forced_host', filt_props)
+ self.assertNotIn('forced_node', filt_props)
+
def _test_populate_filter_props(self, host_state_obj=True,
with_retry=True,
force_hosts=None,
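
The two build_filter_properties tests above pin down a simple contract: scheduler_hints and instance_type are always present, while force_hosts/force_nodes appear only when a forced host or node is supplied. Sketched as a standalone function (an approximation inferred from the assertions, not the Nova source):

    def build_filter_properties(scheduler_hints, forced_host, forced_node,
                                instance_type):
        # hints and flavor always travel with the request
        filter_properties = {'scheduler_hints': scheduler_hints,
                             'instance_type': instance_type}
        # forced host/node only add their keys when actually given
        if forced_host:
            filter_properties['force_hosts'] = [forced_host]
        if forced_node:
            filter_properties['force_nodes'] = [forced_node]
        return filter_properties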
diff --git a/nova/tests/unit/scheduler/weights/test_weights_disk.py b/nova/tests/unit/scheduler/weights/test_weights_disk.py
new file mode 100644
index 0000000000..d84922271e
--- /dev/null
+++ b/nova/tests/unit/scheduler/weights/test_weights_disk.py
@@ -0,0 +1,111 @@
+# Copyright 2011-2016 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler disk weights.
+"""
+
+from nova.scheduler import weights
+from nova.scheduler.weights import disk
+from nova import test
+from nova.tests.unit.scheduler import fakes
+
+
+class DiskWeigherTestCase(test.NoDBTestCase):
+ def setUp(self):
+ super(DiskWeigherTestCase, self).setUp()
+ self.weight_handler = weights.HostWeightHandler()
+ self.weighers = [disk.DiskWeigher()]
+
+ def _get_weighed_host(self, hosts, weight_properties=None):
+ if weight_properties is None:
+ weight_properties = {}
+ return self.weight_handler.get_weighed_objects(self.weighers,
+ hosts, weight_properties)[0]
+
+ def _get_all_hosts(self):
+ host_values = [
+ ('host1', 'node1', {'free_disk_mb': 5120}),
+ ('host2', 'node2', {'free_disk_mb': 10240}),
+ ('host3', 'node3', {'free_disk_mb': 30720}),
+ ('host4', 'node4', {'free_disk_mb': 81920})
+ ]
+ return [fakes.FakeHostState(host, node, values)
+ for host, node, values in host_values]
+
+ def test_default_of_spreading_first(self):
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_disk_mb=5120
+ # host2: free_disk_mb=10240
+ # host3: free_disk_mb=30720
+ # host4: free_disk_mb=81920
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_disk_filter_multiplier1(self):
+ self.flags(disk_weight_multiplier=0.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_disk_mb=5120
+ # host2: free_disk_mb=10240
+ # host3: free_disk_mb=30720
+ # host4: free_disk_mb=81920
+
+ # With a zero multiplier, all hosts have the same weight.
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(0.0, weighed_host.weight)
+
+ def test_disk_filter_multiplier2(self):
+ self.flags(disk_weight_multiplier=2.0)
+ hostinfo_list = self._get_all_hosts()
+
+ # host1: free_disk_mb=5120
+ # host2: free_disk_mb=10240
+ # host3: free_disk_mb=30720
+ # host4: free_disk_mb=81920
+
+ # so, host4 should win:
+ weighed_host = self._get_weighed_host(hostinfo_list)
+ self.assertEqual(1.0 * 2, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ def test_disk_filter_negative(self):
+ self.flags(disk_weight_multiplier=1.0)
+ hostinfo_list = self._get_all_hosts()
+ host_attr = {'id': 100, 'disk_mb': 81920, 'free_disk_mb': -5120}
+ host_state = fakes.FakeHostState('negative', 'negative', host_attr)
+ hostinfo_list = list(hostinfo_list) + [host_state]
+
+ # host1: free_disk_mb=5120
+ # host2: free_disk_mb=10240
+ # host3: free_disk_mb=30720
+ # host4: free_disk_mb=81920
+ # negativehost: free_disk_mb=-5120
+
+ # so, host4 should win
+ weights = self.weight_handler.get_weighed_objects(self.weighers,
+ hostinfo_list, {})
+
+ weighed_host = weights[0]
+ self.assertEqual(1, weighed_host.weight)
+ self.assertEqual('host4', weighed_host.obj.host)
+
+ # and negativehost should lose
+ weighed_host = weights[-1]
+ self.assertEqual(0, weighed_host.weight)
+ self.assertEqual('negative', weighed_host.obj.host)
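
The expected values in this new weigher test follow from the scheduler's weight normalization: raw free_disk_mb values are rescaled into [0.0, 1.0] against the minimum and maximum across all hosts, then scaled by disk_weight_multiplier. A rough sketch of that arithmetic (derived from the assertions here, not copied from the weigher):

    def normalized_disk_weights(free_disk_mb_by_host, multiplier=1.0):
        # Rescale raw free-disk values into [0, 1], then apply the multiplier.
        lo = min(free_disk_mb_by_host.values())
        hi = max(free_disk_mb_by_host.values())
        span = float(hi - lo) or 1.0  # avoid dividing by zero when all equal
        return dict((host, multiplier * (value - lo) / span)
                    for host, value in free_disk_mb_by_host.items())

    hosts = {'host1': 5120, 'host2': 10240, 'host3': 30720, 'host4': 81920,
             'negative': -5120}
    weights = normalized_disk_weights(hosts)
    assert weights['host4'] == 1.0     # most free disk wins
    assert weights['negative'] == 0.0  # least free disk loses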
diff --git a/nova/tests/unit/servicegroup/test_mc_servicegroup.py b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
index 6ee5142f0f..5ca51af592 100644
--- a/nova/tests/unit/servicegroup/test_mc_servicegroup.py
+++ b/nova/tests/unit/servicegroup/test_mc_servicegroup.py
@@ -23,7 +23,7 @@ from nova import test
class MemcachedServiceGroupTestCase(test.NoDBTestCase):
- @mock.patch('nova.openstack.common.memorycache.get_client')
+ @mock.patch('nova.cache_utils.get_memcached_client')
def setUp(self, mgc_mock):
super(MemcachedServiceGroupTestCase, self).setUp()
self.mc_client = mock.MagicMock()
@@ -63,4 +63,4 @@ class MemcachedServiceGroupTestCase(test.NoDBTestCase):
fn = self.servicegroup_api._driver._report_state
fn(service)
self.mc_client.set.assert_called_once_with('compute:fake-host',
- mock.ANY, time=60)
+ mock.ANY)
diff --git a/nova/tests/unit/servicegroup/test_zk_driver.py b/nova/tests/unit/servicegroup/test_zk_driver.py
deleted file mode 100644
index 1af71902d9..0000000000
--- a/nova/tests/unit/servicegroup/test_zk_driver.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# Copyright (c) AT&T 2012-2013 Yun Mao <yunmao@gmail.com>
-# Copyright 2012 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Test the ZooKeeper driver for servicegroup.
-
-You need to install ZooKeeper locally and related dependencies
-to run the test. It's unclear how to install python-zookeeper lib
-in venv so you might have to run the test without it.
-
-To set up in Ubuntu 12.04:
-$ sudo apt-get install zookeeper zookeeperd python-zookeeper
-$ sudo pip install evzookeeper
-$ nosetests nova.tests.unit.servicegroup.test_zk_driver
-"""
-import os
-
-import mock
-
-from nova import servicegroup
-from nova.servicegroup.drivers import zk
-from nova import test
-
-
-class ZKServiceGroupTestCase(test.NoDBTestCase):
-
- def setUp(self):
- super(ZKServiceGroupTestCase, self).setUp()
- self.flags(servicegroup_driver='zk')
- self.flags(address='localhost:2181', group="zookeeper")
- try:
- __import__('evzookeeper')
- __import__('zookeeper')
- except ImportError:
- self.skipTest("Unable to test due to lack of ZooKeeper")
-
- # Need to do this here, as opposed to the setUp() method, otherwise
- # the decorator will cause an import error...
- @mock.patch('evzookeeper.ZKSession')
- def _setup_sg_api(self, zk_sess_mock):
- self.zk_sess = mock.MagicMock()
- zk_sess_mock.return_value = self.zk_sess
- self.flags(servicegroup_driver='zk')
- self.flags(address='ignored', group="zookeeper")
- self.servicegroup_api = servicegroup.API()
-
- def test_zookeeper_hierarchy_structure(self):
- """Test that hierarchy created by join method contains process id."""
- from zookeeper import NoNodeException
- self.servicegroup_api = servicegroup.API()
- service_id = {'topic': 'unittest', 'host': 'serviceC'}
- # use existing session object
- session = self.servicegroup_api._driver._session
- # prepare a path that contains process id
- pid = os.getpid()
- path = '/servicegroups/%s/%s/%s' % (service_id['topic'],
- service_id['host'],
- pid)
- # assert that node doesn't exist yet
- self.assertRaises(NoNodeException, session.get, path)
- # join
- self.servicegroup_api.join(service_id['host'],
- service_id['topic'],
- None)
- # expected existing "process id" node
- self.assertTrue(session.get(path))
-
- def test_lazy_session(self):
- """Session object (contains zk handle) should be created in
- lazy manner, because handle cannot be shared by forked processes.
- """
- # inside import because this test runs conditionally (look at setUp)
- import evzookeeper
- driver = zk.ZooKeeperDriver()
- # check that internal private attribute session is empty
- self.assertIsNone(driver.__dict__['_ZooKeeperDriver__session'])
- # after first use of property ...
- driver._session
- # check that internal private session attribute is ready
- self.assertIsInstance(driver.__dict__['_ZooKeeperDriver__session'],
- evzookeeper.ZKSession)
-
- @mock.patch('evzookeeper.membership.Membership')
- def test_join(self, mem_mock):
- self._setup_sg_api()
- mem_mock.return_value = mock.sentinel.zk_mem
- self.servicegroup_api.join('fake-host', 'fake-topic')
- mem_mock.assert_called_once_with(self.zk_sess,
- '/fake-topic',
- 'fake-host')
diff --git a/nova/tests/unit/test_api_validation.py b/nova/tests/unit/test_api_validation.py
index b8010fe691..69c89bea44 100644
--- a/nova/tests/unit/test_api_validation.py
+++ b/nova/tests/unit/test_api_validation.py
@@ -15,6 +15,9 @@
import copy
import re
+import fixtures
+import six
+
from nova.api.openstack import api_version_request as api_version
from nova.api import validation
from nova.api.validation import parameter_types
@@ -31,6 +34,62 @@ class FakeRequest(object):
return self.legacy_v2
+class ValidationRegex(test.NoDBTestCase):
+ def test_cell_names(self):
+ cellre = re.compile(parameter_types.valid_cell_name_regex)
+ self.assertTrue(cellre.search('foo'))
+ self.assertFalse(cellre.search('foo.bar'))
+ self.assertFalse(cellre.search('foo@bar'))
+ self.assertFalse(cellre.search('foo!bar'))
+ self.assertFalse(cellre.search(' foo!bar'))
+ self.assertFalse(cellre.search('\nfoo!bar'))
+
+ def test_build_regex_range(self):
+ # this is much easier to think about if we only use the ascii
+ # subset because it's a printable range we can think
+ # about. The algorithm works for all ranges.
+ def _get_all_chars():
+ for i in range(0x7F):
+ yield six.unichr(i)
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.api.validation.parameter_types._get_all_chars',
+ _get_all_chars))
+
+ r = parameter_types._build_regex_range(ws=False)
+ self.assertEqual(r, re.escape('!') + '-' + re.escape('~'))
+
+ # if we allow whitespace the range starts earlier
+ r = parameter_types._build_regex_range(ws=True)
+ self.assertEqual(r, re.escape(' ') + '-' + re.escape('~'))
+
+ # excluding a character will give us 2 ranges
+ r = parameter_types._build_regex_range(ws=True, exclude=['A'])
+ self.assertEqual(r,
+ re.escape(' ') + '-' + re.escape('@') +
+ 'B' + '-' + re.escape('~'))
+
+ # Inverting the range gives us all the initial unprintable characters.
+ r = parameter_types._build_regex_range(ws=False, invert=True)
+ self.assertEqual(r,
+ re.escape('\x00') + '-' + re.escape(' '))
+
+ # excluding characters that create a singleton. Naively this would be:
+ # ' -@B-BD-~' which seems to work, but ' -@BD-~' is more natural.
+ r = parameter_types._build_regex_range(ws=True, exclude=['A', 'C'])
+ self.assertEqual(r,
+ re.escape(' ') + '-' + re.escape('@') +
+ 'B' + 'D' + '-' + re.escape('~'))
+
+ # ws=True means the positive regex has printable whitespaces,
+ # so the inverse will not. The inverse will include things we
+ # exclude.
+ r = parameter_types._build_regex_range(
+ ws=True, exclude=['A', 'B', 'C', 'Z'], invert=True)
+ self.assertEqual(r,
+ re.escape('\x00') + '-' + re.escape('\x1f') + 'A-CZ')
+
+
class APIValidationTestCase(test.NoDBTestCase):
def check_validation_error(self, method, body, expected_detail, req=None):
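
The _build_regex_range assertions above encode the whole algorithm: walk the character space, keep printable characters (optionally including whitespace), drop exclusions, optionally invert the selection, and compress consecutive survivors into escaped X-Y ranges. A compact reimplementation over the same 7-bit subset the test monkeypatches in (illustrative, not the parameter_types source):

    import re

    def build_regex_range(ws=False, invert=False, exclude=None):
        exclude = exclude or []

        def keep(c):
            printable = (c >= ' ') if ws else (c > ' ')
            wanted = printable and c not in exclude
            return wanted != invert  # invert flips the selection

        kept = [c for c in (chr(i) for i in range(0x7F)) if keep(c)]
        # compress runs of consecutive characters into X-Y ranges
        out, i = [], 0
        while i < len(kept):
            j = i
            while j + 1 < len(kept) and ord(kept[j + 1]) == ord(kept[j]) + 1:
                j += 1
            if j == i:
                out.append(re.escape(kept[i]))
            else:
                out.append(re.escape(kept[i]) + '-' + re.escape(kept[j]))
            i = j + 1
        return ''.join(out)

    assert build_regex_range(ws=True, exclude=['A', 'C']) == \
        re.escape(' ') + '-' + re.escape('@') + 'B' + 'D' + '-' + re.escape('~')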
diff --git a/nova/tests/unit/test_availability_zones.py b/nova/tests/unit/test_availability_zones.py
index a6273c86d1..63831a4c2e 100644
--- a/nova/tests/unit/test_availability_zones.py
+++ b/nova/tests/unit/test_availability_zones.py
@@ -18,20 +18,16 @@ Tests for availability zones
"""
import mock
-from oslo_config import cfg
import six
from nova import availability_zones as az
+import nova.conf
from nova import context
from nova import db
from nova import objects
from nova import test
-CONF = cfg.CONF
-CONF.import_opt('internal_service_availability_zone',
- 'nova.availability_zones')
-CONF.import_opt('default_availability_zone',
- 'nova.availability_zones')
+CONF = nova.conf.CONF
class AvailabilityZoneTestCases(test.TestCase):
diff --git a/nova/tests/unit/test_block_device.py b/nova/tests/unit/test_block_device.py
index 5e9625c484..2777cbc670 100644
--- a/nova/tests/unit/test_block_device.py
+++ b/nova/tests/unit/test_block_device.py
@@ -125,6 +125,7 @@ class BlockDeviceTestCase(test.NoDBTestCase):
def test_strip_dev(self):
self.assertEqual('sda', block_device.strip_dev('/dev/sda'))
self.assertEqual('sda', block_device.strip_dev('sda'))
+ self.assertIsNone(block_device.strip_dev(None))
def test_strip_prefix(self):
self.assertEqual('a', block_device.strip_prefix('/dev/sda'))
@@ -132,6 +133,7 @@ class BlockDeviceTestCase(test.NoDBTestCase):
self.assertEqual('a', block_device.strip_prefix('xvda'))
self.assertEqual('a', block_device.strip_prefix('vda'))
self.assertEqual('a', block_device.strip_prefix('hda'))
+ self.assertIsNone(block_device.strip_prefix(None))
def test_get_device_letter(self):
self.assertEqual('', block_device.get_device_letter(''))
@@ -142,6 +144,7 @@ class BlockDeviceTestCase(test.NoDBTestCase):
self.assertEqual('b', block_device.get_device_letter('sdb2'))
self.assertEqual('c', block_device.get_device_letter('vdc'))
self.assertEqual('c', block_device.get_device_letter('hdc'))
+ self.assertIsNone(block_device.get_device_letter(None))
def test_volume_in_mapping(self):
swap = {'device_name': '/dev/sdb',
diff --git a/nova/tests/unit/test_cache.py b/nova/tests/unit/test_cache.py
new file mode 100644
index 0000000000..5991121621
--- /dev/null
+++ b/nova/tests/unit/test_cache.py
@@ -0,0 +1,121 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import cache_utils
+from nova import test
+
+
+class TestOsloCache(test.NoDBTestCase):
+ def test_get_default_cache_region(self):
+ region = cache_utils._get_default_cache_region(expiration_time=60)
+ self.assertEqual(60, region.expiration_time)
+ self.assertIsNotNone(region)
+
+ def test_get_default_cache_region_default_expiration_time(self):
+ region = cache_utils._get_default_cache_region(expiration_time=0)
+ # the default oslo.cache expiration_time of 600 is used
+ self.assertEqual(600, region.expiration_time)
+ self.assertIsNotNone(region)
+
+ @mock.patch('dogpile.cache.region.CacheRegion.configure')
+ def test_get_client(self, mock_cacheregion):
+ self.assertIsNotNone(
+ cache_utils.get_client(expiration_time=60))
+
+ self.flags(memcached_servers=['localhost:11211'])
+ self.assertIsNotNone(
+ cache_utils.get_client(expiration_time=60))
+
+ self.flags(memcached_servers=None)
+ self.flags(group='cache', enabled=True)
+ self.assertIsNotNone(
+ cache_utils.get_client(expiration_time=60))
+
+ self.flags(memcached_servers=None)
+ self.flags(group='cache', enabled=False)
+ client = cache_utils.get_client(expiration_time=60)
+ self.assertIsNotNone(client.region)
+
+ mock_cacheregion.assert_has_calls(
+ [mock.call('oslo_cache.dict',
+ arguments={'expiration_time': 60},
+ expiration_time=60),
+ mock.call('dogpile.cache.memcached',
+ arguments={'url': ['localhost:11211']},
+ expiration_time=60),
+ mock.call('dogpile.cache.null',
+ _config_argument_dict=mock.ANY,
+ _config_prefix='cache.oslo.arguments.',
+ expiration_time=60,
+ wrap=None),
+ mock.call('oslo_cache.dict',
+ arguments={'expiration_time': 60},
+ expiration_time=60)],
+ )
+
+ @mock.patch('dogpile.cache.region.CacheRegion.configure')
+ def test_get_custom_cache_region(self, mock_cacheregion):
+ self.assertRaises(RuntimeError,
+ cache_utils._get_custom_cache_region)
+ self.assertIsNotNone(
+ cache_utils._get_custom_cache_region(
+ backend='oslo_cache.dict'))
+ self.assertIsNotNone(
+ cache_utils._get_custom_cache_region(
+ backend='dogpile.cache.memcached',
+ url=['localhost:11211']))
+ mock_cacheregion.assert_has_calls(
+ [mock.call('oslo_cache.dict',
+ arguments={'expiration_time': 604800},
+ expiration_time=604800),
+ mock.call('dogpile.cache.memcached',
+ arguments={'url': ['localhost:11211']},
+ expiration_time=604800)]
+ )
+
+ @mock.patch('dogpile.cache.region.CacheRegion.configure')
+ def test_get_memcached_client(self, mock_cacheregion):
+ self.flags(memcached_servers=None)
+ self.flags(group='cache', enabled=False)
+ self.assertRaises(
+ RuntimeError,
+ cache_utils.get_memcached_client,
+ expiration_time=60)
+
+ self.flags(memcached_servers=['localhost:11211'])
+ self.assertIsNotNone(
+ cache_utils.get_memcached_client(expiration_time=60))
+
+ self.flags(memcached_servers=['localhost:11211'])
+ self.assertIsNotNone(
+ cache_utils.get_memcached_client(expiration_time=60))
+
+ self.flags(memcached_servers=None)
+ self.flags(group='cache', enabled=True)
+ self.flags(group='cache', memcache_servers=['localhost:11211'])
+ self.assertIsNotNone(
+ cache_utils.get_memcached_client(expiration_time=60))
+
+ mock_cacheregion.assert_has_calls(
+ [mock.call('dogpile.cache.memcached',
+ arguments={'url': ['localhost:11211']},
+ expiration_time=60),
+ mock.call('dogpile.cache.memcached',
+ arguments={'url': ['localhost:11211']},
+ expiration_time=60),
+ mock.call('dogpile.cache.null',
+ _config_argument_dict=mock.ANY,
+ _config_prefix='cache.oslo.arguments.',
+ expiration_time=60, wrap=None)]
+ )
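
The call lists asserted in this new module pin down the backend selection order: explicitly configured memcached_servers win, an enabled [cache] group hands control to oslo.cache (whose unconfigured default surfaces here as dogpile.cache.null), and otherwise a local in-memory dict backend is used. Roughly (a sketch of the decision logic these tests exercise, not the cache_utils code itself):

    def pick_backend(memcached_servers, cache_group_enabled):
        if memcached_servers:
            return 'dogpile.cache.memcached'  # explicit servers win
        if cache_group_enabled:
            return 'dogpile.cache.null'       # oslo.cache config takes over
        return 'oslo_cache.dict'              # in-memory fallback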
diff --git a/nova/tests/unit/test_cinder.py b/nova/tests/unit/test_cinder.py
index 1a32cb7e53..482d0dee47 100644
--- a/nova/tests/unit/test_cinder.py
+++ b/nova/tests/unit/test_cinder.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import collections
+
from cinderclient.v1 import client as cinder_client_v1
from cinderclient.v2 import client as cinder_client_v2
from requests_mock.contrib import fixture
@@ -29,6 +31,42 @@ _image_metadata = {
}
+_volume_id = "6edbc2f4-1507-44f8-ac0d-eed1d2608d38"
+_instance_uuid = "f4fda93b-06e0-4743-8117-bc8bcecd651b"
+_instance_uuid_2 = "f4fda93b-06e0-4743-8117-bc8bcecd651c"
+_attachment_id = "3b4db356-253d-4fab-bfa0-e3626c0b8405"
+_attachment_id_2 = "3b4db356-253d-4fab-bfa0-e3626c0b8406"
+_device = "/dev/vdb"
+_device_2 = "/dev/vdc"
+
+
+_volume_attachment = \
+ [{"server_id": _instance_uuid,
+ "attachment_id": _attachment_id,
+ "host_name": "",
+ "volume_id": _volume_id,
+ "device": _device,
+ "id": _volume_id
+ }]
+
+
+_volume_attachment_2 = _volume_attachment
+_volume_attachment_2.append({"server_id": _instance_uuid_2,
+ "attachment_id": _attachment_id_2,
+ "host_name": "",
+ "volume_id": _volume_id,
+ "device": _device_2,
+ "id": _volume_id})
+
+
+exp_volume_attachment = collections.OrderedDict()
+exp_volume_attachment[_instance_uuid] = {'attachment_id': _attachment_id,
+ 'mountpoint': _device}
+exp_volume_attachment_2 = exp_volume_attachment
+exp_volume_attachment_2[_instance_uuid_2] = {'attachment_id': _attachment_id_2,
+ 'mountpoint': _device_2}
+
+
class BaseCinderTestCase(object):
def setUp(self):
@@ -97,13 +135,14 @@ class CinderTestCase(BaseCinderTestCase, test.NoDBTestCase):
"attachments": [],
"availability_zone": "cinder",
"created_at": "2012-09-10T00:00:00.000000",
- "id": '00000000-0000-0000-0000-000000000000',
+ "id": _volume_id,
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
- "bootable": "true"
+ "bootable": "true",
+ "multiattach": "true"
}
volume.update(kwargs)
return volume
@@ -161,13 +200,14 @@ class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase):
"attachments": [],
"availability_zone": "cinderv2",
"created_at": "2013-08-10T00:00:00.000000",
- "id": '00000000-0000-0000-0000-000000000000',
+ "id": _volume_id,
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
- "bootable": "true"
+ "bootable": "true",
+ "multiattach": "true"
}
volume.update(kwargs)
return volume
@@ -191,3 +231,23 @@ class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase):
volume = self.api.get(self.context, '5678')
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
+
+ def test_volume_without_attachment(self):
+ v = self.stub_volume(id='1234')
+ self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
+ volume = self.api.get(self.context, '5678')
+ self.assertIsNone(volume.get('attachments'))
+
+ def test_volume_with_one_attachment(self):
+ v = self.stub_volume(id='1234', attachments=_volume_attachment)
+ self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
+ volume = self.api.get(self.context, '5678')
+ self.assertIn('attachments', volume)
+ self.assertEqual(exp_volume_attachment, volume['attachments'])
+
+ def test_volume_with_two_attachments(self):
+ v = self.stub_volume(id='1234', attachments=_volume_attachment_2)
+ self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
+ volume = self.api.get(self.context, '5678')
+ self.assertIn('attachments', volume)
+ self.assertEqual(exp_volume_attachment_2, volume['attachments'])
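
The fixtures above also document the translation these tests expect: Cinder's list of attachment dicts becomes a dict keyed by server_id, keeping only the attachment id and the device (renamed mountpoint). A sketch of that mapping (inferred from the expected values, not taken from nova.volume.cinder):

    import collections

    def translate_attachments(attachments):
        # {server_id: {'attachment_id': ..., 'mountpoint': ...}},
        # preserving the order attachments were listed in
        translated = collections.OrderedDict()
        for attachment in attachments or []:
            translated[attachment['server_id']] = {
                'attachment_id': attachment['attachment_id'],
                'mountpoint': attachment['device'],
            }
        return translated

Note that _volume_attachment_2 and exp_volume_attachment_2 alias the one-attachment fixtures rather than copy them, so after the module-level append both names refer to the same two-entry collections; the tests remain self-consistent only because actual and expected fixtures alias symmetrically.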
diff --git a/nova/tests/unit/test_context.py b/nova/tests/unit/test_context.py
index 5a4651d803..65feeb71a4 100644
--- a/nova/tests/unit/test_context.py
+++ b/nova/tests/unit/test_context.py
@@ -12,10 +12,12 @@
# License for the specific language governing permissions and limitations
# under the License.
+import mock
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
from nova import context
+from nova import objects
from nova import test
@@ -223,3 +225,16 @@ class ContextTestCase(test.NoDBTestCase):
self.assertEqual('222', ctx.project_id)
values2 = ctx.to_dict()
self.assertEqual(values, values2)
+
+ @mock.patch('nova.db.create_context_manager')
+ def test_target_cell(self, mock_create_ctxt_mgr):
+ mock_create_ctxt_mgr.return_value = mock.sentinel.cm
+ ctxt = context.RequestContext('111',
+ '222',
+ roles=['admin', 'weasel'])
+ # Verify the existing db_connection, if any, is restored
+ ctxt.db_connection = mock.sentinel.db_conn
+ mapping = objects.CellMapping(database_connection='fake://')
+ with context.target_cell(ctxt, mapping):
+ self.assertEqual(ctxt.db_connection, mock.sentinel.cm)
+ self.assertEqual(mock.sentinel.db_conn, ctxt.db_connection)
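
The behaviour pinned down here is that target_cell is a context manager that temporarily points the request context's db_connection at the cell database and restores the previous connection on exit. A minimal sketch of that shape (create_context_manager stands in for the nova.db helper the test mocks):

    import contextlib

    def create_context_manager(connection):
        # stand-in for nova.db.create_context_manager (mocked in the test)
        return object()

    @contextlib.contextmanager
    def target_cell(context, cell_mapping):
        original = getattr(context, 'db_connection', None)
        context.db_connection = create_context_manager(
            cell_mapping.database_connection)
        try:
            yield context
        finally:
            # restore whatever connection was there before, if any
            context.db_connection = original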
diff --git a/nova/tests/unit/test_crypto.py b/nova/tests/unit/test_crypto.py
index 09e77f4329..a32e9678f9 100644
--- a/nova/tests/unit/test_crypto.py
+++ b/nova/tests/unit/test_crypto.py
@@ -91,7 +91,7 @@ class X509Test(test.TestCase):
self.assertEqual(start, os.getcwd())
-class RevokeCertsTest(test.TestCase):
+class RevokeCertsTest(test.NoDBTestCase):
@mock.patch('nova.crypto.revoke_cert')
def test_revoke_certs_by_user_and_project(self, mock_revoke):
@@ -107,7 +107,7 @@ class RevokeCertsTest(test.TestCase):
"file_name": file_name}]
self.stub_out('nova.db.certificate_get_all_by_user_and_project',
- mock_certificate_get_all_by_user_and_project)
+ mock_certificate_get_all_by_user_and_project)
crypto.revoke_certs_by_user_and_project(user_id, project_id)
@@ -125,7 +125,7 @@ class RevokeCertsTest(test.TestCase):
"file_name": file_name}]
self.stub_out('nova.db.certificate_get_all_by_user',
- mock_certificate_get_all_by_user)
+ mock_certificate_get_all_by_user)
crypto.revoke_certs_by_user(user_id)
mock_revoke.assert_called_once_with(project_id, mock.ANY)
@@ -142,7 +142,7 @@ class RevokeCertsTest(test.TestCase):
"file_name": file_name}]
self.stub_out('nova.db.certificate_get_all_by_project',
- mock_certificate_get_all_by_project)
+ mock_certificate_get_all_by_project)
crypto.revoke_certs_by_project(project_id)
mock_revoke.assert_called_once_with(project_id, mock.ANY)
@@ -160,7 +160,7 @@ class RevokeCertsTest(test.TestCase):
str(uuid.uuid4()), 'test_file')
-class CertExceptionTests(test.TestCase):
+class CertExceptionTests(test.NoDBTestCase):
def test_fetch_ca_file_not_found(self):
with utils.tempdir() as tmpdir:
self.flags(ca_path=tmpdir)
@@ -178,7 +178,7 @@ class CertExceptionTests(test.TestCase):
crypto.fetch_crl, project_id='fake')
-class EncryptionTests(test.TestCase):
+class EncryptionTests(test.NoDBTestCase):
pubkey = ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDArtgrfBu/g2o28o+H2ng/crv"
"zgES91i/NNPPFTOutXelrJ9QiPTPTm+B8yspLsXifmbsmXztNOlBQgQXs6usxb4"
"fnJKNUZ84Vkp5esbqK/L7eyRqwPvqo7btKBMoAMVX/kUyojMpxb7Ssh6M6Y8cpi"
@@ -255,7 +255,7 @@ e6fCXWECgYEAqgpGvva5kJ1ISgNwnJbwiNw0sOT9BMOsdNZBElf0kJIIy6FMPvap
crypto.ssh_encrypt_text, '', self.text)
-class KeyPairTest(test.TestCase):
+class KeyPairTest(test.NoDBTestCase):
rsa_prv = (
"-----BEGIN RSA PRIVATE KEY-----\n"
"MIIEowIBAAKCAQEA5G44D6lEgMj6cRwCPydsMl1VRN2B9DVyV5lmwssGeJClywZM\n"
@@ -361,7 +361,7 @@ class KeyPairTest(test.TestCase):
keyin.seek(0)
key = paramiko.RSAKey.from_private_key(keyin)
- with mock.patch.object(paramiko.RSAKey, 'generate') as mock_generate:
+ with mock.patch.object(crypto, 'generate_key') as mock_generate:
mock_generate.return_value = key
(private_key, public_key, fingerprint) = crypto.generate_key_pair()
self.assertEqual(self.rsa_pub, public_key)
diff --git a/nova/tests/unit/test_exception.py b/nova/tests/unit/test_exception.py
index fab2c5a303..6a3b2b70a0 100644
--- a/nova/tests/unit/test_exception.py
+++ b/nova/tests/unit/test_exception.py
@@ -124,12 +124,12 @@ class NovaExceptionTestCase(test.NoDBTestCase):
class FakeNovaException_Remote(exception.NovaException):
msg_fmt = "some message"
- if six.PY3:
- def __str__(self):
- return "print the whole trace"
- else:
+ if six.PY2:
def __unicode__(self):
return u"print the whole trace"
+ else:
+ def __str__(self):
+ return "print the whole trace"
exc = FakeNovaException_Remote()
self.assertEqual(u"print the whole trace", six.text_type(exc))
diff --git a/nova/tests/unit/test_fixtures.py b/nova/tests/unit/test_fixtures.py
index 16b87cea47..cbaea9f408 100644
--- a/nova/tests/unit/test_fixtures.py
+++ b/nova/tests/unit/test_fixtures.py
@@ -162,6 +162,7 @@ class TestOSAPIFixture(testtools.TestCase):
self.useFixture(fixtures.OutputStreamCapture())
self.useFixture(fixtures.StandardLogging())
self.useFixture(conf_fixture.ConfFixture())
+ self.useFixture(fixtures.RPCFixture('nova.test'))
api = self.useFixture(fixtures.OSAPIFixture()).api
# request the API root, which provides us the versions of the API
@@ -331,3 +332,17 @@ class TestBannedDBSchemaOperations(testtools.TestCase):
table.drop)
self.assertRaises(exception.DBNotAllowed,
table.alter)
+
+
+class TestStableObjectJsonFixture(testtools.TestCase):
+ def test_changes_sort(self):
+ class TestObject(obj_base.NovaObject):
+ def obj_what_changed(self):
+ return ['z', 'a']
+
+ obj = TestObject()
+ self.assertEqual(['z', 'a'],
+ obj.obj_to_primitive()['nova_object.changes'])
+ with fixtures.StableObjectJsonFixture():
+ self.assertEqual(['a', 'z'],
+ obj.obj_to_primitive()['nova_object.changes'])
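
The new fixture test implies StableObjectJsonFixture works by wrapping obj_to_primitive so the 'nova_object.changes' list comes out sorted, making serialized objects deterministic for sample comparison. An approximate sketch of such a wrapper (not the fixture's actual code):

    def stable_obj_to_primitive(orig_obj_to_primitive):
        # Wrap obj_to_primitive so the changes list is emitted sorted.
        def wrapper(self, *args, **kwargs):
            primitive = orig_obj_to_primitive(self, *args, **kwargs)
            changes = primitive.get('nova_object.changes')
            if changes is not None:
                primitive['nova_object.changes'] = sorted(changes)
            return primitive
        return wrapper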
diff --git a/nova/tests/unit/test_hacking.py b/nova/tests/unit/test_hacking.py
index 0469095e51..48eff888e7 100644
--- a/nova/tests/unit/test_hacking.py
+++ b/nova/tests/unit/test_hacking.py
@@ -628,3 +628,107 @@ class HackingTestCase(test.NoDBTestCase):
checks.check_config_option_in_central_place,
filename="nova/cmd/serialproxy.py",
expected_errors=errors)
+
+ def test_check_doubled_words(self):
+ errors = [(1, 0, "N343")]
+
+ # Artificial break to stop pep8 detecting the test!
+ code = "This is the" + " the best comment"
+ self._assert_has_errors(code, checks.check_doubled_words,
+ expected_errors=errors)
+
+ code = "This is the then best comment"
+ self._assert_has_no_errors(code, checks.check_doubled_words)
+
+ def test_dict_iteritems(self):
+ self.assertEqual(1, len(list(checks.check_python3_no_iteritems(
+ "obj.iteritems()"))))
+
+ self.assertEqual(0, len(list(checks.check_python3_no_iteritems(
+ "six.iteritems(ob))"))))
+
+ def test_dict_iterkeys(self):
+ self.assertEqual(1, len(list(checks.check_python3_no_iterkeys(
+ "obj.iterkeys()"))))
+
+ self.assertEqual(0, len(list(checks.check_python3_no_iterkeys(
+ "six.iterkeys(ob))"))))
+
+ def test_dict_itervalues(self):
+ self.assertEqual(1, len(list(checks.check_python3_no_itervalues(
+ "obj.itervalues()"))))
+
+ self.assertEqual(0, len(list(checks.check_python3_no_itervalues(
+ "six.itervalues(ob))"))))
+
+ def test_cfg_help_with_enough_text(self):
+ errors = [(1, 0, 'N347')]
+
+ # Doesn't have help text at all => should raise error
+ code1 = """
+ opt = cfg.StrOpt("opt1")
+ """
+ self._assert_has_errors(code1, checks.cfg_help_with_enough_text,
+ expected_errors=errors)
+
+ # Explicitly sets an empty string => should raise error
+ code2 = """
+ opt = cfg.StrOpt("opt2", help="")
+ """
+ self._assert_has_errors(code2, checks.cfg_help_with_enough_text,
+ expected_errors=errors)
+
+ # Has help text but too few characters => should raise error
+ code3 = """
+ opt = cfg.StrOpt("opt3", help="meh")
+ """
+ self._assert_has_errors(code3, checks.cfg_help_with_enough_text,
+ expected_errors=errors)
+
+ # Has long enough help text => should *not* raise an error
+ code4 = """
+ opt = cfg.StrOpt("opt4", help="This option does stuff")
+ """
+ self._assert_has_no_errors(code4, checks.cfg_help_with_enough_text)
+
+ # An OptGroup's help text is optional => should *not* raise error
+ code5 = """
+ opt_group = cfg.OptGroup(name="group1", title="group title")
+ """
+ self._assert_has_no_errors(code5, checks.cfg_help_with_enough_text)
+
+ # The help text gets translated
+ code6 = """
+ opt = cfg.StrOpt("opt6",
+ help=_("help with translation usage"))
+ """
+ self._assert_has_no_errors(code6, checks.cfg_help_with_enough_text)
+
+ # The help text uses extra parentheses (weird, but produces a valid string)
+ code7 = """
+ opt = cfg.StrOpt("opt7",
+ help=("help text uses extra paranthesis"))
+ """
+ self._assert_has_no_errors(code7, checks.cfg_help_with_enough_text)
+
+ # Ignore deprecated options. They should be in the release notes
+ code8 = """
+ opt = cfg.DeprecatedOpt('opt8')
+ """
+ self._assert_has_no_errors(code8, checks.cfg_help_with_enough_text)
+
+ code9 = """
+ opt = cfg.StrOpt("opt9",
+ help=\"\"\"
+ This
+
+ is
+
+ multiline
+
+ help
+
+ text.
+ \"\"\")
+ """
+ self._assert_has_no_errors(code9, checks.cfg_help_with_enough_text)
diff --git a/nova/tests/unit/test_metadata.py b/nova/tests/unit/test_metadata.py
index a056fa6d88..44f8972963 100644
--- a/nova/tests/unit/test_metadata.py
+++ b/nova/tests/unit/test_metadata.py
@@ -208,7 +208,6 @@ class MetadataTestCase(test.TestCase):
def test_format_instance_mapping(self):
# Make sure that _format_instance_mappings works.
- ctxt = None
instance_ref0 = objects.Instance(**{'id': 0,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
'root_device_name': None,
@@ -220,7 +219,7 @@ class MetadataTestCase(test.TestCase):
'default_ephemeral_device': None,
'default_swap_device': None})
- def fake_bdm_get(ctxt, uuid, use_slave=False):
+ def fake_bdm_get(ctxt, uuid):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 87654321,
'snapshot_id': None,
@@ -249,7 +248,7 @@ class MetadataTestCase(test.TestCase):
'device_name': '/dev/sdb'})]
self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
- fake_bdm_get)
+ fake_bdm_get)
expected = {'ami': 'sda1',
'root': '/dev/sda1',
@@ -259,9 +258,9 @@ class MetadataTestCase(test.TestCase):
conductor_api.LocalAPI()
- self.assertEqual(base._format_instance_mapping(ctxt,
+ self.assertEqual(base._format_instance_mapping(self.context,
instance_ref0), block_device._DEFAULT_MAPPINGS)
- self.assertEqual(base._format_instance_mapping(ctxt,
+ self.assertEqual(base._format_instance_mapping(self.context,
instance_ref1), expected)
def test_pubkey(self):
@@ -332,8 +331,32 @@ class MetadataTestCase(test.TestCase):
fakes.stub_out_key_pair_funcs(self.stubs)
inst = self.instance.obj_clone()
inst_md = base.InstanceMetadata(inst)
+ expected_paths = [
+ 'ec2/2009-04-04/user-data',
+ 'ec2/2009-04-04/meta-data.json',
+ 'ec2/latest/user-data',
+ 'ec2/latest/meta-data.json',
+ 'openstack/2012-08-10/meta_data.json',
+ 'openstack/2012-08-10/user_data',
+ 'openstack/2013-04-04/meta_data.json',
+ 'openstack/2013-04-04/user_data',
+ 'openstack/2013-10-17/meta_data.json',
+ 'openstack/2013-10-17/user_data',
+ 'openstack/2013-10-17/vendor_data.json',
+ 'openstack/2015-10-15/meta_data.json',
+ 'openstack/2015-10-15/user_data',
+ 'openstack/2015-10-15/vendor_data.json',
+ 'openstack/2015-10-15/network_data.json',
+ 'openstack/latest/meta_data.json',
+ 'openstack/latest/user_data',
+ 'openstack/latest/vendor_data.json',
+ 'openstack/latest/network_data.json',
+ ]
+ actual_paths = []
for (path, value) in inst_md.metadata_for_config_drive():
+ actual_paths.append(path)
self.assertIsNotNone(path)
+ self.assertEqual(expected_paths, actual_paths)
@mock.patch('nova.virt.netutils.get_injected_network_template')
def test_InstanceMetadata_queries_network_API_when_needed(self, mock_get):
@@ -1206,7 +1229,8 @@ class MetadataHandlerTestCase(test.TestCase):
self.assertFalse(mock_context.called, "get_admin_context() should not "
"have been called, the context was given")
mock_uuid.assert_called_once_with('CONTEXT', 'foo',
- expected_attrs=['ec2_ids', 'flavor', 'info_cache'])
+ expected_attrs=['ec2_ids', 'flavor', 'info_cache',
+ 'metadata', 'system_metadata'])
imd.assert_called_once_with(inst, 'bar')
@mock.patch.object(context, 'get_admin_context')
@@ -1222,7 +1246,8 @@ class MetadataHandlerTestCase(test.TestCase):
mock_context.assert_called_once_with()
mock_uuid.assert_called_once_with('CONTEXT', 'foo',
- expected_attrs=['ec2_ids', 'flavor', 'info_cache'])
+ expected_attrs=['ec2_ids', 'flavor', 'info_cache',
+ 'metadata', 'system_metadata'])
imd.assert_called_once_with(inst, 'bar')
diff --git a/nova/tests/unit/test_notifications.py b/nova/tests/unit/test_notifications.py
index 7f4f8436a8..83f2d3cbb4 100644
--- a/nova/tests/unit/test_notifications.py
+++ b/nova/tests/unit/test_notifications.py
@@ -18,7 +18,6 @@
import copy
import mock
-from oslo_config import cfg
from oslo_context import context as o_context
from oslo_context import fixture as o_fixture
@@ -34,9 +33,6 @@ from nova import test
from nova.tests.unit import fake_network
from nova.tests.unit import fake_notifier
-CONF = cfg.CONF
-CONF.import_opt('compute_driver', 'nova.virt.driver')
-
class NotificationsTestCase(test.TestCase):
diff --git a/nova/tests/unit/test_notifier.py b/nova/tests/unit/test_notifier.py
new file mode 100644
index 0000000000..3fc2b37652
--- /dev/null
+++ b/nova/tests/unit/test_notifier.py
@@ -0,0 +1,53 @@
+# Copyright 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import rpc
+from nova import test
+
+
+class TestNotifier(test.NoDBTestCase):
+
+ @mock.patch('oslo_messaging.get_transport')
+ @mock.patch('oslo_messaging.get_notification_transport')
+ @mock.patch('oslo_messaging.Notifier')
+ def test_notification_format_affects_notification_driver(self,
+ mock_notifier,
+ mock_noti_trans,
+ mock_transport):
+ conf = mock.Mock()
+
+ cases = {
+ 'unversioned': [
+ mock.call(mock.ANY, serializer=mock.ANY),
+ mock.call(mock.ANY, serializer=mock.ANY, driver='noop')],
+ 'both': [
+ mock.call(mock.ANY, serializer=mock.ANY),
+ mock.call(mock.ANY, serializer=mock.ANY,
+ topic='versioned_notifications')],
+ 'versioned': [
+ mock.call(mock.ANY, serializer=mock.ANY, driver='noop'),
+ mock.call(mock.ANY, serializer=mock.ANY,
+ topic='versioned_notifications')]}
+
+ for config in cases:
+ mock_notifier.reset_mock()
+ mock_notifier.side_effect = ['first', 'second']
+ conf.notification_format = config
+ rpc.init(conf)
+ self.assertEqual(cases[config], mock_notifier.call_args_list)
+ self.assertEqual('first', rpc.LEGACY_NOTIFIER)
+ self.assertEqual('second', rpc.NOTIFIER)
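
The three-case table above reduces to a simple rule: the legacy notifier is muted (driver='noop') only for 'versioned', the versioned notifier is muted only for 'unversioned', and 'both' leaves both live. As a one-screen sketch (driver names here are illustrative):

    def notifier_drivers(notification_format):
        # Returns (legacy_driver, versioned_driver) per configuration.
        legacy = 'noop' if notification_format == 'versioned' else 'messaging'
        versioned = ('noop' if notification_format == 'unversioned'
                     else 'messaging')
        return legacy, versioned

    assert notifier_drivers('both') == ('messaging', 'messaging')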
diff --git a/nova/tests/unit/test_nova_manage.py b/nova/tests/unit/test_nova_manage.py
index c0ee2e4f4a..af6694f23d 100644
--- a/nova/tests/unit/test_nova_manage.py
+++ b/nova/tests/unit/test_nova_manage.py
@@ -36,7 +36,7 @@ from nova.tests.unit import test_flavors
class FixedIpCommandsTestCase(test.TestCase):
def setUp(self):
super(FixedIpCommandsTestCase, self).setUp()
- db_fakes.stub_out_db_network_api(self.stubs)
+ db_fakes.stub_out_db_network_api(self)
self.commands = manage.FixedIpCommands()
def test_reserve(self):
@@ -76,10 +76,10 @@ class FixedIpCommandsTestCase(test.TestCase):
self.assertNotEqual(1, sys.stdout.getvalue().find('192.168.0.100'))
-class FloatingIpCommandsTestCase(test.TestCase):
+class FloatingIpCommandsTestCase(test.NoDBTestCase):
def setUp(self):
super(FloatingIpCommandsTestCase, self).setUp()
- db_fakes.stub_out_db_network_api(self.stubs)
+ db_fakes.stub_out_db_network_api(self)
self.commands = manage.FloatingIpCommands()
def test_address_to_hosts(self):
@@ -117,7 +117,7 @@ class FloatingIpCommandsTestCase(test.TestCase):
'192.168.100.1/12')
-class NetworkCommandsTestCase(test.TestCase):
+class NetworkCommandsTestCase(test.NoDBTestCase):
def setUp(self):
super(NetworkCommandsTestCase, self).setUp()
self.commands = manage.NetworkCommands()
@@ -211,7 +211,7 @@ class NetworkCommandsTestCase(test.TestCase):
def fake_network_get_all(context):
return [db_fakes.FakeModel(self.net)]
- self.stubs.Set(db, 'network_get_all', fake_network_get_all)
+ self.stub_out('nova.db.network_get_all', fake_network_get_all)
output = StringIO()
sys.stdout = output
self.commands.list()
@@ -245,35 +245,35 @@ class NetworkCommandsTestCase(test.TestCase):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
- self.stubs.Set(db, 'network_get_by_uuid',
- self.fake_network_get_by_uuid)
+ self.stub_out('nova.db.network_get_by_uuid',
+ self.fake_network_get_by_uuid)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
- self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
+ self.stub_out('nova.db.network_delete_safe', fake_network_delete_safe)
self.commands.delete(uuid=self.fake_net['uuid'])
def test_delete_by_cidr(self):
self.fake_net = self.net
self.fake_net['project_id'] = None
self.fake_net['host'] = None
- self.stubs.Set(db, 'network_get_by_cidr',
- self.fake_network_get_by_cidr)
+ self.stub_out('nova.db.network_get_by_cidr',
+ self.fake_network_get_by_cidr)
def fake_network_delete_safe(context, network_id):
self.assertTrue(context.to_dict()['is_admin'])
self.assertEqual(network_id, self.fake_net['id'])
- self.stubs.Set(db, 'network_delete_safe', fake_network_delete_safe)
+ self.stub_out('nova.db.network_delete_safe', fake_network_delete_safe)
self.commands.delete(fixed_range=self.fake_net['cidr'])
def _test_modify_base(self, update_value, project, host, dis_project=None,
dis_host=None):
self.fake_net = self.net
self.fake_update_value = update_value
- self.stubs.Set(db, 'network_get_by_cidr',
- self.fake_network_get_by_cidr)
- self.stubs.Set(db, 'network_update', self.fake_network_update)
+ self.stub_out('nova.db.network_get_by_cidr',
+ self.fake_network_get_by_cidr)
+ self.stub_out('nova.db.network_update', self.fake_network_update)
self.commands.modify(self.fake_net['cidr'], project=project, host=host,
dis_project=dis_project, dis_host=dis_host)
@@ -291,7 +291,7 @@ class NetworkCommandsTestCase(test.TestCase):
dis_host=True)
-class NeutronV2NetworkCommandsTestCase(test.TestCase):
+class NeutronV2NetworkCommandsTestCase(test.NoDBTestCase):
def setUp(self):
super(NeutronV2NetworkCommandsTestCase, self).setUp()
self.flags(network_api_class='nova.network.neutronv2.api.API')
@@ -332,7 +332,7 @@ class ProjectCommandsTestCase(test.TestCase):
self.assertEqual(2, self.commands.quota('admin', 'volumes1', '10'))
-class VmCommandsTestCase(test.TestCase):
+class VmCommandsTestCase(test.NoDBTestCase):
def setUp(self):
super(VmCommandsTestCase, self).setUp()
self.commands = manage.VmCommands()
@@ -459,8 +459,36 @@ class DBCommandsTestCase(test.NoDBTestCase):
self.commands.sync(version=4)
sqla_sync.assert_called_once_with(version=4, database='main')
+ def _fake_db_command(self):
+ mock_mig_1 = mock.MagicMock(__name__="mock_mig_1")
+ mock_mig_2 = mock.MagicMock(__name__="mock_mig_2")
+ mock_mig_1.return_value = (5, 4)
+ mock_mig_2.return_value = (6, 6)
-class ApiDbCommandsTestCase(test.TestCase):
+ class _CommandSub(manage.DbCommands):
+ online_migrations = (
+ mock_mig_1,
+ mock_mig_2,
+ )
+
+ return _CommandSub
+
+ def test_online_migrations(self):
+ command_cls = self._fake_db_command()
+ command = command_cls()
+ command.online_data_migrations(10)
+ command_cls.online_migrations[0].assert_called_once_with(10)
+ command_cls.online_migrations[1].assert_called_once_with(6)
+
+ def test_online_migrations_no_max_count(self):
+ command_cls = self._fake_db_command()
+ command = command_cls()
+ command.online_data_migrations(None)
+ command_cls.online_migrations[0].assert_called_once_with(None)
+ command_cls.online_migrations[1].assert_called_once_with(None)
+
+
+class ApiDbCommandsTestCase(test.NoDBTestCase):
def setUp(self):
super(ApiDbCommandsTestCase, self).setUp()
self.commands = manage.ApiDbCommands()
@@ -488,7 +516,7 @@ class ServiceCommandsTestCase(test.TestCase):
self.assertEqual(2, self.commands.disable('nohost', 'noservice'))
-class CellCommandsTestCase(test.TestCase):
+class CellCommandsTestCase(test.NoDBTestCase):
def setUp(self):
super(CellCommandsTestCase, self).setUp()
self.commands = manage.CellCommands()
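
The two online_data_migrations tests earlier in this file fix the max-count accounting: each migration callable returns a (found, done) tuple, the done count is deducted from the remaining budget before the next migration runs (10 minus the 4 done leaves 6 for the second), and a None max_count is passed straight through so every migration processes everything. A sketch of that loop (inferred from the mock expectations, not the manage.py body):

    def online_data_migrations(migrations, max_count):
        if max_count is None:
            for migrate in migrations:
                migrate(None)  # no budget: migrate everything
            return
        remaining = max_count
        for migrate in migrations:
            if remaining <= 0:
                break
            found, done = migrate(remaining)
            remaining -= done  # only completed rows consume budget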
diff --git a/nova/tests/unit/test_policy.py b/nova/tests/unit/test_policy.py
index 63f118aec4..1e86629eed 100644
--- a/nova/tests/unit/test_policy.py
+++ b/nova/tests/unit/test_policy.py
@@ -299,6 +299,8 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:servers:create:forced_host",
"os_compute_api:servers:detail:get_all_tenants",
"os_compute_api:servers:index:get_all_tenants",
+"os_compute_api:servers:show:host_status",
+"os_compute_api:servers:migrations:force_complete",
"network:attach_external_network",
"os_compute_api:os-admin-actions",
"os_compute_api:os-admin-actions:reset_network",
@@ -381,6 +383,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"compute_extension:simple_tenant_usage:show",
"os_compute_api:servers:start",
"os_compute_api:servers:stop",
+"os_compute_api:servers:trigger_crash_dump",
"os_compute_api:os-create-backup",
"os_compute_api:ips:index",
"os_compute_api:ips:show",
@@ -402,9 +405,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"os_compute_api:os-simple-tenant-usage:show",
"os_compute_api:os-suspend-server:suspend",
"os_compute_api:os-suspend-server:resume",
-"os_compute_api:os-tenant-networks")
-
- self.empty_rules = (
+"os_compute_api:os-tenant-networks",
"compute:create",
"compute:create:attach_network",
"compute:create:attach_volume",
@@ -460,9 +461,7 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"compute_extension:os-tenant-networks",
"network:get_vif_by_mac_address",
"os_compute_api:extensions",
-"os_compute_api:extensions:discoverable",
"os_compute_api:os-config-drive",
-"os_compute_api:os-quota-sets:defaults",
"os_compute_api:servers:confirm_resize",
"os_compute_api:servers:create",
"os_compute_api:servers:create:attach_network",
@@ -570,135 +569,141 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
"network:create_public_dns_domain",
"network:delete_dns_domain",
"os_compute_api:servers:create_image:allow_volume_backed",
-"os_compute_api:os-access-ips:discoverable",
"os_compute_api:os-access-ips",
-"os_compute_api:os-admin-actions:discoverable",
"os_compute_api:os-admin-password",
+"os_compute_api:os-attach-interfaces",
+"os_compute_api:os-certificates:create",
+"os_compute_api:os-certificates:show",
+"os_compute_api:os-consoles:create",
+"os_compute_api:os-consoles:delete",
+"os_compute_api:os-consoles:index",
+"os_compute_api:os-consoles:show",
+"os_compute_api:os-console-output",
+"os_compute_api:os-remote-consoles",
+"os_compute_api:os-deferred-delete",
+"os_compute_api:os-disk-config",
+"os_compute_api:os-extended-status",
+"os_compute_api:os-extended-availability-zone",
+"os_compute_api:os-extended-volumes",
+"os_compute_api:os-flavor-access",
+"os_compute_api:os-flavor-rxtx",
+"os_compute_api:flavors",
+"os_compute_api:os-flavor-extra-specs:index",
+"os_compute_api:os-flavor-extra-specs:show",
+"os_compute_api:os-floating-ip-dns",
+"os_compute_api:os-floating-ip-pools",
+"os_compute_api:os-floating-ips",
+"os_compute_api:os-fping",
+"os_compute_api:image-size",
+"os_compute_api:os-instance-actions",
+"os_compute_api:os-keypairs",
+"os_compute_api:limits",
+"os_compute_api:os-multinic",
+"os_compute_api:os-networks:view",
+"os_compute_api:os-pci:pci_servers",
+"os_compute_api:os-rescue",
+"os_compute_api:os-security-groups",
+"os_compute_api:os-server-password",
+"os_compute_api:os-server-usage",
+"os_compute_api:os-server-groups",
+"os_compute_api:os-shelve:shelve",
+"os_compute_api:os-shelve:unshelve",
+"os_compute_api:os-virtual-interfaces",
+"os_compute_api:os-volumes",
+"os_compute_api:os-volumes-attachments:index",
+"os_compute_api:os-volumes-attachments:show",
+"os_compute_api:os-volumes-attachments:create",
+"os_compute_api:os-volumes-attachments:update",
+"os_compute_api:os-volumes-attachments:delete",
+"os_compute_api:os-availability-zone:list",
+)
+
+ self.non_admin_only_rules = (
+"compute_extension:hide_server_addresses",
+"os_compute_api:os-hide-server-addresses")
+
+ self.allow_all_rules = (
+"os_compute_api:os-quota-sets:defaults",
+"os_compute_api:extensions:discoverable",
+"os_compute_api:os-access-ips:discoverable",
+"os_compute_api:os-admin-actions:discoverable",
"os_compute_api:os-admin-password:discoverable",
"os_compute_api:os-aggregates:discoverable",
"os_compute_api:os-agents:discoverable",
-"os_compute_api:os-attach-interfaces",
"os_compute_api:os-attach-interfaces:discoverable",
"os_compute_api:os-baremetal-nodes:discoverable",
"os_compute_api:os-block-device-mapping-v1:discoverable",
"os_compute_api:os-cells:discoverable",
-"os_compute_api:os-certificates:create",
-"os_compute_api:os-certificates:show",
"os_compute_api:os-certificates:discoverable",
"os_compute_api:os-cloudpipe:discoverable",
"os_compute_api:os-consoles:discoverable",
-"os_compute_api:os-consoles:create",
-"os_compute_api:os-consoles:delete",
-"os_compute_api:os-consoles:index",
-"os_compute_api:os-consoles:show",
"os_compute_api:os-console-output:discoverable",
-"os_compute_api:os-console-output",
-"os_compute_api:os-remote-consoles",
"os_compute_api:os-remote-consoles:discoverable",
"os_compute_api:os-create-backup:discoverable",
-"os_compute_api:os-deferred-delete",
"os_compute_api:os-deferred-delete:discoverable",
-"os_compute_api:os-disk-config",
"os_compute_api:os-disk-config:discoverable",
"os_compute_api:os-evacuate:discoverable",
"os_compute_api:os-extended-server-attributes:discoverable",
-"os_compute_api:os-extended-status",
"os_compute_api:os-extended-status:discoverable",
-"os_compute_api:os-extended-availability-zone",
"os_compute_api:os-extended-availability-zone:discoverable",
"os_compute_api:extension_info:discoverable",
-"os_compute_api:os-extended-volumes",
"os_compute_api:os-extended-volumes:discoverable",
"os_compute_api:os-fixed-ips:discoverable",
-"os_compute_api:os-flavor-access",
"os_compute_api:os-flavor-access:discoverable",
-"os_compute_api:os-flavor-rxtx",
"os_compute_api:os-flavor-rxtx:discoverable",
-"os_compute_api:flavors",
"os_compute_api:flavors:discoverable",
"os_compute_api:os-flavor-extra-specs:discoverable",
-"os_compute_api:os-flavor-extra-specs:index",
-"os_compute_api:os-flavor-extra-specs:show",
"os_compute_api:os-flavor-manage:discoverable",
-"os_compute_api:os-floating-ip-dns",
"os_compute_api:os-floating-ip-dns:discoverable",
-"os_compute_api:os-floating-ip-pools",
"os_compute_api:os-floating-ip-pools:discoverable",
-"os_compute_api:os-floating-ips",
"os_compute_api:os-floating-ips:discoverable",
"os_compute_api:os-floating-ips-bulk:discoverable",
-"os_compute_api:os-fping",
"os_compute_api:os-fping:discoverable",
"os_compute_api:os-hide-server-addresses:discoverable",
"os_compute_api:os-hosts:discoverable",
"os_compute_api:os-hypervisors:discoverable",
"os_compute_api:images:discoverable",
-"os_compute_api:image-size",
"os_compute_api:image-size:discoverable",
-"os_compute_api:os-instance-actions",
"os_compute_api:os-instance-actions:discoverable",
"os_compute_api:os-instance-usage-audit-log:discoverable",
"os_compute_api:ips:discoverable",
"os_compute_api:os-keypairs:discoverable",
-"os_compute_api:os-keypairs",
-"os_compute_api:limits",
"os_compute_api:limits:discoverable",
"os_compute_api:os-lock-server:discoverable",
"os_compute_api:os-migrate-server:discoverable",
-"os_compute_api:os-multinic",
"os_compute_api:os-multinic:discoverable",
-"os_compute_api:os-networks:view",
"os_compute_api:os-networks:discoverable",
"os_compute_api:os-networks-associate:discoverable",
"os_compute_api:os-pause-server:discoverable",
-"os_compute_api:os-pci:pci_servers",
"os_compute_api:os-pci:discoverable",
"os_compute_api:os-personality:discoverable",
"os_compute_api:os-preserve-ephemeral-rebuild:discoverable",
"os_compute_api:os-quota-sets:discoverable",
"os_compute_api:os-quota-class-sets:discoverable",
-"os_compute_api:os-rescue",
"os_compute_api:os-rescue:discoverable",
"os_compute_api:os-scheduler-hints:discoverable",
"os_compute_api:os-security-group-default-rules:discoverable",
-"os_compute_api:os-security-groups",
"os_compute_api:os-security-groups:discoverable",
"os_compute_api:os-server-diagnostics:discoverable",
-"os_compute_api:os-server-password",
"os_compute_api:os-server-password:discoverable",
-"os_compute_api:os-server-usage",
"os_compute_api:os-server-usage:discoverable",
-"os_compute_api:os-server-groups",
"os_compute_api:os-server-groups:discoverable",
"os_compute_api:os-services:discoverable",
"os_compute_api:server-metadata:discoverable",
"os_compute_api:servers:discoverable",
-"os_compute_api:os-shelve:shelve",
"os_compute_api:os-shelve:shelve:discoverable",
"os_compute_api:os-simple-tenant-usage:discoverable",
"os_compute_api:os-suspend-server:discoverable",
"os_compute_api:os-tenant-networks:discoverable",
-"os_compute_api:os-shelve:unshelve",
"os_compute_api:os-user-data:discoverable",
-"os_compute_api:os-virtual-interfaces",
"os_compute_api:os-virtual-interfaces:discoverable",
-"os_compute_api:os-volumes",
"os_compute_api:os-volumes:discoverable",
-"os_compute_api:os-volumes-attachments:index",
-"os_compute_api:os-volumes-attachments:show",
-"os_compute_api:os-volumes-attachments:create",
-"os_compute_api:os-volumes-attachments:update",
-"os_compute_api:os-volumes-attachments:delete",
"os_compute_api:os-volumes-attachments:discoverable",
-"os_compute_api:os-availability-zone:list",
"os_compute_api:os-availability-zone:discoverable",
"os_compute_api:os-used-limits:discoverable",
"os_compute_api:os-migrations:discoverable",
-"os_compute_api:os-assisted-volume-snapshots:discoverable")
-
- self.non_admin_only_rules = (
-"compute_extension:hide_server_addresses",
-"os_compute_api:os-hide-server-addresses")
+"os_compute_api:os-assisted-volume-snapshots:discoverable",
+)
def test_all_rules_in_sample_file(self):
special_rules = ["context_is_admin", "admin_or_owner", "default"]
@@ -726,20 +731,25 @@ class RealRolePolicyTestCase(test.NoDBTestCase):
policy.enforce(self.non_admin_context, rule,
{'project_id': 'fake', 'user_id': 'fake'})
- def test_empty_rules(self):
+ def test_no_empty_rules(self):
rules = policy.get_rules()
- for rule in self.empty_rules:
- self.assertEqual('@', str(rules[rule]),
- "%s isn't empty rule" % rule)
+ for rule in rules:
+ self.assertNotEqual('', str(rule),
+ '%s should not be empty, use "@" instead if the policy '
+ 'should allow everything' % rule)
+
+ def test_allow_all_rules(self):
+ for rule in self.allow_all_rules:
+ policy.enforce(self.non_admin_context, rule, self.target)
def test_rule_missing(self):
rules = policy.get_rules()
# eliqiao os_compute_api:os-quota-class-sets:show requires
- # admin=True or quota_class match, this rule wont' belone to
+ # admin=True or quota_class match, this rule won't belong to
# admin_only, non_admin, admin_or_user, empty_rule
special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin',
'os_compute_api:os-quota-class-sets:show')
result = set(rules.keys()) - set(self.admin_only_rules +
- self.admin_or_owner_rules + self.empty_rules +
- self.non_admin_only_rules + special_rules)
+ self.admin_or_owner_rules + self.non_admin_only_rules +
+ self.allow_all_rules + special_rules)
self.assertEqual(set([]), result)
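
For context, the "@" rule that test_allow_all_rules exercises is oslo.policy's explicit allow-everything check, which is why the new test_no_empty_rules steers authors away from the ambiguous empty string. A minimal sketch, assuming oslo.policy and oslo.config are installed (the rule name is taken from the list above):

from oslo_config import cfg
from oslo_policy import policy as oslo_policy

enforcer = oslo_policy.Enforcer(cfg.CONF)
enforcer.set_rules(oslo_policy.Rules.from_dict(
    {"os_compute_api:os-quota-sets:defaults": "@"}))
# "@" matches any credentials, so even a plain member role passes.
assert enforcer.enforce("os_compute_api:os-quota-sets:defaults",
                        {}, {"roles": ["member"]})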
diff --git a/nova/tests/unit/test_quota.py b/nova/tests/unit/test_quota.py
index b17b7932c7..363792d482 100644
--- a/nova/tests/unit/test_quota.py
+++ b/nova/tests/unit/test_quota.py
@@ -16,12 +16,13 @@
import datetime
-from oslo_config import cfg
+from oslo_db.sqlalchemy import enginefacade
from oslo_utils import timeutils
from six.moves import range
from nova import compute
from nova.compute import flavors
+import nova.conf
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqa_api
@@ -31,8 +32,7 @@ from nova import quota
from nova import test
import nova.tests.unit.image.fake
-CONF = cfg.CONF
-CONF.import_opt('compute_driver', 'nova.virt.driver')
+CONF = nova.conf.CONF
class QuotaIntegrationTestCase(test.TestCase):
@@ -239,6 +239,7 @@ class QuotaIntegrationTestCase(test.TestCase):
assertInstancesReserved(0)
+@enginefacade.transaction_context_provider
class FakeContext(object):
def __init__(self, project_id, quota_class):
self.is_admin = False
@@ -2389,7 +2390,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
)
def make_sync(res_name):
- def sync(context, project_id, user_id, session):
+ def sync(context, project_id, user_id):
self.sync_called.add(res_name)
if res_name in self.usages:
if self.usages[res_name].in_use < 0:
@@ -2445,16 +2446,12 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
until_refresh=None),
]
- def fake_get_session():
- return FakeSession()
-
- def fake_get_project_user_quota_usages(context, session, project_id,
- user_id):
+ def fake_get_project_user_quota_usages(context, project_id, user_id):
return self.usages.copy(), self.usages.copy()
def fake_quota_usage_create(project_id, user_id, resource,
in_use, reserved, until_refresh,
- session=None, save=True):
+ session):
quota_usage_ref = self._make_quota_usage(
project_id, user_id, resource, in_use, reserved, until_refresh,
timeutils.utcnow(), timeutils.utcnow())
@@ -2465,7 +2462,7 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
def fake_reservation_create(uuid, usage_id, project_id,
user_id, resource, delta, expire,
- session=None):
+ session):
reservation_ref = self._make_reservation(
uuid, usage_id, project_id, user_id, resource, delta, expire,
timeutils.utcnow(), timeutils.utcnow())
@@ -2474,7 +2471,6 @@ class QuotaReserveSqlAlchemyTestCase(test.TestCase):
return reservation_ref
- self.stub_out('nova.db.sqlalchemy.api.get_session', fake_get_session)
self.stub_out('nova.db.sqlalchemy.api._get_project_user_quota_usages',
fake_get_project_user_quota_usages)
self.stub_out('nova.db.sqlalchemy.api._quota_usage_create',
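fake_quota_usage_create)

The @enginefacade.transaction_context_provider decorator added to FakeContext above is what lets a bare test context flow through oslo.db's new facade, which no longer passes an explicit session around (hence the dropped session arguments in the fakes). A minimal sketch of the pattern, with an illustrative class name:

from oslo_db.sqlalchemy import enginefacade

@enginefacade.transaction_context_provider
class MinimalContext(object):
    """Gains the session/connection attributes oslo.db decorators expect."""

ctxt = MinimalContext()
# ctxt can now be handed to functions decorated with
# enginefacade.reader / enginefacade.writer as their context argument.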
diff --git a/nova/tests/unit/test_rpc.py b/nova/tests/unit/test_rpc.py
new file mode 100644
index 0000000000..ece4b86d76
--- /dev/null
+++ b/nova/tests/unit/test_rpc.py
@@ -0,0 +1,342 @@
+# Copyright 2016 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import copy
+
+import fixtures
+import mock
+import oslo_messaging as messaging
+from oslo_serialization import jsonutils
+import testtools
+
+from nova import context
+from nova import rpc
+from nova import test
+
+
+# A fixture that resets all of the global variables in nova.rpc
+class RPCResetFixture(fixtures.Fixture):
+ def _setUp(self):
+ self.trans = copy.copy(rpc.TRANSPORT)
+ self.noti_trans = copy.copy(rpc.NOTIFICATION_TRANSPORT)
+ self.noti = copy.copy(rpc.NOTIFIER)
+ self.all_mods = copy.copy(rpc.ALLOWED_EXMODS)
+ self.ext_mods = copy.copy(rpc.EXTRA_EXMODS)
+ self.addCleanup(self._reset_everything)
+
+ def _reset_everything(self):
+ rpc.TRANSPORT = self.trans
+ rpc.NOTIFICATION_TRANSPORT = self.noti_trans
+ rpc.NOTIFIER = self.noti
+ rpc.ALLOWED_EXMODS = self.all_mods
+ rpc.EXTRA_EXMODS = self.ext_mods
+
+
+# We can't use nova.test.TestCase because it sets up an RPCFixture
+# that would nullify all of this testing
+class TestRPC(testtools.TestCase):
+ def setUp(self):
+ super(TestRPC, self).setUp()
+ self.useFixture(RPCResetFixture())
+
+ @mock.patch.object(rpc, 'get_allowed_exmods')
+ @mock.patch.object(rpc, 'RequestContextSerializer')
+ @mock.patch.object(messaging, 'get_transport')
+ @mock.patch.object(messaging, 'get_notification_transport')
+ @mock.patch.object(messaging, 'Notifier')
+ def test_init_unversioned(self, mock_notif, mock_noti_trans, mock_trans,
+ mock_ser, mock_exmods):
+ # The expected call to get the legacy notifier will require no new
+ # kwargs, and we expect the new notifier will need the noop driver
+ expected = [{}, {'driver': 'noop'}]
+ self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser,
+ mock_exmods, 'unversioned', expected)
+
+ @mock.patch.object(rpc, 'get_allowed_exmods')
+ @mock.patch.object(rpc, 'RequestContextSerializer')
+ @mock.patch.object(messaging, 'get_transport')
+ @mock.patch.object(messaging, 'get_notification_transport')
+ @mock.patch.object(messaging, 'Notifier')
+ def test_init_both(self, mock_notif, mock_noti_trans, mock_trans,
+ mock_ser, mock_exmods):
+ expected = [{}, {'topic': 'versioned_notifications'}]
+ self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser,
+ mock_exmods, 'both', expected)
+
+ @mock.patch.object(rpc, 'get_allowed_exmods')
+ @mock.patch.object(rpc, 'RequestContextSerializer')
+ @mock.patch.object(messaging, 'get_transport')
+ @mock.patch.object(messaging, 'get_notification_transport')
+ @mock.patch.object(messaging, 'Notifier')
+ def test_init_versioned(self, mock_notif, mock_noti_trans, mock_trans,
+ mock_ser, mock_exmods):
+ expected = [{'driver': 'noop'}, {'topic': 'versioned_notifications'}]
+ self._test_init(mock_notif, mock_noti_trans, mock_trans, mock_ser,
+ mock_exmods, 'versioned', expected)
+
+ def test_cleanup_transport_null(self):
+ rpc.NOTIFICATION_TRANSPORT = mock.Mock()
+ rpc.LEGACY_NOTIFIER = mock.Mock()
+ rpc.NOTIFIER = mock.Mock()
+ self.assertRaises(AssertionError, rpc.cleanup)
+
+ def test_cleanup_notification_transport_null(self):
+ rpc.TRANSPORT = mock.Mock()
+ rpc.NOTIFIER = mock.Mock()
+ self.assertRaises(AssertionError, rpc.cleanup)
+
+ def test_cleanup_legacy_notifier_null(self):
+ rpc.TRANSPORT = mock.Mock()
+ rpc.NOTIFICATION_TRANSPORT = mock.Mock()
+ rpc.NOTIFIER = mock.Mock()
+ self.assertRaises(AssertionError, rpc.cleanup)
+
+ def test_cleanup_notifier_null(self):
+ rpc.TRANSPORT = mock.Mock()
+ rpc.LEGACY_NOTIFIER = mock.Mock()
+ rpc.NOTIFICATION_TRANSPORT = mock.Mock()
+ self.assertRaises(AssertionError, rpc.cleanup)
+
+ def test_cleanup(self):
+ rpc.LEGACY_NOTIFIER = mock.Mock()
+ rpc.NOTIFIER = mock.Mock()
+ rpc.NOTIFICATION_TRANSPORT = mock.Mock()
+ rpc.TRANSPORT = mock.Mock()
+ trans_cleanup = mock.Mock()
+ not_trans_cleanup = mock.Mock()
+ rpc.TRANSPORT.cleanup = trans_cleanup
+ rpc.NOTIFICATION_TRANSPORT.cleanup = not_trans_cleanup
+
+ rpc.cleanup()
+
+ trans_cleanup.assert_called_once_with()
+ not_trans_cleanup.assert_called_once_with()
+ self.assertIsNone(rpc.TRANSPORT)
+ self.assertIsNone(rpc.NOTIFICATION_TRANSPORT)
+ self.assertIsNone(rpc.LEGACY_NOTIFIER)
+ self.assertIsNone(rpc.NOTIFIER)
+
+ @mock.patch.object(messaging, 'set_transport_defaults')
+ def test_set_defaults(self, mock_set):
+ control_exchange = mock.Mock()
+
+ rpc.set_defaults(control_exchange)
+
+ mock_set.assert_called_once_with(control_exchange)
+
+ def test_add_extra_exmods(self):
+ rpc.EXTRA_EXMODS = []
+
+ rpc.add_extra_exmods('foo', 'bar')
+
+ self.assertEqual(['foo', 'bar'], rpc.EXTRA_EXMODS)
+
+ def test_clear_extra_exmods(self):
+ rpc.EXTRA_EXMODS = ['foo', 'bar']
+
+ rpc.clear_extra_exmods()
+
+ self.assertEqual(0, len(rpc.EXTRA_EXMODS))
+
+ def test_get_allowed_exmods(self):
+ rpc.ALLOWED_EXMODS = ['foo']
+ rpc.EXTRA_EXMODS = ['bar']
+
+ exmods = rpc.get_allowed_exmods()
+
+ self.assertEqual(['foo', 'bar'], exmods)
+
+ @mock.patch.object(messaging, 'TransportURL')
+ def test_get_transport_url(self, mock_url):
+ conf = mock.Mock()
+ rpc.CONF = conf
+ mock_url.parse.return_value = 'foo'
+
+ url = rpc.get_transport_url(url_str='bar')
+
+ self.assertEqual('foo', url)
+ mock_url.parse.assert_called_once_with(conf, 'bar',
+ rpc.TRANSPORT_ALIASES)
+
+ @mock.patch.object(messaging, 'TransportURL')
+ def test_get_transport_url_null(self, mock_url):
+ conf = mock.Mock()
+ rpc.CONF = conf
+ mock_url.parse.return_value = 'foo'
+
+ url = rpc.get_transport_url()
+
+ self.assertEqual('foo', url)
+ mock_url.parse.assert_called_once_with(conf, None,
+ rpc.TRANSPORT_ALIASES)
+
+ @mock.patch.object(rpc, 'RequestContextSerializer')
+ @mock.patch.object(messaging, 'RPCClient')
+ def test_get_client(self, mock_client, mock_ser):
+ rpc.TRANSPORT = mock.Mock()
+ tgt = mock.Mock()
+ ser = mock.Mock()
+ mock_client.return_value = 'client'
+ mock_ser.return_value = ser
+
+ client = rpc.get_client(tgt, version_cap='1.0', serializer='foo')
+
+ mock_ser.assert_called_once_with('foo')
+ mock_client.assert_called_once_with(rpc.TRANSPORT,
+ tgt, version_cap='1.0',
+ serializer=ser)
+ self.assertEqual('client', client)
+
+ @mock.patch.object(rpc, 'RequestContextSerializer')
+ @mock.patch.object(messaging, 'get_rpc_server')
+ def test_get_server(self, mock_get, mock_ser):
+ rpc.TRANSPORT = mock.Mock()
+ ser = mock.Mock()
+ tgt = mock.Mock()
+ ends = mock.Mock()
+ mock_ser.return_value = ser
+ mock_get.return_value = 'server'
+
+ server = rpc.get_server(tgt, ends, serializer='foo')
+
+ mock_ser.assert_called_once_with('foo')
+ mock_get.assert_called_once_with(rpc.TRANSPORT, tgt, ends,
+ executor='eventlet', serializer=ser)
+ self.assertEqual('server', server)
+
+ def test_get_notifier(self):
+ rpc.LEGACY_NOTIFIER = mock.Mock()
+ mock_prep = mock.Mock()
+ mock_prep.return_value = 'notifier'
+ rpc.LEGACY_NOTIFIER.prepare = mock_prep
+
+ notifier = rpc.get_notifier('service', publisher_id='foo')
+
+ mock_prep.assert_called_once_with(publisher_id='foo')
+ self.assertEqual('notifier', notifier)
+
+ def test_get_notifier_null_publisher(self):
+ rpc.LEGACY_NOTIFIER = mock.Mock()
+ mock_prep = mock.Mock()
+ mock_prep.return_value = 'notifier'
+ rpc.LEGACY_NOTIFIER.prepare = mock_prep
+
+ notifier = rpc.get_notifier('service', host='bar')
+
+ mock_prep.assert_called_once_with(publisher_id='service.bar')
+ self.assertEqual('notifier', notifier)
+
+ def test_get_versioned_notifier(self):
+ rpc.NOTIFIER = mock.Mock()
+ mock_prep = mock.Mock()
+ mock_prep.return_value = 'notifier'
+ rpc.NOTIFIER.prepare = mock_prep
+
+ notifier = rpc.get_versioned_notifier('service.foo')
+
+ mock_prep.assert_called_once_with(publisher_id='service.foo')
+ self.assertEqual('notifier', notifier)
+
+ def _test_init(self, mock_notif, mock_noti_trans, mock_trans, mock_ser,
+ mock_exmods, notif_format, expected_driver_topic_kwargs):
+ legacy_notifier = mock.Mock()
+ notifier = mock.Mock()
+ notif_transport = mock.Mock()
+ transport = mock.Mock()
+ serializer = mock.Mock()
+ conf = mock.Mock()
+
+ conf.notification_format = notif_format
+ mock_exmods.return_value = ['foo']
+ mock_trans.return_value = transport
+ mock_noti_trans.return_value = notif_transport
+ mock_ser.return_value = serializer
+ mock_notif.side_effect = [legacy_notifier, notifier]
+
+ rpc.init(conf)
+
+ mock_exmods.assert_called_once_with()
+ mock_trans.assert_called_once_with(conf,
+ allowed_remote_exmods=['foo'],
+ aliases=rpc.TRANSPORT_ALIASES)
+ self.assertIsNotNone(rpc.TRANSPORT)
+ self.assertIsNotNone(rpc.LEGACY_NOTIFIER)
+ self.assertIsNotNone(rpc.NOTIFIER)
+ self.assertEqual(legacy_notifier, rpc.LEGACY_NOTIFIER)
+ self.assertEqual(notifier, rpc.NOTIFIER)
+
+ expected_calls = []
+ for kwargs in expected_driver_topic_kwargs:
+ expected_kwargs = {'serializer': serializer}
+ expected_kwargs.update(kwargs)
+ expected_calls.append(((notif_transport,), expected_kwargs))
+
+ self.assertEqual(expected_calls, mock_notif.call_args_list,
+ "The calls to messaging.Notifier() did not create "
+ "the legacy and versioned notifiers properly.")
+
+
+class TestJsonPayloadSerializer(test.NoDBTestCase):
+ def test_serialize_entity(self):
+ with mock.patch.object(jsonutils, 'to_primitive') as mock_prim:
+ rpc.JsonPayloadSerializer.serialize_entity('context', 'entity')
+
+ mock_prim.assert_called_once_with('entity', convert_instances=True)
+
+
+class TestRequestContextSerializer(test.NoDBTestCase):
+ def setUp(self):
+ super(TestRequestContextSerializer, self).setUp()
+ self.mock_base = mock.Mock()
+ self.ser = rpc.RequestContextSerializer(self.mock_base)
+ self.ser_null = rpc.RequestContextSerializer(None)
+
+ def test_serialize_entity(self):
+ self.mock_base.serialize_entity.return_value = 'foo'
+
+ ser_ent = self.ser.serialize_entity('context', 'entity')
+
+ self.mock_base.serialize_entity.assert_called_once_with('context',
+ 'entity')
+ self.assertEqual('foo', ser_ent)
+
+ def test_serialize_entity_null_base(self):
+ ser_ent = self.ser_null.serialize_entity('context', 'entity')
+
+ self.assertEqual('entity', ser_ent)
+
+ def test_deserialize_entity(self):
+ self.mock_base.deserialize_entity.return_value = 'foo'
+
+ deser_ent = self.ser.deserialize_entity('context', 'entity')
+
+ self.mock_base.deserialize_entity.assert_called_once_with('context',
+ 'entity')
+ self.assertEqual('foo', deser_ent)
+
+ def test_deserialize_entity_null_base(self):
+ deser_ent = self.ser_null.deserialize_entity('context', 'entity')
+
+ self.assertEqual('entity', deser_ent)
+
+ def test_serialize_context(self):
+ context = mock.Mock()
+
+ self.ser.serialize_context(context)
+
+ context.to_dict.assert_called_once_with()
+
+ @mock.patch.object(context, 'RequestContext')
+ def test_deserialize_context(self, mock_req):
+ self.ser.deserialize_context('context')
+
+ mock_req.from_dict.assert_called_once_with('context')
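
The three test_init_* cases above pin down how rpc.init is expected to derive the legacy and versioned notifier kwargs from conf.notification_format. An illustrative reconstruction of just that branching (the real logic lives in nova.rpc and may differ in detail):

def pick_notifier_kwargs(notification_format):
    """Return (legacy_kwargs, versioned_kwargs) for messaging.Notifier."""
    if notification_format == 'unversioned':
        # Only legacy notifications; mute the versioned notifier.
        return {}, {'driver': 'noop'}
    if notification_format == 'both':
        return {}, {'topic': 'versioned_notifications'}
    # 'versioned': mute legacy, emit only versioned notifications.
    return {'driver': 'noop'}, {'topic': 'versioned_notifications'}

assert pick_notifier_kwargs('both') == (
    {}, {'topic': 'versioned_notifications'})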
diff --git a/nova/tests/unit/test_utils.py b/nova/tests/unit/test_utils.py
index 59762633fd..4208d55d0d 100644
--- a/nova/tests/unit/test_utils.py
+++ b/nova/tests/unit/test_utils.py
@@ -127,6 +127,14 @@ class GenericUtilsTestCase(test.NoDBTestCase):
hostname = "a" * 64
self.assertEqual(63, len(utils.sanitize_hostname(hostname)))
+ def test_hostname_truncated_no_hyphen(self):
+ hostname = "a" * 62
+ hostname = hostname + '-' + 'a'
+ res = utils.sanitize_hostname(hostname)
+ # we trim to 63 and then trim the trailing dash
+ self.assertEqual(62, len(res))
+ self.assertFalse(res.endswith('-'), 'The hostname ends with a -')
+
def test_generate_password(self):
password = utils.generate_password()
self.assertTrue([c for c in password if c in '0123456789'])
@@ -971,10 +979,6 @@ class ValidateNeutronConfiguration(test.NoDBTestCase):
self.flags(network_api_class='nova.network.neutronv2.api.API')
self.assertTrue(utils.is_neutron())
- def test_quantum(self):
- self.flags(network_api_class='nova.network.quantumv2.api.API')
- self.assertTrue(utils.is_neutron())
-
class AutoDiskConfigUtilTestCase(test.NoDBTestCase):
def test_is_auto_disk_config_disabled(self):
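
The new hostname test above encodes a subtle truncation rule: trim to the 63-character DNS label limit, then drop any dash left dangling at the end. A self-contained sketch of just that rule (the real nova.utils.sanitize_hostname also normalizes characters first):

def truncate_hostname(hostname, limit=63):
    # Trim to the label limit, then strip a trailing dash if the cut
    # happened to land right after one.
    return hostname[:limit].rstrip('-')

name = 'a' * 62 + '-' + 'a'
assert len(truncate_hostname(name)) == 62
assert not truncate_hostname(name).endswith('-')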
diff --git a/nova/tests/unit/test_wsgi.py b/nova/tests/unit/test_wsgi.py
index 4941af753d..6d6293204e 100644
--- a/nova/tests/unit/test_wsgi.py
+++ b/nova/tests/unit/test_wsgi.py
@@ -244,14 +244,10 @@ class TestWSGIServerWithSSL(test.NoDBTestCase):
fake_ssl_server.start()
self.assertNotEqual(0, fake_ssl_server.port)
- cli = eventlet.connect(("localhost", fake_ssl_server.port))
- cli = eventlet.wrap_ssl(cli,
- ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
-
- cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
- 'Connection: close\r\nContent-length:4\r\n\r\nPING')
- response = cli.read(8192)
- self.assertEqual(response[-4:], "PONG")
+ response = requests.post(
+ 'https://127.0.0.1:%s/' % fake_ssl_server.port,
+ verify=os.path.join(SSL_CERT_DIR, 'ca.crt'), data='PING')
+ self.assertEqual(response.text, 'PONG')
fake_ssl_server.stop()
fake_ssl_server.wait()
@@ -272,24 +268,19 @@ class TestWSGIServerWithSSL(test.NoDBTestCase):
fake_server.start()
self.assertNotEqual(0, fake_server.port)
- cli = eventlet.connect(("localhost", fake_ssl_server.port))
- cli = eventlet.wrap_ssl(cli,
- ca_certs=os.path.join(SSL_CERT_DIR, 'ca.crt'))
-
- cli.write('POST / HTTP/1.1\r\nHost: localhost\r\n'
- 'Connection: close\r\nContent-length:4\r\n\r\nPING')
- response = cli.read(8192)
- self.assertEqual(response[-4:], "PONG")
-
- cli = eventlet.connect(("localhost", fake_server.port))
+ response = requests.post(
+ 'https://127.0.0.1:%s/' % fake_ssl_server.port,
+ verify=os.path.join(SSL_CERT_DIR, 'ca.crt'), data='PING')
+ self.assertEqual(response.text, 'PONG')
- cli.sendall('POST / HTTP/1.1\r\nHost: localhost\r\n'
- 'Connection: close\r\nContent-length:4\r\n\r\nPING')
- response = cli.recv(8192)
- self.assertEqual(response[-4:], "PONG")
+ response = requests.post('http://127.0.0.1:%s/' % fake_server.port,
+ data='PING')
+ self.assertEqual(response.text, 'PONG')
fake_ssl_server.stop()
fake_ssl_server.wait()
+ fake_server.stop()
+ fake_server.wait()
@testtools.skipIf(not utils.is_linux(), 'SO_REUSEADDR behaves differently '
'on OSX and BSD, see bugs '
diff --git a/nova/tests/unit/utils.py b/nova/tests/unit/utils.py
index 7f6f052b73..026149e683 100644
--- a/nova/tests/unit/utils.py
+++ b/nova/tests/unit/utils.py
@@ -38,14 +38,15 @@ def get_test_admin_context():
return nova.context.get_admin_context()
-def get_test_image_info(context, instance_ref):
+def get_test_image_object(context, instance_ref):
if not context:
context = get_test_admin_context()
image_ref = instance_ref['image_ref']
image_service, image_id = glance.get_remote_image_service(context,
image_ref)
- return image_service.show(context, image_id)
+ return objects.ImageMeta.from_dict(
+ image_service.show(context, image_id))
def get_test_flavor(context=None, options=None):
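
The rename from get_test_image_info to get_test_image_object reflects that callers now receive a typed versioned object instead of the raw dict from the image service. Illustratively (field values here are made up):

from nova import objects

objects.register_all()
meta = objects.ImageMeta.from_dict(
    {'id': 'c1d9f1c6-1234-5678-9abc-def012345678', 'name': 'cirros'})
assert meta.name == 'cirros'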
diff --git a/nova/tests/unit/virt/disk/vfs/test_localfs.py b/nova/tests/unit/virt/disk/vfs/test_localfs.py
index e767ca0844..069424175a 100644
--- a/nova/tests/unit/virt/disk/vfs/test_localfs.py
+++ b/nova/tests/unit/virt/disk/vfs/test_localfs.py
@@ -16,7 +16,6 @@ import tempfile
import mock
from oslo_concurrency import processutils
-from oslo_config import cfg
from nova import exception
from nova import test
@@ -26,7 +25,6 @@ from nova.virt.disk.mount import nbd
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt.image import model as imgmodel
-CONF = cfg.CONF
dirs = []
files = {}
diff --git a/nova/tests/unit/virt/fakelibosinfo.py b/nova/tests/unit/virt/fakelibosinfo.py
new file mode 100644
index 0000000000..470d5c52b2
--- /dev/null
+++ b/nova/tests/unit/virt/fakelibosinfo.py
@@ -0,0 +1,131 @@
+# Copyright 2016 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+def match_item(obj, fltr):
+ key, val = list(fltr._filter.items())[0]
+ if key == 'class':
+ key = '_class'
+ elif key == 'short-id':
+ key = 'short_id'
+ return getattr(obj, key, None) == val
+
+
+class Loader(object):
+
+ def process_default_path(self):
+ pass
+
+ def get_db(self):
+ return Db()
+
+
+class Db(object):
+
+ def __init__(self):
+ # Generate test devices
+ self.devs = []
+ self.oslist = None
+
+ net = Device()
+ net._class = 'net'
+ net.name = 'virtio-net'
+ self.devs.append(net)
+
+ net = Device()
+ net._class = 'block'
+ net.name = 'virtio-block'
+ self.devs.append(net)
+
+ devlist = DeviceList()
+ devlist.devices = self.devs
+
+ fedora = Os()
+ fedora.name = 'Fedora 22'
+ fedora.id = 'http://fedoraproject.org/fedora/22'
+ fedora.short_id = 'fedora22'
+ fedora.dev_list = devlist
+
+ self.oslist = OsList()
+ self.oslist.os_list = [fedora]
+
+ def get_os_list(self):
+ return self.oslist
+
+
+class Filter(object):
+ def __init__(self):
+ self._filter = {}
+
+ @classmethod
+ def new(cls):
+ return cls()
+
+ def add_constraint(self, flt_key, val):
+ self._filter[flt_key] = val
+
+
+class OsList(object):
+
+ def __init__(self):
+ self.os_list = []
+
+ def new_filtered(self, fltr):
+ new_list = OsList()
+ new_list.os_list = [os for os in self.os_list if match_item(os, fltr)]
+ return new_list
+
+ def get_length(self):
+ return len(self.os_list)
+
+ def get_nth(self, index):
+ return self.os_list[index]
+
+
+class Os(object):
+ def __init__(self):
+ self.name = None
+ self.short_id = None
+ self.id = None
+ self.dev_list = None
+
+ def get_all_devices(self, fltr):
+ new_list = DeviceList()
+ new_list.devices = [dev for dev in self.dev_list.devices
+ if match_item(dev, fltr)]
+ return new_list
+
+ def get_name(self):
+ return self.name
+
+
+class DeviceList(object):
+
+ def __init__(self):
+ self.devices = []
+
+ def get_length(self):
+ return len(self.devices)
+
+ def get_nth(self, index):
+ return self.devices[index]
+
+
+class Device(object):
+ def __init__(self):
+ self.name = None
+ self._class = None
+
+ def get_name(self):
+ return self.name
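
A quick illustration of how these fakes are meant to be driven, mirroring the libosinfo calls the tests exercise (the Fedora short-id and device names come from the canned data in Db above):

loader = Loader()
loader.process_default_path()
db = loader.get_db()

os_fltr = Filter.new()
os_fltr.add_constraint('short-id', 'fedora22')
os_list = db.get_os_list().new_filtered(os_fltr)
assert os_list.get_length() == 1

dev_fltr = Filter.new()
dev_fltr.add_constraint('class', 'net')
devs = os_list.get_nth(0).get_all_devices(dev_fltr)
assert devs.get_nth(0).get_name() == 'virtio-net'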
diff --git a/nova/tests/unit/virt/hyperv/test_driver.py b/nova/tests/unit/virt/hyperv/test_driver.py
index 39b72f0a74..4b124e7610 100644
--- a/nova/tests/unit/virt/hyperv/test_driver.py
+++ b/nova/tests/unit/virt/hyperv/test_driver.py
@@ -18,6 +18,7 @@ Unit tests for the Hyper-V Driver.
"""
import platform
+import sys
import mock
from os_win import exceptions as os_win_exc
@@ -88,6 +89,25 @@ class HyperVDriverTestCase(test_base.HyperVBaseTestCase):
self.assertRaises(exception.InstanceNotFound,
self.driver.get_info, mock.sentinel.instance)
+ def test_assert_original_traceback_maintained(self):
+ def bar(self):
+ foo = "foofoo"
+ raise os_win_exc.HyperVVMNotFoundException(vm_name=foo)
+
+ self.driver._vmops.get_info.side_effect = bar
+ try:
+ self.driver.get_info(mock.sentinel.instance)
+ self.fail("Test expected exception, but it was not raised.")
+ except exception.InstanceNotFound:
+ # exception has been raised as expected.
+ _, _, trace = sys.exc_info()
+ while trace.tb_next:
+ # iterate until the original exception source, bar.
+ trace = trace.tb_next
+
+ # original frame will contain the 'foo' variable.
+ self.assertEqual('foofoo', trace.tb_frame.f_locals['foo'])
+
@mock.patch.object(driver.eventhandler, 'InstanceEventHandler')
def test_init_host(self, mock_InstanceEventHandler):
self.driver.init_host(mock.sentinel.host)
@@ -107,8 +127,7 @@ class HyperVDriverTestCase(test_base.HyperVBaseTestCase):
self.driver.list_instances()
self.driver._vmops.list_instances.assert_called_once_with()
- @mock.patch.object(driver.objects.ImageMeta, 'from_dict')
- def test_spawn(self, mock_meta_from_dict):
+ def test_spawn(self):
self.driver.spawn(
mock.sentinel.context, mock.sentinel.instance,
mock.sentinel.image_meta, mock.sentinel.injected_files,
@@ -117,7 +136,7 @@ class HyperVDriverTestCase(test_base.HyperVBaseTestCase):
self.driver._vmops.spawn.assert_called_once_with(
mock.sentinel.context, mock.sentinel.instance,
- mock_meta_from_dict.return_value, mock.sentinel.injected_files,
+ mock.sentinel.image_meta, mock.sentinel.injected_files,
mock.sentinel.admin_password, mock.sentinel.network_info,
mock.sentinel.block_device_info)
@@ -378,8 +397,7 @@ class HyperVDriverTestCase(test_base.HyperVBaseTestCase):
mock.sentinel.network_info, mock.sentinel.block_device_info,
mock.sentinel.power_on)
- @mock.patch.object(driver.objects.ImageMeta, 'from_dict')
- def test_finish_migration(self, mock_meta_from_dict):
+ def test_finish_migration(self):
self.driver.finish_migration(
mock.sentinel.context, mock.sentinel.migration,
mock.sentinel.instance, mock.sentinel.disk_info,
@@ -390,7 +408,7 @@ class HyperVDriverTestCase(test_base.HyperVBaseTestCase):
self.driver._migrationops.finish_migration.assert_called_once_with(
mock.sentinel.context, mock.sentinel.migration,
mock.sentinel.instance, mock.sentinel.disk_info,
- mock.sentinel.network_info, mock_meta_from_dict.return_value,
+ mock.sentinel.network_info, mock.sentinel.image_meta,
mock.sentinel.resize_instance, mock.sentinel.block_device_info,
mock.sentinel.power_on)
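
test_assert_original_traceback_maintained above checks that the driver's exception translation keeps the frame that raised (so 'foo' is still visible in the innermost frame). A minimal sketch of the underlying re-raise pattern; nova targets both Python 2 and 3 here, hence six:

import sys

import six


def reraise_as(new_exc_type, func, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except Exception:
        _, exc_value, tb = sys.exc_info()
        # Raise the translated exception with the original traceback so
        # debuggers can still walk back to the real source frame.
        six.reraise(new_exc_type, new_exc_type(str(exc_value)), tb)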
diff --git a/nova/tests/unit/virt/hyperv/test_eventhandler.py b/nova/tests/unit/virt/hyperv/test_eventhandler.py
index 439c59ce03..c34e6aa3bc 100644
--- a/nova/tests/unit/virt/hyperv/test_eventhandler.py
+++ b/nova/tests/unit/virt/hyperv/test_eventhandler.py
@@ -15,12 +15,12 @@
import eventlet
import mock
+from os_win import constants
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
from nova.tests.unit.virt.hyperv import test_base
from nova import utils
-from nova.virt.hyperv import constants
from nova.virt.hyperv import eventhandler
diff --git a/nova/tests/unit/virt/hyperv/test_hostops.py b/nova/tests/unit/virt/hyperv/test_hostops.py
index ab47d9b8b8..30f03cc111 100644
--- a/nova/tests/unit/virt/hyperv/test_hostops.py
+++ b/nova/tests/unit/virt/hyperv/test_hostops.py
@@ -16,6 +16,7 @@
import datetime
import mock
+from os_win import constants as os_win_const
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import units
@@ -62,7 +63,7 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
self._hostops._hostutils.get_cpus_info.assert_called_once_with()
expected = [mock.call(fkey)
- for fkey in constants.PROCESSOR_FEATURE.keys()]
+ for fkey in os_win_const.PROCESSOR_FEATURE.keys()]
self._hostops._hostutils.is_cpu_feature_present.assert_has_calls(expected)
expected_response = self._get_mock_cpu_info()
self.assertEqual(expected_response, response)
@@ -72,7 +73,7 @@ class HostOpsTestCase(test_base.HyperVBaseTestCase):
'model': self.FAKE_NAME,
'arch': constants.WMI_WIN32_PROCESSOR_ARCHITECTURE[
self.FAKE_ARCHITECTURE],
- 'features': list(constants.PROCESSOR_FEATURE.values()),
+ 'features': list(os_win_const.PROCESSOR_FEATURE.values()),
'topology': {'cores': self.FAKE_NUM_CPUS,
'threads': self.FAKE_NUM_CPUS,
'sockets': self.FAKE_NUM_CPUS}}
diff --git a/nova/tests/unit/virt/hyperv/test_livemigrationops.py b/nova/tests/unit/virt/hyperv/test_livemigrationops.py
index e2ab8f1311..705e5183b3 100644
--- a/nova/tests/unit/virt/hyperv/test_livemigrationops.py
+++ b/nova/tests/unit/virt/hyperv/test_livemigrationops.py
@@ -72,15 +72,6 @@ class LiveMigrationOpsTestCase(test_base.HyperVBaseTestCase):
def test_live_migration_exception(self):
self._test_live_migration(side_effect=os_win_exc.HyperVException)
- def test_live_migration_wrong_os_version(self):
- self._livemigrops._livemigrutils = None
- self.assertRaises(NotImplementedError,
- self._livemigrops.live_migration, self.context,
- instance_ref=mock.DEFAULT,
- dest=mock.sentinel.DESTINATION,
- post_method=mock.DEFAULT,
- recover_method=mock.DEFAULT)
-
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
'.ebs_root_in_block_devices')
@mock.patch('nova.virt.hyperv.imagecache.ImageCache.get_cached_image')
diff --git a/nova/tests/unit/virt/hyperv/test_vmops.py b/nova/tests/unit/virt/hyperv/test_vmops.py
index e1366be958..c0fab8be8e 100644
--- a/nova/tests/unit/virt/hyperv/test_vmops.py
+++ b/nova/tests/unit/virt/hyperv/test_vmops.py
@@ -16,6 +16,7 @@ import os
from eventlet import timeout as etimeout
import mock
+from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from oslo_concurrency import processutils
from oslo_config import cfg
@@ -59,6 +60,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
self._vmops = vmops.VMOps()
self._vmops._vmutils = mock.MagicMock()
+ self._vmops._metricsutils = mock.MagicMock()
self._vmops._vhdutils = mock.MagicMock()
self._vmops._pathutils = mock.MagicMock()
self._vmops._hostutils = mock.MagicMock()
@@ -416,7 +418,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_instance.name, mock.sentinel.ID, mock.sentinel.ADDRESS)
mock_vif_driver.plug.assert_called_once_with(mock_instance,
fake_network_info)
- mock_enable = self._vmops._vmutils.enable_vm_metrics_collection
+ mock_enable = self._vmops._metricsutils.enable_vm_metrics_collection
if enable_instance_metrics:
mock_enable.assert_called_once_with(mock_instance.name)
@@ -671,19 +673,19 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
def test_reboot_hard(self):
self._test_reboot(vmops.REBOOT_TYPE_HARD,
- constants.HYPERV_VM_STATE_REBOOT)
+ os_win_const.HYPERV_VM_STATE_REBOOT)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_reboot_soft(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = True
self._test_reboot(vmops.REBOOT_TYPE_SOFT,
- constants.HYPERV_VM_STATE_ENABLED)
+ os_win_const.HYPERV_VM_STATE_ENABLED)
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
def test_reboot_soft_failed(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = False
self._test_reboot(vmops.REBOOT_TYPE_SOFT,
- constants.HYPERV_VM_STATE_REBOOT)
+ os_win_const.HYPERV_VM_STATE_REBOOT)
@mock.patch("nova.virt.hyperv.vmops.VMOps.power_on")
@mock.patch("nova.virt.hyperv.vmops.VMOps._soft_shutdown")
@@ -767,28 +769,28 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.pause(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
- mock_instance, constants.HYPERV_VM_STATE_PAUSED)
+ mock_instance, os_win_const.HYPERV_VM_STATE_PAUSED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_unpause(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.unpause(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
- mock_instance, constants.HYPERV_VM_STATE_ENABLED)
+ mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_suspend(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.suspend(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
- mock_instance, constants.HYPERV_VM_STATE_SUSPENDED)
+ mock_instance, os_win_const.HYPERV_VM_STATE_SUSPENDED)
@mock.patch('nova.virt.hyperv.vmops.VMOps._set_vm_state')
def test_resume(self, mock_set_vm_state):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops.resume(instance=mock_instance)
mock_set_vm_state.assert_called_once_with(
- mock_instance, constants.HYPERV_VM_STATE_ENABLED)
+ mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
def _test_power_off(self, timeout, set_state_expected=True):
instance = fake_instance.fake_instance_obj(self.context)
@@ -797,7 +799,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
if set_state_expected:
mock_set_state.assert_called_once_with(
- instance, constants.HYPERV_VM_STATE_DISABLED)
+ instance, os_win_const.HYPERV_VM_STATE_DISABLED)
def test_power_off_hard(self):
self._test_power_off(timeout=0)
@@ -832,7 +834,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
self._vmops.power_on(mock_instance)
mock_set_vm_state.assert_called_once_with(
- mock_instance, constants.HYPERV_VM_STATE_ENABLED)
+ mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'
'.fix_instance_volume_disk_paths')
@@ -846,7 +848,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_fix_instance_vol_paths.assert_called_once_with(
mock_instance.name, mock.sentinel.block_device_info)
mock_set_vm_state.assert_called_once_with(
- mock_instance, constants.HYPERV_VM_STATE_ENABLED)
+ mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
@mock.patch.object(vmops.VMOps, 'log_vm_serial_output')
@mock.patch.object(vmops.VMOps, '_delete_vm_console_log')
@@ -857,22 +859,22 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
self._vmops._set_vm_state(mock_instance, state)
self._vmops._vmutils.set_vm_state.assert_called_once_with(
mock_instance.name, state)
- if state in (constants.HYPERV_VM_STATE_DISABLED,
- constants.HYPERV_VM_STATE_REBOOT):
+ if state in (os_win_const.HYPERV_VM_STATE_DISABLED,
+ os_win_const.HYPERV_VM_STATE_REBOOT):
mock_delete_vm_console_log.assert_called_once_with(mock_instance)
- if state in (constants.HYPERV_VM_STATE_ENABLED,
- constants.HYPERV_VM_STATE_REBOOT):
+ if state in (os_win_const.HYPERV_VM_STATE_ENABLED,
+ os_win_const.HYPERV_VM_STATE_REBOOT):
mock_log_vm_output.assert_called_once_with(mock_instance.name,
mock_instance.uuid)
def test_set_vm_state_disabled(self):
- self._test_set_vm_state(state=constants.HYPERV_VM_STATE_DISABLED)
+ self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_DISABLED)
def test_set_vm_state_enabled(self):
- self._test_set_vm_state(state=constants.HYPERV_VM_STATE_ENABLED)
+ self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_ENABLED)
def test_set_vm_state_reboot(self):
- self._test_set_vm_state(state=constants.HYPERV_VM_STATE_REBOOT)
+ self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_REBOOT)
def test_set_vm_state_exception(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
@@ -883,18 +885,18 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
mock_instance, mock.sentinel.STATE)
def test_get_vm_state(self):
- summary_info = {'EnabledState': constants.HYPERV_VM_STATE_DISABLED}
+ summary_info = {'EnabledState': os_win_const.HYPERV_VM_STATE_DISABLED}
with mock.patch.object(self._vmops._vmutils,
'get_vm_summary_info') as mock_get_summary_info:
mock_get_summary_info.return_value = summary_info
response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
- self.assertEqual(response, constants.HYPERV_VM_STATE_DISABLED)
+ self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED)
@mock.patch.object(vmops.VMOps, '_get_vm_state')
def test_wait_for_power_off_true(self, mock_get_state):
- mock_get_state.return_value = constants.HYPERV_VM_STATE_DISABLED
+ mock_get_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED
result = self._vmops._wait_for_power_off(
mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
@@ -1096,7 +1098,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
@mock.patch.object(vmops.VMOps, '_get_vm_state')
def test_check_hotplug_available_vm_disabled(self, mock_get_vm_state):
fake_vm = fake_instance.fake_instance_obj(self.context)
- mock_get_vm_state.return_value = constants.HYPERV_VM_STATE_DISABLED
+ mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED
result = self._vmops._check_hotplug_available(fake_vm)
@@ -1112,7 +1114,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
vm_gen=constants.VM_GEN_2, windows_version=_WIN_VERSION_10):
fake_vm = fake_instance.fake_instance_obj(self.context)
- mock_get_vm_state.return_value = constants.HYPERV_VM_STATE_ENABLED
+ mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_ENABLED
self._vmops._vmutils.get_vm_generation.return_value = vm_gen
fake_check_win_vers = self._vmops._hostutils.check_min_windows_version
fake_check_win_vers.return_value = (
diff --git a/nova/tests/unit/virt/hyperv/test_volumeops.py b/nova/tests/unit/virt/hyperv/test_volumeops.py
index dd9387b7a1..f4a8401f1c 100644
--- a/nova/tests/unit/virt/hyperv/test_volumeops.py
+++ b/nova/tests/unit/virt/hyperv/test_volumeops.py
@@ -476,7 +476,7 @@ class SMBFSVolumeDriverTestCase(test_base.HyperVBaseTestCase):
super(SMBFSVolumeDriverTestCase, self).setUp()
self._volume_driver = volumeops.SMBFSVolumeDriver()
self._volume_driver._vmutils = mock.MagicMock()
- self._volume_driver._pathutils = mock.MagicMock()
+ self._volume_driver._smbutils = mock.MagicMock()
self._volume_driver._volutils = mock.MagicMock()
@mock.patch.object(volumeops.SMBFSVolumeDriver,
@@ -579,8 +579,8 @@ class SMBFSVolumeDriverTestCase(test_base.HyperVBaseTestCase):
@mock.patch.object(volumeops.SMBFSVolumeDriver, '_parse_credentials')
def _test_ensure_mounted(self, mock_parse_credentials, is_mounted=False):
- mock_mount_smb_share = self._volume_driver._pathutils.mount_smb_share
- self._volume_driver._pathutils.check_smb_mapping.return_value = (
+ mock_mount_smb_share = self._volume_driver._smbutils.mount_smb_share
+ self._volume_driver._smbutils.check_smb_mapping.return_value = (
is_mounted)
mock_parse_credentials.return_value = (
self._FAKE_USERNAME, self._FAKE_PASSWORD)
@@ -607,6 +607,6 @@ class SMBFSVolumeDriverTestCase(test_base.HyperVBaseTestCase):
block_device_mapping = [
{'connection_info': self._FAKE_CONNECTION_INFO}]
self._volume_driver.disconnect_volumes(block_device_mapping)
- mock_unmount_share = self._volume_driver._pathutils.unmount_smb_share
+ mock_unmount_share = self._volume_driver._smbutils.unmount_smb_share
mock_unmount_share.assert_called_once_with(
self._FAKE_SHARE_NORMALIZED)
diff --git a/nova/tests/unit/virt/ironic/test_client_wrapper.py b/nova/tests/unit/virt/ironic/test_client_wrapper.py
index 05bf9a6c2a..c37a0ef176 100644
--- a/nova/tests/unit/virt/ironic/test_client_wrapper.py
+++ b/nova/tests/unit/virt/ironic/test_client_wrapper.py
@@ -73,7 +73,8 @@ class IronicClientWrapperTestCase(test.NoDBTestCase):
'os_endpoint_type': 'public',
'ironic_url': CONF.ironic.api_endpoint,
'max_retries': CONF.ironic.api_max_retries,
- 'retry_interval': CONF.ironic.api_retry_interval}
+ 'retry_interval': CONF.ironic.api_retry_interval,
+ 'os_ironic_api_version': '1.8'}
mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
**expected)
@@ -86,7 +87,8 @@ class IronicClientWrapperTestCase(test.NoDBTestCase):
expected = {'os_auth_token': 'fake-token',
'ironic_url': CONF.ironic.api_endpoint,
'max_retries': CONF.ironic.api_max_retries,
- 'retry_interval': CONF.ironic.api_retry_interval}
+ 'retry_interval': CONF.ironic.api_retry_interval,
+ 'os_ironic_api_version': '1.8'}
mock_ir_cli.assert_called_once_with(CONF.ironic.api_version,
**expected)
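
Both client-wrapper cases now pin os_ironic_api_version, so the driver can rely on microversion-gated node fields (such as the ones requested via _NODE_FIELDS elsewhere in this change). An illustrative standalone construction with the same kwargs; the endpoint URL and token are placeholders:

from ironicclient import client as ir_client

cli = ir_client.get_client(
    1,
    os_auth_token='fake-token',
    ironic_url='http://ironic.example.com:6385',
    max_retries=3,
    retry_interval=5,
    # Pin the API microversion the driver was written against.
    os_ironic_api_version='1.8')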
diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py
index 01e835c74e..e573a76065 100644
--- a/nova/tests/unit/virt/ironic/test_driver.py
+++ b/nova/tests/unit/virt/ironic/test_driver.py
@@ -135,25 +135,25 @@ class IronicDriverTestCase(test.NoDBTestCase):
instance_uuid=self.instance_uuid)
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid)
- ironicclient = cw.IronicClientWrapper()
-
mock_gbiui.return_value = node
- result = ironic_driver._validate_instance_and_node(ironicclient,
- instance)
+ result = self.driver._validate_instance_and_node(instance)
self.assertEqual(result.uuid, node_uuid)
+ mock_gbiui.assert_called_once_with(instance.uuid,
+ fields=ironic_driver._NODE_FIELDS)
@mock.patch.object(FAKE_CLIENT.node, 'get_by_instance_uuid')
def test__validate_instance_and_node_failed(self, mock_gbiui):
- ironicclient = cw.IronicClientWrapper()
mock_gbiui.side_effect = ironic_exception.NotFound()
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=self.instance_uuid)
self.assertRaises(exception.InstanceNotFound,
- ironic_driver._validate_instance_and_node,
- ironicclient, instance)
+ self.driver._validate_instance_and_node, instance)
+ mock_gbiui.assert_called_once_with(instance.uuid,
+ fields=ironic_driver._NODE_FIELDS)
@mock.patch.object(objects.Instance, 'refresh')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
def test__wait_for_active_pass(self, fake_validate, fake_refresh):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
@@ -161,12 +161,13 @@ class IronicDriverTestCase(test.NoDBTestCase):
provision_state=ironic_states.DEPLOYING)
fake_validate.return_value = node
- self.driver._wait_for_active(FAKE_CLIENT, instance)
- fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
+ self.driver._wait_for_active(instance)
+ fake_validate.assert_called_once_with(instance)
fake_refresh.assert_called_once_with()
@mock.patch.object(objects.Instance, 'refresh')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
def test__wait_for_active_done(self, fake_validate, fake_refresh):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
@@ -175,13 +176,13 @@ class IronicDriverTestCase(test.NoDBTestCase):
fake_validate.return_value = node
self.assertRaises(loopingcall.LoopingCallDone,
- self.driver._wait_for_active,
- FAKE_CLIENT, instance)
- fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
+ self.driver._wait_for_active, instance)
+ fake_validate.assert_called_once_with(instance)
fake_refresh.assert_called_once_with()
@mock.patch.object(objects.Instance, 'refresh')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
def test__wait_for_active_fail(self, fake_validate, fake_refresh):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
@@ -190,21 +191,20 @@ class IronicDriverTestCase(test.NoDBTestCase):
fake_validate.return_value = node
self.assertRaises(exception.InstanceDeployFailure,
- self.driver._wait_for_active,
- FAKE_CLIENT, instance)
- fake_validate.assert_called_once_with(FAKE_CLIENT, instance)
+ self.driver._wait_for_active, instance)
+ fake_validate.assert_called_once_with(instance)
fake_refresh.assert_called_once_with()
@mock.patch.object(objects.Instance, 'refresh')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
def _wait_for_active_abort(self, instance_params, fake_validate,
fake_refresh):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid(),
**instance_params)
self.assertRaises(exception.InstanceDeployFailure,
- self.driver._wait_for_active,
- FAKE_CLIENT, instance)
+ self.driver._wait_for_active, instance)
# Assert _validate_instance_and_node wasn't called
self.assertFalse(fake_validate.called)
fake_refresh.assert_called_once_with()
@@ -218,7 +218,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
def test__wait_for_active_abort_error(self):
self._wait_for_active_abort({'vm_state': vm_states.ERROR})
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
def test__wait_for_power_state_pass(self, fake_validate):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
@@ -226,11 +227,11 @@ class IronicDriverTestCase(test.NoDBTestCase):
target_power_state=ironic_states.POWER_OFF)
fake_validate.return_value = node
- self.driver._wait_for_power_state(
- FAKE_CLIENT, instance, 'fake message')
+ self.driver._wait_for_power_state(instance, 'fake message')
self.assertTrue(fake_validate.called)
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
def test__wait_for_power_state_ok(self, fake_validate):
instance = fake_instance.fake_instance_obj(self.ctx,
uuid=uuidutils.generate_uuid())
@@ -239,8 +240,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
fake_validate.return_value = node
self.assertRaises(loopingcall.LoopingCallDone,
- self.driver._wait_for_power_state,
- FAKE_CLIENT, instance, 'fake message')
+ self.driver._wait_for_power_state, instance, 'fake message')
self.assertTrue(fake_validate.called)
def _test__node_resource(self, has_inst_info):
@@ -526,7 +526,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
uuid=self.instance_uuid)
self.assertTrue(self.driver.instance_exists(instance))
mock_call.assert_called_once_with('node.get_by_instance_uuid',
- self.instance_uuid)
+ self.instance_uuid,
+ fields=ironic_driver._NODE_FIELDS)
@mock.patch.object(cw.IronicClientWrapper, 'call')
def test_instance_exists_fail(self, mock_call):
@@ -535,7 +536,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
uuid=self.instance_uuid)
self.assertFalse(self.driver.instance_exists(instance))
mock_call.assert_called_once_with('node.get_by_instance_uuid',
- self.instance_uuid)
+ self.instance_uuid,
+ fields=ironic_driver._NODE_FIELDS)
@mock.patch.object(cw.IronicClientWrapper, 'call')
@mock.patch.object(objects.Instance, 'get_by_uuid')
@@ -591,7 +593,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_get.return_value = node
mock_list.return_value = []
self.assertTrue(self.driver.node_is_available(node.uuid))
- mock_get.assert_called_with(node.uuid)
+ mock_get.assert_called_with(node.uuid,
+ fields=ironic_driver._NODE_FIELDS)
mock_list.assert_called_with(detail=True, limit=0)
mock_get.side_effect = ironic_exception.NotFound
@@ -722,7 +725,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
result = self.driver.get_available_resource(node.uuid)
self.assertEqual(fake_resource, result)
mock_nr.assert_called_once_with(node)
- mock_get.assert_called_once_with(node.uuid)
+ mock_get.assert_called_once_with(node.uuid,
+ fields=ironic_driver._NODE_FIELDS)
@mock.patch.object(FAKE_CLIENT.node, 'get')
@mock.patch.object(FAKE_CLIENT.node, 'list')
@@ -823,7 +827,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.spawn(self.ctx, instance, image_meta, [], None)
- mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.get.assert_called_once_with(
+ node_uuid, fields=ironic_driver._NODE_FIELDS)
mock_node.validate.assert_called_once_with(node_uuid)
mock_adf.assert_called_once_with(node, instance,
test.MatchType(objects.ImageMeta),
@@ -837,7 +842,6 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.assertFalse(mock_save.called)
mock_looping.assert_called_once_with(mock_wait_active,
- FAKE_CLIENT_WRAPPER,
instance)
fake_looping_call.start.assert_called_once_with(
interval=CONF.ironic.api_retry_interval)
@@ -898,7 +902,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
node = ironic_utils.get_test_node(driver='fake')
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
- image_meta = ironic_utils.get_test_image_meta_object()
+ image_meta = ironic_utils.get_test_image_meta()
flavor = ironic_utils.get_test_flavor()
self.driver._add_driver_fields(node, instance, image_meta, flavor)
expected_patch = [{'path': '/instance_info/image_source', 'op': 'add',
@@ -941,7 +945,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
node = ironic_utils.get_test_node(driver='fake')
instance = fake_instance.fake_instance_obj(self.ctx,
node=node.uuid)
- image_meta = ironic_utils.get_test_image_meta_object()
+ image_meta = ironic_utils.get_test_image_meta()
flavor = ironic_utils.get_test_flavor()
self.assertRaises(exception.InstanceDeployFailure,
self.driver._add_driver_fields,
@@ -959,7 +963,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
expected_patch = [{'path': '/instance_uuid', 'op': 'remove'}]
mock_update.assert_called_once_with(node.uuid, expected_patch)
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__cleanup_deploy_instance_already_removed(self, mock_update,
mock_validate):
@@ -974,7 +979,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
flavor=flavor)
# assert node.update is not called
self.assertFalse(mock_update.called)
- mock_validate.assert_called_once_with(mock.ANY, instance)
+ mock_validate.assert_called_once_with(instance)
@mock.patch.object(FAKE_CLIENT.node, 'update')
def test__cleanup_deploy_without_flavor(self, mock_update):
@@ -1020,7 +1025,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.assertRaises(exception.ValidationError, self.driver.spawn,
self.ctx, instance, image_meta, [], None)
- mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.get.assert_called_once_with(
+ node_uuid, fields=ironic_driver._NODE_FIELDS)
mock_node.validate.assert_called_once_with(node_uuid)
@mock.patch.object(configdrive, 'required_by')
@@ -1048,7 +1054,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.assertRaises(TestException, self.driver.spawn,
self.ctx, instance, image_meta, [], None)
- mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.get.assert_called_once_with(
+ node_uuid, fields=ironic_driver._NODE_FIELDS)
mock_node.validate.assert_called_once_with(node_uuid)
mock_cleanup_deploy.assert_called_with(self.ctx, node, instance, None,
flavor=flavor)
@@ -1076,7 +1083,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.assertRaises(exception.NovaException, self.driver.spawn,
self.ctx, instance, image_meta, [], None)
- mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.get.assert_called_once_with(
+ node_uuid, fields=ironic_driver._NODE_FIELDS)
mock_node.validate.assert_called_once_with(node_uuid)
mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
instance, None,
@@ -1105,7 +1113,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.driver.spawn,
self.ctx, instance, image_meta, [], None)
- mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.get.assert_called_once_with(
+ node_uuid, fields=ironic_driver._NODE_FIELDS)
mock_node.validate.assert_called_once_with(node_uuid)
mock_cleanup_deploy.assert_called_once_with(self.ctx, node,
instance, None,
@@ -1184,7 +1193,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_node.set_provision_state.side_effect = fake_set_provision_state
self.driver.destroy(self.ctx, instance, network_info, None)
- mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
+ mock_node.get_by_instance_uuid.assert_called_with(
+ instance.uuid, fields=ironic_driver._NODE_FIELDS)
mock_cleanup_deploy.assert_called_with(self.ctx, node,
instance, network_info)
@@ -1201,7 +1211,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
self._test_destroy(state)
@mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
def test_destroy_trigger_undeploy_fail(self, fake_validate, mock_sps):
node_uuid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'
node = ironic_utils.get_test_node(driver='fake', uuid=node_uuid,
@@ -1213,19 +1224,19 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.assertRaises(exception.NovaException, self.driver.destroy,
self.ctx, instance, None, None)
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- def _test__unprovision_instance(self, mock_validate_inst, state=None):
- fake_ironic_client = mock.Mock()
+ @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
+ def _test__unprovision_instance(self, mock_validate_inst, mock_set_pstate,
+ state=None):
node = ironic_utils.get_test_node(
driver='fake',
provision_state=state)
instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
mock_validate_inst.return_value = node
- self.driver._unprovision(fake_ironic_client, instance, node)
- mock_validate_inst.assert_called_once_with(fake_ironic_client,
- instance)
- fake_ironic_client.call.assert_called_once_with(
- "node.set_provision_state", node.uuid, "deleted")
+ self.driver._unprovision(instance, node)
+ mock_validate_inst.assert_called_once_with(instance)
+ mock_set_pstate.assert_called_once_with(node.uuid, "deleted")
def test__unprovision_cleaning(self):
self._test__unprovision_instance(state=ironic_states.CLEANING)
@@ -1233,10 +1244,12 @@ class IronicDriverTestCase(test.NoDBTestCase):
def test__unprovision_cleanwait(self):
self._test__unprovision_instance(state=ironic_states.CLEANWAIT)
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- def test__unprovision_fail_max_retries(self, mock_validate_inst):
+ @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
+ def test__unprovision_fail_max_retries(self, mock_validate_inst,
+ mock_set_pstate):
CONF.set_default('api_max_retries', default=2, group='ironic')
- fake_ironic_client = mock.Mock()
node = ironic_utils.get_test_node(
driver='fake',
provision_state=ironic_states.ACTIVE)
@@ -1244,27 +1257,26 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_validate_inst.return_value = node
self.assertRaises(exception.NovaException, self.driver._unprovision,
- fake_ironic_client, instance, node)
- expected_calls = (mock.call(mock.ANY, instance),
- mock.call(mock.ANY, instance))
+ instance, node)
+ expected_calls = (mock.call(instance),
+ mock.call(instance))
mock_validate_inst.assert_has_calls(expected_calls)
- fake_ironic_client.call.assert_called_once_with(
- "node.set_provision_state", node.uuid, "deleted")
+ mock_set_pstate.assert_called_once_with(node.uuid, "deleted")
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
- def test__unprovision_instance_not_found(self, mock_validate_inst):
- fake_ironic_client = mock.Mock()
+ @mock.patch.object(FAKE_CLIENT.node, 'set_provision_state')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
+ def test__unprovision_instance_not_found(self, mock_validate_inst,
+ mock_set_pstate):
node = ironic_utils.get_test_node(
driver='fake', provision_state=ironic_states.DELETING)
instance = fake_instance.fake_instance_obj(self.ctx, node=node.uuid)
mock_validate_inst.side_effect = exception.InstanceNotFound(
instance_id='fake')
- self.driver._unprovision(fake_ironic_client, instance, node)
- mock_validate_inst.assert_called_once_with(fake_ironic_client,
- instance)
- fake_ironic_client.call.assert_called_once_with(
- "node.set_provision_state", node.uuid, "deleted")
+ self.driver._unprovision(instance, node)
+ mock_validate_inst.assert_called_once_with(instance)
+ mock_set_pstate.assert_called_once_with(node.uuid, "deleted")
@mock.patch.object(FAKE_CLIENT, 'node')
def test_destroy_unassociate_fail(self, mock_node):
@@ -1279,10 +1291,12 @@ class IronicDriverTestCase(test.NoDBTestCase):
self.ctx, instance, None, None)
mock_node.set_provision_state.assert_called_once_with(node_uuid,
'deleted')
- mock_node.get_by_instance_uuid.assert_called_with(instance.uuid)
+ mock_node.get_by_instance_uuid.assert_called_with(
+ instance.uuid, fields=ironic_driver._NODE_FIELDS)
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_reboot(self, mock_sp, fake_validate, mock_looping):
node = ironic_utils.get_test_node()
@@ -1296,14 +1310,16 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_sp.assert_called_once_with(node.uuid, 'reboot')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_power_off(self, mock_sp, fake_validate, mock_looping):
self._test_power_on_off(mock_sp, fake_validate, mock_looping,
method_name='power_off')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
- @mock.patch.object(ironic_driver, '_validate_instance_and_node')
+ @mock.patch.object(ironic_driver.IronicDriver,
+ '_validate_instance_and_node')
@mock.patch.object(FAKE_CLIENT.node, 'set_power_state')
def test_power_on(self, mock_sp, fake_validate, mock_looping):
self._test_power_on_off(mock_sp, fake_validate, mock_looping,
@@ -1364,7 +1380,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
network_info = utils.get_test_network_info()
self.driver.plug_vifs(instance, network_info)
- mock_get.assert_called_once_with(node_uuid)
+ mock_get.assert_called_once_with(node_uuid,
+ fields=ironic_driver._NODE_FIELDS)
mock__plug_vifs.assert_called_once_with(node, instance, network_info)
@mock.patch.object(FAKE_CLIENT.port, 'update')
@@ -1433,7 +1450,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
utils.get_test_network_info())
# asserts
- mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.get.assert_called_once_with(
+ node_uuid, fields=ironic_driver._NODE_FIELDS)
mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
mock_update.assert_called_once_with(port.uuid, expected_patch)
@@ -1449,7 +1467,8 @@ class IronicDriverTestCase(test.NoDBTestCase):
instance = fake_instance.fake_instance_obj(self.ctx, node=node_uuid)
self.driver.unplug_vifs(instance, utils.get_test_network_info())
- mock_node.get.assert_called_once_with(node_uuid)
+ mock_node.get.assert_called_once_with(
+ node_uuid, fields=ironic_driver._NODE_FIELDS)
mock_node.list_ports.assert_called_once_with(node_uuid, detail=True)
# assert port.update() was not called
self.assertFalse(mock_update.called)
@@ -1491,13 +1510,6 @@ class IronicDriverTestCase(test.NoDBTestCase):
mock_risr.assert_called_once_with(instance)
@mock.patch.object(firewall.NoopFirewallDriver,
- 'refresh_provider_fw_rules', create=True)
- def test_refresh_provider_fw_rules(self, mock_rpfr):
- fake_instance.fake_instance_obj(self.ctx)
- self.driver.refresh_provider_fw_rules()
- mock_rpfr.assert_called_once_with()
-
- @mock.patch.object(firewall.NoopFirewallDriver,
'refresh_instance_security_rules', create=True)
def test_refresh_security_group_rules(self, mock_risr):
fake_group = 'fake-security-group-members'
@@ -1546,9 +1558,7 @@ class IronicDriverTestCase(test.NoDBTestCase):
flavor, preserve)
mock_set_pstate.assert_called_once_with(node_uuid,
ironic_states.REBUILD)
- mock_looping.assert_called_once_with(mock_wait_active,
- FAKE_CLIENT_WRAPPER,
- instance)
+ mock_looping.assert_called_once_with(mock_wait_active, instance)
fake_looping_call.start.assert_called_once_with(
interval=CONF.ironic.api_retry_interval)
fake_looping_call.wait.assert_called_once_with()
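The hunks above retarget @mock.patch.object from the ironic_driver module to the
IronicDriver class, and drop the explicit client argument from the recorded calls.
A minimal, self-contained sketch of that pattern follows (Driver and its method
names are illustrative stand-ins, not Nova code); note that patching the attribute
on the class means the mock is called without a bound self, so only the instance
argument is asserted:

import unittest

import mock  # the external mock library used by these tests;
             # unittest.mock works the same on modern Python


class Driver(object):
    def _validate_instance_and_node(self, instance):
        raise RuntimeError('real helper must not run in unit tests')

    def destroy(self, instance):
        return self._validate_instance_and_node(instance)


class DriverTestCase(unittest.TestCase):
    # patch the method on the class, not a function on the module
    @mock.patch.object(Driver, '_validate_instance_and_node')
    def test_destroy(self, mock_validate):
        mock_validate.return_value = 'fake-node'
        self.assertEqual('fake-node', Driver().destroy('fake-instance'))
        # no client argument any more; only the instance is recorded
        mock_validate.assert_called_once_with('fake-instance')


if __name__ == '__main__':
    unittest.main()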
diff --git a/nova/tests/unit/virt/ironic/test_patcher.py b/nova/tests/unit/virt/ironic/test_patcher.py
index f818db734e..423745e8b2 100644
--- a/nova/tests/unit/virt/ironic/test_patcher.py
+++ b/nova/tests/unit/virt/ironic/test_patcher.py
@@ -28,7 +28,7 @@ class IronicDriverFieldsTestCase(test.NoDBTestCase):
def setUp(self):
super(IronicDriverFieldsTestCase, self).setUp()
- self.image_meta = ironic_utils.get_test_image_meta_object()
+ self.image_meta = ironic_utils.get_test_image_meta()
self.flavor = ironic_utils.get_test_flavor()
self.ctx = nova_context.get_admin_context()
self.instance = fake_instance.fake_instance_obj(self.ctx)
diff --git a/nova/tests/unit/virt/ironic/utils.py b/nova/tests/unit/virt/ironic/utils.py
index ea8f626d39..c55966f713 100644
--- a/nova/tests/unit/virt/ironic/utils.py
+++ b/nova/tests/unit/virt/ironic/utils.py
@@ -74,12 +74,8 @@ def get_test_flavor(**kw):
def get_test_image_meta(**kw):
- return {'id': kw.get('id', 'cccccccc-cccc-cccc-cccc-cccccccccccc')}
-
-
-def get_test_image_meta_object(**kw):
return objects.ImageMeta.from_dict(
- get_test_image_meta(**kw))
+ {'id': kw.get('id', 'cccccccc-cccc-cccc-cccc-cccccccccccc')})
class FakePortClient(object):
@@ -96,10 +92,10 @@ class FakeNodeClient(object):
def list(self, detail=False):
return []
- def get(self, node_uuid):
+ def get(self, node_uuid, fields=None):
pass
- def get_by_instance_uuid(self, instance_uuid):
+ def get_by_instance_uuid(self, instance_uuid, fields=None):
pass
def list_ports(self, node_uuid):
diff --git a/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py b/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py
index 23fdef5911..ebd2f73580 100644
--- a/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py
+++ b/nova/tests/unit/virt/libvirt/fake_libvirt_utils.py
@@ -13,6 +13,7 @@
# under the License.
import os
+
from six.moves import StringIO
from nova.virt.libvirt import utils as libvirt_utils
@@ -23,6 +24,8 @@ disk_sizes = {}
disk_backing_files = {}
disk_type = "qcow2"
+RESIZE_SNAPSHOT_NAME = libvirt_utils.RESIZE_SNAPSHOT_NAME
+
def create_image(disk_format, path, size):
pass
@@ -74,6 +77,10 @@ def chown(path, owner):
pass
+def update_mtime(path):
+ pass
+
+
def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
files[out_path] = ''
diff --git a/nova/tests/unit/virt/libvirt/fakelibvirt.py b/nova/tests/unit/virt/libvirt/fakelibvirt.py
index 085064b892..d95661cdff 100644
--- a/nova/tests/unit/virt/libvirt/fakelibvirt.py
+++ b/nova/tests/unit/virt/libvirt/fakelibvirt.py
@@ -91,6 +91,7 @@ VIR_CRED_EXTERNAL = 9
VIR_MIGRATE_LIVE = 1
VIR_MIGRATE_PEER2PEER = 2
VIR_MIGRATE_TUNNELLED = 4
+VIR_MIGRATE_PERSIST_DEST = 8
VIR_MIGRATE_UNDEFINE_SOURCE = 16
VIR_MIGRATE_NON_SHARED_INC = 128
@@ -591,6 +592,13 @@ class Domain(object):
error_code=VIR_ERR_INTERNAL_ERROR,
error_domain=VIR_FROM_QEMU)
+ def migrateToURI3(self, dconnuri, params, logical_sum):
+ raise make_libvirtError(
+ libvirtError,
+ "Migration always fails for fake libvirt!",
+ error_code=VIR_ERR_INTERNAL_ERROR,
+ error_domain=VIR_FROM_QEMU)
+
def migrateSetMaxDowntime(self, downtime):
pass
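migrateToURI3 joins the other fake migration methods in failing unconditionally;
the fake exists only to drive the caller's error-handling path without a real
hypervisor. A generic sketch of that test-double technique (FakeError and
FakeDomain are illustrative, not the fakelibvirt API):

class FakeError(Exception):
    """Stand-in for libvirtError; carries a code the caller may inspect."""
    def __init__(self, msg, error_code=None):
        super(FakeError, self).__init__(msg)
        self.error_code = error_code


class FakeDomain(object):
    def migrate(self, uri):
        # the fake always fails, so tests can only exercise error handling
        raise FakeError('Migration always fails for fake libvirt!',
                        error_code=1)


try:
    FakeDomain().migrate('qemu+tcp://dest/system')
except FakeError as e:
    assert e.error_code == 1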
diff --git a/nova/tests/unit/virt/libvirt/storage/test_rbd.py b/nova/tests/unit/virt/libvirt/storage/test_rbd.py
index 2ae0293831..6c25c22b6e 100644
--- a/nova/tests/unit/virt/libvirt/storage/test_rbd.py
+++ b/nova/tests/unit/virt/libvirt/storage/test_rbd.py
@@ -13,11 +13,13 @@
import mock
+from nova.compute import task_states
from nova import exception
from nova import objects
from nova import test
from nova import utils
from nova.virt.libvirt.storage import rbd_utils
+from nova.virt.libvirt import utils as libvirt_utils
CEPH_MON_DUMP = """dumped monmap epoch 1
@@ -75,6 +77,7 @@ class RbdTestCase(test.NoDBTestCase):
self.driver = rbd_utils.RBDDriver(self.rbd_pool, None, None)
self.volume_name = u'volume-00000001'
+ self.snap_name = u'test-snap'
def tearDown(self):
super(RbdTestCase, self).tearDown()
@@ -100,7 +103,7 @@ class RbdTestCase(test.NoDBTestCase):
self.assertFalse(self.driver.is_cloneable({'url': loc},
image_meta))
- @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ @mock.patch.object(rbd_utils.RBDDriver, 'get_fsid')
@mock.patch.object(rbd_utils, 'rbd')
@mock.patch.object(rbd_utils, 'rados')
def test_cloneable(self, mock_rados, mock_rbd, mock_get_fsid):
@@ -110,7 +113,7 @@ class RbdTestCase(test.NoDBTestCase):
self.assertTrue(self.driver.is_cloneable(location, image_meta))
self.assertTrue(mock_get_fsid.called)
- @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ @mock.patch.object(rbd_utils.RBDDriver, 'get_fsid')
def test_uncloneable_different_fsid(self, mock_get_fsid):
mock_get_fsid.return_value = 'abc'
location = {'url': 'rbd://def/pool/image/snap'}
@@ -119,7 +122,7 @@ class RbdTestCase(test.NoDBTestCase):
self.driver.is_cloneable(location, image_meta))
self.assertTrue(mock_get_fsid.called)
- @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ @mock.patch.object(rbd_utils.RBDDriver, 'get_fsid')
@mock.patch.object(rbd_utils, 'RBDVolumeProxy')
@mock.patch.object(rbd_utils, 'rbd')
@mock.patch.object(rbd_utils, 'rados')
@@ -137,7 +140,7 @@ class RbdTestCase(test.NoDBTestCase):
snapshot='snap', read_only=True)
self.assertTrue(mock_get_fsid.called)
- @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ @mock.patch.object(rbd_utils.RBDDriver, 'get_fsid')
def test_uncloneable_bad_format(self, mock_get_fsid):
mock_get_fsid.return_value = 'abc'
location = {'url': 'rbd://abc/pool/image/snap'}
@@ -148,7 +151,7 @@ class RbdTestCase(test.NoDBTestCase):
self.driver.is_cloneable(location, image_meta))
self.assertTrue(mock_get_fsid.called)
- @mock.patch.object(rbd_utils.RBDDriver, '_get_fsid')
+ @mock.patch.object(rbd_utils.RBDDriver, 'get_fsid')
def test_uncloneable_missing_format(self, mock_get_fsid):
mock_get_fsid.return_value = 'abc'
location = {'url': 'rbd://abc/pool/image/snap'}
@@ -209,6 +212,33 @@ class RbdTestCase(test.NoDBTestCase):
rbd.clone.assert_called_once_with(*args, **kwargs)
self.assertEqual(2, client.__enter__.call_count)
+ @mock.patch.object(rbd_utils, 'RADOSClient')
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ def test_clone_eperm(self, mock_rados, mock_rbd, mock_client):
+ pool = u'images'
+ image = u'image-name'
+ snap = u'snapshot-name'
+ location = {'url': u'rbd://fsid/%s/%s/%s' % (pool, image, snap)}
+
+ client_stack = []
+
+ def mock__enter__(inst):
+ def _inner():
+ client_stack.append(inst)
+ return inst
+ return _inner
+
+ client = mock_client.return_value
+ # capture the rados client used to perform the clone
+ client.__enter__.side_effect = mock__enter__(client)
+
+ setattr(mock_rbd, 'PermissionError', test.TestingException)
+ rbd = mock_rbd.RBD.return_value
+ rbd.clone.side_effect = test.TestingException
+ self.assertRaises(exception.Forbidden,
+ self.driver.clone, location, self.volume_name)
+
@mock.patch.object(rbd_utils, 'RBDVolumeProxy')
def test_resize(self, mock_proxy):
size = 1024
@@ -296,7 +326,7 @@ class RbdTestCase(test.NoDBTestCase):
@mock.patch.object(rbd_utils, 'rados')
@mock.patch.object(rbd_utils, 'RADOSClient')
def test_cleanup_volumes(self, mock_client, mock_rados, mock_rbd):
- instance = objects.Instance(id=1, uuid='12345')
+ instance = objects.Instance(id=1, uuid='12345', task_state=None)
rbd = mock_rbd.RBD.return_value
rbd.list.return_value = ['12345_test', '111_test']
@@ -312,7 +342,7 @@ class RbdTestCase(test.NoDBTestCase):
@mock.patch.object(rbd_utils, 'RADOSClient')
def _test_cleanup_exception(self, exception_name,
mock_client, mock_rados, mock_rbd):
- instance = objects.Instance(id=1, uuid='12345')
+ instance = objects.Instance(id=1, uuid='12345', task_state=None)
setattr(mock_rbd, exception_name, test.TestingException)
rbd = mock_rbd.RBD.return_value
@@ -339,6 +369,63 @@ class RbdTestCase(test.NoDBTestCase):
@mock.patch.object(rbd_utils, 'rbd')
@mock.patch.object(rbd_utils, 'rados')
@mock.patch.object(rbd_utils, 'RADOSClient')
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_cleanup_volumes_pending_resize(self, mock_proxy, mock_client,
+ mock_rados, mock_rbd):
+ instance = objects.Instance(id=1, uuid='12345', task_state=None)
+
+ setattr(mock_rbd, 'ImageHasSnapshots', test.TestingException)
+ rbd = mock_rbd.RBD.return_value
+ rbd.remove.side_effect = [test.TestingException, None]
+ rbd.list.return_value = ['12345_test', '111_test']
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ proxy.list_snaps.return_value = [
+ {'name': libvirt_utils.RESIZE_SNAPSHOT_NAME}]
+ client = mock_client.return_value
+ self.driver.cleanup_volumes(instance)
+
+ remove_call = mock.call(client.ioctx, '12345_test')
+ rbd.remove.assert_has_calls([remove_call, remove_call])
+ proxy.remove_snap.assert_called_once_with(
+ libvirt_utils.RESIZE_SNAPSHOT_NAME)
+ client.__enter__.assert_called_once_with()
+ client.__exit__.assert_called_once_with(None, None, None)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ @mock.patch.object(rbd_utils, 'RADOSClient')
+ def test_cleanup_volumes_reverting_resize(self, mock_client, mock_rados,
+ mock_rbd):
+ instance = objects.Instance(id=1, uuid='12345',
+ task_state=task_states.RESIZE_REVERTING)
+
+ rbd = mock_rbd.RBD.return_value
+ rbd.list.return_value = ['12345_test', '111_test',
+ '12345_test_disk.local']
+
+ client = mock_client.return_value
+ self.driver.cleanup_volumes(instance)
+ rbd.remove.assert_called_once_with(client.ioctx,
+ '12345_test_disk.local')
+ client.__enter__.assert_called_once_with()
+ client.__exit__.assert_called_once_with(None, None, None)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ @mock.patch.object(rbd_utils, 'RADOSClient')
+ def test_destroy_volume(self, mock_client, mock_rados, mock_rbd):
+ rbd = mock_rbd.RBD.return_value
+ vol = '12345_test'
+ client = mock_client.return_value
+ self.driver.destroy_volume(vol)
+ rbd.remove.assert_called_once_with(client.ioctx, vol)
+ client.__enter__.assert_called_once_with()
+ client.__exit__.assert_called_once_with(None, None, None)
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'rados')
+ @mock.patch.object(rbd_utils, 'RADOSClient')
def test_remove_image(self, mock_client, mock_rados, mock_rbd):
name = '12345_disk.config.rescue'
@@ -350,3 +437,103 @@ class RbdTestCase(test.NoDBTestCase):
# Make sure that we entered and exited the RADOSClient
client.__enter__.assert_called_once_with()
client.__exit__.assert_called_once_with(None, None, None)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_create_snap(self, mock_proxy):
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ self.driver.create_snap(self.volume_name, self.snap_name)
+ proxy.create_snap.assert_called_once_with(self.snap_name)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_create_protected_snap(self, mock_proxy):
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ proxy.is_protected_snap.return_value = False
+ self.driver.create_snap(self.volume_name, self.snap_name, protect=True)
+ proxy.create_snap.assert_called_once_with(self.snap_name)
+ proxy.is_protected_snap.assert_called_once_with(self.snap_name)
+ proxy.protect_snap.assert_called_once_with(self.snap_name)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_remove_snap(self, mock_proxy):
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ proxy.list_snaps.return_value = [{'name': self.snap_name}]
+ proxy.is_protected_snap.return_value = False
+ self.driver.remove_snap(self.volume_name, self.snap_name)
+ proxy.remove_snap.assert_called_once_with(self.snap_name)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_remove_snap_force(self, mock_proxy):
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ proxy.is_protected_snap.return_value = True
+ proxy.list_snaps.return_value = [{'name': self.snap_name}]
+ self.driver.remove_snap(self.volume_name, self.snap_name, force=True)
+ proxy.is_protected_snap.assert_called_once_with(self.snap_name)
+ proxy.unprotect_snap.assert_called_once_with(self.snap_name)
+ proxy.remove_snap.assert_called_once_with(self.snap_name)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_remove_snap_does_nothing_when_no_snapshot(self, mock_proxy):
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ proxy.list_snaps.return_value = [{'name': 'some-other-snapshot'}]
+ self.driver.remove_snap(self.volume_name, self.snap_name)
+ self.assertFalse(proxy.remove_snap.called)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_remove_snap_does_nothing_when_protected(self, mock_proxy):
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ proxy.is_protected_snap.return_value = True
+ proxy.list_snaps.return_value = [{'name': self.snap_name}]
+ self.driver.remove_snap(self.volume_name, self.snap_name)
+ self.assertFalse(proxy.remove_snap.called)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_remove_snap_protected_ignore_errors(self, mock_proxy):
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ proxy.is_protected_snap.return_value = True
+ proxy.list_snaps.return_value = [{'name': self.snap_name}]
+ self.driver.remove_snap(self.volume_name, self.snap_name,
+ ignore_errors=True)
+ proxy.remove_snap.assert_called_once_with(self.snap_name)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_parent_info(self, mock_proxy):
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ self.driver.parent_info(self.volume_name)
+ proxy.parent_info.assert_called_once_with()
+
+ @mock.patch.object(rbd_utils, 'rbd')
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_parent_info_throws_exception_on_error(self, mock_proxy, mock_rbd):
+ setattr(mock_rbd, 'ImageNotFound', test.TestingException)
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ proxy.parent_info.side_effect = test.TestingException
+ self.assertRaises(exception.ImageUnacceptable,
+ self.driver.parent_info, self.volume_name)
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_flatten(self, mock_proxy):
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ self.driver.flatten(self.volume_name)
+ proxy.flatten.assert_called_once_with()
+
+ @mock.patch.object(rbd_utils, 'RBDVolumeProxy')
+ def test_rollback_to_snap(self, mock_proxy):
+ proxy = mock_proxy.return_value
+ proxy.__enter__.return_value = proxy
+ self.assertRaises(exception.SnapshotNotFound,
+ self.driver.rollback_to_snap,
+ self.volume_name, self.snap_name)
+
+ proxy.list_snaps.return_value = [{'name': self.snap_name}, ]
+ self.driver.rollback_to_snap(self.volume_name, self.snap_name)
+ proxy.rollback_to_snap.assert_called_once_with(self.snap_name)
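Nearly every new RBD test above leans on the same trick: point the proxy mock's
__enter__ back at itself, so code written as "with RBDVolumeProxy(...) as proxy:"
receives exactly the mock the test configured. A stripped-down sketch, where
create_snap is an illustrative stand-in for the driver code under test:

import mock


def create_snap(proxy_factory, volume, snap):
    # mirrors the driver pattern: open a proxy context, act, close it
    with proxy_factory(volume) as proxy:
        proxy.create_snap(snap)


mock_factory = mock.MagicMock()
proxy = mock_factory.return_value
proxy.__enter__.return_value = proxy  # the context yields the mock itself

create_snap(mock_factory, u'volume-00000001', u'test-snap')
proxy.create_snap.assert_called_once_with(u'test-snap')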
diff --git a/nova/tests/unit/virt/libvirt/test_blockinfo.py b/nova/tests/unit/virt/libvirt/test_blockinfo.py
index a1450b325e..146caabb16 100644
--- a/nova/tests/unit/virt/libvirt/test_blockinfo.py
+++ b/nova/tests/unit/virt/libvirt/test_blockinfo.py
@@ -15,6 +15,7 @@
import copy
+import fixtures
import mock
from nova import block_device
@@ -25,6 +26,7 @@ from nova import objects
from nova import test
from nova.tests.unit import fake_block_device
import nova.tests.unit.image.fake
+from nova.tests.unit.virt import fakelibosinfo
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.virt.libvirt import blockinfo
@@ -744,6 +746,17 @@ class LibvirtBlockInfoTest(test.NoDBTestCase):
blockinfo.get_disk_bus_for_device_type,
instance, 'kvm', image_meta)
+ def test_get_disk_bus_with_osinfo(self):
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.osinfo.libosinfo',
+ fakelibosinfo))
+ instance = objects.Instance(**self.test_instance)
+ image_meta = {'properties': {'os_name': 'fedora22'}}
+ image_meta = objects.ImageMeta.from_dict(image_meta)
+ bus = blockinfo.get_disk_bus_for_device_type(instance,
+ 'kvm', image_meta)
+ self.assertEqual('virtio', bus)
+
def test_success_get_disk_bus_for_disk_dev(self):
expected = (
('ide', ("kvm", "hda")),
diff --git a/nova/tests/unit/virt/libvirt/test_config.py b/nova/tests/unit/virt/libvirt/test_config.py
index 7ab290123a..98a22df57f 100644
--- a/nova/tests/unit/virt/libvirt/test_config.py
+++ b/nova/tests/unit/virt/libvirt/test_config.py
@@ -706,6 +706,42 @@ class LibvirtConfigGuestDiskTest(LibvirtConfigBaseTest):
self.assertEqual('unmap', obj.driver_discard)
+ def test_config_file_io(self):
+ obj = config.LibvirtConfigGuestDisk()
+ obj.driver_name = "qemu"
+ obj.driver_format = "qcow2"
+ obj.driver_cache = "none"
+ obj.driver_io = "native"
+ obj.source_type = "file"
+ obj.source_path = "/tmp/hello.qcow2"
+ obj.target_dev = "/dev/hda"
+ obj.target_bus = "ide"
+ obj.serial = "7a97c4a3-6f59-41d4-bf47-191d7f97f8e9"
+
+ xml = obj.to_xml()
+ self.assertXmlEqual("""
+ <disk type="file" device="disk">
+ <driver name="qemu" type="qcow2" cache="none" io="native"/>
+ <source file="/tmp/hello.qcow2"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
+ </disk>""", xml)
+
+ def test_config_file_io_parse(self):
+ xml = """
+ <disk type="file" device="disk">
+ <driver name="qemu" type="qcow2" cache="none" io="native"/>
+ <source file="/tmp/hello.qcow2"/>
+ <target bus="ide" dev="/dev/hda"/>
+ <serial>7a97c4a3-6f59-41d4-bf47-191d7f97f8e9</serial>
+ </disk>"""
+ xmldoc = etree.fromstring(xml)
+
+ obj = config.LibvirtConfigGuestDisk()
+ obj.parse_dom(xmldoc)
+
+ self.assertEqual('native', obj.driver_io)
+
def test_config_block(self):
obj = config.LibvirtConfigGuestDisk()
obj.source_type = "block"
@@ -1266,6 +1302,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
</bandwidth>
</interface>""")
+ # parse the xml from the first object into a new object and make sure
+ # they are the same
+ obj2 = config.LibvirtConfigGuestInterface()
+ obj2.parse_str(xml)
+ self.assertXmlEqual(xml, obj2.to_xml())
+
def test_config_driver_options(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "ethernet"
@@ -1284,6 +1326,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<target dev="vnet0"/>
</interface>""")
+ # parse the xml from the first object into a new object and make sure
+ # they are the same
+ obj2 = config.LibvirtConfigGuestInterface()
+ obj2.parse_str(xml)
+ self.assertXmlEqual(xml, obj2.to_xml())
+
def test_config_bridge(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "bridge"
@@ -1316,6 +1364,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
</bandwidth>
</interface>""")
+ # parse the xml from the first object into a new object and make sure
+ # they are the same
+ obj2 = config.LibvirtConfigGuestInterface()
+ obj2.parse_str(xml)
+ self.assertXmlEqual(xml, obj2.to_xml())
+
def test_config_bridge_ovs(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "bridge"
@@ -1338,6 +1392,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
</virtualport>
</interface>""")
+ # parse the xml from the first object into a new object and make sure
+ # they are the same
+ obj2 = config.LibvirtConfigGuestInterface()
+ obj2.parse_str(xml)
+ self.assertXmlEqual(xml, obj2.to_xml())
+
def test_config_bridge_xen(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "bridge"
@@ -1353,6 +1413,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<script path="/path/to/test-vif-openstack"/>
</interface>""")
+ # parse the xml from the first object into a new object and make sure
+ # they are the same
+ obj2 = config.LibvirtConfigGuestInterface()
+ obj2.parse_str(xml)
+ self.assertXmlEqual(xml, obj2.to_xml())
+
def test_config_8021Qbh(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "direct"
@@ -1372,6 +1438,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<virtualport type="802.1Qbh"/>
</interface>""")
+ # parse the xml from the first object into a new object and make sure
+ # they are the same
+ obj2 = config.LibvirtConfigGuestInterface()
+ obj2.parse_str(xml)
+ self.assertXmlEqual(xml, obj2.to_xml())
+
def test_config_direct(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "direct"
@@ -1388,6 +1460,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<source dev="eth0" mode="passthrough"/>
</interface>""")
+ # parse the xml from the first object into a new object and make sure
+ # they are the same
+ obj2 = config.LibvirtConfigGuestInterface()
+ obj2.parse_str(xml)
+ self.assertXmlEqual(xml, obj2.to_xml())
+
def test_config_8021Qbh_hostdev(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "hostdev"
@@ -1409,6 +1487,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
</virtualport>
</interface>""")
+ # parse the xml from the first object into a new object and make sure
+ # they are the same
+ obj2 = config.LibvirtConfigGuestInterface()
+ obj2.parse_str(xml)
+ self.assertXmlEqual(xml, obj2.to_xml())
+
def test_config_hw_veb_hostdev(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "hostdev"
@@ -1429,6 +1513,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
</vlan>
</interface>""")
+ # parse the xml from the first object into a new object and make sure
+ # they are the same
+ obj2 = config.LibvirtConfigGuestInterface()
+ obj2.parse_str(xml)
+ self.assertXmlEqual(xml, obj2.to_xml())
+
def test_config_vhostuser(self):
obj = config.LibvirtConfigGuestInterface()
obj.net_type = "vhostuser"
@@ -1445,6 +1535,12 @@ class LibvirtConfigGuestInterfaceTest(LibvirtConfigBaseTest):
<source type="unix" mode="server" path="/vhost-user/test.sock"/>
</interface>""")
+ # parse the xml from the first object into a new object and make sure
+ # they are the same
+ obj2 = config.LibvirtConfigGuestInterface()
+ obj2.parse_str(xml)
+ self.assertXmlEqual(xml, obj2.to_xml())
+
class LibvirtConfigGuestFeatureTest(LibvirtConfigBaseTest):
@@ -1792,6 +1888,31 @@ class LibvirtConfigGuestTest(LibvirtConfigBaseTest):
</devices>
</domain>""")
+ def test_config_uefi(self):
+ obj = config.LibvirtConfigGuest()
+ obj.virt_type = "kvm"
+ obj.memory = 100 * units.Mi
+ obj.vcpus = 1
+ obj.name = "uefi"
+ obj.uuid = "f01cf68d-515c-4daf-b85f-ef1424d93bfc"
+ obj.os_type = "x86_64"
+ obj.os_loader = '/tmp/OVMF.fd'
+ obj.os_loader_type = 'pflash'
+ xml = obj.to_xml()
+
+ self.assertXmlEqual(xml, """
+ <domain type="kvm">
+ <uuid>f01cf68d-515c-4daf-b85f-ef1424d93bfc</uuid>
+ <name>uefi</name>
+ <memory>104857600</memory>
+ <vcpu>1</vcpu>
+ <os>
+ <type>x86_64</type>
+ <loader readonly='yes' type='pflash'>/tmp/OVMF.fd</loader>
+ <nvram template="/tmp/OVMF.fd"></nvram>
+ </os>
+ </domain>""")
+
def test_config_boot_menu(self):
obj = config.LibvirtConfigGuest()
obj.virt_type = "kvm"
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index d6ad7ec0f4..0ab16cac0a 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -404,7 +404,7 @@ class CacheConcurrencyTestCase(test.NoDBTestCase):
def fake_extend(image, size, use_cow=False):
pass
- self.stubs.Set(os.path, 'exists', fake_exists)
+ self.stub_out('os.path.exists', fake_exists)
self.stubs.Set(utils, 'execute', fake_execute)
self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
self.useFixture(fixtures.MonkeyPatch(
@@ -586,6 +586,7 @@ def _create_test_instance():
'ephemeral_key_uuid': None,
'vcpu_model': None,
'host': 'fake-host',
+ 'task_state': None,
}
@@ -794,13 +795,60 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
- libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION) - 1)
+ libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
+ arch.PPC64)) - 1)
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
- libvirt_driver.MIN_QEMU_S390_VERSION))
+ libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
+ arch.PPC64)))
+ @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
+ def test_min_version_ppc_old_libvirt(self, mock_libv, mock_qemu,
+ mock_arch):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.assertRaises(exception.NovaException,
+ drvr.init_host,
+ "dummyhost")
+
+ @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
+ return_value=versionutils.convert_version_to_int(
+ libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
+ arch.PPC64)))
+ @mock.patch.object(fakelibvirt.Connection, 'getVersion',
+ return_value=versionutils.convert_version_to_int(
+ libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
+ arch.PPC64)) - 1)
+ @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
+ def test_min_version_ppc_old_qemu(self, mock_libv, mock_qemu,
+ mock_arch):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.assertRaises(exception.NovaException,
+ drvr.init_host,
+ "dummyhost")
+
+ @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
+ return_value=versionutils.convert_version_to_int(
+ libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
+ arch.PPC64)))
+ @mock.patch.object(fakelibvirt.Connection, 'getVersion',
+ return_value=versionutils.convert_version_to_int(
+ libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
+ arch.PPC64)))
+ @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
+ def test_min_version_ppc_ok(self, mock_libv, mock_qemu, mock_arch):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr.init_host("dummyhost")
+
+ @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
+ return_value=versionutils.convert_version_to_int(
+ libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
+ arch.S390X)) - 1)
+ @mock.patch.object(fakelibvirt.Connection, 'getVersion',
+ return_value=versionutils.convert_version_to_int(
+ libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
+ arch.S390X)))
@mock.patch.object(arch, "from_host", return_value=arch.S390X)
- def test_min_version_s390_old_libvirt(self, mock_arch,
- mock_qemu_version, mock_lv_version):
+ def test_min_version_s390_old_libvirt(self, mock_libv, mock_qemu,
+ mock_arch):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.NovaException,
drvr.init_host,
@@ -808,13 +856,15 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
- libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION))
+ libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
+ arch.S390X)))
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
- libvirt_driver.MIN_QEMU_S390_VERSION) - 1)
+ libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
+ arch.S390X)) - 1)
@mock.patch.object(arch, "from_host", return_value=arch.S390X)
- def test_min_version_s390_old_qemu(self, mock_arch,
- mock_qemu_version, mock_lv_version):
+ def test_min_version_s390_old_qemu(self, mock_libv, mock_qemu,
+ mock_arch):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertRaises(exception.NovaException,
drvr.init_host,
@@ -822,42 +872,253 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=versionutils.convert_version_to_int(
- libvirt_driver.MIN_LIBVIRT_KVM_S390_VERSION))
+ libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
+ arch.S390X)))
@mock.patch.object(fakelibvirt.Connection, 'getVersion',
return_value=versionutils.convert_version_to_int(
- libvirt_driver.MIN_QEMU_S390_VERSION))
+ libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
+ arch.S390X)))
@mock.patch.object(arch, "from_host", return_value=arch.S390X)
- def test_min_version_s390_ok(self, mock_arch,
- mock_qemu_version, mock_lv_version):
+ def test_min_version_s390_ok(self, mock_libv, mock_qemu, mock_arch):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
+ def _do_test_parse_migration_flags(self, lm_config=None, lm_expected=None,
+ bm_config=None, bm_expected=None):
+ if lm_config is not None:
+ self.flags(live_migration_flag=lm_config, group='libvirt')
+ if bm_config is not None:
+ self.flags(block_migration_flag=bm_config, group='libvirt')
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ drvr._parse_migration_flags()
+
+ if lm_expected is not None:
+ self.assertEqual(lm_expected, drvr._live_migration_flags)
+ if bm_expected is not None:
+ self.assertEqual(bm_expected, drvr._block_migration_flags)
+
+ def test_parse_live_migration_flags_default(self):
+ self._do_test_parse_migration_flags(
+ lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED'),
+ lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED))
+
+ def test_parse_live_migration_flags(self):
+ self._do_test_parse_migration_flags(
+ lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE'),
+ lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE))
+
+ def test_parse_block_migration_flags_default(self):
+ self._do_test_parse_migration_flags(
+ bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
+ 'VIR_MIGRATE_NON_SHARED_INC'),
+ bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
+ libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
+
+ def test_parse_block_migration_flags(self):
+ self._do_test_parse_migration_flags(
+ bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC'),
+ bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
+
+ @mock.patch('nova.virt.libvirt.driver.LOG')
+ def test_parse_live_migration_flag_with_invalid_flag(self, mock_log):
+ self._do_test_parse_migration_flags(
+ lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
+ 'VIR_MIGRATE_FOO_BAR'),
+ lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
+ bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
+ 'VIR_MIGRATE_NON_SHARED_INC, VIR_MIGRATE_FOO_BAR'),
+ bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
+ libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
+
+ msg = mock_log.warning.call_args_list[0]
+ self.assertIn("unknown libvirt live migration flag", msg[0][0])
+ msg = mock_log.warning.call_args_list[1]
+ self.assertIn("unknown libvirt live migration flag", msg[0][0])
+
@mock.patch('nova.virt.libvirt.driver.LOG')
- def test_init_host_migration_flags(self, mock_log):
+ def test_parse_migration_flags_unsafe_block(self, mock_log):
'''Test that the driver logs a warning if the live_migration_flag
and/or block_migration_flag config options use a value which can
cause damage.
'''
- # should NOT have VIR_MIGRATE_NON_SHARED_INC
- self.flags(live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE, "
- "VIR_MIGRATE_PEER2PEER, "
- "VIR_MIGRATE_LIVE, "
- "VIR_MIGRATE_TUNNELLED, "
- "VIR_MIGRATE_NON_SHARED_INC",
- group='libvirt')
- # should have VIR_MIGRATE_NON_SHARED_INC
- self.flags(block_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE, "
- "VIR_MIGRATE_PEER2PEER, "
- "VIR_MIGRATE_LIVE, "
- "VIR_MIGRATE_TUNNELLED",
- group='libvirt')
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- drvr.init_host("dummyhost")
+ self._do_test_parse_migration_flags(
+ lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
+ 'VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, '
+ 'VIR_MIGRATE_TUNNELLED, '
+ 'VIR_MIGRATE_NON_SHARED_INC'),
+ bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
+ 'VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, '
+ 'VIR_MIGRATE_TUNNELLED'),
+ lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
+ bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
+ libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
+
+ msg = mock_log.warning.call_args_list[0]
+ self.assertIn('Removing the VIR_MIGRATE_NON_SHARED_INC', msg[0][0])
+ msg = mock_log.warning.call_args_list[1]
+ self.assertIn('Adding the VIR_MIGRATE_NON_SHARED_INC', msg[0][0])
+
+ @mock.patch('nova.virt.libvirt.driver.LOG')
+ def test_parse_migration_flags_p2p_missing(self, mock_log):
+ self._do_test_parse_migration_flags(
+ lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
+ 'VIR_MIGRATE_LIVE, '
+ 'VIR_MIGRATE_TUNNELLED'),
+ bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
+ 'VIR_MIGRATE_LIVE, '
+ 'VIR_MIGRATE_TUNNELLED, '
+ 'VIR_MIGRATE_NON_SHARED_INC'),
+ lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
+ bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
+ libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
msg = mock_log.warning.call_args_list[0]
- self.assertIn('Running Nova with a live_migration_flag', msg[0][0])
+ self.assertIn('Adding the VIR_MIGRATE_PEER2PEER flag', msg[0][0])
msg = mock_log.warning.call_args_list[1]
- self.assertIn('Running Nova with a block_migration_flag', msg[0][0])
+ self.assertIn('Adding the VIR_MIGRATE_PEER2PEER flag', msg[0][0])
+
+ @mock.patch('nova.virt.libvirt.driver.LOG')
+ def test_parse_migration_flags_p2p_xen(self, mock_log):
+ self.flags(virt_type='xen', group='libvirt')
+
+ self._do_test_parse_migration_flags(
+ lm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
+ 'VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, '
+ 'VIR_MIGRATE_TUNNELLED'),
+ bm_config=('VIR_MIGRATE_UNDEFINE_SOURCE, '
+ 'VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, '
+ 'VIR_MIGRATE_TUNNELLED, '
+ 'VIR_MIGRATE_NON_SHARED_INC'),
+ lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
+ bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
+ libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
+
+ msg = mock_log.warning.call_args_list[0]
+ self.assertIn('Removing the VIR_MIGRATE_PEER2PEER flag', msg[0][0])
+ msg = mock_log.warning.call_args_list[1]
+ self.assertIn('Removing the VIR_MIGRATE_PEER2PEER flag', msg[0][0])
+
+ @mock.patch('nova.virt.libvirt.driver.LOG')
+ def test_parse_migration_flags_config_mgmt(self, mock_log):
+ self._do_test_parse_migration_flags(
+ lm_config=('VIR_MIGRATE_PERSIST_DEST, '
+ 'VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, '
+ 'VIR_MIGRATE_TUNNELLED'),
+ bm_config=('VIR_MIGRATE_PERSIST_DEST, '
+ 'VIR_MIGRATE_PEER2PEER, '
+ 'VIR_MIGRATE_LIVE, '
+ 'VIR_MIGRATE_TUNNELLED, '
+ 'VIR_MIGRATE_NON_SHARED_INC'),
+ lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
+ bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED |
+ libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
+
+ msg = mock_log.warning.call_args_list[0]
+ self.assertIn('Adding the VIR_MIGRATE_UNDEFINE_SOURCE flag', msg[0][0])
+ msg = mock_log.warning.call_args_list[1]
+ self.assertIn('Removing the VIR_MIGRATE_PERSIST_DEST flag', msg[0][0])
+ msg = mock_log.warning.call_args_list[2]
+ self.assertIn('Adding the VIR_MIGRATE_UNDEFINE_SOURCE flag', msg[0][0])
+ msg = mock_log.warning.call_args_list[3]
+ self.assertIn('Removing the VIR_MIGRATE_PERSIST_DEST flag', msg[0][0])
+
+ @mock.patch('nova.virt.libvirt.driver.LOG')
+ def test_live_migration_tunnelled_true(self, mock_log):
+ self.flags(live_migration_tunnelled=True, group='libvirt')
+
+ self._do_test_parse_migration_flags(
+ lm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, '
+ 'VIR_MIGRATE_LIVE'),
+ bm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, '
+ 'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC'),
+ lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
+ bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
+ libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED))
+
+ msg = mock_log.warning.call_args_list[0]
+ self.assertIn('does not contain the VIR_MIGRATE_TUNNELLED', msg[0][0])
+ msg = mock_log.warning.call_args_list[1]
+ self.assertIn('does not contain the VIR_MIGRATE_TUNNELLED', msg[0][0])
+
+ @mock.patch('nova.virt.libvirt.driver.LOG')
+ def test_live_migration_tunnelled_false(self, mock_log):
+ self.flags(live_migration_tunnelled=False, group='libvirt')
+
+ self._do_test_parse_migration_flags(
+ lm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, '
+ 'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED'),
+ bm_config=('VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_UNDEFINE_SOURCE, '
+ 'VIR_MIGRATE_LIVE, VIR_MIGRATE_NON_SHARED_INC, '
+ 'VIR_MIGRATE_TUNNELLED'),
+ lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE),
+ bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
+ libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
+ libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
+ libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))
+
+ msg = mock_log.warning.call_args_list[0]
+ self.assertIn('contains the VIR_MIGRATE_TUNNELLED flag', msg[0][0])
+ msg = mock_log.warning.call_args_list[1]
+ self.assertIn('contains the VIR_MIGRATE_TUNNELLED flag', msg[0][0])
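A simplified sketch of the parsing these tests pin down (not Nova's
_parse_migration_flags; the constants are copied from the fakelibvirt stubs
earlier in this diff): split the comma-separated config string, OR the known
flags together, and warn on unknown names instead of failing:

import logging

LOG = logging.getLogger(__name__)

# values as defined in the fakelibvirt stubs above
FLAGS = {
    'VIR_MIGRATE_LIVE': 1,
    'VIR_MIGRATE_PEER2PEER': 2,
    'VIR_MIGRATE_TUNNELLED': 4,
    'VIR_MIGRATE_UNDEFINE_SOURCE': 16,
    'VIR_MIGRATE_NON_SHARED_INC': 128,
}


def parse_flags(config):
    result = 0
    for name in (part.strip() for part in config.split(',')):
        if name not in FLAGS:
            LOG.warning('unknown libvirt live migration flag: %s', name)
            continue
        result |= FLAGS[name]
    return result


assert parse_flags('VIR_MIGRATE_LIVE, VIR_MIGRATE_PEER2PEER') == 3
assert parse_flags('VIR_MIGRATE_LIVE, VIR_MIGRATE_FOO_BAR') == 1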
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch.object(host.Host,
@@ -1732,7 +1993,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertFalse(hasattr(drvr, '_bad_libvirt_numa_version_warn'))
with mock.patch.object(drvr._host, 'has_version', return_value=True):
- for i in xrange(2):
+ for i in range(2):
self.assertFalse(drvr._has_numa_support())
self.assertTrue(drvr._bad_libvirt_numa_version_warn)
self.assertEqual(1, mock_warn.call_count)
@@ -2632,6 +2893,56 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
+ def test_has_uefi_support_with_invalid_version(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ with mock.patch.object(drvr._host,
+ 'has_min_version', return_value=False):
+ self.assertFalse(drvr._has_uefi_support())
+
+ def test_has_uefi_support_not_supported_arch(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "alpha"
+ self.assertFalse(drvr._has_uefi_support())
+
+ @mock.patch('os.path.exists', return_value=False)
+ def test_has_uefi_support_with_no_loader_existed(self, mock_exist):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+ self.assertFalse(drvr._has_uefi_support())
+
+ @mock.patch('os.path.exists', return_value=True)
+ def test_has_uefi_support(self, mock_has_version):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ caps = vconfig.LibvirtConfigCaps()
+ caps.host = vconfig.LibvirtConfigCapsHost()
+ caps.host.cpu = vconfig.LibvirtConfigCPU()
+ caps.host.cpu.arch = "x86_64"
+
+ with mock.patch.object(drvr._host,
+ 'has_min_version', return_value=True):
+ self.assertTrue(drvr._has_uefi_support())
+
+ def test_get_guest_config_with_uefi(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
+
+ image_meta = objects.ImageMeta.from_dict({
+ "disk_format": "raw",
+ "properties": {"hw_firmware_type": "uefi"}})
+ instance_ref = objects.Instance(**self.test_instance)
+
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance_ref,
+ image_meta)
+ with test.nested(
+ mock.patch.object(drvr, "_has_uefi_support",
+ return_value=True)):
+ cfg = drvr._get_guest_config(instance_ref, [],
+ image_meta, disk_info)
+ self.assertEqual(cfg.os_loader_type, "pflash")
+
def test_get_guest_config_with_block_device(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
@@ -5144,9 +5455,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
- img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
- "os_require_quiesce": "yes"}}
- self.assertIsNone(drvr.quiesce(self.context, instance, img_meta))
+ image_meta = objects.ImageMeta.from_dict(
+ {"properties": {"hw_qemu_guest_agent": "yes"}})
+ self.assertIsNone(drvr.quiesce(self.context, instance, image_meta))
mock_fsfreeze.assert_called_once_with()
def test_quiesce_not_supported(self):
@@ -5163,9 +5474,10 @@ class LibvirtConnTestCase(test.NoDBTestCase):
with mock.patch.object(FakeVirtDomain, "fsThaw") as mock_fsthaw:
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
- img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
- "os_require_quiesce": "yes"}}
- self.assertIsNone(drvr.unquiesce(self.context, instance, img_meta))
+ image_meta = objects.ImageMeta.from_dict(
+ {"properties": {"hw_qemu_guest_agent": "yes"}})
+ self.assertIsNone(drvr.unquiesce(self.context, instance,
+ image_meta))
mock_fsthaw.assert_called_once_with()
def test_create_snapshot_metadata(self):
@@ -5589,7 +5901,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
flags &= ~os.O_DIRECT
return os_open(path, flags, *args, **kwargs)
- self.stubs.Set(os, 'open', os_open_stub)
+ self.stub_out('os.open', os_open_stub)
@staticmethod
def connection_supports_direct_io_stub(dirpath):
@@ -5857,7 +6169,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
filterref = './devices/interface/filterref'
vif = network_info[0]
nic_id = vif['address'].replace(':', '')
- fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), drvr)
+ fw = firewall.NWFilterFirewall(drvr)
instance_filter_name = fw._instance_filter_name(instance_ref,
nic_id)
self.assertEqual(tree.find(filterref).get('filter'),
@@ -6121,10 +6433,11 @@ class LibvirtConnTestCase(test.NoDBTestCase):
def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
objects.Instance(**self.test_instance)
- dest_check_data = {"filename": "file",
- "block_migration": True,
- "disk_over_commit": False,
- "disk_available_mb": 1024}
+ dest_check_data = objects.LibvirtLiveMigrateData(
+ filename="file",
+ block_migration=True,
+ disk_over_commit=False,
+ disk_available_mb=1024)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.mox.StubOutWithMock(drvr, '_cleanup_shared_storage_test_file')
@@ -6261,6 +6574,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data)
+ @mock.patch.object(host.Host, 'has_min_version', return_value=False)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'_assert_dest_node_has_enough_disk')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
@@ -6270,15 +6584,14 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
'get_instance_disk_info')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
- '_is_shared_block_storage')
+ '_is_shared_block_storage', return_value=False)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
- '_check_shared_storage_test_file')
- def test_check_can_live_migrate_source_block_migration_with_bdm(
+ '_check_shared_storage_test_file', return_value=False)
+ def test_check_can_live_migrate_source_block_migration_with_bdm_error(
self, mock_check, mock_shared_block, mock_get_bdi,
- mock_booted_from_volume, mock_has_local, mock_enough):
+ mock_booted_from_volume, mock_has_local, mock_enough,
+ mock_min_version):
- mock_check.return_value = False
- mock_shared_block.return_value = False
bdi = {'block_device_mapping': ['bdm']}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -6293,6 +6606,40 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.context, instance, dest_check_data,
block_device_info=bdi)
+ @mock.patch.object(host.Host, 'has_min_version', return_value=True)
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
+ '_assert_dest_node_has_enough_disk')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
+ '_has_local_disk')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
+ '_is_booted_from_volume')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
+ 'get_instance_disk_info')
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
+ '_is_shared_block_storage', return_value=False)
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
+ '_check_shared_storage_test_file', return_value=False)
+ def test_check_can_live_migrate_source_block_migration_with_bdm_success(
+ self, mock_check, mock_shared_block, mock_get_bdi,
+ mock_booted_from_volume, mock_has_local, mock_enough,
+ mock_min_version):
+
+ bdi = {'block_device_mapping': ['bdm']}
+ instance = objects.Instance(**self.test_instance)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
+ migrate_data = objects.LibvirtLiveMigrateData(
+ disk_over_commit=False,
+ filename='file',
+ disk_available_mb=100,
+ image_type='default',
+ block_migration=True)
+
+ return_value = drvr.check_can_live_migrate_source(
+ self.context, instance, migrate_data, block_device_info=bdi)
+
+ self.assertEqual(migrate_data, return_value)
+
def _is_shared_block_storage_test_create_mocks(self, disks):
# Test data
instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>"
@@ -6505,13 +6852,15 @@ class LibvirtConnTestCase(test.NoDBTestCase):
spice='10.0.0.2')
target_xml = etree.tostring(etree.fromstring(target_xml))
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
# Preparing mocks
vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI2")
_bandwidth = CONF.libvirt.live_migration_bandwidth
vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
initial_xml)
- vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
+ vdmock.migrateToURI2(drvr._live_migration_uri('dest'),
None,
target_xml,
mox.IgnoreArg(),
@@ -6527,13 +6876,13 @@ class LibvirtConnTestCase(test.NoDBTestCase):
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
serial_listen_addr='127.0.0.1',
+ target_connect_addr=None,
bdms=[])
self.mox.ReplayAll()
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, vdmock)
+ False, migrate_data, vdmock, [])
def test_live_migration_update_volume_xml(self):
self.compute = importutils.import_object(CONF.compute_manager)
@@ -6564,6 +6913,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
connection_info=connection_info)
migrate_data = objects.LibvirtLiveMigrateData(
serial_listen_addr='',
+ target_connect_addr=None,
bdms=[bdm])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -6581,10 +6931,55 @@ class LibvirtConnTestCase(test.NoDBTestCase):
test_mock.XMLDesc.return_value = target_xml
self.assertFalse(drvr._live_migration_operation(
self.context, instance_ref, 'dest', False,
- migrate_data, test_mock))
+ migrate_data, test_mock, []))
mupdate.assert_called_once_with(target_xml, migrate_data.bdms,
{}, '')
+ def test_live_migration_with_valid_target_connect_addr(self):
+ self.compute = importutils.import_object(CONF.compute_manager)
+ instance_dict = dict(self.test_instance)
+ instance_dict.update({'host': 'fake',
+ 'power_state': power_state.RUNNING,
+ 'vm_state': vm_states.ACTIVE})
+ instance_ref = objects.Instance(**instance_dict)
+ target_xml = self.device_xml_tmpl.format(
+ device_path='/dev/disk/by-path/'
+ 'ip-1.2.3.4:3260-iqn.'
+ 'cde.67890.opst-lun-Z')
+ # test data: an iSCSI connection_info blob for the migrating volume
+ connection_info = {
+ u'driver_volume_type': u'iscsi',
+ u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
+ u'data': {
+ u'access_mode': u'rw', u'target_discovered': False,
+ u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
+ u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
+ 'device_path':
+ u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
+ },
+ }
+ bdm = objects.LibvirtLiveMigrateBDMInfo(
+ serial='58a84f6d-3f0c-4e19-a0af-eb657b790657',
+ bus='virtio', type='disk', dev='vdb',
+ connection_info=connection_info)
+ migrate_data = objects.LibvirtLiveMigrateData(
+ serial_listen_addr='',
+ target_connect_addr='127.0.0.2',
+ bdms=[bdm])
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ test_mock = mock.MagicMock()
+
+ with mock.patch.object(drvr, '_update_xml') as mupdate:
+
+ test_mock.XMLDesc.return_value = target_xml
+ drvr._live_migration_operation(self.context, instance_ref,
+ 'dest', False, migrate_data,
+ test_mock, [])
+ test_mock.migrateToURI2.assert_called_once_with(
+ 'qemu+tcp://127.0.0.2/system',
+ None, mupdate(), None, None, 0)
+
def test_update_volume_xml(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -6630,6 +7025,39 @@ class LibvirtConnTestCase(test.NoDBTestCase):
xml_doc = etree.fromstring(target_xml, parser)
self.assertEqual(etree.tostring(xml_doc), etree.tostring(config))
+ def test_live_migration_uri(self):
+ hypervisor_uri_map = (
+ ('xen', 'xenmigr://%s/system'),
+ ('kvm', 'qemu+tcp://%s/system'),
+ ('qemu', 'qemu+tcp://%s/system'),
+ # anything else maps to None and must raise LiveMigrationURINotAvailable
+ ('lxc', None),
+ ('parallels', None),
+ ('', None),
+ )
+ dest = 'destination'
+ for hyperv, uri in hypervisor_uri_map:
+ self.flags(virt_type=hyperv, group='libvirt')
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ if uri is not None:
+ uri = uri % dest
+ self.assertEqual(uri, drvr._live_migration_uri(dest))
+ else:
+ self.assertRaises(exception.LiveMigrationURINotAvailable,
+ drvr._live_migration_uri,
+ dest)
+
+ def test_live_migration_uri_forced(self):
+ dest = 'destination'
+ for hyperv in ('kvm', 'xen'):
+ self.flags(virt_type=hyperv, group='libvirt')
+
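+ # an operator-configured live_migration_uri must override the
+ # per-virt-type default, regardless of hypervisor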
+ forced_uri = 'foo://%s/bar'
+ self.flags(live_migration_uri=forced_uri, group='libvirt')
+
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ self.assertEqual(forced_uri % dest, drvr._live_migration_uri(dest))
+
def test_update_volume_xml_no_serial(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -6745,17 +7173,18 @@ class LibvirtConnTestCase(test.NoDBTestCase):
graphics_listen_addr_vnc='10.0.0.1',
graphics_listen_addr_spice='10.0.0.2',
serial_listen_addr='9.0.0.12',
+ target_connect_addr=None,
bdms=[])
dom = fakelibvirt.virDomain
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, dom)
+ False, migrate_data, dom, [])
mock_xml.assert_called_once_with(
flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE)
mock_migrate.assert_called_once_with(
- CONF.libvirt.live_migration_uri % 'dest',
+ drvr._live_migration_uri('dest'),
None, target_xml, mock.ANY, None, bandwidth)
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
@@ -6767,28 +7196,13 @@ class LibvirtConnTestCase(test.NoDBTestCase):
CONF.set_override("enabled", True, "serial_console")
dom = fakelibvirt.virDomain
migrate_data = objects.LibvirtLiveMigrateData(
- serial_listen_addr='')
+ serial_listen_addr='', target_connect_addr=None)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, dom)
-
- def test_live_migration_fails_with_invalid_live_migration_flag(self):
- self.flags(live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE, "
- "VIR_MIGRATE_PEER2PEER, "
- "VIR_MIGRATE_LIVE, "
- "VIR_MIGRATE_TUNNELLED, "
- "VIR_MIGRATE_FOO_BAR",
- group='libvirt')
- instance_ref = self.test_instance
- dom = fakelibvirt.virDomain
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
- self.assertRaises(exception.Invalid,
- drvr._live_migration_operation,
- self.context, instance_ref, 'dest',
- False, None, dom)
+ False, migrate_data, dom, [])
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
create=True)
@@ -6800,11 +7214,13 @@ class LibvirtConnTestCase(test.NoDBTestCase):
'vm_state': vm_states.ACTIVE})
instance_ref = objects.Instance(**instance_dict)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
# Preparing mocks
vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI")
_bandwidth = CONF.libvirt.live_migration_bandwidth
- vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+ vdmock.migrateToURI(drvr._live_migration_uri('dest'),
mox.IgnoreArg(),
None,
_bandwidth).AndRaise(
@@ -6815,13 +7231,13 @@ class LibvirtConnTestCase(test.NoDBTestCase):
graphics_listen_addr_vnc='0.0.0.0',
graphics_listen_addr_spice='0.0.0.0',
serial_listen_addr='127.0.0.1',
+ target_connect_addr=None,
bdms=[])
self.mox.ReplayAll()
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, vdmock)
+ False, migrate_data, vdmock, [])
def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
self.compute = importutils.import_object(CONF.compute_manager)
@@ -6831,11 +7247,13 @@ class LibvirtConnTestCase(test.NoDBTestCase):
'vm_state': vm_states.ACTIVE})
instance_ref = objects.Instance(**instance_dict)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
# Preparing mocks
vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI")
_bandwidth = CONF.libvirt.live_migration_bandwidth
- vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+ vdmock.migrateToURI(drvr._live_migration_uri('dest'),
mox.IgnoreArg(),
None,
_bandwidth).AndRaise(
@@ -6844,13 +7262,48 @@ class LibvirtConnTestCase(test.NoDBTestCase):
# start test
migrate_data = objects.LibvirtLiveMigrateData(
serial_listen_addr='',
+ target_connect_addr=None,
bdms=[])
self.mox.ReplayAll()
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, vdmock)
+ False, migrate_data, vdmock, [])
+
+ @mock.patch.object(host.Host, 'has_min_version', return_value=True)
+ @mock.patch.object(fakelibvirt.virDomain, "migrateToURI3")
+ @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._update_xml',
+ return_value='')
+ @mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='')
+ def test_live_migration_uses_migrateToURI3(
+ self, mock_old_xml, mock_new_xml, mock_migrateToURI3,
+ mock_min_version):
+ # Preparing mocks
+ disk_paths = ['vda', 'vdb']
+ params = {
+ 'migrate_disks': ['vda', 'vdb'],
+ 'bandwidth': CONF.libvirt.live_migration_bandwidth,
+ 'destination_xml': '',
+ }
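+ # params is the dict the driver is expected to hand to migrateToURI3:
+ # the disks to block-migrate, the bandwidth cap and the updated XML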
+ mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR")
+
+ # Start test
+ migrate_data = objects.LibvirtLiveMigrateData(
+ graphics_listen_addr_vnc='0.0.0.0',
+ graphics_listen_addr_spice='0.0.0.0',
+ serial_listen_addr='127.0.0.1',
+ target_connect_addr=None,
+ bdms=[])
+
+ dom = fakelibvirt.virDomain
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ instance = objects.Instance(**self.test_instance)
+ self.assertRaises(fakelibvirt.libvirtError,
+ drvr._live_migration_operation,
+ self.context, instance, 'dest',
+ False, migrate_data, dom, disk_paths)
+ mock_migrateToURI3.assert_called_once_with(
+ drvr._live_migration_uri('dest'), params, None)
@mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
create=True)
@@ -6871,13 +7324,14 @@ class LibvirtConnTestCase(test.NoDBTestCase):
migrate_data = objects.LibvirtLiveMigrateData(
graphics_listen_addr_vnc='1.2.3.4',
graphics_listen_addr_spice='1.2.3.4',
- serial_listen_addr='127.0.0.1')
+ serial_listen_addr='127.0.0.1',
+ target_connect_addr=None)
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, vdmock)
+ False, migrate_data, vdmock, [])
def test_live_migration_raises_exception(self):
# Confirms recover method is called when exceptions are raised.
@@ -6889,12 +7343,14 @@ class LibvirtConnTestCase(test.NoDBTestCase):
'vm_state': vm_states.ACTIVE})
instance_ref = objects.Instance(**instance_dict)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
# Preparing mocks
vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "migrateToURI2")
_bandwidth = CONF.libvirt.live_migration_bandwidth
if getattr(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None:
- vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+ vdmock.migrateToURI(drvr._live_migration_uri('dest'),
mox.IgnoreArg(),
None,
_bandwidth).AndRaise(
@@ -6902,7 +7358,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
else:
vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE
).AndReturn(FakeVirtDomain().XMLDesc(flags=0))
- vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
+ vdmock.migrateToURI2(drvr._live_migration_uri('dest'),
None,
mox.IgnoreArg(),
mox.IgnoreArg(),
@@ -6915,13 +7371,13 @@ class LibvirtConnTestCase(test.NoDBTestCase):
graphics_listen_addr_vnc='127.0.0.1',
graphics_listen_addr_spice='127.0.0.1',
serial_listen_addr='127.0.0.1',
+ target_connect_addr=None,
bdms=[])
self.mox.ReplayAll()
- drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(fakelibvirt.libvirtError,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, vdmock)
+ False, migrate_data, vdmock, [])
self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
self.assertEqual(power_state.RUNNING, instance_ref.power_state)
@@ -6933,6 +7389,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
# Preparing data
instance_ref = objects.Instance(**self.test_instance)
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+
# Preparing mocks
vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
self.mox.StubOutWithMock(vdmock, 'migrateToURI2')
@@ -6945,13 +7403,13 @@ class LibvirtConnTestCase(test.NoDBTestCase):
fakelibvirt.VIR_ERR_CONFIG_UNSUPPORTED,)
# This is the first error we hit, but since the error code is
# VIR_ERR_CONFIG_UNSUPPORTED we fall back to migrateToURI.
- vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest', None,
+ vdmock.migrateToURI2(drvr._live_migration_uri('dest'), None,
mox.IgnoreArg(), mox.IgnoreArg(), None,
_bandwidth).AndRaise(unsupported_config_error)
# This is the second and final error, which actually kills the run;
# we use TestingException to make sure it's not the same libvirtError
# as above.
- vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
+ vdmock.migrateToURI(drvr._live_migration_uri('dest'),
mox.IgnoreArg(), None,
_bandwidth).AndRaise(test.TestingException('oops'))
@@ -6960,6 +7418,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
graphics_listen_addr_vnc='0.0.0.0',
graphics_listen_addr_spice='127.0.0.1',
serial_listen_addr='127.0.0.1',
+ target_connect_addr=None,
bdms=[])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -6972,7 +7431,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertRaises(test.TestingException,
drvr._live_migration_operation,
self.context, instance_ref, 'dest',
- False, migrate_data, vdmock)
+ False, migrate_data, vdmock, [])
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists', return_value=True)
@@ -7022,8 +7481,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.assertFalse(mock_exist.called)
self.assertFalse(mock_shutil.called)
+ @mock.patch.object(host.Host, "has_min_version", return_value=False)
@mock.patch.object(fakelibvirt.Domain, "XMLDesc")
- def test_live_migration_copy_disk_paths(self, mock_xml):
+ def test_live_migration_copy_disk_paths(self, mock_xml, mock_version):
xml = """
<domain>
<name>dummy</name>
@@ -7031,17 +7491,21 @@ class LibvirtConnTestCase(test.NoDBTestCase):
<devices>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.root"/>
+ <target dev="vda"/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.shared"/>
+ <target dev="vdb"/>
<shareable/>
</disk>
<disk type="file">
<source file="/var/lib/nova/instance/123/disk.config"/>
+ <target dev="vdc"/>
<readonly/>
</disk>
<disk type="block">
<source dev="/dev/mapper/somevol"/>
+ <target dev="vdd"/>
</disk>
<disk type="network">
<source protocol="https" name="url_path">
@@ -7056,9 +7520,101 @@ class LibvirtConnTestCase(test.NoDBTestCase):
dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
guest = libvirt_guest.Guest(dom)
- paths = drvr._live_migration_copy_disk_paths(guest)
- self.assertEqual(["/var/lib/nova/instance/123/disk.root",
- "/dev/mapper/somevol"], paths)
+ paths = drvr._live_migration_copy_disk_paths(None, None, guest)
+ self.assertEqual((["/var/lib/nova/instance/123/disk.root",
+ "/dev/mapper/somevol"], ['vda', 'vdd']), paths)
+
+ @mock.patch.object(host.Host, "has_min_version", return_value=True)
+ @mock.patch('nova.virt.driver.get_block_device_info')
+ @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
+ @mock.patch.object(fakelibvirt.Domain, "XMLDesc")
+ def test_live_migration_copy_disk_paths_selective_block_migration(
+ self, mock_xml, mock_get_instance,
+ mock_block_device_info, mock_version):
+ xml = """
+ <domain>
+ <name>dummy</name>
+ <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid>
+ <devices>
+ <disk type="file">
+ <source file="/var/lib/nova/instance/123/disk.root"/>
+ <target dev="vda"/>
+ </disk>
+ <disk type="file">
+ <source file="/var/lib/nova/instance/123/disk.shared"/>
+ <target dev="vdb"/>
+ </disk>
+ <disk type="file">
+ <source file="/var/lib/nova/instance/123/disk.config"/>
+ <target dev="vdc"/>
+ </disk>
+ <disk type="block">
+ <source dev="/dev/mapper/somevol"/>
+ <target dev="vdd"/>
+ </disk>
+ <disk type="network">
+ <source protocol="https" name="url_path">
+ <host name="hostname" port="443"/>
+ </source>
+ </disk>
+ </devices>
+ </domain>"""
+ mock_xml.return_value = xml
+ instance = objects.Instance(**self.test_instance)
+ instance.root_device_name = '/dev/vda'
+ block_device_info = {
+ 'swap': {
+ 'disk_bus': u'virtio',
+ 'swap_size': 10,
+ 'device_name': u'/dev/vdc'
+ },
+ 'root_device_name': u'/dev/vda',
+ 'ephemerals': [{
+ 'guest_format': u'ext3',
+ 'device_name': u'/dev/vdb',
+ 'disk_bus': u'virtio',
+ 'device_type': u'disk',
+ 'size': 1
+ }],
+ 'block_device_mapping': [{
+ 'guest_format': None,
+ 'boot_index': None,
+ 'mount_device': u'/dev/vdd',
+ 'connection_info': {
+ u'driver_volume_type': u'iscsi',
+ 'serial': u'147df29f-aec2-4851-b3fe-f68dad151834',
+ u'data': {
+ u'access_mode': u'rw',
+ u'target_discovered': False,
+ u'encrypted': False,
+ u'qos_specs': None,
+ u'target_iqn': u'iqn.2010-10.org.openstack:'
+ u'volume-147df29f-aec2-4851-b3fe-'
+ u'f68dad151834',
+ u'target_portal': u'10.102.44.141:3260', u'volume_id':
+ u'147df29f-aec2-4851-b3fe-f68dad151834',
+ u'target_lun': 1,
+ u'auth_password': u'cXELT66FngwzTwpf',
+ u'auth_username': u'QbQQjj445uWgeQkFKcVw',
+ u'auth_method': u'CHAP'
+ }
+ },
+ 'disk_bus': None,
+ 'device_type': None,
+ 'delete_on_termination': False
+ }]
+ }
+ mock_block_device_info.return_value = block_device_info
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ dom = fakelibvirt.Domain(drvr._get_connection(), xml, False)
+ guest = libvirt_guest.Guest(dom)
+ return_value = drvr._live_migration_copy_disk_paths(context, instance,
+ guest)
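+ # the cinder-backed volume (vdd) must not be copied during a
+ # selective block migration; only the image-backed file disks are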
+ expected = (['/var/lib/nova/instance/123/disk.root',
+ '/var/lib/nova/instance/123/disk.shared',
+ '/var/lib/nova/instance/123/disk.config'],
+ ['vda', 'vdb', 'vdc'])
+ self.assertEqual(expected, return_value)
@mock.patch.object(libvirt_driver.LibvirtDriver,
"_live_migration_copy_disk_paths")
@@ -7108,6 +7664,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
side_effect=lambda x: eventlet.sleep(0))
@mock.patch.object(host.DomainJobInfo, "for_domain")
@mock.patch.object(objects.Instance, "save")
+ @mock.patch.object(objects.Migration, "save")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
@mock.patch.object(fakelibvirt.virDomain, "abortJob")
def _test_live_migration_monitoring(self,
@@ -7117,6 +7674,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
mock_abort,
mock_running,
mock_save,
+ mock_mig_save,
mock_job_info,
mock_sleep,
mock_time):
@@ -7154,7 +7712,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
mock_time.side_effect = fake_time
dest = mock.sentinel.migrate_dest
- migrate_data = mock.sentinel.migrate_data
+ migration = objects.Migration(context=self.context, id=1)
+ migrate_data = objects.LibvirtLiveMigrateData(
+ migration=migration)
fake_post_method = mock.MagicMock()
fake_recover_method = mock.MagicMock()
@@ -7168,6 +7728,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
finish_event,
[])
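+ # the monitor should persist migration progress via
+ # objects.Migration.save()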
+ mock_mig_save.assert_called_with()
+
if expect_result == self.EXPECT_SUCCESS:
self.assertFalse(fake_recover_method.called,
'Recover method called when success expected')
@@ -7437,8 +7999,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
"<domain><name>demo</name></domain>", True)
guest = libvirt_guest.Guest(dom)
migrate_data = {}
- disk_paths = ['/dev/vda', '/dev/vdb']
- mock_copy_disk_path.return_value = disk_paths
+ disks_to_copy = (['/some/path/one', '/test/path/two'],
+ ['vda', 'vdb'])
+ mock_copy_disk_path.return_value = disks_to_copy
mock_guest.return_value = guest
@@ -7451,7 +8014,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
drvr._live_migration(self.context, instance, "fakehost",
fake_post, fake_recover, True,
migrate_data)
- mock_copy_disk_path.assert_called_once_with(guest)
+ mock_copy_disk_path.assert_called_once_with(self.context, instance,
+ guest)
class AnyEventletEvent(object):
def __eq__(self, other):
@@ -7460,11 +8024,11 @@ class LibvirtConnTestCase(test.NoDBTestCase):
mock_thread.assert_called_once_with(
drvr._live_migration_operation,
self.context, instance, "fakehost", True,
- migrate_data, dom)
+ migrate_data, dom, disks_to_copy[1])
mock_monitor.assert_called_once_with(
self.context, instance, guest, "fakehost",
fake_post, fake_recover, True,
- migrate_data, dom, AnyEventletEvent(), disk_paths)
+ migrate_data, dom, AnyEventletEvent(), disks_to_copy[0])
def _do_test_create_images_and_backing(self, disk_type):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -7482,7 +8046,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
fallback_from_host=None)
self.mox.ReplayAll()
- self.stubs.Set(os.path, 'exists', lambda *args: False)
+ self.stub_out('os.path.exists', lambda *args: False)
drvr._create_images_and_backing(self.context, self.test_instance,
"/fake/instance/dir", [disk_info])
@@ -7653,7 +8217,37 @@ class LibvirtConnTestCase(test.NoDBTestCase):
drvr._create_images_and_backing(self.context, self.test_instance,
"/fake/instance/dir", None)
+ def _generate_target_ret(self, target_connect_addr=None):
+ target_ret = {
+ 'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'},
+ 'target_connect_addr': target_connect_addr,
+ 'serial_listen_addr': '127.0.0.1',
+ 'volume': {
+ '12345': {'connection_info': {u'data': {'device_path':
+ u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
+ 'serial': '12345'},
+ 'disk_info': {'bus': 'scsi',
+ 'dev': 'sda',
+ 'type': 'disk'}},
+ '67890': {'connection_info': {u'data': {'device_path':
+ u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
+ 'serial': '67890'},
+ 'disk_info': {'bus': 'scsi',
+ 'dev': 'sdb',
+ 'type': 'disk'}}}}
+ return target_ret
+
def test_pre_live_migration_works_correctly_mocked(self):
+ self._test_pre_live_migration_works_correctly_mocked()
+
+ def test_pre_live_migration_with_transport_ip(self):
+ self.flags(live_migration_inbound_addr='127.0.0.2',
+ group='libvirt')
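+ # live_migration_inbound_addr should be surfaced to the source as
+ # target_connect_addr in the pre_live_migration result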
+ target_ret = self._generate_target_ret('127.0.0.2')
+ self._test_pre_live_migration_works_correctly_mocked(target_ret)
+
+ def _test_pre_live_migration_works_correctly_mocked(self,
+ target_ret=None):
# Creating testdata
vol = {'block_device_mapping': [
{'connection_info': {'serial': '12345', u'data':
@@ -7706,23 +8300,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
result = drvr.pre_live_migration(
c, instance, vol, nw_info, None,
migrate_data=migrate_data)
-
- target_ret = {
- 'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'},
- 'serial_listen_addr': '127.0.0.1',
- 'volume': {
- '12345': {'connection_info': {u'data': {'device_path':
- u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
- 'serial': '12345'},
- 'disk_info': {'bus': 'scsi',
- 'dev': 'sda',
- 'type': 'disk'}},
- '67890': {'connection_info': {u'data': {'device_path':
- u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
- 'serial': '67890'},
- 'disk_info': {'bus': 'scsi',
- 'dev': 'sdb',
- 'type': 'disk'}}}}
+ if not target_ret:
+ target_ret = self._generate_target_ret()
self.assertEqual(
result.to_legacy_dict(
pre_migration_result=True)['pre_live_migration_result'],
@@ -7780,6 +8359,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
)
self.assertEqual({'graphics_listen_addrs': {'spice': '127.0.0.1',
'vnc': '127.0.0.1'},
+ 'target_connect_addr': None,
'serial_listen_addr': '127.0.0.1',
'volume': {}}, res_data['pre_live_migration_result'])
@@ -7838,6 +8418,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
target_ret = {
'graphics_listen_addrs': {'spice': '127.0.0.1',
'vnc': '127.0.0.1'},
+ 'target_connect_addr': None,
'serial_listen_addr': '127.0.0.1',
'volume': {
'12345': {'connection_info': {u'data': {'device_path':
@@ -8245,7 +8826,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance_ref = self.test_instance
instance_ref['image_ref'] = 123456 # we send an int to test sha1 call
instance = objects.Instance(**instance_ref)
- image_meta = self.test_image_meta
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
# Mock out the get_info method of the LibvirtDriver so that the polling
# in the spawn method of the LibvirtDriver returns immediately
@@ -8305,12 +8886,14 @@ class LibvirtConnTestCase(test.NoDBTestCase):
self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
self.stubs.Set(drvr, 'get_info', fake_get_info)
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+
drvr.spawn(self.context, instance,
- self.test_image_meta, [], None)
+ image_meta, [], None)
self.assertTrue(self.create_image_called)
drvr.spawn(self.context, instance,
- self.test_image_meta, [], None)
+ image_meta, [], None)
self.assertTrue(self.create_image_called)
def test_spawn_from_volume_calls_cache(self):
@@ -8339,6 +8922,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
'boot_index': 0}
]
}
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
# Volume-backed instance created without image
instance_ref = self.test_instance
@@ -8348,7 +8932,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance = objects.Instance(**instance_ref)
drvr.spawn(self.context, instance,
- self.test_image_meta, [], None,
+ image_meta, [], None,
block_device_info=block_device_info)
self.assertFalse(self.cache_called_for_disk)
@@ -8360,7 +8944,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance = objects.Instance(**instance_ref)
drvr.spawn(self.context, instance,
- self.test_image_meta, [], None,
+ image_meta, [], None,
block_device_info=block_device_info)
self.assertFalse(self.cache_called_for_disk)
@@ -8369,7 +8953,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance_ref['uuid'] = uuidutils.generate_uuid()
instance = objects.Instance(**instance_ref)
drvr.spawn(self.context, instance,
- self.test_image_meta, [], None)
+ image_meta, [], None)
self.assertTrue(self.cache_called_for_disk)
def test_start_lxc_from_volume(self):
@@ -8437,7 +9021,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance_ref['ephemeral_gb'] = 0
instance_ref['uuid'] = uuidutils.generate_uuid()
inst_obj = objects.Instance(**instance_ref)
- image_meta = {}
+ image_meta = objects.ImageMeta.from_dict({})
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with test.nested(
@@ -8503,8 +9087,10 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance['pci_devices'] = objects.PciDeviceList(
objects=[objects.PciDevice(address='0000:00:00.0')])
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+
drvr.spawn(self.context, instance,
- self.test_image_meta, [], None)
+ image_meta, [], None)
def test_chown_disk_config_for_instance(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -8835,6 +9421,22 @@ class LibvirtConnTestCase(test.NoDBTestCase):
host='fake-source-host',
receive=True)
+ @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
+ def test_create_image_resize_snap_backend(self, mock_cache):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr.image_backend = mock.Mock()
+ drvr.image_backend.image.return_value = drvr.image_backend
+ instance = objects.Instance(**self.test_instance)
+ instance.task_state = task_states.RESIZE_FINISH
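+ # while finishing a resize the backend should snapshot the disk so
+ # that a revert can roll back to the pre-resize state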
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
+ instance,
+ image_meta)
+ with mock.patch.object(drvr.image_backend, 'create_snap') as mock_crt:
+ drvr._create_image(self.context, instance, disk_info['mapping'])
+ mock_crt.assert_called_once_with(
+ libvirt_utils.RESIZE_SNAPSHOT_NAME)
+
@mock.patch.object(utils, 'execute')
def test_create_ephemeral_specified_fs(self, mock_exec):
self.flags(default_ephemeral_format='ext3')
@@ -9674,8 +10276,24 @@ class LibvirtConnTestCase(test.NoDBTestCase):
network_info = _fake_network_info(self, 1)
network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ # adjust the fake network_info so that the correct get_config
+ # function is executed (the vif driver's get_config_hw_veb, as for
+ # a real SR-IOV vif) and, most importantly, set the pci_slot, which
+ # is translated to cfg.source_dev, then to PciDevice.address, and
+ # finally passed to _detach_pci_devices
+ network_info[0]['profile'] = dict(pci_slot="0000:00:00.0")
+ network_info[0]['type'] = "hw_veb"
+ network_info[0]['details'] = dict(vlan="2145")
instance.info_cache = objects.InstanceInfoCache(
network_info=network_info)
+ # fill the instance's pci_devices so that
+ # pci_manager.get_instance_pci_devs does not return an empty list,
+ # which would make the assertion for detachDeviceFlags fail
+ instance.pci_devices = objects.PciDeviceList()
+ instance.pci_devices.objects = [
+ objects.PciDevice(address='0000:00:00.0', request_id=None)
+ ]
domain = FakeVirtDomain()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -9686,6 +10304,61 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance.system_metadata)
self.assertTrue(mock_detachDeviceFlags.called)
+ @mock.patch.object(host.Host, 'has_min_version', return_value=True)
+ @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
+ def test_detach_duplicate_mac_sriov_ports(self,
+ mock_detachDeviceFlags,
+ mock_has_min_version):
+ instance = objects.Instance(**self.test_instance)
+
+ network_info = _fake_network_info(self, 2)
+
+ for network_info_inst in network_info:
+ network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT
+ network_info_inst['type'] = "hw_veb"
+ network_info_inst['details'] = dict(vlan="2145")
+ network_info_inst['address'] = "fa:16:3e:96:2a:48"
+
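+ # both ports share one MAC address; give them distinct PCI slots so
+ # the detach path can tell them apart by PCI address, not MAC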
+ network_info[0]['profile'] = dict(pci_slot="0000:00:00.0")
+ network_info[1]['profile'] = dict(pci_slot="0000:00:00.1")
+
+ instance.info_cache = objects.InstanceInfoCache(
+ network_info=network_info)
+ # fill the instance's pci_devices so that
+ # pci_manager.get_instance_pci_devs does not return an empty list,
+ # which would make the assertion for detachDeviceFlags fail
+ instance.pci_devices = objects.PciDeviceList()
+ instance.pci_devices.objects = [
+ objects.PciDevice(address='0000:00:00.0', request_id=None),
+ objects.PciDevice(address='0000:00:00.1', request_id=None)
+ ]
+
+ domain = FakeVirtDomain()
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ guest = libvirt_guest.Guest(domain)
+
+ drvr._detach_sriov_ports(self.context, instance, guest)
+
+ expected_xml = [
+ ('<hostdev mode="subsystem" type="pci" managed="yes">\n'
+ ' <source>\n'
+ ' <address bus="0x00" domain="0x0000" \
+ function="0x0" slot="0x00"/>\n'
+ ' </source>\n'
+ '</hostdev>\n'),
+ ('<hostdev mode="subsystem" type="pci" managed="yes">\n'
+ ' <source>\n'
+ ' <address bus="0x00" domain="0x0000" \
+ function="0x1" slot="0x00"/>\n'
+ ' </source>\n'
+ '</hostdev>\n')
+ ]
+
+ mock_detachDeviceFlags.assert_has_calls([
+ mock.call(expected_xml[0], flags=1),
+ mock.call(expected_xml[1], flags=1)
+ ])
+
def test_resume(self):
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
@@ -11779,9 +12452,8 @@ class LibvirtConnTestCase(test.NoDBTestCase):
network_info = _fake_network_info(self, 1)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
- fake_image_meta = {'id': instance['image_ref']}
- fake_image_meta_obj = objects.ImageMeta.from_dict(
- fake_image_meta)
+ fake_image_meta = objects.ImageMeta.from_dict(
+ {'id': instance['image_ref']})
if method_name == "attach_interface":
self.mox.StubOutWithMock(drvr.firewall_driver,
@@ -11789,7 +12461,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
drvr.firewall_driver.setup_basic_filtering(instance, network_info)
expected = drvr.vif_driver.get_config(instance, network_info[0],
- fake_image_meta_obj,
+ fake_image_meta,
instance.get_flavor(),
CONF.libvirt.virt_type,
drvr._host)
@@ -11828,7 +12500,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
def test_default_root_device_name(self):
instance = {'uuid': 'fake_instance'}
- image_meta = {'id': 'fake'}
+ image_meta = objects.ImageMeta.from_dict({'id': 'fake'})
root_bdm = {'source_type': 'image',
'destination_type': 'volume',
'image_id': 'fake_id'}
@@ -11839,16 +12511,16 @@ class LibvirtConnTestCase(test.NoDBTestCase):
blockinfo.get_disk_bus_for_device_type(instance,
'fake_libvirt_type',
- mox.IsA(objects.ImageMeta),
+ image_meta,
'disk').InAnyOrder().\
AndReturn('virtio')
blockinfo.get_disk_bus_for_device_type(instance,
'fake_libvirt_type',
- mox.IsA(objects.ImageMeta),
+ image_meta,
'cdrom').InAnyOrder().\
AndReturn('ide')
blockinfo.get_root_info(instance, 'fake_libvirt_type',
- mox.IsA(objects.ImageMeta), root_bdm,
+ image_meta, root_bdm,
'virtio', 'ide').AndReturn({'dev': 'vda'})
self.mox.ReplayAll()
@@ -11982,7 +12654,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
image_meta=None, rescue=None,
block_device_info=None, write_to_disk=False):
if image_meta is None:
- image_meta = {}
+ image_meta = objects.ImageMeta.from_dict({})
conf = drvr._get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info)
self.resultXML = conf.to_xml()
@@ -12554,10 +13226,12 @@ class LibvirtConnTestCase(test.NoDBTestCase):
disconnect_volume.assert_called_once_with(old_connection_info, 'vdb')
volume_save.assert_called_once_with()
- def test_live_snapshot(self):
+ def _test_live_snapshot(self, can_quiesce=False, require_quiesce=False):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
-
mock_dom = mock.MagicMock()
+ test_image_meta = self.test_image_meta.copy()
+ if require_quiesce:
+ test_image_meta = {'properties': {'os_require_quiesce': 'yes'}}
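+ # os_require_quiesce forces a quiesce attempt; when quiesce is not
+ # supported the snapshot is expected to fail (see the *_fails test)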
with test.nested(
mock.patch.object(drvr._conn, 'defineXML', create=True),
@@ -12566,8 +13240,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
mock.patch.object(fake_libvirt_utils, 'chown'),
mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
+ mock.patch.object(drvr, '_set_quiesced')
) as (mock_define, mock_size, mock_backing, mock_create_cow,
- mock_chown, mock_snapshot):
+ mock_chown, mock_snapshot, mock_quiesce):
xmldoc = "<domain/>"
srcfile = "/first/path"
@@ -12581,7 +13256,12 @@ class LibvirtConnTestCase(test.NoDBTestCase):
mock_backing.return_value = bckfile
guest = libvirt_guest.Guest(mock_dom)
- image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
+ if not can_quiesce:
+ mock_quiesce.side_effect = (
+ exception.InstanceQuiesceNotSupported(
+ instance_id=self.test_instance['id'], reason='test'))
+
+ image_meta = objects.ImageMeta.from_dict(test_image_meta)
drvr._live_snapshot(self.context, self.test_instance, guest,
srcfile, dstfile, "qcow2", "qcow2", image_meta)
@@ -12602,6 +13282,25 @@ class LibvirtConnTestCase(test.NoDBTestCase):
mock_snapshot.assert_called_once_with(dltfile, "qcow2",
dstfile, "qcow2")
mock_define.assert_called_once_with(xmldoc)
+ mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
+ mock.ANY, True)
+ if can_quiesce:
+ mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
+ mock.ANY, False)
+
+ def test_live_snapshot(self):
+ self._test_live_snapshot()
+
+ def test_live_snapshot_with_quiesce(self):
+ self._test_live_snapshot(can_quiesce=True)
+
+ def test_live_snapshot_with_require_quiesce(self):
+ self._test_live_snapshot(can_quiesce=True, require_quiesce=True)
+
+ def test_live_snapshot_with_require_quiesce_fails(self):
+ self.assertRaises(exception.InstanceQuiesceNotSupported,
+ self._test_live_snapshot,
+ can_quiesce=False, require_quiesce=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
def test_live_migration_hostname_valid(self, mock_lm):
@@ -12624,6 +13323,12 @@ class LibvirtConnTestCase(test.NoDBTestCase):
lambda x: x,
lambda x: x)
+ @mock.patch.object(libvirt_driver.LibvirtDriver, "pause")
+ def test_live_migration_force_complete(self, pause):
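+ # forced completion is implemented by pausing the guest, which
+ # should let the migration converge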
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr.live_migration_force_complete(self.test_instance)
+ pause.assert_called_once_with(self.test_instance)
+
@mock.patch('os.path.exists', return_value=True)
@mock.patch('tempfile.mkstemp')
@mock.patch('os.close', return_value=None)
@@ -12953,6 +13658,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
inst['key_data'] = 'ABCDEFG'
inst['system_metadata'] = {}
inst['metadata'] = {}
+ inst['task_state'] = None
inst.update(params)
@@ -13009,7 +13715,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
self.stubs.Set(self.drvr, '_is_storage_shared_with',
fake_is_storage_shared)
self.stubs.Set(utils, 'execute', fake_execute)
- self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+ self.stub_out('os.path.exists', fake_os_path_exists)
ins_ref = self._create_instance()
flavor = {'root_gb': 10, 'ephemeral_gb': 20}
@@ -13509,9 +14215,10 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
migration.dest_compute = 'fake-dest-compute'
migration.source_node = 'fake-source-node'
migration.dest_node = 'fake-dest-node'
+ image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
self.drvr.finish_migration(
context.get_admin_context(), migration, ins_ref,
- self._disk_info(), [], self.test_image_meta,
+ self._disk_info(), [], image_meta,
resize_instance, None, power_on)
self.assertTrue(self.fake_create_domain_called)
self.assertEqual(
@@ -13609,6 +14316,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
context = 'fake_context'
instance = self._create_instance()
+ self.mox.StubOutWithMock(imagebackend.Backend, 'image')
self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
self.mox.StubOutWithMock(os.path, 'exists')
self.mox.StubOutWithMock(shutil, 'rmtree')
@@ -13632,6 +14340,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
shutil.rmtree('/fake/foo')
utils.execute('mv', '/fake/foo_resize', '/fake/foo')
+ imagebackend.Backend.image(mox.IgnoreArg(), 'disk').AndReturn(
+ fake_imagebackend.Raw())
+
self.mox.ReplayAll()
self.drvr.finish_revert_migration(context, instance, [])
@@ -13669,6 +14380,45 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
side_effect=fake_get_guest_xml)):
drvr.finish_revert_migration('', instance, None, power_on=False)
+ def test_finish_revert_migration_snap_backend(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr.image_backend = mock.Mock()
+ drvr.image_backend.image.return_value = drvr.image_backend
+ ins_ref = self._create_instance()
+
+ with test.nested(
+ mock.patch.object(utils, 'get_image_from_system_metadata'),
+ mock.patch.object(drvr, '_create_domain_and_network'),
+ mock.patch.object(drvr, '_get_guest_xml')) as (
+ mock_image, mock_cdn, mock_ggx):
+ mock_image.return_value = {'disk_format': 'raw'}
+ drvr.finish_revert_migration('', ins_ref, None, power_on=False)
+
+ drvr.image_backend.rollback_to_snap.assert_called_once_with(
+ libvirt_utils.RESIZE_SNAPSHOT_NAME)
+ drvr.image_backend.remove_snap.assert_called_once_with(
+ libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)
+
+ def test_finish_revert_migration_snap_backend_snapshot_not_found(self):
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr.image_backend = mock.Mock()
+ drvr.image_backend.image.return_value = drvr.image_backend
+ ins_ref = self._create_instance()
+
+ with test.nested(
+ mock.patch.object(rbd_utils, 'RBDDriver'),
+ mock.patch.object(utils, 'get_image_from_system_metadata'),
+ mock.patch.object(drvr, '_create_domain_and_network'),
+ mock.patch.object(drvr, '_get_guest_xml')) as (
+ mock_rbd, mock_image, mock_cdn, mock_ggx):
+ mock_image.return_value = {'disk_format': 'raw'}
+ drvr.image_backend.rollback_to_snap.side_effect = (
+ exception.SnapshotNotFound(snapshot_id='testing'))
+ drvr.finish_revert_migration('', ins_ref, None, power_on=False)
+
+ drvr.image_backend.remove_snap.assert_called_once_with(
+ libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)
+
def test_cleanup_failed_migration(self):
self.mox.StubOutWithMock(shutil, 'rmtree')
shutil.rmtree('/fake/inst')
@@ -13693,8 +14443,9 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
def fake_os_path_exists(path):
return True
- self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+ self.stub_out('os.path.exists', fake_os_path_exists)
+ self.mox.StubOutWithMock(imagebackend.Backend, 'image')
self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
self.mox.StubOutWithMock(utils, 'execute')
@@ -13702,6 +14453,8 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
forceold=True).AndReturn('/fake/inst')
utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
attempts=5)
+ imagebackend.Backend.image(ins_ref, 'disk').AndReturn(
+ fake_imagebackend.Raw())
self.mox.ReplayAll()
self.drvr._cleanup_resize(ins_ref,
@@ -13724,7 +14477,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
def fake_unfilter_instance(instance, network_info):
pass
- self.stubs.Set(os.path, 'exists', fake_os_path_exists)
+ self.stub_out('os.path.exists', fake_os_path_exists)
self.stubs.Set(self.drvr, '_undefine_domain',
fake_undefine_domain)
self.stubs.Set(self.drvr, 'unplug_vifs',
@@ -13732,6 +14485,7 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
self.stubs.Set(self.drvr.firewall_driver,
'unfilter_instance', fake_unfilter_instance)
+ self.mox.StubOutWithMock(imagebackend.Backend, 'image')
self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
self.mox.StubOutWithMock(utils, 'execute')
@@ -13739,11 +14493,36 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
forceold=True).AndReturn('/fake/inst')
utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
attempts=5)
+ imagebackend.Backend.image(ins_ref, 'disk').AndReturn(
+ fake_imagebackend.Raw())
self.mox.ReplayAll()
self.drvr._cleanup_resize(ins_ref,
_fake_network_info(self, 1))
+ def test_cleanup_resize_snap_backend(self):
+ CONF.set_override('policy_dirs', [], group='oslo_policy')
+ ins_ref = self._create_instance({'host': CONF.host})
+ drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
+ drvr.image_backend = mock.Mock()
+ drvr.image_backend.image.return_value = drvr.image_backend
+
+ with test.nested(
+ mock.patch.object(os.path, 'exists'),
+ mock.patch.object(libvirt_utils, 'get_instance_path'),
+ mock.patch.object(utils, 'execute'),
+ mock.patch.object(drvr.image_backend, 'remove_snap')) as (
+ mock_exists, mock_get_path, mock_exec, mock_remove):
+ mock_exists.return_value = True
+ mock_get_path.return_value = '/fake/inst'
+
+ drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1))
+ mock_get_path.assert_called_once_with(ins_ref, forceold=True)
+ mock_exec.assert_called_once_with('rm', '-rf', '/fake/inst_resize',
+ delay_on_retry=True, attempts=5)
+ mock_remove.assert_called_once_with(
+ libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)
+
def test_get_instance_disk_info_exception(self):
instance = self._create_instance()
@@ -13938,12 +14717,11 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
self.drvr.firewall_driver.setup_basic_filtering(
instance, [network_info[0]])
- fake_image_meta = {'id': instance.image_ref}
- fake_image_meta_obj = objects.ImageMeta.from_dict(
- fake_image_meta)
+ fake_image_meta = objects.ImageMeta.from_dict(
+ {'id': instance.image_ref})
expected = self.drvr.vif_driver.get_config(
- instance, network_info[0], fake_image_meta_obj, instance.flavor,
+ instance, network_info[0], fake_image_meta, instance.flavor,
CONF.libvirt.virt_type, self.drvr._host)
self.mox.StubOutWithMock(self.drvr.vif_driver,
@@ -14003,6 +14781,30 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
'detach_interface', power_state.SHUTDOWN,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
+ @mock.patch('nova.virt.libvirt.driver.LOG')
+ def test_detach_interface_device_not_found(self, mock_log):
+ # Asserts that we don't log an error when the interface device is not
+ # found on the guest after a libvirt error during detach.
+ instance = self._create_instance()
+ vif = _fake_network_info(self, 1)[0]
+ guest = mock.Mock(spec='nova.virt.libvirt.guest.Guest')
+ guest.get_power_state = mock.Mock()
+ self.drvr._host.get_guest = mock.Mock(return_value=guest)
+ self.drvr.vif_driver = mock.Mock()
+ error = fakelibvirt.libvirtError(
+ 'no matching network device was found')
+ error.err = (fakelibvirt.VIR_ERR_OPERATION_FAILED,)
+ guest.detach_device = mock.Mock(side_effect=error)
+ # mock get_interface_by_mac so that it does not find the interface
+ guest.get_interface_by_mac = mock.Mock(return_value=None)
+ self.drvr.detach_interface(instance, vif)
+ guest.get_interface_by_mac.assert_called_once_with(vif['address'])
+ # an error shouldn't be logged, but a warning should be logged
+ self.assertFalse(mock_log.error.called)
+ self.assertEqual(1, mock_log.warning.call_count)
+ self.assertIn('the device is no longer found on the guest',
+ six.text_type(mock_log.warning.call_args[0]))
+
def test_rescue(self):
instance = self._create_instance({'config_drive': None})
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
@@ -14050,10 +14852,11 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
project_id=mox.IgnoreArg(),
size=None, user_id=mox.IgnoreArg())
- image_meta = {'id': 'fake', 'name': 'fake'}
+ image_meta = objects.ImageMeta.from_dict(
+ {'id': 'fake', 'name': 'fake'})
self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
network_info, mox.IgnoreArg(),
- mox.IsA(objects.ImageMeta),
+ image_meta,
rescue=mox.IgnoreArg(),
write_to_disk=mox.IgnoreArg()
).AndReturn(dummyxml)
@@ -14172,10 +14975,11 @@ class LibvirtDriverTestCase(test.NoDBTestCase):
content=mox.IgnoreArg(),
extra_md=mox.IgnoreArg(),
network_info=mox.IgnoreArg())
- image_meta = {'id': 'fake', 'name': 'fake'}
+ image_meta = objects.ImageMeta.from_dict(
+ {'id': 'fake', 'name': 'fake'})
self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
network_info, mox.IgnoreArg(),
- mox.IsA(objects.ImageMeta),
+ image_meta,
rescue=mox.IgnoreArg(),
write_to_disk=mox.IgnoreArg()
).AndReturn(dummyxml)
@@ -15831,6 +16635,84 @@ class LibvirtSnapshotTests(_BaseSnapshotTests):
self._test_snapshot(disk_format='qcow2',
extra_properties=extra_properties)
+ @mock.patch.object(rbd_utils, 'RBDDriver')
+ @mock.patch.object(rbd_utils, 'rbd')
+ def test_raw_with_rbd_clone(self, mock_rbd, mock_driver):
+ self.flags(images_type='rbd', group='libvirt')
+ rbd = mock_driver.return_value
+ rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
+ rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
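+ # parent_info and parse_url succeeding mean the image lives in the
+ # local ceph cluster, so a direct RBD clone should be attempted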
+ with mock.patch.object(fake_libvirt_utils, 'find_disk',
+ return_value=('rbd://some/fake/rbd/image',
+ 'raw')):
+ with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'):
+ self._test_snapshot(disk_format='raw')
+ rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool')
+ rbd.flatten.assert_called_with(mock.ANY, pool='test-pool')
+
+ @mock.patch.object(rbd_utils, 'RBDDriver')
+ @mock.patch.object(rbd_utils, 'rbd')
+ def test_raw_with_rbd_clone_graceful_fallback(self, mock_rbd, mock_driver):
+ self.flags(images_type='rbd', group='libvirt')
+ rbd = mock_driver.return_value
+ rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable(
+ image_id='fake_id', reason='rbd testing'))
+ with test.nested(
+ mock.patch.object(libvirt_driver.imagebackend.images,
+ 'convert_image',
+ side_effect=_fake_convert_image),
+ mock.patch.object(fake_libvirt_utils, 'find_disk',
+ return_value=('rbd://some/fake/rbd/image',
+ 'raw')),
+ mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')):
+ self._test_snapshot(disk_format='raw')
+ self.assertFalse(rbd.clone.called)
+
+ @mock.patch.object(rbd_utils, 'RBDDriver')
+ @mock.patch.object(rbd_utils, 'rbd')
+ def test_raw_with_rbd_clone_eperm(self, mock_rbd, mock_driver):
+ self.flags(images_type='rbd', group='libvirt')
+ rbd = mock_driver.return_value
+ rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
+ rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
+ rbd.clone = mock.Mock(side_effect=exception.Forbidden(
+ image_id='fake_id', reason='rbd testing'))
+ with test.nested(
+ mock.patch.object(libvirt_driver.imagebackend.images,
+ 'convert_image',
+ side_effect=_fake_convert_image),
+ mock.patch.object(fake_libvirt_utils, 'find_disk',
+ return_value=('rbd://some/fake/rbd/image',
+ 'raw')),
+ mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')):
+ self._test_snapshot(disk_format='raw')
+ # Ensure that the direct_snapshot attempt was cleaned up
+ rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=False,
+ pool='b', force=True)
+
+ @mock.patch.object(rbd_utils, 'RBDDriver')
+ @mock.patch.object(rbd_utils, 'rbd')
+ def test_raw_with_rbd_clone_post_process_fails(self, mock_rbd,
+ mock_driver):
+ self.flags(images_type='rbd', group='libvirt')
+ rbd = mock_driver.return_value
+ rbd.parent_info = mock.Mock(return_value=['test-pool', '', ''])
+ rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd'])
+ with test.nested(
+ mock.patch.object(fake_libvirt_utils, 'find_disk',
+ return_value=('rbd://some/fake/rbd/image',
+ 'raw')),
+ mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'),
+ mock.patch.object(self.image_service, 'update',
+ side_effect=test.TestingException)):
+ self.assertRaises(test.TestingException, self._test_snapshot,
+ disk_format='raw')
+ rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool')
+ rbd.flatten.assert_called_with(mock.ANY, pool='test-pool')
+ # Ensure that the direct_snapshot attempt was cleaned up
+ rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=True,
+ pool='b', force=True)
+
class LXCSnapshotTests(LibvirtSnapshotTests):
"""Repeat all of the Libvirt snapshot tests, but with LXC enabled"""
diff --git a/nova/tests/unit/virt/libvirt/test_fakelibvirt.py b/nova/tests/unit/virt/libvirt/test_fakelibvirt.py
index d3e14fe152..2f2f61aeab 100644
--- a/nova/tests/unit/virt/libvirt/test_fakelibvirt.py
+++ b/nova/tests/unit/virt/libvirt/test_fakelibvirt.py
@@ -12,12 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from nova import test
-
from lxml import etree
import six
from nova.compute import arch
+from nova import test
import nova.tests.unit.virt.libvirt.fakelibvirt as libvirt
diff --git a/nova/tests/unit/virt/libvirt/test_firewall.py b/nova/tests/unit/virt/libvirt/test_firewall.py
index 6ab97f7adc..6a203a1914 100644
--- a/nova/tests/unit/virt/libvirt/test_firewall.py
+++ b/nova/tests/unit/virt/libvirt/test_firewall.py
@@ -33,7 +33,6 @@ from nova.tests.unit.virt.libvirt import fakelibvirt
from nova.virt.libvirt import firewall
from nova.virt.libvirt import host
from nova.virt import netutils
-from nova.virt import virtapi
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_stub_out_get_nw_info = fake_network.stub_out_nw_api_get_instance_nw_info
@@ -81,11 +80,6 @@ class NWFilterFakes(object):
return True
-class FakeVirtAPI(virtapi.VirtAPI):
- def provider_fw_rule_get_all(self, context):
- return []
-
-
class IptablesFirewallTestCase(test.NoDBTestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
@@ -93,7 +87,6 @@ class IptablesFirewallTestCase(test.NoDBTestCase):
self.useFixture(fakelibvirt.FakeLibvirtFixture())
self.fw = firewall.IptablesFirewallDriver(
- FakeVirtAPI(),
host=host.Host("qemu:///system"))
in_rules = [
@@ -431,74 +424,6 @@ class IptablesFirewallTestCase(test.NoDBTestCase):
# should undefine just the instance filter
self.assertEqual(original_filter_count - len(fakefilter.filters), 1)
- @mock.patch.object(FakeVirtAPI, "provider_fw_rule_get_all")
- @mock.patch.object(objects.SecurityGroupRuleList, "get_by_instance")
- def test_provider_firewall_rules(self, mock_secrule, mock_fwrules):
- mock_secrule.return_value = objects.SecurityGroupRuleList()
-
- # setup basic instance data
- instance_ref = self._create_instance_ref()
- instance_ref.security_groups = objects.SecurityGroupList()
-
- # FRAGILE: peeks at how the firewall names chains
- chain_name = 'inst-%s' % instance_ref['id']
-
- # create a firewall via setup_basic_filtering like libvirt_conn.spawn
- # should have a chain with 0 rules
- network_info = _fake_network_info(self, 1)
- self.fw.setup_basic_filtering(instance_ref, network_info)
- self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(0, len(rules))
-
- # add a rule and send the update message, check for 1 rule
- mock_fwrules.return_value = [{'protocol': 'tcp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535}]
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(1, len(rules))
-
- # Add another, refresh, and make sure number of rules goes to two
- mock_fwrules.return_value = [{'protocol': 'tcp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535},
- {'protocol': 'udp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535}]
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(2, len(rules))
-
- # create the instance filter and make sure it has a jump rule
- self.fw.prepare_instance_filter(instance_ref, network_info)
- self.fw.apply_instance_filter(instance_ref, network_info)
- inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == chain_name]
- jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
- provjump_rules = []
- # IptablesTable doesn't make rules unique internally
- for rule in jump_rules:
- if 'provider' in rule.rule and rule not in provjump_rules:
- provjump_rules.append(rule)
- self.assertEqual(1, len(provjump_rules))
-
- # remove a rule from the db, cast to compute to refresh rule
- mock_fwrules.return_value = [{'protocol': 'udp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535}]
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(1, len(rules))
-
@mock.patch.object(firewall, 'libvirt', fakelibvirt)
class NWFilterTestCase(test.NoDBTestCase):
@@ -507,9 +432,7 @@ class NWFilterTestCase(test.NoDBTestCase):
self.useFixture(fakelibvirt.FakeLibvirtFixture())
- self.fw = firewall.NWFilterFirewall(
- FakeVirtAPI(),
- host=host.Host("qemu:///system"))
+ self.fw = firewall.NWFilterFirewall(host=host.Host("qemu:///system"))
def _create_security_group(self, instance_ref):
secgroup = objects.SecurityGroup(id=1,
diff --git a/nova/tests/unit/virt/libvirt/test_guest.py b/nova/tests/unit/virt/libvirt/test_guest.py
index 0be0d09220..063fd2930c 100644
--- a/nova/tests/unit/virt/libvirt/test_guest.py
+++ b/nova/tests/unit/virt/libvirt/test_guest.py
@@ -363,6 +363,13 @@ class GuestTestCase(test.NoDBTestCase):
<address domain='0x0000' bus='0x06' slot='0x12' function='0x6'/>
</source>
</hostdev>
+ <interface type="bridge">
+ <mac address="fa:16:3e:f9:af:ae"/>
+ <model type="virtio"/>
+ <driver name="qemu"/>
+ <source bridge="qbr84008d03-11"/>
+ <target dev="tap84008d03-11"/>
+ </interface>
<controller type='usb' index='0'/>
<controller type='pci' index='0' model='pci-root'/>
<memballoon model='none'/>
@@ -373,14 +380,15 @@ class GuestTestCase(test.NoDBTestCase):
self.domain.XMLDesc.return_value = xml
devs = self.guest.get_all_devices()
- # Only currently parse <disk> and <hostdev> elements
+ # We currently only parse <disk>, <hostdev> and <interface> elements,
# hence we're not counting the controller/memballoon
- self.assertEqual(5, len(devs))
+ self.assertEqual(6, len(devs))
self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(devs[2], vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(devs[3], vconfig.LibvirtConfigGuestHostdev)
self.assertIsInstance(devs[4], vconfig.LibvirtConfigGuestHostdev)
+ self.assertIsInstance(devs[5], vconfig.LibvirtConfigGuestInterface)
devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestDisk)
self.assertEqual(3, len(devs))
@@ -399,6 +407,14 @@ class GuestTestCase(test.NoDBTestCase):
self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestHostdev)
self.assertIsInstance(devs[1], vconfig.LibvirtConfigGuestHostdev)
+ devs = self.guest.get_all_devices(vconfig.LibvirtConfigGuestInterface)
+ self.assertEqual(1, len(devs))
+ self.assertIsInstance(devs[0], vconfig.LibvirtConfigGuestInterface)
+
+ self.assertIsNotNone(
+ self.guest.get_interface_by_mac('fa:16:3e:f9:af:ae'))
+ self.assertIsNone(self.guest.get_interface_by_mac(None))
+
def test_get_info(self):
self.domain.info.return_value = (1, 2, 3, 4, 5)
self.domain.ID.return_value = 6
diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py
index 0746652cb9..76da5694ed 100644
--- a/nova/tests/unit/virt/libvirt/test_imagebackend.py
+++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py
@@ -107,15 +107,15 @@ class _ImageTestCase(object):
return
self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE)
- self.stubs.Set(os.path, 'exists', lambda _: True)
- self.stubs.Set(os, 'access', lambda p, w: True)
+ self.stub_out('os.path.exists', lambda _: True)
+ self.stub_out('os.access', lambda p, w: True)
# Call twice to verify testing fallocate is only called once.
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_processutils.fake_execute_get_log(),
- ['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
+ ['fallocate -l 1 %s.fallocate_test' % self.PATH,
'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
@@ -132,8 +132,8 @@ class _ImageTestCase(object):
self.stubs.Set(image, 'check_image_exists', lambda: True)
self.stubs.Set(image, '_can_fallocate', lambda: True)
self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE)
- self.stubs.Set(os.path, 'exists', lambda _: True)
- self.stubs.Set(os, 'access', lambda p, w: False)
+ self.stub_out('os.path.exists', lambda _: True)
+ self.stub_out('os.access', lambda p, w: False)
# Testing fallocate is only called when user has write access.
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
@@ -765,7 +765,7 @@ class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
def fake_fetch(target, *args, **kwargs):
return
- self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stub_out('os.path.exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE)
@@ -1138,7 +1138,7 @@ class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
def fake_fetch(target, *args, **kwargs):
return
- self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stub_out('os.path.exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE)
@@ -1154,6 +1154,7 @@ class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
+ FSID = "FakeFsID"
POOL = "FakePool"
USER = "FakeUser"
CONF = "FakeConf"
@@ -1325,7 +1326,7 @@ class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
def fake_fetch(target, *args, **kwargs):
return
- self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stub_out('os.path.exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE)
@@ -1441,6 +1442,137 @@ class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
mock_import.assert_called_once_with(mock.sentinel.file, name)
_test()
+ def test_get_parent_pool(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ with mock.patch.object(rbd_utils.RBDDriver, 'parent_info') as mock_pi:
+ mock_pi.return_value = [self.POOL, 'fake-image', 'fake-snap']
+ parent_pool = image._get_parent_pool(self.CONTEXT, 'fake-image',
+ self.FSID)
+ self.assertEqual(self.POOL, parent_pool)
+
+ def test_get_parent_pool_no_parent_info(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ rbd_uri = 'rbd://%s/%s/fake-image/fake-snap' % (self.FSID, self.POOL)
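+ # when parent_info is unavailable we expect a fallback to parsing
+ # the image's location URL to discover the parent pool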
+ with test.nested(mock.patch.object(rbd_utils.RBDDriver, 'parent_info'),
+ mock.patch.object(imagebackend.IMAGE_API, 'get'),
+ ) as (mock_pi, mock_get):
+ mock_pi.side_effect = exception.ImageUnacceptable(image_id='test',
+ reason='test')
+ mock_get.return_value = {'locations': [{'url': rbd_uri}]}
+ parent_pool = image._get_parent_pool(self.CONTEXT, 'fake-image',
+ self.FSID)
+ self.assertEqual(self.POOL, parent_pool)
+
+ def test_get_parent_pool_non_local_image(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ rbd_uri = 'rbd://remote-cluster/remote-pool/fake-image/fake-snap'
+ with test.nested(
+ mock.patch.object(rbd_utils.RBDDriver, 'parent_info'),
+ mock.patch.object(imagebackend.IMAGE_API, 'get')
+ ) as (mock_pi, mock_get):
+ mock_pi.side_effect = exception.ImageUnacceptable(image_id='test',
+ reason='test')
+ mock_get.return_value = {'locations': [{'url': rbd_uri}]}
+ self.assertRaises(exception.ImageUnacceptable,
+ image._get_parent_pool, self.CONTEXT,
+ 'fake-image', self.FSID)
+
+ def test_direct_snapshot(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ test_snap = 'rbd://%s/%s/fake-image-id/snap' % (self.FSID, self.POOL)
+ with test.nested(
+ mock.patch.object(rbd_utils.RBDDriver, 'get_fsid',
+ return_value=self.FSID),
+ mock.patch.object(image, '_get_parent_pool',
+ return_value=self.POOL),
+ mock.patch.object(rbd_utils.RBDDriver, 'create_snap'),
+ mock.patch.object(rbd_utils.RBDDriver, 'clone'),
+ mock.patch.object(rbd_utils.RBDDriver, 'flatten'),
+ mock.patch.object(image, 'cleanup_direct_snapshot')
+ ) as (mock_fsid, mock_parent, mock_create_snap, mock_clone,
+ mock_flatten, mock_cleanup):
+ location = image.direct_snapshot(self.CONTEXT, 'fake-snapshot',
+ 'fake-format', 'fake-image-id',
+ 'fake-base-image')
+ mock_fsid.assert_called_once_with()
+ mock_parent.assert_called_once_with(self.CONTEXT,
+ 'fake-base-image',
+ self.FSID)
+ mock_create_snap.assert_has_calls([mock.call(image.rbd_name,
+ 'fake-snapshot',
+ protect=True),
+ mock.call('fake-image-id',
+ 'snap',
+ pool=self.POOL,
+ protect=True)])
+ mock_clone.assert_called_once_with(mock.ANY, 'fake-image-id',
+ dest_pool=self.POOL)
+ mock_flatten.assert_called_once_with('fake-image-id',
+ pool=self.POOL)
+ mock_cleanup.assert_called_once_with(mock.ANY)
+ self.assertEqual(test_snap, location)
+
+ def test_direct_snapshot_cleans_up_on_failures(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.pool,
+ image.rbd_name)
+ with test.nested(
+ mock.patch.object(rbd_utils.RBDDriver, 'get_fsid',
+ return_value=self.FSID),
+ mock.patch.object(image, '_get_parent_pool',
+ return_value=self.POOL),
+ mock.patch.object(rbd_utils.RBDDriver, 'create_snap'),
+ mock.patch.object(rbd_utils.RBDDriver, 'clone',
+ side_effect=exception.Forbidden('testing')),
+ mock.patch.object(rbd_utils.RBDDriver, 'flatten'),
+ mock.patch.object(image, 'cleanup_direct_snapshot')) as (
+ mock_fsid, mock_parent, mock_create_snap, mock_clone,
+ mock_flatten, mock_cleanup):
+ self.assertRaises(exception.Forbidden, image.direct_snapshot,
+ self.CONTEXT, 'snap', 'fake-format',
+ 'fake-image-id', 'fake-base-image')
+ mock_create_snap.assert_called_once_with(image.rbd_name, 'snap',
+ protect=True)
+ self.assertFalse(mock_flatten.called)
+ mock_cleanup.assert_called_once_with(dict(url=test_snap))
+
+ def test_cleanup_direct_snapshot(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.pool,
+ image.rbd_name)
+ with test.nested(
+ mock.patch.object(rbd_utils.RBDDriver, 'remove_snap'),
+ mock.patch.object(rbd_utils.RBDDriver, 'destroy_volume')
+ ) as (mock_rm, mock_destroy):
+ # Ensure that the method does nothing when no location is provided
+ image.cleanup_direct_snapshot(None)
+ self.assertFalse(mock_rm.called)
+
+ # Ensure destroy_volume is not called when also_destroy_volume is unset
+ image.cleanup_direct_snapshot(dict(url=test_snap))
+ mock_rm.assert_called_once_with(image.rbd_name, 'snap', force=True,
+ ignore_errors=False,
+ pool=image.pool)
+ self.assertFalse(mock_destroy.called)
+
+ def test_cleanup_direct_snapshot_destroy_volume(self):
+ image = self.image_class(self.INSTANCE, self.NAME)
+ test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.pool,
+ image.rbd_name)
+ with test.nested(
+ mock.patch.object(rbd_utils.RBDDriver, 'remove_snap'),
+ mock.patch.object(rbd_utils.RBDDriver, 'destroy_volume')
+ ) as (mock_rm, mock_destroy):
+ # Ensure that destroy_volume is called
+ image.cleanup_direct_snapshot(dict(url=test_snap),
+ also_destroy_volume=True)
+ mock_rm.assert_called_once_with(image.rbd_name, 'snap',
+ force=True,
+ ignore_errors=False,
+ pool=image.pool)
+ mock_destroy.assert_called_once_with(image.rbd_name,
+ pool=image.pool)
+
class PloopTestCase(_ImageTestCase, test.NoDBTestCase):
SIZE = 1024
@@ -1504,7 +1636,7 @@ class PloopTestCase(_ImageTestCase, test.NoDBTestCase):
def fake_fetch(target, *args, **kwargs):
return
- self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stub_out('os.path.exists', lambda _: True)
self.stubs.Set(image, 'check_image_exists', lambda: True)
self.stubs.Set(image, 'get_disk_size', lambda _: self.SIZE)
@@ -1553,6 +1685,11 @@ class BackendTestCase(test.NoDBTestCase):
raw = imagebackend.Raw(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertFalse(raw.preallocate)
+ def test_image_raw_native_io(self):
+ self.flags(preallocate_images="space")
+ raw = imagebackend.Raw(self.INSTANCE, 'fake_disk', '/tmp/xyz')
+ self.assertEqual(raw.driver_io, "native")
+
def test_image_qcow2(self):
self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
@@ -1568,6 +1705,20 @@ class BackendTestCase(test.NoDBTestCase):
qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertFalse(qcow.preallocate)
+ def test_image_qcow2_native_io(self):
+ self.flags(preallocate_images="space")
+ qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
+ self.assertEqual(qcow.driver_io, "native")
+
+ def test_image_lvm_native_io(self):
+ def _test_native_io(is_sparse, driver_io):
+ self.flags(images_volume_group='FakeVG', group='libvirt')
+ self.flags(sparse_logical_volumes=is_sparse, group='libvirt')
+ lvm = imagebackend.Lvm(self.INSTANCE, 'fake_disk')
+ self.assertEqual(lvm.driver_io, driver_io)
+ _test_native_io(is_sparse=False, driver_io="native")
+ _test_native_io(is_sparse=True, driver_io=None)
+
def test_image_lvm(self):
self.flags(images_volume_group='FakeVG', group='libvirt')
self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
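
The *_native_io tests added above pin down a single rule: request native (direct) I/O only when images are preallocated and the backing storage is not sparse. A hedged sketch of that decision with an illustrative helper name (the real logic lives in nova.virt.libvirt.imagebackend):

    def pick_driver_io(preallocate_space, is_sparse=False):
        # Direct ("native") I/O bypasses the host page cache, which only
        # pays off on fully preallocated, non-sparse files or volumes.
        if preallocate_space and not is_sparse:
            return "native"
        return None


    assert pick_driver_io(True) == "native"              # Raw/Qcow2
    assert pick_driver_io(True, is_sparse=True) is None  # sparse LVM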
diff --git a/nova/tests/unit/virt/libvirt/test_imagecache.py b/nova/tests/unit/virt/libvirt/test_imagecache.py
index be10e95181..62c193be54 100644
--- a/nova/tests/unit/virt/libvirt/test_imagecache.py
+++ b/nova/tests/unit/virt/libvirt/test_imagecache.py
@@ -20,6 +20,7 @@ import os
import time
import mock
+from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import formatters
@@ -30,7 +31,6 @@ from six.moves import cStringIO
from nova import conductor
from nova import context
-from nova import db
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
@@ -66,7 +66,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
'banana-42-hamster'])
def test_read_stored_checksum_missing(self):
- self.stubs.Set(os.path, 'exists', lambda x: False)
+ self.stub_out('os.path.exists', lambda x: False)
csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
self.assertIsNone(csum)
@@ -139,8 +139,8 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
'17d1b00b81642842e514494a78e804e9a511637c_10737418240']
listing.extend(images)
- self.stubs.Set(os, 'listdir', lambda x: listing)
- self.stubs.Set(os.path, 'isfile', lambda x: True)
+ self.stub_out('os.listdir', lambda x: listing)
+ self.stub_out('os.path.isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
self.flags(instances_path='/var/lib/nova/instances')
@@ -184,10 +184,10 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertIn('swap_1000', image_cache_manager.back_swap_images)
def test_list_backing_images_small(self):
- self.stubs.Set(os, 'listdir',
+ self.stub_out('os.listdir',
lambda x: ['_base', 'instance-00000001',
'instance-00000002', 'instance-00000003'])
- self.stubs.Set(os.path, 'exists',
+ self.stub_out('os.path.exists',
lambda x: x.find('instance-') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
@@ -206,10 +206,10 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_resized(self):
- self.stubs.Set(os, 'listdir',
+ self.stub_out('os.listdir',
lambda x: ['_base', 'instance-00000001',
'instance-00000002', 'instance-00000003'])
- self.stubs.Set(os.path, 'exists',
+ self.stub_out('os.path.exists',
lambda x: x.find('instance-') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
@@ -230,9 +230,9 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_instancename(self):
- self.stubs.Set(os, 'listdir',
+ self.stub_out('os.listdir',
lambda x: ['_base', 'banana-42-hamster'])
- self.stubs.Set(os.path, 'exists',
+ self.stub_out('os.path.exists',
lambda x: x.find('banana-42-hamster') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
@@ -251,9 +251,9 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_disk_notexist(self):
- self.stubs.Set(os, 'listdir',
+ self.stub_out('os.listdir',
lambda x: ['_base', 'banana-42-hamster'])
- self.stubs.Set(os.path, 'exists',
+ self.stub_out('os.path.exists',
lambda x: x.find('banana-42-hamster') != -1)
def fake_get_disk(disk_path):
@@ -269,7 +269,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
image_cache_manager._list_backing_images)
def test_find_base_file_nothing(self):
- self.stubs.Set(os.path, 'exists', lambda x: False)
+ self.stub_out('os.path.exists', lambda x: False)
base_dir = '/var/lib/nova/instances/_base'
fingerprint = '549867354867'
@@ -280,7 +280,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
def test_find_base_file_small(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
- self.stubs.Set(os.path, 'exists',
+ self.stub_out('os.path.exists',
lambda x: x.endswith('%s_sm' % fingerprint))
base_dir = '/var/lib/nova/instances/_base'
@@ -297,10 +297,10 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
'00000004']
- self.stubs.Set(os, 'listdir', lambda x: listing)
- self.stubs.Set(os.path, 'exists',
+ self.stub_out('os.listdir', lambda x: listing)
+ self.stub_out('os.path.exists',
lambda x: x.endswith('%s_10737418240' % fingerprint))
- self.stubs.Set(os.path, 'isfile', lambda x: True)
+ self.stub_out('os.path.isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
@@ -318,9 +318,9 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
'00000004']
- self.stubs.Set(os, 'listdir', lambda x: listing)
- self.stubs.Set(os.path, 'exists', lambda x: True)
- self.stubs.Set(os.path, 'isfile', lambda x: True)
+ self.stub_out('os.listdir', lambda x: listing)
+ self.stub_out('os.path.exists', lambda x: True)
+ self.stub_out('os.path.isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
@@ -462,34 +462,32 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
[fname])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
- def test_handle_base_image_used(self):
- self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
+ @mock.patch.object(libvirt_utils, 'update_mtime')
+ def test_handle_base_image_used(self, mock_mtime):
img = '123'
with self._make_base_file() as fname:
- os.utime(fname, (-1, time.time() - 3601))
-
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
+ mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
- def test_handle_base_image_used_remotely(self):
- self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
+ @mock.patch.object(libvirt_utils, 'update_mtime')
+ def test_handle_base_image_used_remotely(self, mock_mtime):
img = '123'
with self._make_base_file() as fname:
- os.utime(fname, (-1, time.time() - 3601))
-
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
+ mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
@@ -528,9 +526,9 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
- def test_handle_base_image_checksum_fails(self):
+ @mock.patch.object(libvirt_utils, 'update_mtime')
+ def test_handle_base_image_checksum_fails(self, mock_mtime):
self.flags(checksum_base_images=True, group='libvirt')
- self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
img = '123'
@@ -547,12 +545,15 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
+ mock_mtime.assert_called_once_with(fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files,
[fname])
- def test_verify_base_images(self):
+ @mock.patch.object(libvirt_utils, 'update_mtime')
+ @mock.patch.object(lockutils, 'external_lock')
+ def test_verify_base_images(self, mock_lock, mock_mtime):
hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
@@ -606,12 +607,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.fail('Unexpected path existence check: %s' % path)
- self.stubs.Set(os.path, 'exists', lambda x: exists(x))
-
- self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
-
- # We need to stub utime as well
- self.stubs.Set(os, 'utime', lambda x, y: None)
+ self.stub_out('os.path.exists', lambda x: exists(x))
# Fake up some instances in the instances directory
orig_listdir = os.listdir
@@ -629,7 +625,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.fail('Unexpected directory listed: %s' % path)
- self.stubs.Set(os, 'listdir', lambda x: listdir(x))
+ self.stub_out('os.listdir', lambda x: listdir(x))
# Fake isfile for these faked images in _base
orig_isfile = os.path.isfile
@@ -645,7 +641,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.fail('Unexpected isfile call: %s' % path)
- self.stubs.Set(os.path, 'isfile', lambda x: isfile(x))
+ self.stub_out('os.path.isfile', lambda x: isfile(x))
# Fake the database call which lists running instances
instances = [{'image_ref': '1',
@@ -689,7 +685,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
return 1000000
- self.stubs.Set(os.path, 'getmtime', lambda x: getmtime(x))
+ self.stub_out('os.path.getmtime', lambda x: getmtime(x))
# Make sure we don't accidentally remove a real file
orig_remove = os.remove
@@ -701,7 +697,7 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
# Don't try to remove fake files
return
- self.stubs.Set(os, 'remove', lambda x: remove(x))
+ self.stub_out('os.remove', lambda x: remove(x))
self.mox.StubOutWithMock(objects.block_device.BlockDeviceMappingList,
'get_by_instance_uuid')
@@ -836,12 +832,13 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
- self.stubs.Set(db, 'instance_get_all_by_filters',
- fake_get_all_by_filters)
+ self.stub_out('nova.db.instance_get_all_by_filters',
+ fake_get_all_by_filters)
compute = importutils.import_object(CONF.compute_manager)
self.flags(use_local=True, group='conductor')
compute.conductor_api = conductor.API()
- compute._run_image_cache_manager_pass(None)
+ ctxt = context.get_admin_context()
+ compute._run_image_cache_manager_pass(ctxt)
self.assertTrue(was['called'])
def test_store_swap_image(self):
@@ -857,13 +854,13 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
expect_set = set(['swap_123', 'swap_456'])
self.assertEqual(image_cache_manager.back_swap_images, expect_set)
- @mock.patch.object(libvirt_utils, 'chown')
+ @mock.patch.object(lockutils, 'external_lock')
+ @mock.patch.object(libvirt_utils, 'update_mtime')
@mock.patch('os.path.exists', return_value=True)
- @mock.patch('os.utime')
@mock.patch('os.path.getmtime')
@mock.patch('os.remove')
def test_age_and_verify_swap_images(self, mock_remove, mock_getmtime,
- mock_utime, mock_exist, mock_chown):
+ mock_exist, mock_mtime, mock_lock):
image_cache_manager = imagecache.ImageCacheManager()
expected_remove = set()
expected_exist = set(['swap_128', 'swap_256'])
@@ -894,6 +891,20 @@ class ImageCacheManagerTestCase(test.NoDBTestCase):
self.assertIn('swap_128', expected_exist)
self.assertIn('swap_256', expected_remove)
+ @mock.patch.object(utils, 'synchronized')
+ @mock.patch.object(imagecache.ImageCacheManager, '_get_age_of_file',
+ return_value=(True, 100))
+ def test_lock_acquired_on_removing_old_enough_files(self, mock_get_age,
+ mock_synchronized):
+ base_file = '/tmp_age_test'
+ lock_path = os.path.join(CONF.instances_path, 'locks')
+ lock_file = os.path.split(base_file)[-1]
+ image_cache_manager = imagecache.ImageCacheManager()
+ image_cache_manager._remove_old_enough_file(
+ base_file, 60, remove_sig=False, remove_lock=False)
+ mock_synchronized.assert_called_once_with(lock_file, external=True,
+ lock_path=lock_path)
+
class VerifyChecksumTestCase(test.NoDBTestCase):
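
test_lock_acquired_on_removing_old_enough_files asserts that removal is wrapped in a per-base-file external lock rooted at CONF.instances_path/locks. A sketch of that pattern with oslo.concurrency, file names illustrative:

    import os

    from oslo_concurrency import lockutils


    def remove_old_enough_file(base_file, lock_path):
        # One file-based lock per base file: a racing image-cache pass on
        # the same host blocks here instead of double-deleting.
        lock_name = os.path.split(base_file)[-1]

        @lockutils.synchronized(lock_name, external=True,
                                lock_path=lock_path)
        def _remove():
            if os.path.exists(base_file):
                os.remove(base_file)

        _remove()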
diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py
index bd203257dc..0f1fefc426 100644
--- a/nova/tests/unit/virt/libvirt/test_utils.py
+++ b/nova/tests/unit/virt/libvirt/test_utils.py
@@ -527,7 +527,7 @@ disk size: 4.4M
self.path = path
return FakeStatResult()
- self.stubs.Set(os, 'statvfs', fake_statvfs)
+ self.stub_out('os.statvfs', fake_statvfs)
fs_info = libvirt_utils.get_fs_info('/some/file/path')
self.assertEqual('/some/file/path', self.path)
@@ -606,8 +606,8 @@ disk size: 4.4M
return FakeImgInfo()
self.stubs.Set(utils, 'execute', fake_execute)
- self.stubs.Set(os, 'rename', fake_rename)
- self.stubs.Set(os, 'unlink', fake_unlink)
+ self.stub_out('os.rename', fake_rename)
+ self.stub_out('os.unlink', fake_unlink)
self.stubs.Set(images, 'fetch', lambda *_, **__: None)
self.stubs.Set(images, 'qemu_img_info', fake_qemu_img_info)
self.stubs.Set(fileutils, 'delete_if_exists', fake_rm_on_error)
@@ -677,7 +677,7 @@ disk size: 4.4M
return True
self.stubs.Set(utils, 'execute', fake_execute)
- self.stubs.Set(os.path, 'exists', return_true)
+ self.stub_out('os.path.exists', return_true)
out = libvirt_utils.get_disk_backing_file('')
self.assertEqual(out, 'baz')
diff --git a/nova/tests/unit/virt/libvirt/test_vif.py b/nova/tests/unit/virt/libvirt/test_vif.py
index 3be8894f45..003141becf 100644
--- a/nova/tests/unit/virt/libvirt/test_vif.py
+++ b/nova/tests/unit/virt/libvirt/test_vif.py
@@ -12,6 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+
+import fixtures
from lxml import etree
import mock
from oslo_concurrency import processutils
@@ -24,6 +27,7 @@ from nova.network import model as network_model
from nova import objects
from nova.pci import utils as pci_utils
from nova import test
+from nova.tests.unit.virt import fakelibosinfo
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import host
@@ -280,6 +284,16 @@ class LibvirtVifTestCase(test.NoDBTestCase):
'/tmp/vif-xxx-yyy-zzz'}
)
+ vif_vhostuser_fp = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_bridge,
+ type=network_model.VIF_TYPE_VHOSTUSER,
+ devname='tap-xxx-yyy-zzz',
+ details={network_model.VIF_DETAILS_VHOSTUSER_SOCKET:
+ '/tmp/usv-xxx-yyy-zzz',
+ network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG: True},
+ )
+
vif_vhostuser_ovs = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
@@ -291,6 +305,33 @@ class LibvirtVifTestCase(test.NoDBTestCase):
ovs_interfaceid='aaa-bbb-ccc'
)
+ vif_vhostuser_ovs_fp = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_bridge,
+ type=network_model.VIF_TYPE_VHOSTUSER,
+ details={network_model.VIF_DETAILS_VHOSTUSER_MODE: 'server',
+ network_model.VIF_DETAILS_VHOSTUSER_SOCKET:
+ '/tmp/usv-xxx-yyy-zzz',
+ network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG: True,
+ network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True},
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc'
+ )
+
+ vif_vhostuser_ovs_fp_hybrid = network_model.VIF(id='vif-xxx-yyy-zzz',
+ address='ca:fe:de:ad:be:ef',
+ network=network_bridge,
+ type=network_model.VIF_TYPE_VHOSTUSER,
+ details={'ovs_hybrid_plug': True,
+ network_model.VIF_DETAILS_VHOSTUSER_MODE: 'server',
+ network_model.VIF_DETAILS_VHOSTUSER_SOCKET:
+ '/tmp/usv-xxx-yyy-zzz',
+ network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True,
+ network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG: True},
+ devname='tap-xxx-yyy-zzz',
+ ovs_interfaceid='aaa-bbb-ccc'
+ )
+
vif_vhostuser_no_path = network_model.VIF(id='vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_bridge,
@@ -537,6 +578,22 @@ class LibvirtVifTestCase(test.NoDBTestCase):
image_meta)
self._assertModel(xml, model)
+ @mock.patch.object(vif.designer, 'set_vif_guest_frontend_config')
+ def test_model_with_osinfo(self, mock_set):
+ self.flags(use_virtio_for_bridges=True,
+ virt_type='kvm',
+ group='libvirt')
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.osinfo.libosinfo',
+ fakelibosinfo))
+ d = vif.LibvirtGenericVIFDriver()
+ image_meta = {'properties': {'os_name': 'fedora22'}}
+ image_meta = objects.ImageMeta.from_dict(image_meta)
+ d.get_base_config(None, self.vif_bridge, image_meta,
+ None, 'kvm')
+ mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef',
+ 'virtio', None, None)
+
def _test_model_qemu(self, *vif_objs, **kw):
libvirt_version = kw.get('libvirt_version')
self.flags(use_virtio_for_bridges=True,
@@ -635,7 +692,7 @@ class LibvirtVifTestCase(test.NoDBTestCase):
delete.side_effect = processutils.ProcessExecutionError
d.unplug_ivs_ethernet(None, self.vif_ovs)
- def test_plug_ovs_hybrid(self):
+ def _test_plug_ovs_hybrid(self, ipv6_exists):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy'),
mock.call('qvovif-xxx-yyy')],
@@ -650,23 +707,33 @@ class LibvirtVifTestCase(test.NoDBTestCase):
mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
'/bridge/multicast_snooping'),
process_input='0', run_as_root=True,
- check_exit_code=[0, 1]),
- mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
- run_as_root=True),
- mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
- 'qvbvif-xxx-yyy', run_as_root=True)],
+ check_exit_code=[0, 1])],
'create_ovs_vif_port': [mock.call(
'br0', 'qvovif-xxx-yyy', 'aaa-bbb-ccc',
'ca:fe:de:ad:be:ef',
'f0000000-0000-0000-0000-000000000001')]
}
+ # The disable_ipv6 call is expected in the middle of the list,
+ # but only when the sysctl path exists
+ if ipv6_exists:
+ calls['execute'].extend([
+ mock.call('tee', ('/proc/sys/net/ipv6/conf'
+ '/qbrvif-xxx-yyy/disable_ipv6'),
+ process_input='1', run_as_root=True,
+ check_exit_code=[0, 1])])
+ calls['execute'].extend([
+ mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
+ run_as_root=True),
+ mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
+ 'qvbvif-xxx-yyy', run_as_root=True)])
with test.nested(
mock.patch.object(linux_net, 'device_exists',
return_value=False),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, '_create_veth_pair'),
- mock.patch.object(linux_net, 'create_ovs_vif_port')
- ) as (device_exists, execute, _create_veth_pair, create_ovs_vif_port):
+ mock.patch.object(linux_net, 'create_ovs_vif_port'),
+ mock.patch.object(os.path, 'exists', return_value=ipv6_exists)
+ ) as (device_exists, execute, _create_veth_pair, create_ovs_vif_port,
+ path_exists):
d = vif.LibvirtGenericVIFDriver()
d.plug_ovs_hybrid(self.instance, self.vif_ovs)
device_exists.assert_has_calls(calls['device_exists'])
@@ -674,6 +741,12 @@ class LibvirtVifTestCase(test.NoDBTestCase):
execute.assert_has_calls(calls['execute'])
create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
+ def test_plug_ovs_hybrid_ipv6(self):
+ self._test_plug_ovs_hybrid(ipv6_exists=True)
+
+ def test_plug_ovs_hybrid_no_ipv6(self):
+ self._test_plug_ovs_hybrid(ipv6_exists=False)
+
def test_unplug_ovs_hybrid(self):
calls = {
'device_exists': [mock.call('qbrvif-xxx-yyy')],
@@ -769,6 +842,10 @@ class LibvirtVifTestCase(test.NoDBTestCase):
'/bridge/multicast_snooping'),
process_input='0', run_as_root=True,
check_exit_code=[0, 1]),
+ mock.call('tee', ('/proc/sys/net/ipv6/conf'
+ '/qbrvif-xxx-yyy/disable_ipv6'),
+ process_input='1', run_as_root=True,
+ check_exit_code=[0, 1]),
mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
run_as_root=True),
mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
@@ -782,8 +859,10 @@ class LibvirtVifTestCase(test.NoDBTestCase):
return_value=False),
mock.patch.object(utils, 'execute'),
mock.patch.object(linux_net, '_create_veth_pair'),
- mock.patch.object(linux_net, 'create_ivs_vif_port')
- ) as (device_exists, execute, _create_veth_pair, create_ivs_vif_port):
+ mock.patch.object(linux_net, 'create_ivs_vif_port'),
+ mock.patch.object(os.path, 'exists', return_value=True)
+ ) as (device_exists, execute, _create_veth_pair, create_ivs_vif_port,
+ path_exists):
d = vif.LibvirtGenericVIFDriver()
d.plug_ivs_hybrid(self.instance, self.vif_ivs)
device_exists.assert_has_calls(calls['device_exists'])
@@ -1217,6 +1296,19 @@ class LibvirtVifTestCase(test.NoDBTestCase):
self._assertMacEquals(node, self.vif_vhostuser_ovs)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
+ @mock.patch.object(linux_net, 'create_fp_dev')
+ def test_vhostuser_fp_plug(self, mock_create_fp_dev):
+ d = vif.LibvirtGenericVIFDriver()
+ d.plug_vhostuser(self.instance, self.vif_vhostuser_fp)
+ mock_create_fp_dev.assert_has_calls(
+ [mock.call('tap-xxx-yyy-zzz', '/tmp/usv-xxx-yyy-zzz', 'client')])
+
+ @mock.patch.object(linux_net, 'delete_fp_dev')
+ def test_vhostuser_fp_unplug(self, mock_delete_fp_dev):
+ d = vif.LibvirtGenericVIFDriver()
+ d.unplug_vhostuser(None, self.vif_vhostuser_fp)
+ mock_delete_fp_dev.assert_has_calls([mock.call('tap-xxx-yyy-zzz')])
+
def test_vhostuser_ovs_plug(self):
calls = {
@@ -1245,3 +1337,111 @@ class LibvirtVifTestCase(test.NoDBTestCase):
d = vif.LibvirtGenericVIFDriver()
d.unplug_vhostuser(None, self.vif_vhostuser_ovs)
delete_port.assert_has_calls(calls['delete_ovs_vif_port'])
+
+ def test_vhostuser_ovs_fp_plug(self):
+ calls = {
+ 'create_fp_dev': [mock.call('tap-xxx-yyy-zzz',
+ '/tmp/usv-xxx-yyy-zzz',
+ 'client')],
+ 'create_ovs_vif_port': [mock.call(
+ 'br0', 'tap-xxx-yyy-zzz',
+ 'aaa-bbb-ccc', 'ca:fe:de:ad:be:ef',
+ 'f0000000-0000-0000-0000-000000000001')]
+ }
+ with test.nested(
+ mock.patch.object(linux_net, 'create_fp_dev'),
+ mock.patch.object(linux_net, 'create_ovs_vif_port'),
+ ) as (create_fp_dev, create_ovs_vif_port):
+ d = vif.LibvirtGenericVIFDriver()
+ d.plug_vhostuser(self.instance, self.vif_vhostuser_ovs_fp)
+ create_fp_dev.assert_has_calls(calls['create_fp_dev'])
+ create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
+
+ def test_vhostuser_ovs_fp_unplug(self):
+ calls = {
+ 'delete_ovs_vif_port': [mock.call('br0', 'tap-xxx-yyy-zzz',
+ False)],
+ 'delete_fp_dev': [mock.call('tap-xxx-yyy-zzz')],
+ }
+ with test.nested(
+ mock.patch.object(linux_net, 'delete_ovs_vif_port'),
+ mock.patch.object(linux_net, 'delete_fp_dev')
+ ) as (delete_ovs_port, delete_fp_dev):
+ d = vif.LibvirtGenericVIFDriver()
+ d.unplug_vhostuser(None, self.vif_vhostuser_ovs_fp)
+ delete_ovs_port.assert_has_calls(calls['delete_ovs_vif_port'])
+ delete_fp_dev.assert_has_calls(calls['delete_fp_dev'])
+
+ def test_vhostuser_ovs_fp_hybrid_plug(self):
+ calls = {
+ 'create_fp_dev': [mock.call('tap-xxx-yyy-zzz',
+ '/tmp/usv-xxx-yyy-zzz',
+ 'client')],
+ 'device_exists': [mock.call('tap-xxx-yyy-zzz'),
+ mock.call('qbrvif-xxx-yyy'),
+ mock.call('qvovif-xxx-yyy')],
+ '_create_veth_pair': [mock.call('qvbvif-xxx-yyy',
+ 'qvovif-xxx-yyy')],
+ 'execute': [mock.call('brctl', 'addbr', 'qbrvif-xxx-yyy',
+ run_as_root=True),
+ mock.call('brctl', 'setfd', 'qbrvif-xxx-yyy', 0,
+ run_as_root=True),
+ mock.call('brctl', 'stp', 'qbrvif-xxx-yyy', 'off',
+ run_as_root=True),
+ mock.call('tee', ('/sys/class/net/qbrvif-xxx-yyy'
+ '/bridge/multicast_snooping'),
+ process_input='0', run_as_root=True,
+ check_exit_code=[0, 1]),
+ mock.call('ip', 'link', 'set', 'qbrvif-xxx-yyy', 'up',
+ run_as_root=True),
+ mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
+ 'qvbvif-xxx-yyy', run_as_root=True),
+ mock.call('brctl', 'addif', 'qbrvif-xxx-yyy',
+ 'tap-xxx-yyy-zzz', run_as_root=True)],
+ 'create_ovs_vif_port': [mock.call(
+ 'br0', 'qvovif-xxx-yyy',
+ 'aaa-bbb-ccc', 'ca:fe:de:ad:be:ef',
+ 'f0000000-0000-0000-0000-000000000001')]
+ }
+ with test.nested(
+ mock.patch.object(linux_net, 'create_fp_dev'),
+ mock.patch.object(linux_net, 'device_exists',
+ return_value=False),
+ mock.patch.object(utils, 'execute'),
+ mock.patch.object(linux_net, '_create_veth_pair'),
+ mock.patch.object(linux_net, 'create_ovs_vif_port')
+ ) as (create_fp_dev, device_exists, execute, _create_veth_pair,
+ create_ovs_vif_port):
+ d = vif.LibvirtGenericVIFDriver()
+ d.plug_vhostuser(self.instance, self.vif_vhostuser_ovs_fp_hybrid)
+ create_fp_dev.assert_has_calls(calls['create_fp_dev'])
+ device_exists.assert_has_calls(calls['device_exists'])
+ _create_veth_pair.assert_has_calls(calls['_create_veth_pair'])
+ execute.assert_has_calls(calls['execute'])
+ create_ovs_vif_port.assert_has_calls(calls['create_ovs_vif_port'])
+
+ def test_vhostuser_ovs_fp_hybrid_unplug(self):
+ calls = {
+ 'device_exists': [mock.call('qbrvif-xxx-yyy')],
+ 'execute': [mock.call('brctl', 'delif', 'qbrvif-xxx-yyy',
+ 'qvbvif-xxx-yyy', run_as_root=True),
+ mock.call('ip', 'link', 'set',
+ 'qbrvif-xxx-yyy', 'down', run_as_root=True),
+ mock.call('brctl', 'delbr',
+ 'qbrvif-xxx-yyy', run_as_root=True)],
+ 'delete_ovs_vif_port': [mock.call('br0', 'qvovif-xxx-yyy')],
+ 'delete_fp_dev': [mock.call('tap-xxx-yyy-zzz')]
+ }
+ with test.nested(
+ mock.patch.object(linux_net, 'device_exists',
+ return_value=True),
+ mock.patch.object(utils, 'execute'),
+ mock.patch.object(linux_net, 'delete_ovs_vif_port'),
+ mock.patch.object(linux_net, 'delete_fp_dev')
+ ) as (device_exists, execute, delete_ovs_vif_port, delete_fp_dev):
+ d = vif.LibvirtGenericVIFDriver()
+ d.unplug_vhostuser(None, self.vif_vhostuser_ovs_fp_hybrid)
+ device_exists.assert_has_calls(calls['device_exists'])
+ execute.assert_has_calls(calls['execute'])
+ delete_ovs_vif_port.assert_has_calls(calls['delete_ovs_vif_port'])
+ delete_fp_dev.assert_has_calls(calls['delete_fp_dev'])
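
Every vhostuser fast-path test above asserts the same ordering: the fp device wrapping the vhost-user socket is created first, and only then plugged into OVS (or the qbr linux bridge in the hybrid case). A condensed sketch of that ordering — the linux_net helper names and argument lists match the mock.call expectations above, while the surrounding orchestration is illustrative:

    from nova.network import linux_net


    def plug_vhostuser_fp(vif, instance_uuid, ovs_plug=False):
        dev = vif['devname']
        socket_path = vif['details']['vhostuser_socket']
        # Create the fp netdevice backing the vhost-user socket first...
        linux_net.create_fp_dev(dev, socket_path, 'client')
        # ...then attach it to the switch, as the assertions require.
        if ovs_plug:
            linux_net.create_ovs_vif_port(
                vif['network']['bridge'], dev, vif['id'],
                vif['address'], instance_uuid)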
diff --git a/nova/tests/unit/virt/libvirt/volume/test_disco.py b/nova/tests/unit/virt/libvirt/volume/test_disco.py
new file mode 100644
index 0000000000..9556eec2f3
--- /dev/null
+++ b/nova/tests/unit/virt/libvirt/volume/test_disco.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2015 Industrial Technology Research Institute.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from os_brick.initiator import connector
+
+from nova.tests.unit.virt.libvirt.volume import test_volume
+from nova.virt.libvirt.volume import disco
+
+
+class LibvirtDISCOVolumeDriverTestCase(
+ test_volume.LibvirtVolumeBaseTestCase):
+
+ def test_libvirt_disco_driver(self):
+ libvirt_driver = disco.LibvirtDISCOVolumeDriver(
+ self.fake_conn)
+ self.assertIsInstance(libvirt_driver.connector,
+ connector.DISCOConnector)
+
+ def test_libvirt_disco_driver_connect(self):
+ dcon = disco.LibvirtDISCOVolumeDriver(self.fake_conn)
+ conf = {'server_ip': '127.0.0.1', 'server_port': 9898}
+ disk_info = {'disco_id': '1234567',
+ 'name': 'aDiscoVolume',
+ 'conf': conf}
+ conn = {'data': disk_info}
+ with mock.patch.object(dcon.connector,
+ 'connect_volume',
+ return_value={'path': '/dev/dms1234567'}):
+ dcon.connect_volume(conn, None)
+ self.assertEqual('/dev/dms1234567',
+ conn['data']['device_path'])
+
+ def test_libvirt_disco_driver_get_config(self):
+ dcon = disco.LibvirtDISCOVolumeDriver(self.fake_conn)
+
+ disk_info = {'path': '/dev/dms1234567', 'name': 'aDiscoVolume',
+ 'type': 'raw', 'dev': 'vda1', 'bus': 'pci0',
+ 'device_path': '/dev/dms1234567'}
+ conn = {'data': disk_info}
+ conf = dcon.get_config(conn, disk_info)
+ self.assertEqual('file', conf.source_type)
+ self.assertEqual('/dev/dms1234567', conf.source_path)
+ self.assertEqual('disco', conf.source_protocol)
+
+ def test_libvirt_disco_driver_disconnect(self):
+ dcon = disco.LibvirtDISCOVolumeDriver(self.fake_conn)
+ dcon.connector.disconnect_volume = mock.MagicMock()
+ disk_info = {'path': '/dev/dms1234567', 'name': 'aDiscoVolume',
+ 'type': 'raw', 'dev': 'vda1', 'bus': 'pci0',
+ 'device_path': '/dev/dms123456'}
+ conn = {'data': disk_info}
+ dcon.disconnect_volume(conn, disk_info)
+ dcon.connector.disconnect_volume.assert_called_once_with(
+ disk_info, None)
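
The DISCO driver tests follow the standard os-brick handshake: connect_volume() resolves a host-local device path, which the driver writes back into connection_info for get_config() to consume. A sketch of that round-trip; the factory invocation is a plausible use of os-brick rather than code copied from the driver:

    from os_brick.initiator import connector


    def connect(connection_info, root_helper=None):
        conn = connector.InitiatorConnector.factory('DISCO', root_helper)
        device_info = conn.connect_volume(connection_info['data'])
        # Stash the resolved path so the libvirt XML builder can point
        # the <source> element at the attached device.
        connection_info['data']['device_path'] = device_info['path']
        return connection_info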
diff --git a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
index 5bea62525b..a6dbf233a2 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_fibrechannel.py
@@ -42,3 +42,18 @@ class LibvirtFibreChannelVolumeDriverTestCase(
@mock.patch.object(platform, 'machine', return_value=arch.S390X)
def test_libvirt_fibrechan_driver_s390x(self, mock_machine):
self._test_libvirt_fibrechan_driver_s390()
+
+ def test_libvirt_fibrechan_driver_get_config(self):
+ libvirt_driver = fibrechannel.LibvirtFibreChannelVolumeDriver(
+ self.fake_conn)
+
+ device_path = '/dev/fake-dev'
+ connection_info = {'data': {'device_path': device_path}}
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(device_path, tree.find('./source').get('dev'))
+ self.assertEqual('raw', tree.find('./driver').get('type'))
+ self.assertEqual('native', tree.find('./driver').get('io'))
diff --git a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
index 84469407d9..cc14063e16 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_iscsi.py
@@ -74,3 +74,17 @@ Setting up iSCSI targets: unused
# we don't care what the log message is, we just want to make sure
# our stub method is called which asserts the password is scrubbed
self.assertTrue(debug_mock.called)
+
+ def test_libvirt_iscsi_driver_get_config(self):
+ libvirt_driver = iscsi.LibvirtISCSIVolumeDriver(self.fake_conn)
+
+ device_path = '/dev/fake-dev'
+ connection_info = {'data': {'device_path': device_path}}
+
+ conf = libvirt_driver.get_config(connection_info, self.disk_info)
+ tree = conf.format_dom()
+
+ self.assertEqual('block', tree.get('type'))
+ self.assertEqual(device_path, tree.find('./source').get('dev'))
+ self.assertEqual('raw', tree.find('./driver').get('type'))
+ self.assertEqual('native', tree.find('./driver').get('io'))
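
The get_config assertions added for the fibre channel and iSCSI drivers reduce to one XML shape: a block-typed disk whose driver element carries io='native'. Hand-built with lxml for illustration:

    from lxml import etree

    disk = etree.Element('disk', type='block')
    etree.SubElement(disk, 'source', dev='/dev/fake-dev')
    etree.SubElement(disk, 'driver', type='raw', io='native')
    print(etree.tostring(disk, pretty_print=True))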
diff --git a/nova/tests/unit/virt/libvirt/volume/test_nfs.py b/nova/tests/unit/virt/libvirt/volume/test_nfs.py
index f31dec4967..5308b0d540 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_nfs.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_nfs.py
@@ -91,6 +91,7 @@ class LibvirtNFSVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
self.assertEqual('raw', tree.find('./driver').get('type'))
+ self.assertEqual('native', tree.find('./driver').get('io'))
def test_libvirt_nfs_driver_already_mounted(self):
libvirt_driver = nfs.LibvirtNFSVolumeDriver(self.fake_conn)
diff --git a/nova/tests/unit/virt/libvirt/volume/test_scality.py b/nova/tests/unit/virt/libvirt/volume/test_scality.py
index 8b490943f8..2957ac2d30 100644
--- a/nova/tests/unit/virt/libvirt/volume/test_scality.py
+++ b/nova/tests/unit/virt/libvirt/volume/test_scality.py
@@ -52,7 +52,7 @@ class LibvirtScalityVolumeDriverTestCase(
else:
return os.access(path, flags)
- self.stubs.Set(os, 'access', _access_wrapper)
+ self.stub_out('os.access', _access_wrapper)
with mock.patch.object(self.drv, '_mount_sofs'):
self.drv.connect_volume(TEST_CONN_INFO, self.disk_info)
diff --git a/nova/tests/unit/virt/test_hardware.py b/nova/tests/unit/virt/test_hardware.py
index a0f2121c6a..cb4fd45f47 100644
--- a/nova/tests/unit/virt/test_hardware.py
+++ b/nova/tests/unit/virt/test_hardware.py
@@ -1137,7 +1137,18 @@ class NUMATopologyTest(test.NoDBTestCase):
},
"expect": exception.CPUThreadPolicyConfigurationInvalid,
},
-
+ {
+ # Invalid vCPUs mask with realtime
+ "flavor": objects.Flavor(vcpus=4, memory_mb=2048,
+ extra_specs={
+ "hw:cpu_policy": "dedicated",
+ "hw:cpu_realtime": "yes",
+ }),
+ "image": {
+ "properties": {}
+ },
+ "expect": exception.RealtimeMaskNotFoundOrInvalid,
+ },
]
for testitem in testdata:
@@ -1948,7 +1959,7 @@ class _CPUPinningTestCaseBase(object):
for inst_p, host_p in instance_cell.cpu_pinning.items():
pins_per_sib[cpu_to_sib[host_p]] += 1
self.assertTrue(max(pins_per_sib.values()) > 1,
- "Seems threads were not prefered by the pinning "
+ "Seems threads were not preferred by the pinning "
"logic.")
diff --git a/nova/tests/unit/virt/test_images.py b/nova/tests/unit/virt/test_images.py
index 46857ac09d..b0fc20b01b 100644
--- a/nova/tests/unit/virt/test_images.py
+++ b/nova/tests/unit/virt/test_images.py
@@ -25,7 +25,7 @@ from nova.virt import images
class QemuTestCase(test.NoDBTestCase):
def test_qemu_info_with_bad_path(self):
- self.assertRaises(exception.InvalidDiskInfo,
+ self.assertRaises(exception.DiskNotFound,
images.qemu_img_info,
'/path/that/does/not/exist')
diff --git a/nova/tests/unit/virt/test_osinfo.py b/nova/tests/unit/virt/test_osinfo.py
new file mode 100644
index 0000000000..fcc8e68bb9
--- /dev/null
+++ b/nova/tests/unit/virt/test_osinfo.py
@@ -0,0 +1,89 @@
+# Copyright 2015 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import fixtures
+import mock
+
+from nova import exception
+from nova import objects
+from nova import test
+from nova.tests.unit.virt import fakelibosinfo
+from nova.virt import osinfo
+
+
+class LibvirtOsInfoTest(test.NoDBTestCase):
+
+ def setUp(self):
+ super(LibvirtOsInfoTest, self).setUp()
+ image_meta = {'properties':
+ {'os_distro': 'fedora22',
+ 'hw_disk_bus': 'ide',
+ 'hw_vif_model': 'rtl8139'}
+ }
+ self.img_meta = objects.ImageMeta.from_dict(image_meta)
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.osinfo.libosinfo',
+ fakelibosinfo))
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.osinfo._OsInfoDatabase._instance',
+ None))
+
+ def test_get_os(self):
+ os_info_db = osinfo._OsInfoDatabase.get_instance()
+ os_name = os_info_db.get_os('fedora22').get_name()
+ self.assertEqual('Fedora 22', os_name)
+
+ def test_get_os_fails(self):
+ os_info_db = osinfo._OsInfoDatabase.get_instance()
+ self.assertRaises(exception.OsInfoNotFound,
+ os_info_db.get_os,
+ 'test33')
+
+ def test_module_load_failed(self):
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.osinfo.libosinfo',
+ None))
+ with test.nested(
+ mock.patch.object(osinfo.importutils, 'import_module',
+ side_effect=ImportError('gi.repository.Libosinfo')),
+ mock.patch.object(osinfo.LOG, 'info')) as (mock_import, mock_log):
+
+ os_info_db = osinfo._OsInfoDatabase.get_instance()
+ self.assertIsNone(os_info_db.get_os('fedora22'))
+
+ os_info_db = osinfo._OsInfoDatabase.get_instance()
+ self.assertIsNone(os_info_db.get_os('fedora19'))
+ self.assertEqual(1, mock_log.call_count)
+
+ def test_hardware_properties_from_osinfo(self):
+ """Verifies that HardwareProperties attributes are being set
+ from libosinfo.
+ """
+ img_meta = {'properties':
+ {'os_distro': 'fedora22'}
+ }
+
+ img_meta = objects.ImageMeta.from_dict(img_meta)
+ osinfo_obj = osinfo.HardwareProperties(img_meta)
+ self.assertEqual('virtio', osinfo_obj.network_model)
+ self.assertEqual('virtio', osinfo_obj.disk_model)
+
+ def test_hardware_properties_from_meta(self):
+ """Verifies that HardwareProperties attributes are being set
+ from image properties.
+ """
+ with mock.patch.object(osinfo._OsInfoDatabase, 'get_instance'):
+ osinfo_obj = osinfo.HardwareProperties(self.img_meta)
+ self.assertEqual('rtl8139', osinfo_obj.network_model)
+ self.assertEqual('ide', osinfo_obj.disk_model)
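
The setUp above resets _OsInfoDatabase._instance because the class is a lazy singleton; an instance leaked from a previous test would pin the first loader it saw. Minimal sketch of that shape (the loader body is a stand-in, not Nova's code):

    class _OsInfoDatabase(object):
        _instance = None

        def __init__(self):
            # Stand-in for importing and loading the libosinfo database.
            self.db = {}

        @classmethod
        def get_instance(cls):
            # The first caller pays the load cost; later callers share it.
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance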
diff --git a/nova/tests/unit/virt/test_virt.py b/nova/tests/unit/virt/test_virt.py
index 1d7254644a..45d6009c98 100644
--- a/nova/tests/unit/virt/test_virt.py
+++ b/nova/tests/unit/virt/test_virt.py
@@ -14,7 +14,6 @@
# under the License.
import io
-import os
import mock
import six
@@ -203,7 +202,7 @@ class TestVirtDisk(test.NoDBTestCase):
def fake_instance_for_format(image, mountdir, partition):
return FakeMount(image, mountdir, partition)
- self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stub_out('os.path.exists', lambda _: True)
self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
self.stubs.Set(mount.Mount, 'instance_for_format',
staticmethod(fake_instance_for_format))
@@ -222,7 +221,7 @@ class TestVirtDisk(test.NoDBTestCase):
}
return mount_points[mount_point]
- self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stub_out('os.path.exists', lambda _: True)
self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
expected_commands = []
@@ -266,7 +265,7 @@ class TestVirtDisk(test.NoDBTestCase):
def proc_mounts(self, mount_point):
return None
- self.stubs.Set(os.path, 'exists', lambda _: True)
+ self.stub_out('os.path.exists', lambda _: True)
self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)
expected_commands = []
diff --git a/nova/tests/unit/virt/test_virt_drivers.py b/nova/tests/unit/virt/test_virt_drivers.py
index b6e2173fd5..b592cc422f 100644
--- a/nova/tests/unit/virt/test_virt_drivers.py
+++ b/nova/tests/unit/virt/test_virt_drivers.py
@@ -27,6 +27,7 @@ import six
from nova.compute import manager
from nova.console import type as ctype
+from nova import context
from nova import exception
from nova import objects
from nova import test
@@ -259,8 +260,8 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
network_info = test_utils.get_test_network_info()
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
- image_info = test_utils.get_test_image_info(None, instance_ref)
- self.connection.spawn(self.ctxt, instance_ref, image_info,
+ image_meta = test_utils.get_test_image_object(None, instance_ref)
+ self.connection.spawn(self.ctxt, instance_ref, image_meta,
[], 'herp', network_info=network_info)
return instance_ref, network_info
@@ -344,7 +345,7 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
@catch_notimplementederror
def test_rescue(self):
- image_meta = {}
+ image_meta = objects.ImageMeta.from_dict({})
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info,
image_meta, '')
@@ -356,7 +357,7 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
@catch_notimplementederror
def test_unrescue_rescued_instance(self):
- image_meta = {}
+ image_meta = objects.ImageMeta.from_dict({})
instance_ref, network_info = self._get_running_instance()
self.connection.rescue(self.ctxt, instance_ref, network_info,
image_meta, '')
@@ -643,11 +644,6 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
self.connection.refresh_instance_security_rules(instance_ref)
@catch_notimplementederror
- def test_refresh_provider_fw_rules(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.refresh_provider_fw_rules()
-
- @catch_notimplementederror
def test_ensure_filtering_for_instance(self):
instance = test_utils.get_test_instance(obj=True)
network_info = test_utils.get_test_network_info()
@@ -663,8 +659,18 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
@catch_notimplementederror
def test_live_migration(self):
instance_ref, network_info = self._get_running_instance()
+ fake_context = context.RequestContext('fake', 'fake')
+ migration = objects.Migration(context=fake_context, id=1)
+ migrate_data = objects.LibvirtLiveMigrateData(
+ migration=migration)
self.connection.live_migration(self.ctxt, instance_ref, 'otherhost',
- lambda *a: None, lambda *a: None)
+ lambda *a: None, lambda *a: None,
+ migrate_data=migrate_data)
+
+ @catch_notimplementederror
+ def test_live_migration_force_complete(self):
+ instance_ref, network_info = self._get_running_instance()
+ self.connection.live_migration_force_complete(instance_ref)
@catch_notimplementederror
def _check_available_resource_fields(self, host_status):
diff --git a/nova/tests/unit/virt/vmwareapi/fake.py b/nova/tests/unit/virt/vmwareapi/fake.py
index 5171e9df83..6364cf528f 100644
--- a/nova/tests/unit/virt/vmwareapi/fake.py
+++ b/nova/tests/unit/virt/vmwareapi/fake.py
@@ -1325,6 +1325,13 @@ class FakeVim(object):
task_mdo = create_task(method, "success")
return task_mdo.obj
+ def _rename(self, method, *args, **kwargs):
+ vm_ref = args[0]
+ vm_mdo = _get_vm_mdo(vm_ref)
+ vm_mdo.set('name', kwargs['newName'])
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
def _create_copy_disk(self, method, vmdk_file_path):
"""Creates/copies a vmdk file object in the datastore."""
# We need to add/create both .vmdk and .-flat.vmdk files
@@ -1589,6 +1596,9 @@ class FakeVim(object):
elif attr_name == "ReconfigVM_Task":
return lambda *args, **kwargs: self._reconfig_vm(attr_name,
*args, **kwargs)
+ elif attr_name == "Rename_Task":
+ return lambda *args, **kwargs: self._rename(attr_name,
+ *args, **kwargs)
elif attr_name == "CreateVirtualDisk_Task":
return lambda *args, **kwargs: self._create_copy_disk(attr_name,
kwargs.get("name"))
diff --git a/nova/tests/unit/virt/vmwareapi/stubs.py b/nova/tests/unit/virt/vmwareapi/stubs.py
index 2e3dd5ae3f..588e6c175b 100644
--- a/nova/tests/unit/virt/vmwareapi/stubs.py
+++ b/nova/tests/unit/virt/vmwareapi/stubs.py
@@ -20,9 +20,6 @@ Stubouts for the test suite
from oslo_vmware import exceptions as vexc
from nova.tests.unit.virt.vmwareapi import fake
-from nova.virt.vmwareapi import driver
-from nova.virt.vmwareapi import images
-from nova.virt.vmwareapi import network_util
def fake_get_vim_object(arg):
@@ -65,12 +62,16 @@ def fake_session_permission_exception():
raise vexc.VimFaultException(fault_list, fault_string, details=details)
-def set_stubs(stubs):
+def set_stubs(test):
"""Set the stubs."""
- stubs.Set(network_util, 'get_network_with_the_name',
- fake.fake_get_network)
- stubs.Set(images, 'upload_image_stream_optimized', fake.fake_upload_image)
- stubs.Set(images, 'fetch_image', fake.fake_fetch_image)
- stubs.Set(driver.VMwareAPISession, "vim", fake_vim_prop)
- stubs.Set(driver.VMwareAPISession, "_is_vim_object",
- fake_is_vim_object)
+
+ test.stub_out('nova.virt.vmwareapi.network_util.get_network_with_the_name',
+ fake.fake_get_network)
+ test.stub_out('nova.virt.vmwareapi.images.upload_image_stream_optimized',
+ fake.fake_upload_image)
+ test.stub_out('nova.virt.vmwareapi.images.fetch_image',
+ fake.fake_fetch_image)
+ test.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession.vim',
+ fake_vim_prop)
+ test.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession._is_vim_object',
+ fake_is_vim_object)
diff --git a/nova/tests/unit/virt/vmwareapi/test_configdrive.py b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
index fc2de29919..ab823c2ed6 100644
--- a/nova/tests/unit/virt/vmwareapi/test_configdrive.py
+++ b/nova/tests/unit/virt/vmwareapi/test_configdrive.py
@@ -28,7 +28,6 @@ from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt import fake
from nova.virt.vmwareapi import driver
-from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
@@ -49,7 +48,7 @@ class ConfigDriveTestCase(test.NoDBTestCase):
use_linked_clone=False, group='vmware')
self.flags(enabled=False, group='vnc')
vmwareapi_fake.reset()
- stubs.set_stubs(self.stubs)
+ stubs.set_stubs(self)
nova.tests.unit.image.fake.stub_out_image_service(self)
self.conn = driver.VMwareVCDriver(fake.FakeVirtAPI)
self.network_info = utils.get_test_network_info()
@@ -87,11 +86,11 @@ class ConfigDriveTestCase(test.NoDBTestCase):
(image_service, image_id) = glance.get_remote_image_service(context,
image_ref)
metadata = image_service.show(context, image_id)
- self.image = {
+ self.image = objects.ImageMeta.from_dict({
'id': image_ref,
'disk_format': 'vmdk',
'size': int(metadata['size']),
- }
+ })
class FakeInstanceMetadata(object):
def __init__(self, instance, content=None, extra_md=None,
@@ -109,13 +108,12 @@ class ConfigDriveTestCase(test.NoDBTestCase):
pass
# We can't actually make a config drive v2 because ensure_tree has
# been faked out
- self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder,
- 'make_drive', fake_make_drive)
+ self.stub_out('nova.virt.configdrive.ConfigDriveBuilder.make_drive',
+ fake_make_drive)
def fake_upload_iso_to_datastore(iso_path, instance, **kwargs):
pass
- self.stubs.Set(images,
- 'upload_iso_to_datastore',
+ self.stub_out('nova.virt.vmwareapi.images.upload_iso_to_datastore',
fake_upload_iso_to_datastore)
def tearDown(self):
diff --git a/nova/tests/unit/virt/vmwareapi/test_driver_api.py b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
index 0ec4c94516..8418f52a75 100644
--- a/nova/tests/unit/virt/vmwareapi/test_driver_api.py
+++ b/nova/tests/unit/virt/vmwareapi/test_driver_api.py
@@ -170,13 +170,14 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
- stubs.set_stubs(self.stubs)
+ stubs.set_stubs(self)
vmwareapi_fake.reset()
nova.tests.unit.image.fake.stub_out_image_service(self)
self.conn = driver.VMwareVCDriver(None, False)
self._set_exception_vars()
self.node_name = self.conn._nodename
self.ds = 'ds1'
+ self._display_name = 'fake-display-name'
self.vim = vmwareapi_fake.FakeVim()
@@ -187,12 +188,12 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
(image_service, image_id) = glance.get_remote_image_service(
self.context, image_ref)
metadata = image_service.show(self.context, image_id)
- self.image = {
+ self.image = objects.ImageMeta.from_dict({
'id': image_ref,
'disk_format': 'vmdk',
'size': int(metadata['size']),
- }
- self.fake_image_uuid = self.image['id']
+ })
+ self.fake_image_uuid = self.image.id
nova.tests.unit.image.fake.stub_out_image_service(self)
self.vnc_host = 'ha-host'
@@ -268,8 +269,10 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
def _fake_check_session(_self):
return True
- self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login)
- self.stubs.Set(vmwareapi_fake.FakeVim, '_check_session',
+ self.stub_out('nova.tests.unit.virt.vmwareapi.fake.FakeVim._login',
+ _fake_login)
+ self.stub_out('nova.tests.unit.virt.vmwareapi.'
+ 'fake.FakeVim._check_session',
_fake_check_session)
with mock.patch.object(greenthread, 'sleep'):
@@ -301,6 +304,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
if ephemeral is not None:
self.type_data['ephemeral_gb'] = ephemeral
values = {'name': 'fake_name',
+ 'display_name': self._display_name,
'id': 1,
'uuid': uuid,
'project_id': self.project_id,
@@ -348,7 +352,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
# Get record for VM
vms = vmwareapi_fake._get_objects("VirtualMachine")
for vm in vms.objects:
- if vm.get('name') == self.uuid:
+ if vm.get('name') == vm_util._get_vm_name(self._display_name,
+ self.uuid):
return vm
self.fail('Unable to find VM backing!')
@@ -439,11 +444,6 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
uuids = self.conn.list_instance_uuids()
self.assertEqual(1, len(uuids))
- def test_list_instance_uuids_invalid_uuid(self):
- self._create_vm(uuid='fake_id')
- uuids = self.conn.list_instance_uuids()
- self.assertEqual(0, len(uuids))
-
def _cached_files_exist(self, exists=True):
cache = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
@@ -493,7 +493,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
vmwareapi_fake.assertPathExists(self, str(root))
def _iso_disk_type_created(self, instance_type='m1.large'):
- self.image['disk_format'] = 'iso'
+ self.image.disk_format = 'iso'
self._create_vm(instance_type=instance_type)
path = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
@@ -519,9 +519,9 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
iso_uploaded_path):
self.assertEqual(iso_uploaded_path, str(iso_path))
- self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
+ self.stub_out('nova.virt.vmwareapi.vmops.'
+ 'VMwareVMOps._attach_cdrom_to_vm',
fake_attach_cdrom)
- self.image['disk_format'] = 'iso'
+ self.image.disk_format = 'iso'
self._create_vm()
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
@@ -544,24 +544,24 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
ds_obj.DatastorePath(self.ds, 'fake-config-drive')]
self.iso_index = 0
- def fake_create_config_drive(instance, injected_files, password,
- network_info, data_store_name,
- folder, uuid, cookies):
- return 'fake-config-drive'
-
def fake_attach_cdrom(vm_ref, instance, data_store_ref,
iso_uploaded_path):
self.assertEqual(iso_uploaded_path, str(iso_path[self.iso_index]))
self.iso_index += 1
- self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
- fake_attach_cdrom)
- self.stubs.Set(self.conn._vmops, '_create_config_drive',
- fake_create_config_drive)
-
- self.image['disk_format'] = 'iso'
- self._create_vm()
- self.assertEqual(2, self.iso_index)
+ with test.nested(
+ mock.patch.object(self.conn._vmops,
+ '_attach_cdrom_to_vm',
+ side_effect=fake_attach_cdrom),
+ mock.patch.object(self.conn._vmops,
+ '_create_config_drive',
+ return_value='fake-config-drive'),
+ ) as (fake_attach_cdrom_to_vm, fake_create_config_drive):
+ self.image.disk_format = 'iso'
+ self._create_vm()
+ self.assertEqual(2, self.iso_index)
+ self.assertEqual(2, fake_attach_cdrom_to_vm.call_count)
+ self.assertEqual(1, fake_create_config_drive.call_count)
def test_ephemeral_disk_attach(self):
self._create_vm(ephemeral=50)
@@ -598,24 +598,19 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
iso_path = ds_obj.DatastorePath(self.ds, 'fake-config-drive')
self.cd_attach_called = False
- def fake_create_config_drive(instance, injected_files, password,
- network_info, data_store_name,
- folder, uuid, cookies):
-
- return 'fake-config-drive'
-
def fake_attach_cdrom(vm_ref, instance, data_store_ref,
iso_uploaded_path):
self.assertEqual(iso_uploaded_path, str(iso_path))
self.cd_attach_called = True
- self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
- fake_attach_cdrom)
- self.stubs.Set(self.conn._vmops, '_create_config_drive',
- fake_create_config_drive)
-
- self._create_vm()
- self.assertTrue(self.cd_attach_called)
+ with test.nested(
+ mock.patch.object(self.conn._vmops, '_attach_cdrom_to_vm',
+ side_effect=fake_attach_cdrom),
+ mock.patch.object(self.conn._vmops, '_create_config_drive',
+ return_value='fake-config-drive'),
+ ) as (fake_attach_cdrom_to_vm, fake_create_config_drive):
+ self._create_vm()
+ self.assertTrue(self.cd_attach_called)
@mock.patch.object(vmops.VMwareVMOps, 'power_off')
@mock.patch.object(driver.VMwareVCDriver, 'detach_volume')
@@ -780,13 +775,15 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
def _fake_extend(instance, requested_size, name, dc_ref):
vmwareapi_fake._add_file(str(root))
- self.stubs.Set(self.conn._vmops, '_extend_virtual_disk',
- _fake_extend)
-
- self._create_vm()
- info = self._get_info()
- self._check_vm_info(info, power_state.RUNNING)
- vmwareapi_fake.assertPathExists(self, str(root))
+ with test.nested(
+ mock.patch.object(self.conn._vmops, '_extend_virtual_disk',
+ side_effect=_fake_extend)
+ ) as (fake_extend_virtual_disk):
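+ # Note: with a single mock, "as (name)" binds the whole 1-tuple yielded
+ # by test.nested, so the mock itself is reached as name[0] below.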
+ self._create_vm()
+ info = self._get_info()
+ self._check_vm_info(info, power_state.RUNNING)
+ vmwareapi_fake.assertPathExists(self, str(root))
+ self.assertEqual(1, fake_extend_virtual_disk[0].call_count)
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
@@ -1552,7 +1549,7 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
return str(ds_obj.DatastorePath(data_store_name,
instance_uuid, 'fake.iso'))
- self.stubs.Set(self.conn._vmops, '_create_config_drive',
+ self.stub_out('nova.virt.vmwareapi.vmops._create_config_drive',
fake_create_config_drive)
self._create_vm()
@@ -1563,9 +1560,10 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
- self.stubs.Set(vm_util, "power_on_instance",
+ self.stub_out('nova.virt.vmwareapi.vm_util.power_on_instance',
fake_power_on_instance)
- self.stubs.Set(self.conn._volumeops, "attach_disk_to_vm",
+ self.stub_out('nova.virt.vmwareapi.volumeops.'
+ 'VMwareVolumeOps.attach_disk_to_vm',
fake_attach_disk_to_vm)
self.conn.rescue(self.context, self.instance, self.network_info,
@@ -1886,7 +1884,8 @@ class VMwareAPIVMTestCase(test.NoDBTestCase):
def _fake_get_timestamp_filename(fake):
return self._get_timestamp_filename()
- self.stubs.Set(imagecache.ImageCacheManager, '_get_timestamp_filename',
+ self.stub_out('nova.virt.vmwareapi.imagecache.'
+ 'ImageCacheManager._get_timestamp_filename',
_fake_get_timestamp_filename)
def _timestamp_file_exists(self, exists=True):
diff --git a/nova/tests/unit/virt/vmwareapi/test_ds_util.py b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
index e56e3c28f8..9e1c0eb721 100644
--- a/nova/tests/unit/virt/vmwareapi/test_ds_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_ds_util.py
@@ -447,3 +447,30 @@ class DsUtilTestCase(test.NoDBTestCase):
"normal",
"VMFS",
datastore_regex))
+
+ def test_get_connected_hosts_none(self):
+ with mock.patch.object(self.session,
+ '_call_method') as _call_method:
+ hosts = ds_util.get_connected_hosts(self.session,
+ 'fake_datastore')
+ self.assertEqual([], hosts)
+ _call_method.assert_called_once_with(
+ mock.ANY, 'get_object_property',
+ 'fake_datastore', 'host')
+
+ def test_get_connected_hosts(self):
+ host = mock.Mock(spec=object)
+ host.value = 'fake-host'
+ host_mount = mock.Mock(spec=object)
+ host_mount.key = host
+ host_mounts = mock.Mock(spec=object)
+ host_mounts.DatastoreHostMount = [host_mount]
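+ # The mocks above mirror the vSphere Datastore "host" property: an
+ # array of DatastoreHostMount objects whose "key" is a host reference.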
+
+ with mock.patch.object(self.session, '_call_method',
+ return_value=host_mounts) as _call_method:
+ hosts = ds_util.get_connected_hosts(self.session,
+ 'fake_datastore')
+ self.assertEqual(['fake-host'], hosts)
+ _call_method.assert_called_once_with(
+ mock.ANY, 'get_object_property',
+ 'fake_datastore', 'host')
diff --git a/nova/tests/unit/virt/vmwareapi/test_images.py b/nova/tests/unit/virt/vmwareapi/test_images.py
index 9e3a5b1b2b..4a5424e7dd 100644
--- a/nova/tests/unit/virt/vmwareapi/test_images.py
+++ b/nova/tests/unit/virt/vmwareapi/test_images.py
@@ -28,6 +28,7 @@ from nova import test
import nova.tests.unit.image.fake
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import images
+from nova.virt.vmwareapi import vm_util
class VMwareImagesTestCase(test.NoDBTestCase):
@@ -118,12 +119,14 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock.patch.object(images.IMAGE_API, 'download'),
mock.patch.object(images, 'start_transfer'),
mock.patch.object(images, '_build_shadow_vm_config_spec'),
- mock.patch.object(session, '_call_method')
+ mock.patch.object(session, '_call_method'),
+ mock.patch.object(vm_util, 'get_vmdk_info')
) as (mock_image_api_get,
mock_image_api_download,
mock_start_transfer,
mock_build_shadow_vm_config_spec,
- mock_call_method):
+ mock_call_method,
+ mock_get_vmdk_info):
image_data = {'id': 'fake-id',
'disk_format': 'vmdk',
'size': 512}
@@ -169,7 +172,8 @@ class VMwareImagesTestCase(test.NoDBTestCase):
fileobj=mock_read_handle)
mock_start_transfer.assert_called_once_with(context,
mock_read_handle, 512, write_file_handle=mock_write_handle)
-
+ mock_get_vmdk_info.assert_called_once_with(
+ session, mock.sentinel.vm_ref, 'fake-vm')
mock_call_method.assert_called_once_with(
session.vim, "UnregisterVM", mock.sentinel.vm_ref)
@@ -186,12 +190,14 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock.patch.object(images.IMAGE_API, 'download'),
mock.patch.object(images, 'start_transfer'),
mock.patch.object(images, '_build_shadow_vm_config_spec'),
- mock.patch.object(session, '_call_method')
+ mock.patch.object(session, '_call_method'),
+ mock.patch.object(vm_util, 'get_vmdk_info')
) as (mock_image_api_get,
mock_image_api_download,
mock_start_transfer,
mock_build_shadow_vm_config_spec,
- mock_call_method):
+ mock_call_method,
+ mock_get_vmdk_info):
image_data = {'id': 'fake-id',
'disk_format': 'vmdk',
'size': 512}
@@ -219,6 +225,8 @@ class VMwareImagesTestCase(test.NoDBTestCase):
mock_call_method.assert_called_once_with(
session.vim, "UnregisterVM", mock.sentinel.vm_ref)
+ mock_get_vmdk_info.assert_called_once_with(
+ session, mock.sentinel.vm_ref, 'fake-vm')
def test_from_image_with_image_ref(self):
raw_disk_size_in_gb = 83
diff --git a/nova/tests/unit/virt/vmwareapi/test_network_util.py b/nova/tests/unit/virt/vmwareapi/test_network_util.py
index a83d4c6117..ff2c8764ba 100644
--- a/nova/tests/unit/virt/vmwareapi/test_network_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_network_util.py
@@ -38,8 +38,10 @@ class GetNetworkWithTheNameTestCase(test.NoDBTestCase):
def setUp(self):
super(GetNetworkWithTheNameTestCase, self).setUp()
fake.reset()
- self.stubs.Set(driver.VMwareAPISession, "vim", stubs.fake_vim_prop)
- self.stubs.Set(driver.VMwareAPISession, "_is_vim_object",
+ self.stub_out('nova.virt.vmwareapi.driver.VMwareAPISession.vim',
+ stubs.fake_vim_prop)
+ self.stub_out('nova.virt.vmwareapi.driver.'
+ 'VMwareAPISession._is_vim_object',
stubs.fake_is_vim_object)
self._session = driver.VMwareAPISession()
diff --git a/nova/tests/unit/virt/vmwareapi/test_read_write_util.py b/nova/tests/unit/virt/vmwareapi/test_read_write_util.py
index 74541c01e2..d58402273f 100644
--- a/nova/tests/unit/virt/vmwareapi/test_read_write_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_read_write_util.py
@@ -16,13 +16,10 @@
import urllib
import mock
-from oslo_config import cfg
from nova import test
from nova.virt.vmwareapi import read_write_util
-CONF = cfg.CONF
-
class ReadWriteUtilTestCase(test.NoDBTestCase):
diff --git a/nova/tests/unit/virt/vmwareapi/test_vif.py b/nova/tests/unit/virt/vmwareapi/test_vif.py
index 81fce0db7e..515f298e2f 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vif.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vif.py
@@ -14,7 +14,6 @@
# under the License.
import mock
-from oslo_config import cfg
from oslo_vmware import exceptions as vexc
from oslo_vmware import vim_util
@@ -29,8 +28,6 @@ from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vm_util
-CONF = cfg.CONF
-
class VMwareVifTestCase(test.NoDBTestCase):
def setUp(self):
diff --git a/nova/tests/unit/virt/vmwareapi/test_vm_util.py b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
index 9ce4a24b83..aeae9897d7 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vm_util.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vm_util.py
@@ -44,11 +44,12 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVMUtilTestCase, self).setUp()
fake.reset()
- stubs.set_stubs(self.stubs)
+ stubs.set_stubs(self)
vm_util.vm_refs_cache_reset()
self._instance = fake_instance.fake_instance_obj(
None,
**{'id': 7, 'name': 'fake!',
+ 'display_name': 'fake-display-name',
'uuid': uuidutils.generate_uuid(),
'vcpus': 2, 'memory_mb': 2048})
@@ -1732,6 +1733,39 @@ class VMwareVMUtilTestCase(test.NoDBTestCase):
vm_util.folder_ref_cache_update(path, 'fake-ref')
self.assertEqual('fake-ref', vm_util.folder_ref_cache_get(path))
+ def test_get_vm_name(self):
+ uuid = uuidutils.generate_uuid()
+ expected = uuid
+ name = vm_util._get_vm_name(None, uuid)
+ self.assertEqual(expected, name)
+
+ display_name = 'fira'
+ expected = 'fira (%s)' % uuid
+ name = vm_util._get_vm_name(display_name, uuid)
+ self.assertEqual(expected, name)
+
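+ # A vCenter VM name is capped at 80 characters: a 36-char uuid plus
+ # " (" and ")" leaves room for 41 characters of display name.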
+ display_name = 'X' * 255
+ expected = '%s (%s)' % ('X' * 41, uuid)
+ name = vm_util._get_vm_name(display_name, uuid)
+ self.assertEqual(expected, name)
+ self.assertEqual(len(name), 80)
+
+ @mock.patch.object(vm_util, '_get_vm_name', return_value='fake-name')
+ def test_rename_vm(self, mock_get_name):
+ session = fake.FakeSession()
+ with test.nested(
+ mock.patch.object(session, '_call_method',
+ return_value='fake_rename_task'),
+ mock.patch.object(session, '_wait_for_task')
+ ) as (_call_method, _wait_for_task):
+ vm_util.rename_vm(session, 'fake-ref', self._instance)
+ _call_method.assert_called_once_with(mock.ANY,
+ 'Rename_Task', 'fake-ref', newName='fake-name')
+ _wait_for_task.assert_called_once_with(
+ 'fake_rename_task')
+ mock_get_name.assert_called_once_with(self._instance.display_name,
+ self._instance.uuid)
+
@mock.patch.object(driver.VMwareAPISession, 'vim', stubs.fake_vim_prop)
class VMwareVMUtilGetHostRefTestCase(test.NoDBTestCase):
diff --git a/nova/tests/unit/virt/vmwareapi/test_vmops.py b/nova/tests/unit/virt/vmwareapi/test_vmops.py
index 5f476101f8..685df0d824 100644
--- a/nova/tests/unit/virt/vmwareapi/test_vmops.py
+++ b/nova/tests/unit/virt/vmwareapi/test_vmops.py
@@ -59,7 +59,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
super(VMwareVMOpsTestCase, self).setUp()
ds_util.dc_cache_reset()
vmwareapi_fake.reset()
- stubs.set_stubs(self.stubs)
+ stubs.set_stubs(self)
self.flags(enabled=True, group='vnc')
self.flags(image_cache_subdirectory_name='vmware_base',
my_ip='',
@@ -79,8 +79,8 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
vmFolder='fake_vm_folder')
cluster = vmwareapi_fake.create_cluster('fake_cluster', fake_ds_ref)
self._instance_values = {
- 'display_name': 'fake_display_name',
'name': 'fake_name',
+ 'display_name': 'fake_display_name',
'uuid': 'fake_uuid',
'vcpus': 1,
'memory_mb': 512,
@@ -203,12 +203,12 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
def test_get_valid_vms_from_retrieve_result(self, _mock_cont):
ops = vmops.VMwareVMOps(self._session, mock.Mock(), mock.Mock())
fake_objects = vmwareapi_fake.FakeRetrieveResult()
- fake_objects.add_object(vmwareapi_fake.VirtualMachine(
- name=uuidutils.generate_uuid()))
- fake_objects.add_object(vmwareapi_fake.VirtualMachine(
- name=uuidutils.generate_uuid()))
- fake_objects.add_object(vmwareapi_fake.VirtualMachine(
- name=uuidutils.generate_uuid()))
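+ # Valid VMs are identified by the instance uuid stored in the
+ # "nvp.vm-uuid" extraConfig entry, so each fake VM needs one set.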
+ for _ in range(3):
+ vm = vmwareapi_fake.VirtualMachine()
+ vm.set('config.extraConfig["nvp.vm-uuid"]',
+ vmwareapi_fake.OptionValue(
+ value=uuidutils.generate_uuid()))
+ fake_objects.add_object(vm)
vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
self.assertEqual(3, len(vms))
@@ -217,14 +217,21 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
_mock_cont):
ops = vmops.VMwareVMOps(self._session, mock.Mock(), mock.Mock())
fake_objects = vmwareapi_fake.FakeRetrieveResult()
- fake_objects.add_object(vmwareapi_fake.VirtualMachine(
- name=uuidutils.generate_uuid()))
- invalid_vm1 = vmwareapi_fake.VirtualMachine(
- name=uuidutils.generate_uuid())
+ valid_vm = vmwareapi_fake.VirtualMachine()
+ valid_vm.set('config.extraConfig["nvp.vm-uuid"]',
+ vmwareapi_fake.OptionValue(
+ value=uuidutils.generate_uuid()))
+ fake_objects.add_object(valid_vm)
+ invalid_vm1 = vmwareapi_fake.VirtualMachine()
invalid_vm1.set('runtime.connectionState', 'orphaned')
- invalid_vm2 = vmwareapi_fake.VirtualMachine(
- name=uuidutils.generate_uuid())
+ invalid_vm1.set('config.extraConfig["nvp.vm-uuid"]',
+ vmwareapi_fake.OptionValue(
+ value=uuidutils.generate_uuid()))
+ invalid_vm2 = vmwareapi_fake.VirtualMachine()
invalid_vm2.set('runtime.connectionState', 'inaccessible')
+ invalid_vm2.set('config.extraConfig["nvp.vm-uuid"]',
+ vmwareapi_fake.OptionValue(
+ value=uuidutils.generate_uuid()))
fake_objects.add_object(invalid_vm1)
fake_objects.add_object(invalid_vm2)
vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
@@ -527,7 +534,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
resize_instance=resize_instance,
image_meta=None,
power_on=power_on)
- fake_resize_create_ephemerals_and_swap.called_once_with(
+ fake_resize_create_ephemerals_and_swap.assert_called_once_with(
'fake-ref', self._instance, None)
if power_on:
fake_power_on.assert_called_once_with(self._session,
@@ -656,8 +663,8 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
mock_attach_disk.assert_called_once_with(
'fake-ref', self._instance, 'fake-adapter', 'fake-disk',
'[fake] uuid/root.vmdk')
- fake_remove_ephemerals_and_swap.called_once_with('fake-ref')
- fake_resize_create_ephemerals_and_swap.called_once_with(
+ fake_remove_ephemerals_and_swap.assert_called_once_with('fake-ref')
+ fake_resize_create_ephemerals_and_swap.assert_called_once_with(
'fake-ref', self._instance, None)
if power_on:
fake_power_on.assert_called_once_with(self._session,
@@ -944,6 +951,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
'image_id': image_id,
'version': version.version_string_with_package()})
+ @mock.patch.object(vm_util, 'rename_vm')
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@@ -963,7 +971,8 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
enlist_image, fetch_image,
use_disk_image,
power_on_instance,
- create_folders):
+ create_folders,
+ rename_vm):
self._instance.flavor = self._flavor
extra_specs = get_extra_specs.return_value
connection_info1 = {'data': 'fake-data1', 'serial': 'volume-fake-id1'}
@@ -1007,6 +1016,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
connection_info2, self._instance,
constants.DEFAULT_ADAPTER_TYPE)
+ @mock.patch.object(vm_util, 'rename_vm')
@mock.patch.object(vmops.VMwareVMOps, '_create_folders',
return_value='fake_vm_folder')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@@ -1020,7 +1030,8 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
get_vm_config_info,
build_virtual_machine,
power_on_instance,
- create_folders):
+ create_folders,
+ rename_vm):
self._instance.image_ref = None
self._instance.flavor = self._flavor
extra_specs = get_extra_specs.return_value
@@ -1294,6 +1305,8 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
if extras:
expected_methods.extend(extras)
+ # Last call should be renaming the instance
+ expected_methods.append('Rename_Task')
recorded_methods = [c[1][1] for c in mock_call_method.mock_calls]
self.assertEqual(expected_methods, recorded_methods)
@@ -1570,6 +1583,7 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
'device_name': '/dev/sdb'}}
self._test_spawn(block_device_info=block_device_info)
+ @mock.patch.object(vm_util, 'rename_vm')
@mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
@mock.patch.object(vmops.VMwareVMOps, '_create_and_attach_thin_disk')
@mock.patch.object(vmops.VMwareVMOps, '_use_disk_image_as_linked_clone')
@@ -1589,7 +1603,8 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
fetch_image,
use_disk_image,
create_and_attach_thin_disk,
- power_on_instance):
+ power_on_instance,
+ rename_vm):
self._instance.flavor = objects.Flavor(vcpus=1, memory_mb=512,
name="m1.tiny", root_gb=1,
ephemeral_gb=1, swap=512,
@@ -2094,7 +2109,8 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
image_ds_loc.rel_path,
cookies='Fake-CookieJar')
- @mock.patch.object(images, 'fetch_image_stream_optimized')
+ @mock.patch.object(images, 'fetch_image_stream_optimized',
+ return_value=123)
def test_fetch_image_as_vapp(self, mock_fetch_image):
vi = self._make_vm_config_info()
image_ds_loc = mock.Mock()
@@ -2108,6 +2124,23 @@ class VMwareVMOpsTestCase(test.NoDBTestCase):
self._ds.name,
vi.dc_info.vmFolder,
self._vmops._root_resource_pool)
+ self.assertEqual(vi.ii.file_size, 123)
+
+ @mock.patch.object(images, 'fetch_image_ova', return_value=123)
+ def test_fetch_image_as_ova(self, mock_fetch_image):
+ vi = self._make_vm_config_info()
+ image_ds_loc = mock.Mock()
+ image_ds_loc.parent.basename = 'fake-name'
+ self._vmops._fetch_image_as_ova(self._context, vi, image_ds_loc)
+ mock_fetch_image.assert_called_once_with(
+ self._context,
+ vi.instance,
+ self._session,
+ 'fake-name',
+ self._ds.name,
+ vi.dc_info.vmFolder,
+ self._vmops._root_resource_pool)
+ self.assertEqual(vi.ii.file_size, 123)
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
def test_prepare_iso_image(self, mock_generate_uuid):
diff --git a/nova/tests/unit/virt/vmwareapi/test_volumeops.py b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
index 562ace3e0b..d0296d3274 100644
--- a/nova/tests/unit/virt/vmwareapi/test_volumeops.py
+++ b/nova/tests/unit/virt/vmwareapi/test_volumeops.py
@@ -36,7 +36,7 @@ class VMwareVolumeOpsTestCase(test.NoDBTestCase):
super(VMwareVolumeOpsTestCase, self).setUp()
vmwareapi_fake.reset()
- stubs.set_stubs(self.stubs)
+ stubs.set_stubs(self)
self._session = driver.VMwareAPISession()
self._context = context.RequestContext('fake_user', 'fake_project')
diff --git a/nova/tests/unit/virt/xenapi/client/test_session.py b/nova/tests/unit/virt/xenapi/client/test_session.py
index 7c7435f244..16c352feac 100644
--- a/nova/tests/unit/virt/xenapi/client/test_session.py
+++ b/nova/tests/unit/virt/xenapi/client/test_session.py
@@ -34,7 +34,7 @@ class SessionTestCase(stubs.XenAPITestBaseNoDB):
create_session.return_value = sess
mock_version.return_value = ('version', 'brand')
- session.XenAPISession('url', 'username', 'password')
+ session.XenAPISession('http://someserver', 'username', 'password')
expected_version = '%s %s %s' % (version.vendor_string(),
version.product_string(),
@@ -54,7 +54,7 @@ class SessionTestCase(stubs.XenAPITestBaseNoDB):
create_session.return_value = sess
mock_version.return_value = ('version', 'brand')
- session.XenAPISession('url', 'username', 'password')
+ session.XenAPISession('http://someserver', 'username', 'password')
self.assertEqual(2, sess.login_with_password.call_count)
self.assertEqual(2, mock_timeout.call_count)
@@ -77,7 +77,7 @@ class SessionTestCase(stubs.XenAPITestBaseNoDB):
XenAPI.Failure(['HOST_IS_SLAVE', 'master']), None, None]
mock_version.return_value = ('version', 'brand')
- session.XenAPISession('url', 'username', 'password')
+ session.XenAPISession('http://slave', 'username', 'password')
self.assertEqual(3, sess.login_with_password.call_count)
self.assertEqual(3, mock_timeout.call_count)
diff --git a/nova/tests/unit/virt/xenapi/image/test_utils.py b/nova/tests/unit/virt/xenapi/image/test_utils.py
index 1d980384a6..9d7071452c 100644
--- a/nova/tests/unit/virt/xenapi/image/test_utils.py
+++ b/nova/tests/unit/virt/xenapi/image/test_utils.py
@@ -208,8 +208,8 @@ class RawTGZTestCase(test.NoDBTestCase):
def test_stream_to_without_size_retrieved(self):
source_tar = self.mox.CreateMock(tarfile.TarFile)
first_tarinfo = self.mox.CreateMock(tarfile.TarInfo)
- target_file = self.mox.CreateMock(file)
- source_file = self.mox.CreateMock(file)
+ target_file = self.mox.CreateMock(open)
+ source_file = self.mox.CreateMock(open)
image = utils.RawTGZImage(None)
image._image_service_and_image_id = ('service', 'id')
@@ -230,8 +230,8 @@ class RawTGZTestCase(test.NoDBTestCase):
def test_stream_to_with_size_retrieved(self):
source_tar = self.mox.CreateMock(tarfile.TarFile)
first_tarinfo = self.mox.CreateMock(tarfile.TarInfo)
- target_file = self.mox.CreateMock(file)
- source_file = self.mox.CreateMock(file)
+ target_file = self.mox.CreateMock(open)
+ source_file = self.mox.CreateMock(open)
first_tarinfo.size = 124
image = utils.RawTGZImage(None)
diff --git a/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py b/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
index 4a86ce5371..964707d5f1 100644
--- a/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
+++ b/nova/tests/unit/virt/xenapi/image/test_vdi_through_dev.py
@@ -17,6 +17,7 @@ import contextlib
import tarfile
import eventlet
+import six
from nova.image import glance
from nova import test
@@ -147,14 +148,14 @@ class TestTarGzProducer(test.NoDBTestCase):
self.assertEqual('writefile', producer.output)
def test_start(self):
- outf = self.mox.CreateMock(file)
+ outf = six.StringIO()
producer = vdi_through_dev.TarGzProducer('fpath', outf,
'100', 'fname')
tfile = self.mox.CreateMock(tarfile.TarFile)
tinfo = self.mox.CreateMock(tarfile.TarInfo)
- inf = self.mox.CreateMock(file)
+ inf = self.mox.CreateMock(open)
self.mox.StubOutWithMock(vdi_through_dev, 'tarfile')
self.mox.StubOutWithMock(producer, '_open_file')
diff --git a/nova/tests/unit/virt/xenapi/test_agent.py b/nova/tests/unit/virt/xenapi/test_agent.py
index fedbcd0599..5fe07b569b 100644
--- a/nova/tests/unit/virt/xenapi/test_agent.py
+++ b/nova/tests/unit/virt/xenapi/test_agent.py
@@ -230,7 +230,7 @@ class RebootRetryTestCase(AgentTestCaseBase):
self.assertEqual("done", result)
self.assertTrue(mock_session.VM.get_domid.called)
self.assertEqual(2, mock_session.call_plugin.call_count)
- mock_wait.called_once_with(mock_session, self.vm_ref,
+ mock_wait.assert_called_once_with(mock_session, self.vm_ref,
"fake_dom_id", "asdf")
@mock.patch.object(time, 'sleep')
diff --git a/nova/tests/unit/virt/xenapi/test_vif.py b/nova/tests/unit/virt/xenapi/test_vif.py
new file mode 100644
index 0000000000..a41e506451
--- /dev/null
+++ b/nova/tests/unit/virt/xenapi/test_vif.py
@@ -0,0 +1,189 @@
+# Copyright 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova import exception
+from nova.network import model
+from nova.tests.unit.virt.xenapi import stubs
+from nova.virt.xenapi import network_utils
+from nova.virt.xenapi import vif
+
+fake_vif = {
+ 'created_at': None,
+ 'updated_at': None,
+ 'deleted_at': None,
+ 'deleted': 0,
+ 'id': '123456789123',
+ 'address': '00:00:00:00:00:00',
+ 'network_id': 123,
+ 'instance_uuid': 'fake-uuid',
+ 'uuid': 'fake-uuid-2',
+}
+
+
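+# fake_call_xenapi stands in for XenAPISession.call_xenapi: it fakes only
+# the XenAPI calls exercised by these tests (VIF lookup, create, unplug
+# and destroy) against the canned refs above.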
+def fake_call_xenapi(method, *args):
+ if method == "VM.get_VIFs":
+ return ["fake_vif_ref", "fake_vif_ref_A2"]
+ if method == "VIF.get_record":
+ if args[0] == "fake_vif_ref":
+ return {'uuid': fake_vif['uuid'],
+ 'MAC': fake_vif['address'],
+ 'network': 'fake_network',
+ 'other_config': {'nicira-iface-id': fake_vif['id']}
+ }
+ else:
+ raise exception.Exception("Failed get vif record")
+ if method == "VIF.unplug":
+ return
+ if method == "VIF.destroy":
+ if args[0] == "fake_vif_ref":
+ return
+ else:
+ raise exception.Exception("unplug vif failed")
+ if method == "VIF.create":
+ if args[0] == "fake_vif_rec":
+ return "fake_vif_ref"
+ else:
+ raise exception.Exception("VIF existed")
+ return "Unexpected call_xenapi: %s.%s" % (method, args)
+
+
+class XenVIFDriverTestBase(stubs.XenAPITestBaseNoDB):
+ def setUp(self):
+ super(XenVIFDriverTestBase, self).setUp()
+ self._session = mock.Mock()
+ self._session.call_xenapi.side_effect = fake_call_xenapi
+
+
+class XenVIFDriverTestCase(XenVIFDriverTestBase):
+ def setUp(self):
+ super(XenVIFDriverTestCase, self).setUp()
+ self.base_driver = vif.XenVIFDriver(self._session)
+
+ def test_get_vif_ref(self):
+ vm_ref = "fake_vm_ref"
+ vif_ref = 'fake_vif_ref'
+ ret_vif_ref = self.base_driver._get_vif_ref(fake_vif, vm_ref)
+ self.assertEqual(vif_ref, ret_vif_ref)
+
+ expected = [mock.call('VM.get_VIFs', vm_ref),
+ mock.call('VIF.get_record', vif_ref)]
+ self.assertEqual(expected, self._session.call_xenapi.call_args_list)
+
+ def test_get_vif_ref_none_and_exception(self):
+ vm_ref = "fake_vm_ref"
+ vif = {'address': "no_match_vif_address"}
+ ret_vif_ref = self.base_driver._get_vif_ref(vif, vm_ref)
+ self.assertIsNone(ret_vif_ref)
+
+ expected = [mock.call('VM.get_VIFs', vm_ref),
+ mock.call('VIF.get_record', 'fake_vif_ref'),
+ mock.call('VIF.get_record', 'fake_vif_ref_A2')]
+ self.assertEqual(expected, self._session.call_xenapi.call_args_list)
+
+ def test_create_vif(self):
+ vif_rec = "fake_vif_rec"
+ vm_ref = "fake_vm_ref"
+ ret_vif_ref = self.base_driver._create_vif(fake_vif, vif_rec, vm_ref)
+ self.assertEqual("fake_vif_ref", ret_vif_ref)
+
+ expected = [mock.call('VIF.create', vif_rec)]
+ self.assertEqual(expected, self._session.call_xenapi.call_args_list)
+
+ def test_create_vif_exception(self):
+ self.assertRaises(exception.NovaException,
+ self.base_driver._create_vif,
+ "fake_vif", "missing_vif_rec", "fake_vm_ref")
+
+ @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref',
+ return_value='fake_vif_ref')
+ def test_unplug(self, mock_get_vif_ref):
+ instance = {'name': "fake_instance"}
+ vm_ref = "fake_vm_ref"
+ self.base_driver.unplug(instance, fake_vif, vm_ref)
+ expected = [mock.call('VIF.destroy', 'fake_vif_ref')]
+ self.assertEqual(expected, self._session.call_xenapi.call_args_list)
+
+ @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref',
+ return_value='missing_vif_ref')
+ def test_unplug_exception(self, mock_get_vif_ref):
+ instance = "fake_instance"
+ vm_ref = "fake_vm_ref"
+ self.assertRaises(exception.NovaException,
+ self.base_driver.unplug,
+ instance, fake_vif, vm_ref)
+
+
+class XenAPIBridgeDriverTestCase(XenVIFDriverTestBase):
+ def setUp(self):
+ super(XenAPIBridgeDriverTestCase, self).setUp()
+ self.bridge_driver = vif.XenAPIBridgeDriver(self._session)
+
+ @mock.patch.object(vif.XenAPIBridgeDriver, '_ensure_vlan_bridge',
+ return_value='fake_network_ref')
+ @mock.patch.object(vif.XenVIFDriver, '_create_vif',
+ return_value='fake_vif_ref')
+ def test_plug_create_vlan(self, mock_create_vif, mock_ensure_vlan_bridge):
+ instance = {'name': "fake_instance_name"}
+ network = model.Network()
+ network._set_meta({'should_create_vlan': True})
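+ # This local "vif" (a network model object) shadows the imported vif
+ # module for the remainder of this test.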
+ vif = model.VIF()
+ vif._set_meta({'rxtx_cap': 1})
+ vif['network'] = network
+ vif['address'] = "fake_address"
+ vm_ref = "fake_vm_ref"
+ device = 1
+ ret_vif_ref = self.bridge_driver.plug(instance, vif, vm_ref, device)
+ self.assertEqual('fake_vif_ref', ret_vif_ref)
+
+ @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref',
+ return_value='fake_vif_ref')
+ def test_unplug(self, mock_get_vif_ref):
+ instance = {'name': "fake_instance"}
+ vm_ref = "fake_vm_ref"
+ self.bridge_driver.unplug(instance, fake_vif, vm_ref)
+
+ expected = [mock.call('VIF.destroy', 'fake_vif_ref')]
+ self.assertEqual(expected, self._session.call_xenapi.call_args_list)
+
+
+class XenAPIOpenVswitchDriverTestCase(XenVIFDriverTestBase):
+ def setUp(self):
+ super(XenAPIOpenVswitchDriverTestCase, self).setUp()
+ self.ovs_driver = vif.XenAPIOpenVswitchDriver(self._session)
+
+ @mock.patch.object(network_utils, 'find_network_with_bridge',
+ return_value='fake_network_ref')
+ @mock.patch.object(vif.XenVIFDriver, '_create_vif',
+ return_value='fake_vif_ref')
+ @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref', return_value=None)
+ def test_plug(self, mock_get_vif_ref, mock_create_vif,
+ mock_find_network_with_bridge):
+ instance = {'name': "fake_instance_name"}
+ vm_ref = "fake_vm_ref"
+ device = 1
+ ret_vif_ref = self.ovs_driver.plug(instance, fake_vif, vm_ref, device)
+ self.assertEqual('fake_vif_ref', ret_vif_ref)
+
+ @mock.patch.object(vif.XenVIFDriver, '_get_vif_ref',
+ return_value='fake_vif_ref')
+ def test_unplug(self, mock_get_vif_ref):
+ instance = {'name': "fake_instance"}
+ vm_ref = "fake_vm_ref"
+ self.ovs_driver.unplug(instance, fake_vif, vm_ref)
+
+ expected = [mock.call('VIF.destroy', 'fake_vif_ref')]
+ self.assertEqual(expected, self._session.call_xenapi.call_args_list)
diff --git a/nova/tests/unit/virt/xenapi/test_vm_utils.py b/nova/tests/unit/virt/xenapi/test_vm_utils.py
index b8d21e161c..1305d05cc4 100644
--- a/nova/tests/unit/virt/xenapi/test_vm_utils.py
+++ b/nova/tests/unit/virt/xenapi/test_vm_utils.py
@@ -1153,8 +1153,9 @@ class GenerateDiskTestCase(VMUtilsTestBase):
self._expect_parted_calls()
self.mox.ReplayAll()
- vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
- self.vm_ref, "2", "name", "user", 10, None)
+ vdi_ref = vm_utils._generate_disk(
+ self.session, {"uuid": "fake_uuid"},
+ self.vm_ref, "2", "name", "user", 10, None, None)
self._check_vdi(vdi_ref)
@test_xenapi.stub_vm_utils_with_vdi_attached_here
@@ -1163,44 +1164,50 @@ class GenerateDiskTestCase(VMUtilsTestBase):
utils.execute('mkswap', '/dev/fakedev1', run_as_root=True)
self.mox.ReplayAll()
- vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
- self.vm_ref, "2", "name", "swap", 10, "linux-swap")
+ vdi_ref = vm_utils._generate_disk(
+ self.session, {"uuid": "fake_uuid"},
+ self.vm_ref, "2", "name", "swap", 10, "swap", None)
self._check_vdi(vdi_ref)
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_ephemeral(self):
self._expect_parted_calls()
- utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
- run_as_root=True)
+ utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral',
+ '/dev/fakedev1', run_as_root=True)
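+ # The expected mkfs call now forces creation (-F) and labels the
+ # filesystem (-L ephemeral), matching the new fs_label argument.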
self.mox.ReplayAll()
- vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
- self.vm_ref, "2", "name", "ephemeral", 10, "ext4")
+ vdi_ref = vm_utils._generate_disk(
+ self.session, {"uuid": "fake_uuid"}, self.vm_ref,
+ "4", "name", "ephemeral", 10, "ext4", "ephemeral")
self._check_vdi(vdi_ref)
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_ensure_cleanup_called(self):
self._expect_parted_calls()
- utils.execute('mkfs', '-t', 'ext4', '/dev/fakedev1',
+ utils.execute(
+ 'mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral', '/dev/fakedev1',
run_as_root=True).AndRaise(test.TestingException)
- vm_utils.destroy_vdi(self.session,
+ vm_utils.destroy_vdi(
+ self.session,
mox.IgnoreArg()).AndRaise(exception.StorageError(reason=""))
self.mox.ReplayAll()
- self.assertRaises(test.TestingException, vm_utils._generate_disk,
+ self.assertRaises(
+ test.TestingException, vm_utils._generate_disk,
self.session, {"uuid": "fake_uuid"},
- self.vm_ref, "2", "name", "ephemeral", 10, "ext4")
+ self.vm_ref, "4", "name", "ephemeral", 10, "ext4", "ephemeral")
@test_xenapi.stub_vm_utils_with_vdi_attached_here
def test_generate_disk_ephemeral_local_not_attached(self):
self.session.is_local_connection = True
self._expect_parted_calls()
- utils.execute('mkfs', '-t', 'ext4', '/dev/mapper/fakedev1',
- run_as_root=True)
+ utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'ephemeral',
+ '/dev/mapper/fakedev1', run_as_root=True)
self.mox.ReplayAll()
- vdi_ref = vm_utils._generate_disk(self.session, {"uuid": "fake_uuid"},
- None, "2", "name", "ephemeral", 10, "ext4")
+ vdi_ref = vm_utils._generate_disk(
+ self.session, {"uuid": "fake_uuid"},
+ None, "4", "name", "ephemeral", 10, "ext4", "ephemeral")
self._check_vdi(vdi_ref, check_attached=False)
@@ -1213,6 +1220,7 @@ class GenerateEphemeralTestCase(VMUtilsTestBase):
self.name_label = "name"
self.ephemeral_name_label = "name ephemeral"
self.userdevice = 4
+ self.fs_label = "ephemeral"
self.mox.StubOutWithMock(vm_utils, "_generate_disk")
self.mox.StubOutWithMock(vm_utils, "safe_destroy_vdis")
@@ -1231,46 +1239,54 @@ class GenerateEphemeralTestCase(VMUtilsTestBase):
expected = [1024, 1024]
self.assertEqual(expected, list(result))
- def _expect_generate_disk(self, size, device, name_label):
- vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
+ def _expect_generate_disk(self, size, device, name_label, fs_label):
+ vm_utils._generate_disk(
+ self.session, self.instance, self.vm_ref,
str(device), name_label, 'ephemeral',
- size * 1024, None).AndReturn(device)
+ size * 1024, None, fs_label).AndReturn(device)
def test_generate_ephemeral_adds_one_disk(self):
- self._expect_generate_disk(20, self.userdevice,
- self.ephemeral_name_label)
+ self._expect_generate_disk(
+ 20, self.userdevice, self.ephemeral_name_label, self.fs_label)
self.mox.ReplayAll()
- vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
+ vm_utils.generate_ephemeral(
+ self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 20)
def test_generate_ephemeral_adds_multiple_disks(self):
- self._expect_generate_disk(2000, self.userdevice,
- self.ephemeral_name_label)
- self._expect_generate_disk(2000, self.userdevice + 1,
- self.ephemeral_name_label + " (1)")
- self._expect_generate_disk(30, self.userdevice + 2,
- self.ephemeral_name_label + " (2)")
+ self._expect_generate_disk(
+ 2000, self.userdevice, self.ephemeral_name_label, self.fs_label)
+ self._expect_generate_disk(
+ 2000, self.userdevice + 1, self.ephemeral_name_label + " (1)",
+ self.fs_label + "1")
+ self._expect_generate_disk(
+ 30, self.userdevice + 2, self.ephemeral_name_label + " (2)",
+ self.fs_label + "2")
self.mox.ReplayAll()
- vm_utils.generate_ephemeral(self.session, self.instance, self.vm_ref,
+ vm_utils.generate_ephemeral(
+ self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4030)
def test_generate_ephemeral_cleans_up_on_error(self):
- self._expect_generate_disk(1024, self.userdevice,
- self.ephemeral_name_label)
- self._expect_generate_disk(1024, self.userdevice + 1,
- self.ephemeral_name_label + " (1)")
+ self._expect_generate_disk(
+ 1024, self.userdevice, self.ephemeral_name_label, self.fs_label)
+ self._expect_generate_disk(
+ 1024, self.userdevice + 1, self.ephemeral_name_label + " (1)",
+ self.fs_label + "1")
- vm_utils._generate_disk(self.session, self.instance, self.vm_ref,
+ vm_utils._generate_disk(
+ self.session, self.instance, self.vm_ref,
str(self.userdevice + 2), "name ephemeral (2)", 'ephemeral',
- units.Mi, None).AndRaise(exception.NovaException)
+ units.Mi, None, 'ephemeral2').AndRaise(exception.NovaException)
vm_utils.safe_destroy_vdis(self.session, [4, 5])
self.mox.ReplayAll()
- self.assertRaises(exception.NovaException, vm_utils.generate_ephemeral,
+ self.assertRaises(
+ exception.NovaException, vm_utils.generate_ephemeral,
self.session, self.instance, self.vm_ref,
str(self.userdevice), self.name_label, 4096)
diff --git a/nova/tests/unit/virt/xenapi/test_volume_utils.py b/nova/tests/unit/virt/xenapi/test_volume_utils.py
index cb7e4f40dc..618b099589 100644
--- a/nova/tests/unit/virt/xenapi/test_volume_utils.py
+++ b/nova/tests/unit/virt/xenapi/test_volume_utils.py
@@ -15,7 +15,6 @@
from eventlet import greenthread
import mock
-
import six
from nova import exception
diff --git a/nova/tests/unit/virt/xenapi/test_xenapi.py b/nova/tests/unit/virt/xenapi/test_xenapi.py
index 495d4d9625..b19d6e19d5 100644
--- a/nova/tests/unit/virt/xenapi/test_xenapi.py
+++ b/nova/tests/unit/virt/xenapi/test_xenapi.py
@@ -26,7 +26,6 @@ import uuid
import mock
from mox3 import mox
from oslo_concurrency import lockutils
-from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_log import log as logging
from oslo_serialization import jsonutils
@@ -41,6 +40,7 @@ from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
+import nova.conf
from nova import context
from nova import crypto
from nova import db
@@ -73,12 +73,10 @@ from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('network_manager', 'nova.service')
-CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('host', 'nova.netconf')
-CONF.import_opt('default_availability_zone', 'nova.availability_zones')
CONF.import_opt('login_timeout', 'nova.virt.xenapi.client.session',
group="xenserver")
@@ -297,7 +295,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
- db_fakes.stub_out_db_instance_api(self.stubs)
+ db_fakes.stub_out_db_instance_api(self)
xenapi_fake.create_network('fake', 'fake_br1')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
@@ -758,7 +756,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
# NOTE(tr3buchet): this is a terrible way to do this...
network_info[0]['network']['subnets'][0]['dns'] = []
- image_meta = IMAGE_FIXTURES[image_ref]["image_meta"]
+ image_meta = objects.ImageMeta.from_dict(
+ IMAGE_FIXTURES[image_ref]["image_meta"])
self.conn.spawn(self.context, instance, image_meta, injected_files,
'herp', network_info, block_device_info)
self.create_vm_record(self.conn, os_type, instance['name'])
@@ -932,7 +931,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
- db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
+ db_fakes.stub_out_db_instance_api(self, injected=True)
self._tee_executed = False
@@ -983,7 +982,7 @@ iface eth0 inet6 static
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_spawn_netinject_xenstore(self):
- db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
+ db_fakes.stub_out_db_instance_api(self, injected=True)
self._tee_executed = False
@@ -1031,8 +1030,9 @@ iface eth0 inet6 static
self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
self.mox.ReplayAll()
- self.conn.spawn(self.context, instance,
- IMAGE_FIXTURES['1']["image_meta"], [], 'herp', '')
+ image_meta = objects.ImageMeta.from_dict(
+ IMAGE_FIXTURES['1']["image_meta"])
+ self.conn.spawn(self.context, instance, image_meta, [], 'herp', '')
def test_spawn_vlanmanager(self):
self.flags(network_manager='nova.network.manager.VlanManager',
@@ -1276,9 +1276,10 @@ iface eth0 inet6 static
other_config={'osvol': True})
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
- image_meta = {'id': IMAGE_VHD,
- 'disk_format': 'vhd',
- 'properties': {'vm_mode': 'xen'}}
+ image_meta = objects.ImageMeta.from_dict(
+ {'id': IMAGE_VHD,
+ 'disk_format': 'vhd',
+ 'properties': {'vm_mode': 'xen'}})
conn.rescue(self.context, instance, [], image_meta, '')
vm = xenapi_fake.get_record('VM', vm_ref)
@@ -1302,9 +1303,10 @@ iface eth0 inet6 static
# bug #1227898
instance = self._create_instance(obj=True)
session = get_session()
- image_meta = {'id': IMAGE_VHD,
- 'disk_format': 'vhd',
- 'properties': {'vm_mode': 'xen'}}
+ image_meta = objects.ImageMeta.from_dict(
+ {'id': IMAGE_VHD,
+ 'disk_format': 'vhd',
+ 'properties': {'vm_mode': 'xen'}})
vm_ref = vm_utils.lookup(session, instance['name'])
vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
@@ -1436,8 +1438,8 @@ iface eth0 inet6 static
return [test_aggregate.fake_aggregate]
else:
return []
- self.stubs.Set(db, 'aggregate_get_by_host',
- fake_aggregate_get)
+ self.stub_out('nova.db.aggregate_get_by_host',
+ fake_aggregate_get)
def fake_host_find(context, session, src, dst):
if find_host:
@@ -1476,8 +1478,7 @@ iface eth0 inet6 static
fake_inst = fake_instance.fake_db_instance(id=123)
fake_inst2 = fake_instance.fake_db_instance(id=456)
db.instance_get_all_by_host(self.context, fake_inst['host'],
- columns_to_join=None,
- use_slave=False
+ columns_to_join=None
).AndReturn([fake_inst, fake_inst2])
self.mox.ReplayAll()
expected_name = CONF.instance_name_template % fake_inst['id']
@@ -1491,7 +1492,7 @@ iface eth0 inet6 static
def fake_aggregate_get_by_host(self, *args, **kwargs):
was['called'] = True
raise test.TestingException()
- self.stubs.Set(db, "aggregate_get_by_host",
+ self.stub_out("nova.db.aggregate_get_by_host",
fake_aggregate_get_by_host)
self.stubs.Set(self.conn._session, "is_slave", True)
@@ -1505,8 +1506,8 @@ iface eth0 inet6 static
agg = copy.copy(test_aggregate.fake_aggregate)
agg['metadetails'][CONF.host] = 'this_should_be_metadata'
return [agg]
- self.stubs.Set(db, 'aggregate_get_by_host',
- fake_aggregate_get)
+ self.stub_out('nova.db.aggregate_get_by_host',
+ fake_aggregate_get)
self.stubs.Set(self.conn._session, "is_slave", True)
@@ -1561,8 +1562,9 @@ iface eth0 inet6 static
instance = create_instance_with_system_metadata(self.context,
instance_values)
network_info = fake_network.fake_get_instance_nw_info(self)
- image_meta = {'id': IMAGE_VHD,
- 'disk_format': 'vhd'}
+ image_meta = objects.ImageMeta.from_dict(
+ {'id': IMAGE_VHD,
+ 'disk_format': 'vhd'})
if spawn:
self.conn.spawn(self.context, instance, image_meta, [], 'herp',
network_info)
@@ -1649,7 +1651,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- db_fakes.stub_out_db_instance_api(self.stubs)
+ db_fakes.stub_out_db_instance_api(self)
xenapi_fake.create_network('fake', 'fake_br1')
self.user_id = 'fake'
self.project_id = 'fake'
@@ -1687,8 +1689,15 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
+ def _create_instance(self, **kw):
+ values = self.instance_values.copy()
+ values.update(kw)
+ instance = objects.Instance(context=self.context, **values)
+ instance.create()
+ return instance
+
def test_migrate_disk_and_power_off(self):
- instance = db.instance_create(self.context, self.instance_values)
+ instance = self._create_instance()
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80,
ephemeral_gb=0)
@@ -1701,7 +1710,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
'127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_passes_exceptions(self):
- instance = db.instance_create(self.context, self.instance_values)
+ instance = self._create_instance()
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=80,
ephemeral_gb=0)
@@ -1717,7 +1726,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
'127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
- instance = db.instance_create(self.context, self.instance_values)
+ instance = self._create_instance()
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0,
ephemeral_gb=0)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
@@ -1729,10 +1738,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
flavor = fake_flavor.fake_flavor_obj(self.context, root_gb=0,
ephemeral_gb=0)
- values = copy.copy(self.instance_values)
- values["root_gb"] = 0
- values["ephemeral_gb"] = 0
- instance = db.instance_create(self.context, values)
+ instance = self._create_instance(root_gb=0, ephemeral_gb=0)
xenapi_fake.create_vm(instance['name'], 'Running')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = vm_utils.lookup(conn._session, instance['name'])
@@ -1771,7 +1777,8 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self)
- image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ image_meta = objects.ImageMeta.from_dict(
+ {'id': instance['image_ref'], 'disk_format': 'vhd'})
base = xenapi_fake.create_vdi('hurr', 'fake')
base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
cow = xenapi_fake.create_vdi('durr', 'fake')
@@ -1813,7 +1820,8 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self)
- image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ image_meta = objects.ImageMeta.from_dict(
+ {'id': instance['image_ref'], 'disk_format': 'vhd'})
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True,
@@ -1840,7 +1848,8 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self)
- image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ image_meta = objects.ImageMeta.from_dict(
+ {'id': instance['image_ref'], 'disk_format': 'vhd'})
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
@@ -1857,15 +1866,15 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self)
# Resize instance would be determined by the compute call
- image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
+ image_meta = objects.ImageMeta.from_dict(
+ {'id': instance['image_ref'], 'disk_format': 'vhd'})
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
@stub_vm_utils_with_vdi_attached_here
def test_migrate_too_many_partitions_no_resize_down(self):
- instance_values = self.instance_values
- instance = db.instance_create(self.context, instance_values)
+ instance = self._create_instance()
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = db.flavor_get_by_name(self.context, 'm1.small')
flavor = fake_flavor.fake_flavor_obj(self.context, **flavor)
@@ -1883,8 +1892,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
@stub_vm_utils_with_vdi_attached_here
def test_migrate_bad_fs_type_no_resize_down(self):
- instance_values = self.instance_values
- instance = db.instance_create(self.context, instance_values)
+ instance = self._create_instance()
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = db.flavor_get_by_name(self.context, 'm1.small')
flavor = fake_flavor.fake_flavor_obj(self.context, **flavor)
@@ -2324,9 +2332,10 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
- image_meta = {'id': 'null',
- 'disk_format': 'vhd',
- 'properties': {'vm_mode': 'xen'}}
+ image_meta = objects.ImageMeta.from_dict(
+ {'id': 'null',
+ 'disk_format': 'vhd',
+ 'properties': {'vm_mode': 'xen'}})
self.conn._vmops._attach_disks(instance, image_meta, vm_ref,
instance['name'], vdis, disk_image_type, "fake_nw_inf")
@@ -2401,7 +2410,7 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- db_fakes.stub_out_db_instance_api(self.stubs)
+ db_fakes.stub_out_db_instance_api(self)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
@@ -2444,9 +2453,10 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
vdi_key = 'iso'
vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.called = False
- image_meta = {'id': 'null',
- 'disk_format': 'vhd',
- 'properties': {'vm_mode': 'xen'}}
+ image_meta = objects.ImageMeta.from_dict(
+ {'id': 'null',
+ 'disk_format': 'vhd',
+ 'properties': {'vm_mode': 'xen'}})
self.conn._vmops._attach_disks(instance, image_meta, vm_ref,
instance['name'], vdis, disk_image_type, "fake_nw_inf")
self.assertTrue(self.called)
@@ -2852,63 +2862,6 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
"Rules were not updated properly. "
"The rule for UDP acceptance is missing")
- def test_provider_firewall_rules(self):
- # setup basic instance data
- instance_ref = self._create_instance_ref()
- # FRAGILE: as in libvirt tests
- # peeks at how the firewall names chains
- chain_name = 'inst-%s' % instance_ref['id']
-
- network_info = fake_network.fake_get_instance_nw_info(self, 1, 1)
- self.fw.prepare_instance_filter(instance_ref, network_info)
- self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(0, len(rules))
-
- admin_ctxt = context.get_admin_context()
- # add a rule and send the update message, check for 1 rule
- db.provider_fw_rule_create(admin_ctxt,
- {'protocol': 'tcp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535})
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(1, len(rules))
-
- # Add another, refresh, and make sure number of rules goes to two
- provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
- {'protocol': 'udp',
- 'cidr': '10.99.99.99/32',
- 'from_port': 1,
- 'to_port': 65535})
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(2, len(rules))
-
- # create the instance filter and make sure it has a jump rule
- self.fw.prepare_instance_filter(instance_ref, network_info)
- self.fw.apply_instance_filter(instance_ref, network_info)
- inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == chain_name]
- jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
- provjump_rules = []
- # IptablesTable doesn't make rules unique internally
- for rule in jump_rules:
- if 'provider' in rule.rule and rule not in provjump_rules:
- provjump_rules.append(rule)
- self.assertEqual(1, len(provjump_rules))
-
- # remove a rule from the db, cast to compute to refresh rule
- db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
- self.fw.refresh_provider_fw_rules()
- rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
- if rule.chain == 'provider']
- self.assertEqual(1, len(rules))
-
class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for testing we find the right SR."""
@@ -3368,7 +3321,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host')
- db_fakes.stub_out_db_instance_api(self.stubs)
+ db_fakes.stub_out_db_instance_api(self)
self.context = context.get_admin_context()
def test_live_migration_calls_vmops(self):
@@ -3597,7 +3550,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
return [dict(test_aggregate.fake_aggregate,
metadetails={"host": "test_host_uuid"})]
- self.stubs.Set(db, "aggregate_get_by_host",
+ self.stub_out("nova.db.aggregate_get_by_host",
fake_aggregate_get_by_host)
self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'}, False, False)
@@ -3611,7 +3564,7 @@ class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
return [dict(test_aggregate.fake_aggregate,
metadetails={"dest_other": "test_host_uuid"})]
- self.stubs.Set(db, "aggregate_get_by_host",
+ self.stub_out("nova.db.aggregate_get_by_host",
fake_aggregate_get_by_host)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
diff --git a/nova/tests/unit/volume/test_cinder.py b/nova/tests/unit/volume/test_cinder.py
index 79a7fc3395..6c86cdcbce 100644
--- a/nova/tests/unit/volume/test_cinder.py
+++ b/nova/tests/unit/volume/test_cinder.py
@@ -19,6 +19,8 @@ import mock
from nova import context
from nova import exception
from nova import test
+from nova.tests.unit.fake_instance import fake_instance_obj
+from nova.tests import uuidsentinel as uuids
from nova.volume import cinder
@@ -171,6 +173,7 @@ class CinderApiTestCase(test.NoDBTestCase):
volume = {'status': 'available'}
volume['attach_status'] = "detached"
volume['availability_zone'] = 'zone1'
+ volume['multiattach'] = False
instance = {'availability_zone': 'zone1', 'host': 'fakehost'}
cinder.CONF.set_override('cross_az_attach', False, group='cinder')
@@ -182,11 +185,27 @@ class CinderApiTestCase(test.NoDBTestCase):
cinder.CONF.reset()
def test_check_detach(self):
- volume = {'id': 'fake', 'status': 'available'}
+ volume = {'id': 'fake', 'status': 'in-use',
+ 'attach_status': 'attached',
+ 'attachments': {uuids.instance: {
+ 'attachment_id': uuids.attachment}}
+ }
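+ # A volume attached to this instance can be detached; another
+ # instance, a missing attachment, or a non in-use status must raise.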
+ self.assertIsNone(self.api.check_detach(self.ctx, volume))
+ instance = fake_instance_obj(self.ctx)
+ instance.uuid = uuids.instance
+ self.assertIsNone(self.api.check_detach(self.ctx, volume, instance))
+ instance.uuid = uuids.instance2
+ self.assertRaises(exception.VolumeUnattached,
+ self.api.check_detach, self.ctx, volume, instance)
+ volume['attachments'] = {}
+ self.assertRaises(exception.VolumeUnattached,
+ self.api.check_detach, self.ctx, volume, instance)
+ volume['status'] = 'available'
+ self.assertRaises(exception.InvalidVolume,
+ self.api.check_detach, self.ctx, volume)
+ volume['attach_status'] = 'detached'
self.assertRaises(exception.InvalidVolume,
self.api.check_detach, self.ctx, volume)
- volume['status'] = 'non-available'
- self.assertIsNone(self.api.check_detach(self.ctx, volume))
def test_reserve_volume(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
@@ -251,14 +270,24 @@ class CinderApiTestCase(test.NoDBTestCase):
mode='ro')
def test_detach(self):
+ self.mox.StubOutWithMock(self.api,
+ 'get',
+ use_mock_anything=True)
+ self.api.get(self.ctx, 'id1').\
+ AndReturn({'id': 'id1', 'status': 'in-use',
+ 'multiattach': True,
+ 'attach_status': 'attached',
+ 'attachments': {'fake_uuid':
+ {'attachment_id': 'fakeid'}}
+ })
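+ # For a multiattach volume, the attachment belonging to this instance
+ # is looked up so its attachment_id can be passed to cinder's detach.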
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
self.mox.StubOutWithMock(self.cinderclient.volumes,
'detach',
use_mock_anything=True)
- self.cinderclient.volumes.detach('id1')
+ self.cinderclient.volumes.detach('id1', 'fakeid')
self.mox.ReplayAll()
- self.api.detach(self.ctx, 'id1')
+ self.api.detach(self.ctx, 'id1', instance_uuid='fake_uuid')
def test_initialize_connection(self):
cinder.cinderclient(self.ctx).AndReturn(self.cinderclient)
diff --git a/nova/utils.py b/nova/utils.py
index 0c013af5d7..14488cab98 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -722,12 +722,12 @@ def monkey_patch():
# If CONF.monkey_patch is not True, this function does nothing.
if not CONF.monkey_patch:
return
- if six.PY3:
+ if six.PY2:
+ is_method = inspect.ismethod
+ else:
def is_method(obj):
# Unbound methods became regular functions on Python 3
return inspect.ismethod(obj) or inspect.isfunction(obj)
- else:
- is_method = inspect.ismethod
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
@@ -802,6 +802,7 @@ def sanitize_hostname(hostname, default_name=None):
if six.PY3:
hostname = hostname.decode('latin-1')
+ hostname = truncate_hostname(hostname)
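+ # Truncate first so the substitutions and stripping below operate on
+ # the final, length-limited string.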
hostname = re.sub('[ _]', '-', hostname)
hostname = re.sub('[^\w.-]+', '', hostname)
hostname = hostname.lower()
@@ -810,8 +811,7 @@ def sanitize_hostname(hostname, default_name=None):
# empty hostname
if hostname == "" and default_name is not None:
return truncate_hostname(default_name)
-
- return truncate_hostname(hostname)
+ return hostname
@contextlib.contextmanager
@@ -1201,11 +1201,7 @@ def is_neutron():
return _IS_NEUTRON
try:
- # compatibility with Folsom/Grizzly configs
cls_name = CONF.network_api_class
- if cls_name == 'nova.network.quantumv2.api.API':
- cls_name = 'nova.network.neutronv2.api.API'
-
from nova.network.neutronv2 import api as neutron_api
_IS_NEUTRON = issubclass(importutils.import_class(cls_name),
neutron_api.API)
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 56b49304dd..de8a8efda8 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -23,7 +23,6 @@ from oslo_utils import fileutils
from oslo_utils import units
from nova import exception
-from nova import objects
from nova.objects import fields
from nova import utils
from nova import version
@@ -167,9 +166,8 @@ class ConfigDriveBuilder(object):
def required_by(instance):
- image_meta = objects.ImageMeta.from_instance(instance)
- image_prop = image_meta.properties.get(
+ image_prop = instance.image_meta.properties.get(
"img_config_drive",
fields.ConfigDrivePolicy.OPTIONAL)
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index 90d400b234..7f699939c5 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -31,10 +31,10 @@ if os.name != 'nt':
import crypt
from oslo_concurrency import processutils
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
+import nova.conf
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
@@ -48,33 +48,7 @@ from nova.virt import images
LOG = logging.getLogger(__name__)
-disk_opts = [
- # NOTE(yamahata): ListOpt won't work because the command may include a
- # comma. For example:
- #
- # mkfs.ext4 -O dir_index,extent -E stride=8,stripe-width=16
- # --label %(fs_label)s %(target)s
- #
- # list arguments are comma separated and there is no way to
- # escape such commas.
- #
- cfg.MultiStrOpt('virt_mkfs',
- default=[],
- help='Name of the mkfs commands for ephemeral device. '
- 'The format is <os_type>=<mkfs command>'),
-
- cfg.BoolOpt('resize_fs_using_block_device',
- default=False,
- help='Attempt to resize the filesystem by accessing the '
- 'image over a block device. This is done by the host '
- 'and may not be necessary if the image contains a recent '
- 'version of cloud-init. Possible mechanisms require '
- 'the nbd driver (for qcow and raw), or loop (for raw).'),
- ]
-
-CONF = cfg.CONF
-CONF.register_opts(disk_opts)
-CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
+CONF = nova.conf.CONF
_MKFS_COMMAND = {}
_DEFAULT_MKFS_COMMAND = None
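
This hunk (and the nbd.py and driver.py ones below) follow the same centralization pattern: options stop being registered ad hoc in the module that uses them and move into the nova.conf package, leaving consumers with a bare CONF = nova.conf.CONF. A generic oslo.config sketch of the pattern; the option names are reused from the hunk, but the single-file layout is illustrative, not Nova's actual one:

    # one central place to register options
    from oslo_config import cfg

    disk_opts = [
        cfg.MultiStrOpt('virt_mkfs', default=[],
                        help='mkfs command per os_type'),
        cfg.BoolOpt('resize_fs_using_block_device', default=False,
                    help='Resize the filesystem via a block device'),
    ]

    CONF = cfg.CONF
    CONF.register_opts(disk_opts)

    # A consuming module now only needs the shared CONF object:
    print(CONF.virt_mkfs)                     # []
    print(CONF.resize_fs_using_block_device)  # False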
diff --git a/nova/virt/disk/mount/nbd.py b/nova/virt/disk/mount/nbd.py
index 21b91c9f15..e368bdee3f 100644
--- a/nova/virt/disk/mount/nbd.py
+++ b/nova/virt/disk/mount/nbd.py
@@ -18,24 +18,16 @@ import random
import re
import time
-from oslo_config import cfg
from oslo_log import log as logging
+import nova.conf
from nova.i18n import _, _LE, _LI, _LW
from nova import utils
from nova.virt.disk.mount import api
LOG = logging.getLogger(__name__)
-nbd_opts = [
- cfg.IntOpt('timeout_nbd',
- default=10,
- help='Amount of time, in seconds, to wait for NBD '
- 'device start up.'),
- ]
-
-CONF = cfg.CONF
-CONF.register_opts(nbd_opts)
+CONF = nova.conf.CONF
NBD_DEVICE_RE = re.compile('nbd[0-9]+')
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index a691df0b86..624b014907 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -22,46 +22,15 @@ Driver base-classes:
import sys
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
+import nova.conf
from nova.i18n import _, _LE, _LI
from nova import utils
from nova.virt import event as virtevent
-driver_opts = [
- cfg.StrOpt('compute_driver',
- help='Driver to use for controlling virtualization. Options '
- 'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
- 'fake.FakeDriver, ironic.IronicDriver, '
- 'vmwareapi.VMwareVCDriver, hyperv.HyperVDriver'),
- cfg.StrOpt('default_ephemeral_format',
- help='The default format an ephemeral_volume will be '
- 'formatted with on creation.'),
- cfg.StrOpt('preallocate_images',
- default='none',
- choices=('none', 'space'),
- help='VM image preallocation mode: '
- '"none" => no storage provisioning is done up front, '
- '"space" => storage is fully allocated at instance start'),
- cfg.BoolOpt('use_cow_images',
- default=True,
- help='Whether to use cow images'),
- cfg.BoolOpt('vif_plugging_is_fatal',
- default=True,
- help="Fail instance boot if vif plugging fails"),
- cfg.IntOpt('vif_plugging_timeout',
- default=300,
- help='Number of seconds to wait for neutron vif plugging '
- 'events to arrive before continuing or failing (see '
- 'vif_plugging_is_fatal). If this is set to zero and '
- 'vif_plugging_is_fatal is False, events should not '
- 'be expected to arrive at all.'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(driver_opts)
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -276,8 +245,8 @@ class ComputeDriver(object):
:param instance: nova.objects.instance.Instance
This function should use the data there to guide
the creation of the new instance.
- :param image_meta: image object returned by nova.image.glance that
- defines the image from which to boot this instance
+ :param nova.objects.ImageMeta image_meta:
+ The metadata of the image of the instance.
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param bdms: block-device-mappings to use for rebuild
@@ -313,8 +282,8 @@ class ComputeDriver(object):
:param instance: nova.objects.instance.Instance
This function should use the data there to guide
the creation of the new instance.
- :param image_meta: image object returned by nova.image.glance that
- defines the image from which to boot this instance
+ :param nova.objects.ImageMeta image_meta:
+ The metadata of the image of the instance.
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param network_info:
@@ -528,8 +497,8 @@ class ComputeDriver(object):
:param nova.objects.instance.Instance instance:
The instance which will get an additional network interface.
- :param dict image_meta:
- A dictionary which describes metadata of the image of the instance.
+ :param nova.objects.ImageMeta image_meta:
+ The metadata of the image of the instance.
:param nova.network.model.NetworkInfo vif:
The object which has the information about the interface to attach.
@@ -611,9 +580,8 @@ class ComputeDriver(object):
:param disk_info: the newly transferred disk information
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
- :param image_meta: image object returned by nova.image.glance that
- defines the image from which this instance
- was created
+ :param nova.objects.ImageMeta image_meta:
+ The metadata of the image of the instance.
:param resize_instance: True if the instance is being resized,
False otherwise
:param block_device_info: instance volume block device info
@@ -727,7 +695,15 @@ class ComputeDriver(object):
rescue_password):
"""Rescue the specified instance.
- :param instance: nova.objects.instance.Instance
+ :param nova.context.RequestContext context:
+ The context for the rescue.
+ :param nova.objects.instance.Instance instance:
+ The instance being rescued.
+ :param nova.network.model.NetworkInfo network_info:
+ Necessary network information for the rescue.
+ :param nova.objects.ImageMeta image_meta:
+ The metadata of the image of the instance.
+ :param rescue_password: new root password to set for rescue.
"""
raise NotImplementedError()
@@ -827,7 +803,7 @@ class ComputeDriver(object):
:param block_device_info: instance block device information
:param network_info: instance network information
:param disk_info: instance disk information
- :param migrate_data: implementation specific data dict.
+ :param migrate_data: a LiveMigrateData object
"""
raise NotImplementedError()
@@ -848,7 +824,15 @@ class ComputeDriver(object):
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, migrate VM disk.
- :param migrate_data: implementation specific params.
+ :param migrate_data: a LiveMigrateData object
+
+ """
+ raise NotImplementedError()
+
+ def live_migration_force_complete(self, instance):
+ """Force live migration to complete
+
+ :param instance: Instance being live migrated
"""
raise NotImplementedError()
@@ -866,7 +850,7 @@ class ComputeDriver(object):
:param block_device_info: instance block device information
:param destroy_disks:
if true, destroy disks at destination during cleanup
- :param migrate_data: implementation specific params
+ :param migrate_data: a LiveMigrateData object
"""
raise NotImplementedError()
@@ -878,7 +862,7 @@ class ComputeDriver(object):
:param context: security context
:instance: instance object that was migrated
:block_device_info: instance block device information
- :param migrate_data: if not None, it is a dict which has data
+ :param migrate_data: a LiveMigrateData object
"""
pass
@@ -947,7 +931,7 @@ class ComputeDriver(object):
:param dst_compute_info: Info about the receiving machine
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
- :returns: a dict containing migration info (hypervisor-dependent)
+ :returns: a LiveMigrateData object (hypervisor-dependent)
"""
raise NotImplementedError()
@@ -971,7 +955,7 @@ class ComputeDriver(object):
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:param block_device_info: result of _get_instance_block_device_info
- :returns: a dict containing migration info (hypervisor-dependent)
+ :returns: a LiveMigrateData object
"""
raise NotImplementedError()
@@ -1009,23 +993,6 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def refresh_provider_fw_rules(self):
- """This triggers a firewall update based on database changes.
-
- When this is called, rules have either been added or removed from the
- datastore. You can retrieve rules with
- :py:meth:`nova.db.provider_fw_rule_get_all`.
-
- Provider rules take precedence over security group rules. If an IP
- would be allowed by a security group ingress rule, but blocked by
- a provider rule, then packets from the IP are dropped. This includes
- intra-project traffic in the case of the allow_project_net_traffic
- flag for the libvirt-derived classes.
-
- """
- # TODO(Vek): Need to pass context in for access to auth_token
- raise NotImplementedError()
-
def refresh_instance_security_rules(self, instance):
"""Refresh security group rules
@@ -1313,7 +1280,7 @@ class ComputeDriver(object):
This is called during spawn_instance by the compute manager.
- Note that the format of the return value is specific to Quantum
+ Note that the format of the return value is specific to the Neutron
client API.
:return: None, or a set of DHCP options, e.g.:
@@ -1351,7 +1318,7 @@ class ComputeDriver(object):
:param nova.objects.aggregate.Aggregate aggregate:
The aggregate which should add the given `host`
:param str host:
- The name of the host to add to the the given `aggregate`.
+ The name of the host to add to the given `aggregate`.
:param dict kwargs:
A free-form thingy...
@@ -1370,7 +1337,7 @@ class ComputeDriver(object):
:param nova.objects.aggregate.Aggregate aggregate:
The aggregate which should remove the given `host`
:param str host:
- The name of the host to remove from the the given `aggregate`.
+ The name of the host to remove from the given `aggregate`.
:param dict kwargs:
A free-form thingy...
@@ -1535,7 +1502,15 @@ class ComputeDriver(object):
raise NotImplementedError()
def default_root_device_name(self, instance, image_meta, root_bdm):
- """Provide a default root device name for the driver."""
+ """Provide a default root device name for the driver.
+
+ :param nova.objects.instance.Instance instance:
+ The instance to get the root device for.
+ :param nova.objects.ImageMeta image_meta:
+ The metadata of the image of the instance.
+ :param nova.objects.BlockDeviceMapping root_bdm:
+ The description of the root device.
+ """
raise NotImplementedError()
def default_device_names_for_instance(self, instance, root_device_name,
@@ -1581,9 +1556,8 @@ class ComputeDriver(object):
:param context: request context
:param instance: nova.objects.instance.Instance to be quiesced
- :param image_meta: image object returned by nova.image.glance that
- defines the image from which this instance
- was created
+ :param nova.objects.ImageMeta image_meta:
+ The metadata of the image of the instance.
"""
raise NotImplementedError()
@@ -1596,9 +1570,8 @@ class ComputeDriver(object):
:param context: request context
:param instance: nova.objects.instance.Instance to be unquiesced
- :param image_meta: image object returned by nova.image.glance that
- defines the image from which this instance
- was created
+ :param nova.objects.ImageMeta image_meta:
+ The metadata of the image of the instance.
"""
raise NotImplementedError()
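
The new live_migration_force_complete() hook above gives drivers an entry point for pushing a stalled live migration to completion. A minimal self-contained sketch of how a driver might override it; the base class here is a stand-in for nova.virt.driver.ComputeDriver, and the pause strategy named in the comment is one plausible approach, not what any in-tree driver necessarily does:

    class ComputeDriver(object):
        # Stand-in for nova.virt.driver.ComputeDriver.
        def live_migration_force_complete(self, instance):
            raise NotImplementedError()

    class MyDriver(ComputeDriver):
        def live_migration_force_complete(self, instance):
            # E.g. pause the guest so the remaining dirty memory can be
            # copied in one final pass (hypervisor-specific in reality).
            print('forcing migration completion for %s' % instance)

    MyDriver().live_migration_force_complete('instance-1')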
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index d5456d6f3e..1c0e9cfe42 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -37,7 +37,6 @@ from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.console import type as ctype
-from nova import db
from nova import exception
from nova.i18n import _LW
from nova.virt import diagnostics
@@ -428,9 +427,6 @@ class FakeDriver(driver.ComputeDriver):
def refresh_instance_security_rules(self, instance):
return True
- def refresh_provider_fw_rules(self):
- pass
-
def get_available_resource(self, nodename):
"""Updates compute manager resource info on ComputeNode table.
@@ -472,6 +468,9 @@ class FakeDriver(driver.ComputeDriver):
migrate_data)
return
+ def live_migration_force_complete(self, instance):
+ return
+
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
return
@@ -542,9 +541,6 @@ class FakeDriver(driver.ComputeDriver):
class FakeVirtAPI(virtapi.VirtAPI):
- def provider_fw_rule_get_all(self, context):
- return db.provider_fw_rule_get_all(context)
-
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index 239b53f4cb..61c79cdec6 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -15,11 +15,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from nova.compute import utils as compute_utils
+import nova.conf
from nova import context
from nova.i18n import _LI
from nova.network import linux_net
@@ -29,17 +29,7 @@ from nova.virt import netutils
LOG = logging.getLogger(__name__)
-firewall_opts = [
- cfg.StrOpt('firewall_driver',
- help='Firewall driver '
- '(defaults to hypervisor specific iptables driver)'),
- cfg.BoolOpt('allow_same_net_traffic',
- default=True,
- help='Whether to allow network traffic from same network'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(firewall_opts)
+CONF = nova.conf.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
@@ -51,12 +41,9 @@ def load_driver(default, *args, **kwargs):
class FirewallDriver(object):
"""Firewall Driver base class.
- Defines methods that any driver providing security groups
- and provider firewall functionality should implement.
- """
- def __init__(self, virtapi):
- self._virtapi = virtapi
+ Defines methods that any driver providing security groups should implement.
+ """
def prepare_instance_filter(self, instance, network_info):
"""Prepare filters for the instance.
@@ -103,15 +90,6 @@ class FirewallDriver(object):
"""
raise NotImplementedError()
- def refresh_provider_fw_rules(self):
- """Refresh common rules for all hosts/instances from data store.
-
- Gets called when a rule has been added to or removed from
- the list of rules (via admin api).
-
- """
- raise NotImplementedError()
-
def setup_basic_filtering(self, instance, network_info):
"""Create rules to block spoofing and allow dhcp.
@@ -129,11 +107,9 @@ class FirewallDriver(object):
class IptablesFirewallDriver(FirewallDriver):
"""Driver which enforces security groups through iptables rules."""
- def __init__(self, virtapi, **kwargs):
- super(IptablesFirewallDriver, self).__init__(virtapi)
+ def __init__(self, **kwargs):
self.iptables = linux_net.iptables_manager
self.instance_info = {}
- self.basically_filtered = False
# Flags for DHCP request rule
self.dhcp_create = False
@@ -172,9 +148,6 @@ class IptablesFirewallDriver(FirewallDriver):
ipv6_rules)
LOG.debug('Filters added to instance: %s', instance.id,
instance=instance)
- self.refresh_provider_fw_rules()
- LOG.debug('Provider Firewall Rules refreshed: %s', instance.id,
- instance=instance)
# Ensure that DHCP request rule is updated if necessary
if (self.dhcp_create and not self.dhcp_created):
self.iptables.ipv4['filter'].add_rule(
@@ -258,10 +231,6 @@ class IptablesFirewallDriver(FirewallDriver):
ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
- # Pass through provider-wide drops
- ipv4_rules += ['-j $provider']
- ipv6_rules += ['-j $provider']
-
def _do_dhcp_rules(self, ipv4_rules, network_info):
v4_subnets = self._get_subnets(network_info, 4)
dhcp_servers = [subnet.get_meta('dhcp_server')
@@ -293,8 +262,8 @@ class IptablesFirewallDriver(FirewallDriver):
'-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
def _build_icmp_rule(self, rule, version):
- icmp_type = rule['from_port']
- icmp_code = rule['to_port']
+ icmp_type = rule.from_port
+ icmp_code = rule.to_port
if icmp_type == -1:
icmp_type_arg = None
@@ -312,12 +281,12 @@ class IptablesFirewallDriver(FirewallDriver):
return []
def _build_tcp_udp_rule(self, rule, version):
- if rule['from_port'] == rule['to_port']:
- return ['--dport', '%s' % (rule['from_port'],)]
+ if rule.from_port == rule.to_port:
+ return ['--dport', '%s' % (rule.from_port,)]
else:
return ['-m', 'multiport',
- '--dports', '%s:%s' % (rule['from_port'],
- rule['to_port'])]
+ '--dports', '%s:%s' % (rule.from_port,
+ rule.to_port)]
def instance_rules(self, instance, network_info):
ctxt = context.get_admin_context()
@@ -450,85 +419,6 @@ class IptablesFirewallDriver(FirewallDriver):
self._inner_do_refresh_rules(instance, network_info, ipv4_rules,
ipv6_rules)
- def refresh_provider_fw_rules(self):
- """See :class:`FirewallDriver` docs."""
- self._do_refresh_provider_fw_rules()
- self.iptables.apply()
-
- @utils.synchronized('iptables', external=True)
- def _do_refresh_provider_fw_rules(self):
- """Internal, synchronized version of refresh_provider_fw_rules."""
- self._purge_provider_fw_rules()
- self._build_provider_fw_rules()
-
- def _purge_provider_fw_rules(self):
- """Remove all rules from the provider chains."""
- self.iptables.ipv4['filter'].empty_chain('provider')
- if CONF.use_ipv6:
- self.iptables.ipv6['filter'].empty_chain('provider')
-
- def _build_provider_fw_rules(self):
- """Create all rules for the provider IP DROPs."""
- self.iptables.ipv4['filter'].add_chain('provider')
- if CONF.use_ipv6:
- self.iptables.ipv6['filter'].add_chain('provider')
- ipv4_rules, ipv6_rules = self._provider_rules()
- for rule in ipv4_rules:
- self.iptables.ipv4['filter'].add_rule('provider', rule)
-
- if CONF.use_ipv6:
- for rule in ipv6_rules:
- self.iptables.ipv6['filter'].add_rule('provider', rule)
-
- def _provider_rules(self):
- """Generate a list of rules from provider for IP4 & IP6."""
- ctxt = context.get_admin_context()
- ipv4_rules = []
- ipv6_rules = []
- rules = self._virtapi.provider_fw_rule_get_all(ctxt)
- for rule in rules:
- LOG.debug('Adding provider rule: %s', rule['cidr'])
- version = netutils.get_ip_version(rule['cidr'])
- if version == 4:
- fw_rules = ipv4_rules
- else:
- fw_rules = ipv6_rules
-
- protocol = rule['protocol']
- if version == 6 and protocol == 'icmp':
- protocol = 'icmpv6'
-
- args = ['-p', protocol, '-s', rule['cidr']]
-
- if protocol in ['udp', 'tcp']:
- if rule['from_port'] == rule['to_port']:
- args += ['--dport', '%s' % (rule['from_port'],)]
- else:
- args += ['-m', 'multiport',
- '--dports', '%s:%s' % (rule['from_port'],
- rule['to_port'])]
- elif protocol == 'icmp':
- icmp_type = rule['from_port']
- icmp_code = rule['to_port']
-
- if icmp_type == -1:
- icmp_type_arg = None
- else:
- icmp_type_arg = '%s' % icmp_type
- if not icmp_code == -1:
- icmp_type_arg += '/%s' % icmp_code
-
- if icmp_type_arg:
- if version == 4:
- args += ['-m', 'icmp', '--icmp-type',
- icmp_type_arg]
- elif version == 6:
- args += ['-m', 'icmp6', '--icmpv6-type',
- icmp_type_arg]
- args += ['-j DROP']
- fw_rules += [' '.join(args)]
- return ipv4_rules, ipv6_rules
-
class NoopFirewallDriver(object):
"""Firewall driver which just provides No-op methods."""
diff --git a/nova/virt/hardware.py b/nova/virt/hardware.py
index 2a8d49fb7e..f5d8bfd322 100644
--- a/nova/virt/hardware.py
+++ b/nova/virt/hardware.py
@@ -1034,6 +1034,13 @@ def is_realtime_enabled(flavor):
return strutils.bool_from_string(flavor_rt)
+def _get_realtime_mask(flavor, image):
+ """Returns realtime mask based on flavor/image meta"""
+ flavor_mask = flavor.get('extra_specs', {}).get("hw:cpu_realtime_mask")
+ image_mask = image.properties.get("hw_cpu_realtime_mask")
+ return image_mask or flavor_mask
+
+
def vcpus_realtime_topology(vcpus_set, flavor, image):
"""Partitions vcpus used for realtime and 'normal' vcpus.
@@ -1041,10 +1048,7 @@ def vcpus_realtime_topology(vcpus_set, flavor, image):
vcpus configured for the realtime scheduler and vcpus set to run as
'normal' vcpus.
"""
- flavor_mask = flavor.get('extra_specs', {}).get("hw:cpu_realtime_mask")
- image_mask = image.properties.get("hw_cpu_realtime_mask")
-
- mask = image_mask or flavor_mask
+ mask = _get_realtime_mask(flavor, image)
if not mask:
raise exception.RealtimeMaskNotFoundOrInvalid()
@@ -1089,9 +1093,11 @@ def _add_cpu_pinning_constraint(flavor, image_meta, numa_topology):
else:
cpu_policy = fields.CPUAllocationPolicy.SHARED
- if (is_realtime_enabled(flavor) and
- cpu_policy != fields.CPUAllocationPolicy.DEDICATED):
+ rt = is_realtime_enabled(flavor)
+ if (rt and cpu_policy != fields.CPUAllocationPolicy.DEDICATED):
raise exception.RealtimeConfigurationInvalid()
+ elif rt and not _get_realtime_mask(flavor, image_meta):
+ raise exception.RealtimeMaskNotFoundOrInvalid()
flavor_thread_policy = flavor.get('extra_specs', {}).get(
'hw:cpu_thread_policy')
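
The extracted _get_realtime_mask() gives the image property precedence over the flavor extra spec, and the constraint check above now fails fast when realtime is requested without any mask. A standalone sketch of that precedence, with plain dicts standing in for the flavor and image objects:

    def get_realtime_mask(flavor, image_props):
        flavor_mask = flavor.get('extra_specs',
                                 {}).get('hw:cpu_realtime_mask')
        image_mask = image_props.get('hw_cpu_realtime_mask')
        # The image metadata wins whenever both are set.
        return image_mask or flavor_mask

    flavor = {'extra_specs': {'hw:cpu_realtime_mask': '^0'}}
    print(get_realtime_mask(flavor, {}))                              # ^0
    print(get_realtime_mask(flavor, {'hw_cpu_realtime_mask': '^1'}))  # ^1
    print(get_realtime_mask({}, {}))                                  # None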
@@ -1200,8 +1206,16 @@ def numa_fit_instance_to_host(
InstanceNUMATopology with its cell ids set to host cell ids of
the first successful permutation, or None.
"""
- if (not (host_topology and instance_topology) or
- len(host_topology) < len(instance_topology)):
+ if not (host_topology and instance_topology):
+ LOG.debug("Require both a host and instance NUMA topology to "
+ "fit instance on host.")
+ return
+ elif len(host_topology) < len(instance_topology):
+ LOG.debug("There are not enough free cores on the system to schedule "
+ "the instance correctly. Required: %(required)s, actual: "
+ "%(actual)s",
+ {'required': len(instance_topology),
+ 'actual': len(host_topology)})
return
else:
# TODO(ndipanov): We may want to sort permutations differently
@@ -1407,7 +1421,7 @@ def get_host_numa_usage_from_instance(host, instance, free=False,
:param host: nova.objects.ComputeNode instance, or a db object or dict
:param instance: nova.objects.Instance instance, or a db object or dict
- :param free: if True the the returned topology will have it's usage
+ :param free: if True the returned topology will have its usage
decreased instead.
:param never_serialize_result: if True result will always be an instance of
objects.NUMATopology class.
diff --git a/nova/virt/hyperv/constants.py b/nova/virt/hyperv/constants.py
index 00e8838676..9609dc3702 100644
--- a/nova/virt/hyperv/constants.py
+++ b/nova/virt/hyperv/constants.py
@@ -17,48 +17,29 @@
Constants used in ops classes
"""
+from os_win import constants
+
from nova.compute import arch
from nova.compute import power_state
-HYPERV_VM_STATE_OTHER = 1
-HYPERV_VM_STATE_ENABLED = 2
-HYPERV_VM_STATE_DISABLED = 3
-HYPERV_VM_STATE_SHUTTING_DOWN = 4
-HYPERV_VM_STATE_REBOOT = 10
-HYPERV_VM_STATE_PAUSED = 32768
-HYPERV_VM_STATE_SUSPENDED = 32769
-
HYPERV_POWER_STATE = {
- HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN,
- HYPERV_VM_STATE_SHUTTING_DOWN: power_state.SHUTDOWN,
- HYPERV_VM_STATE_ENABLED: power_state.RUNNING,
- HYPERV_VM_STATE_PAUSED: power_state.PAUSED,
- HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED
+ constants.HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN,
+ constants.HYPERV_VM_STATE_SHUTTING_DOWN: power_state.SHUTDOWN,
+ constants.HYPERV_VM_STATE_ENABLED: power_state.RUNNING,
+ constants.HYPERV_VM_STATE_PAUSED: power_state.PAUSED,
+ constants.HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED
}
WMI_WIN32_PROCESSOR_ARCHITECTURE = {
- 0: arch.I686,
- 1: arch.MIPS,
- 2: arch.ALPHA,
- 3: arch.PPC,
- 5: arch.ARMV7,
- 6: arch.IA64,
- 9: arch.X86_64,
+ constants.ARCH_I686: arch.I686,
+ constants.ARCH_MIPS: arch.MIPS,
+ constants.ARCH_ALPHA: arch.ALPHA,
+ constants.ARCH_PPC: arch.PPC,
+ constants.ARCH_ARMV7: arch.ARMV7,
+ constants.ARCH_IA64: arch.IA64,
+ constants.ARCH_X86_64: arch.X86_64,
}
-PROCESSOR_FEATURE = {
- 7: '3dnow',
- 3: 'mmx',
- 12: 'nx',
- 9: 'pae',
- 8: 'rdtsc',
- 20: 'slat',
- 13: 'sse3',
- 21: 'vmx',
- 6: 'sse',
- 10: 'sse2',
- 17: 'xsave',
-}
CTRL_TYPE_IDE = "IDE"
CTRL_TYPE_SCSI = "SCSI"
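
With the state constants now imported from os-win, HYPERV_POWER_STATE above remains the single translation table between Hyper-V VM states and Nova power states. A toy sketch of the lookup pattern, using illustrative integer values rather than the real os_win constants:

    HYPERV_VM_STATE_ENABLED = 2    # illustrative values only
    HYPERV_VM_STATE_DISABLED = 3

    HYPERV_POWER_STATE = {
        HYPERV_VM_STATE_ENABLED: 'RUNNING',
        HYPERV_VM_STATE_DISABLED: 'SHUTDOWN',
    }

    def map_power_state(state):
        # Unknown hypervisor states fall back to NOSTATE, the same
        # pattern the Ironic driver's map_power_state() uses.
        return HYPERV_POWER_STATE.get(state, 'NOSTATE')

    print(map_power_state(2))   # RUNNING
    print(map_power_state(99))  # NOSTATE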
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 90f7eb2015..bbbdbef109 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -19,6 +19,7 @@ A Hyper-V Nova Compute driver.
import functools
import platform
+import sys
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
@@ -27,7 +28,6 @@ import six
from nova import exception
from nova.i18n import _, _LE
-from nova import objects
from nova.virt import driver
from nova.virt.hyperv import eventhandler
from nova.virt.hyperv import hostops
@@ -57,7 +57,13 @@ def convert_exceptions(function, exception_map):
raised_exception = exception_map[expected]
break
- raise raised_exception(six.text_type(ex))
+ exc_info = sys.exc_info()
+ # NOTE(claudiub): Python 3 raises the exception object given as
+ # the second argument in six.reraise.
+ # The original message will be maintained by passing the original
+ # exception.
+ exc = raised_exception(six.text_type(exc_info[1]))
+ six.reraise(raised_exception, exc, exc_info[2])
return wrapper
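
The six.reraise() change above re-raises the translated exception while keeping the original traceback, which a bare raise raised_exception(...) would discard. A self-contained sketch of the technique:

    import sys

    import six

    class UpstreamError(Exception):
        pass

    class TranslatedError(Exception):
        pass

    def do_work():
        raise UpstreamError('disk full')

    def translated():
        try:
            do_work()
        except UpstreamError:
            exc_info = sys.exc_info()
            # Wrap the original message in the new type, but keep the
            # traceback pointing at the original raise site.
            exc = TranslatedError(six.text_type(exc_info[1]))
            six.reraise(TranslatedError, exc, exc_info[2])

    try:
        translated()
    except TranslatedError as e:
        print(e)  # disk full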
@@ -132,7 +138,6 @@ class HyperVDriver(driver.ComputeDriver):
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
- image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
@@ -301,7 +306,6 @@ class HyperVDriver(driver.ComputeDriver):
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
- image_meta = objects.ImageMeta.from_dict(image_meta)
self._migrationops.finish_migration(context, migration, instance,
disk_info, network_info,
image_meta, resize_instance,
diff --git a/nova/virt/hyperv/eventhandler.py b/nova/virt/hyperv/eventhandler.py
index ca89b69d69..a7c96a5b44 100644
--- a/nova/virt/hyperv/eventhandler.py
+++ b/nova/virt/hyperv/eventhandler.py
@@ -20,6 +20,7 @@ import sys
if sys.platform == 'win32':
import wmi
+from os_win import constants
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
from oslo_config import cfg
@@ -28,7 +29,6 @@ from oslo_log import log as logging
from nova.i18n import _LW
from nova import utils
from nova.virt import event as virtevent
-from nova.virt.hyperv import constants
LOG = logging.getLogger(__name__)
diff --git a/nova/virt/hyperv/hostops.py b/nova/virt/hyperv/hostops.py
index 3fd9de759f..8b6098e59a 100644
--- a/nova/virt/hyperv/hostops.py
+++ b/nova/virt/hyperv/hostops.py
@@ -21,6 +21,7 @@ import os
import platform
import time
+from os_win import constants as os_win_const
from os_win import utilsfactory
from oslo_config import cfg
from oslo_log import log as logging
@@ -67,7 +68,7 @@ class HostOps(object):
cpu_info['topology'] = topology
features = list()
- for fkey, fname in constants.PROCESSOR_FEATURE.items():
+ for fkey, fname in os_win_const.PROCESSOR_FEATURE.items():
if self._hostutils.is_cpu_feature_present(fkey):
features.append(fname)
cpu_info['features'] = features
diff --git a/nova/virt/hyperv/imagecache.py b/nova/virt/hyperv/imagecache.py
index ee84247202..e88048db90 100644
--- a/nova/virt/hyperv/imagecache.py
+++ b/nova/virt/hyperv/imagecache.py
@@ -18,11 +18,11 @@ Image caching and management.
import os
from os_win import utilsfactory
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
+import nova.conf
from nova import exception
from nova import utils
from nova.virt.hyperv import pathutils
@@ -30,8 +30,7 @@ from nova.virt import images
LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-CONF.import_opt('use_cow_images', 'nova.virt.driver')
+CONF = nova.conf.CONF
class ImageCache(object):
diff --git a/nova/virt/hyperv/livemigrationops.py b/nova/virt/hyperv/livemigrationops.py
index 4dc42f40e7..6654df4eb5 100644
--- a/nova/virt/hyperv/livemigrationops.py
+++ b/nova/virt/hyperv/livemigrationops.py
@@ -16,14 +16,12 @@
"""
Management class for live migration VM operations.
"""
-import functools
from os_win import utilsfactory
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
-from nova.i18n import _
+import nova.conf
from nova.objects import migrate_data as migrate_data_obj
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import pathutils
@@ -31,36 +29,18 @@ from nova.virt.hyperv import vmops
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
-CONF = cfg.CONF
-CONF.import_opt('use_cow_images', 'nova.virt.driver')
-
-
-def check_os_version_requirement(function):
- @functools.wraps(function)
- def wrapper(self, *args, **kwds):
- if not self._livemigrutils:
- raise NotImplementedError(_('Live migration is supported '
- 'starting with Hyper-V Server '
- '2012'))
- return function(self, *args, **kwds)
- return wrapper
+CONF = nova.conf.CONF
class LiveMigrationOps(object):
def __init__(self):
- # Live migration is supported starting from Hyper-V Server 2012
- if utilsfactory.get_hostutils().check_min_windows_version(6, 2):
- self._livemigrutils = utilsfactory.get_livemigrationutils()
- else:
- self._livemigrutils = None
-
+ self._livemigrutils = utilsfactory.get_livemigrationutils()
self._pathutils = pathutils.PathUtils()
self._vmops = vmops.VMOps()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
self._vmutils = utilsfactory.get_vmutils()
- @check_os_version_requirement
def live_migration(self, context, instance_ref, dest, post_method,
recover_method, block_migration=False,
migrate_data=None):
@@ -82,7 +62,6 @@ class LiveMigrationOps(object):
instance_name)
post_method(context, instance_ref, dest, block_migration)
- @check_os_version_requirement
def pre_live_migration(self, context, instance, block_device_info,
network_info):
LOG.debug("pre_live_migration called", instance=instance)
@@ -96,14 +75,12 @@ class LiveMigrationOps(object):
self._volumeops.initialize_volumes_connection(block_device_info)
- @check_os_version_requirement
def post_live_migration(self, context, instance, block_device_info):
self._volumeops.disconnect_volumes(block_device_info)
self._pathutils.get_instance_dir(instance.name,
create_dir=False,
remove_dir=True)
- @check_os_version_requirement
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info, block_migration):
LOG.debug("post_live_migration_at_destination called",
@@ -111,7 +88,6 @@ class LiveMigrationOps(object):
self._vmops.log_vm_serial_output(instance_ref['name'],
instance_ref['uuid'])
- @check_os_version_requirement
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
@@ -119,12 +95,10 @@ class LiveMigrationOps(object):
LOG.debug("check_can_live_migrate_destination called", instance_ref)
return migrate_data_obj.LiveMigrateData()
- @check_os_version_requirement
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
LOG.debug("check_can_live_migrate_destination_cleanup called")
- @check_os_version_requirement
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
LOG.debug("check_can_live_migrate_source called", instance_ref)
diff --git a/nova/virt/hyperv/snapshotops.py b/nova/virt/hyperv/snapshotops.py
index 93fa5f716a..d453362a45 100644
--- a/nova/virt/hyperv/snapshotops.py
+++ b/nova/virt/hyperv/snapshotops.py
@@ -20,17 +20,15 @@ import os
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
-from oslo_config import cfg
from oslo_log import log as logging
from nova.compute import task_states
from nova import exception
-from nova.i18n import _LW
+from nova.i18n import _LE
from nova.image import glance
from nova import utils
from nova.virt.hyperv import pathutils
-CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -60,7 +58,7 @@ class SnapshotOps(object):
try:
instance_synchronized_snapshot()
except os_win_exc.HyperVVMNotFoundException:
- # the instance might dissapear before starting the operation.
+ # the instance might disappear before starting the operation.
raise exception.InstanceNotFound(instance_id=instance.uuid)
def _snapshot(self, context, instance, image_id, update_task_state):
@@ -129,10 +127,9 @@ class SnapshotOps(object):
try:
LOG.debug("Removing snapshot %s", image_id)
self._vmutils.remove_vm_snapshot(snapshot_path)
- except Exception as ex:
- LOG.exception(ex)
- LOG.warning(_LW('Failed to remove snapshot for VM %s'),
- instance_name)
+ except Exception:
+ LOG.exception(_LE('Failed to remove snapshot for VM %s'),
+ instance_name, instance=instance)
if export_dir:
LOG.debug('Removing directory: %s', export_dir)
self._pathutils.rmtree(export_dir)
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 5bef354059..ebf83f59fc 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -22,6 +22,7 @@ import os
import time
from eventlet import timeout as etimeout
+from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from os_win.utils.io import ioutils
from os_win import utilsfactory
@@ -37,6 +38,7 @@ from oslo_utils import uuidutils
import six
from nova.api.metadata import base as instance_metadata
+import nova.conf
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova import utils
@@ -86,9 +88,8 @@ hyperv_opts = [
' if instance does not shutdown within this window.'),
]
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
-CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('network_api_class', 'nova.network')
SHUTDOWN_TIME_INCREMENT = 5
@@ -130,6 +131,7 @@ class VMOps(object):
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
+ self._metricsutils = utilsfactory.get_metricsutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._hostutils = utilsfactory.get_hostutils()
self._pathutils = pathutils.PathUtils()
@@ -331,7 +333,7 @@ class VMOps(object):
self._vif_driver.plug(instance, vif)
if CONF.hyperv.enable_instance_metrics_collection:
- self._vmutils.enable_vm_metrics_collection(instance_name)
+ self._metricsutils.enable_vm_metrics_collection(instance_name)
self._create_vm_com_port_pipe(instance)
@@ -469,7 +471,7 @@ class VMOps(object):
return
self._set_vm_state(instance,
- constants.HYPERV_VM_STATE_REBOOT)
+ os_win_const.HYPERV_VM_STATE_REBOOT)
def _soft_shutdown(self, instance,
timeout=CONF.hyperv.wait_soft_reboot_seconds,
@@ -511,25 +513,25 @@ class VMOps(object):
"""Pause VM instance."""
LOG.debug("Pause instance", instance=instance)
self._set_vm_state(instance,
- constants.HYPERV_VM_STATE_PAUSED)
+ os_win_const.HYPERV_VM_STATE_PAUSED)
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug("Unpause instance", instance=instance)
self._set_vm_state(instance,
- constants.HYPERV_VM_STATE_ENABLED)
+ os_win_const.HYPERV_VM_STATE_ENABLED)
def suspend(self, instance):
"""Suspend the specified instance."""
LOG.debug("Suspend instance", instance=instance)
self._set_vm_state(instance,
- constants.HYPERV_VM_STATE_SUSPENDED)
+ os_win_const.HYPERV_VM_STATE_SUSPENDED)
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug("Resume instance", instance=instance)
self._set_vm_state(instance,
- constants.HYPERV_VM_STATE_ENABLED)
+ os_win_const.HYPERV_VM_STATE_ENABLED)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
@@ -544,7 +546,7 @@ class VMOps(object):
return
self._set_vm_state(instance,
- constants.HYPERV_VM_STATE_DISABLED)
+ os_win_const.HYPERV_VM_STATE_DISABLED)
except os_win_exc.HyperVVMNotFoundException:
# The manager can call the stop API after receiving instance
# power off events. If this is triggered when the instance
@@ -561,7 +563,7 @@ class VMOps(object):
self._volumeops.fix_instance_volume_disk_paths(instance.name,
block_device_info)
- self._set_vm_state(instance, constants.HYPERV_VM_STATE_ENABLED)
+ self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_ENABLED)
def _set_vm_state(self, instance, req_state):
instance_name = instance.name
@@ -570,11 +572,11 @@ class VMOps(object):
try:
self._vmutils.set_vm_state(instance_name, req_state)
- if req_state in (constants.HYPERV_VM_STATE_DISABLED,
- constants.HYPERV_VM_STATE_REBOOT):
+ if req_state in (os_win_const.HYPERV_VM_STATE_DISABLED,
+ os_win_const.HYPERV_VM_STATE_REBOOT):
self._delete_vm_console_log(instance)
- if req_state in (constants.HYPERV_VM_STATE_ENABLED,
- constants.HYPERV_VM_STATE_REBOOT):
+ if req_state in (os_win_const.HYPERV_VM_STATE_ENABLED,
+ os_win_const.HYPERV_VM_STATE_REBOOT):
self.log_vm_serial_output(instance_name,
instance_uuid)
@@ -599,7 +601,7 @@ class VMOps(object):
False otherwise.
"""
- desired_vm_states = [constants.HYPERV_VM_STATE_DISABLED]
+ desired_vm_states = [os_win_const.HYPERV_VM_STATE_DISABLED]
def _check_vm_status(instance_name):
if self._get_vm_state(instance_name) in desired_vm_states:
@@ -725,7 +727,7 @@ class VMOps(object):
given instance.
"""
vm_state = self._get_vm_state(instance.name)
- if vm_state == constants.HYPERV_VM_STATE_DISABLED:
+ if vm_state == os_win_const.HYPERV_VM_STATE_DISABLED:
# can attach / detach interface to stopped VMs.
return True
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index d2005c5630..f3ad30eda4 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -370,7 +370,7 @@ def export_path_synchronized(f):
class SMBFSVolumeDriver(object):
def __init__(self):
- self._pathutils = utilsfactory.get_pathutils()
+ self._smbutils = utilsfactory.get_smbutils()
self._vmutils = utilsfactory.get_vmutils()
self._username_regex = re.compile(r'user(?:name)?=([^, ]+)')
self._password_regex = re.compile(r'pass(?:word)?=([^, ]+)')
@@ -441,12 +441,12 @@ class SMBFSVolumeDriver(object):
def ensure_share_mounted(self, connection_info):
export_path = self._get_export_path(connection_info)
- if not self._pathutils.check_smb_mapping(export_path):
+ if not self._smbutils.check_smb_mapping(export_path):
opts_str = connection_info['data'].get('options', '')
username, password = self._parse_credentials(opts_str)
- self._pathutils.mount_smb_share(export_path,
- username=username,
- password=password)
+ self._smbutils.mount_smb_share(export_path,
+ username=username,
+ password=password)
def _parse_credentials(self, opts_str):
match = self._username_regex.findall(opts_str)
@@ -467,5 +467,5 @@ class SMBFSVolumeDriver(object):
# an instance.
@utils.synchronized(export_path)
def unmount_synchronized():
- self._pathutils.unmount_smb_share(export_path)
+ self._smbutils.unmount_smb_share(export_path)
unmount_synchronized()
diff --git a/nova/virt/images.py b/nova/virt/images.py
index adaa009754..33a6927361 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -22,10 +22,10 @@ Handling of VM disk images.
import os
from oslo_concurrency import processutils
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
+import nova.conf
from nova import exception
from nova.i18n import _, _LE
from nova import image
@@ -34,14 +34,7 @@ from nova import utils
LOG = logging.getLogger(__name__)
-image_opts = [
- cfg.BoolOpt('force_raw_images',
- default=True,
- help='Force backing images to raw format'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(image_opts)
+CONF = nova.conf.CONF
IMAGE_API = image.API()
@@ -54,8 +47,7 @@ def qemu_img_info(path, format=None):
CONF.import_opt('images_type', 'nova.virt.libvirt.imagebackend',
group='libvirt')
if not os.path.exists(path) and CONF.libvirt.images_type != 'rbd':
- msg = (_("Path does not exist %(path)s") % {'path': path})
- raise exception.InvalidDiskInfo(reason=msg)
+ raise exception.DiskNotFound(location=path)
try:
cmd = ('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path)
diff --git a/nova/virt/ironic/client_wrapper.py b/nova/virt/ironic/client_wrapper.py
index a840ca9759..4b38fdb060 100644
--- a/nova/virt/ironic/client_wrapper.py
+++ b/nova/virt/ironic/client_wrapper.py
@@ -30,6 +30,9 @@ CONF = cfg.CONF
ironic = None
+# The API version required by the Ironic driver
+IRONIC_API_VERSION = (1, 8)
+
class IronicClientWrapper(object):
"""Ironic client wrapper class that encapsulates retry logic."""
@@ -82,8 +85,9 @@ class IronicClientWrapper(object):
# Retries for Conflict exception
kwargs['max_retries'] = max_retries
kwargs['retry_interval'] = retry_interval
+ kwargs['os_ironic_api_version'] = '%d.%d' % IRONIC_API_VERSION
try:
- cli = ironic.client.get_client(CONF.ironic.api_version, **kwargs)
+ cli = ironic.client.get_client(IRONIC_API_VERSION[0], **kwargs)
# Cache the client so we don't have to reconstruct and
# reauthenticate it every time we need it.
if retry_on_conflict:
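
Pinning the microversion as a (major, minor) tuple keeps the two call sites above consistent: the full 'major.minor' string goes into os_ironic_api_version, while get_client() only takes the major version. A quick sketch of the formatting:

    IRONIC_API_VERSION = (1, 8)

    kwargs = {'os_ironic_api_version': '%d.%d' % IRONIC_API_VERSION}
    major_version = IRONIC_API_VERSION[0]

    print(kwargs['os_ironic_api_version'])  # 1.8
    print(major_version)                    # 1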
diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py
index e773698f72..ae47c59d54 100644
--- a/nova/virt/ironic/driver.py
+++ b/nova/virt/ironic/driver.py
@@ -74,6 +74,10 @@ _UNPROVISION_STATES = (ironic_states.ACTIVE, ironic_states.DEPLOYFAIL,
ironic_states.ERROR, ironic_states.DEPLOYWAIT,
ironic_states.DEPLOYING)
+_NODE_FIELDS = ('uuid', 'power_state', 'target_power_state', 'provision_state',
+ 'target_provision_state', 'last_error', 'maintenance',
+ 'properties', 'instance_uuid')
+
def map_power_state(state):
try:
@@ -83,18 +87,6 @@ def map_power_state(state):
return power_state.NOSTATE
-def _validate_instance_and_node(ironicclient, instance):
- """Get the node associated with the instance.
-
- Check with the Ironic service that this instance is associated with a
- node, and return the node.
- """
- try:
- return ironicclient.call("node.get_by_instance_uuid", instance.uuid)
- except ironic.exc.NotFound:
- raise exception.InstanceNotFound(instance_id=instance.uuid)
-
-
def _get_nodes_supported_instances(cpu_arch=None):
"""Return supported instances for a node."""
if not cpu_arch:
@@ -159,6 +151,23 @@ class IronicDriver(virt_driver.ComputeDriver):
self.ironicclient = client_wrapper.IronicClientWrapper()
+ def _get_node(self, node_uuid):
+ """Get a node by its UUID."""
+ return self.ironicclient.call('node.get', node_uuid,
+ fields=_NODE_FIELDS)
+
+ def _validate_instance_and_node(self, instance):
+ """Get the node associated with the instance.
+
+ Check with the Ironic service that this instance is associated with a
+ node, and return the node.
+ """
+ try:
+ return self.ironicclient.call('node.get_by_instance_uuid',
+ instance.uuid, fields=_NODE_FIELDS)
+ except ironic.exc.NotFound:
+ raise exception.InstanceNotFound(instance_id=instance.uuid)
+
def _node_resources_unavailable(self, node_obj):
"""Determine whether the node's resources are in an acceptable state.
@@ -375,7 +384,7 @@ class IronicDriver(virt_driver.ComputeDriver):
# on the next cycle (M).
patch.append({'op': 'remove', 'path': '/instance_uuid'})
try:
- _validate_instance_and_node(self.ironicclient, instance)
+ self._validate_instance_and_node(instance)
self.ironicclient.call('node.update', node.uuid, patch)
except exception.InstanceNotFound:
LOG.debug("Instance already removed from Ironic node %s. Skip "
@@ -390,7 +399,7 @@ class IronicDriver(virt_driver.ComputeDriver):
self._unplug_vifs(node, instance, network_info)
self._stop_firewall(instance, network_info)
- def _wait_for_active(self, ironicclient, instance):
+ def _wait_for_active(self, instance):
"""Wait for the node to be marked as ACTIVE in Ironic."""
instance.refresh()
if (instance.task_state == task_states.DELETING or
@@ -398,7 +407,7 @@ class IronicDriver(virt_driver.ComputeDriver):
raise exception.InstanceDeployFailure(
_("Instance %s provisioning was aborted") % instance.uuid)
- node = _validate_instance_and_node(ironicclient, instance)
+ node = self._validate_instance_and_node(instance)
if node.provision_state == ironic_states.ACTIVE:
# job is done
LOG.debug("Ironic node %(node)s is now ACTIVE",
@@ -423,9 +432,9 @@ class IronicDriver(virt_driver.ComputeDriver):
_log_ironic_polling('become ACTIVE', node, instance)
- def _wait_for_power_state(self, ironicclient, instance, message):
+ def _wait_for_power_state(self, instance, message):
"""Wait for the node to complete a power state change."""
- node = _validate_instance_and_node(ironicclient, instance)
+ node = self._validate_instance_and_node(instance)
if node.target_power_state == ironic_states.NOSTATE:
raise loopingcall.LoopingCallDone()
@@ -446,7 +455,7 @@ class IronicDriver(virt_driver.ComputeDriver):
def _get_hypervisor_version(self):
"""Returns the version of the Ironic API service endpoint."""
- return CONF.ironic.api_version
+ return client_wrapper.IRONIC_API_VERSION[0]
def instance_exists(self, instance):
"""Checks the existence of an instance.
@@ -459,7 +468,7 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
try:
- _validate_instance_and_node(self.ironicclient, instance)
+ self._validate_instance_and_node(instance)
return True
except exception.InstanceNotFound:
return False
@@ -525,7 +534,7 @@ class IronicDriver(virt_driver.ComputeDriver):
# NOTE(comstud): Fallback and check Ironic. This case should be
# rare.
try:
- self.ironicclient.call("node.get", nodename)
+ self._get_node(nodename)
return True
except ironic.exc.NotFound:
return False
@@ -585,7 +594,7 @@ class IronicDriver(virt_driver.ComputeDriver):
else:
LOG.debug("Node %(node)s not found in cache, age: %(age)s",
{'node': nodename, 'age': cache_age})
- node = self.ironicclient.call("node.get", nodename)
+ node = self._get_node(nodename)
return self._node_resource(node)
def get_info(self, instance):
@@ -598,7 +607,7 @@ class IronicDriver(virt_driver.ComputeDriver):
:returns: an InstanceInfo object
"""
try:
- node = _validate_instance_and_node(self.ironicclient, instance)
+ node = self._validate_instance_and_node(instance)
except exception.InstanceNotFound:
return hardware.InstanceInfo(
state=map_power_state(ironic_states.NOSTATE))
@@ -643,7 +652,7 @@ class IronicDriver(virt_driver.ComputeDriver):
MAC addresses'.
"""
try:
- node = self.ironicclient.call("node.get", instance.node)
+ node = self._get_node(instance.node)
except ironic.exc.NotFound:
return None
ports = self.ironicclient.call("node.list_ports", node.uuid)
@@ -704,8 +713,6 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
LOG.debug('Spawn called for instance', instance=instance)
- image_meta = objects.ImageMeta.from_dict(image_meta)
-
# The compute manager is meant to know the node uuid, so missing uuid
# is a significant issue. It may mean we've been passed the wrong data.
node_uuid = instance.get('node')
@@ -714,7 +721,7 @@ class IronicDriver(virt_driver.ComputeDriver):
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance.uuid)
- node = self.ironicclient.call("node.get", node_uuid)
+ node = self._get_node(node_uuid)
flavor = instance.flavor
self._add_driver_fields(node, instance, image_meta, flavor)
@@ -783,7 +790,6 @@ class IronicDriver(virt_driver.ComputeDriver):
flavor=flavor)
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
- self.ironicclient,
instance)
try:
timer.start(interval=CONF.ironic.api_retry_interval).wait()
@@ -796,12 +802,13 @@ class IronicDriver(virt_driver.ComputeDriver):
{'instance': instance.uuid,
'node': node_uuid})
- def _unprovision(self, ironicclient, instance, node):
+ def _unprovision(self, instance, node):
"""This method is called from destroy() to unprovision
already provisioned node after required checks.
"""
try:
- ironicclient.call("node.set_provision_state", node.uuid, "deleted")
+ self.ironicclient.call("node.set_provision_state", node.uuid,
+ "deleted")
except Exception as e:
# if the node is already in a deprovisioned state, continue
# This should be fixed in Ironic.
@@ -816,7 +823,7 @@ class IronicDriver(virt_driver.ComputeDriver):
def _wait_for_provision_state():
try:
- node = _validate_instance_and_node(ironicclient, instance)
+ node = self._validate_instance_and_node(instance)
except exception.InstanceNotFound:
LOG.debug("Instance already removed from Ironic",
instance=instance)
@@ -867,7 +874,7 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
LOG.debug('Destroy called for instance', instance=instance)
try:
- node = _validate_instance_and_node(self.ironicclient, instance)
+ node = self._validate_instance_and_node(instance)
except exception.InstanceNotFound:
LOG.warning(_LW("Destroy called on non-existing instance %s."),
instance.uuid)
@@ -878,7 +885,7 @@ class IronicDriver(virt_driver.ComputeDriver):
return
if node.provision_state in _UNPROVISION_STATES:
- self._unprovision(self.ironicclient, instance, node)
+ self._unprovision(instance, node)
self._cleanup_deploy(context, node, instance, network_info)
LOG.info(_LI('Successfully unprovisioned Ironic node %s'),
@@ -906,12 +913,11 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
LOG.debug('Reboot called for instance', instance=instance)
- node = _validate_instance_and_node(self.ironicclient, instance)
+ node = self._validate_instance_and_node(instance)
self.ironicclient.call("node.set_power_state", node.uuid, 'reboot')
timer = loopingcall.FixedIntervalLoopingCall(
- self._wait_for_power_state,
- self.ironicclient, instance, 'reboot')
+ self._wait_for_power_state, instance, 'reboot')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully rebooted Ironic node %s'),
node.uuid, instance=instance)
@@ -931,12 +937,11 @@ class IronicDriver(virt_driver.ComputeDriver):
for it to shutdown. Ignored by this driver.
"""
LOG.debug('Power off called for instance', instance=instance)
- node = _validate_instance_and_node(self.ironicclient, instance)
+ node = self._validate_instance_and_node(instance)
self.ironicclient.call("node.set_power_state", node.uuid, 'off')
timer = loopingcall.FixedIntervalLoopingCall(
- self._wait_for_power_state,
- self.ironicclient, instance, 'power off')
+ self._wait_for_power_state, instance, 'power off')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully powered off Ironic node %s'),
node.uuid, instance=instance)
@@ -957,12 +962,11 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
LOG.debug('Power on called for instance', instance=instance)
- node = _validate_instance_and_node(self.ironicclient, instance)
+ node = self._validate_instance_and_node(instance)
self.ironicclient.call("node.set_power_state", node.uuid, 'on')
timer = loopingcall.FixedIntervalLoopingCall(
- self._wait_for_power_state,
- self.ironicclient, instance, 'power on')
+ self._wait_for_power_state, instance, 'power on')
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Successfully powered on Ironic node %s'),
node.uuid, instance=instance)
@@ -977,10 +981,6 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
self.firewall_driver.refresh_security_group_rules(security_group_id)
- def refresh_provider_fw_rules(self):
- """Triggers a firewall update based on database changes."""
- self.firewall_driver.refresh_provider_fw_rules()
-
def refresh_instance_security_rules(self, instance):
"""Refresh security group rules from data store.
@@ -1073,7 +1073,7 @@ class IronicDriver(virt_driver.ComputeDriver):
:param network_info: Instance network information.
"""
- node = self.ironicclient.call("node.get", instance.node)
+ node = self._get_node(instance.node)
self._plug_vifs(node, instance, network_info)
def unplug_vifs(self, instance, network_info):
@@ -1083,7 +1083,7 @@ class IronicDriver(virt_driver.ComputeDriver):
:param network_info: Instance network information.
"""
- node = self.ironicclient.call("node.get", instance.node)
+ node = self._get_node(instance.node)
self._unplug_vifs(node, instance, network_info)
def rebuild(self, context, instance, image_meta, injected_files,
@@ -1131,13 +1131,11 @@ class IronicDriver(virt_driver.ComputeDriver):
"""
LOG.debug('Rebuild called for instance', instance=instance)
- image_meta = objects.ImageMeta.from_dict(image_meta)
-
instance.task_state = task_states.REBUILD_SPAWNING
instance.save(expected_task_state=[task_states.REBUILDING])
node_uuid = instance.node
- node = self.ironicclient.call("node.get", node_uuid)
+ node = self._get_node(node_uuid)
self._add_driver_fields(node, instance, image_meta, instance.flavor,
preserve_ephemeral)
@@ -1157,7 +1155,6 @@ class IronicDriver(virt_driver.ComputeDriver):
# Although the target provision state is REBUILD, it will actually go
# to ACTIVE once the redeploy is finished.
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
- self.ironicclient,
instance)
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info(_LI('Instance was successfully rebuilt'), instance=instance)
@@ -1178,7 +1175,7 @@ class IronicDriver(virt_driver.ComputeDriver):
:returns: a string representing the host ID
"""
- node = self.ironicclient.call("node.get", instance.node)
+ node = self._get_node(instance.node)
if getattr(node, 'network_provider', 'none') == 'none':
# flat network, go ahead and allow the port to be bound
return super(IronicDriver, self).network_binding_host_id(
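
Besides folding _validate_instance_and_node() into the class, the new _get_node() helper passes fields=_NODE_FIELDS so every node lookup only asks Ironic for the attributes the driver actually reads. A rough sketch of the wrapper pattern, with a fake client standing in for IronicClientWrapper:

    NODE_FIELDS = ('uuid', 'power_state', 'provision_state',
                   'instance_uuid')

    class FakeIronicClient(object):
        # Stand-in for IronicClientWrapper; just records the call.
        def call(self, method, *args, **kwargs):
            print('call(%r, args=%r, kwargs=%r)' % (method, args, kwargs))

    class Driver(object):
        def __init__(self):
            self.ironicclient = FakeIronicClient()

        def _get_node(self, node_uuid):
            # Only fetch the fields the driver consumes.
            return self.ironicclient.call('node.get', node_uuid,
                                          fields=NODE_FIELDS)

    Driver()._get_node('abc-123')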
diff --git a/nova/virt/ironic/patcher.py b/nova/virt/ironic/patcher.py
index 6322844aea..4d54a77881 100644
--- a/nova/virt/ironic/patcher.py
+++ b/nova/virt/ironic/patcher.py
@@ -20,12 +20,12 @@
Helper classes for Ironic HTTP PATCH creation.
"""
-from oslo_config import cfg
from oslo_serialization import jsonutils
import six
-CONF = cfg.CONF
-CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
+import nova.conf
+
+CONF = nova.conf.CONF
def create(node):
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index 5cc411cd00..f770bbc784 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -84,6 +84,7 @@ from nova.objects import base as obj_base
from nova.virt import configdrive
from nova.virt import driver
from nova.virt.libvirt import utils as libvirt_utils
+from nova.virt import osinfo
CONF = cfg.CONF
@@ -234,8 +235,11 @@ def get_disk_bus_for_device_type(instance,
"""
# Prefer a disk bus set against the image first of all
- key = "hw_" + device_type + "_bus"
- disk_bus = image_meta.properties.get(key)
+ if device_type == "disk":
+ disk_bus = osinfo.HardwareProperties(image_meta).disk_model
+ else:
+ key = "hw_" + device_type + "_bus"
+ disk_bus = image_meta.properties.get(key)
if disk_bus is not None:
if not is_disk_bus_valid_for_virt(virt_type, disk_bus):
raise exception.UnsupportedHardware(model=disk_bus,
diff --git a/nova/virt/libvirt/config.py b/nova/virt/libvirt/config.py
index d325a69ab0..1fe41eef52 100644
--- a/nova/virt/libvirt/config.py
+++ b/nova/virt/libvirt/config.py
@@ -714,6 +714,7 @@ class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
self.driver_format = None
self.driver_cache = None
self.driver_discard = None
+ self.driver_io = None
self.source_path = None
self.source_protocol = None
self.source_name = None
@@ -757,6 +758,8 @@ class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
drv.set("cache", self.driver_cache)
if self.driver_discard is not None:
drv.set("discard", self.driver_discard)
+ if self.driver_io is not None:
+ drv.set("io", self.driver_io)
dev.append(drv)
if self.source_type == "file":
@@ -854,6 +857,7 @@ class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice):
self.driver_format = c.get('type')
self.driver_cache = c.get('cache')
self.driver_discard = c.get('discard')
+ self.driver_io = c.get('io')
elif c.tag == 'source':
if self.source_type == 'file':
self.source_path = c.get('file')
@@ -1251,6 +1255,84 @@ class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice):
return dev
+ def parse_dom(self, xmldoc):
+ super(LibvirtConfigGuestInterface, self).parse_dom(xmldoc)
+
+ self.net_type = xmldoc.get('type')
+
+ for c in xmldoc.getchildren():
+ if c.tag == 'mac':
+ self.mac_addr = c.get('address')
+ elif c.tag == 'model':
+ self.model = c.get('type')
+ elif c.tag == 'driver':
+ self.driver_name = c.get('name')
+ self.vhost_queues = c.get('queues')
+ elif c.tag == 'source':
+ if self.net_type == 'direct':
+ self.source_dev = c.get('dev')
+ self.source_mode = c.get('mode', 'private')
+ elif self.net_type == 'vhostuser':
+ self.vhostuser_type = c.get('type')
+ self.vhostuser_mode = c.get('mode')
+ self.vhostuser_path = c.get('path')
+ elif self.net_type == 'hostdev':
+ for sub in c.getchildren():
+ if sub.tag == 'address' and sub.get('type') == 'pci':
+ # strip the 0x prefix on each attribute since
+ # format_dom puts them back on - note that
+ # LibvirtConfigGuestHostdevPCI does not do this...
+ self.source_dev = (
+ pci_utils.get_pci_address(
+ sub.get('domain')[2:],
+ sub.get('bus')[2:],
+ sub.get('slot')[2:],
+ sub.get('function')[2:]
+ )
+ )
+ else:
+ self.source_dev = c.get('bridge')
+ elif c.tag == 'target':
+ self.target_dev = c.get('dev')
+ elif c.tag == 'script':
+ self.script = c.get('path')
+ elif c.tag == 'vlan':
+ # NOTE(mriedem): The vlan element can have multiple tag
+ # sub-elements but we're currently only storing a single tag
+ # id in the vlan attribute.
+ for sub in c.getchildren():
+ if sub.tag == 'tag' and sub.get('id'):
+ self.vlan = sub.get('id')
+ break
+ elif c.tag == 'virtualport':
+ self.vporttype = c.get('type')
+ for sub in c.getchildren():
+ if sub.tag == 'parameters':
+ for k, v in dict(sub.attrib).items():
+ self.add_vport_param(k, v)
+ elif c.tag == 'filterref':
+ self.filtername = c.get('filter')
+ for sub in c.getchildren():
+ if sub.tag == 'parameter':
+ self.add_filter_param(sub.get('name'),
+ sub.get('value'))
+ elif c.tag == 'bandwidth':
+ for sub in c.getchildren():
+ # Note that only average is mandatory, burst and peak are
+ # optional (and all are ints).
+ if sub.tag == 'inbound':
+ self.vif_inbound_average = int(sub.get('average'))
+ if sub.get('burst'):
+ self.vif_inbound_burst = int(sub.get('burst'))
+ if sub.get('peak'):
+ self.vif_inbound_peak = int(sub.get('peak'))
+ elif sub.tag == 'outbound':
+ self.vif_outbound_average = int(sub.get('average'))
+ if sub.get('burst'):
+ self.vif_outbound_burst = int(sub.get('burst'))
+ if sub.get('peak'):
+ self.vif_outbound_peak = int(sub.get('peak'))
+
def add_filter_param(self, key, value):
self.filterparams.append({'key': key, 'value': value})
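As a worked example of the new parser, here is the plain bridge case round-tripped with xml.etree (a sketch; the real class walks lxml nodes via getchildren()):

import xml.etree.ElementTree as etree

doc = etree.fromstring("""
<interface type="bridge">
  <mac address="52:54:00:aa:bb:cc"/>
  <model type="virtio"/>
  <source bridge="br0"/>
  <target dev="tap0"/>
</interface>
""")
net_type = doc.get("type")                     # 'bridge'
mac_addr = doc.find("mac").get("address")      # '52:54:00:aa:bb:cc'
model = doc.find("model").get("type")          # 'virtio'
# In the fallback source branch above, the device comes from the
# bridge attribute rather than dev/path.
source_dev = doc.find("source").get("bridge")  # 'br0'
target_dev = doc.find("target").get("dev")     # 'tap0'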
@@ -1858,6 +1940,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
self.sysinfo = None
self.os_type = None
self.os_loader = None
+ self.os_loader_type = None
self.os_kernel = None
self.os_initrd = None
self.os_cmdline = None
@@ -1903,7 +1986,17 @@ class LibvirtConfigGuest(LibvirtConfigObject):
if self.os_kernel is not None:
os.append(self._text_node("kernel", self.os_kernel))
if self.os_loader is not None:
- os.append(self._text_node("loader", self.os_loader))
+ # Generate XML nodes for UEFI boot.
+ if self.os_loader_type == "pflash":
+ loader = self._text_node("loader", self.os_loader)
+ loader.set("type", "pflash")
+ loader.set("readonly", "yes")
+ os.append(loader)
+ nvram = self._text_node("nvram", "")
+ nvram.set("template", self.os_loader)
+ os.append(nvram)
+ else:
+ os.append(self._text_node("loader", self.os_loader))
if self.os_initrd is not None:
os.append(self._text_node("initrd", self.os_initrd))
if self.os_cmdline is not None:
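Concretely, the pflash branch emits a read-only loader plus an NVRAM template node; a sketch of the resulting <os> children, using the x86_64 OVMF path from DEFAULT_UEFI_LOADER_PATH defined later in this diff:

import xml.etree.ElementTree as etree

os_elem = etree.Element("os")
loader = etree.SubElement(os_elem, "loader")
loader.text = "/usr/share/OVMF/OVMF_CODE.fd"
loader.set("type", "pflash")
loader.set("readonly", "yes")
# libvirt instantiates a per-guest NVRAM store from this template.
nvram = etree.SubElement(os_elem, "nvram")
nvram.set("template", "/usr/share/OVMF/OVMF_CODE.fd")
print(etree.tostring(os_elem).decode())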
@@ -1977,6 +2070,7 @@ class LibvirtConfigGuest(LibvirtConfigObject):
def parse_dom(self, xmldoc):
# Note: This covers only: LibvirtConfigGuestDisks
# LibvirtConfigGuestHostdevPCI
+ # LibvirtConfigGuestInterface
# LibvirtConfigGuestUidMap
# LibvirtConfigGuestGidMap
# LibvirtConfigGuestCPU
@@ -1991,6 +2085,10 @@ class LibvirtConfigGuest(LibvirtConfigObject):
obj = LibvirtConfigGuestHostdevPCI()
obj.parse_dom(d)
self.devices.append(obj)
+ elif d.tag == 'interface':
+ obj = LibvirtConfigGuestInterface()
+ obj.parse_dom(d)
+ self.devices.append(obj)
if c.tag == 'idmap':
for map in c.getchildren():
obj = None
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 814d832f4f..6ffc2e5005 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -66,6 +66,7 @@ from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
+import nova.conf
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
@@ -114,6 +115,8 @@ from nova.volume import encryptors
libvirt = None
+uefi_logged = False
+
LOG = logging.getLogger(__name__)
# Downtime period in milliseconds
@@ -154,20 +157,48 @@ libvirt_opts = [
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
+ cfg.StrOpt('live_migration_inbound_addr',
+ default=None,
+ help='Live migration target IP or hostname '
+ '(if this option is set to None, the hostname '
+ 'of the migration target compute node will '
+ 'be used)'),
cfg.StrOpt('live_migration_uri',
- default="qemu+tcp://%s/system",
- help='Migration target URI '
+ help='Override the default libvirt live migration target URI '
+ '(which is dependent on virt_type) '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED',
- help='Migration flags to be set for live migration'),
+ help='Migration flags to be set for live migration',
+ deprecated_for_removal=True,
+ deprecated_reason='The correct live migration flags can be '
+ 'inferred from the new '
+ 'live_migration_tunnelled config option. '
+ 'live_migration_flag will be removed to '
+ 'avoid potential misconfiguration.'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
'VIR_MIGRATE_NON_SHARED_INC',
- help='Migration flags to be set for block migration'),
+ help='Migration flags to be set for block migration',
+ deprecated_for_removal=True,
+ deprecated_reason='The correct block migration flags can be '
+ 'inferred from the new '
+ 'live_migration_tunnelled config option. '
+ 'block_migration_flag will be removed to '
+ 'avoid potential misconfiguration.'),
+ cfg.BoolOpt('live_migration_tunnelled',
+ help='Whether to use tunnelled migration, where migration '
+ 'data is transported over the libvirtd connection. If '
+ 'True, we use the VIR_MIGRATE_TUNNELLED migration flag, '
+ 'avoiding the need to configure the network to allow '
+ 'direct hypervisor to hypervisor communication. If '
+ 'False, use the native transport. If not set, Nova '
+ 'will choose a sensible default based on, for example '
+ 'the availability of native encryption support in the '
+ 'hypervisor.'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth (in MiB/s) to be used during migration. '
@@ -280,11 +311,10 @@ libvirt_opts = [
'kernel (usually 1-99)')
]
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
-CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('enabled', 'nova.compute.api',
group='ephemeral_storage_encryption')
CONF.import_opt('cipher', 'nova.compute.api',
@@ -295,8 +325,6 @@ CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc', group='vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
CONF.import_opt('vcpu_pin_set', 'nova.conf.virt')
-CONF.import_opt('vif_plugging_is_fatal', 'nova.virt.driver')
-CONF.import_opt('vif_plugging_timeout', 'nova.virt.driver')
CONF.import_opt('hw_disk_discard', 'nova.virt.libvirt.imagebackend',
group='libvirt')
CONF.import_group('workarounds', 'nova.utils')
@@ -307,6 +335,11 @@ DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
+DEFAULT_UEFI_LOADER_PATH = {
+ "x86_64": "/usr/share/OVMF/OVMF_CODE.fd",
+ "aarch64": "/usr/share/AAVMF/AAVMF_CODE.fd"
+}
+
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
@@ -340,6 +373,7 @@ libvirt_volume_drivers = [
'quobyte=nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver',
'hgst=nova.virt.libvirt.volume.hgst.LibvirtHGSTVolumeDriver',
'scaleio=nova.virt.libvirt.volume.scaleio.LibvirtScaleIOVolumeDriver',
+ 'disco=nova.virt.libvirt.volume.disco.LibvirtDISCOVolumeDriver',
]
@@ -370,9 +404,6 @@ patch_tpool_proxy()
# versions. Over time, this will become a common min version
# for all architectures/hypervisors, as this value rises to
# meet them.
-#
-# TODO(berrange) find out what min version ppc64 needs as it
-# almost certainly wants something newer than this....
MIN_LIBVIRT_VERSION = (0, 10, 2)
# TODO(berrange): Re-evaluate this at start of each release cycle
# to decide if we want to plan a future min version bump.
@@ -392,6 +423,9 @@ MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)
# Relative block commit & rebase (feature is detected,
# this version is only used for messaging)
MIN_LIBVIRT_BLOCKJOB_RELATIVE_VERSION = (1, 2, 7)
+# Libvirt version 1.2.17 is required for successful block live migration
+# of a VM booted from an image with attached devices
+MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION = (1, 2, 17)
# libvirt discard feature
MIN_LIBVIRT_DISCARD_VERSION = (1, 0, 6)
MIN_QEMU_DISCARD_VERSION = (1, 6, 0)
@@ -422,6 +456,9 @@ MIN_QEMU_NUMA_HUGEPAGE_VERSION = (2, 1, 0)
# fsFreeze/fsThaw requirement
MIN_LIBVIRT_FSFREEZE_VERSION = (1, 2, 5)
+# UEFI booting support
+MIN_LIBVIRT_UEFI_VERSION = (1, 2, 9)
+
# Hyper-V paravirtualized time source
MIN_LIBVIRT_HYPERV_TIMER_VERSION = (1, 2, 2)
MIN_QEMU_HYPERV_TIMER_VERSION = (2, 0, 0)
@@ -445,6 +482,11 @@ MIN_QEMU_S390_VERSION = (2, 3, 0)
# libvirt 1.3 fix f391889f4e942e22b9ef8ecca492de05106ce41e
MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION = (1, 3, 0)
+# ppc64/ppc64le architectures with KVM
+# NOTE(rfolco): Same minimum versions for Libvirt/Qemu on Big Endian and
+# Little Endian, despite the nuance around guest vs host architectures
+MIN_LIBVIRT_KVM_PPC64_VERSION = (1, 2, 12)
+MIN_QEMU_PPC64_VERSION = (2, 1, 0)
# Names of the types that do not get compressed during migration
NO_COMPRESSION_TYPES = ('qcow2',)
@@ -452,6 +494,19 @@ NO_COMPRESSION_TYPES = ('qcow2',)
# realtime support
MIN_LIBVIRT_REALTIME_VERSION = (1, 2, 13)
+MIN_LIBVIRT_OTHER_ARCH = {arch.S390: MIN_LIBVIRT_KVM_S390_VERSION,
+ arch.S390X: MIN_LIBVIRT_KVM_S390_VERSION,
+ arch.PPC: MIN_LIBVIRT_KVM_PPC64_VERSION,
+ arch.PPC64: MIN_LIBVIRT_KVM_PPC64_VERSION,
+ arch.PPC64LE: MIN_LIBVIRT_KVM_PPC64_VERSION,
+ }
+MIN_QEMU_OTHER_ARCH = {arch.S390: MIN_QEMU_S390_VERSION,
+ arch.S390X: MIN_QEMU_S390_VERSION,
+ arch.PPC: MIN_QEMU_PPC64_VERSION,
+ arch.PPC64: MIN_QEMU_PPC64_VERSION,
+ arch.PPC64LE: MIN_QEMU_PPC64_VERSION,
+ }
+
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
@@ -476,7 +531,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._caps = None
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
- self.virtapi,
host=self._host)
self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver()
@@ -525,6 +579,8 @@ class LibvirtDriver(driver.ComputeDriver):
self.job_tracker = instancejobtracker.InstanceJobTracker()
self._remotefs = remotefs.RemoteFilesystem()
+ self._live_migration_flags = self._block_migration_flags = None
+
def _get_volume_drivers(self):
return libvirt_volume_drivers
@@ -586,7 +642,8 @@ class LibvirtDriver(driver.ComputeDriver):
self._host.initialize()
self._do_quality_warnings()
- self._do_migration_flag_warnings()
+
+ self._parse_migration_flags()
if (CONF.libvirt.virt_type == 'lxc' and
not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
@@ -624,35 +681,133 @@ class LibvirtDriver(driver.ComputeDriver):
{'version': self._version_to_string(
NEXT_MIN_LIBVIRT_VERSION)})
+ kvm_arch = arch.from_host()
if (CONF.libvirt.virt_type in ('kvm', 'qemu') and
- arch.from_host() in (arch.S390, arch.S390X) and
- not self._host.has_min_version(MIN_LIBVIRT_KVM_S390_VERSION,
- MIN_QEMU_S390_VERSION)):
- raise exception.NovaException(
- _('Running Nova with qemu/kvm virt_type on s390/s390x '
- 'requires libvirt version %(libvirt_ver)s and '
- 'qemu version %(qemu_ver)s, or greater') %
- {'libvirt_ver': self._version_to_string(
- MIN_LIBVIRT_KVM_S390_VERSION),
- 'qemu_ver': self._version_to_string(
- MIN_QEMU_S390_VERSION)})
-
- def _do_migration_flag_warnings(self):
- block_migration_flag = 'VIR_MIGRATE_NON_SHARED_INC'
- if block_migration_flag in CONF.libvirt.live_migration_flag:
- LOG.warning(_LW('Running Nova with a live_migration_flag config '
- 'option which contains %(flag)s '
- 'will cause all live-migrations to be block-'
- 'migrations instead. This setting should only be '
- 'on the block_migration_flag instead.'),
- {'flag': block_migration_flag})
- if block_migration_flag not in CONF.libvirt.block_migration_flag:
- LOG.warning(_LW('Running Nova with a block_migration_flag config '
- 'option which does not contain %(flag)s '
- 'will cause all block-migrations to be live-'
- 'migrations instead. This setting should be '
- 'on the block_migration_flag.'),
- {'flag': block_migration_flag})
+ kvm_arch in MIN_LIBVIRT_OTHER_ARCH and
+ not self._host.has_min_version(
+ MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch),
+ MIN_QEMU_OTHER_ARCH.get(kvm_arch))):
+ raise exception.NovaException(
+ _('Running Nova with qemu/kvm virt_type on %(arch)s '
+ 'requires libvirt version %(libvirt_ver)s and '
+ 'qemu version %(qemu_ver)s, or greater') %
+ {'arch': kvm_arch,
+ 'libvirt_ver': self._version_to_string(
+ MIN_LIBVIRT_OTHER_ARCH.get(kvm_arch)),
+ 'qemu_ver': self._version_to_string(
+ MIN_QEMU_OTHER_ARCH.get(kvm_arch))})
+
+ def _check_required_migration_flags(self, migration_flags, config_name):
+ if CONF.libvirt.virt_type == 'xen':
+ if (migration_flags & libvirt.VIR_MIGRATE_PEER2PEER) != 0:
+ LOG.warning(_LW('Removing the VIR_MIGRATE_PEER2PEER flag from '
+ '%(config_name)s because peer-to-peer '
+ 'migrations are not supported by the "xen" '
+ 'virt type'),
+ {'config_name': config_name})
+ migration_flags &= ~libvirt.VIR_MIGRATE_PEER2PEER
+ else:
+ if (migration_flags & libvirt.VIR_MIGRATE_PEER2PEER) == 0:
+ LOG.warning(_LW('Adding the VIR_MIGRATE_PEER2PEER flag to '
+ '%(config_name)s because direct migrations '
+ 'are not supported by the %(virt_type)s '
+ 'virt type'),
+ {'config_name': config_name,
+ 'virt_type': CONF.libvirt.virt_type})
+ migration_flags |= libvirt.VIR_MIGRATE_PEER2PEER
+
+ if (migration_flags & libvirt.VIR_MIGRATE_UNDEFINE_SOURCE) == 0:
+ LOG.warning(_LW('Adding the VIR_MIGRATE_UNDEFINE_SOURCE flag to '
+ '%(config_name)s because, without it, migrated '
+ 'VMs will remain defined on the source host'),
+ {'config_name': config_name})
+ migration_flags |= libvirt.VIR_MIGRATE_UNDEFINE_SOURCE
+
+ if (migration_flags & libvirt.VIR_MIGRATE_PERSIST_DEST) != 0:
+ LOG.warning(_LW('Removing the VIR_MIGRATE_PERSIST_DEST flag from '
+ '%(config_name)s as Nova ensures the VM is '
+ 'persisted on the destination host'),
+ {'config_name': config_name})
+ migration_flags &= ~libvirt.VIR_MIGRATE_PERSIST_DEST
+
+ return migration_flags
+
+ def _check_block_migration_flags(self, live_migration_flags,
+ block_migration_flags):
+ if (live_migration_flags & libvirt.VIR_MIGRATE_NON_SHARED_INC) != 0:
+ LOG.warning(_LW('Removing the VIR_MIGRATE_NON_SHARED_INC flag '
+ 'from the live_migration_flag config option '
+ 'because it will cause all live-migrations to be '
+ 'block-migrations instead.'))
+ live_migration_flags &= ~libvirt.VIR_MIGRATE_NON_SHARED_INC
+
+ if (block_migration_flags & libvirt.VIR_MIGRATE_NON_SHARED_INC) == 0:
+ LOG.warning(_LW('Adding the VIR_MIGRATE_NON_SHARED_INC flag to '
+ 'the block_migration_flag config option, '
+ 'otherwise all block-migrations will be '
+ 'live-migrations instead.'))
+ block_migration_flags |= libvirt.VIR_MIGRATE_NON_SHARED_INC
+
+ return (live_migration_flags, block_migration_flags)
+
+ def _handle_live_migration_tunnelled(self, migration_flags, config_name):
+ if CONF.libvirt.live_migration_tunnelled is None:
+ return migration_flags
+
+ if CONF.libvirt.live_migration_tunnelled:
+ if (migration_flags & libvirt.VIR_MIGRATE_TUNNELLED) == 0:
+ LOG.warning(_LW('The %(config_name)s config option does not '
+ 'contain the VIR_MIGRATE_TUNNELLED flag but '
+ 'live_migration_tunnelled is set to True, '
+ 'which causes VIR_MIGRATE_TUNNELLED to be '
+ 'set'),
+ {'config_name': config_name})
+ migration_flags |= libvirt.VIR_MIGRATE_TUNNELLED
+ else:
+ if (migration_flags & libvirt.VIR_MIGRATE_TUNNELLED) != 0:
+ LOG.warning(_LW('The %(config_name)s config option contains '
+ 'the VIR_MIGRATE_TUNNELLED flag but the '
+ 'live_migration_tunnelled is set to False '
+ 'which causes VIR_MIGRATE_TUNNELLED to be '
+ 'unset'),
+ {'config_name': config_name})
+ migration_flags &= ~libvirt.VIR_MIGRATE_TUNNELLED
+
+ return migration_flags
+
+ def _parse_migration_flags(self):
+ def str2sum(str_val):
+ logical_sum = 0
+ for s in [i.strip() for i in str_val.split(',') if i]:
+ try:
+ logical_sum |= getattr(libvirt, s)
+ except AttributeError:
+ LOG.warning(_LW("Ignoring unknown libvirt live migration "
+ "flag '%(flag)s'"), {'flag': s})
+ return logical_sum
+
+ live_migration_flags = str2sum(CONF.libvirt.live_migration_flag)
+ block_migration_flags = str2sum(CONF.libvirt.block_migration_flag)
+
+ live_config_name = 'live_migration_flag'
+ block_config_name = 'block_migration_flag'
+
+ live_migration_flags = self._check_required_migration_flags(
+ live_migration_flags, live_config_name)
+ block_migration_flags = self._check_required_migration_flags(
+ block_migration_flags, block_config_name)
+
+ (live_migration_flags,
+ block_migration_flags) = self._check_block_migration_flags(
+ live_migration_flags, block_migration_flags)
+
+ live_migration_flags = self._handle_live_migration_tunnelled(
+ live_migration_flags, live_config_name)
+ block_migration_flags = self._handle_live_migration_tunnelled(
+ block_migration_flags, block_config_name)
+
+ self._live_migration_flags = live_migration_flags
+ self._block_migration_flags = block_migration_flags
# TODO(sahid): This method is targeted for removal when the tests
# have been updated to avoid its use
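Taken together, the flag string from the (now deprecated) options is reduced to a bitmask and then normalized by the three checks above. A self-contained sketch with stand-in constants; the real values come from the libvirt python bindings, so treat the numbers as illustrative:

FLAGS = {
    'VIR_MIGRATE_LIVE': 1,
    'VIR_MIGRATE_PEER2PEER': 2,
    'VIR_MIGRATE_TUNNELLED': 4,
    'VIR_MIGRATE_UNDEFINE_SOURCE': 16,
}

def str2sum(str_val):
    # Same shape as the inner helper above: OR together every known
    # flag name; the real helper logs a warning for unknown names,
    # here they simply contribute nothing.
    logical_sum = 0
    for s in (i.strip() for i in str_val.split(',') if i):
        logical_sum |= FLAGS.get(s, 0)
    return logical_sum

conf = ('VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
        'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED')
assert str2sum(conf) == 1 | 2 | 4 | 16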
@@ -681,6 +836,21 @@ class LibvirtDriver(driver.ComputeDriver):
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
+ @staticmethod
+ def _live_migration_uri(dest):
+ # Only Xen and QEMU support live migration, see
+ # https://libvirt.org/migration.html#scenarios for reference
+ uris = {
+ 'kvm': 'qemu+tcp://%s/system',
+ 'qemu': 'qemu+tcp://%s/system',
+ 'xen': 'xenmigr://%s/system',
+ }
+ virt_type = CONF.libvirt.virt_type
+ uri = CONF.libvirt.live_migration_uri or uris.get(virt_type)
+ if uri is None:
+ raise exception.LiveMigrationURINotAvailable(virt_type=virt_type)
+ return uri % dest
+
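Stripped of the driver plumbing, the lookup is a table plus an operator override; a sketch in which ValueError stands in for Nova's LiveMigrationURINotAvailable:

URIS = {
    'kvm': 'qemu+tcp://%s/system',
    'qemu': 'qemu+tcp://%s/system',
    'xen': 'xenmigr://%s/system',
}

def live_migration_uri(virt_type, dest, override=None):
    # An operator-supplied live_migration_uri wins; otherwise fall
    # back to the per-virt-type default, failing for anything else.
    uri = override or URIS.get(virt_type)
    if uri is None:
        raise ValueError('no live migration URI for %s' % virt_type)
    return uri % dest

assert live_migration_uri('kvm', 'dst') == 'qemu+tcp://dst/system'
assert live_migration_uri('xen', 'dst') == 'xenmigr://dst/system'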
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
try:
@@ -1076,6 +1246,17 @@ class LibvirtDriver(driver.ComputeDriver):
utils.execute('rm', '-rf', target, delay_on_retry=True,
attempts=5)
+ backend = self.image_backend.image(instance, 'disk')
+ # TODO(nic): Set ignore_errors=False in a future release.
+ # It is set to True here to avoid any upgrade issues surrounding
+ # instances being in pending resize state when the software is updated;
+ # in that case there will be no snapshot to remove. Once it can be
+ # reasonably assumed that no such instances exist in the wild
+ # anymore, it should be set back to False (the default) so it will
+ # throw errors, like it should.
+ backend.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
+ ignore_errors=True)
+
if instance.host != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
@@ -1123,8 +1304,6 @@ class LibvirtDriver(driver.ComputeDriver):
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
- image_meta = objects.ImageMeta.from_instance(instance)
-
guest = self._host.get_guest(instance)
disk_dev = mountpoint.rpartition("/")[2]
@@ -1149,7 +1328,7 @@ class LibvirtDriver(driver.ComputeDriver):
raise exception.InvalidHypervisorType(msg)
disk_info = blockinfo.get_info_from_bdm(
- instance, CONF.libvirt.virt_type, image_meta, bdm)
+ instance, CONF.libvirt.virt_type, instance.image_meta, bdm)
self._connect_volume(connection_info, disk_info)
conf = self._get_volume_config(connection_info, disk_info)
self._set_cache_mode(conf)
@@ -1255,14 +1434,13 @@ class LibvirtDriver(driver.ComputeDriver):
guest = self._host.get_guest(instance)
xml = guest.get_xml_desc()
except exception.InstanceNotFound:
- image_meta = objects.ImageMeta.from_instance(instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
- image_meta,
+ instance.image_meta,
block_device_info)
xml = self._get_guest_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
- image_meta,
+ instance.image_meta,
block_device_info=block_device_info)
return xml
@@ -1312,7 +1490,6 @@ class LibvirtDriver(driver.ComputeDriver):
self._disconnect_volume(connection_info, disk_dev)
def attach_interface(self, instance, image_meta, vif):
- image_meta = objects.ImageMeta.from_dict(image_meta)
guest = self._host.get_guest(instance)
self.vif_driver.plug(instance, vif)
@@ -1333,9 +1510,9 @@ class LibvirtDriver(driver.ComputeDriver):
instance_uuid=instance.uuid)
def detach_interface(self, instance, vif):
- image_meta = objects.ImageMeta.from_instance(instance)
guest = self._host.get_guest(instance)
- cfg = self.vif_driver.get_config(instance, vif, image_meta,
+ cfg = self.vif_driver.get_config(instance, vif,
+ instance.image_meta,
instance.flavor,
CONF.libvirt.virt_type, self._host)
try:
@@ -1350,10 +1527,28 @@ class LibvirtDriver(driver.ComputeDriver):
"instance disappeared."),
instance=instance)
else:
- LOG.error(_LE('detaching network adapter failed.'),
- instance=instance, exc_info=True)
- raise exception.InterfaceDetachFailed(
- instance_uuid=instance.uuid)
+ # NOTE(mriedem): When deleting an instance and using Neutron,
+ # we can be racing against Neutron deleting the port and
+ # sending the vif-deleted event which then triggers a call to
+ # detach the interface, so we might have failed because the
+ # network device no longer exists. Libvirt will fail with
+ # "operation failed: no matching network device was found"
+ # which unfortunately does not have a unique error code so we
+ # need to look up the interface by MAC and if it's not found
+ # then we can just log it as a warning rather than tracing an
+ # error.
+ mac = vif.get('address')
+ interface = guest.get_interface_by_mac(mac)
+ if interface:
+ LOG.error(_LE('detaching network adapter failed.'),
+ instance=instance, exc_info=True)
+ raise exception.InterfaceDetachFailed(
+ instance_uuid=instance.uuid)
+
+ # The interface is gone so just log it as a warning.
+ LOG.warning(_LW('Detaching interface %(mac)s failed because '
+ 'the device is no longer found on the guest.'),
+ {'mac': mac}, instance=instance)
def _create_snapshot_metadata(self, image_meta, instance,
img_fmt, snp_name):
@@ -1399,8 +1594,6 @@ class LibvirtDriver(driver.ComputeDriver):
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
- image_meta = objects.ImageMeta.from_instance(instance)
-
snapshot = self._image_api.get(context, image_id)
# source_format is an on-disk format
@@ -1425,7 +1618,7 @@ class LibvirtDriver(driver.ComputeDriver):
if image_format == 'lvm' or image_format == 'rbd':
image_format = 'raw'
- metadata = self._create_snapshot_metadata(image_meta,
+ metadata = self._create_snapshot_metadata(instance.image_meta,
instance,
image_format,
snapshot['name'])
@@ -1490,48 +1683,86 @@ class LibvirtDriver(driver.ComputeDriver):
instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
- snapshot_directory = CONF.libvirt.snapshots_directory
- fileutils.ensure_tree(snapshot_directory)
- with utils.tempdir(dir=snapshot_directory) as tmpdir:
- try:
- out_path = os.path.join(tmpdir, snapshot_name)
- if live_snapshot:
- # NOTE(xqueralt): libvirt needs o+x in the temp directory
- os.chmod(tmpdir, 0o701)
- self._live_snapshot(context, instance, guest, disk_path,
- out_path, source_format, image_format,
- image_meta)
- else:
- snapshot_backend.snapshot_extract(out_path, image_format)
- finally:
- guest = None
- # NOTE(dkang): because previous managedSave is not called
- # for LXC, _create_domain must not be called.
- if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
- if state == power_state.RUNNING:
- guest = self._create_domain(domain=virt_dom)
- elif state == power_state.PAUSED:
- guest = self._create_domain(
- domain=virt_dom, pause=True)
-
- if guest is not None:
- self._attach_pci_devices(guest,
- pci_manager.get_instance_pci_devs(instance))
- self._attach_sriov_ports(context, instance, guest)
- LOG.info(_LI("Snapshot extracted, beginning image upload"),
- instance=instance)
-
- # Upload that image to the image service
+ try:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
- expected_state=task_states.IMAGE_PENDING_UPLOAD)
- with libvirt_utils.file_open(out_path) as image_file:
- self._image_api.update(context,
- image_id,
- metadata,
- image_file)
- LOG.info(_LI("Snapshot image upload complete"),
- instance=instance)
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)
+ metadata['location'] = snapshot_backend.direct_snapshot(
+ context, snapshot_name, image_format, image_id,
+ instance.image_ref)
+ self._snapshot_domain(context, live_snapshot, virt_dom, state,
+ instance)
+ self._image_api.update(context, image_id, metadata,
+ purge_props=False)
+ except (NotImplementedError, exception.ImageUnacceptable,
+ exception.Forbidden) as e:
+ if not isinstance(e, NotImplementedError):
+ LOG.warning(_LW('Performing standard snapshot because direct '
+ 'snapshot failed: %(error)s'), {'error': e})
+ failed_snap = metadata.pop('location', None)
+ if failed_snap:
+ failed_snap = {'url': str(failed_snap)}
+ snapshot_backend.cleanup_direct_snapshot(failed_snap,
+ also_destroy_volume=True,
+ ignore_errors=True)
+ update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD,
+ expected_state=task_states.IMAGE_UPLOADING)
+
+ snapshot_directory = CONF.libvirt.snapshots_directory
+ fileutils.ensure_tree(snapshot_directory)
+ with utils.tempdir(dir=snapshot_directory) as tmpdir:
+ try:
+ out_path = os.path.join(tmpdir, snapshot_name)
+ if live_snapshot:
+ # NOTE(xqueralt): libvirt needs o+x in the tempdir
+ os.chmod(tmpdir, 0o701)
+ self._live_snapshot(context, instance, guest,
+ disk_path, out_path, source_format,
+ image_format, instance.image_meta)
+ else:
+ snapshot_backend.snapshot_extract(out_path,
+ image_format)
+ finally:
+ self._snapshot_domain(context, live_snapshot, virt_dom,
+ state, instance)
+ LOG.info(_LI("Snapshot extracted, beginning image upload"),
+ instance=instance)
+
+ # Upload that image to the image service
+ update_task_state(task_state=task_states.IMAGE_UPLOADING,
+ expected_state=task_states.IMAGE_PENDING_UPLOAD)
+ with libvirt_utils.file_open(out_path) as image_file:
+ self._image_api.update(context,
+ image_id,
+ metadata,
+ image_file)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ LOG.exception(_LE("Failed to snapshot image"))
+ failed_snap = metadata.pop('location', None)
+ if failed_snap:
+ failed_snap = {'url': str(failed_snap)}
+ snapshot_backend.cleanup_direct_snapshot(
+ failed_snap, also_destroy_volume=True,
+ ignore_errors=True)
+
+ LOG.info(_LI("Snapshot image upload complete"), instance=instance)
+
+ def _snapshot_domain(self, context, live_snapshot, virt_dom, state,
+ instance):
+ guest = None
+ # NOTE(dkang): because previous managedSave is not called
+ # for LXC, _create_domain must not be called.
+ if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
+ if state == power_state.RUNNING:
+ guest = self._create_domain(domain=virt_dom)
+ elif state == power_state.PAUSED:
+ guest = self._create_domain(domain=virt_dom, pause=True)
+
+ if guest is not None:
+ self._attach_pci_devices(
+ guest, pci_manager.get_instance_pci_devs(instance))
+ self._attach_sriov_ports(context, instance, guest)
def _can_set_admin_password(self, image_meta):
if (CONF.libvirt.virt_type not in ('kvm', 'qemu') or
@@ -1543,11 +1774,10 @@ class LibvirtDriver(driver.ComputeDriver):
raise exception.QemuGuestAgentNotEnabled()
def set_admin_password(self, instance, new_pass):
- image_meta = objects.ImageMeta.from_instance(instance)
- self._can_set_admin_password(image_meta)
+ self._can_set_admin_password(instance.image_meta)
guest = self._host.get_guest(instance)
- user = image_meta.properties.get("os_admin_user")
+ user = instance.image_meta.properties.get("os_admin_user")
if not user:
if instance.os_type == "windows":
user = "Administrator"
@@ -1592,12 +1822,10 @@ class LibvirtDriver(driver.ComputeDriver):
The qemu-guest-agent must be setup to execute fsfreeze.
"""
- image_meta = objects.ImageMeta.from_dict(image_meta)
self._set_quiesced(context, instance, image_meta, True)
def unquiesce(self, context, instance, image_meta):
"""Thaw the guest filesystems after snapshot."""
- image_meta = objects.ImageMeta.from_dict(image_meta)
self._set_quiesced(context, instance, image_meta, False)
def _live_snapshot(self, context, instance, guest, disk_path, out_path,
@@ -1628,10 +1856,15 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
- require_quiesce = image_meta.properties.get(
- 'os_require_quiesce', False)
- if require_quiesce:
- self.quiesce(context, instance, image_meta)
+ quiesced = False
+ try:
+ self._set_quiesced(context, instance, image_meta, True)
+ quiesced = True
+ except exception.NovaException as err:
+ if image_meta.properties.get('os_require_quiesce', False):
+ raise
+ LOG.info(_LI('Skipping quiescing instance: %(reason)s.'),
+ {'reason': err}, instance=instance)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
@@ -1652,8 +1885,8 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._host.write_instance_config(xml)
- if require_quiesce:
- self.unquiesce(context, instance, image_meta)
+ if quiesced:
+ self._set_quiesced(context, instance, image_meta, False)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
@@ -2193,7 +2426,7 @@ class LibvirtDriver(driver.ComputeDriver):
# call takes to return.
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance, 'all'))
- for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
+ for x in range(CONF.libvirt.wait_soft_reboot_seconds):
guest = self._host.get_guest(instance)
state = guest.get_power_state(self._host)
@@ -2232,14 +2465,12 @@ class LibvirtDriver(driver.ComputeDriver):
self._destroy(instance)
# Convert the system metadata to image metadata
- image_meta = objects.ImageMeta.from_instance(instance)
-
instance_dir = libvirt_utils.get_instance_path(instance)
fileutils.ensure_tree(instance_dir)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
- image_meta,
+ instance.image_meta,
block_device_info)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
@@ -2248,7 +2479,7 @@ class LibvirtDriver(driver.ComputeDriver):
# does we need to (re)generate the xml after the images
# are in place.
xml = self._get_guest_xml(context, instance, network_info, disk_info,
- image_meta,
+ instance.image_meta,
block_device_info=block_device_info,
write_to_disk=True)
@@ -2408,10 +2639,8 @@ class LibvirtDriver(driver.ComputeDriver):
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
- image_meta = objects.ImageMeta.from_instance(instance)
-
disk_info = blockinfo.get_disk_info(
- CONF.libvirt.virt_type, instance, image_meta,
+ CONF.libvirt.virt_type, instance, instance.image_meta,
block_device_info=block_device_info)
xml = self._get_existing_domain_xml(instance, network_info,
@@ -2464,10 +2693,8 @@ class LibvirtDriver(driver.ComputeDriver):
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
rescue_image_id = None
- if image_meta is not None:
- image_meta = objects.ImageMeta.from_dict(image_meta)
- if image_meta.obj_attr_is_set("id"):
- rescue_image_id = image_meta.id
+ if image_meta.obj_attr_is_set("id"):
+ rescue_image_id = image_meta.id
rescue_images = {
'image_id': (rescue_image_id or
@@ -2520,7 +2747,6 @@ class LibvirtDriver(driver.ComputeDriver):
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
- image_meta = objects.ImageMeta.from_dict(image_meta)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
@@ -2736,18 +2962,11 @@ class LibvirtDriver(driver.ComputeDriver):
return hasDirectIO
@staticmethod
- def _create_local(target, local_size, unit='G',
- fs_format=None, label=None):
- """Create a blank image of specified size."""
-
- libvirt_utils.create_image('raw', target,
- '%d%c' % (local_size, unit))
-
- def _create_ephemeral(self, target, ephemeral_size,
+ def _create_ephemeral(target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
max_size=None, context=None, specified_fs=None):
if not is_block_dev:
- self._create_local(target, ephemeral_size)
+ libvirt_utils.create_image('raw', target, '%dG' % ephemeral_size)
# Run as root only for block devices.
disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
@@ -2943,6 +3162,8 @@ class LibvirtDriver(driver.ComputeDriver):
size = None
backend = image('disk')
+ if instance.task_state == task_states.RESIZE_FINISH:
+ backend.create_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
if backend.SUPPORTS_CLONE:
def clone_fallback_to_fetch(*args, **kwargs):
try:
@@ -3162,12 +3383,11 @@ class LibvirtDriver(driver.ComputeDriver):
return
if self._has_sriov_port(network_info):
- image_meta = objects.ImageMeta.from_instance(instance)
for vif in network_info:
- if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
+ if vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV:
cfg = self.vif_driver.get_config(instance,
vif,
- image_meta,
+ instance.image_meta,
instance.flavor,
CONF.libvirt.virt_type,
self._host)
@@ -3193,15 +3413,25 @@ class LibvirtDriver(driver.ComputeDriver):
dev=network_info)
image_meta = objects.ImageMeta.from_instance(instance)
- for vif in network_info:
- if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
- cfg = self.vif_driver.get_config(instance,
- vif,
- image_meta,
- instance.flavor,
- CONF.libvirt.virt_type,
- self._host)
- guest.detach_device(cfg, live=True)
+ sriov_pci_addresses = [
+ self.vif_driver.get_config(instance,
+ vif,
+ image_meta,
+ instance.flavor,
+ CONF.libvirt.virt_type,
+ self._host).source_dev
+ for vif in network_info
+ if vif['vnic_type'] in network_model.VNIC_TYPES_SRIOV
+ ]
+
+ # use detach_pci_devices to avoid failure in case of
+ # multiple guest SRIOV ports with the same MAC
+ # (protection use-case, ports are on different physical
+ # interfaces)
+ pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
+ sriov_devs = [pci_dev for pci_dev in pci_devs
+ if pci_dev.address in sriov_pci_addresses]
+ self._detach_pci_devices(guest, sriov_devs)
def _set_host_enabled(self, enabled,
disable_reason=DISABLE_REASON_UNDEFINED):
@@ -3999,7 +4229,7 @@ class LibvirtDriver(driver.ComputeDriver):
video.type = 'xen'
elif CONF.libvirt.virt_type == 'parallels':
video.type = 'vga'
- elif guestarch in (arch.PPC, arch.PPC64):
+ elif guestarch in (arch.PPC, arch.PPC64, arch.PPC64LE):
# NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default
# so use 'vga' instead when running on Power hardware.
video.type = 'vga'
@@ -4119,6 +4349,14 @@ class LibvirtDriver(driver.ComputeDriver):
return flavor
return instance.flavor
+ def _has_uefi_support(self):
+ # This means that the host can support UEFI booting for guests
+ supported_archs = [arch.X86_64, arch.AARCH64]
+ caps = self._host.get_capabilities()
+ return ((caps.host.cpu.arch in supported_archs) and
+ self._host.has_min_version(MIN_LIBVIRT_UEFI_VERSION) and
+ os.path.exists(DEFAULT_UEFI_LOADER_PATH[caps.host.cpu.arch]))
+
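The check reduces to three host facts; a condensed sketch with the inputs made explicit (has_uefi_support here is a stand-in for the method above, and version tuples compare lexicographically):

import os

SUPPORTED_UEFI_ARCHS = ('x86_64', 'aarch64')
LOADER_PATHS = {
    'x86_64': '/usr/share/OVMF/OVMF_CODE.fd',
    'aarch64': '/usr/share/AAVMF/AAVMF_CODE.fd',
}

def has_uefi_support(host_arch, libvirt_version, min_version=(1, 2, 9)):
    # UEFI needs a supported architecture, a new-enough libvirt, and
    # the OVMF/AAVMF firmware actually installed on the host.
    return (host_arch in SUPPORTED_UEFI_ARCHS
            and libvirt_version >= min_version
            and os.path.exists(LOADER_PATHS[host_arch]))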
def _configure_guest_by_virt_type(self, guest, virt_type, caps, instance,
image_meta, flavor, root_device_name):
if virt_type == "xen":
@@ -4128,6 +4366,20 @@ class LibvirtDriver(driver.ComputeDriver):
if caps.host.cpu.arch in (arch.I686, arch.X86_64):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
+ hw_firmware_type = image_meta.properties.get('hw_firmware_type')
+ if hw_firmware_type == fields.FirmwareType.UEFI:
+ if self._has_uefi_support():
+ global uefi_logged
+ if not uefi_logged:
+ LOG.warn(_LW("uefi support is without some kind of "
+ "functional testing and therefore "
+ "considered experimental."))
+ uefi_logged = True
+ guest.os_loader = DEFAULT_UEFI_LOADER_PATH[
+ caps.host.cpu.arch]
+ guest.os_loader_type = "pflash"
+ else:
+ raise exception.UEFINotSupported()
guest.os_mach_type = self._get_machine_type(image_meta, caps)
if image_meta.properties.get('hw_boot_menu') is None:
guest.os_bootmenu = strutils.bool_from_string(
@@ -4613,7 +4865,6 @@ class LibvirtDriver(driver.ComputeDriver):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
- image_meta = objects.ImageMeta.from_instance(instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
@@ -4648,7 +4899,7 @@ class LibvirtDriver(driver.ComputeDriver):
network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
- with self._lxc_disk_handler(instance, image_meta,
+ with self._lxc_disk_handler(instance, instance.image_meta,
block_device_info, disk_info):
guest = self._create_domain(
xml, pause=pause, power_on=power_on)
@@ -5114,16 +5365,13 @@ class LibvirtDriver(driver.ComputeDriver):
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
- def refresh_provider_fw_rules(self):
- self.firewall_driver.refresh_provider_fw_rules()
-
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
- :param nodename: will be put in PCI device
+ :param nodename: unused in this driver
:returns: dictionary containing resource info
"""
@@ -5223,11 +5471,7 @@ class LibvirtDriver(driver.ComputeDriver):
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
- :returns: a dict containing:
- :filename: name of the tmpfile under CONF.instances_path
- :block_migration: whether this is block migration
- :disk_over_commit: disk-over-commit factor on dest host
- :disk_available_mb: available disk space on dest host
+ :returns: a LibvirtLiveMigrateData object
"""
disk_available_mb = None
if block_migration:
@@ -5259,7 +5503,7 @@ class LibvirtDriver(driver.ComputeDriver):
:param context: security context
"""
- filename = dest_check_data["filename"]
+ filename = dest_check_data.filename
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
@@ -5274,14 +5518,14 @@ class LibvirtDriver(driver.ComputeDriver):
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:param block_device_info: result of _get_instance_block_device_info
- :returns: a dict containing migration info
+ :returns: a LibvirtLiveMigrateData object
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
source = CONF.host
if not isinstance(dest_check_data, migrate_data_obj.LiveMigrateData):
- md_obj = migrate_data_obj.LibvirtLiveMigrateData()
+ md_obj = objects.LibvirtLiveMigrateData()
md_obj.from_legacy_dict(dest_check_data)
dest_check_data = md_obj
@@ -5311,18 +5555,27 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info)
if block_device_info:
bdm = block_device_info.get('block_device_mapping')
- # NOTE(stpierre): if this instance has mapped volumes,
- # we can't do a block migration, since that will
- # result in volumes being copied from themselves to
- # themselves, which is a recipe for disaster.
- if bdm and len(bdm):
- LOG.error(_LE('Cannot block migrate instance %s with '
- 'mapped volumes'),
- instance.uuid, instance=instance)
- msg = (_('Cannot block migrate instance %s with mapped '
- 'volumes') % instance.uuid)
+ # NOTE(pkoniszewski): libvirt from version 1.2.17 upwards
+ # supports selective block device migration. It means that it
+ # is possible to define a subset of block devices to be copied
+ # during migration. If they are not specified, block devices
+ # won't be migrated. However, it does not work when live
+ # migration is tunnelled through libvirt.
+ if bdm and not self._host.has_min_version(
+ MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
+ # NOTE(stpierre): if this instance has mapped volumes,
+ # we can't do a block migration, since that will result
+ # in volumes being copied from themselves to themselves,
+ # which is a recipe for disaster.
+ ver = ".".join([str(x) for x in
+ MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION])
+ msg = (_('Cannot block migrate instance %(uuid)s with'
+ ' mapped volumes. Selective block device'
+ ' migration feature requires libvirt version'
+ ' %(libvirt_ver)s') %
+ {'uuid': instance.uuid, 'libvirt_ver': ver})
+ LOG.error(msg, instance=instance)
raise exception.MigrationPreCheckError(reason=msg)
-
elif not (dest_check_data.is_shared_block_storage or
dest_check_data.is_shared_instance_path or
(booted_from_volume and not has_local_disk)):
@@ -5549,7 +5802,7 @@ class LibvirtDriver(driver.ComputeDriver):
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
- :param migrate_data: implementation specific params
+ :param migrate_data: a LibvirtLiveMigrateData object
"""
@@ -5700,7 +5953,8 @@ class LibvirtDriver(driver.ComputeDriver):
raise exception.MigrationError(reason=msg)
def _live_migration_operation(self, context, instance, dest,
- block_migration, migrate_data, dom):
+ block_migration, migrate_data, dom,
+ device_names):
"""Invoke the live migration operation
:param context: security context
@@ -5709,8 +5963,10 @@ class LibvirtDriver(driver.ComputeDriver):
instance object that is migrated.
:param dest: destination host
:param block_migration: if true, do block migration.
- :param migrate_data: implementation specific params
+ :param migrate_data: a LibvirtLiveMigrateData object
:param dom: the libvirt domain object
+ :param device_names: list of device names that are being migrated with
+ instance
This method is intended to be run in a background thread and will
block that thread until the migration finishes or fails.
@@ -5720,19 +5976,9 @@ class LibvirtDriver(driver.ComputeDriver):
try:
if block_migration:
- flaglist = CONF.libvirt.block_migration_flag.split(',')
+ migration_flags = self._block_migration_flags
else:
- flaglist = CONF.libvirt.live_migration_flag.split(',')
-
- def getflag(s):
- try:
- return getattr(libvirt, s)
- except AttributeError:
- msg = _("Unknown libvirt live migration flag '%s'") % s
- raise exception.Invalid(msg)
-
- flagvals = [getflag(x.strip()) for x in flaglist]
- logical_sum = six.moves.reduce(lambda x, y: x | y, flagvals)
+ migration_flags = self._live_migration_flags
listen_addrs = {}
if 'graphics_listen_addr_vnc' in migrate_data:
@@ -5742,6 +5988,8 @@ class LibvirtDriver(driver.ComputeDriver):
listen_addrs['spice'] = str(
migrate_data.graphics_listen_addr_spice)
serial_listen_addr = migrate_data.serial_listen_addr
+ if migrate_data.target_connect_addr is not None:
+ dest = migrate_data.target_connect_addr
migratable_flag = getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE',
None)
@@ -5752,8 +6000,8 @@ class LibvirtDriver(driver.ComputeDriver):
# check_can_live_migrate_destination/source phase
self._check_graphics_addresses_can_live_migrate(listen_addrs)
self._verify_serial_console_is_disabled()
- dom.migrateToURI(CONF.libvirt.live_migration_uri % dest,
- logical_sum,
+ dom.migrateToURI(self._live_migration_uri(dest),
+ migration_flags,
None,
CONF.libvirt.live_migration_bandwidth)
else:
@@ -5763,12 +6011,25 @@ class LibvirtDriver(driver.ComputeDriver):
listen_addrs,
serial_listen_addr)
try:
- dom.migrateToURI2(CONF.libvirt.live_migration_uri % dest,
- None,
- new_xml_str,
- logical_sum,
- None,
- CONF.libvirt.live_migration_bandwidth)
+ if self._host.has_min_version(
+ MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
+ params = {
+ 'bandwidth': CONF.libvirt.live_migration_bandwidth,
+ 'destination_xml': new_xml_str,
+ 'migrate_disks': device_names,
+ }
+ dom.migrateToURI3(
+ self._live_migration_uri(dest),
+ params,
+ migration_flags)
+ else:
+ dom.migrateToURI2(
+ self._live_migration_uri(dest),
+ None,
+ new_xml_str,
+ migration_flags,
+ None,
+ CONF.libvirt.live_migration_bandwidth)
except libvirt.libvirtError as ex:
# NOTE(mriedem): There is a bug in older versions of
# libvirt where the VIR_DOMAIN_XML_MIGRATABLE flag causes
@@ -5791,8 +6052,8 @@ class LibvirtDriver(driver.ComputeDriver):
listen_addrs)
self._verify_serial_console_is_disabled()
dom.migrateToURI(
- CONF.libvirt.live_migration_uri % dest,
- logical_sum,
+ self._live_migration_uri(dest),
+ migration_flags,
None,
CONF.libvirt.live_migration_bandwidth)
else:
@@ -5893,37 +6154,48 @@ class LibvirtDriver(driver.ComputeDriver):
for i in range(steps + 1):
yield (int(delay * i), int(offset + base ** i))
- def _live_migration_copy_disk_paths(self, guest):
+ def _live_migration_copy_disk_paths(self, context, instance, guest):
'''Get list of disks to copy during migration
+ :param context: security context
+ :param instance: the instance being migrated
:param guest: the Guest instance being migrated
Get the list of disks to copy during migration.
- :returns: a list of local disk paths to copy
+ :returns: a list of local source paths and a list of device names to
+ copy
'''
- disks = []
+ disk_paths = []
+ device_names = []
+ block_devices = []
+
+ # TODO(pkoniszewski): Remove this if-statement when we bump min libvirt
+ # version to >= 1.2.17
+ if self._host.has_min_version(
+ MIN_LIBVIRT_BLOCK_LM_WITH_VOLUMES_VERSION):
+ bdm_list = objects.BlockDeviceMappingList.get_by_instance_uuid(
+ context, instance.uuid)
+ block_device_info = driver.get_block_device_info(instance,
+ bdm_list)
+
+ block_device_mappings = driver.block_device_info_get_mapping(
+ block_device_info)
+ for bdm in block_device_mappings:
+ device_name = str(bdm['mount_device'].rsplit('/', 1)[1])
+ block_devices.append(device_name)
+
for dev in guest.get_all_disks():
- # TODO(berrange) This is following the current
- # (stupid) default logic in libvirt for selecting
- # which disks are copied. In the future, when we
- # can use a libvirt which accepts a list of disks
- # to copy, we will need to adjust this to use a
- # different rule.
- #
- # Our future goal is that a disk needs to be copied
- # if it is a non-cinder volume which is not backed
- # by shared storage. eg it may be an LVM block dev,
- # or a raw/qcow2 file on a local filesystem. We
- # never want to copy disks on NFS, or RBD or any
- # cinder volume
if dev.readonly or dev.shareable:
continue
if dev.source_type not in ["file", "block"]:
continue
- disks.append(dev.source_path)
- return disks
+ if dev.target_dev in block_devices:
+ continue
+ disk_paths.append(dev.source_path)
+ device_names.append(dev.target_dev)
+ return (disk_paths, device_names)
def _live_migration_data_gb(self, instance, disk_paths):
'''Calculate total amount of data to be transferred
@@ -5972,6 +6244,7 @@ class LibvirtDriver(driver.ComputeDriver):
completion_timeout = int(
CONF.libvirt.live_migration_completion_timeout * data_gb)
progress_timeout = CONF.libvirt.live_migration_progress_timeout
+ migration = migrate_data.migration
n = 0
start = time.time()
@@ -6089,6 +6362,16 @@ class LibvirtDriver(driver.ComputeDriver):
# admins see slow running migration operations
# when debug logs are off.
if (n % 10) == 0:
+ # Note(Shaohe Feng): update the migration record in
+ # the DB every 5 secs; that keeps the instance and
+ # migration objects in sync.
+ migration.memory_total = info.memory_total
+ migration.memory_processed = info.memory_processed
+ migration.memory_remaining = info.memory_remaining
+ migration.disk_total = info.disk_total
+ migration.disk_processed = info.disk_processed
+ migration.disk_remaining = info.disk_remaining
+ migration.save()
# Ignoring memory_processed, as due to repeated
# dirtying of data, this can be way larger than
# memory_total. Best to just look at what's
@@ -6171,7 +6454,7 @@ class LibvirtDriver(driver.ComputeDriver):
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
- :param migrate_data: implementation specific params
+ :param migrate_data: a LibvirtLiveMigrateData object
This fires off a new thread to run the blocking migration
operation, and then this thread monitors the progress of
@@ -6181,8 +6464,10 @@ class LibvirtDriver(driver.ComputeDriver):
guest = self._host.get_guest(instance)
disk_paths = []
+ device_names = []
if block_migration:
- disk_paths = self._live_migration_copy_disk_paths(guest)
+ disk_paths, device_names = self._live_migration_copy_disk_paths(
+ context, instance, guest)
# TODO(sahid): We are converting all calls from a
# virDomain object to use nova.virt.libvirt.Guest.
@@ -6192,7 +6477,8 @@ class LibvirtDriver(driver.ComputeDriver):
opthread = utils.spawn(self._live_migration_operation,
context, instance, dest,
block_migration,
- migrate_data, dom)
+ migrate_data, dom,
+ device_names)
finish_event = eventlet.event.Event()
@@ -6220,6 +6506,12 @@ class LibvirtDriver(driver.ComputeDriver):
LOG.debug("Live migration monitoring is all done",
instance=instance)
+ def live_migration_force_complete(self, instance):
+ # NOTE(pkoniszewski): currently only pause during live migration is
+ # supported to force live migration to complete, so just try to pause
+ # the instance
+ self.pause(instance)
+
def _try_fetch_image(self, context, path, image_id, instance,
fallback_from_host=None):
try:
@@ -6305,8 +6597,6 @@ class LibvirtDriver(driver.ComputeDriver):
is_shared_instance_path = migrate_data.is_shared_instance_path
is_block_migration = migrate_data.block_migration
- image_meta = objects.ImageMeta.from_instance(instance)
-
if configdrive.required_by(instance):
# NOTE(sileht): configdrive is stored into the block storage
# kvm is a block device, live migration will work
@@ -6342,20 +6632,21 @@ class LibvirtDriver(driver.ComputeDriver):
context, instance, instance_dir, disk_info,
fallback_from_host=instance.host)
- if not (is_block_migration or is_shared_instance_path):
- # NOTE(angdraug): when block storage is shared between source and
- # destination and instance path isn't (e.g. volume backed or rbd
- # backed instance), instance path on destination has to be prepared
+ if not is_block_migration:
+ # NOTE(angdraug): when block storage is shared between source
+ # and destination and instance path isn't (e.g. volume backed
+ # or rbd backed instance), instance path on destination has to
+ # be prepared
- # Touch the console.log file, required by libvirt.
- console_file = self._get_console_log_path(instance)
- LOG.debug('Touch instance console log: %s', console_file,
- instance=instance)
- libvirt_utils.file_open(console_file, 'a').close()
+ # Touch the console.log file, required by libvirt.
+ console_file = self._get_console_log_path(instance)
+ LOG.debug('Touch instance console log: %s', console_file,
+ instance=instance)
+ libvirt_utils.file_open(console_file, 'a').close()
- # if image has kernel and ramdisk, just download
- # following normal way.
- self._fetch_instance_kernel_ramdisk(context, instance)
+ # if image has kernel and ramdisk, just download
+ # following normal way.
+ self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
@@ -6368,7 +6659,8 @@ class LibvirtDriver(driver.ComputeDriver):
for bdm in block_device_mapping:
connection_info = bdm['connection_info']
disk_info = blockinfo.get_info_from_bdm(
- instance, CONF.libvirt.virt_type, image_meta, bdm)
+ instance, CONF.libvirt.virt_type,
+ instance.image_meta, bdm)
self._connect_volume(connection_info, disk_info)
# We call plug_vifs before the compute manager calls
@@ -6401,12 +6693,16 @@ class LibvirtDriver(driver.ComputeDriver):
migrate_data.graphics_listen_addr_spice = CONF.spice.server_listen
migrate_data.serial_listen_addr = \
CONF.serial_console.proxyclient_address
+ # Store live_migration_inbound_addr
+ migrate_data.target_connect_addr = \
+ CONF.libvirt.live_migration_inbound_addr
for vol in block_device_mapping:
connection_info = vol['connection_info']
if connection_info.get('serial'):
disk_info = blockinfo.get_info_from_bdm(
- instance, CONF.libvirt.virt_type, image_meta, vol)
+ instance, CONF.libvirt.virt_type,
+ instance.image_meta, vol)
bdmi = objects.LibvirtLiveMigrateBDMInfo()
bdmi.serial = connection_info['serial']
@@ -6566,15 +6862,14 @@ class LibvirtDriver(driver.ComputeDriver):
:param block_migration: if true, post operation of block_migration.
"""
# Define migrated instance, otherwise, suspend/destroy does not work.
- image_meta = objects.ImageMeta.from_instance(instance)
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance,
- image_meta, block_device_info)
+ instance.image_meta, block_device_info)
xml = self._get_guest_xml(context, instance,
network_info, disk_info,
- image_meta,
+ instance.image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._host.write_instance_config(xml)
@@ -6971,8 +7266,6 @@ class LibvirtDriver(driver.ComputeDriver):
block_device_info=None, power_on=True):
LOG.debug("Starting finish_migration", instance=instance)
- image_meta = objects.ImageMeta.from_dict(image_meta)
-
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
@@ -7041,14 +7334,31 @@ class LibvirtDriver(driver.ComputeDriver):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
- image_meta = objects.ImageMeta.from_instance(instance)
+ backend = self.image_backend.image(instance, 'disk')
+ # Once we rollback, the snapshot is no longer needed, so remove it
+ # TODO(nic): Remove the try/except/finally in a future release
+ # To avoid any upgrade issues surrounding instances being in pending
+ # resize state when the software is updated, this portion of the
+ # method logs exceptions rather than failing on them. Once it can be
+ # reasonably assumed that no such instances exist in the wild
+ # anymore, the try/except/finally should be removed,
+ # and ignore_errors should be set back to False (the default) so
+ # that problems throw errors, like they should.
+ try:
+ backend.rollback_to_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
+ except exception.SnapshotNotFound:
+ LOG.warning(_LW("Failed to rollback snapshot (%s)"),
+ libvirt_utils.RESIZE_SNAPSHOT_NAME)
+ finally:
+ backend.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME,
+ ignore_errors=True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
- image_meta,
+ instance.image_meta,
block_device_info)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
- image_meta,
+ instance.image_meta,
block_device_info=block_device_info)
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
@@ -7301,7 +7611,6 @@ class LibvirtDriver(driver.ComputeDriver):
return False
def default_root_device_name(self, instance, image_meta, root_bdm):
- image_meta = objects.ImageMeta.from_dict(image_meta)
disk_bus = blockinfo.get_disk_bus_for_device_type(
instance, CONF.libvirt.virt_type, image_meta, "disk")
cdrom_bus = blockinfo.get_disk_bus_for_device_type(
@@ -7313,8 +7622,6 @@ class LibvirtDriver(driver.ComputeDriver):
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
- image_meta = objects.ImageMeta.from_instance(instance)
-
block_device_mapping = list(itertools.chain(*block_device_lists))
# NOTE(ndipanov): Null out the device names so that blockinfo code
# will assign them
@@ -7331,15 +7638,13 @@ class LibvirtDriver(driver.ComputeDriver):
nova_context.get_admin_context(),
instance,
block_device_info,
- image_meta)
+ instance.image_meta)
def get_device_name_for_instance(self, instance, bdms, block_device_obj):
- image_meta = objects.ImageMeta.from_instance(instance)
-
block_device_info = driver.get_block_device_info(instance, bdms)
instance_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance,
- image_meta, block_device_info=block_device_info)
+ instance.image_meta, block_device_info=block_device_info)
suggested_dev_name = block_device_obj.device_name
if suggested_dev_name is not None:
@@ -7350,7 +7655,7 @@ class LibvirtDriver(driver.ComputeDriver):
# only when it's actually not set on the bd object
block_device_obj.device_name = None
disk_info = blockinfo.get_info_from_bdm(
- instance, CONF.libvirt.virt_type, image_meta,
+ instance, CONF.libvirt.virt_type, instance.image_meta,
block_device_obj, mapping=instance_info['mapping'])
return block_device.prepend_dev(disk_info['dev'])
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index cbfaca461e..65a79bb8c3 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -45,15 +45,13 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
spoofing, IP spoofing, and ARP spoofing.
"""
- def __init__(self, virtapi, host, **kwargs):
+ def __init__(self, host, **kwargs):
"""Create an NWFilter firewall driver
- :param virtapi: nova.virt.virtapi.VirtAPI instance
:param host: nova.virt.libvirt.host.Host instance
:param kwargs: currently unused
"""
- super(NWFilterFirewall, self).__init__(virtapi)
global libvirt
if libvirt is None:
try:
@@ -324,10 +322,9 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
- def __init__(self, virtapi, execute=None, **kwargs):
+ def __init__(self, execute=None, **kwargs):
"""Create an IP tables firewall driver instance
- :param virtapi: nova.virt.virtapi.VirtAPI instance
:param execute: unused, pass None
:param kwargs: extra arguments
@@ -336,17 +333,12 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
class.
"""
- super(IptablesFirewallDriver, self).__init__(virtapi, **kwargs)
- self.nwfilter = NWFilterFirewall(virtapi, kwargs['host'])
+ super(IptablesFirewallDriver, self).__init__(**kwargs)
+ self.nwfilter = NWFilterFirewall(kwargs['host'])
def setup_basic_filtering(self, instance, network_info):
- """Set up provider rules and basic NWFilter."""
+ """Set up basic NWFilter."""
self.nwfilter.setup_basic_filtering(instance, network_info)
- if not self.basically_filtered:
- LOG.debug('iptables firewall: Setup Basic Filtering',
- instance=instance)
- self.refresh_provider_fw_rules()
- self.basically_filtered = True
def apply_instance_filter(self, instance, network_info):
"""No-op. Everything is done in prepare_instance_filter."""
diff --git a/nova/virt/libvirt/guest.py b/nova/virt/libvirt/guest.py
index c2a7cd7ca3..0576997df0 100644
--- a/nova/virt/libvirt/guest.py
+++ b/nova/virt/libvirt/guest.py
@@ -186,6 +186,22 @@ class Guest(object):
return interfaces
+ def get_interface_by_mac(self, mac):
+ """Lookup a LibvirtConfigGuestInterface by the MAC address.
+
+ :param mac: MAC address of the guest interface.
+ :type mac: str
+ :returns: nova.virt.libvirt.config.LibvirtConfigGuestInterface instance
+ if found, else None
+ """
+
+ if mac:
+ interfaces = self.get_all_devices(
+ vconfig.LibvirtConfigGuestInterface)
+ for interface in interfaces:
+ if interface.mac_addr == mac:
+ return interface
+
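A minimal usage sketch for this new helper (caller names are illustrative, not part of this change), assuming `guest` wraps a defined domain:

    iface = guest.get_interface_by_mac('52:54:00:12:34:56')
    if iface is not None:
        # e.g. hand the config back to detach_device() during interface detach
        guest.detach_device(iface)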
def get_vcpus_info(self):
"""Returns virtual cpus information of guest.
@@ -374,7 +390,7 @@ class Guest(object):
"""Returns information on Guest
:param host: a host.Host object with current
- connection. Unfortunatly we need to pass it
+ connection. Unfortunately we need to pass it
because of a workaround with < version 1.2.11
:returns list: [state, maxMem, memory, nrVirtCpu, cpuTime]
diff --git a/nova/virt/libvirt/host.py b/nova/virt/libvirt/host.py
index e2b0a05e74..c51cb12f29 100644
--- a/nova/virt/libvirt/host.py
+++ b/nova/virt/libvirt/host.py
@@ -63,7 +63,7 @@ LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
-native_Queue = patcher.original("queue" if six.PY3 else "Queue")
+native_Queue = patcher.original("Queue" if six.PY2 else "queue")
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
@@ -700,7 +700,7 @@ class Host(object):
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
- See method "list_instance_domains" for more informations.
+ See method "list_instance_domains" for more information.
:returns: list of Guest objects
"""
diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py
index 61cc03cca7..0dd71d3ea2 100644
--- a/nova/virt/libvirt/imagebackend.py
+++ b/nova/virt/libvirt/imagebackend.py
@@ -29,9 +29,10 @@ from oslo_utils import strutils
from oslo_utils import units
import six
+import nova.conf
from nova import exception
from nova.i18n import _
-from nova.i18n import _LE, _LI
+from nova.i18n import _LE, _LI, _LW
from nova import image
from nova import keymgr
from nova import utils
@@ -70,10 +71,9 @@ __imagebackend_opts = [
' format)'),
]
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.register_opts(__imagebackend_opts, 'libvirt')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
-CONF.import_opt('preallocate_images', 'nova.virt.driver')
CONF.import_opt('enabled', 'nova.compute.api',
group='ephemeral_storage_encryption')
CONF.import_opt('cipher', 'nova.compute.api',
@@ -108,6 +108,7 @@ class Image(object):
self.source_type = source_type
self.driver_format = driver_format
+ self.driver_io = None
self.discard_mode = CONF.libvirt.hw_disk_discard
self.is_block_dev = is_block_dev
self.preallocate = False
@@ -169,6 +170,7 @@ class Image(object):
info.target_dev = disk_dev
info.driver_cache = cache_mode
info.driver_discard = self.discard_mode
+ info.driver_io = self.driver_io
info.driver_format = self.driver_format
driver_name = libvirt_utils.pick_disk_driver_name(hypervisor_version,
self.is_block_dev)
@@ -262,14 +264,14 @@ class Image(object):
"""
can_fallocate = getattr(self.__class__, 'can_fallocate', None)
if can_fallocate is None:
- _out, err = utils.trycmd('fallocate', '-n', '-l', '1',
- self.path + '.fallocate_test')
- fileutils.delete_if_exists(self.path + '.fallocate_test')
+ test_path = self.path + '.fallocate_test'
+ _out, err = utils.trycmd('fallocate', '-l', '1', test_path)
+ fileutils.delete_if_exists(test_path)
can_fallocate = not err
self.__class__.can_fallocate = can_fallocate
if not can_fallocate:
- LOG.error(_LE('Unable to preallocate image at path: '
- '%(path)s'), {'path': self.path})
+ LOG.warning(_LW('Unable to preallocate image at path: '
+ '%(path)s'), {'path': self.path})
return can_fallocate
def verify_base_size(self, base, size, base_size=0):
@@ -393,6 +395,25 @@ class Image(object):
raise exception.ImageUnacceptable(image_id=image_id_or_uri,
reason=reason)
+ def direct_snapshot(self, context, snapshot_name, image_format, image_id,
+ base_image_id):
+ """Prepare a snapshot for direct reference from glance
+
+ :raises: exception.ImageUnacceptable if it cannot be
+ referenced directly in the specified image format
+ :returns: URL to be given to glance
+ """
+ raise NotImplementedError(_('direct_snapshot() is not implemented'))
+
+ def cleanup_direct_snapshot(self, location, also_destroy_volume=False,
+ ignore_errors=False):
+ """Performs any cleanup actions required after calling
+ direct_snapshot(), for graceful exception handling and the like.
+
+ This should be a no-op on any backend where it is not implemented.
+ """
+ pass
+
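A plausible calling pattern for this base API (a sketch, not code from this change): backends that cannot hand a snapshot to Glance directly raise, and the caller falls back to the generic snapshot-and-upload path.

    try:
        location = backend.direct_snapshot(context, snapshot_name,
                                           image_format, image_id,
                                           base_image_id)
    except (NotImplementedError, exception.ImageUnacceptable):
        location = None  # fall back to streaming the disk up to Glance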
def _get_lock_name(self, base):
"""Get an image's name of a base file."""
return os.path.split(base)[-1]
@@ -421,6 +442,31 @@ class Image(object):
# we should talk about if we want this functionality for everything.
pass
+ def create_snap(self, name):
+ """Create a snapshot on the image. A noop on backends that don't
+ support snapshots.
+
+ :param name: name of the snapshot
+ """
+ pass
+
+ def remove_snap(self, name, ignore_errors=False):
+ """Remove a snapshot on the image. A noop on backends that don't
+ support snapshots.
+
+ :param name: name of the snapshot
+ :param ignore_errors: don't log errors if the snapshot does not exist
+ """
+ pass
+
+ def rollback_to_snap(self, name):
+ """Rollback the image to the named snapshot. A noop on backends that
+ don't support snapshots.
+
+ :param name: name of the snapshot
+ """
+ pass
+
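Together these hooks give the resize path a simple snapshot lifecycle; a sketch mirroring the driver code earlier in this change (the snapshot name comes from libvirt_utils.RESIZE_SNAPSHOT_NAME):

    backend = self.image_backend.image(instance, 'disk')
    # taken before the disk is resized
    backend.create_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
    ...
    # on revert: restore the disk, then drop the now-unneeded snapshot
    backend.rollback_to_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME)
    backend.remove_snap(libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True)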
class Raw(Image):
def __init__(self, instance=None, disk_name=None, path=None):
@@ -432,6 +478,8 @@ class Raw(Image):
disk_name))
self.preallocate = (
strutils.to_slug(CONF.preallocate_images) == 'space')
+ if self.preallocate:
+ self.driver_io = "native"
self.disk_info_path = os.path.join(os.path.dirname(self.path),
'disk.info')
self.correct_format()
@@ -483,10 +531,15 @@ class Raw(Image):
else:
if not os.path.exists(base):
prepare_template(target=base, max_size=size, *args, **kwargs)
+
+ # NOTE(mikal): Update the mtime of the base file so the image
+ # cache manager knows it is in use.
+ libvirt_utils.update_mtime(base)
self.verify_base_size(base, size)
if not os.path.exists(self.path):
with fileutils.remove_path_on_error(self.path):
copy_raw_image(base, self.path, size)
+
self.correct_format()
def resize_image(self, size):
@@ -514,6 +567,8 @@ class Qcow2(Image):
disk_name))
self.preallocate = (
strutils.to_slug(CONF.preallocate_images) == 'space')
+ if self.preallocate:
+ self.driver_io = "native"
self.disk_info_path = os.path.join(os.path.dirname(self.path),
'disk.info')
self.resolve_driver_format()
@@ -534,6 +589,10 @@ class Qcow2(Image):
# Download the unmodified base image unless we already have a copy.
if not os.path.exists(base):
prepare_template(target=base, max_size=size, *args, **kwargs)
+
+ # NOTE(ankit): Update the mtime of the base file so the image
+ # cache manager knows it is in use.
+ libvirt_utils.update_mtime(base)
self.verify_base_size(base, size)
legacy_backing_size = None
@@ -626,6 +685,9 @@ class Lvm(Image):
self.sparse = CONF.libvirt.sparse_logical_volumes
self.preallocate = not self.sparse
+ if not self.sparse:
+ self.driver_io = "native"
+
def _supports_encryption(self):
return True
@@ -872,6 +934,105 @@ class Rbd(Image):
self.driver.remove_image(name)
self.driver.import_image(local_file, name)
+ def create_snap(self, name):
+ return self.driver.create_snap(self.rbd_name, name)
+
+ def remove_snap(self, name, ignore_errors=False):
+ return self.driver.remove_snap(self.rbd_name, name, ignore_errors)
+
+ def rollback_to_snap(self, name):
+ return self.driver.rollback_to_snap(self.rbd_name, name)
+
+ def _get_parent_pool(self, context, base_image_id, fsid):
+ parent_pool = None
+ try:
+ # The easy way -- the image is an RBD clone, so use the parent
+ # images' storage pool
+ parent_pool, _im, _snap = self.driver.parent_info(self.rbd_name)
+ except exception.ImageUnacceptable:
+ # The hard way -- the image is itself a parent, so ask Glance
+ # where it came from
+ LOG.debug('No parent info for %s; asking the Image API where its '
+ 'store is', base_image_id)
+ try:
+ image_meta = IMAGE_API.get(context, base_image_id,
+ include_locations=True)
+ except Exception as e:
+ LOG.debug('Unable to get image %(image_id)s; error: %(error)s',
+ {'image_id': base_image_id, 'error': e})
+ image_meta = {}
+
+ # Find the first location that is in the same RBD cluster
+ for location in image_meta.get('locations', []):
+ try:
+ parent_fsid, parent_pool, _im, _snap = \
+ self.driver.parse_url(location['url'])
+ if parent_fsid == fsid:
+ break
+ else:
+ parent_pool = None
+ except exception.ImageUnacceptable:
+ continue
+
+ if not parent_pool:
+ raise exception.ImageUnacceptable(
+ _('Cannot determine the parent storage pool for %s; '
+ 'cannot determine where to store images') %
+ base_image_id)
+
+ return parent_pool
+
+ def direct_snapshot(self, context, snapshot_name, image_format,
+ image_id, base_image_id):
+ """Creates an RBD snapshot directly.
+ """
+ fsid = self.driver.get_fsid()
+ # NOTE(nic): Nova has zero comprehension of how Glance's image store
+ # is configured, but we can infer what storage pool Glance is using
+ # by looking at the parent image. If using cephx, write access should
+ # be enabled on that pool for the Nova user
+ parent_pool = self._get_parent_pool(context, base_image_id, fsid)
+
+ # Snapshot the disk and clone it into Glance's storage pool. librbd
+ # requires that snapshots be set to "protected" in order to clone them
+ self.driver.create_snap(self.rbd_name, snapshot_name, protect=True)
+ location = {'url': 'rbd://%(fsid)s/%(pool)s/%(image)s/%(snap)s' %
+ dict(fsid=fsid,
+ pool=self.pool,
+ image=self.rbd_name,
+ snap=snapshot_name)}
+ try:
+ self.driver.clone(location, image_id, dest_pool=parent_pool)
+ # Flatten the image, which detaches it from the source snapshot
+ self.driver.flatten(image_id, pool=parent_pool)
+ finally:
+ # all done with the source snapshot, clean it up
+ self.cleanup_direct_snapshot(location)
+
+ # Glance makes a protected snapshot called 'snap' on uploaded
+ # images and hands it out, so we'll do that too. The name of
+ # the snapshot doesn't really matter, this just uses what the
+ # glance-store rbd backend sets (which is not configurable).
+ self.driver.create_snap(image_id, 'snap', pool=parent_pool,
+ protect=True)
+ return ('rbd://%(fsid)s/%(pool)s/%(image)s/snap' %
+ dict(fsid=fsid, pool=parent_pool, image=image_id))
+
+ def cleanup_direct_snapshot(self, location, also_destroy_volume=False,
+ ignore_errors=False):
+ """Unprotects and destroys the name snapshot.
+
+ With also_destroy_volume=True, it will also cleanup/destroy the parent
+ volume. This is useful for cleaning up when the target volume fails
+ to snapshot properly.
+ """
+ if location:
+ _fsid, _pool, _im, _snap = self.driver.parse_url(location['url'])
+ self.driver.remove_snap(_im, _snap, pool=_pool, force=True,
+ ignore_errors=ignore_errors)
+ if also_destroy_volume:
+ self.driver.destroy_volume(_im, pool=_pool)
+
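End to end, with hypothetical values: the instance disk is snapshotted, the snapshot is cloned into Glance's pool, the clone is flattened to detach it from its parent, and a protected 'snap' snapshot is created on the clone, so the method returns a URL of the form

    rbd://4fa1b802-dd50-.../images/085b275a-.../snap

which Glance can register as an image location with no data upload at all.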
class Ploop(Image):
def __init__(self, instance=None, disk_name=None, path=None):
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index 0553bd00b3..efdd5483c6 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -449,10 +449,18 @@ class ImageCacheManager(imagecache.ImageCacheManager):
if not exists:
return
- if age < maxage:
- LOG.info(_LI('Base or swap file too young to remove: %s'),
- base_file)
- else:
+ lock_file = os.path.split(base_file)[-1]
+
+ @utils.synchronized(lock_file, external=True,
+ lock_path=self.lock_path)
+ def _inner_remove_old_enough_file():
+ # NOTE(mikal): recheck that the file is old enough, as a new
+ # user of the file might have come along while we were waiting
+ # for the lock
+ exists, age = self._get_age_of_file(base_file)
+ if not exists or age < maxage:
+ return
+
LOG.info(_LI('Removing base or swap file: %s'), base_file)
try:
os.remove(base_file)
@@ -466,6 +474,11 @@ class ImageCacheManager(imagecache.ImageCacheManager):
{'base_file': base_file,
'error': e})
+ if age < maxage:
+ LOG.info(_LI('Base or swap file too young to remove: %s'),
+ base_file)
+ else:
+ _inner_remove_old_enough_file()
if remove_lock:
try:
# NOTE(jichenjc) The lock file will be constructed first
@@ -473,7 +486,6 @@ class ImageCacheManager(imagecache.ImageCacheManager):
# like nova-9e881789030568a317fad9daae82c5b1c65e0d4a
# or nova-03d8e206-6500-4d91-b47d-ee74897f9b4e
# according to the original file name
- lock_file = os.path.split(base_file)[-1]
lockutils.remove_external_lock_file(lock_file,
lock_file_prefix='nova-', lock_path=self.lock_path)
except OSError as e:
@@ -562,8 +574,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
{'id': img_id,
'base_file': base_file})
if os.path.exists(base_file):
- libvirt_utils.chown(base_file, os.getuid())
- os.utime(base_file, None)
+ libvirt_utils.update_mtime(base_file)
def _age_and_verify_swap_images(self, context, base_dir):
LOG.debug('Verify swap images')
@@ -571,8 +582,7 @@ class ImageCacheManager(imagecache.ImageCacheManager):
for ent in self.back_swap_images:
base_file = os.path.join(base_dir, ent)
if ent in self.used_swap_images and os.path.exists(base_file):
- libvirt_utils.chown(base_file, os.getuid())
- os.utime(base_file, None)
+ libvirt_utils.update_mtime(base_file)
elif self.remove_unused_base_images:
self._remove_swap_file(base_file)
diff --git a/nova/virt/libvirt/storage/rbd_utils.py b/nova/virt/libvirt/storage/rbd_utils.py
index 8564181100..bf3a33486a 100644
--- a/nova/virt/libvirt/storage/rbd_utils.py
+++ b/nova/virt/libvirt/storage/rbd_utils.py
@@ -16,6 +16,8 @@
import urllib
+from eventlet import tpool
+
try:
import rados
import rbd
@@ -29,11 +31,13 @@ from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
+from nova.compute import task_states
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import utils
+from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
@@ -173,7 +177,7 @@ class RBDDriver(object):
raise exception.ImageUnacceptable(image_id=url, reason=reason)
return pieces
- def _get_fsid(self):
+ def get_fsid(self):
with RADOSClient(self) as client:
return client.cluster.get_fsid()
@@ -185,7 +189,7 @@ class RBDDriver(object):
LOG.debug('not cloneable: %s', e)
return False
- if self._get_fsid() != fsid:
+ if self.get_fsid() != fsid:
reason = '%s is in a different ceph cluster' % url
LOG.debug(reason)
return False
@@ -205,19 +209,25 @@ class RBDDriver(object):
dict(loc=url, err=e))
return False
- def clone(self, image_location, dest_name):
+ def clone(self, image_location, dest_name, dest_pool=None):
_fsid, pool, image, snapshot = self.parse_url(
image_location['url'])
- LOG.debug('cloning %(pool)s/%(img)s@%(snap)s' %
- dict(pool=pool, img=image, snap=snapshot))
+ LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to '
+ '%(dest_pool)s/%(dest_name)s',
+ dict(pool=pool, img=image, snap=snapshot,
+ dest_pool=dest_pool, dest_name=dest_name))
with RADOSClient(self, str(pool)) as src_client:
- with RADOSClient(self) as dest_client:
- rbd.RBD().clone(src_client.ioctx,
- image.encode('utf-8'),
- snapshot.encode('utf-8'),
- dest_client.ioctx,
- dest_name,
- features=src_client.features)
+ with RADOSClient(self, dest_pool) as dest_client:
+ try:
+ rbd.RBD().clone(src_client.ioctx,
+ image.encode('utf-8'),
+ snapshot.encode('utf-8'),
+ dest_client.ioctx,
+ str(dest_name),
+ features=src_client.features)
+ except rbd.PermissionError:
+ raise exception.Forbidden(_('no write permission on '
+ 'storage pool %s') % dest_pool)
def size(self, name):
with RBDVolumeProxy(self, name) as vol:
@@ -233,6 +243,31 @@ class RBDDriver(object):
with RBDVolumeProxy(self, name) as vol:
vol.resize(size)
+ def parent_info(self, volume, pool=None):
+ """Returns the pool, image and snapshot name for the parent of an
+ RBD volume.
+
+ :volume: Name of RBD object
+ :pool: Name of pool
+ """
+ try:
+ with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
+ return vol.parent_info()
+ except rbd.ImageNotFound:
+ raise exception.ImageUnacceptable(_("no usable parent snapshot "
+ "for volume %s") % volume)
+
+ def flatten(self, volume, pool=None):
+ """"Flattens" a snapshotted image with the parents' data,
+ effectively detaching it from the parent.
+
+ :volume: Name of RBD object
+ :pool: Name of pool
+ """
+ LOG.debug('flattening %(pool)s/%(vol)s', dict(pool=pool, vol=volume))
+ with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
+ tpool.execute(vol.flatten)
+
def exists(self, name, pool=None, snapshot=None):
try:
with RBDVolumeProxy(self, name,
@@ -277,11 +312,16 @@ class RBDDriver(object):
args += self.ceph_args()
utils.execute('rbd', 'import', *args)
- def cleanup_volumes(self, instance):
+ def _destroy_volume(self, client, volume, pool=None):
+ """Destroy an RBD volume, retrying as needed.
+ """
def _cleanup_vol(ioctx, volume, retryctx):
try:
- rbd.RBD().remove(client.ioctx, volume)
+ rbd.RBD().remove(ioctx, volume)
raise loopingcall.LoopingCallDone(retvalue=False)
+ except rbd.ImageHasSnapshots:
+ self.remove_snap(volume, libvirt_utils.RESIZE_SNAPSHOT_NAME,
+ ignore_errors=True)
except (rbd.ImageBusy, rbd.ImageHasSnapshots):
LOG.warn(_LW('rbd remove %(volume)s in pool %(pool)s '
'failed'),
@@ -290,25 +330,36 @@ class RBDDriver(object):
if retryctx['retries'] <= 0:
raise loopingcall.LoopingCallDone()
+ # NOTE(danms): We let it go for ten seconds
+ retryctx = {'retries': 10}
+ timer = loopingcall.FixedIntervalLoopingCall(
+ _cleanup_vol, client.ioctx, volume, retryctx)
+ timed_out = timer.start(interval=1).wait()
+ if timed_out:
+ # NOTE(danms): Run this again to propagate the error, but
+ # if it succeeds, don't raise the loopingcall exception
+ try:
+ _cleanup_vol(client.ioctx, volume, retryctx)
+ except loopingcall.LoopingCallDone:
+ pass
+
+ def cleanup_volumes(self, instance):
with RADOSClient(self, self.pool) as client:
def belongs_to_instance(disk):
- return disk.startswith(instance.uuid)
+ # NOTE(nic): On revert_resize, the cleanup steps for the root
+ # volume are handled with an "rbd snap rollback" command,
+ # and none of this is needed (and is, in fact, harmful) so
+ # filter out non-ephemerals from the list
+ if instance.task_state == task_states.RESIZE_REVERTING:
+ return (disk.startswith(instance.uuid) and
+ disk.endswith('disk.local'))
+ else:
+ return disk.startswith(instance.uuid)
volumes = rbd.RBD().list(client.ioctx)
for volume in filter(belongs_to_instance, volumes):
- # NOTE(danms): We let it go for ten seconds
- retryctx = {'retries': 10}
- timer = loopingcall.FixedIntervalLoopingCall(
- _cleanup_vol, client.ioctx, volume, retryctx)
- timed_out = timer.start(interval=1).wait()
- if timed_out:
- # NOTE(danms): Run this again to propagate the error, but
- # if it succeeds, don't raise the loopingcall exception
- try:
- _cleanup_vol(client.ioctx, volume, retryctx)
- except loopingcall.LoopingCallDone:
- pass
+ self._destroy_volume(client, volume)
def get_pool_info(self):
with RADOSClient(self) as client:
@@ -316,3 +367,67 @@ class RBDDriver(object):
return {'total': stats['kb'] * units.Ki,
'free': stats['kb_avail'] * units.Ki,
'used': stats['kb_used'] * units.Ki}
+
+ def create_snap(self, volume, name, pool=None, protect=False):
+ """Create a snapshot of an RBD volume.
+
+ :volume: Name of RBD object
+ :name: Name of snapshot
+ :pool: Name of pool
+ :protect: Set the snapshot to "protected"
+ """
+ LOG.debug('creating snapshot(%(snap)s) on rbd image(%(img)s)',
+ {'snap': name, 'img': volume})
+ with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
+ tpool.execute(vol.create_snap, name)
+ if protect and not vol.is_protected_snap(name):
+ tpool.execute(vol.protect_snap, name)
+
+ def remove_snap(self, volume, name, ignore_errors=False, pool=None,
+ force=False):
+ """Removes a snapshot from an RBD volume.
+
+ :volume: Name of RBD object
+ :name: Name of snapshot
+ :ignore_errors: if True, suppress warnings about missing or protected snapshots
+ :pool: Name of pool
+ :force: Remove snapshot even if it is protected
+ """
+ with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
+ if name in [snap.get('name', '') for snap in vol.list_snaps()]:
+ if vol.is_protected_snap(name):
+ if force:
+ tpool.execute(vol.unprotect_snap, name)
+ elif not ignore_errors:
+ LOG.warning(_LW('snapshot(%(name)s) on rbd '
+ 'image(%(img)s) is protected, '
+ 'skipping'),
+ {'name': name, 'img': volume})
+ return
+ LOG.debug('removing snapshot(%(name)s) on rbd image(%(img)s)',
+ {'name': name, 'img': volume})
+ tpool.execute(vol.remove_snap, name)
+ elif not ignore_errors:
+ LOG.warning(_LW('no snapshot(%(name)s) found on rbd '
+ 'image(%(img)s)'),
+ {'name': name, 'img': volume})
+
+ def rollback_to_snap(self, volume, name):
+ """Revert an RBD volume to its contents at a snapshot.
+
+ :volume: Name of RBD object
+ :name: Name of snapshot
+ """
+ with RBDVolumeProxy(self, volume) as vol:
+ if name in [snap.get('name', '') for snap in vol.list_snaps()]:
+ LOG.debug('rolling back rbd image(%(img)s) to '
+ 'snapshot(%(snap)s)', {'snap': name, 'img': volume})
+ tpool.execute(vol.rollback_to_snap, name)
+ else:
+ raise exception.SnapshotNotFound(snapshot_id=name)
+
+ def destroy_volume(self, volume, pool=None):
+ """A one-shot version of cleanup_volumes()
+ """
+ with RADOSClient(self, pool) as client:
+ self._destroy_volume(client, volume)
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 8d2d295bdd..1925cdadd6 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -49,6 +49,8 @@ CONF.register_opts(libvirt_opts, 'libvirt')
CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
+RESIZE_SNAPSHOT_NAME = 'nova-resize'
+
def execute(*args, **kwargs):
return utils.execute(*args, **kwargs)
@@ -246,6 +248,14 @@ def chown(path, owner):
execute('chown', owner, path, run_as_root=True)
+def update_mtime(path):
+ """Touch a file without being the owner.
+
+ :param path: File to bump the mtime on
+ """
+ execute('touch', '-c', path, run_as_root=True)
+
+
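The image cache changes above use this to mark a cached base file as recently used, e.g. (path illustrative):

    update_mtime('/var/lib/nova/instances/_base/<image hash>')

The '-c' flag keeps touch from creating the file if it has already been removed, and run_as_root avoids the old chown-then-utime dance on base files owned by another user.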
def _id_map_to_config(id_map):
return "%s:%s:%s" % (id_map.start, id_map.target, id_map.count)
@@ -302,13 +312,13 @@ def load_file(path):
def file_open(*args, **kwargs):
"""Open file
- see built-in file() documentation for more details
+ see built-in open() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
- return file(*args, **kwargs)
+ return open(*args, **kwargs)
def file_delete(path):
@@ -455,7 +465,7 @@ def get_instance_path(instance, forceold=False, relative=False):
def get_instance_path_at_destination(instance, migrate_data=None):
- """Get the the instance path on destination node while live migration.
+ """Get the instance path on destination node while live migration.
This method determines the directory name for instance storage on
destination node, while live migration.
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index a55cb272dc..afc46a0e00 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -33,6 +33,7 @@ from nova import objects
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
+from nova.virt import osinfo
LOG = logging.getLogger(__name__)
@@ -108,9 +109,7 @@ class LibvirtGenericVIFDriver(object):
# If the user has specified a 'vif_model' against the
# image then honour that model
if image_meta:
- vif_model = image_meta.properties.get('hw_vif_model')
- if vif_model is not None:
- model = vif_model
+ model = osinfo.HardwareProperties(image_meta).network_model
# Else if the virt type is KVM/QEMU, use virtio according
# to the global config parameter
@@ -386,10 +385,7 @@ class LibvirtGenericVIFDriver(object):
return conf
- def get_config_vhostuser(self, instance, vif, image_meta,
- inst_type, virt_type, host):
- conf = self.get_base_config(instance, vif, image_meta,
- inst_type, virt_type)
+ def _get_vhostuser_settings(self, vif):
vif_details = vif['details']
mode = vif_details.get(network_model.VIF_DETAILS_VHOSTUSER_MODE,
'server')
@@ -397,6 +393,13 @@ class LibvirtGenericVIFDriver(object):
if sock_path is None:
raise exception.VifDetailsMissingVhostuserSockPath(
vif_id=vif['id'])
+ return mode, sock_path
+
+ def get_config_vhostuser(self, instance, vif, image_meta,
+ inst_type, virt_type, host):
+ conf = self.get_base_config(instance, vif, image_meta,
+ inst_type, virt_type)
+ mode, sock_path = self._get_vhostuser_settings(vif)
designer.set_vif_host_backend_vhostuser_config(conf, mode, sock_path)
# (vladikr) Not setting up driver and queues for vhostuser
# as queues are not supported in Libvirt until version 1.2.17
@@ -489,6 +492,13 @@ class LibvirtGenericVIFDriver(object):
process_input='0',
run_as_root=True,
check_exit_code=[0, 1])
+ disv6 = '/proc/sys/net/ipv6/conf/%s/disable_ipv6' % br_name
+ if os.path.exists(disv6):
+ utils.execute('tee',
+ disv6,
+ process_input='1',
+ run_as_root=True,
+ check_exit_code=[0, 1])
if not linux_net.device_exists(v2_name):
linux_net._create_veth_pair(v1_name, v2_name)
@@ -626,18 +636,57 @@ class LibvirtGenericVIFDriver(object):
linux_net.create_tap_dev(dev, mac)
linux_net._set_device_mtu(dev)
+ def plug_vhostuser_fp(self, instance, vif):
+ """Create a fp netdevice interface with a vhostuser socket"""
+ dev = self.get_vif_devname(vif)
+ if linux_net.device_exists(dev):
+ return
+
+ ovs_plug = vif['details'].get(
+ network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
+ False)
+ sockmode_qemu, sockpath = self._get_vhostuser_settings(vif)
+ sockmode_port = 'client' if sockmode_qemu == 'server' else 'server'
+
+ try:
+ linux_net.create_fp_dev(dev, sockpath, sockmode_port)
+
+ if ovs_plug:
+ if vif.is_hybrid_plug_enabled():
+ self.plug_ovs_hybrid(instance, vif)
+ utils.execute('brctl', 'addif',
+ self.get_br_name(vif['id']),
+ dev, run_as_root=True)
+ else:
+ iface_id = self.get_ovs_interfaceid(vif)
+ linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
+ dev, iface_id,
+ vif['address'],
+ instance.uuid)
+ except processutils.ProcessExecutionError:
+ LOG.exception(_LE("Failed while plugging vif"), instance=instance)
+
+ def plug_vhostuser_ovs(self, instance, vif):
+ """Plug a VIF_TYPE_VHOSTUSER into an ovs bridge"""
+ iface_id = self.get_ovs_interfaceid(vif)
+ port_name = os.path.basename(
+ vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
+ linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
+ port_name, iface_id, vif['address'],
+ instance.uuid)
+ linux_net.ovs_set_vhostuser_port_type(port_name)
+
def plug_vhostuser(self, instance, vif):
+ fp_plug = vif['details'].get(
+ network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG,
+ False)
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
- if ovs_plug:
- iface_id = self.get_ovs_interfaceid(vif)
- port_name = os.path.basename(
- vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
- linux_net.create_ovs_vif_port(self.get_bridge_name(vif),
- port_name, iface_id, vif['address'],
- instance.uuid)
- linux_net.ovs_set_vhostuser_port_type(port_name)
+ if fp_plug:
+ self.plug_vhostuser_fp(instance, vif)
+ elif ovs_plug:
+ self.plug_vhostuser_ovs(instance, vif)
def plug_vrouter(self, instance, vif):
"""Plug into Contrail's network port
@@ -844,15 +893,43 @@ class LibvirtGenericVIFDriver(object):
LOG.exception(_LE("Failed while unplugging vif"),
instance=instance)
+ def unplug_vhostuser_fp(self, instance, vif):
+ """Delete a fp netdevice interface with a vhostuser socket"""
+ dev = self.get_vif_devname(vif)
+ ovs_plug = vif['details'].get(
+ network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
+ False)
+
+ try:
+ if ovs_plug:
+ if vif.is_hybrid_plug_enabled():
+ self.unplug_ovs_hybrid(instance, vif)
+ else:
+ linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
+ dev, False)
+ linux_net.delete_fp_dev(dev)
+ except processutils.ProcessExecutionError:
+ LOG.exception(_LE("Failed while unplugging vif"),
+ instance=instance)
+
+ def unplug_vhostuser_ovs(self, instance, vif):
+ """Unplug a VIF_TYPE_VHOSTUSER into an ovs bridge"""
+ port_name = os.path.basename(
+ vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
+ linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
+ port_name)
+
def unplug_vhostuser(self, instance, vif):
+ fp_plug = vif['details'].get(
+ network_model.VIF_DETAILS_VHOSTUSER_FP_PLUG,
+ False)
ovs_plug = vif['details'].get(
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG,
False)
- if ovs_plug:
- port_name = os.path.basename(
- vif['details'][network_model.VIF_DETAILS_VHOSTUSER_SOCKET])
- linux_net.delete_ovs_vif_port(self.get_bridge_name(vif),
- port_name)
+ if fp_plug:
+ self.unplug_vhostuser_fp(instance, vif)
+ elif ovs_plug:
+ self.unplug_vhostuser_ovs(instance, vif)
def unplug_vrouter(self, instance, vif):
"""Unplug Contrail's network port
diff --git a/nova/virt/libvirt/volume/disco.py b/nova/virt/libvirt/volume/disco.py
new file mode 100644
index 0000000000..0a435d7f43
--- /dev/null
+++ b/nova/virt/libvirt/volume/disco.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2015 Industrial Technology Research Institute.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Libvirt volume driver for DISCO."""
+
+from os_brick.initiator import connector
+from oslo_config import cfg
+from oslo_log import log as logging
+
+from nova import utils
+from nova.virt.libvirt.volume import volume as libvirt_volume
+
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+
+CONF.import_opt('num_iscsi_scan_tries', 'nova.virt.libvirt.volume.iscsi',
+ group='libvirt')
+
+
+class LibvirtDISCOVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
+ """Class DISCO Libvirt volume Driver.
+
+ Implements Libvirt part of volume driver for DISCO cinder driver.
+ Uses the DISCO connector from the os-brick projects.
+ """
+
+ def __init__(self, connection):
+ """Init DISCO connector for LibVirt."""
+ super(LibvirtDISCOVolumeDriver, self).__init__(connection,
+ is_block_dev=False)
+ self.connector = connector.InitiatorConnector.factory(
+ 'DISCO', utils.get_root_helper(),
+ device_scan_attempts=CONF.libvirt.num_iscsi_scan_tries)
+
+ def get_config(self, connection_info, disk_info):
+ """Get DISCO volume attachment configuration."""
+ conf = super(LibvirtDISCOVolumeDriver, self).get_config(
+ connection_info, disk_info)
+
+ conf.source_path = connection_info['data']['device_path']
+ conf.source_protocol = 'disco'
+ conf.source_type = 'file'
+ return conf
+
+ def connect_volume(self, connection_info, disk_info):
+ """Connect a DISCO volume to a compute node."""
+ device_info = self.connector.connect_volume(connection_info['data'])
+ connection_info['data']['device_path'] = device_info['path']
+
+ def disconnect_volume(self, connection_info, disk_dev):
+ """Disconnect a DISCO volume of a compute node."""
+ self.connector.disconnect_volume(connection_info['data'], None)
+ super(LibvirtDISCOVolumeDriver, self).disconnect_volume(
+ connection_info, disk_dev)
diff --git a/nova/virt/libvirt/volume/fibrechannel.py b/nova/virt/libvirt/volume/fibrechannel.py
index fdda66d2e2..8e9535a8d3 100644
--- a/nova/virt/libvirt/volume/fibrechannel.py
+++ b/nova/virt/libvirt/volume/fibrechannel.py
@@ -45,6 +45,7 @@ class LibvirtFibreChannelVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
+ conf.driver_io = "native"
return conf
def connect_volume(self, connection_info, disk_info):
diff --git a/nova/virt/libvirt/volume/glusterfs.py b/nova/virt/libvirt/volume/glusterfs.py
index e1b6b9023d..6564172af1 100644
--- a/nova/virt/libvirt/volume/glusterfs.py
+++ b/nova/virt/libvirt/volume/glusterfs.py
@@ -10,11 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
+import six
from nova.i18n import _LE, _LW
from nova import paths
diff --git a/nova/virt/libvirt/volume/iscsi.py b/nova/virt/libvirt/volume/iscsi.py
index e7f8014025..61e5ed793c 100644
--- a/nova/virt/libvirt/volume/iscsi.py
+++ b/nova/virt/libvirt/volume/iscsi.py
@@ -75,6 +75,7 @@ class LibvirtISCSIVolumeDriver(libvirt_volume.LibvirtBaseVolumeDriver):
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
+ conf.driver_io = "native"
return conf
def connect_volume(self, connection_info, disk_info):
diff --git a/nova/virt/libvirt/volume/nfs.py b/nova/virt/libvirt/volume/nfs.py
index 4ea7d7cc13..b4cc24523d 100644
--- a/nova/virt/libvirt/volume/nfs.py
+++ b/nova/virt/libvirt/volume/nfs.py
@@ -10,11 +10,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
+import six
from nova.i18n import _LE, _LW
from nova import paths
@@ -52,6 +52,7 @@ class LibvirtNFSVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
+ conf.driver_io = "native"
return conf
def connect_volume(self, connection_info, disk_info):
diff --git a/nova/virt/libvirt/volume/quobyte.py b/nova/virt/libvirt/volume/quobyte.py
index 494e6c23f5..199439be5d 100644
--- a/nova/virt/libvirt/volume/quobyte.py
+++ b/nova/virt/libvirt/volume/quobyte.py
@@ -15,12 +15,12 @@
import errno
import os
-import six
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
+import six
from nova import exception as nova_exception
from nova.i18n import _
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index c78b4d9ea4..6b64a5b181 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -23,20 +23,11 @@ import os
import jinja2
import netaddr
-from oslo_config import cfg
+import nova.conf
from nova.network import model
-from nova import paths
-CONF = cfg.CONF
-
-netutils_opts = [
- cfg.StrOpt('injected_network_template',
- default=paths.basedir_def('nova/virt/interfaces.template'),
- help='Template file for injected network'),
-]
-
-CONF.register_opts(netutils_opts)
+CONF = nova.conf.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
diff --git a/nova/virt/opts.py b/nova/virt/opts.py
index 57beae9094..3826863eb1 100644
--- a/nova/virt/opts.py
+++ b/nova/virt/opts.py
@@ -14,17 +14,12 @@ import itertools
import nova.conf
import nova.virt.configdrive
-import nova.virt.disk.api
-import nova.virt.disk.mount.nbd
import nova.virt.disk.vfs.guestfs
-import nova.virt.driver
-import nova.virt.firewall
import nova.virt.hyperv.pathutils
import nova.virt.hyperv.vif
import nova.virt.hyperv.vmops
import nova.virt.hyperv.volumeops
import nova.virt.imagecache
-import nova.virt.images
import nova.virt.libvirt.driver
import nova.virt.libvirt.imagebackend
import nova.virt.libvirt.imagecache
@@ -32,7 +27,6 @@ import nova.virt.libvirt.storage.lvm
import nova.virt.libvirt.utils
import nova.virt.libvirt.vif
import nova.virt.libvirt.volume.volume
-import nova.virt.netutils
import nova.virt.vmwareapi.driver
import nova.virt.vmwareapi.images
import nova.virt.vmwareapi.vif
@@ -55,16 +49,9 @@ def list_opts():
('DEFAULT',
itertools.chain(
nova.virt.configdrive.configdrive_opts,
- nova.virt.disk.api.disk_opts,
- nova.virt.disk.mount.nbd.nbd_opts,
- nova.virt.driver.driver_opts,
- nova.virt.firewall.firewall_opts,
nova.virt.imagecache.imagecache_opts,
- nova.virt.images.image_opts,
- nova.virt.netutils.netutils_opts,
)),
('guestfs', nova.virt.disk.vfs.guestfs.guestfs_opts),
- nova.conf.virt.list_opts(),
('hyperv',
itertools.chain(
nova.virt.hyperv.pathutils.hyperv_opts,
diff --git a/nova/virt/osinfo.py b/nova/virt/osinfo.py
new file mode 100644
index 0000000000..ee3289263a
--- /dev/null
+++ b/nova/virt/osinfo.py
@@ -0,0 +1,136 @@
+# Copyright 2015 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+from oslo_utils import importutils
+
+from nova import exception
+from nova.i18n import _LW, _LI
+
+libosinfo = None
+LOG = logging.getLogger(__name__)
+
+# TODO(vladikr) The current implementation will serve only as a temporary
+# solution, due to its dependency on the libosinfo gobject library.
+# In the future it will be replaced by a pure Python library or by direct
+# parsing of the libosinfo XML files. However, that will only be possible
+# once the libosinfo project declares the XML structure to be a stable ABI.
+
+
+class _OsInfoDatabase(object):
+
+ _instance = None
+
+ def __init__(self):
+
+ global libosinfo
+ try:
+ if libosinfo is None:
+ libosinfo = importutils.import_module(
+ 'gi.repository.Libosinfo')
+ except ImportError as exp:
+ LOG.info(_LI("Cannot load Libosinfo: (%s)"), exp)
+ else:
+ self.loader = libosinfo.Loader()
+ self.loader.process_default_path()
+
+ self.db = self.loader.get_db()
+ self.oslist = self.db.get_os_list()
+
+ @classmethod
+ def get_instance(cls):
+ """Get libosinfo connection
+ """
+ if cls._instance is None:
+ cls._instance = _OsInfoDatabase()
+
+ return cls._instance
+
+ def get_os(self, os_name):
+ """Retrieve OS object based on id, unique URI identifier of the OS
+ :param os_name: id - the unique operating systemidentifier
+ e.g. http://fedoraproject.org/fedora/21,
+ http://microsoft.com/win/xp,
+ or a
+ short-id - the short name of the OS
+ e.g. fedora21, winxp
+ :returns: The operation system object Libosinfo.Os
+ :raise exception.OsInfoNotFound: If os hasn't been found
+ """
+ if libosinfo is None:
+ return
+ if not os_name:
+ raise exception.OsInfoNotFound(os_name='Empty')
+ fltr = libosinfo.Filter.new()
+ flt_field = 'id' if os_name.startswith('http') else 'short-id'
+ fltr.add_constraint(flt_field, os_name)
+ filtered = self.oslist.new_filtered(fltr)
+ list_len = filtered.get_length()
+ if not list_len:
+ raise exception.OsInfoNotFound(os_name=os_name)
+ return filtered.get_nth(0)
+
+
+class OsInfo(object):
+ """OS Information Structure
+ """
+
+ def __init__(self, os_name):
+ self._os_obj = self._get_os_obj(os_name)
+
+ def _get_os_obj(self, os_name):
+ try:
+ return _OsInfoDatabase.get_instance().get_os(os_name)
+ except exception.NovaException as e:
+ LOG.warning(_LW("Cannot find OS information - Reason: (%s)"), e)
+
+ @property
+ def network_model(self):
+ if self._os_obj is not None:
+ fltr = libosinfo.Filter()
+ fltr.add_constraint("class", "net")
+ devs = self._os_obj.get_all_devices(fltr)
+ if devs.get_length():
+ return devs.get_nth(0).get_name()
+
+ @property
+ def disk_model(self):
+ if self._os_obj is not None:
+ fltr = libosinfo.Filter()
+ fltr.add_constraint("class", "block")
+ devs = self._os_obj.get_all_devices(fltr)
+ if devs.get_length():
+ return devs.get_nth(0).get_name()
+
+
+class HardwareProperties(object):
+
+ def __init__(self, image_meta):
+ """:param image_meta: ImageMeta object
+ """
+ self.img_props = image_meta.properties
+ os_key = self.img_props.get('os_distro')
+ self.os_info_obj = OsInfo(os_key)
+
+ @property
+ def network_model(self):
+ model = self.img_props.get('hw_vif_model',
+ self.os_info_obj.network_model)
+ return 'virtio' if model == 'virtio-net' else model
+
+ @property
+ def disk_model(self):
+ model = self.img_props.get('hw_disk_bus',
+ self.os_info_obj.disk_model)
+ return 'virtio' if model == 'virtio-block' else model
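A short sketch of how the VIF driver change above consumes this (image properties are illustrative; assumes nova.objects is imported): an image carrying only os_distro gets its NIC model from libosinfo, while an explicit hw_vif_model property still wins.

    props = objects.ImageMetaProps(os_distro='fedora21')
    image_meta = objects.ImageMeta(properties=props)
    model = osinfo.HardwareProperties(image_meta).network_model
    # typically 'virtio' here; with hw_vif_model='e1000' set on the
    # image, 'e1000' would be returned instead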
diff --git a/nova/virt/virtapi.py b/nova/virt/virtapi.py
index 424699d4c9..9b4eddbab0 100644
--- a/nova/virt/virtapi.py
+++ b/nova/virt/virtapi.py
@@ -16,12 +16,6 @@ import contextlib
class VirtAPI(object):
- def provider_fw_rule_get_all(self, context):
- """Get the provider firewall rules
- :param context: security context
- """
- raise NotImplementedError()
-
@contextlib.contextmanager
def wait_for_instance_event(self, instance, event_names, deadline=300,
error_callback=None):
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index 31563eb9b8..b65efbdbd2 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -35,7 +35,6 @@ from nova.compute import task_states
from nova.compute import vm_states
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
-from nova import objects
from nova.virt import driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import error_util
@@ -302,7 +301,6 @@ class VMwareVCDriver(driver.ComputeDriver):
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
- image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
@@ -379,7 +377,6 @@ class VMwareVCDriver(driver.ComputeDriver):
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
- image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
@@ -483,7 +480,6 @@ class VMwareVCDriver(driver.ComputeDriver):
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
- image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.rescue(context, instance, network_info, image_meta)
def unrescue(self, instance, network_info):
@@ -559,7 +555,6 @@ class VMwareVCDriver(driver.ComputeDriver):
def attach_interface(self, instance, image_meta, vif):
"""Attach an interface to the instance."""
- image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.attach_interface(instance, image_meta, vif)
def detach_interface(self, instance, vif):
diff --git a/nova/virt/vmwareapi/ds_util.py b/nova/virt/vmwareapi/ds_util.py
index aa2cb8cfd6..cb9e25c3e6 100644
--- a/nova/virt/vmwareapi/ds_util.py
+++ b/nova/virt/vmwareapi/ds_util.py
@@ -40,7 +40,7 @@ DcInfo = collections.namedtuple('DcInfo',
['ref', 'name', 'vmFolder'])
# A cache for datastore/datacenter mappings. The key will be
-# the datastore moref. The value will the the DcInfo object.
+# the datastore moref. The value will be the DcInfo object.
_DS_DC_MAPPING = {}
@@ -484,3 +484,22 @@ def get_dc_info(session, ds_ref):
def dc_cache_reset():
global _DS_DC_MAPPING
_DS_DC_MAPPING = {}
+
+
+def get_connected_hosts(session, datastore):
+ """Get all the hosts to which the datastore is connected.
+
+ :param datastore: Reference to the datastore entity
+ :return: List of managed object references of all connected
+ hosts
+ """
+ host_mounts = session._call_method(vutil, 'get_object_property',
+ datastore, 'host')
+ if not hasattr(host_mounts, 'DatastoreHostMount'):
+ return []
+
+ connected_hosts = []
+ for host_mount in host_mounts.DatastoreHostMount:
+ connected_hosts.append(host_mount.key.value)
+
+ return connected_hosts
diff --git a/nova/virt/vmwareapi/images.py b/nova/virt/vmwareapi/images.py
index 524b1787dc..81e4609d2f 100644
--- a/nova/virt/vmwareapi/images.py
+++ b/nova/virt/vmwareapi/images.py
@@ -34,6 +34,7 @@ from nova import image
from nova.objects import fields
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import io_util
+from nova.virt.vmwareapi import vm_util
# NOTE(mdbooth): We use use_linked_clone below, but don't have to import it
# because nova.virt.vmwareapi.driver is imported first. In fact, it is not
@@ -379,8 +380,10 @@ def fetch_image_stream_optimized(context, instance, session, vm_name,
LOG.info(_LI("Downloaded image file data %(image_ref)s"),
{'image_ref': instance.image_ref}, instance=instance)
+ vmdk = vm_util.get_vmdk_info(session, imported_vm_ref, vm_name)
session._call_method(session.vim, "UnregisterVM", imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"), instance=instance)
+ return vmdk.capacity_in_bytes
def get_vmdk_name_from_ovf(xmlstr):
@@ -444,11 +447,14 @@ def fetch_image_ova(context, instance, session, vm_name, ds_name,
LOG.info(_LI("Downloaded OVA image file %(image_ref)s"),
{'image_ref': instance.image_ref}, instance=instance)
imported_vm_ref = write_handle.get_imported_vm()
+ vmdk = vm_util.get_vmdk_info(session,
+ imported_vm_ref,
+ vm_name)
session._call_method(session.vim, "UnregisterVM",
imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"),
instance=instance)
- return
+ return vmdk.capacity_in_bytes
raise exception.ImageUnacceptable(
reason=_("Extracting vmdk from OVA failed."),
image_id=image_ref)
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index 088ec1c8b4..713ec6e51e 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -1608,3 +1608,17 @@ def folder_ref_cache_update(path, folder_ref):
def folder_ref_cache_get(path):
return _FOLDER_PATH_REF_MAPPING.get(path)
+
+
+def _get_vm_name(display_name, id):
+ if display_name:
+ return '%s (%s)' % (display_name[:41], id[:36])
+ else:
+ return id[:36]
+
+
+def rename_vm(session, vm_ref, instance):
+ vm_name = _get_vm_name(instance.display_name, instance.uuid)
+ rename_task = session._call_method(session.vim, "Rename_Task", vm_ref,
+ newName=vm_name)
+ session._wait_for_task(rename_task)
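For example, _get_vm_name() truncates the display name to 41 characters and the uuid to 36, so the result always fits within an 80-character VM name limit (values hypothetical):

    >>> _get_vm_name('web', 'd1a1c026-38e7-4fd7-b343-35cd78e06a21')
    'web (d1a1c026-38e7-4fd7-b343-35cd78e06a21)'
    >>> _get_vm_name(None, 'd1a1c026-38e7-4fd7-b343-35cd78e06a21')
    'd1a1c026-38e7-4fd7-b343-35cd78e06a21'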
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index beae420c15..f6620bb681 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -419,7 +419,7 @@ class VMwareVMOps(object):
'datastore_name': vi.datastore.name},
instance=vi.instance)
- images.fetch_image_stream_optimized(
+ image_size = images.fetch_image_stream_optimized(
context,
vi.instance,
self._session,
@@ -427,6 +427,10 @@ class VMwareVMOps(object):
vi.datastore.name,
vi.dc_info.vmFolder,
self._root_resource_pool)
+ # The size of the image is different from the size of the virtual disk.
+ # We want to use the latter. On vSAN this is the only way to get this
+ # size because there is no VMDK descriptor.
+ vi.ii.file_size = image_size
def _fetch_image_as_ova(self, context, vi, image_ds_loc):
"""Download root disk of an OVA image as streamOptimized."""
@@ -435,13 +439,17 @@ class VMwareVMOps(object):
# of the VM use to import it with.
vm_name = image_ds_loc.parent.basename
- images.fetch_image_ova(context,
+ image_size = images.fetch_image_ova(context,
vi.instance,
self._session,
vm_name,
vi.datastore.name,
vi.dc_info.vmFolder,
self._root_resource_pool)
+ # The size of the image is different from the size of the virtual disk.
+ # We want to use the latter. On vSAN this is the only way to get this
+ # size because there is no VMDK descriptor.
+ vi.ii.file_size = image_size
def _prepare_sparse_image(self, vi):
tmp_dir_loc = vi.datastore.build_path(
@@ -785,6 +793,11 @@ class VMwareVMOps(object):
instance, vm_ref, vi.dc_info, vi.datastore,
injected_files, admin_password, network_info)
+ # Rename the VM. This is done after the spec is created to ensure
+ # that all of the files for the instance are under the directory
+ # 'uuid' of the instance
+ vm_util.rename_vm(self._session, vm_ref, instance)
+
vm_util.power_on_instance(self._session, instance, vm_ref=vm_ref)
def _is_bdm_valid(self, block_device_mapping):
@@ -1338,8 +1351,7 @@ class VMwareVMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
# 2. Reconfigure the VM properties
- image_meta = objects.ImageMeta.from_instance(instance)
- self._resize_vm(context, instance, vm_ref, flavor, image_meta)
+ self._resize_vm(context, instance, vm_ref, flavor, instance.image_meta)
self._update_instance_progress(context, instance,
step=2,
@@ -1383,8 +1395,8 @@ class VMwareVMOps(object):
vm_util.power_off_instance(self._session, instance, vm_ref)
client_factory = self._session.vim.client.factory
# Reconfigure the VM properties
- image_meta = objects.ImageMeta.from_instance(instance)
- extra_specs = self._get_extra_specs(instance.flavor, image_meta)
+ extra_specs = self._get_extra_specs(instance.flavor,
+ instance.image_meta)
metadata = self._get_instance_metadata(context, instance)
vm_resize_spec = vm_util.get_vm_resize_spec(client_factory,
int(instance.vcpus),
@@ -1674,17 +1686,19 @@ class VMwareVMOps(object):
while retrieve_result:
for vm in retrieve_result.objects:
- vm_name = None
+ vm_uuid = None
conn_state = None
for prop in vm.propSet:
- if prop.name == "name":
- vm_name = prop.val
- elif prop.name == "runtime.connectionState":
+ if prop.name == "runtime.connectionState":
conn_state = prop.val
+ elif prop.name == 'config.extraConfig["nvp.vm-uuid"]':
+ vm_uuid = prop.val.value
+ # Ignore VMs that do not have nvp.vm-uuid defined
+ if not vm_uuid:
+ continue
# Ignoring the orphaned or inaccessible VMs
- if (conn_state not in ["orphaned", "inaccessible"] and
- uuidutils.is_uuid_like(vm_name)):
- lst_vm_names.append(vm_name)
+ if conn_state not in ["orphaned", "inaccessible"]:
+ lst_vm_names.append(vm_uuid)
retrieve_result = self._session._call_method(vutil,
'continue_retrieval',
retrieve_result)
@@ -1911,7 +1925,8 @@ class VMwareVMOps(object):
def list_instances(self):
"""Lists the VM instances that are registered with vCenter cluster."""
- properties = ['name', 'runtime.connectionState']
+ properties = ['runtime.connectionState',
+ 'config.extraConfig["nvp.vm-uuid"]']
LOG.debug("Getting list of instances from cluster %s",
self._cluster)
vms = []
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 2d6a3f9b28..dc2d17665c 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -190,7 +190,6 @@ class XenAPIDriver(driver.ComputeDriver):
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create VM instance."""
- image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
@@ -211,7 +210,6 @@ class XenAPIDriver(driver.ComputeDriver):
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
"""Completes a resize, turning on the migrated instance."""
- image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info, power_on)
@@ -286,7 +284,6 @@ class XenAPIDriver(driver.ComputeDriver):
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
- image_meta = objects.ImageMeta.from_dict(image_meta)
self._vmops.rescue(context, instance, network_info, image_meta,
rescue_password)
@@ -481,7 +478,7 @@ class XenAPIDriver(driver.ComputeDriver):
:param instance: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
-
+ :returns: a XenapiLiveMigrateData object
"""
return self._vmops.check_can_live_migrate_destination(context,
instance,
@@ -509,6 +506,7 @@ class XenAPIDriver(driver.ComputeDriver):
:param dest_check_data: result of check_can_live_migrate_destination
includes the block_migration flag
:param block_device_info: result of _get_instance_block_device_info
+ :returns: a XenapiLiveMigrateData object
"""
return self._vmops.check_can_live_migrate_source(context, instance,
dest_check_data)
@@ -537,7 +535,7 @@ class XenAPIDriver(driver.ComputeDriver):
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, migrate VM disk.
- :param migrate_data: implementation specific params
+ :param migrate_data: a XenapiLiveMigrateData object
"""
self._vmops.live_migrate(context, instance, dest, post_method,
recover_method, block_migration, migrate_data)
@@ -547,6 +545,17 @@ class XenAPIDriver(driver.ComputeDriver):
block_device_info,
destroy_disks=True,
migrate_data=None):
+ """Performs a live migration rollback.
+
+ :param context: security context
+ :param instance: instance object that was being migrated
+ :param network_info: instance network information
+ :param block_device_info: instance block device information
+ :param destroy_disks:
+ if true, destroy disks at destination during cleanup
+ :param migrate_data: A XenapiLiveMigrateData object
+ """
+
# NOTE(johngarbutt) Destroying the VM is not appropriate here
# and in the cases where it might make sense,
# XenServer has already done it.
@@ -563,6 +572,7 @@ class XenAPIDriver(driver.ComputeDriver):
:param block_device_info:
It must be the result of _get_instance_volume_bdms()
at compute manager.
+ :returns: a XenapiLiveMigrateData object
"""
# TODO(JohnGarbutt) look again when boot-from-volume hits trunk
result = objects.XenapiLiveMigrateData()
@@ -577,7 +587,7 @@ class XenAPIDriver(driver.ComputeDriver):
:param context: security context
:instance: instance object that was migrated
:block_device_info: instance block device information
- :param migrate_data: if not None, it is a dict which has data
+ :param migrate_data: a XenapiLiveMigrateData object
"""
self._vmops.post_live_migration(context, instance, migrate_data)
@@ -618,9 +628,6 @@ class XenAPIDriver(driver.ComputeDriver):
"""
return self._vmops.refresh_instance_security_rules(instance)
- def refresh_provider_fw_rules(self):
- return self._vmops.refresh_provider_fw_rules()
-
def get_available_nodes(self, refresh=False):
stats = self.host_state.get_host_stats(refresh=refresh)
return [stats["hypervisor_hostname"]]
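
The docstring updates above reflect that the live migration entry points now
exchange a typed XenapiLiveMigrateData object rather than free-form dicts. A
minimal sketch of constructing one, using only the fields visible in this
change (block_migration, migrate_send_data) and assuming nova's object
registry is loaded::

    from nova import objects

    def make_dest_check_data(block_migration, migrate_send_data):
        # Build the typed object that check_can_live_migrate_destination
        # and check_can_live_migrate_source now document returning.
        data = objects.XenapiLiveMigrateData()
        data.block_migration = block_migration
        if block_migration:
            data.migrate_send_data = migrate_send_data
        return data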
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 1ae79290d6..3b04d6ae48 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -77,12 +77,11 @@ LOG = logging.getLogger(__name__)
def reset():
for c in _CLASSES:
_db_content[c] = {}
- host = create_host('fake')
+ create_host('fake')
create_vm('fake dom 0',
'Running',
is_a_template=False,
- is_control_domain=True,
- resident_on=host)
+ is_control_domain=True)
def reset_table(table):
diff --git a/nova/virt/xenapi/firewall.py b/nova/virt/xenapi/firewall.py
index ebebf3d9c1..143d029327 100644
--- a/nova/virt/xenapi/firewall.py
+++ b/nova/virt/xenapi/firewall.py
@@ -15,14 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-from oslo_log import log as logging
from oslo_serialization import jsonutils
-from nova import context
from nova.virt import firewall
-from nova.virt import netutils
-
-LOG = logging.getLogger(__name__)
class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
@@ -41,9 +36,9 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
json_ret = jsonutils.loads(ret)
return (json_ret['out'], json_ret['err'])
- def __init__(self, virtapi, xenapi_session=None, **kwargs):
+ def __init__(self, xenapi_session=None, **kwargs):
from nova.network import linux_net
- super(Dom0IptablesFirewallDriver, self).__init__(virtapi, **kwargs)
+ super(Dom0IptablesFirewallDriver, self).__init__(**kwargs)
self._session = xenapi_session
# Create IpTablesManager with executor through plugin
self.iptables = linux_net.IptablesManager(self._plugin_execute)
@@ -53,62 +48,9 @@ class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver):
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
def _build_tcp_udp_rule(self, rule, version):
- if rule['from_port'] == rule['to_port']:
- return ['--dport', '%s' % (rule['from_port'],)]
+ if rule.from_port == rule.to_port:
+ return ['--dport', '%s' % (rule.from_port,)]
else:
# No multiport needed for XS!
- return ['--dport', '%s:%s' % (rule['from_port'],
- rule['to_port'])]
-
- def _provider_rules(self):
- """Generate a list of rules from provider for IP4 & IP6.
-
- Note: We could not use the common code from virt.firewall because
- XS doesn't accept the '-m multiport' option.
- """
-
- ctxt = context.get_admin_context()
- ipv4_rules = []
- ipv6_rules = []
- rules = self._virtapi.provider_fw_rule_get_all(ctxt)
- for rule in rules:
- LOG.debug('Adding provider rule: %s', rule['cidr'])
- version = netutils.get_ip_version(rule['cidr'])
- if version == 4:
- fw_rules = ipv4_rules
- else:
- fw_rules = ipv6_rules
-
- protocol = rule['protocol']
- if version == 6 and protocol == 'icmp':
- protocol = 'icmpv6'
-
- args = ['-p', protocol, '-s', rule['cidr']]
-
- if protocol in ['udp', 'tcp']:
- if rule['from_port'] == rule['to_port']:
- args += ['--dport', '%s' % (rule['from_port'],)]
- else:
- args += ['--dport', '%s:%s' % (rule['from_port'],
- rule['to_port'])]
- elif protocol == 'icmp':
- icmp_type = rule['from_port']
- icmp_code = rule['to_port']
-
- if icmp_type == -1:
- icmp_type_arg = None
- else:
- icmp_type_arg = '%s' % icmp_type
- if not icmp_code == -1:
- icmp_type_arg += '/%s' % icmp_code
-
- if icmp_type_arg:
- if version == 4:
- args += ['-m', 'icmp', '--icmp-type',
- icmp_type_arg]
- elif version == 6:
- args += ['-m', 'icmp6', '--icmpv6-type',
- icmp_type_arg]
- args += ['-j DROP']
- fw_rules += [' '.join(args)]
- return ipv4_rules, ipv6_rules
+ return ['--dport', '%s:%s' % (rule.from_port,
+ rule.to_port)]
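
The _build_tcp_udp_rule change above follows the rules switching from
dict-style to attribute access. A standalone sketch of the same port-argument
logic, with a stand-in rule type (the namedtuple is illustrative)::

    import collections

    Rule = collections.namedtuple('Rule', ['from_port', 'to_port'])

    def build_dport_args(rule):
        # Single port: --dport N; port range: --dport N:M.
        # XenServer's iptables lacks '-m multiport', hence the range form.
        if rule.from_port == rule.to_port:
            return ['--dport', '%s' % rule.from_port]
        return ['--dport', '%s:%s' % (rule.from_port, rule.to_port)]

    assert build_dport_args(Rule(22, 22)) == ['--dport', '22']
    assert build_dport_args(Rule(8000, 8080)) == ['--dport', '8000:8080']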
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index b159723530..6e5d9302ec 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -168,7 +168,7 @@ class HostState(object):
"""Exctract information from the device string about the slot, the
vendor and the product ID. The string is as follow:
"Slot:\tBDF\nClass:\txxxx\nVendor:\txxxx\nDevice:\txxxx\n..."
- Return a dictionary with informations about the device.
+ Return a dictionary with information about the device.
"""
slot_regex = _compile_hex(r"Slot:\t"
r"((?:hex{4}:)?" # Domain: (optional)
@@ -207,7 +207,7 @@ class HostState(object):
pci_list = lspci_out.split("\n\n")
# For each device of the list, check if it uses the pciback
- # kernel driver and if it does, get informations and add it
+ # kernel driver and if it does, get information and add it
# to the list of passthrough_devices. Ignore it if the driver
# is not pciback.
passthrough_devices = []
diff --git a/nova/virt/xenapi/image/glance.py b/nova/virt/xenapi/image/glance.py
index ab8697477a..3a368acb91 100644
--- a/nova/virt/xenapi/image/glance.py
+++ b/nova/virt/xenapi/image/glance.py
@@ -14,11 +14,11 @@
# under the License.
import functools
-import six
import sys
from oslo_config import cfg
from oslo_log import log as logging
+import six
from nova.compute import utils as compute_utils
from nova import exception
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 79a8992c26..de09337064 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -240,5 +240,4 @@ class ResourcePool(object):
def swap_xapi_host(url, host_addr):
"""Replace the XenServer address present in 'url' with 'host_addr'."""
temp_url = urlparse.urlparse(url)
- _netloc, sep, port = temp_url.netloc.partition(':')
- return url.replace(temp_url.netloc, '%s%s%s' % (host_addr, sep, port))
+ return url.replace(temp_url.hostname, '%s' % host_addr)
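
The swap_xapi_host fix above replaces only the hostname portion of the URL,
so an explicit port is now preserved. A quick illustration with made-up
addresses::

    from six.moves.urllib import parse as urlparse

    def swap_xapi_host(url, host_addr):
        temp_url = urlparse.urlparse(url)
        return url.replace(temp_url.hostname, '%s' % host_addr)

    print(swap_xapi_host('http://10.0.0.1:443/sr/path', '192.168.1.1'))
    # -> http://192.168.1.1:443/sr/path (port and path left intact)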
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index 6b3218cacb..5c7a3502d0 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -18,8 +18,11 @@
"""VIF drivers for XenAPI."""
from oslo_config import cfg
+from oslo_log import log as logging
+from nova import exception
from nova.i18n import _
+from nova.i18n import _LW
from nova.virt.xenapi import network_utils
from nova.virt.xenapi import vm_utils
@@ -31,11 +34,56 @@ xenapi_ovs_integration_bridge_opt = cfg.StrOpt('ovs_integration_bridge',
CONF = cfg.CONF
CONF.register_opt(xenapi_ovs_integration_bridge_opt, 'xenserver')
+LOG = logging.getLogger(__name__)
+
class XenVIFDriver(object):
def __init__(self, xenapi_session):
self._session = xenapi_session
+ def _get_vif_ref(self, vif, vm_ref):
+ vif_refs = self._session.call_xenapi("VM.get_VIFs", vm_ref)
+ for vif_ref in vif_refs:
+ try:
+ vif_rec = self._session.call_xenapi('VIF.get_record', vif_ref)
+ if vif_rec['MAC'] == vif['address']:
+ return vif_ref
+ except Exception:
+                # If we get an exception here, the vif may have been removed
+                # during the loop; ignore this vif and continue
+ continue
+ return None
+
+ def _create_vif(self, vif, vif_rec, vm_ref):
+ try:
+ vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
+ except Exception as e:
+ LOG.warn(_LW("Failed to create vif, exception:%(exception)s, "
+ "vif:%(vif)s"), {'exception': e, 'vif': vif})
+ raise exception.NovaException(
+ reason=_("Failed to create vif %s") % vif)
+
+        LOG.debug("Created vif %(vif)s for vm %(vm_ref)s successfully",
+ {'vif': vif, 'vm_ref': vm_ref})
+ return vif_ref
+
+ def unplug(self, instance, vif, vm_ref):
+ try:
+ LOG.debug("unplug vif, vif:%(vif)s, vm_ref:%(vm_ref)s",
+ {'vif': vif, 'vm_ref': vm_ref}, instance=instance)
+ vif_ref = self._get_vif_ref(vif, vm_ref)
+ if not vif_ref:
+                LOG.debug("vif does not exist, no need to unplug vif %s",
+ vif, instance=instance)
+ return
+ self._session.call_xenapi('VIF.destroy', vif_ref)
+ except Exception as e:
+ LOG.warn(
+                _LW("Failed to unplug vif:%(vif)s, exception:%(exception)s"),
+ {'vif': vif, 'exception': e}, instance=instance)
+ raise exception.NovaException(
+ reason=_("Failed to unplug vif %s") % vif)
+
class XenAPIBridgeDriver(XenVIFDriver):
"""VIF Driver for XenAPI that uses XenAPI to create Networks."""
@@ -43,6 +91,14 @@ class XenAPIBridgeDriver(XenVIFDriver):
def plug(self, instance, vif, vm_ref=None, device=None):
if not vm_ref:
vm_ref = vm_utils.lookup(self._session, instance['name'])
+
+ # if VIF already exists, return this vif_ref directly
+ vif_ref = self._get_vif_ref(vif, vm_ref)
+ if vif_ref:
+            LOG.debug("VIF %s already exists when plugging vif",
+ vif_ref, instance=instance)
+ return vif_ref
+
if not device:
device = 0
@@ -65,7 +121,7 @@ class XenAPIBridgeDriver(XenVIFDriver):
else:
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
- return vif_rec
+ return self._create_vif(vif, vif_rec, vm_ref)
def _ensure_vlan_bridge(self, network):
"""Ensure that a VLAN bridge exists."""
@@ -126,8 +182,8 @@ class XenAPIBridgeDriver(XenVIFDriver):
return network_ref
- def unplug(self, instance, vif):
- pass
+ def unplug(self, instance, vif, vm_ref):
+ super(XenAPIBridgeDriver, self).unplug(instance, vif, vm_ref)
class XenAPIOpenVswitchDriver(XenVIFDriver):
@@ -137,6 +193,13 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
if not vm_ref:
vm_ref = vm_utils.lookup(self._session, instance['name'])
+ # if VIF already exists, return this vif_ref directly
+ vif_ref = self._get_vif_ref(vif, vm_ref)
+ if vif_ref:
+            LOG.debug("VIF %s already exists when plugging vif",
+ vif_ref, instance=instance)
+ return vif_ref
+
if not device:
device = 0
@@ -155,7 +218,7 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
# OVS on the hypervisor monitors this key and uses it to
# set the iface-id attribute
vif_rec['other_config'] = {'nicira-iface-id': vif['id']}
- return vif_rec
+ return self._create_vif(vif, vif_rec, vm_ref)
- def unplug(self, instance, vif):
- pass
+ def unplug(self, instance, vif, vm_ref):
+ super(XenAPIOpenVswitchDriver, self).unplug(instance, vif, vm_ref)
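
The new base-class _get_vif_ref matches VIFs by MAC address, which is what
makes plug() idempotent and unplug() safe when the VIF is already gone. A
self-contained sketch of that lookup, with a stand-in session object::

    class FakeSession(object):
        """Stand-in for the XenAPI session (illustrative only)."""
        def __init__(self, vifs):
            self._vifs = vifs  # {vif_ref: {'MAC': ...}}

        def call_xenapi(self, method, ref):
            if method == 'VM.get_VIFs':
                return list(self._vifs)
            if method == 'VIF.get_record':
                return self._vifs[ref]

    def get_vif_ref(session, vif, vm_ref):
        # Mirrors XenVIFDriver._get_vif_ref: match on MAC address.
        for vif_ref in session.call_xenapi('VM.get_VIFs', vm_ref):
            try:
                rec = session.call_xenapi('VIF.get_record', vif_ref)
                if rec['MAC'] == vif['address']:
                    return vif_ref
            except Exception:
                continue  # the VIF may have been removed mid-loop
        return None

    session = FakeSession({'ref1': {'MAC': 'aa:bb:cc:dd:ee:01'}})
    assert get_vif_ref(session, {'address': 'aa:bb:cc:dd:ee:01'},
                       'vm1') == 'ref1'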
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 3df3696312..29b37c6a41 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -45,6 +45,7 @@ from nova.api.metadata import base as instance_metadata
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
+import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova.network import model as network_model
@@ -118,10 +119,8 @@ xenapi_vm_utils_opts = [
'ISO image creation'),
]
-CONF = cfg.CONF
+CONF = nova.conf.CONF
CONF.register_opts(xenapi_vm_utils_opts, 'xenserver')
-CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
-CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('use_ipv6', 'nova.netconf')
XENAPI_POWER_STATE = {
@@ -1022,7 +1021,7 @@ def _make_partition(session, dev, partition_start, partition_end):
def _generate_disk(session, instance, vm_ref, userdevice, name_label,
- disk_type, size_mb, fs_type):
+ disk_type, size_mb, fs_type, fs_label=None):
"""Steps to programmatically generate a disk:
1. Create VDI of desired size
@@ -1050,11 +1049,9 @@ def _generate_disk(session, instance, vm_ref, userdevice, name_label,
partition_path = _make_partition(session, dev,
partition_start, partition_end)
- if fs_type == 'linux-swap':
- utils.execute('mkswap', partition_path, run_as_root=True)
- elif fs_type is not None:
- utils.execute('mkfs', '-t', fs_type, partition_path,
- run_as_root=True)
+ if fs_type is not None:
+ utils.mkfs(fs_type, partition_path, fs_label,
+ run_as_root=True)
# 4. Create VBD between instance VM and VDI
if vm_ref:
@@ -1072,7 +1069,7 @@ def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb):
# NOTE(jk0): We use a FAT32 filesystem for the Windows swap
# partition because that is what parted supports.
is_windows = instance['os_type'] == "windows"
- fs_type = "vfat" if is_windows else "linux-swap"
+ fs_type = "vfat" if is_windows else "swap"
_generate_disk(session, instance, vm_ref, userdevice, name_label,
'swap', swap_mb, fs_type)
@@ -1099,14 +1096,16 @@ def generate_single_ephemeral(session, instance, vm_ref, userdevice,
instance_name_label = instance["name"]
name_label = "%s ephemeral" % instance_name_label
+ fs_label = "ephemeral"
# TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here
label_number = int(userdevice) - 4
if label_number > 0:
name_label = "%s (%d)" % (name_label, label_number)
+ fs_label = "ephemeral%d" % label_number
return _generate_disk(session, instance, vm_ref, str(userdevice),
name_label, 'ephemeral', size_gb * 1024,
- CONF.default_ephemeral_format)
+ CONF.default_ephemeral_format, fs_label)
def generate_ephemeral(session, instance, vm_ref, first_userdevice,
@@ -2211,7 +2210,7 @@ def vdi_attached_here(session, vdi_ref, read_only=False):
def _get_sys_hypervisor_uuid():
- with file('/sys/hypervisor/uuid') as f:
+ with open('/sys/hypervisor/uuid') as f:
return f.readline().strip()
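
The _generate_disk change above folds the mkswap/mkfs branches into a single
helper and threads a filesystem label through for ephemeral disks. A
simplified sketch of that kind of dispatch, in the spirit of nova.utils.mkfs
(label flags vary by filesystem; '-L' is an assumption here)::

    def mkfs_command(fs_type, path, label=None):
        # 'swap' maps to mkswap, anything else to 'mkfs -t <type>'.
        if fs_type == 'swap':
            args = ['mkswap']
        else:
            args = ['mkfs', '-t', fs_type]
        if label:
            args.extend(['-L', label])
        args.append(path)
        return args

    print(mkfs_command('swap', '/dev/xvdb1'))
    print(mkfs_command('ext4', '/dev/xvdc1', label='ephemeral'))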
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 6c1de206dc..cbf9736bed 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -157,7 +157,6 @@ class VMOps(object):
self._volumeops = volumeops.VolumeOps(self._session)
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
- self._virtapi,
xenapi_session=self._session)
vif_impl = importutils.import_class(CONF.xenserver.vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
@@ -1577,11 +1576,10 @@ class VMOps(object):
self._destroy_vdis(instance, vm_ref)
self._destroy_kernel_ramdisk(instance, vm_ref)
- vm_utils.destroy_vm(self._session, instance, vm_ref)
-
- self.unplug_vifs(instance, network_info)
+ self.unplug_vifs(instance, network_info, vm_ref)
self.firewall_driver.unfilter_instance(
instance, network_info=network_info)
+ vm_utils.destroy_vm(self._session, instance, vm_ref)
def pause(self, instance):
"""Pause VM instance."""
@@ -1897,25 +1895,18 @@ class VMOps(object):
self._session.call_xenapi("VM.get_domid", vm_ref)
for device, vif in enumerate(network_info):
- vif_rec = self.vif_driver.plug(instance, vif,
- vm_ref=vm_ref, device=device)
- network_ref = vif_rec['network']
- LOG.debug('Creating VIF for network %s',
- network_ref, instance=instance)
- vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
- LOG.debug('Created VIF %(vif_ref)s, network %(network_ref)s',
- {'vif_ref': vif_ref, 'network_ref': network_ref},
- instance=instance)
+ LOG.debug('Create VIF %s', vif, instance=instance)
+ self.vif_driver.plug(instance, vif, vm_ref=vm_ref, device=device)
def plug_vifs(self, instance, network_info):
"""Set up VIF networking on the host."""
for device, vif in enumerate(network_info):
self.vif_driver.plug(instance, vif, device=device)
- def unplug_vifs(self, instance, network_info):
+ def unplug_vifs(self, instance, network_info, vm_ref):
if network_info:
for vif in network_info:
- self.vif_driver.unplug(instance, vif)
+ self.vif_driver.unplug(instance, vif, vm_ref)
def reset_network(self, instance, rescue=False):
"""Calls resetnetwork method in agent."""
@@ -2058,9 +2049,6 @@ class VMOps(object):
"""recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
- def refresh_provider_fw_rules(self):
- self.firewall_driver.refresh_provider_fw_rules()
-
def unfilter_instance(self, instance_ref, network_info):
"""Removes filters for each VIF of the specified instance."""
self.firewall_driver.unfilter_instance(instance_ref,
@@ -2144,7 +2132,7 @@ class VMOps(object):
:param disk_over_commit: if true, allow disk over commit
"""
- dest_check_data = migrate_data_obj.XenapiLiveMigrateData()
+ dest_check_data = objects.XenapiLiveMigrateData()
if block_migration:
dest_check_data.block_migration = True
dest_check_data.migrate_send_data = self._migrate_receive(ctxt)
@@ -2191,7 +2179,7 @@ class VMOps(object):
'relax-xsm-sr-check=true required'))
if not isinstance(dest_check_data, migrate_data_obj.LiveMigrateData):
- obj = migrate_data_obj.XenapiLiveMigrateData()
+ obj = objects.XenapiLiveMigrateData()
obj.from_legacy_dict(dest_check_data)
dest_check_data = obj
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 0bb6128f78..1b78c826ad 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -19,7 +19,6 @@ and storage repositories
"""
import re
-import six
import string
import uuid
@@ -28,6 +27,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import versionutils
+import six
from nova import exception
from nova.i18n import _, _LE, _LW
diff --git a/nova/vnc/__init__.py b/nova/vnc/__init__.py
index 1c7745867b..e69de29bb2 100644
--- a/nova/vnc/__init__.py
+++ b/nova/vnc/__init__.py
@@ -1,59 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2010 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Module for VNC Proxying."""
-
-from oslo_config import cfg
-
-
-vnc_opts = [
- cfg.StrOpt('novncproxy_base_url',
- default='http://127.0.0.1:6080/vnc_auto.html',
- help='Location of VNC console proxy, in the form '
- '"http://127.0.0.1:6080/vnc_auto.html"',
- deprecated_group='DEFAULT',
- deprecated_name='novncproxy_base_url'),
- cfg.StrOpt('xvpvncproxy_base_url',
- default='http://127.0.0.1:6081/console',
- help='Location of nova xvp VNC console proxy, in the form '
- '"http://127.0.0.1:6081/console"',
- deprecated_group='DEFAULT',
- deprecated_name='xvpvncproxy_base_url'),
- cfg.StrOpt('vncserver_listen',
- default='127.0.0.1',
- help='IP address on which instance vncservers should listen',
- deprecated_group='DEFAULT',
- deprecated_name='vncserver_listen'),
- cfg.StrOpt('vncserver_proxyclient_address',
- default='127.0.0.1',
- help='The address to which proxy clients '
- '(like nova-xvpvncproxy) should connect',
- deprecated_group='DEFAULT',
- deprecated_name='vncserver_proxyclient_address'),
- cfg.BoolOpt('enabled',
- default=True,
- help='Enable VNC related features',
- deprecated_group='DEFAULT',
- deprecated_name='vnc_enabled'),
- cfg.StrOpt('keymap',
- default='en-us',
- help='Keymap for VNC',
- deprecated_group='DEFAULT',
- deprecated_name='vnc_keymap'),
- ]
-
-CONF = cfg.CONF
-CONF.register_opts(vnc_opts, group='vnc')
diff --git a/nova/vnc/xvp_proxy.py b/nova/vnc/xvp_proxy.py
index 1425866b66..06929d8fa1 100644
--- a/nova/vnc/xvp_proxy.py
+++ b/nova/vnc/xvp_proxy.py
@@ -22,10 +22,10 @@ import eventlet
import eventlet.green
import eventlet.greenio
import eventlet.wsgi
-from oslo_config import cfg
from oslo_log import log as logging
import webob
+import nova.conf
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.i18n import _LI
@@ -35,20 +35,7 @@ from nova import wsgi
LOG = logging.getLogger(__name__)
-
-xvp_proxy_opts = [
- cfg.IntOpt('xvpvncproxy_port',
- default=6081,
- min=1,
- max=65535,
- help='Port that the XCP VNC proxy should bind to'),
- cfg.StrOpt('xvpvncproxy_host',
- default='0.0.0.0',
- help='Address that the XCP VNC proxy should bind to'),
- ]
-
-CONF = cfg.CONF
-CONF.register_opts(xvp_proxy_opts)
+CONF = nova.conf.CONF
class XCPVNCProxy(object):
@@ -181,5 +168,5 @@ def get_wsgi_server():
return wsgi.Server("XCP VNC Proxy",
XCPVNCProxy(),
protocol=SafeHttpProtocol,
- host=CONF.xvpvncproxy_host,
- port=CONF.xvpvncproxy_port)
+ host=CONF.vnc.xvpvncproxy_host,
+ port=CONF.vnc.xvpvncproxy_port)
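
With the xvp proxy options now read from the 'vnc' group (see also the
release notes below), the corresponding nova.conf excerpt would look like
this, using the former defaults::

    [vnc]
    enabled = True
    xvpvncproxy_host = 0.0.0.0
    xvpvncproxy_port = 6081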
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 66d5aa3313..8aa56f184b 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -18,14 +18,15 @@
Handles all requests relating to volumes + cinder.
"""
+import collections
import copy
import sys
from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exception
from cinderclient.v1 import client as v1_client
-from keystoneclient import exceptions as keystone_exception
-from keystoneclient import session
+from keystoneauth1 import exceptions as keystone_exception
+from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
@@ -82,9 +83,9 @@ deprecated = {'timeout': [cfg.DeprecatedOpt('http_timeout',
'insecure': [cfg.DeprecatedOpt('api_insecure',
group=CINDER_OPT_GROUP)]}
-session.Session.register_conf_options(CONF,
- CINDER_OPT_GROUP,
- deprecated_opts=deprecated)
+ks_loading.register_session_conf_options(CONF,
+ CINDER_OPT_GROUP,
+ deprecated_opts=deprecated)
LOG = logging.getLogger(__name__)
@@ -104,8 +105,8 @@ def cinderclient(context):
global _V1_ERROR_RAISED
if not _SESSION:
- _SESSION = session.Session.load_from_conf_options(CONF,
- CINDER_OPT_GROUP)
+ _SESSION = ks_loading.load_session_from_conf_options(CONF,
+ CINDER_OPT_GROUP)
url = None
endpoint_override = None
@@ -160,12 +161,18 @@ def _untranslate_volume_summary_view(context, vol):
# removed.
d['attach_time'] = ""
d['mountpoint'] = ""
+ d['multiattach'] = getattr(vol, 'multiattach', False)
if vol.attachments:
- att = vol.attachments[0]
+ d['attachments'] = collections.OrderedDict()
+ for attachment in vol.attachments:
+ a = {attachment['server_id']:
+ {'attachment_id': attachment.get('attachment_id'),
+ 'mountpoint': attachment.get('device')}
+ }
+ d['attachments'].update(a.items())
+
d['attach_status'] = 'attached'
- d['instance_uuid'] = att['server_id']
- d['mountpoint'] = att['device']
else:
d['attach_status'] = 'detached'
# NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name',
@@ -316,12 +323,24 @@ class API(object):
'vol_zone': volume['availability_zone']}
raise exception.InvalidVolume(reason=msg)
- def check_detach(self, context, volume):
+ def check_detach(self, context, volume, instance=None):
# TODO(vish): abstract status checking?
if volume['status'] == "available":
msg = _("volume %s already detached") % volume['id']
raise exception.InvalidVolume(reason=msg)
+ if volume['attach_status'] == 'detached':
+ msg = _("Volume must be attached in order to detach.")
+ raise exception.InvalidVolume(reason=msg)
+
+        # NOTE(ildikov): Preparation for multiattach support, when a volume
+ # can be attached to multiple hosts and/or instances,
+ # so just check the attachment specific to this instance
+ if instance is not None and instance.uuid not in volume['attachments']:
+ # TODO(ildikov): change it to a better exception, when enable
+ # multi-attach.
+ raise exception.VolumeUnattached(volume_id=volume['id'])
+
@translate_volume_exception
def reserve_volume(self, context, volume_id):
cinderclient(context).volumes.reserve(volume_id)
@@ -344,8 +363,34 @@ class API(object):
mountpoint, mode=mode)
@translate_volume_exception
- def detach(self, context, volume_id):
- cinderclient(context).volumes.detach(volume_id)
+ def detach(self, context, volume_id, instance_uuid=None,
+ attachment_id=None):
+ if attachment_id is None:
+ volume = self.get(context, volume_id)
+ if volume['multiattach']:
+ attachments = volume.get('attachments', {})
+ if instance_uuid:
+ attachment_id = attachments.get(instance_uuid, {}).\
+ get('attachment_id')
+ if not attachment_id:
+ LOG.warning(_LW("attachment_id couldn't be retrieved "
+ "for volume %(volume_id)s with "
+ "instance_uuid %(instance_id)s. The "
+ "volume has the 'multiattach' flag "
+                                "enabled; without the attachment_id, "
+                                "Cinder most probably cannot perform "
+ "the detach."),
+ {'volume_id': volume_id,
+ 'instance_id': instance_uuid})
+ else:
+ LOG.warning(_LW("attachment_id couldn't be retrieved for "
+ "volume %(volume_id)s. The volume has the "
+ "'multiattach' flag enabled, without the "
+                                "'multiattach' flag enabled; without the "
+                                "attachment_id, Cinder most probably "
+ {'volume_id': volume_id})
+
+ cinderclient(context).volumes.detach(volume_id, attachment_id)
@translate_volume_exception
def initialize_connection(self, context, volume_id, connector):
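
The attachments mapping built in _untranslate_volume_summary_view above is
keyed by server_id, which is what detach() walks to find the attachment_id
for a multiattach volume. A minimal sketch of that lookup with illustrative
values::

    volume = {
        'id': 'vol-1',
        'multiattach': True,
        'attachments': {
            'instance-uuid-1': {'attachment_id': 'attach-1',
                                'mountpoint': '/dev/vdb'},
            'instance-uuid-2': {'attachment_id': 'attach-2',
                                'mountpoint': '/dev/vdb'},
        },
    }

    def lookup_attachment_id(volume, instance_uuid):
        # Mirrors the lookup in API.detach(): only multiattach volumes
        # need to tell Cinder which attachment to remove.
        if not volume.get('multiattach'):
            return None
        return volume.get('attachments', {}).get(
            instance_uuid, {}).get('attachment_id')

    assert lookup_attachment_id(volume, 'instance-uuid-2') == 'attach-2'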
diff --git a/nova/volume/encryptors/base.py b/nova/volume/encryptors/base.py
index 03639c69ea..b5065b7ab0 100644
--- a/nova/volume/encryptors/base.py
+++ b/nova/volume/encryptors/base.py
@@ -15,6 +15,7 @@
import abc
+
import six
from nova import keymgr
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 529583b60b..421b2a2fcb 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -27,7 +27,6 @@ import sys
import eventlet
import eventlet.wsgi
import greenlet
-from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import service
from oslo_utils import excutils
@@ -37,58 +36,12 @@ import six
import webob.dec
import webob.exc
+import nova.conf
from nova import exception
from nova.i18n import _, _LE, _LI
from nova import utils
-wsgi_opts = [
- cfg.StrOpt('api_paste_config',
- default="api-paste.ini",
- help='File name for the paste.deploy config for nova-api'),
- cfg.StrOpt('wsgi_log_format',
- default='%(client_ip)s "%(request_line)s" status: %(status_code)s'
- ' len: %(body_length)s time: %(wall_seconds).7f',
- help='A python format string that is used as the template to '
- 'generate log lines. The following values can be formatted '
- 'into it: client_ip, date_time, request_line, status_code, '
- 'body_length, wall_seconds.'),
- cfg.StrOpt('secure_proxy_ssl_header',
- help='The HTTP header used to determine the scheme for the '
- 'original request, even if it was removed by an SSL '
- 'terminating proxy. Typical value is '
- '"HTTP_X_FORWARDED_PROTO".'),
- cfg.StrOpt('ssl_ca_file',
- help="CA certificate file to use to verify "
- "connecting clients"),
- cfg.StrOpt('ssl_cert_file',
- help="SSL certificate of API server"),
- cfg.StrOpt('ssl_key_file',
- help="SSL private key of API server"),
- cfg.IntOpt('tcp_keepidle',
- default=600,
- help="Sets the value of TCP_KEEPIDLE in seconds for each "
- "server socket. Not supported on OS X."),
- cfg.IntOpt('wsgi_default_pool_size',
- default=1000,
- help="Size of the pool of greenthreads used by wsgi"),
- cfg.IntOpt('max_header_line',
- default=16384,
- help="Maximum line size of message headers to be accepted. "
- "max_header_line may need to be increased when using "
- "large tokens (typically those generated by the "
- "Keystone v3 API with big service catalogs)."),
- cfg.BoolOpt('wsgi_keep_alive',
- default=True,
- help="If False, closes the client socket connection "
- "explicitly."),
- cfg.IntOpt('client_socket_timeout', default=900,
- help="Timeout for client connections' socket operations. "
- "If an incoming connection is idle for this number of "
- "seconds it will be closed. A value of '0' means "
- "wait forever."),
- ]
-CONF = cfg.CONF
-CONF.register_opts(wsgi_opts)
+CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
@@ -330,7 +283,7 @@ class Application(object):
res = exc.HTTPForbidden(explanation='Nice try')
# Option 3: a webob Response object (in case you need to play with
- # headers, or you want to be treated like an iterable, or or or)
+ # headers, or you want to be treated like an iterable, or ...)
res = Response()
res.app_iter = open('somefile')
diff --git a/openstack-common.conf b/openstack-common.conf
index ad277f2a40..4f6af914fc 100644
--- a/openstack-common.conf
+++ b/openstack-common.conf
@@ -3,7 +3,6 @@
# The list of modules to copy from oslo-incubator
module=cliutils
module=imageutils
-module=memorycache
# The base module to hold the copy of openstack.common
base=nova
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
index 0e95e33de3..aea5a0d3d1 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
@@ -83,7 +83,7 @@ def _resume_compute(session, compute_ref, compute_uuid):
except XenAPI.Failure, e:
# if session is invalid, e.g. xapi has restarted, then the pool
# join has been successful, wait for xapi to become alive again
- for c in xrange(0, DEFAULT_TRIES):
+ for c in range(0, DEFAULT_TRIES):
try:
_run_command(["xe", "vm-start", "uuid=%s" % compute_uuid])
return
diff --git a/releasenotes/notes/add-novnc-proxy-config-to-vnc-group-f5bb68740f623744.yaml b/releasenotes/notes/add-novnc-proxy-config-to-vnc-group-f5bb68740f623744.yaml
new file mode 100644
index 0000000000..4de0ba2b57
--- /dev/null
+++ b/releasenotes/notes/add-novnc-proxy-config-to-vnc-group-f5bb68740f623744.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - All noVNC proxy configuration options have been added to the 'vnc'
+ group. They should no longer be included in the 'DEFAULT' group.
diff --git a/releasenotes/notes/add-xvp-config-to-vnc-group-349cca99f05fcfd3.yaml b/releasenotes/notes/add-xvp-config-to-vnc-group-349cca99f05fcfd3.yaml
new file mode 100644
index 0000000000..1a7eb964e9
--- /dev/null
+++ b/releasenotes/notes/add-xvp-config-to-vnc-group-349cca99f05fcfd3.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - All VNC XVP configuration options have been added to the
+ 'vnc' group. They should no longer be included in the
+ 'DEFAULT' group.
diff --git a/releasenotes/notes/aggregate-uuid-generation-1f029af7a9af519b.yaml b/releasenotes/notes/aggregate-uuid-generation-1f029af7a9af519b.yaml
new file mode 100644
index 0000000000..2c164b40b5
--- /dev/null
+++ b/releasenotes/notes/aggregate-uuid-generation-1f029af7a9af519b.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - Upon first startup of the scheduler service in Mitaka, all defined
+ aggregates will have UUIDs generated and saved back to the
+ database. If you have a significant number of aggregates, this may
+ delay scheduler start as that work is completed, but it should be
+ minor for most deployments. \ No newline at end of file
diff --git a/releasenotes/notes/attach-detach-vol-for-shelved-and-shelved-offloaded-instances-93f70cfd49299f05.yaml b/releasenotes/notes/attach-detach-vol-for-shelved-and-shelved-offloaded-instances-93f70cfd49299f05.yaml
new file mode 100644
index 0000000000..e59cb6f5cb
--- /dev/null
+++ b/releasenotes/notes/attach-detach-vol-for-shelved-and-shelved-offloaded-instances-93f70cfd49299f05.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - It is possible to call attach and detach volume API operations for
+ instances which are in shelved and shelved_offloaded state.
+    For an instance in shelved_offloaded state, Nova will set the value of
+    the device_name field to None; the right value for that field will be
+    set once the instance is unshelved, as it will then be managed by a
+    specific compute manager.
diff --git a/releasenotes/notes/block-live-migrate-with-attached-volumes-ee02afbfe46937c7.yaml b/releasenotes/notes/block-live-migrate-with-attached-volumes-ee02afbfe46937c7.yaml
new file mode 100644
index 0000000000..568a02fc9b
--- /dev/null
+++ b/releasenotes/notes/block-live-migrate-with-attached-volumes-ee02afbfe46937c7.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - It is possible to block live migrate instances with additional cinder
+    volumes attached. This requires a libvirt version >= 1.2.17 and does
+ not work when live_migration_tunnelled is set to True.
diff --git a/releasenotes/notes/bp-boot-from-uefi-b413b96017db76dd.yaml b/releasenotes/notes/bp-boot-from-uefi-b413b96017db76dd.yaml
new file mode 100644
index 0000000000..09e0dab687
--- /dev/null
+++ b/releasenotes/notes/bp-boot-from-uefi-b413b96017db76dd.yaml
@@ -0,0 +1,3 @@
+---
+features:
+  - Add support for enabling UEFI boot with libvirt.
diff --git a/releasenotes/notes/bp-get-valid-server-state-a817488f4c8d3822.yaml b/releasenotes/notes/bp-get-valid-server-state-a817488f4c8d3822.yaml
new file mode 100644
index 0000000000..a17d5ebb19
--- /dev/null
+++ b/releasenotes/notes/bp-get-valid-server-state-a817488f4c8d3822.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ A new host_status attribute for servers/detail and servers/{server_id}.
+    In order to use this new feature, the user has to include the
+    microversion v2.16 header in the API request. A new policy
+    ``os_compute_api:servers:show:host_status`` has been added to enable the
+    feature.
+ By default, this is only exposed to cloud administrators.
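
A rough sketch of opting in to the new attribute (the endpoint, token and
output handling are placeholders, not part of this change)::

    import requests

    resp = requests.get(
        'http://nova-api:8774/v2.1/servers/detail',
        headers={'X-Auth-Token': '<token>',
                 # Opt in to microversion 2.16 to receive host_status.
                 'X-OpenStack-Nova-API-Version': '2.16'})
    for server in resp.json()['servers']:
        print(server['id'], server.get('host_status'))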
diff --git a/releasenotes/notes/bp-instance-crash-dump-7ccbba7799dc66f9.yaml b/releasenotes/notes/bp-instance-crash-dump-7ccbba7799dc66f9.yaml
new file mode 100644
index 0000000000..2f513fbebf
--- /dev/null
+++ b/releasenotes/notes/bp-instance-crash-dump-7ccbba7799dc66f9.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - A new server action trigger_crash_dump has been added to the REST API in
+ microversion 2.17.
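
A sketch of invoking the new action (endpoint, token and server id are
placeholders); the request body matches the v2.17 api sample added above::

    import requests

    requests.post(
        'http://nova-api:8774/v2.1/servers/<server-id>/action',
        json={'trigger_crash_dump': None},
        headers={'X-Auth-Token': '<token>',
                 'X-OpenStack-Nova-API-Version': '2.17'})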
diff --git a/releasenotes/notes/bp-rbd-instance-snapshots-130e860b726ddc16.yaml b/releasenotes/notes/bp-rbd-instance-snapshots-130e860b726ddc16.yaml
new file mode 100644
index 0000000000..100ccfa755
--- /dev/null
+++ b/releasenotes/notes/bp-rbd-instance-snapshots-130e860b726ddc16.yaml
@@ -0,0 +1,12 @@
+---
+features:
+  - When RBD is used for ephemeral disks and image storage, snapshots
+    are taken directly in Ceph and Glance is updated with the new
+    location. In case of failure, the operation gracefully falls back to
+    the "generic" snapshot method. This requires changing the typical
+    permissions for the Nova Ceph user (if using cephx) to allow writing to
+ the pool where vm images are stored, and it also requires
+ configuring Glance to provide a v2 endpoint with direct_url
+ support enabled (there are security implications to doing this).
+ See http://docs.ceph.com/docs/master/rbd/rbd-openstack/ for more
+ information on configuring OpenStack with RBD.
diff --git a/releasenotes/notes/bp-split-network-plane-for-live-migration-40bc127734173759.yaml b/releasenotes/notes/bp-split-network-plane-for-live-migration-40bc127734173759.yaml
new file mode 100644
index 0000000000..1955d85b4f
--- /dev/null
+++ b/releasenotes/notes/bp-split-network-plane-for-live-migration-40bc127734173759.yaml
@@ -0,0 +1,10 @@
+---
+features:
+ - |
+    A new option "live_migration_inbound_addr" has been added
+    to the configuration file; its default value is None.
+    If this option is present in pre_migration_data, the IP
+    address/hostname provided will be used instead of the
+    migration target compute node's hostname as the URI for
+    live migration. If it is None, the mechanism remains as
+    it was before.
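
A nova.conf sketch for the option described above (the address is made up;
the [libvirt] placement reflects where the libvirt driver registers its
options)::

    [libvirt]
    # Use a dedicated migration network address instead of the target
    # compute node's hostname.
    live_migration_inbound_addr = 192.168.200.10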
diff --git a/releasenotes/notes/bp-virt-driver-cpu-thread-pinning-1aaeeb6648f8e009.yaml b/releasenotes/notes/bp-virt-driver-cpu-thread-pinning-1aaeeb6648f8e009.yaml
new file mode 100644
index 0000000000..c816d3e336
--- /dev/null
+++ b/releasenotes/notes/bp-virt-driver-cpu-thread-pinning-1aaeeb6648f8e009.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - Added support for CPU thread policies, which can be used to control how
+ the libvirt virt driver places guests with respect to CPU SMT "threads".
+ These are provided as instance and image metadata options,
+ 'hw:cpu_thread_policy' and 'hw_cpu_thread_policy' respectively, and
+ provide an additional level of control over CPU pinning policy, when
+ compared to the existing CPU policy feature.
+ These changes were introduced in commits '83cd67c' and 'aaaba4a'.
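
A sketch of the two metadata forms named above ('prefer', 'isolate' and
'require' are the thread policy values this feature defines; the dicts are
illustrative)::

    # Flavor extra spec and image property forms of the same policy.
    flavor_extra_specs = {'hw:cpu_thread_policy': 'isolate'}
    image_properties = {'hw_cpu_thread_policy': 'isolate'}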
diff --git a/releasenotes/notes/compute_upgrade_levels_auto-97acebc7b45b76df.yaml b/releasenotes/notes/compute_upgrade_levels_auto-97acebc7b45b76df.yaml
index 3ae902c51f..6db301315d 100644
--- a/releasenotes/notes/compute_upgrade_levels_auto-97acebc7b45b76df.yaml
+++ b/releasenotes/notes/compute_upgrade_levels_auto-97acebc7b45b76df.yaml
@@ -5,4 +5,8 @@ features:
``upgrade_levels.compute`` is accepted, that allows automatic determination
of the compute service version to use for RPC communication. By default, we
still use the newest version if not set in the config, a specific version
- if asked, and only do this automatic behavior if 'auto' is configured.
+ if asked, and only do this automatic behavior if 'auto' is
+ configured. When 'auto' is used, sending a SIGHUP to the service
+ will cause the value to be re-calculated. Thus, after an upgrade
+ is complete, sending SIGHUP to all services will cause them to
+ start sending messages compliant with the newer RPC version.
diff --git a/releasenotes/notes/config_scheduler_driver-e751ae392bc1a1d0.yaml b/releasenotes/notes/config_scheduler_driver-e751ae392bc1a1d0.yaml
new file mode 100644
index 0000000000..c1837411ea
--- /dev/null
+++ b/releasenotes/notes/config_scheduler_driver-e751ae392bc1a1d0.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+ - |
+    The option ``scheduler_driver`` has been changed to use an entrypoint
+    instead of a full class path. Set one of the entrypoints under the
+    namespace 'nova.scheduler.driver' in 'setup.cfg'. Its default value is
+    'host_manager'. The full class path style is still supported in the
+    current release, but it is not recommended because class paths can
+    change, and this support will be dropped in the next major release.
diff --git a/releasenotes/notes/config_scheduler_host_manager_driver-a543a74ea70f5e90.yaml b/releasenotes/notes/config_scheduler_host_manager_driver-a543a74ea70f5e90.yaml
new file mode 100644
index 0000000000..f3f1a41e61
--- /dev/null
+++ b/releasenotes/notes/config_scheduler_host_manager_driver-a543a74ea70f5e90.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+ - |
+    The option ``scheduler_host_manager`` has been changed to use an
+    entrypoint instead of a full class path. Set one of the entrypoints under
+    the namespace 'nova.scheduler.host_manager' in 'setup.cfg'. Its default
+    value is 'host_manager'. The full class path style is still supported in
+    the current release, but it is not recommended because class paths can
+    change, and this support will be dropped in the next major release.
diff --git a/releasenotes/notes/disk-weight-scheduler-98647f9c6317d21d.yaml b/releasenotes/notes/disk-weight-scheduler-98647f9c6317d21d.yaml
new file mode 100644
index 0000000000..27b216fc77
--- /dev/null
+++ b/releasenotes/notes/disk-weight-scheduler-98647f9c6317d21d.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - A disk space scheduling filter is now available,
+ which prefers compute nodes with the most available
+ disk space. By default, free disk space is given equal
+ importance to available RAM. To increase the priority
+ of free disk space in scheduling, increase the
+ disk_weight_multiplier option.
diff --git a/releasenotes/notes/disk_ratio_to_rt-b6224ab8c0272d86.yaml b/releasenotes/notes/disk_ratio_to_rt-b6224ab8c0272d86.yaml
new file mode 100644
index 0000000000..1133b59ab9
--- /dev/null
+++ b/releasenotes/notes/disk_ratio_to_rt-b6224ab8c0272d86.yaml
@@ -0,0 +1,19 @@
+---
+features:
+ - On Mitaka compute nodes, if you want to modify the default disk allocation
+ ratio of 1.0, you should set that on every compute node, rather than
+ setting it in the scheduler. This means the disk, RAM and CPU allocation
+ ratios now all work in the same way.
+upgrade:
+ - For Liberty compute nodes, the disk_allocation_ratio works as before, you
+ must set it on the scheduler if you want to change it.
+ For Mitaka compute nodes, the disk_allocation_ratio set on the compute
+ nodes will be used only if the configuration is not set on the scheduler.
+    This preserves, for backwards compatibility, the ability to
+    override the disk allocation ratio by setting the configuration on the
+    scheduler node.
+ In Newton, we plan to remove the ability to set the disk allocation ratio
+ on the scheduler, at which point the compute nodes will always define the
+ disk allocation ratio, and pass that up to the scheduler. None of this
+ changes the default disk allocation ratio of 1.0. This matches the
+ behaviour of the RAM and CPU allocation ratios.
diff --git a/releasenotes/notes/force-live-migration-be5a10cd9c8eb981.yaml b/releasenotes/notes/force-live-migration-be5a10cd9c8eb981.yaml
new file mode 100644
index 0000000000..8bc0c40e78
--- /dev/null
+++ b/releasenotes/notes/force-live-migration-be5a10cd9c8eb981.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - A new REST API to force live migration to complete has been added
+ in microversion 2.22.
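
A sketch of calling the new API (ids and token are placeholders); the
request body matches the force_complete api sample added above::

    import requests

    requests.post(
        'http://nova-api:8774/v2.1/servers/<server-id>'
        '/migrations/<migration-id>/action',
        json={'force_complete': None},
        headers={'X-Auth-Token': '<token>',
                 'X-OpenStack-Nova-API-Version': '2.22'})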
diff --git a/releasenotes/notes/instance-actions-read-deleted-instances-18bbb327924b66c7.yaml b/releasenotes/notes/instance-actions-read-deleted-instances-18bbb327924b66c7.yaml
new file mode 100644
index 0000000000..1600076a6f
--- /dev/null
+++ b/releasenotes/notes/instance-actions-read-deleted-instances-18bbb327924b66c7.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - The os-instance-actions methods now read actions from deleted instances.
+ This means that
+ 'GET /v2.1/{tenant-id}/servers/{server-id}/os-instance-actions'
+ and
+ 'GET /v2.1/{tenant-id}/servers/{server-id}/os-instance-actions/{req-id}'
+ will return instance-action items even if the instance corresponding to
+ '{server-id}' has been deleted.
diff --git a/releasenotes/notes/instance-hostname-used-to-populate-ports-dns-name-08341ec73dc076c0.yaml b/releasenotes/notes/instance-hostname-used-to-populate-ports-dns-name-08341ec73dc076c0.yaml
new file mode 100644
index 0000000000..2d5cc4ebea
--- /dev/null
+++ b/releasenotes/notes/instance-hostname-used-to-populate-ports-dns-name-08341ec73dc076c0.yaml
@@ -0,0 +1,22 @@
+---
+features:
+ - When booting an instance, its sanitized 'hostname' attribute is now used to
+ populate the 'dns_name' attribute of the Neutron ports the instance is
+ attached to.
+ This functionality enables the Neutron internal DNS service to know the
+ ports by the instance's hostname. As a consequence, commands like
+ 'hostname -f' will work as expected when executed in the instance.
+ When a port's network has a non-blank 'dns_domain' attribute, the port's
+ 'dns_name' combined with the network's 'dns_domain' will be published by
+ Neutron in an external DNS as a service like Designate. As a consequence,
+ the instance's hostname is published in the external DNS as a service.
+ This functionality is added to Nova when the 'DNS Integration' extension
+ is enabled in Neutron.
+ The publication of 'dns_name' and 'dns_domain' combinations to an external
+    DNS as a service additionally requires the configuration of the appropriate
+ driver in Neutron.
+ When the 'Port Binding' extension is also enabled in Neutron, the
+ publication of a 'dns_name' and 'dns_domain' combination to the external
+ DNS as a service will require one additional update operation when Nova
+ allocates the port during the instance boot. This may have a noticeable
+ impact on the performance of the boot process.
diff --git a/releasenotes/notes/libvirt-deprecate-migration-flags-config-4ba1e2d6c9ef09ff.yaml b/releasenotes/notes/libvirt-deprecate-migration-flags-config-4ba1e2d6c9ef09ff.yaml
new file mode 100644
index 0000000000..e09ca517b6
--- /dev/null
+++ b/releasenotes/notes/libvirt-deprecate-migration-flags-config-4ba1e2d6c9ef09ff.yaml
@@ -0,0 +1,9 @@
+---
+deprecations:
+ - |
+ The libvirt live_migration_flag and block_migration_flag
+    config options are deprecated. These options gave overly
+    fine-grained control over the flags used and, in some
+ cases, misconfigurations could have dangerous side
+ effects. Please note the availability of a new
+ live_migration_tunnelled configuration option.
diff --git a/releasenotes/notes/libvirt-live-migration-flags-mangling-a2407a31ddf17427.yaml b/releasenotes/notes/libvirt-live-migration-flags-mangling-a2407a31ddf17427.yaml
new file mode 100644
index 0000000000..fd2d6f7c67
--- /dev/null
+++ b/releasenotes/notes/libvirt-live-migration-flags-mangling-a2407a31ddf17427.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+ - The libvirt driver will now correct unsafe and invalid
+ values for the live_migration_flag and block_migration_flag
+    configuration options. The live_migration_flag must not
+    contain VIR_MIGRATE_SHARED_INC, but block_migration_flag
+    must contain it. Both options must contain
+    VIR_MIGRATE_PEER2PEER, except when using the 'xen' virt
+    type, which does not support this flag. Both flags must
+    contain the VIR_MIGRATE_UNDEFINE_SOURCE flag and must not
+    contain the VIR_MIGRATE_PERSIST_DEST flag.
+
diff --git a/releasenotes/notes/libvirt-live-migration-new-tunneled-option-d7ebb1eb1e95e683.yaml b/releasenotes/notes/libvirt-live-migration-new-tunneled-option-d7ebb1eb1e95e683.yaml
new file mode 100644
index 0000000000..6c56365275
--- /dev/null
+++ b/releasenotes/notes/libvirt-live-migration-new-tunneled-option-d7ebb1eb1e95e683.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - The libvirt driver now has a live_migration_tunnelled
+ configuration option which should be used where the
+ VIR_MIGRATE_TUNNELLED flag would previously have been
+ set or unset in the live_migration_flag and
+ block_migration_flag configuration options.
diff --git a/releasenotes/notes/live_migration_uri-dependent-on-virt_type-595c46c2310f45c3.yaml b/releasenotes/notes/live_migration_uri-dependent-on-virt_type-595c46c2310f45c3.yaml
new file mode 100644
index 0000000000..aa78a0c498
--- /dev/null
+++ b/releasenotes/notes/live_migration_uri-dependent-on-virt_type-595c46c2310f45c3.yaml
@@ -0,0 +1,7 @@
+---
+upgrade:
+ - The libvirt driver has changed the default value of the
+    'live_migration_uri' flag, which now depends on the 'virt_type'. The
+    old default 'qemu+tcp://%s/system' is now adjusted for each of the
+ configured hypervisors. For Xen this will be 'xenmigr://%s/system', for
+ kvm/qemu this will be 'qemu+tcp://%s/system'.
diff --git a/releasenotes/notes/lock_policy-75bea372036acbd5.yaml b/releasenotes/notes/lock_policy-75bea372036acbd5.yaml
new file mode 100644
index 0000000000..3b9cf99913
--- /dev/null
+++ b/releasenotes/notes/lock_policy-75bea372036acbd5.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - The default RBAC policy for lock operations has been changed to
+    admin_or_owner for the stable V2.0 API. If you are still using the
+    stable API endpoint and want to keep allowing anyone to lock an
+    instance, you will need to modify the policy accordingly.
diff --git a/releasenotes/notes/neutron-ovs-bridge-name-7b3477103622f4cc.yaml b/releasenotes/notes/neutron-ovs-bridge-name-7b3477103622f4cc.yaml
new file mode 100644
index 0000000000..8d4b877aa8
--- /dev/null
+++ b/releasenotes/notes/neutron-ovs-bridge-name-7b3477103622f4cc.yaml
@@ -0,0 +1,4 @@
+---
+features:
+ - Add support for allowing Neutron to specify the
+ bridge name for the OVS, Linux Bridge, and vhost-user VIF types.
diff --git a/releasenotes/notes/optional_project_id-6aebf1cb394d498f.yaml b/releasenotes/notes/optional_project_id-6aebf1cb394d498f.yaml
new file mode 100644
index 0000000000..94fb1335e5
--- /dev/null
+++ b/releasenotes/notes/optional_project_id-6aebf1cb394d498f.yaml
@@ -0,0 +1,16 @@
+---
+features:
+
+ - Provides API 2.18, which makes the use of project_ids in API urls
+ optional.
+
+upgrade:
+
+ - In order to make project_id optional in urls, we must constrain
+ the set of allowed values for project_id in our urls. This
+ defaults to a regex of ``[0-9a-f\-]+``, which will match hex uuids
+    (with or without dashes) and integers. This covers all known
+ project_id formats in the wild.
+
+ If your site uses other values for project_id, you can set a site
+ specific validation with ``project_id_regex`` config variable.
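
A quick check of what the default regex accepts (anchoring added for the
demonstration)::

    import re

    project_id_re = re.compile(r'^[0-9a-f\-]+$')
    for pid in ('9f2c6abfad1a4ae1a2f3a9e8dfb3c8ad',      # hex uuid, no dashes
                '9f2c6abf-ad1a-4ae1-a2f3-a9e8dfb3c8ad',  # hex uuid, dashed
                '12345'):                                # plain integer id
        assert project_id_re.match(pid)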
diff --git a/releasenotes/notes/request-spec-api-db-b9cc6e0624d563c5.yaml b/releasenotes/notes/request-spec-api-db-b9cc6e0624d563c5.yaml
new file mode 100644
index 0000000000..2ee0c1fb51
--- /dev/null
+++ b/releasenotes/notes/request-spec-api-db-b9cc6e0624d563c5.yaml
@@ -0,0 +1,19 @@
+---
+upgrade:
+ - |
+ The commit with change-id Idd4bbbe8eea68b9e538fa1567efd304e9115a02a
+    requires that the nova_api database is set up and Nova is configured to use
+ it. Instructions on doing that are provided below.
+
+ Nova now requires that two databases are available and configured. The
+ existing nova database needs no changes, but a new nova_api database needs
+    to be set up. It is configured and managed very similarly to the nova
+ database. A new connection string configuration option is available in the
+ api_database group. An example::
+
+ [api_database]
+ connection = mysql+pymysql://user:secret@127.0.0.1/nova_api?charset=utf8
+
+ And a new nova-manage command has been added to manage db migrations for
+ this database. "nova-manage api_db sync" and "nova-manage api_db version"
+    are available and function like their "nova-manage db ..." counterparts.
diff --git a/releasenotes/notes/service-status-notification-e137297f5d5aa45d.yaml b/releasenotes/notes/service-status-notification-e137297f5d5aa45d.yaml
new file mode 100644
index 0000000000..d5a9a31d86
--- /dev/null
+++ b/releasenotes/notes/service-status-notification-e137297f5d5aa45d.yaml
@@ -0,0 +1,9 @@
+---
+features:
+ - A new service.status versioned notification has been introduced.
+    When the status of the Service object changes, nova will
+    send a service.update notification with a versioned payload,
+    as described in the versioned-notification-api blueprint.
+ The new notification is documented in
+ http://docs.openstack.org/developer/nova/notifications.html
+
diff --git a/releasenotes/notes/soft-affinity-for-server-group-f45e191bd8cdbd15.yaml b/releasenotes/notes/soft-affinity-for-server-group-f45e191bd8cdbd15.yaml
new file mode 100644
index 0000000000..5eb7e9db35
--- /dev/null
+++ b/releasenotes/notes/soft-affinity-for-server-group-f45e191bd8cdbd15.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - Two new policies, soft-affinity and soft-anti-affinity, have been implemented
+ for the server-group feature of Nova. This means that
+ POST /v2.1/{tenant_id}/os-server-groups
+    API resource now accepts 'soft-affinity' and 'soft-anti-affinity' as
+    values of the 'policies' key of the request body.
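
A sketch of creating such a group (tenant id and token are placeholders)::

    import requests

    requests.post(
        'http://nova-api:8774/v2.1/<tenant-id>/os-server-groups',
        json={'server_group': {'name': 'grp-1',
                               'policies': ['soft-anti-affinity']}},
        # 2.15 is assumed here as the microversion that introduced the
        # soft policies.
        headers={'X-Auth-Token': '<token>',
                 'X-OpenStack-Nova-API-Version': '2.15'})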
diff --git a/releasenotes/notes/switch-to-oslo-cache-7114a0ab2dea52df.yaml b/releasenotes/notes/switch-to-oslo-cache-7114a0ab2dea52df.yaml
new file mode 100644
index 0000000000..2cc24234da
--- /dev/null
+++ b/releasenotes/notes/switch-to-oslo-cache-7114a0ab2dea52df.yaml
@@ -0,0 +1,9 @@
+---
+prelude: >
+
+deprecations:
+
+  - Option ``memcached_servers`` is deprecated in Mitaka. Operators should
+    use the oslo.cache configuration instead. Specifically, the ``enabled``
+    option under the [cache] section should be set to True, and the url(s)
+    for the memcached servers should be set in the [cache]/memcache_servers
+    option. \ No newline at end of file
diff --git a/releasenotes/notes/upgrade_rootwrap_compute_filters-428ca239f2e4e63d.yaml b/releasenotes/notes/upgrade_rootwrap_compute_filters-428ca239f2e4e63d.yaml
new file mode 100644
index 0000000000..652a56f531
--- /dev/null
+++ b/releasenotes/notes/upgrade_rootwrap_compute_filters-428ca239f2e4e63d.yaml
@@ -0,0 +1,13 @@
+---
+upgrade:
+ - Upgrade the rootwrap configuration for the compute service,
+ so that patches requiring new rootwrap configuration can be
+ tested with grenade.
+fixes:
+  - There was a race condition where, if the base image was deleted by the
+    ImageCacheManager while imagebackend was copying the image to the
+    instance path, the instance would go into an error state. When libvirt
+    changed the base file ownership to libvirt-qemu during the copy, a
+    permission denied error was raised while updating the file access time
+    using os.utime. This issue is fixed by updating the base file access
+    time with root privileges using the 'touch' command. \ No newline at end of file
diff --git a/releasenotes/notes/user-settable-server-description-89dcfc75677e31bc.yaml b/releasenotes/notes/user-settable-server-description-89dcfc75677e31bc.yaml
new file mode 100644
index 0000000000..17c3d81529
--- /dev/null
+++ b/releasenotes/notes/user-settable-server-description-89dcfc75677e31bc.yaml
@@ -0,0 +1,16 @@
+---
+features:
+ - In Nova Compute API microversion 2.19, you can
+ specify a "description" attribute when creating, rebuilding, or updating
+    a server instance. This description can be retrieved by getting
+    server details, or by listing details for servers.
+
+ Refer to the Nova Compute API documentation for more
+ information.
+
+    Note that the description attribute existed in prior
+    Nova versions, but was set to the server name by Nova
+    and was not visible to the user. So, servers created
+    with microversions prior to 2.19 will return a
+    description equal to the server name when server
+    details are requested with microversion 2.19.
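
A sketch of updating the description (endpoint, token and values are
placeholders)::

    import requests

    requests.put(
        'http://nova-api:8774/v2.1/servers/<server-id>',
        json={'server': {'description': 'web tier, rebuilt Feb 2016'}},
        headers={'X-Auth-Token': '<token>',
                 'X-OpenStack-Nova-API-Version': '2.19'})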
diff --git a/releasenotes/notes/versioned-notifications-423f4d8d2a3992c6.yaml b/releasenotes/notes/versioned-notifications-423f4d8d2a3992c6.yaml
new file mode 100644
index 0000000000..413e08930b
--- /dev/null
+++ b/releasenotes/notes/versioned-notifications-423f4d8d2a3992c6.yaml
@@ -0,0 +1,9 @@
+---
+features:
+  - As part of refactoring the notification interface of Nova,
+    a new config option 'notification_format' has been added to specify
+    which notification format shall be used by nova. The possible values
+    are 'unversioned' (i.e. legacy), 'versioned', and 'both'. The default
+    value is 'both'.
+ The new versioned notifications are documented in
+ http://docs.openstack.org/developer/nova/notifications.html
diff --git a/releasenotes/notes/vmware_limits-16edee7a9ad023bc.yaml b/releasenotes/notes/vmware_limits-16edee7a9ad023bc.yaml
new file mode 100644
index 0000000000..c88eb16622
--- /dev/null
+++ b/releasenotes/notes/vmware_limits-16edee7a9ad023bc.yaml
@@ -0,0 +1,40 @@
+---
+features:
+  - quota:cpu_limit - (VMware only) The CPU utilization of a virtual machine
+    will not exceed this limit, even if there are available resources. This is
+ typically used to ensure a consistent performance of virtual machines
+ independent of available resources. Units are MHz.
+ - quota:cpu_reservation - (VMware only) guaranteed minimum reservation (MHz)
+ - quota:cpu_shares_level - (VMware only) the allocation level. This can be
+ 'custom', 'high', 'normal' or 'low'.
+ - quota:cpu_shares_share - (VMware only) in the event that 'custom' is used,
+ this is the number of shares.
+ - quota:memory_limit - (VMware only) The memory utilization of a virtual
+ machine will not exceed this limit, even if there are available resources.
+ This is typically used to ensure a consistent performance of virtual machines
+ independent of available resources. Units are MB.
+ - quota:memory_reservation - (VMware only) guaranteed minimum reservation (MB)
+ - quota:memory_shares_level - (VMware only) the allocation level. This can be
+ 'custom', 'high', 'normal' or 'low'.
+ - quota:memory_shares_share - (VMware only) in the event that 'custom' is used,
+ this is the number of shares.
+ - quota:disk_io_limit - (VMware only) The I/O utilization of a virtual machine
+ will not exceed this limit. The unit is number of I/O per second.
+ - quota:disk_io_reservation - (VMware only) Reservation control is used to
+ provide guaranteed allocation in terms of IOPS
+ - quota:disk_io_shares_level - (VMware only) the allocation level. This can be
+ 'custom', 'high', 'normal' or 'low'.
+ - quota:disk_io_shares_share - (VMware only) in the event that 'custom' is used,
+ this is the number of shares.
+ - quota:vif_limit - (VMware only) The bandwidth limit for the virtual network
+ adapter. The utilization of the virtual network adapter will not exceed this
+ limit, even if there are available resources. Units in Mbits/sec.
+ - quota:vif_reservation - (VMware only) Amount of network bandwidth that is
+ guaranteed to the virtual network adapter. If utilization is less than
+ reservation, the resource can be used by other virtual network adapters.
+ Reservation is not allowed to exceed the value of limit if limit is set.
+ Units in Mbits/sec.
+ - quota:vif_shares_level - (VMware only) the allocation level. This can be
+ 'custom', 'high', 'normal' or 'low'.
+ - quota:vif_shares_share - (VMware only) in the event that 'custom' is used,
+ this is the number of shares.
diff --git a/releasenotes/notes/xen_rename-03edd9b78f3e81e5.yaml b/releasenotes/notes/xen_rename-03edd9b78f3e81e5.yaml
new file mode 100644
index 0000000000..b9a23246c2
--- /dev/null
+++ b/releasenotes/notes/xen_rename-03edd9b78f3e81e5.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - The XenServer hypervisor type has been changed from ``xen`` to
+    ``XenServer``. This could impact your aggregate metadata or your flavor
+    extra specs if they still reference the old ``xen`` value.
diff --git a/releasenotes/notes/zookeeper-servicegroup-driver-removed-c3bcaa6f9fe976ed.yaml b/releasenotes/notes/zookeeper-servicegroup-driver-removed-c3bcaa6f9fe976ed.yaml
new file mode 100644
index 0000000000..5f5b6aa89e
--- /dev/null
+++ b/releasenotes/notes/zookeeper-servicegroup-driver-removed-c3bcaa6f9fe976ed.yaml
@@ -0,0 +1,15 @@
+---
+upgrade:
+ - |
+ The Zookeeper Service Group driver has been removed.
+
+    The driver has no known users and is not actively maintained. A warning
+    log message about the driver's state was added for the Kilo release.
+    Also, the evzookeeper library that the driver depends on is unmaintained
+    and `incompatible with recent eventlet releases`_.
+
+ A future release of Nova will `use the Tooz library to track
+ service liveliness`_, and Tooz supports Zookeeper.
+
+ .. _`incompatible with recent eventlet releases`: https://bugs.launchpad.net/nova/+bug/1443910
+ .. _`use the Tooz library to track service liveliness`: http://specs.openstack.org/openstack/nova-specs/specs/liberty/approved/service-group-using-tooz.html
diff --git a/requirements.txt b/requirements.txt
index 5f4bc6918e..3099ae6159 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,56 +2,57 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr>=1.6
-SQLAlchemy<1.1.0,>=1.0.10
-boto>=2.32.1
-decorator>=3.4.0
-eventlet>=0.17.4
+pbr>=1.6 # Apache-2.0
+SQLAlchemy<1.1.0,>=1.0.10 # MIT
+boto>=2.32.1 # MIT
+decorator>=3.4.0 # BSD
+eventlet!=0.18.3,>=0.18.2 # MIT
Jinja2>=2.8 # BSD License (3 clause)
-keystonemiddleware>=4.0.0
-lxml>=2.3
-Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7'
-Routes!=2.0,>=1.12.3;python_version!='2.7'
-cryptography>=1.0 # Apache-2.0
-WebOb>=1.2.3
-greenlet>=0.3.2
-PasteDeploy>=1.5.0
-Paste
-PrettyTable<0.8,>=0.7
-sqlalchemy-migrate>=0.9.6
-netaddr!=0.7.16,>=0.7.12
-netifaces>=0.10.4
-paramiko>=1.13.0
-Babel>=1.3
-iso8601>=0.1.9
-jsonschema!=2.5.0,<3.0.0,>=2.0.0
-python-cinderclient>=1.3.1
-python-keystoneclient!=1.8.0,>=1.6.0
-python-neutronclient>=2.6.0
-python-glanceclient>=1.2.0
-requests!=2.9.0,>=2.8.1
-six>=1.9.0
+keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0
+lxml>=2.3 # BSD
+Routes!=2.0,!=2.1,>=1.12.3;python_version=='2.7' # MIT
+Routes!=2.0,>=1.12.3;python_version!='2.7' # MIT
+cryptography>=1.0 # BSD/Apache-2.0
+WebOb>=1.2.3 # MIT
+greenlet>=0.3.2 # MIT
+PasteDeploy>=1.5.0 # MIT
+Paste # MIT
+PrettyTable<0.8,>=0.7 # BSD
+sqlalchemy-migrate>=0.9.6 # Apache-2.0
+netaddr!=0.7.16,>=0.7.12 # BSD
+netifaces>=0.10.4 # MIT
+paramiko>=1.16.0 # LGPL
+Babel>=1.3 # BSD
+iso8601>=0.1.9 # MIT
+jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
+python-cinderclient>=1.3.1 # Apache-2.0
+keystoneauth1>=2.1.0 # Apache-2.0
+python-neutronclient>=2.6.0 # Apache-2.0
+python-glanceclient>=1.2.0 # Apache-2.0
+requests!=2.9.0,>=2.8.1 # Apache-2.0
+six>=1.9.0 # MIT
stevedore>=1.5.0 # Apache-2.0
-setuptools>=16.0
-websockify>=0.6.1
-oslo.concurrency>=2.3.0 # Apache-2.0
-oslo.config>=3.2.0 # Apache-2.0
+setuptools>=16.0 # PSF/ZPL
+websockify>=0.6.1 # LGPLv3
+oslo.cache>=0.8.0 # Apache-2.0
+oslo.concurrency>=3.5.0 # Apache-2.0
+oslo.config>=3.4.0 # Apache-2.0
oslo.context>=0.2.0 # Apache-2.0
oslo.log>=1.14.0 # Apache-2.0
oslo.reports>=0.6.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0
-oslo.utils>=3.2.0 # Apache-2.0
+oslo.utils>=3.5.0 # Apache-2.0
oslo.db>=4.1.0 # Apache-2.0
oslo.rootwrap>=2.0.0 # Apache-2.0
-oslo.messaging!=2.8.0,!=3.1.0,>2.6.1 # Apache-2.0
+oslo.messaging>=4.0.0 # Apache-2.0
oslo.policy>=0.5.0 # Apache-2.0
-oslo.i18n>=1.5.0 # Apache-2.0
+oslo.i18n>=2.1.0 # Apache-2.0
oslo.service>=1.0.0 # Apache-2.0
rfc3986>=0.2.0 # Apache-2.0
oslo.middleware>=3.0.0 # Apache-2.0
-psutil<2.0.0,>=1.1.1
-oslo.versionedobjects>=0.13.0
-alembic>=0.8.0
-os-brick>=0.4.0 # Apache-2.0
-os-win>=0.0.7 # Apache-2.0
+psutil<2.0.0,>=1.1.1 # BSD
+oslo.versionedobjects>=1.5.0 # Apache-2.0
+alembic>=0.8.0 # MIT
+os-brick>=1.0.0 # Apache-2.0
+os-win>=0.2.1 # Apache-2.0
castellan>=0.3.1 # Apache-2.0
diff --git a/run_tests.sh b/run_tests.sh
index 710f8d5909..9cee6751b1 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -25,6 +25,10 @@ To run a subset of any of these tests:
i.e.: tox -e py27 test_servers
+To run the pep8 check against only the changed files (replacing './run_tests.sh -8'), use:
+
+ tox -e pep8 -- -HEAD
+
Additional tox targets are available in tox.ini. For more information
see:
http://docs.openstack.org/project-team-guide/project-setup/python.html
diff --git a/setup.cfg b/setup.cfg
index b31b8513cd..9a3133e1f2 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,7 +5,7 @@ description-file =
README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
-home-page = http://www.openstack.org/
+home-page = http://docs.openstack.org/developer/nova/
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
@@ -34,7 +34,7 @@ oslo.config.opts =
nova.network = nova.network.opts:list_opts
nova.network.neutronv2 = nova.network.neutronv2.api:list_opts
nova.virt = nova.virt.opts:list_opts
- nova.openstack.common.memorycache = nova.openstack.common.memorycache:list_opts
+ nova.cache_utils = nova.cache_utils:list_opts
nova.compute.monitors.cpu =
virt_driver = nova.compute.monitors.cpu.virt_driver:Monitor
@@ -137,6 +137,7 @@ nova.api.v21.extensions =
server_diagnostics = nova.api.openstack.compute.server_diagnostics:ServerDiagnostics
server_external_events = nova.api.openstack.compute.server_external_events:ServerExternalEvents
server_metadata = nova.api.openstack.compute.server_metadata:ServerMetadata
+ server_migrations = nova.api.openstack.compute.server_migrations:ServerMigrations
server_password = nova.api.openstack.compute.server_password:ServerPassword
server_usage = nova.api.openstack.compute.server_usage:ServerUsage
server_groups = nova.api.openstack.compute.server_groups:ServerGroups
@@ -187,6 +188,16 @@ nova.ipv6_backend =
rfc2462 = nova.ipv6.rfc2462
account_identifier = nova.ipv6.account_identifier
+nova.scheduler.host_manager =
+ host_manager = nova.scheduler.host_manager:HostManager
+ ironic_host_manager = nova.scheduler.ironic_host_manager:IronicHostManager
+
+nova.scheduler.driver =
+ filter_scheduler = nova.scheduler.filter_scheduler:FilterScheduler
+ caching_scheduler = nova.scheduler.caching_scheduler:CachingScheduler
+ chance_scheduler = nova.scheduler.chance:ChanceScheduler
+ fake_scheduler = nova.tests.unit.scheduler.fakes:FakeScheduler
+
[build_sphinx]
all_files = 1
build-dir = doc/build
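
As an illustration (not part of this patch), the new nova.scheduler.driver
entry points suggest the driver can be selected by short name in nova.conf;
a minimal sketch, assuming the scheduler_driver option accepts these names:

    # append to a test nova.conf (hypothetical path)
    cat >> /etc/nova/nova.conf <<'EOF'
    [DEFAULT]
    scheduler_driver = caching_scheduler
    EOF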
diff --git a/test-requirements.txt b/test-requirements.txt
index dc1c565ba0..49f7fc2ab6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -3,26 +3,26 @@
# process, which may cause wedges in the gate later.
hacking<0.11,>=0.10.0
-coverage>=3.6
-fixtures>=1.3.1
-mock>=1.2
-mox3>=0.7.0
-psycopg2>=2.5
+coverage>=3.6 # Apache-2.0
+fixtures>=1.3.1 # Apache-2.0/BSD
+mock>=1.2 # BSD
+mox3>=0.7.0 # Apache-2.0
+psycopg2>=2.5 # LGPL/ZPL
PyMySQL>=0.6.2 # MIT License
-python-barbicanclient>=3.3.0
-python-ironicclient>=0.8.0
-python-subunit>=0.0.18
+python-barbicanclient>=3.3.0 # Apache-2.0
+python-ironicclient>=1.1.0 # Apache-2.0
+python-subunit>=0.0.18 # Apache-2.0/BSD
requests-mock>=0.7.0 # Apache-2.0
-sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2
+sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
oslotest>=1.10.0 # Apache-2.0
-os-testr>=0.4.1
-testrepository>=0.0.18
-testresources>=0.2.4
-testtools>=1.4.0
-tempest-lib>=0.12.0
-bandit>=0.13.2
-openstackdocstheme>=1.0.3
+os-testr>=0.4.1 # Apache-2.0
+testrepository>=0.0.18 # Apache-2.0/BSD
+testresources>=0.2.4 # Apache-2.0/BSD
+testtools>=1.4.0 # MIT
+tempest-lib>=0.14.0 # Apache-2.0
+bandit>=0.17.3 # Apache-2.0
+openstackdocstheme>=1.0.3 # Apache-2.0
# vmwareapi driver specific dependencies
oslo.vmware>=1.16.0 # Apache-2.0
diff --git a/tests-py3.txt b/tests-py3.txt
index 3d63f3043c..ae7537c413 100644
--- a/tests-py3.txt
+++ b/tests-py3.txt
@@ -48,6 +48,7 @@ nova.tests.unit.api.openstack.compute.test_server_actions.ServerActionsControlle
nova.tests.unit.api.openstack.compute.test_serversV21.Base64ValidationTest
nova.tests.unit.api.openstack.compute.test_serversV21.ServersControllerCreateTest
nova.tests.unit.api.openstack.compute.test_serversV21.ServersControllerRebuildInstanceTest
+nova.tests.unit.api.openstack.compute.test_serversV21.ServersControllerRebuildTestV219
nova.tests.unit.api.openstack.compute.test_serversV21.ServersControllerTest
nova.tests.unit.api.openstack.compute.test_serversV21.ServersControllerTestV29
nova.tests.unit.api.openstack.compute.test_simple_tenant_usage.SimpleTenantUsageTestV2
@@ -63,25 +64,18 @@ nova.tests.unit.api.openstack.compute.test_volumes.VolumeApiTestV21
nova.tests.unit.api.test_compute_req_id.RequestIdTest
nova.tests.unit.api.test_validator.ValidatorTestCase
nova.tests.unit.api.test_wsgi.Test
-nova.tests.unit.cells.test_cells_messaging.CellsBroadcastMethodsTestCase
-nova.tests.unit.cells.test_cells_messaging.CellsMessageClassesTestCase
-nova.tests.unit.cells.test_cells_scheduler.CellsSchedulerTestCase
-nova.tests.unit.cells.test_cells_state_manager.TestCellsGetCapacity
-nova.tests.unit.cells.test_cells_state_manager.TestCellsStateManager
-nova.tests.unit.cells.test_cells_state_manager.TestCellsStateManagerNToOne
-nova.tests.unit.compute.test_compute.ComputeAPITestCase
-nova.tests.unit.compute.test_compute.ComputeInjectedFilesTestCase
+nova.tests.unit.compute.test_compute.ComputeAPITestCase.test_create_with_base64_user_data
+nova.tests.unit.compute.test_compute.ComputeInjectedFilesTestCase.test_injected_invalid
nova.tests.unit.compute.test_compute.ComputeTestCase.test_finish_resize_with_volumes
-nova.tests.unit.compute.test_compute.ComputeVolumeTestCase
-nova.tests.unit.compute.test_compute_api.SecurityGroupAPITest
-nova.tests.unit.compute.test_compute_cells.CellsComputeAPITestCase
+nova.tests.unit.compute.test_compute.ComputeVolumeTestCase.test_boot_volume_serial
+nova.tests.unit.compute.test_compute.ComputeVolumeTestCase.test_poll_bandwidth_usage_not_implemented
+nova.tests.unit.compute.test_compute.ComputeVolumeTestCase.test_prep_block_device_over_quota_failure
+nova.tests.unit.compute.test_compute.ComputeVolumeTestCase.test_prep_block_device_with_blanks
+nova.tests.unit.compute.test_compute_cells.CellsComputeAPITestCase.test_create_with_base64_user_data
+nova.tests.unit.compute.test_compute_mgr.ComputeManagerUnitTestCase.test_run_pending_deletes
nova.tests.unit.compute.test_host_api.ComputeHostAPICellsTestCase
nova.tests.unit.compute.test_resources.BaseTestCase
nova.tests.unit.compute.test_tracker.TestMoveClaim
-nova.tests.unit.conductor.test_conductor.ConductorLocalComputeTaskAPITestCase
-nova.tests.unit.conductor.test_conductor.ConductorTaskAPITestCase
-nova.tests.unit.conductor.test_conductor.ConductorTaskRPCAPITestCase
-nova.tests.unit.conductor.test_conductor.ConductorTaskTestCase
nova.tests.unit.console.test_websocketproxy.NovaProxyRequestHandlerBaseTestCase
nova.tests.unit.consoleauth.test_consoleauth.ControlauthMemcacheEncodingTestCase
nova.tests.unit.db.test_migrations.TestNovaMigrationsMySQL
@@ -93,9 +87,6 @@ nova.tests.unit.keymgr.test_conf_key_mgr.ConfKeyManagerTestCase
nova.tests.unit.keymgr.test_key.SymmetricKeyTestCase
nova.tests.unit.keymgr.test_mock_key_mgr.MockKeyManagerTestCase
nova.tests.unit.keymgr.test_single_key_mgr.SingleKeyManagerTestCase
-nova.tests.unit.network.test_linux_net.LinuxNetworkTestCase.test_get_dhcp_leases_for_nw00
-nova.tests.unit.network.test_linux_net.LinuxNetworkTestCase.test_get_dhcp_leases_for_nw01
-nova.tests.unit.network.test_manager.CommonNetworkTestCase
nova.tests.unit.network.test_manager.LdapDNSTestCase
nova.tests.unit.pci.test_manager.PciDevTrackerTestCase
nova.tests.unit.pci.test_stats.PciDeviceStatsTestCase
@@ -211,4 +202,4 @@ nova.tests.unit.virt.test_virt_drivers.AbstractDriverTestCase
nova.tests.unit.virt.vmwareapi.test_configdrive.ConfigDriveTestCase
nova.tests.unit.virt.vmwareapi.test_driver_api.VMwareAPIVMTestCase
nova.tests.unit.virt.xenapi.test_vmops.BootableTestCase
-nova.tests.unit.virt.xenapi.test_vmops.SpawnTestCase \ No newline at end of file
+nova.tests.unit.virt.xenapi.test_vmops.SpawnTestCase
diff --git a/tools/ebtables.workaround b/tools/ebtables.workaround
new file mode 100644
index 0000000000..4c1d8ed81b
--- /dev/null
+++ b/tools/ebtables.workaround
@@ -0,0 +1,35 @@
+#!/bin/bash
+#
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+#
+# This is a terrible, terrible, truly terrible workaround for
+# environments that have libvirt < 1.2.11. ebtables requires that you
+# explicitly pass it the --concurrent flag if you would like it to not
+# race and get punched in the face when two run at the same time.
+#
+# INSTALL instructions
+#
+# * Copy /sbin/ebtables to /sbin/ebtables.real
+# * Copy the ebtables.workaround script to /sbin/ebtables
+#
+# Note: upgrades to ebtables will overwrite this workaround. If you
+# are packaging this file, consider putting a trigger in place so that
+# the workaround is restored after an ebtables upgrade.
+#
+# Additional Note: this file can be removed from nova once our libvirt
+# minimum is >= 1.2.11.
+
+flock -w 300 /var/lock/ebtables.nova /sbin/ebtables.real "$@"
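
As an illustration, the INSTALL steps above amount to roughly the following,
assuming standard paths:

    cp /sbin/ebtables /sbin/ebtables.real
    cp tools/ebtables.workaround /sbin/ebtables
    chmod 755 /sbin/ebtables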
diff --git a/tools/reserve-migrations.py b/tools/reserve-migrations.py
new file mode 100755
index 0000000000..080fb3889a
--- /dev/null
+++ b/tools/reserve-migrations.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+
+import argparse
+import glob
+import os
+import subprocess
+
+BASE = 'nova/db/sqlalchemy/migrate_repo/versions'.split('/')
+API_BASE = 'nova/db/sqlalchemy/api_migrations/migrate_repo/versions'.split('/')
+
+STUB = """
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This is a placeholder for backports.
+# Do not use this number for new work. New work starts after
+# all the placeholders.
+#
+# See this for more information:
+# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html
+
+
+def upgrade(migrate_engine):
+ pass
+"""
+
+
+def get_last_migration(base):
+ path = os.path.join(*tuple(base + ['[0-9]*.py']))
+ migrations = sorted([os.path.split(fn)[-1] for fn in glob.glob(path)])
+ return int(migrations[-1].split('_')[0])
+
+
+def reserve_migrations(base, number, git_add):
+ last = get_last_migration(base)
+ for i in range(last + 1, last + number + 1):
+ name = '%03i_placeholder.py' % i
+ path = os.path.join(*tuple(base + [name]))
+ with open(path, 'w') as f:
+ f.write(STUB)
+ print('Created %s' % path)
+ if git_add:
+ subprocess.call(['git', 'add', path])
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-n', '--number', default=10,
+ type=int,
+ help='Number of migrations to reserve')
+ parser.add_argument('-g', '--git-add', action='store_const',
+ const=True, default=False,
+ help='Automatically git-add new migrations')
+ parser.add_argument('-a', '--api', action='store_const',
+ const=True, default=False,
+ help='Reserve migrations for the API database')
+ args = parser.parse_args()
+ if args.api:
+ base = API_BASE
+ else:
+ base = BASE
+ reserve_migrations(base, args.number, args.git_add)
+
+
+if __name__ == '__main__':
+ main()
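
As an illustration, a typical invocation reserves placeholder migrations in
the main database repo and stages them in git; pass --api to target the API
database repo instead:

    ./tools/reserve-migrations.py --number 5 --git-add
    ./tools/reserve-migrations.py --api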
diff --git a/tools/xenserver/rotate_xen_guest_logs.sh b/tools/xenserver/rotate_xen_guest_logs.sh
index 587ec194de..f01051c96c 100755
--- a/tools/xenserver/rotate_xen_guest_logs.sh
+++ b/tools/xenserver/rotate_xen_guest_logs.sh
@@ -22,6 +22,12 @@ syslog_tag='rotate_xen_guest_logs'
log_file_base="${log_dir}/console."
+# Only delete log files older than this number of minutes
+# to avoid a race where Xen creates the domain and starts
+# logging before the XAPI VM start returns (and allows us
+# to preserve the log file using last_dom_id)
+min_logfile_age=10
+
# Ensure logging is setup correctly for all domains
xenstore-write /local/logconsole/@ "${log_file_base}%d"
@@ -39,9 +45,9 @@ done
valid_last_dom_ids=$(xe vm-list params=other-config --minimal | tr ';,' '\n\n' | grep last_dom_id | sed -e 's/last_dom_id: //g' | xargs)
echo "Valid dom IDs: $valid_last_dom_ids" | /usr/bin/logger -t $syslog_tag
-# Remove console files that do not correspond to valid last_dom_id's
+# Remove old console files that do not correspond to valid last_dom_ids
allowed_consoles=".*console.\(${valid_last_dom_ids// /\\|}\)$"
-delete_logs=`find "$log_dir" -type f -not -regex "$allowed_consoles"`
+delete_logs=`find "$log_dir" -type f -mmin +${min_logfile_age} -not -regex "$allowed_consoles"`
for log in $delete_logs; do
if echo "$current_logs" | grep -q -w "$log"; then
echo "Deleting: $log" | /usr/bin/logger -t $syslog_tag
diff --git a/tox.ini b/tox.ini
index 4f3944047f..af938446e9 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,17 +9,12 @@ usedevelop = True
whitelist_externals = bash
find
rm
-install_command =
- constraints: {[testenv:common-constraints]install_command}
- pip install -U --force-reinstall {opts} {packages}
+install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
OS_TEST_PATH=./nova/tests/unit
LANGUAGE=en_US
LC_ALL=en_US.utf-8
-# TODO(mriedem): Move oslo.versionedobjects[fixtures] to test-requirements.txt
-# after I937823ffeb95725f0b55e298ebee1857d6482883 lands.
deps = -r{toxinidir}/test-requirements.txt
- oslo.versionedobjects[fixtures]
commands =
find . -type f -name "*.pyc" -delete
bash tools/pretty_tox.sh '{posargs}'
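
As an illustration, the constraints file baked into install_command can be
overridden locally by exporting UPPER_CONSTRAINTS_FILE before invoking tox:

    # hypothetical local path to a pinned constraints file
    UPPER_CONSTRAINTS_FILE=/tmp/upper-constraints.txt tox -e py27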
@@ -27,17 +22,13 @@ passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
# there is also secret magic in pretty_tox.sh which lets you run in a fail only
# mode. To do this define the TRACE_FAILONLY environmental variable.
-[testenv:common-constraints]
-install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
-
[testenv:pep8]
+basepython = python2.7
commands =
bash tools/flake8wrap.sh {posargs}
-
-[testenv:pep8-constraints]
-install_command = {[testenv:common-constraints]install_command}
-commands =
- bash tools/flake8wrap.sh {posargs}
+ # Check that .po and .pot files are valid.
+ bash -c "find nova -type f -regex '.*\.pot?' -print0| \
+ xargs -0 -n 1 msgfmt --check-format -o /dev/null"
[testenv:py34]
# NOTE(mriedem): If py34 fails with "db type could not be determined", delete
@@ -48,16 +39,8 @@ commands =
find . -type f -name "*.pyc" -delete
ostestr --blacklist_file tests-py3.txt
-[testenv:py34-constraints]
-install_command = {[testenv:common-constraints]install_command}
-setenv = {[testenv]setenv}
-deps = -r{toxinidir}/requirements.txt
- -r{toxinidir}/test-requirements.txt
-commands = {[testenv:py34]commands}
-
[testenv:functional]
usedevelop = True
-install_command = pip install -U --force-reinstall {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
OS_TEST_PATH=./nova/tests/functional
LANGUAGE=en_US
@@ -67,7 +50,6 @@ commands =
[testenv:api-samples]
usedevelop = True
-install_command = pip install -U --force-reinstall {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
GENERATE_SAMPLES=True
PYTHONHASHSEED=0
@@ -77,31 +59,16 @@ commands =
find . -type f -name "*.pyc" -delete
bash tools/pretty_tox.sh '{posargs}'
-[testenv:functional-constraints]
-usedevelop = {[testenv:functional]usedevelop}
-install_command = {[testenv:common-constraints]install_command}
-setenv = {[testenv:functional]setenv}
-commands =
- find . -type f -name "*.pyc" -delete
- bash tools/pretty_tox.sh '{posargs}'
-
[testenv:genconfig]
commands = oslo-config-generator --config-file=etc/nova/nova-config-generator.conf
[testenv:cover]
# Also do not run test_coverage_ext tests while gathering coverage as those
# tests conflict with coverage.
-commands =
- coverage erase
- python setup.py testr --coverage \
- --testr-args='{posargs}'
- coverage combine
- coverage html --include='nova/*' --omit='nova/openstack/common/*' -d covhtml -i
-
-[testenv:cover-constraints]
-install_command = {[testenv:common-constraints]install_command}
-# Also do not run test_coverage_ext tests while gathering coverage as those
-# tests conflict with coverage.
+# NOTE(sdague): this target does not use constraints because
+# upstream infra does not yet support it. Once that's fixed, we can
+# drop the install_command.
+install_command = pip install -U --force-reinstall {opts} {packages}
commands =
coverage erase
python setup.py testr --coverage \
@@ -112,10 +79,6 @@ commands =
[testenv:venv]
commands = {posargs}
-[testenv:venv-constraints]
-install_command = {[testenv:common-constraints]install_command}
-commands = {posargs}
-
[testenv:docs]
commands =
rm -rf doc/source/api doc/build api-guide/build
@@ -127,18 +90,24 @@ commands =
[testenv:api-guide]
# This environment is called from CI scripts to test and publish
# the API Guide to developer.openstack.org.
+# NOTE(sdague): this target does not use constraints because
+# upstream infra does not yet support it. Once that's fixed, we can
+# drop the install_command.
+install_command = pip install -U --force-reinstall {opts} {packages}
commands =
sphinx-build -b html -d api-guide/build/doctrees api-guide/source api-guide/build/html
-[testenv:docs-constraints]
-install_command = {[testenv:common-constraints]install_command}
-commands = {[testenv:docs]commands}
-
[testenv:bandit]
commands = bandit -c bandit.yaml -r nova -n 5 -ll
[testenv:releasenotes]
-commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+# NOTE(sdague): this target does not use constraints because
+# upstream infra does not yet support it. Once that's fixed, we can
+# drop the install_command.
+install_command = pip install -U --force-reinstall {opts} {packages}
+commands =
+ rm -rf releasenotes/build
+ sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[flake8]
# E125 is deliberately excluded. See https://github.com/jcrocholl/pep8/issues/126