summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--api-ref/source/os-fixed-ips.inc3
-rw-r--r--api-ref/source/os-instance-actions.inc36
-rw-r--r--api-ref/source/os-keypairs.inc9
-rw-r--r--api-ref/source/parameters.yaml138
-rw-r--r--api-ref/source/servers-actions.inc84
-rw-r--r--api-ref/source/servers-admin-action.inc6
-rw-r--r--doc/api_samples/keypairs/v2.35/keypairs-list-resp.json18
-rw-r--r--doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json12
-rw-r--r--doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json18
-rw-r--r--doc/api_samples/keypairs/v2.35/keypairs-post-req.json7
-rw-r--r--doc/api_samples/keypairs/v2.35/keypairs-post-resp.json10
-rw-r--r--doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json2
-rw-r--r--doc/api_samples/versions/v21-version-get-resp.json2
-rw-r--r--doc/api_samples/versions/versions-get-resp.json2
-rw-r--r--doc/notification_samples/instance-restore-end.json62
-rw-r--r--doc/notification_samples/instance-restore-start.json62
-rw-r--r--etc/nova/nova-config-generator.conf1
-rw-r--r--nova/api/openstack/__init__.py3
-rw-r--r--nova/api/openstack/api_version_request.py7
-rw-r--r--nova/api/openstack/common.py31
-rw-r--r--nova/api/openstack/compute/keypairs.py46
-rw-r--r--nova/api/openstack/compute/migrate_server.py30
-rw-r--r--nova/api/openstack/compute/server_tags.py36
-rw-r--r--nova/api/openstack/compute/servers.py4
-rw-r--r--nova/api/openstack/compute/views/keypairs.py (renamed from nova/api/sizelimit.py)19
-rw-r--r--nova/api/openstack/rest_api_version_history.rst18
-rw-r--r--nova/compute/api.py54
-rw-r--r--nova/compute/manager.py6
-rw-r--r--nova/compute/resource_tracker.py7
-rw-r--r--nova/conf/compute.py52
-rw-r--r--nova/conf/exceptions.py16
-rw-r--r--nova/conf/serial_console.py11
-rw-r--r--nova/db/api.py10
-rw-r--r--nova/db/sqlalchemy/api.py61
-rw-r--r--nova/notifications/objects/instance.py4
-rw-r--r--nova/objects/flavor.py2
-rw-r--r--nova/objects/keypair.py50
-rw-r--r--nova/objects/resource_provider.py78
-rw-r--r--nova/objects/virtual_interface.py8
-rw-r--r--nova/scheduler/filters/core_filter.py15
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-resp.json.tpl18
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json.tpl12
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json.tpl18
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-req.json.tpl7
-rw-r--r--nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-resp.json.tpl10
-rw-r--r--nova/tests/functional/api_sample_tests/test_keypairs.py73
-rw-r--r--nova/tests/functional/api_sample_tests/test_server_tags.py1
-rw-r--r--nova/tests/functional/db/test_keypair.py109
-rw-r--r--nova/tests/functional/notification_sample_tests/test_instance.py25
-rw-r--r--nova/tests/unit/api/openstack/compute/test_keypairs.py42
-rw-r--r--nova/tests/unit/api/openstack/compute/test_migrate_server.py109
-rw-r--r--nova/tests/unit/api/openstack/compute/test_server_tags.py2
-rw-r--r--nova/tests/unit/api/openstack/test_common.py12
-rw-r--r--nova/tests/unit/compute/test_compute.py962
-rw-r--r--nova/tests/unit/compute/test_compute_api.py53
-rwxr-xr-x[-rw-r--r--]nova/tests/unit/compute/test_compute_mgr.py45
-rw-r--r--nova/tests/unit/compute/test_keypairs.py2
-rw-r--r--nova/tests/unit/db/test_db_api.py100
-rw-r--r--nova/tests/unit/objects/test_keypair.py98
-rw-r--r--nova/tests/unit/objects/test_objects.py4
-rw-r--r--nova/tests/unit/objects/test_virtual_interface.py14
-rw-r--r--nova/tests/unit/virt/libvirt/test_driver.py66
-rw-r--r--nova/tests/unit/virt/vmwareapi/test_read_write_util.py44
-rw-r--r--nova/tests/unit/virt/xenapi/plugins/test_partition_utils.py6
-rw-r--r--nova/virt/block_device.py15
-rw-r--r--nova/virt/libvirt/blockinfo.py6
-rw-r--r--nova/virt/libvirt/driver.py4
-rw-r--r--nova/virt/vmwareapi/read_write_util.py55
-rw-r--r--nova/virt/vmwareapi/vmops.py40
-rw-r--r--nova/virt/xenapi/volume_utils.py2
-rw-r--r--releasenotes/notes/async-live-migration-rest-check-675ec309a9ccc28e.yaml8
-rw-r--r--releasenotes/notes/bp-keypairs-pagination-634c46aaa1058161.yaml5
-rw-r--r--releasenotes/notes/notification-transformation-newton-29a9324d1428b7d3.yaml20
-rw-r--r--releasenotes/notes/remove-config-serial-listen-2660be1c0863ea5a.yaml6
74 files changed, 2028 insertions, 1005 deletions
diff --git a/api-ref/source/os-fixed-ips.inc b/api-ref/source/os-fixed-ips.inc
index e8ba1f8c2e..249f882337 100644
--- a/api-ref/source/os-fixed-ips.inc
+++ b/api-ref/source/os-fixed-ips.inc
@@ -1,5 +1,4 @@
.. -*- rst -*-
-.. needs:example_verification
======================================
Fixed IPs (os-fixed-ips) (DEPRECATED)
@@ -81,4 +80,4 @@ Request
Response
--------
-There is no body content for the response of a successful POST query
+There is no body content for the response of a successful POST operation.
diff --git a/api-ref/source/os-instance-actions.inc b/api-ref/source/os-instance-actions.inc
index 20ac6b75df..2d59bcda6e 100644
--- a/api-ref/source/os-instance-actions.inc
+++ b/api-ref/source/os-instance-actions.inc
@@ -1,5 +1,4 @@
.. -*- rst -*-
-.. needs:parameter_verification
.. needs:example_verification
.. needs:body_verification
@@ -30,11 +29,23 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
Response
--------
+.. rest_parameters:: parameters.yaml
+
+
+ - instanceActions: instanceActions
+ - action: action
+ - instance_uuid: instance_id_body
+ - message: message
+ - project_id: project_id_instance_action
+ - request_id: request_id_body
+ - start_time: start_time
+ - user_id: user_id
+
**Example List Actions For Server: JSON response**
.. literalinclude:: ../../doc/api_samples/os-instance-actions/instance-actions-list-resp.json
@@ -60,12 +71,31 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- request_id: request_id
Response
--------
+
+.. rest_parameters:: parameters.yaml
+
+
+ - instanceAction: instanceAction
+ - action: action
+ - instance_uuid: instance_id_body
+ - message: message
+ - project_id: project_id_instance_action
+ - request_id: request_id_body
+ - start_time: start_time
+ - user_id: user_id
+ - events: instance_action_events
+ - events.event: event
+ - events.start_time: event_start_time
+ - events.finish_time: event_finish_time
+ - events.result: event_result
+ - events.traceback: event_traceback
+
**Example Show Server Action Details: JSON response**
.. literalinclude:: ../../doc/api_samples/os-instance-actions/instance-action-get-resp.json
diff --git a/api-ref/source/os-keypairs.inc b/api-ref/source/os-keypairs.inc
index 042fb9a6d7..033df25871 100644
--- a/api-ref/source/os-keypairs.inc
+++ b/api-ref/source/os-keypairs.inc
@@ -17,6 +17,15 @@ Normal response codes: 200
Error response codes: unauthorized(401), forbidden(403)
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+ - user_id: keypair_user
+ - limit: keypair_limit
+ - marker: keypair_marker
+
Response
--------
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
index 95e5d93633..e49113a6c4 100644
--- a/api-ref/source/parameters.yaml
+++ b/api-ref/source/parameters.yaml
@@ -477,6 +477,25 @@ ip_query:
in: query
required: false
type: string
+keypair_limit:
+ description: |
+ Requests a page size of items. Returns a number of items up to a limit value.
+ Use the ``limit`` parameter to make an initial limited request and use the
+ last-seen item from the response as the ``marker`` parameter value in a
+ subsequent limited request.
+ in: query
+ required: false
+ type: integer
+ min_version: 2.35
+keypair_marker:
+ description: |
+ The last-seen item. Use the ``limit`` parameter to make an initial limited
+ request and use the last-seen item from the response as the ``marker``
+ parameter value in a subsequent limited request.
+ in: query
+ required: false
+ type: string
+ min_version: 2.35
keypair_type_in:
in: query
required: false
@@ -484,6 +503,14 @@ keypair_type_in:
description: |
The type of the keypair. Allowed values are ``ssh`` or ``x509``
min_version: 2.2
+keypair_user:
+ in: query
+ required: false
+ type: string
+ description: |
+    This allows administrative users to list key-pairs of a specified
+    user ID.
+ min_version: 2.10
limit:
description: |
Requests a page size of items. Returns a number of items up to a limit value.
@@ -687,6 +714,12 @@ accessIPv6_in:
type: string
description: |
IPv6 address that should be used to access this server.
+action:
+ description: |
+ The name of the action.
+ in: body
+ required: true
+ type: string
action_reserve:
description: |
The attribute to reserve an IP with a value of ``null``.
@@ -1386,6 +1419,27 @@ evacuate:
in: body
required: true
type: string
+event:
+ description: |
+ The name of the event.
+ in: body
+ required: true
+ type: string
+event_finish_time:
+ description: |
+ The date and time when the event was finished. The date and time
+ stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_
+
+ ::
+
+ CCYY-MM-DDThh:mm:ss±hh:mm
+
+ For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm``
+ value, if included, is the time zone as an offset from UTC. In
+ the previous example, the offset value is ``-05:00``.
+ in: body
+ required: true
+ type: string
event_name:
description: |
The event name. A valid value is ``network-changed``, ``network-vif-plugged``,
@@ -1393,6 +1447,27 @@ event_name:
in: body
required: true
type: string
+event_result:
+ description: |
+ The result of the event.
+ in: body
+ required: true
+ type: string
+event_start_time:
+ description: |
+ The date and time when the event was started. The date and time
+ stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_
+
+ ::
+
+ CCYY-MM-DDThh:mm:ss±hh:mm
+
+ For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm``
+ value, if included, is the time zone as an offset from UTC. In
+ the previous example, the offset value is ``-05:00``.
+ in: body
+ required: true
+ type: string
event_status:
description: |
The event status. A valid value is ``failed``, ``completed``, or ``in-progress``.
@@ -1406,6 +1481,12 @@ event_tag:
in: body
required: false
type: string
+event_traceback:
+ description: |
+    The traceback stack if an error occurred in this event.
+ in: body
+ required: true
+ type: string
events:
description: |
The action.
@@ -2180,6 +2261,18 @@ injectNetworkInfo:
in: body
required: true
type: string
+instance_action_events:
+ description: |
+    The events which occurred in this action.
+ in: body
+ required: true
+ type: array
+instance_id_body:
+ description: |
+ The UUID of the server.
+ in: body
+ required: true
+ type: string
instance_name:
description: |
The name of the instance.
@@ -2211,6 +2304,18 @@ instance_usage_audit_task_state:
in: body
required: true
type: string
+instanceAction:
+ description: |
+ The instance action object.
+ in: body
+ required: true
+ type: object
+instanceActions:
+ description: |
+ List of the actions for the given instance.
+ in: body
+ required: true
+ type: array
instances:
description: |
The number of allowed instances for each tenant.
@@ -2502,6 +2607,12 @@ memory_mb_used:
in: body
required: true
type: integer
+message:
+ description: |
+    The related error message, if an error occurred during this action.
+ in: body
+ required: true
+ type: string
meta:
description: |
The object of detailed key metadata items.
@@ -3091,6 +3202,12 @@ project_id:
in: body
required: false
type: string
+project_id_instance_action:
+ description: |
+ The UUID of the project that this server belongs to.
+ in: body
+  required: true
+ type: string
project_id_server_group:
description: |
The project ID who owns the server group.
@@ -3165,6 +3282,12 @@ removeTenantAccess:
in: body
required: true
type: string
+request_id_body:
+ description: |
+    The request id generated when executing the API call for this action.
+ in: body
+ required: true
+ type: string
rescue:
description: |
The action.
@@ -3581,6 +3704,21 @@ source_type:
in: body
required: true
type: string
+start_time:
+ description: |
+ The date and time when the action was started. The date and time
+ stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_
+
+ ::
+
+ CCYY-MM-DDThh:mm:ss±hh:mm
+
+ For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm``
+ value, if included, is the time zone as an offset from UTC. In
+ the previous example, the offset value is ``-05:00``.
+ in: body
+ required: true
+ type: string
subnet_id:
description: |
The UUID of the subnet.
diff --git a/api-ref/source/servers-actions.inc b/api-ref/source/servers-actions.inc
index 1d3d764aa8..7d99567f38 100644
--- a/api-ref/source/servers-actions.inc
+++ b/api-ref/source/servers-actions.inc
@@ -10,8 +10,8 @@
Enables all users to perform an action on a server. Specify the action
in the request body.
-You can associate a fixed or floating IP address with a server instance,
-or disassociate a fixed or floating IP address from a server instance.
+You can associate a fixed or floating IP address with a server,
+or disassociate a fixed or floating IP address from a server.
You can attach a volume to a server.
You can create an image from a server, evacuate a server from a failed
@@ -62,7 +62,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- addFloatingIp: addFloatingIp
- address: address
- fixed_address: fixed_address
@@ -76,6 +76,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Add Security Group To A Server (addSecurityGroup Action)
========================================================
@@ -96,7 +98,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- addSecurityGroup: addSecurityGroup
- name: name
@@ -108,6 +110,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Change Administrative Password (changePassword Action)
======================================================
@@ -132,7 +136,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- changePassword: changePassword
**Example Change Administrative Password (changePassword Action): JSON request**
@@ -143,6 +147,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Confirm Resized Server (confirmResize Action)
=============================================
@@ -184,7 +190,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- confirmResize: confirmResize
**Example Confirm Resized Server (confirmResize Action): JSON request**
@@ -195,6 +201,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Create Image (createImage Action)
=================================
@@ -248,7 +256,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- createImage: createImage
**Example Create Image (createImage Action): JSON request**
@@ -259,6 +267,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Lock Server (lock Action)
=========================
@@ -282,7 +292,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- lock: lock
**Example Lock Server (lock Action): JSON request**
@@ -293,6 +303,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Pause Server (pause Action)
===========================
@@ -317,7 +329,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- pause: pause
**Example Pause Server (pause Action): JSON request**
@@ -328,6 +340,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Reboot Server (reboot Action)
=============================
@@ -348,7 +362,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- reboot: reboot
**Example Reboot Server (reboot Action): JSON request**
@@ -359,6 +373,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Rebuild Server (rebuild Action)
===============================
@@ -382,7 +398,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- rebuild: rebuild
- imageRef: imageRef
- name: name
@@ -431,7 +447,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- removeFloatingIp: removeFloatingIp
- address: address
@@ -444,6 +460,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Remove Security Group From A Server (removeSecurityGroup Action)
================================================================
@@ -464,7 +482,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- removeSecurityGroup: removeSecurityGroup
- name: name
@@ -476,6 +494,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Rescue Server (rescue Action)
=============================
@@ -505,7 +525,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- rescue: rescue
- adminPass: adminPass_rescue
- rescue_image_ref: rescue_image_ref
@@ -560,7 +580,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- resize: resize
**Example Resize Server (Resize Action): JSON request**
@@ -571,6 +591,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Resume Suspended Server (resume Action)
=======================================
@@ -595,7 +617,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- resume: resume
**Example Resume Suspended Server (Resume Action): JSON request**
@@ -606,6 +628,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Revert Resized Server (revertResize Action)
===========================================
@@ -650,7 +674,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- revertResize: revertResize
**Example Revert Resized Server (revertResize Action): JSON request**
@@ -661,6 +685,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Start Server (os-start Action)
==============================
@@ -700,7 +726,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- os-start: os-start
**Example Start server: JSON request**
@@ -711,6 +737,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Stop Server (os-stop Action)
============================
@@ -744,7 +772,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- os-stop: os-stop
**Example Stop server: JSON request**
@@ -755,6 +783,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Suspend Server (suspend Action)
===============================
@@ -779,7 +809,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- suspend: suspend
**Example Suspend Server (suspend Action): JSON request**
@@ -790,6 +820,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Unlock Server (unlock Action)
=============================
@@ -813,7 +845,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- unlock: unlock
**Example Unlock Server (unlock Action): JSON request**
@@ -824,6 +856,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Unpause Server (unpause Action)
===============================
@@ -848,7 +882,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- unpause: unpause
**Example Unpause Server (unpause Action): JSON request**
@@ -859,6 +893,8 @@ Request
Response
--------
+There is no body content for the response of a successful POST operation.
+
Unrescue Server (unrescue Action)
=================================
@@ -891,7 +927,7 @@ Request
.. rest_parameters:: parameters.yaml
- - server_id: server_id
+ - server_id: server_id_path
- unrescue: unrescue
**Example Unrescue server: JSON request**
@@ -901,3 +937,5 @@ Request
Response
--------
+
+There is no body content for the response of a successful POST operation.
diff --git a/api-ref/source/servers-admin-action.inc b/api-ref/source/servers-admin-action.inc
index 84a86951ca..d48d181e84 100644
--- a/api-ref/source/servers-admin-action.inc
+++ b/api-ref/source/servers-admin-action.inc
@@ -155,6 +155,12 @@ Policy defaults enable only users with the administrative role to
perform this operation. Cloud providers can change these permissions
through the ``policy.json`` file.
+Starting from REST API version 2.34 pre-live-migration checks are done
+asynchronously, results of these checks are available in ``instance-actions``.
+Nova responds immediately, and no pre-live-migration checks are returned.
+The instance will not immediately change state to ``ERROR``, if a failure of
+the live-migration checks occurs.
+
Normal response codes: 202
Error response codes: badRequest(400), unauthorized(401), forbidden(403)
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-list-resp.json b/doc/api_samples/keypairs/v2.35/keypairs-list-resp.json
new file mode 100644
index 0000000000..69c8ec4f14
--- /dev/null
+++ b/doc/api_samples/keypairs/v2.35/keypairs-list-resp.json
@@ -0,0 +1,18 @@
+{
+ "keypairs": [
+ {
+ "keypair": {
+ "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd",
+ "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
+ "type": "ssh",
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n"
+ }
+ }
+ ],
+ "keypairs_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/keypairs?limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
+ "rel": "next"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json b/doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json
new file mode 100644
index 0000000000..547cb000c9
--- /dev/null
+++ b/doc/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json
@@ -0,0 +1,12 @@
+{
+ "keypairs": [
+ {
+ "keypair": {
+ "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd",
+ "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
+ "type": "ssh",
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n"
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json b/doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json
new file mode 100644
index 0000000000..939a1c2c3d
--- /dev/null
+++ b/doc/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json
@@ -0,0 +1,18 @@
+{
+ "keypairs": [
+ {
+ "keypair": {
+ "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd",
+ "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
+ "type": "ssh",
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n"
+ }
+ }
+ ],
+ "keypairs_links": [
+ {
+ "href": "http://openstack.example.com/v2.1/6f70656e737461636b20342065766572/keypairs?user_id=user2&limit=1&marker=keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
+ "rel": "next"
+ }
+ ]
+} \ No newline at end of file
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-post-req.json b/doc/api_samples/keypairs/v2.35/keypairs-post-req.json
new file mode 100644
index 0000000000..005a3f5045
--- /dev/null
+++ b/doc/api_samples/keypairs/v2.35/keypairs-post-req.json
@@ -0,0 +1,7 @@
+{
+ "keypair": {
+ "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9",
+ "type": "ssh",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/keypairs/v2.35/keypairs-post-resp.json b/doc/api_samples/keypairs/v2.35/keypairs-post-resp.json
new file mode 100644
index 0000000000..394960868b
--- /dev/null
+++ b/doc/api_samples/keypairs/v2.35/keypairs-post-resp.json
@@ -0,0 +1,10 @@
+{
+ "keypair": {
+ "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd",
+ "name": "keypair-ab9ff2e6-a6d7-4915-a241-044c369c07f9",
+ "type": "ssh",
+ "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEApBdzF+fTq5QbN3R+QlO5TZr6W64GcUqcho5ZxPBZZIq53P1K\ndtpaY856ManqEwME1tN+JOw8+mmCK2RpkMHtk5BNPOMqr5Y+OQ5MqI/eX1v7GWnJ\ntHGTbi+vRDmxBh3aa3xiUGo66c9tjUKAg/ExQfFr/vKJvTR/S3urPlj3vfFgu+yi\n8PKoH0LGyHsviWsD1peDuu2XS+ca8qbkY3yD1o4Mv1R/OSF4P2fxjjWdp8R4EkoT\nJMKkhRAgAuS9zxwftPv9djP4opHWrRUlRo6bh75CzrN6Hu5uh5Tn5bkifOQcy1gW\n772vd6pBpi4OGQHPKz4djvmCLAVBzSyzDP6EKQIDAQABAoIBAQCB+tU/ZXKlIe+h\nMNTmoz1QfOe+AY625Rwx9cakGqMk4kKyC62VkgcxshfXCToSjzyhEuyEQOFYloT2\n7FY2xXb0gcS861Efv0pQlcQhbbz/GnQ/wC13ktPu3zTdPTm9l54xsFiMTGmYVaf4\n0mnMmhyjmKIsVGDJEDGZUD/oZj7wJGOFha5M4FZrZlJIrEZC0rGGlcC0kGF2no6B\nj1Mu7HjyK3pTKf4dlp+jeRikUF5Pct+qT+rcv2rZ3fl3inxtlLEwZeFPbp/njf/U\nIGxFzZsuLmiFlsJar6M5nEckTB3p25maWWaR8/0jvJRgsPnuoUrUoGDq87DMKCdk\nlw6by9fRAoGBANhnS9ko7Of+ntqIFR7xOG9p/oPATztgHkFxe4GbQ0leaDRTx3vE\ndQmUCnn24xtyVECaI9a4IV+LP1npw8niWUJ4pjgdAlkF4cCTu9sN+cBO15SfdACI\nzD1DaaHmpFCAWlpTo68VWlvWll6i2ncCkRJR1+q/C/yQz7asvl4AakElAoGBAMId\nxqMT2Sy9xLuHsrAoMUvBOkwaMYZH+IAb4DvUDjVIiKWjmonrmopS5Lpb+ALBKqZe\neVfD6HwWQqGwCFItToaEkZvrNfTapoNCHWWg001D49765UV5lMrArDbM1vXtFfM4\nDRYM6+Y6o/6QH8EBgXtyBxcYthIDBM3wBJa67xG1AoGAKTm8fFlMkIG0N4N3Kpbf\nnnH915GaRoBwIx2AXtd6QQ7oIRfYx95MQY/fUw7SgxcLr+btbulTCkWXwwRClUI2\nqPAdElGMcfMp56r9PaTy8EzUyu55heSJrB4ckIhEw0VAcTa/1wnlVduSd+LkZYmq\no2fOD11n5iycNXvBJF1F4LUCgYAMaRbwCi7SW3eefbiA5rDwJPRzNSGBckyC9EVL\nzezynyaNYH5a3wNMYKxa9dJPasYtSND9OXs9o7ay26xMhLUGiKc+jrUuaGRI9Asp\nGjUoNXT2JphN7s4CgHsCLep4YqYKnMTJah4S5CDj/5boIg6DM/EcGupZEHRYLkY8\n1MrAGQKBgQCi9yeC39ctLUNn+Ix604gttWWChdt3ozufTZ7HybJOSRA9Gh3iD5gm\nzlz0xqpGShKpOY2k+ftvja0poMdGeJLt84P3r2q01IgI7w0LmOj5m0W10dHysH27\nBWpCnHdBJMxnBsMRPoM4MKkmKWD9l5PSTCTWtkIpsyuDCko6D9UwZA==\n-----END RSA PRIVATE KEY-----\n",
+ "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGjlnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qgfQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3PHB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+YIsBUHNLLMM/oQp Generated-by-Nova\n",
+ "user_id": "fake"
+ }
+}
diff --git a/doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json b/doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json
index 2d32f56e5b..997129b490 100644
--- a/doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json
+++ b/doc/api_samples/os-assisted-volume-snapshots/snapshot-create-assisted-req.json
@@ -5,7 +5,7 @@
"snapshot_id": "421752a6-acf6-4b2d-bc7a-119f9148cd8c",
"type": "qcow2",
"new_file": "new_file_name",
- "id": "421752a6-acf6-4b2d-bc7a-119f9148cd8c",
+ "id": "421752a6-acf6-4b2d-bc7a-119f9148cd8c"
}
}
}
diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json
index 3d2e02d183..15d4d28d2f 100644
--- a/doc/api_samples/versions/v21-version-get-resp.json
+++ b/doc/api_samples/versions/v21-version-get-resp.json
@@ -19,7 +19,7 @@
}
],
"status": "CURRENT",
- "version": "2.33",
+ "version": "2.35",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json
index 5ce4234d16..83eedf97d4 100644
--- a/doc/api_samples/versions/versions-get-resp.json
+++ b/doc/api_samples/versions/versions-get-resp.json
@@ -22,7 +22,7 @@
}
],
"status": "CURRENT",
- "version": "2.33",
+ "version": "2.35",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
diff --git a/doc/notification_samples/instance-restore-end.json b/doc/notification_samples/instance-restore-end.json
new file mode 100644
index 0000000000..46afe08ebf
--- /dev/null
+++ b/doc/notification_samples/instance-restore-end.json
@@ -0,0 +1,62 @@
+{
+ "event_type":"instance.restore.end",
+ "payload":{
+ "nova_object.data":{
+ "architecture":"x86_64",
+ "availability_zone":null,
+ "created_at":"2012-10-29T13:42:11Z",
+ "deleted_at":null,
+ "display_name":"some-server",
+ "fault":null,
+ "host":"compute",
+ "host_name":"some-server",
+ "ip_addresses": [{
+ "nova_object.name": "IpPayload",
+ "nova_object.namespace": "nova",
+ "nova_object.version": "1.0",
+ "nova_object.data": {
+ "mac": "fa:16:3e:4c:2c:30",
+ "address": "192.168.1.3",
+ "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
+ "meta": {},
+ "version": 4,
+ "label": "private-network",
+ "device_name": "tapce531f90-19"
+ }
+ }],
+ "kernel_id":"",
+ "launched_at":"2012-10-29T13:42:11Z",
+ "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "metadata":{},
+ "node":"fake-mini",
+ "os_type":null,
+ "progress":0,
+ "ramdisk_id":"",
+ "reservation_id":"r-npxv0e40",
+ "state":"active",
+ "task_state":null,
+ "power_state":"running",
+ "tenant_id":"6f70656e737461636b20342065766572",
+ "terminated_at":null,
+ "flavor": {
+ "nova_object.name": "FlavorPayload",
+ "nova_object.data": {
+ "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3",
+ "root_gb": 1,
+ "vcpus": 1,
+ "ephemeral_gb": 0,
+ "memory_mb": 512
+ },
+ "nova_object.version": "1.0",
+ "nova_object.namespace": "nova"
+ },
+ "user_id":"fake",
+ "uuid":"178b0921-8f85-4257-88b6-2e743b5a975c"
+ },
+ "nova_object.name":"InstanceActionPayload",
+ "nova_object.namespace":"nova",
+ "nova_object.version":"1.0"
+ },
+ "priority":"INFO",
+ "publisher_id":"nova-compute:compute"
+} \ No newline at end of file
diff --git a/doc/notification_samples/instance-restore-start.json b/doc/notification_samples/instance-restore-start.json
new file mode 100644
index 0000000000..6106b8ed71
--- /dev/null
+++ b/doc/notification_samples/instance-restore-start.json
@@ -0,0 +1,62 @@
+{
+ "event_type":"instance.restore.start",
+ "payload":{
+ "nova_object.data":{
+ "architecture":"x86_64",
+ "availability_zone":null,
+ "created_at":"2012-10-29T13:42:11Z",
+ "deleted_at":null,
+ "display_name":"some-server",
+ "fault":null,
+ "host":"compute",
+ "host_name":"some-server",
+ "ip_addresses": [{
+ "nova_object.name": "IpPayload",
+ "nova_object.namespace": "nova",
+ "nova_object.version": "1.0",
+ "nova_object.data": {
+ "mac": "fa:16:3e:4c:2c:30",
+ "address": "192.168.1.3",
+ "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
+ "meta": {},
+ "version": 4,
+ "label": "private-network",
+ "device_name": "tapce531f90-19"
+ }
+ }],
+ "kernel_id":"",
+ "launched_at":"2012-10-29T13:42:11Z",
+ "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+ "metadata":{},
+ "node":"fake-mini",
+ "os_type":null,
+ "progress":0,
+ "ramdisk_id":"",
+ "reservation_id":"r-npxv0e40",
+ "state":"soft-delete",
+ "task_state":"restoring",
+ "power_state":"running",
+ "tenant_id":"6f70656e737461636b20342065766572",
+ "terminated_at":null,
+ "flavor": {
+ "nova_object.name": "FlavorPayload",
+ "nova_object.data": {
+ "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3",
+ "root_gb": 1,
+ "vcpus": 1,
+ "ephemeral_gb": 0,
+ "memory_mb": 512
+ },
+ "nova_object.version": "1.0",
+ "nova_object.namespace": "nova"
+ },
+ "user_id":"fake",
+ "uuid":"178b0921-8f85-4257-88b6-2e743b5a975c"
+ },
+ "nova_object.name":"InstanceActionPayload",
+ "nova_object.namespace":"nova",
+ "nova_object.version":"1.0"
+ },
+ "priority":"INFO",
+ "publisher_id":"nova-compute:compute"
+} \ No newline at end of file
diff --git a/etc/nova/nova-config-generator.conf b/etc/nova/nova-config-generator.conf
index 8a1ba1fcbd..c11a4153df 100644
--- a/etc/nova/nova-config-generator.conf
+++ b/etc/nova/nova-config-generator.conf
@@ -12,6 +12,7 @@ namespace = oslo.policy
namespace = oslo.service.periodic_task
namespace = oslo.service.service
namespace = oslo.db
+namespace = oslo.db.concurrency
namespace = oslo.middleware
namespace = oslo.concurrency
namespace = keystonemiddleware.auth_token
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 357ad56ec5..07dddd28c2 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -223,9 +223,6 @@ class APIRouterV21(base_wsgi.Router):
return 'nova.api.v21.extensions'
def __init__(self, init_only=None):
- # TODO(cyeoh): bp v3-api-extension-framework. Currently load
- # all extensions but eventually should be able to exclude
- # based on a config file
def _check_load_extension(ext):
return self._register_extension(ext)
diff --git a/nova/api/openstack/api_version_request.py b/nova/api/openstack/api_version_request.py
index 769ae17c1f..702aeb7443 100644
--- a/nova/api/openstack/api_version_request.py
+++ b/nova/api/openstack/api_version_request.py
@@ -83,6 +83,11 @@ REST_API_VERSION_HISTORY = """REST API Version History:
* 2.32 - Add tag to networks and block_device_mapping_v2 in server boot
request body.
* 2.33 - Add pagination support for hypervisors.
+ * 2.34 - Checks before live-migration are made in asynchronous way.
os-migrateLive action does not throw badRequest in case of
pre-checks failure. Verification result is available via
instance-actions.
+ * 2.35 - Adds keypairs pagination support.
"""
# The minimum and maximum versions of the API supported
@@ -91,7 +96,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
_MIN_API_VERSION = "2.1"
-_MAX_API_VERSION = "2.33"
+_MAX_API_VERSION = "2.35"
DEFAULT_API_VERSION = _MIN_API_VERSION
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 13c19813c1..3fcb4bbdbb 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -195,6 +195,8 @@ def get_pagination_params(request):
params['page_size'] = _get_int_param(request, 'page_size')
if 'marker' in request.GET:
params['marker'] = _get_marker_param(request)
+ if 'offset' in request.GET:
+ params['offset'] = _get_int_param(request, 'offset')
return params
@@ -213,7 +215,7 @@ def _get_marker_param(request):
return request.GET['marker']
-def limited(items, request, max_limit=CONF.osapi_max_limit):
+def limited(items, request):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
@@ -223,28 +225,21 @@ def limited(items, request, max_limit=CONF.osapi_max_limit):
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
- :kwarg max_limit: The maximum number of items to return from 'items'
"""
- offset = request.GET.get("offset", 0)
- limit = request.GET.get('limit', max_limit)
-
- try:
- offset = utils.validate_integer(offset, "offset", min_value=0)
- limit = utils.validate_integer(limit, "limit", min_value=0)
- except exception.InvalidInput as e:
- raise webob.exc.HTTPBadRequest(explanation=e.format_message())
+ params = get_pagination_params(request)
+ offset = params.get('offset', 0)
+ limit = CONF.osapi_max_limit
+ limit = min(limit, params.get('limit') or limit)
- limit = min(max_limit, limit or max_limit)
- range_end = offset + limit
- return items[offset:range_end]
+ return items[offset:(offset + limit)]
-def get_limit_and_marker(request, max_limit=CONF.osapi_max_limit):
- """get limited parameter from request."""
+def get_limit_and_marker(request):
+ """Get limited parameter from request."""
params = get_pagination_params(request)
- limit = params.get('limit', max_limit)
- limit = min(max_limit, limit)
- marker = params.get('marker')
+ limit = CONF.osapi_max_limit
+ limit = min(limit, params.get('limit', limit))
+ marker = params.get('marker', None)
return limit, marker
diff --git a/nova/api/openstack/compute/keypairs.py b/nova/api/openstack/compute/keypairs.py
index 935ae16144..fbfd23cb99 100644
--- a/nova/api/openstack/compute/keypairs.py
+++ b/nova/api/openstack/compute/keypairs.py
@@ -18,8 +18,10 @@
import webob
import webob.exc
+from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import keypairs
+from nova.api.openstack.compute.views import keypairs as keypairs_view
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
@@ -36,8 +38,12 @@ ALIAS = 'os-keypairs'
class KeypairController(wsgi.Controller):
"""Keypair API controller for the OpenStack API."""
+
+ _view_builder_class = keypairs_view.ViewBuilder
+
def __init__(self):
self.api = compute_api.KeypairAPI()
+ super(KeypairController, self).__init__()
def _filter_keypair(self, keypair, **attrs):
# TODO(claudiub): After v2 and v2.1 is no longer supported,
@@ -221,7 +227,13 @@ class KeypairController(wsgi.Controller):
# behaviors in this keypair resource.
return {'keypair': keypair}
- @wsgi.Controller.api_version("2.10")
+ @wsgi.Controller.api_version("2.35")
+ @extensions.expected_errors(400)
+ def index(self, req):
+ user_id = self._get_user_id(req)
+ return self._index(req, links=True, type=True, user_id=user_id)
+
+ @wsgi.Controller.api_version("2.10", "2.34") # noqa
@extensions.expected_errors(())
def index(self, req):
# handle optional user-id for admin only
@@ -238,20 +250,38 @@ class KeypairController(wsgi.Controller):
def index(self, req):
return self._index(req)
- def _index(self, req, user_id=None, **keypair_filters):
+ def _index(self, req, user_id=None, links=False, **keypair_filters):
"""List of keypairs for a user."""
context = req.environ['nova.context']
user_id = user_id or context.user_id
context.can(kp_policies.POLICY_ROOT % 'index',
target={'user_id': user_id,
'project_id': context.project_id})
- key_pairs = self.api.get_key_pairs(context, user_id)
- rval = []
- for key_pair in key_pairs:
- rval.append({'keypair': self._filter_keypair(key_pair,
- **keypair_filters)})
- return {'keypairs': rval}
+ if api_version_request.is_supported(req, min_version='2.35'):
+ limit, marker = common.get_limit_and_marker(req)
+ else:
+ limit = marker = None
+
+ try:
+ key_pairs = self.api.get_key_pairs(
+ context, user_id, limit=limit, marker=marker)
+ except exception.MarkerNotFound as e:
+ raise webob.exc.HTTPBadRequest(explanation=e.format_message())
+
+ key_pairs = [self._filter_keypair(key_pair, **keypair_filters)
+ for key_pair in key_pairs]
+
+ keypairs_list = [{'keypair': key_pair} for key_pair in key_pairs]
+ keypairs_dict = {'keypairs': keypairs_list}
+
+ if links:
+ keypairs_links = self._view_builder.get_links(req, key_pairs)
+
+ if keypairs_links:
+ keypairs_dict['keypairs_links'] = keypairs_links
+
+ return keypairs_dict
class Controller(wsgi.Controller):
diff --git a/nova/api/openstack/compute/migrate_server.py b/nova/api/openstack/compute/migrate_server.py
index 0fa47f8f23..94e68260a3 100644
--- a/nova/api/openstack/compute/migrate_server.py
+++ b/nova/api/openstack/compute/migrate_server.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import log as logging
+from oslo_utils import excutils
from oslo_utils import strutils
from webob import exc
@@ -25,8 +27,10 @@ from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _
+from nova.i18n import _LE
from nova.policies import migrate_server as ms_policies
+LOG = logging.getLogger(__name__)
ALIAS = "os-migrate-server"
@@ -72,13 +76,9 @@ class MigrateServerController(wsgi.Controller):
host = body["os-migrateLive"]["host"]
block_migration = body["os-migrateLive"]["block_migration"]
force = None
-
+ async = api_version_request.is_supported(req, min_version='2.34')
if api_version_request.is_supported(req, min_version='2.30'):
- force = body["os-migrateLive"].get("force", False)
- force = strutils.bool_from_string(force, strict=True)
- if force is True and not host:
- message = _("Can't force to a non-provided destination")
- raise exc.HTTPBadRequest(explanation=message)
+ force = self._get_force_param_for_live_migration(body, host)
if api_version_request.is_supported(req, min_version='2.25'):
if block_migration == 'auto':
block_migration = None
@@ -97,7 +97,7 @@ class MigrateServerController(wsgi.Controller):
try:
instance = common.get_instance(self.compute_api, context, id)
self.compute_api.live_migrate(context, instance, block_migration,
- disk_over_commit, host, force)
+ disk_over_commit, host, force, async)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except (exception.NoValidHost,
@@ -111,13 +111,27 @@ class MigrateServerController(wsgi.Controller):
exception.HypervisorUnavailable,
exception.MigrationPreCheckError,
exception.LiveMigrationWithOldNovaNotSupported) as ex:
- raise exc.HTTPBadRequest(explanation=ex.format_message())
+ if async:
+ with excutils.save_and_reraise_exception():
+ LOG.error(_LE("Unexpected exception received from "
+ "conductor during pre-live-migration checks "
+ "'%(ex)s'"), {'ex': ex})
+ else:
+ raise exc.HTTPBadRequest(explanation=ex.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'os-migrateLive', id)
+ def _get_force_param_for_live_migration(self, body, host):
+ force = body["os-migrateLive"].get("force", False)
+ force = strutils.bool_from_string(force, strict=True)
+ if force is True and not host:
+ message = _("Can't force to a non-provided destination")
+ raise exc.HTTPBadRequest(explanation=message)
+ return force
+
class MigrateServer(extensions.V21APIExtensionBase):
"""Enable migrate and live-migrate server actions."""
diff --git a/nova/api/openstack/compute/server_tags.py b/nova/api/openstack/compute/server_tags.py
index 7461875f41..a229e7b26c 100644
--- a/nova/api/openstack/compute/server_tags.py
+++ b/nova/api/openstack/compute/server_tags.py
@@ -11,7 +11,7 @@
# under the License.
import jsonschema
-from webob import exc
+import webob
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import server_tags as schema
@@ -62,12 +62,12 @@ class ServerTagsController(wsgi.Controller):
try:
exists = objects.Tag.exists(context, server_id, id)
except exception.InstanceNotFound as e:
- raise exc.HTTPNotFound(explanation=e.format_message())
+ raise webob.exc.HTTPNotFound(explanation=e.format_message())
if not exists:
msg = (_("Server %(server_id)s has no tag '%(tag)s'")
% {'server_id': server_id, 'tag': id})
- raise exc.HTTPNotFound(explanation=msg)
+ raise webob.exc.HTTPNotFound(explanation=msg)
@wsgi.Controller.api_version("2.26")
@extensions.expected_errors(404)
@@ -78,7 +78,7 @@ class ServerTagsController(wsgi.Controller):
try:
tags = objects.TagList.get_by_resource_id(context, server_id)
except exception.InstanceNotFound as e:
- raise exc.HTTPNotFound(explanation=e.format_message())
+ raise webob.exc.HTTPNotFound(explanation=e.format_message())
return {'tags': _get_tags_names(tags)}
@@ -96,36 +96,36 @@ class ServerTagsController(wsgi.Controller):
msg = (_("Tag '%(tag)s' is invalid. It must be a string without "
"characters '/' and ','. Validation error message: "
"%(err)s") % {'tag': id, 'err': e.message})
- raise exc.HTTPBadRequest(explanation=msg)
+ raise webob.exc.HTTPBadRequest(explanation=msg)
try:
tags = objects.TagList.get_by_resource_id(context, server_id)
except exception.InstanceNotFound as e:
- raise exc.HTTPNotFound(explanation=e.format_message())
+ raise webob.exc.HTTPNotFound(explanation=e.format_message())
if len(tags) >= objects.instance.MAX_TAG_COUNT:
msg = (_("The number of tags exceeded the per-server limit %d")
% objects.instance.MAX_TAG_COUNT)
- raise exc.HTTPBadRequest(explanation=msg)
+ raise webob.exc.HTTPBadRequest(explanation=msg)
if len(id) > objects.tag.MAX_TAG_LENGTH:
msg = (_("Tag '%(tag)s' is too long. Maximum length of a tag "
"is %(length)d") % {'tag': id,
'length': objects.tag.MAX_TAG_LENGTH})
- raise exc.HTTPBadRequest(explanation=msg)
+ raise webob.exc.HTTPBadRequest(explanation=msg)
if id in _get_tags_names(tags):
# NOTE(snikitin): server already has specified tag
- return exc.HTTPNoContent()
+ return webob.Response(status_int=204)
tag = objects.Tag(context=context, resource_id=server_id, tag=id)
try:
tag.create()
except exception.InstanceNotFound as e:
- raise exc.HTTPNotFound(explanation=e.format_message())
+ raise webob.exc.HTTPNotFound(explanation=e.format_message())
- response = exc.HTTPCreated()
+ response = webob.Response(status_int=201)
response.headers['Location'] = self._view_builder.get_location(
req, server_id, id)
return response
@@ -147,7 +147,7 @@ class ServerTagsController(wsgi.Controller):
if invalid_tags:
msg = (_("Tags '%s' are invalid. Each tag must be a string "
"without characters '/' and ','.") % invalid_tags)
- raise exc.HTTPBadRequest(explanation=msg)
+ raise webob.exc.HTTPBadRequest(explanation=msg)
tag_count = len(body['tags'])
if tag_count > objects.instance.MAX_TAG_COUNT:
@@ -155,7 +155,7 @@ class ServerTagsController(wsgi.Controller):
"%(max)d. The number of tags in request is %(count)d.")
% {'max': objects.instance.MAX_TAG_COUNT,
'count': tag_count})
- raise exc.HTTPBadRequest(explanation=msg)
+ raise webob.exc.HTTPBadRequest(explanation=msg)
long_tags = [
t for t in body['tags'] if len(t) > objects.tag.MAX_TAG_LENGTH]
@@ -163,12 +163,12 @@ class ServerTagsController(wsgi.Controller):
msg = (_("Tags %(tags)s are too long. Maximum length of a tag "
"is %(length)d") % {'tags': long_tags,
'length': objects.tag.MAX_TAG_LENGTH})
- raise exc.HTTPBadRequest(explanation=msg)
+ raise webob.exc.HTTPBadRequest(explanation=msg)
try:
tags = objects.TagList.create(context, server_id, body['tags'])
except exception.InstanceNotFound as e:
- raise exc.HTTPNotFound(explanation=e.format_message())
+ raise webob.exc.HTTPNotFound(explanation=e.format_message())
return {'tags': _get_tags_names(tags)}
@@ -183,9 +183,9 @@ class ServerTagsController(wsgi.Controller):
try:
objects.Tag.destroy(context, server_id, id)
except exception.InstanceTagNotFound as e:
- raise exc.HTTPNotFound(explanation=e.format_message())
+ raise webob.exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceNotFound as e:
- raise exc.HTTPNotFound(explanation=e.format_message())
+ raise webob.exc.HTTPNotFound(explanation=e.format_message())
@wsgi.Controller.api_version("2.26")
@wsgi.response(204)
@@ -198,7 +198,7 @@ class ServerTagsController(wsgi.Controller):
try:
objects.TagList.destroy(context, server_id)
except exception.InstanceNotFound as e:
- raise exc.HTTPNotFound(explanation=e.format_message())
+ raise webob.exc.HTTPNotFound(explanation=e.format_message())
class ServerTags(extensions.V21APIExtensionBase):
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 8352c405f0..c970aedb81 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -662,10 +662,6 @@ class ServersController(wsgi.Controller):
# If the caller wanted a reservation_id, return it
if return_reservation_id:
- # NOTE(cyeoh): In v3 reservation_id was wrapped in
- # servers_reservation but this is reverted for V2 API
- # compatibility. In the long term with the tasks API we
- # will probably just drop the concept of reservation_id
return wsgi.ResponseObject({'reservation_id': resv_id})
req.cache_db_instances(instances)
diff --git a/nova/api/sizelimit.py b/nova/api/openstack/compute/views/keypairs.py
index 0d8587d824..020c7a0ac8 100644
--- a/nova/api/sizelimit.py
+++ b/nova/api/openstack/compute/views/keypairs.py
@@ -1,4 +1,5 @@
-# Copyright (c) 2012 OpenStack Foundation
+# Copyright 2016 Mirantis Inc
+# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -11,14 +12,14 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Request Body limiting middleware.
-"""
-from oslo_middleware import sizelimit
+from nova.api.openstack import common
-# TODO(ekudryashova): Remove below aliases when grenade jobs
-# will start upgrading from juno
-LimitingReader = sizelimit.LimitingReader
-RequestBodySizeLimiter = sizelimit.RequestBodySizeLimiter
+class ViewBuilder(common.ViewBuilder):
+
+ _collection_name = "keypairs"
+
+ def get_links(self, request, keypairs):
+ return self._get_collection_links(request, keypairs,
+ self._collection_name, 'name')
diff --git a/nova/api/openstack/rest_api_version_history.rst b/nova/api/openstack/rest_api_version_history.rst
index 34608bae00..fed6afa5b4 100644
--- a/nova/api/openstack/rest_api_version_history.rst
+++ b/nova/api/openstack/rest_api_version_history.rst
@@ -343,3 +343,21 @@ user documentation.
API request::
GET /v2.1/{tenant_id}/os-hypervisors?marker={hypervisor_id}&limit={limit}
+
+2.34
+----
+
+ Checks in ``os-migrateLive`` before live-migration actually starts are now
+ made in the background. ``os-migrateLive`` no longer returns ``400 Bad
+ Request`` if pre-live-migration checks fail.
+
+2.35
+----
+
+ Added pagination support for keypairs.
+
+ Optional parameters 'limit' and 'marker' were added to the GET /os-keypairs
+ request. The default sort key was changed to the 'name' field in ascending
+ order. The generic request format is::
+
+ GET /os-keypairs?limit={limit}&marker={kp_name}
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 013ef9cd5c..2b44137f5f 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1588,12 +1588,9 @@ class API(base.Base):
cb(context, instance, bdms, reservations=None)
quotas.commit()
return
- # NOTE(melwitt): We expect instances in certain states to have no
- # host set but also have resources on a compute host. If this is
- # the case, we want to try to clean up their resources. Otherwse,
- # we can destroy the instance here and return.
- expect_no_instance_host = self._expect_no_host(instance)
- if not instance.host and not expect_no_instance_host:
+ shelved_offloaded = (instance.vm_state
+ == vm_states.SHELVED_OFFLOADED)
+ if not instance.host and not shelved_offloaded:
try:
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
@@ -1605,17 +1602,7 @@ class API(base.Base):
system_metadata=instance.system_metadata)
quotas.commit()
return
- except exception.ObjectActionError as ex:
- # The instance's host likely changed under us as
- # this instance could be building and has since been
- # scheduled. Continue with attempts to delete it.
- # NOTE(melwitt): Often instance.host is None after the
- # refresh because the claim was aborted on the compute
- # host. The service up check will raise ComputeHostNotFound
- # in this case and we will do a local delete with compute
- # host cleanup
- LOG.debug('Refreshing instance because: %s', ex,
- instance=instance)
+ except exception.ObjectActionError:
instance.refresh()
if instance.vm_state == vm_states.RESIZED:
@@ -1623,7 +1610,7 @@ class API(base.Base):
is_local_delete = True
try:
- if not expect_no_instance_host:
+ if not shelved_offloaded:
service = objects.Service.get_by_compute_host(
context.elevated(), instance.host)
is_local_delete = not self.servicegroup_api.service_is_up(
@@ -1650,9 +1637,6 @@ class API(base.Base):
cb(context, instance, bdms,
reservations=quotas.reservations)
except exception.ComputeHostNotFound:
- # NOTE(melwitt): We expect this if instance.host has been
- # set to None by the compute host during a claim abort
- # and we pick it up in the instance.refresh()
pass
if is_local_delete:
@@ -1671,13 +1655,6 @@ class API(base.Base):
if quotas:
quotas.rollback()
- def _expect_no_host(self, instance):
- # NOTE(melwitt): Instances in ERROR state have their host and node
- # set to None as part of exception handling.
- if instance.vm_state in (vm_states.SHELVED_OFFLOADED, vm_states.ERROR):
- return True
- return False
-
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
@@ -1771,22 +1748,6 @@ class API(base.Base):
connector)
self.volume_api.detach(elevated, bdm.volume_id,
instance.uuid)
- except Exception as exc:
- err_str = _LW("Ignoring volume cleanup failure due to %s")
- LOG.warn(err_str % exc, instance=instance)
- # This block handles the following case:
- # 1. Instance scheduled to host and fails on the host.
- # 2. Compute manager's cleanup calls terminate_connection
- # and detach if the spawn made it that far.
- # 3. Instance fails to boot on all other reschedule attempts.
- # 4. Instance is left in error state with no assigned host.
- # 5. Volumes in the instance's BDMs are left in the available
- # state.
- # When this is the case the terminate_connection and detach
- # calls above will fail. We attempt the volume delete in a
- # separate try-except to clean up these volume and avoid the
- # storage leak.
- try:
if bdm.delete_on_termination:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
@@ -4020,9 +3981,10 @@ class KeypairAPI(base.Base):
objects.KeyPair.destroy_by_name(context, user_id, key_name)
self._notify(context, 'delete.end', key_name)
- def get_key_pairs(self, context, user_id):
+ def get_key_pairs(self, context, user_id, limit=None, marker=None):
"""List key pairs."""
- return objects.KeyPairList.get_by_user(context, user_id)
+ return objects.KeyPairList.get_by_user(
+ context, user_id, limit=limit, marker=marker)
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 132f011c1a..155bb6dac3 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -2534,6 +2534,9 @@ class ComputeManager(manager.Manager):
def restore_instance(self, context, instance):
"""Restore a soft-deleted instance on this host."""
self._notify_about_instance_usage(context, instance, "restore.start")
+ compute_utils.notify_about_instance_action(context, instance,
+ self.host, action=fields.NotificationAction.RESTORE,
+ phase=fields.NotificationPhase.START)
try:
self.driver.restore(instance)
except NotImplementedError:
@@ -2545,6 +2548,9 @@ class ComputeManager(manager.Manager):
instance.task_state = None
instance.save(expected_task_state=task_states.RESTORING)
self._notify_about_instance_usage(context, instance, "restore.end")
+ compute_utils.notify_about_instance_action(context, instance,
+ self.host, action=fields.NotificationAction.RESTORE,
+ phase=fields.NotificationPhase.END)
@staticmethod
def _set_migration_status(migration, status):
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index 4f3ac541e2..608ea79437 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -721,13 +721,12 @@ class ResourceTracker(object):
'evacuation')
def _get_migration_context_resource(self, resource, instance,
- prefix='new_', itype=None):
+ prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
- else:
- return None
+ return None
def _update_usage_from_migration(self, context, instance, image_meta,
migration):
@@ -836,7 +835,7 @@ class ResourceTracker(object):
migration)
except exception.FlavorNotFound:
LOG.warning(_LW("Flavor could not be found, skipping "
- "migration."), instance_uuid=uuid)
+ "migration."), instance_uuid=instance.uuid)
continue
def _update_usage_from_instance(self, context, instance, is_removed=False):
diff --git a/nova/conf/compute.py b/nova/conf/compute.py
index cc342d01db..1bb4b25c55 100644
--- a/nova/conf/compute.py
+++ b/nova/conf/compute.py
@@ -336,16 +336,56 @@ running_deleted_opts = [
cfg.StrOpt("running_deleted_instance_action",
default="reap",
choices=('noop', 'log', 'shutdown', 'reap'),
- help="Action to take if a running deleted instance is detected."
- "Set to 'noop' to take no action."),
+ help="""
+The compute service periodically checks for instances that have been
+deleted in the database but remain running on the compute node. The
+above option enables action to be taken when such instances are
+identified.
+
+Possible values:
+
+* reap: Powers down the instances and deletes them (default)
+* log: Logs warning message about deletion of the resource
+* shutdown: Powers down instances and marks them as non-
+  bootable which can later be used for debugging/analysis
+* noop: Takes no action
+
+Related options:
+
+* running_deleted_instance_poll_interval
+* running_deleted_instance_timeout
+"""),
cfg.IntOpt("running_deleted_instance_poll_interval",
default=1800,
- help="Number of seconds to wait between runs of the cleanup "
- "task."),
+ help="""
+Time interval in seconds to wait between runs for the clean up action.
+If set to 0, the above check will be disabled. If
+"running_deleted_instance_action" is set to "log" or "reap", this must be > 0.
+
+Possible values:
+
+* Any positive integer in seconds enables the option.
+* 0: Disables the option.
+* 1800: Default value.
+
+Related options:
+
+* running_deleted_instance_action
+"""),
cfg.IntOpt("running_deleted_instance_timeout",
default=0,
- help="Number of seconds after being deleted when a running "
- "instance should be considered eligible for cleanup."),
+ help="""
+Number of seconds after an instance has been marked as deleted in
+the database before it becomes eligible for cleanup.
+
+Possible values:
+
+* Any positive integer in seconds (default is 0).
+
+Related options:
+
+* "running_deleted_instance_action"
+"""),
]
instance_cleaning_opts = [
diff --git a/nova/conf/exceptions.py b/nova/conf/exceptions.py
index 57df7dfd93..94a34bf500 100644
--- a/nova/conf/exceptions.py
+++ b/nova/conf/exceptions.py
@@ -26,8 +26,20 @@ exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
deprecated_for_removal=True,
- deprecated_reason='This is only used for internal testing.',
- help='DEPRECATED: Make exception message format errors fatal'),
+ deprecated_reason="This is only used for internal testing.",
+ help="""
+When set to true, this option enables validation of exception
+message format.
+
+This option is used to detect errors in NovaException class when it formats
+error messages. If True, raise an exception; if False, use the unformatted
+message.
+
+Possible values:
+
+ * True
+ * False (Default)
+"""),
]
diff --git a/nova/conf/serial_console.py b/nova/conf/serial_console.py
index 3b84ffd925..eac01ae561 100644
--- a/nova/conf/serial_console.py
+++ b/nova/conf/serial_console.py
@@ -1,5 +1,4 @@
# needs:fix_opt_description
-# needs:check_deprecation_status
# needs:check_opt_group_and_type
# needs:fix_opt_description_indentation
# needs:fix_opt_registration_consistency
@@ -105,15 +104,6 @@ Interdependencies to other options:
and ``key`` in the ``[DEFAULT]`` section have to be set for that.
""")
-# This config option was never used
-listen_opt = cfg.StrOpt('listen',
- default='127.0.0.1',
- deprecated_for_removal=True,
- help="""
-DEPRECATED: this option has no effect anymore. Please use
-"proxyclient_address" instead. This option is deprecated and will be removed
-in future releases.""")
-
proxyclient_address_opt = cfg.StrOpt('proxyclient_address',
default='127.0.0.1',
help="""
@@ -187,7 +177,6 @@ Interdependencies to other options:
ALL_OPTS = [enabled_opt,
port_range_opt,
base_url_opt,
- listen_opt,
proxyclient_address_opt,
serialproxy_host_opt,
serialproxy_port_opt]
diff --git a/nova/db/api.py b/nova/db/api.py
index a664be73c0..694e9ba9cf 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -687,6 +687,11 @@ def virtual_interface_delete_by_instance(context, instance_id):
return IMPL.virtual_interface_delete_by_instance(context, instance_id)
+def virtual_interface_delete(context, id):
+ """Delete virtual interface by id."""
+ return IMPL.virtual_interface_delete(context, id)
+
+
def virtual_interface_get_all(context):
"""Gets all virtual interfaces from the table."""
return IMPL.virtual_interface_get_all(context)
@@ -964,9 +969,10 @@ def key_pair_get(context, user_id, name):
return IMPL.key_pair_get(context, user_id, name)
-def key_pair_get_all_by_user(context, user_id):
+def key_pair_get_all_by_user(context, user_id, limit=None, marker=None):
"""Get all key_pairs by user."""
- return IMPL.key_pair_get_all_by_user(context, user_id)
+ return IMPL.key_pair_get_all_by_user(
+ context, user_id, limit=limit, marker=marker)
def key_pair_count_by_user(context, user_id):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index a97c28d62f..80e5cdcd05 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -586,7 +586,7 @@ def _compute_node_select(context, filters=None, limit=None, marker=None):
try:
compute_node_get(context, marker)
except exception.ComputeHostNotFound:
- raise exception.MarkerNotFound(marker)
+ raise exception.MarkerNotFound(marker=marker)
select = select.where(cn_tbl.c.id > marker)
if limit is not None:
select = select.limit(limit)
@@ -838,8 +838,7 @@ def floating_ip_get(context, id):
if not result:
raise exception.FloatingIpNotFound(id=id)
except db_exc.DBError:
- msg = _LW("Invalid floating IP ID %s in request") % id
- LOG.warning(msg)
+ LOG.warning(_LW("Invalid floating IP ID %s in request"), id)
raise exception.InvalidID(id=id)
return result
@@ -1641,6 +1640,18 @@ def virtual_interface_delete_by_instance(context, instance_uuid):
@require_context
+@pick_context_manager_writer
+def virtual_interface_delete(context, id):
+ """Delete virtual interface records.
+
+ :param id: id of the interface
+ """
+ _virtual_interface_query(context).\
+ filter_by(id=id).\
+ soft_delete()
+
+
+@require_context
@pick_context_manager_reader
def virtual_interface_get_all(context):
"""Get all vifs."""
@@ -1679,10 +1690,9 @@ def _validate_unique_server_name(context, name):
instance_with_same_name = base_query.count()
else:
- msg = _('Unknown osapi_compute_unique_server_name_scope value: %s'
- ' Flag must be empty, "global" or'
- ' "project"') % CONF.osapi_compute_unique_server_name_scope
- LOG.warning(msg)
+ LOG.warning(_LW('Unknown osapi_compute_unique_server_name_scope value:'
+ ' %s. Flag must be empty, "global" or "project"'),
+ CONF.osapi_compute_unique_server_name_scope)
return
if instance_with_same_name > 0:
@@ -1867,8 +1877,7 @@ def instance_get(context, instance_id, columns_to_join=None):
except db_exc.DBError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
- msg = _("Invalid instance id %s in request") % instance_id
- LOG.warning(msg)
+ LOG.warning(_LW("Invalid instance id %s in request"), instance_id)
raise exception.InvalidID(id=instance_id)
@@ -2197,7 +2206,7 @@ def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
marker = _instance_get_by_uuid(
context.elevated(read_deleted='yes'), marker)
except exception.InstanceNotFound:
- raise exception.MarkerNotFound(marker)
+ raise exception.MarkerNotFound(marker=marker)
try:
query_prefix = sqlalchemyutils.paginate_query(query_prefix,
models.Instance, limit,
@@ -2929,10 +2938,21 @@ def key_pair_get(context, user_id, name):
@require_context
@main_context_manager.reader
-def key_pair_get_all_by_user(context, user_id):
- return model_query(context, models.KeyPair, read_deleted="no").\
- filter_by(user_id=user_id).\
- all()
+def key_pair_get_all_by_user(context, user_id, limit=None, marker=None):
+ marker_row = None
+ if marker is not None:
+ marker_row = model_query(context, models.KeyPair, read_deleted="no").\
+ filter_by(name=marker).filter_by(user_id=user_id).first()
+ if not marker_row:
+ raise exception.MarkerNotFound(marker=marker)
+
+ query = model_query(context, models.KeyPair, read_deleted="no").\
+ filter_by(user_id=user_id)
+
+ query = sqlalchemyutils.paginate_query(
+ query, models.KeyPair, limit, ['name'], marker=marker_row)
+
+ return query.all()
@require_context
@@ -4987,7 +5007,7 @@ def flavor_get_all(context, inactive=False, filters=None,
filter_by(flavorid=marker).\
first()
if not marker_row:
- raise exception.MarkerNotFound(marker)
+ raise exception.MarkerNotFound(marker=marker)
query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit,
[sort_key, 'id'],
@@ -5889,10 +5909,9 @@ def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
if attempt < max_retries - 1:
ctxt.reraise = False
else:
- msg = _("Add metadata failed for aggregate %(id)s after "
- "%(retries)s retries") % {"id": aggregate_id,
- "retries": max_retries}
- LOG.warning(msg)
+ LOG.warning(_LW("Add metadata failed for aggregate %(id)s "
+ "after %(retries)s retries"),
+ {"id": aggregate_id, "retries": max_retries})
@require_aggregate_exists
@@ -6326,8 +6345,8 @@ def _archive_deleted_rows_for_table(tablename, max_rows):
# these rows until we clean up a dependent table. Just
# skip this table for now; we'll come back to it later.
LOG.warning(_LW("IntegrityError detected when archiving table "
- "%(tablename)s: %(error)s"),
- {'tablename': tablename, 'error': six.text_type(ex)})
+ "%(tablename)s: %(error)s"),
+ {'tablename': tablename, 'error': six.text_type(ex)})
return rows_archived
rows_archived = result_delete.rowcount
diff --git a/nova/notifications/objects/instance.py b/nova/notifications/objects/instance.py
index da5fb79b85..e871edbbcf 100644
--- a/nova/notifications/objects/instance.py
+++ b/nova/notifications/objects/instance.py
@@ -247,8 +247,8 @@ class InstanceStateUpdatePayload(base.NotificationPayloadBase):
@base.notification_sample('instance-shelve-end.json')
# @base.notification_sample('instance-resume-start.json')
# @base.notification_sample('instance-resume-end.json')
-# @base.notification_sample('instance-restore-start.json')
-# @base.notification_sample('instance-restore-end.json')
+@base.notification_sample('instance-restore-start.json')
+@base.notification_sample('instance-restore-end.json')
@nova_base.NovaObjectRegistry.register_notification
class InstanceActionNotification(base.NotificationBase):
# Version 1.0: Initial version
diff --git a/nova/objects/flavor.py b/nova/objects/flavor.py
index 0007b2099e..d8bcaebe1a 100644
--- a/nova/objects/flavor.py
+++ b/nova/objects/flavor.py
@@ -629,7 +629,7 @@ def _flavor_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
filter_by(flavorid=marker).\
first()
if not marker_row:
- raise exception.MarkerNotFound(marker)
+ raise exception.MarkerNotFound(marker=marker)
query = sqlalchemyutils.paginate_query(query, api_models.Flavors,
limit,
diff --git a/nova/objects/keypair.py b/nova/objects/keypair.py
index 3cd305ae25..7fe9dc518e 100644
--- a/nova/objects/keypair.py
+++ b/nova/objects/keypair.py
@@ -13,6 +13,7 @@
# under the License.
from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_log import log as logging
from oslo_utils import versionutils
@@ -32,7 +33,7 @@ LOG = logging.getLogger(__name__)
@db_api.api_context_manager.reader
-def _get_from_db(context, user_id, name=None):
+def _get_from_db(context, user_id, name=None, limit=None, marker=None):
query = context.session.query(api_models.KeyPair).\
filter(api_models.KeyPair.user_id == user_id)
if name is not None:
@@ -41,8 +42,19 @@ def _get_from_db(context, user_id, name=None):
if not db_keypair:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return db_keypair
- else:
- return query.all()
+
+ marker_row = None
+ if marker is not None:
+ marker_row = context.session.query(api_models.KeyPair).\
+ filter(api_models.KeyPair.name == marker).\
+ filter(api_models.KeyPair.user_id == user_id).first()
+ if not marker_row:
+ raise exception.MarkerNotFound(marker=marker)
+
+ query = sqlalchemyutils.paginate_query(
+ query, api_models.KeyPair, limit, ['name'], marker=marker_row)
+
+ return query.all()
@db_api.api_context_manager.reader
@@ -180,24 +192,44 @@ class KeyPairList(base.ObjectListBase, base.NovaObject):
# KeyPair <= version 1.1
# Version 1.1: KeyPair <= version 1.2
# Version 1.2: KeyPair <= version 1.3
- VERSION = '1.2'
+ # Version 1.3: Add new parameters 'limit' and 'marker' to get_by_user()
+ VERSION = '1.3'
fields = {
'objects': fields.ListOfObjectsField('KeyPair'),
}
@staticmethod
- def _get_from_db(context, user_id):
- return _get_from_db(context, user_id)
+ def _get_from_db(context, user_id, limit, marker):
+ return _get_from_db(context, user_id, limit=limit, marker=marker)
@staticmethod
def _get_count_from_db(context, user_id):
return _get_count_from_db(context, user_id)
@base.remotable_classmethod
- def get_by_user(cls, context, user_id):
- api_db_keypairs = cls._get_from_db(context, user_id)
- main_db_keypairs = db.key_pair_get_all_by_user(context, user_id)
+ def get_by_user(cls, context, user_id, limit=None, marker=None):
+ try:
+ api_db_keypairs = cls._get_from_db(
+ context, user_id, limit=limit, marker=marker)
+ # NOTE(pkholkin): If we were asked for a marker and found it in
+ # results from the API DB, we must continue our pagination with
+ # just the limit (if any) to the main DB.
+ marker = None
+ except exception.MarkerNotFound:
+ api_db_keypairs = []
+
+ if limit is not None:
+ limit_more = limit - len(api_db_keypairs)
+ else:
+ limit_more = None
+
+ if limit_more is None or limit_more > 0:
+ main_db_keypairs = db.key_pair_get_all_by_user(
+ context, user_id, limit=limit_more, marker=marker)
+ else:
+ main_db_keypairs = []
+
return base.obj_make_list(context, cls(context), objects.KeyPair,
api_db_keypairs + main_db_keypairs)
diff --git a/nova/objects/resource_provider.py b/nova/objects/resource_provider.py
index 4b1ae35d18..ecfd43f14f 100644
--- a/nova/objects/resource_provider.py
+++ b/nova/objects/resource_provider.py
@@ -25,48 +25,6 @@ _INV_TBL = models.Inventory.__table__
_RP_TBL = models.ResourceProvider.__table__
-@db_api.api_context_manager.writer
-def _create_rp_in_db(context, updates):
- db_rp = models.ResourceProvider()
- db_rp.update(updates)
- context.session.add(db_rp)
- return db_rp
-
-
-@db_api.api_context_manager.writer
-def _delete_rp_from_db(context, _id):
- # Don't delete the resource provider if it has allocations.
- rp_allocations = context.session.query(models.Allocation).\
- filter(models.Allocation.resource_provider_id == _id).\
- count()
- if rp_allocations:
- raise exception.ResourceProviderInUse()
- # Delete any inventory associated with the resource provider
- context.session.query(models.Inventory).\
- filter(models.Inventory.resource_provider_id == _id).delete()
- result = context.session.query(models.ResourceProvider).\
- filter(models.ResourceProvider.id == _id).delete()
- if not result:
- raise exception.NotFound()
-
-
-@db_api.api_context_manager.writer
-def _update_rp_in_db(context, id, updates):
- db_rp = context.session.query(models.ResourceProvider).filter_by(
- id=id).first()
- db_rp.update(updates)
- db_rp.save(context.session)
-
-
-@db_api.api_context_manager.reader
-def _get_rp_by_uuid_from_db(context, uuid):
- result = context.session.query(models.ResourceProvider).filter_by(
- uuid=uuid).first()
- if not result:
- raise exception.NotFound()
- return result
-
-
def _get_current_inventory_resources(conn, rp):
"""Returns a set() containing the resource class IDs for all resources
currently having an inventory record for the supplied resource provider.
@@ -341,16 +299,37 @@ class ResourceProvider(base.NovaObject):
self.obj_reset_changes()
@staticmethod
+ @db_api.api_context_manager.writer
def _create_in_db(context, updates):
- return _create_rp_in_db(context, updates)
+ db_rp = models.ResourceProvider()
+ db_rp.update(updates)
+ context.session.add(db_rp)
+ return db_rp
@staticmethod
- def _delete(context, id):
- _delete_rp_from_db(context, id)
+ @db_api.api_context_manager.writer
+ def _delete(context, _id):
+ # Don't delete the resource provider if it has allocations.
+ rp_allocations = context.session.query(models.Allocation).\
+ filter(models.Allocation.resource_provider_id == _id).\
+ count()
+ if rp_allocations:
+ raise exception.ResourceProviderInUse()
+ # Delete any inventory associated with the resource provider
+ context.session.query(models.Inventory).\
+ filter(models.Inventory.resource_provider_id == _id).delete()
+ result = context.session.query(models.ResourceProvider).\
+ filter(models.ResourceProvider.id == _id).delete()
+ if not result:
+ raise exception.NotFound()
@staticmethod
+ @db_api.api_context_manager.writer
def _update_in_db(context, id, updates):
- return _update_rp_in_db(context, id, updates)
+ db_rp = context.session.query(models.ResourceProvider).filter_by(
+ id=id).first()
+ db_rp.update(updates)
+ db_rp.save(context.session)
@staticmethod
def _from_db_object(context, resource_provider, db_resource_provider):
@@ -361,8 +340,13 @@ class ResourceProvider(base.NovaObject):
return resource_provider
@staticmethod
+ @db_api.api_context_manager.reader
def _get_by_uuid_from_db(context, uuid):
- return _get_rp_by_uuid_from_db(context, uuid)
+ result = context.session.query(models.ResourceProvider).filter_by(
+ uuid=uuid).first()
+ if not result:
+ raise exception.NotFound()
+ return result
@base.NovaObjectRegistry.register
diff --git a/nova/objects/virtual_interface.py b/nova/objects/virtual_interface.py
index 1615a8dff8..659611e538 100644
--- a/nova/objects/virtual_interface.py
+++ b/nova/objects/virtual_interface.py
@@ -29,10 +29,12 @@ class VirtualInterface(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Add tag field
# Version 1.2: Adding a save method
- VERSION = '1.2'
+ # Version 1.3: Added destroy() method
+ VERSION = '1.3'
fields = {
'id': fields.IntegerField(),
+ # This is a MAC address.
'address': fields.StringField(nullable=True),
'network_id': fields.IntegerField(),
'instance_uuid': fields.UUIDField(),
@@ -109,6 +111,10 @@ class VirtualInterface(base.NovaPersistentObject, base.NovaObject):
def delete_by_instance_uuid(cls, context, instance_uuid):
db.virtual_interface_delete_by_instance(context, instance_uuid)
+ @base.remotable
+ def destroy(self):
+ db.virtual_interface_delete(self._context, self.id)
+
@base.NovaObjectRegistry.register
class VirtualInterfaceList(base.ObjectListBase, base.NovaObject):
diff --git a/nova/scheduler/filters/core_filter.py b/nova/scheduler/filters/core_filter.py
index 0fe2aaf787..15cd4da5ea 100644
--- a/nova/scheduler/filters/core_filter.py
+++ b/nova/scheduler/filters/core_filter.py
@@ -30,7 +30,12 @@ class BaseCoreFilter(filters.BaseHostFilter):
raise NotImplementedError
def host_passes(self, host_state, spec_obj):
- """Return True if host has sufficient CPU cores."""
+ """Return True if host has sufficient CPU cores.
+
+ :param host_state: nova.scheduler.host_manager.HostState
+ :param spec_obj: filter options
+ :return: boolean
+ """
if not host_state.vcpus_total:
# Fail safe
LOG.warning(_LW("VCPUs not set; assuming CPU collection broken"))
@@ -50,10 +55,10 @@ class BaseCoreFilter(filters.BaseHostFilter):
# against other instances.
if instance_vcpus > host_state.vcpus_total:
LOG.debug("%(host_state)s does not have %(instance_vcpus)d "
- "total cpus before overcommit, it only has %(cpus)d",
- {'host_state': host_state,
- 'instance_vcpus': instance_vcpus,
- 'cpus': host_state.vcpus_total})
+ "total cpus before overcommit, it only has %(cpus)d",
+ {'host_state': host_state,
+ 'instance_vcpus': instance_vcpus,
+ 'cpus': host_state.vcpus_total})
return False
free_vcpus = vcpus_total - host_state.vcpus_used
diff --git a/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-resp.json.tpl
new file mode 100644
index 0000000000..0b2bf63e7f
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "keypairs": [
+ {
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "type": "%(keypair_type)s",
+ "public_key": "%(public_key)s"
+ }
+ }
+ ],
+ "keypairs_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/keypairs?limit=1&marker=%(keypair_name)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json.tpl
new file mode 100644
index 0000000000..8e0963bc7a
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user1-resp.json.tpl
@@ -0,0 +1,12 @@
+{
+ "keypairs": [
+ {
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "type": "%(keypair_type)s",
+ "public_key": "%(public_key)s"
+ }
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json.tpl
new file mode 100644
index 0000000000..6c3402b24c
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-list-user2-resp.json.tpl
@@ -0,0 +1,18 @@
+{
+ "keypairs": [
+ {
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "type": "%(keypair_type)s",
+ "public_key": "%(public_key)s"
+ }
+ }
+ ],
+ "keypairs_links": [
+ {
+ "href": "%(versioned_compute_endpoint)s/keypairs?user_id=user2&limit=1&marker=%(keypair_name)s",
+ "rel": "next"
+ }
+ ]
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-req.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-req.json.tpl
new file mode 100644
index 0000000000..f6a6d47b56
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-req.json.tpl
@@ -0,0 +1,7 @@
+{
+ "keypair": {
+ "name": "%(keypair_name)s",
+ "type": "%(keypair_type)s",
+ "user_id": "%(user_id)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-resp.json.tpl b/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-resp.json.tpl
new file mode 100644
index 0000000000..ee5eb23f77
--- /dev/null
+++ b/nova/tests/functional/api_sample_tests/api_samples/keypairs/v2.35/keypairs-post-resp.json.tpl
@@ -0,0 +1,10 @@
+{
+ "keypair": {
+ "fingerprint": "%(fingerprint)s",
+ "name": "%(keypair_name)s",
+ "type": "%(keypair_type)s",
+ "private_key": "%(private_key)s",
+ "public_key": "%(public_key)s",
+ "user_id": "%(user_id)s"
+ }
+}
diff --git a/nova/tests/functional/api_sample_tests/test_keypairs.py b/nova/tests/functional/api_sample_tests/test_keypairs.py
index 50bc9931ab..65f97ffe12 100644
--- a/nova/tests/functional/api_sample_tests/test_keypairs.py
+++ b/nova/tests/functional/api_sample_tests/test_keypairs.py
@@ -210,3 +210,76 @@ class KeyPairsV210SampleJsonTestNotAdmin(KeyPairsV210SampleJsonTest):
response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
self.assertEqual(403, response.status_code)
+
+
+class KeyPairsV235SampleJsonTest(api_sample_base.ApiSampleTestBaseV21):
+ ADMIN_API = True
+ sample_dir = "keypairs"
+ microversion = '2.35'
+ expected_post_status_code = 201
+ scenarios = [('v2_35', {'api_major_version': 'v2.1'})]
+
+ def setUp(self):
+ super(KeyPairsV235SampleJsonTest, self).setUp()
+ self.api.microversion = self.microversion
+
+ # TODO(pkholkin): this is only needed because we randomly choose the
+ # uuid each time.
+ def generalize_subs(self, subs, vanilla_regexes):
+ subs['keypair_name'] = 'keypair-[0-9a-f-]+'
+ return subs
+
+ def test_keypairs_post(self, user="admin", kp_name=None):
+ return self._check_keypairs_post(
+ keypair_type=keypair_obj.KEYPAIR_TYPE_SSH,
+ user_id=user, kp_name=kp_name)
+
+ def _check_keypairs_post(self, **kwargs):
+ """Get api sample of key pairs post request."""
+ key_name = kwargs.pop('kp_name')
+ if not key_name:
+ key_name = 'keypair-' + str(uuid.uuid4())
+
+ subs = dict(keypair_name=key_name, **kwargs)
+ response = self._do_post('os-keypairs', 'keypairs-post-req', subs)
+ subs = {'keypair_name': key_name}
+
+ self._verify_response('keypairs-post-resp', subs, response,
+ self.expected_post_status_code)
+ return key_name
+
+ def test_keypairs_list(self):
+ # Get api sample of key pairs list request.
+
+ # sort key_pairs by name before paging
+ keypairs = sorted([self.test_keypairs_post() for i in range(3)])
+
+ response = self._do_get('os-keypairs?marker=%s&limit=1' % keypairs[1])
+ subs = {'keypair_name': keypairs[2]}
+ self._verify_response('keypairs-list-resp', subs, response, 200)
+
+ def test_keypairs_list_for_different_users(self):
+ # Get api sample of key pairs list request.
+
+ # create common kp_names for two users
+ kp_names = ['keypair-' + str(uuid.uuid4()) for i in range(3)]
+
+ # sort key_pairs by name before paging
+ keypairs_user1 = sorted([self.test_keypairs_post(
+ user="user1", kp_name=kp_name) for kp_name in kp_names])
+ keypairs_user2 = sorted([self.test_keypairs_post(
+ user="user2", kp_name=kp_name) for kp_name in kp_names])
+
+ # get all keypairs after the second for user1
+ response = self._do_get('os-keypairs?user_id=user1&marker=%s'
+ % keypairs_user1[1])
+ subs = {'keypair_name': keypairs_user1[2]}
+ self._verify_response(
+ 'keypairs-list-user1-resp', subs, response, 200)
+
+ # get only one keypair after the second for user2
+ response = self._do_get('os-keypairs?user_id=user2&marker=%s&limit=1'
+ % keypairs_user2[1])
+ subs = {'keypair_name': keypairs_user2[2]}
+ self._verify_response(
+ 'keypairs-list-user2-resp', subs, response, 200)
diff --git a/nova/tests/functional/api_sample_tests/test_server_tags.py b/nova/tests/functional/api_sample_tests/test_server_tags.py
index bf0884a4be..683098902c 100644
--- a/nova/tests/functional/api_sample_tests/test_server_tags.py
+++ b/nova/tests/functional/api_sample_tests/test_server_tags.py
@@ -90,6 +90,7 @@ class ServerTagsJsonTest(test_servers.ServersSampleBase):
expected_location = "%s/servers/%s/tags/%s" % (
self._get_vers_compute_endpoint(), uuid, tag.tag)
self.assertEqual(expected_location, response.headers['Location'])
+ self.assertEqual('', response.content)
def test_server_tags_delete(self):
uuid = self._put_server_tags()
diff --git a/nova/tests/functional/db/test_keypair.py b/nova/tests/functional/db/test_keypair.py
index dd840852fe..b374efc34c 100644
--- a/nova/tests/functional/db/test_keypair.py
+++ b/nova/tests/functional/db/test_keypair.py
@@ -146,7 +146,9 @@ class KeyPairObjectTestCase(test.NoDBTestCase):
# NOTE(danms): This only fetches from the API DB
api_keys = objects.KeyPairList._get_from_db(self.context,
- self.context.user_id)
+ self.context.user_id,
+ limit=None,
+ marker=None)
self.assertEqual(3, len(api_keys))
# NOTE(danms): This only fetches from the main DB
@@ -177,3 +179,108 @@ class KeyPairObjectTestCase(test.NoDBTestCase):
total, done = keypair.migrate_keypairs_to_api_db(self.context, 100)
self.assertEqual(0, total)
self.assertEqual(0, done)
+
+ def test_get_by_user_limit_and_marker(self):
+ self._api_kp(name='apikey1')
+ self._api_kp(name='apikey2')
+ self._main_kp(name='mainkey1')
+ self._main_kp(name='mainkey2')
+
+ # check all 4 keypairs (2 api and 2 main)
+ kpl = objects.KeyPairList.get_by_user(self.context,
+ self.context.user_id)
+ self.assertEqual(4, len(kpl))
+ self.assertEqual(set(['apikey1', 'apikey2', 'mainkey1', 'mainkey2']),
+ set([x.name for x in kpl]))
+
+ # check only 1 keypair (1 api)
+ kpl = objects.KeyPairList.get_by_user(self.context,
+ self.context.user_id,
+ limit=1)
+ self.assertEqual(1, len(kpl))
+ self.assertEqual(set(['apikey1']),
+ set([x.name for x in kpl]))
+
+ # check only 3 keypairs (2 api and 1 main)
+ kpl = objects.KeyPairList.get_by_user(self.context,
+ self.context.user_id,
+ limit=3)
+ self.assertEqual(3, len(kpl))
+ self.assertEqual(set(['apikey1', 'apikey2', 'mainkey1']),
+ set([x.name for x in kpl]))
+
+ # check keypairs after 'apikey1' (1 api and 2 main)
+ kpl = objects.KeyPairList.get_by_user(self.context,
+ self.context.user_id,
+ marker='apikey1')
+ self.assertEqual(3, len(kpl))
+ self.assertEqual(set(['apikey2', 'mainkey1', 'mainkey2']),
+ set([x.name for x in kpl]))
+
+ # check keypairs after 'mainkey2' (no keypairs)
+ kpl = objects.KeyPairList.get_by_user(self.context,
+ self.context.user_id,
+ marker='mainkey2')
+ self.assertEqual(0, len(kpl))
+
+ # check only 2 keypairs after 'apikey1' (1 api and 1 main)
+ kpl = objects.KeyPairList.get_by_user(self.context,
+ self.context.user_id,
+ limit=2,
+ marker='apikey1')
+ self.assertEqual(2, len(kpl))
+ self.assertEqual(set(['apikey2', 'mainkey1']),
+ set([x.name for x in kpl]))
+
+ # check non-existing keypair
+ self.assertRaises(exception.MarkerNotFound,
+ objects.KeyPairList.get_by_user,
+ self.context, self.context.user_id,
+ limit=2, marker='unknown_kp')
+
+ def test_get_by_user_different_users(self):
+ # create keypairs for two users
+ self._api_kp(name='apikey', user_id='user1')
+ self._api_kp(name='apikey', user_id='user2')
+ self._main_kp(name='mainkey', user_id='user1')
+ self._main_kp(name='mainkey', user_id='user2')
+
+ # check all 2 keypairs for user1 (1 api and 1 main)
+ kpl = objects.KeyPairList.get_by_user(self.context, 'user1')
+ self.assertEqual(2, len(kpl))
+ self.assertEqual(set(['apikey', 'mainkey']),
+ set([x.name for x in kpl]))
+
+ # check all 2 keypairs for user2 (1 api and 1 main)
+ kpl = objects.KeyPairList.get_by_user(self.context, 'user2')
+ self.assertEqual(2, len(kpl))
+ self.assertEqual(set(['apikey', 'mainkey']),
+ set([x.name for x in kpl]))
+
+ # check only 1 keypair for user1 (1 api)
+ kpl = objects.KeyPairList.get_by_user(self.context, 'user1', limit=1)
+ self.assertEqual(1, len(kpl))
+ self.assertEqual(set(['apikey']),
+ set([x.name for x in kpl]))
+
+ # check keypairs after 'apikey' for user2 (1 main)
+ kpl = objects.KeyPairList.get_by_user(self.context, 'user2',
+ marker='apikey')
+ self.assertEqual(1, len(kpl))
+ self.assertEqual(set(['mainkey']),
+ set([x.name for x in kpl]))
+
+ # check only 2 keypairs after 'apikey' for user1 (1 main)
+ kpl = objects.KeyPairList.get_by_user(self.context,
+ 'user1',
+ limit=2,
+ marker='apikey')
+ self.assertEqual(1, len(kpl))
+ self.assertEqual(set(['mainkey']),
+ set([x.name for x in kpl]))
+
+ # check non-existing keypair for user2
+ self.assertRaises(exception.MarkerNotFound,
+ objects.KeyPairList.get_by_user,
+ self.context, 'user2',
+ limit=2, marker='unknown_kp')
diff --git a/nova/tests/functional/notification_sample_tests/test_instance.py b/nova/tests/functional/notification_sample_tests/test_instance.py
index f186b4de61..ac10179230 100644
--- a/nova/tests/functional/notification_sample_tests/test_instance.py
+++ b/nova/tests/functional/notification_sample_tests/test_instance.py
@@ -314,3 +314,28 @@ class TestInstanceNotificationSample(
notification_sample_base.NotificationSampleTestBase.ANY,
'uuid': server['id']},
actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
+
+ def test_delete_restore_server(self):
+ self.flags(reclaim_instance_interval=30)
+ server = self._boot_a_server(
+ extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
+ self.api.delete_server(server['id'])
+ self._wait_for_state_change(self.api, server, 'SOFT_DELETED')
+ self.api.post_server_action(server['id'], {'restore': {}})
+ self._wait_for_state_change(self.api, server, 'ACTIVE')
+
+ self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
+ self._verify_notification(
+ 'instance-restore-start',
+ replacements={
+ 'reservation_id':
+ notification_sample_base.NotificationSampleTestBase.ANY,
+ 'uuid': server['id']},
+ actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
+ self._verify_notification(
+ 'instance-restore-end',
+ replacements={
+ 'reservation_id':
+ notification_sample_base.NotificationSampleTestBase.ANY,
+ 'uuid': server['id']},
+ actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
diff --git a/nova/tests/unit/api/openstack/compute/test_keypairs.py b/nova/tests/unit/api/openstack/compute/test_keypairs.py
index 430e8827f1..3c971107fa 100644
--- a/nova/tests/unit/api/openstack/compute/test_keypairs.py
+++ b/nova/tests/unit/api/openstack/compute/test_keypairs.py
@@ -47,7 +47,7 @@ def fake_keypair(name):
name=name, **keypair_data)
-def db_key_pair_get_all_by_user(self, user_id):
+def db_key_pair_get_all_by_user(self, user_id, limit, marker):
return [fake_keypair('FAKE')]
@@ -562,3 +562,43 @@ class KeypairsTestV210(KeypairsTestV22):
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.create,
req, body=body)
+
+
+class KeypairsTestV235(test.TestCase):
+ base_url = '/v2/fake'
+ wsgi_api_version = '2.35'
+
+ def _setup_app_and_controller(self):
+ self.app_server = fakes.wsgi_app_v21(init_only=('os-keypairs'))
+ self.controller = keypairs_v21.KeypairController()
+
+ def setUp(self):
+ super(KeypairsTestV235, self).setUp()
+ self._setup_app_and_controller()
+
+ @mock.patch("nova.db.key_pair_get_all_by_user")
+ def test_keypair_list_limit_and_marker(self, mock_kp_get):
+ mock_kp_get.side_effect = db_key_pair_get_all_by_user
+
+ req = fakes.HTTPRequest.blank(
+ self.base_url + '/os-keypairs?limit=3&marker=fake_marker',
+ version=self.wsgi_api_version, use_admin_context=True)
+
+ res_dict = self.controller.index(req)
+
+ mock_kp_get.assert_called_once_with(
+ req.environ['nova.context'], 'fake_user',
+ limit=3, marker='fake_marker')
+ response = {'keypairs': [{'keypair': dict(keypair_data, name='FAKE',
+ type='ssh')}]}
+ self.assertEqual(res_dict, response)
+
+ @mock.patch('nova.compute.api.KeypairAPI.get_key_pairs')
+ def test_keypair_list_limit_and_marker_invalid_marker(self, mock_kp_get):
+ mock_kp_get.side_effect = exception.MarkerNotFound(marker='unknown_kp')
+
+ req = fakes.HTTPRequest.blank(
+ self.base_url + '/os-keypairs?limit=3&marker=unknown_kp',
+ version=self.wsgi_api_version, use_admin_context=True)
+
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.index, req)
diff --git a/nova/tests/unit/api/openstack/compute/test_migrate_server.py b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
index fe5e5690f9..a21ba78586 100644
--- a/nova/tests/unit/api/openstack/compute/test_migrate_server.py
+++ b/nova/tests/unit/api/openstack/compute/test_migrate_server.py
@@ -33,6 +33,7 @@ class MigrateServerTestsV21(admin_only_action_common.CommonTests):
_api_version = '2.1'
disk_over_commit = False
force = None
+ async = False
def setUp(self):
super(MigrateServerTestsV21, self).setUp()
@@ -59,7 +60,8 @@ class MigrateServerTestsV21(admin_only_action_common.CommonTests):
'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live': self._get_migration_body(host='hostname')}
args_map = {'_migrate_live': ((False, self.disk_over_commit,
- 'hostname', self.force), {})}
+ 'hostname', self.force, self.async),
+ {})}
self._test_actions(['_migrate', '_migrate_live'], body_map=body_map,
method_translations=method_translations,
args_map=args_map)
@@ -69,7 +71,7 @@ class MigrateServerTestsV21(admin_only_action_common.CommonTests):
'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live': self._get_migration_body(host=None)}
args_map = {'_migrate_live': ((False, self.disk_over_commit, None,
- self.force),
+ self.force, self.async),
{})}
self._test_actions(['_migrate', '_migrate_live'], body_map=body_map,
method_translations=method_translations,
@@ -85,7 +87,8 @@ class MigrateServerTestsV21(admin_only_action_common.CommonTests):
'_migrate_live': 'live_migrate'}
body_map = self._get_migration_body(host='hostname')
args_map = {'_migrate_live': ((False, self.disk_over_commit,
- 'hostname', self.force), {})}
+ 'hostname', self.force, self.async),
+ {})}
exception_arg = {'_migrate': 'migrate',
'_migrate_live': 'os-migrateLive'}
self._test_actions_raise_conflict_on_invalid_state(
@@ -100,7 +103,8 @@ class MigrateServerTestsV21(admin_only_action_common.CommonTests):
body_map = {'_migrate_live':
self._get_migration_body(host='hostname')}
args_map = {'_migrate_live': ((False, self.disk_over_commit,
- 'hostname', self.force), {})}
+ 'hostname', self.force, self.async),
+ {})}
self._test_actions_with_locked_instance(
['_migrate', '_migrate_live'], body_map=body_map,
args_map=args_map, method_translations=method_translations)
@@ -125,19 +129,13 @@ class MigrateServerTestsV21(admin_only_action_common.CommonTests):
instance = self._stub_instance_get()
self.compute_api.live_migrate(self.context, instance, False,
self.disk_over_commit, 'hostname',
- self.force)
+ self.force, self.async)
self.mox.ReplayAll()
-
- res = self.controller._migrate_live(self.req, instance.uuid,
- body={'os-migrateLive': param})
- # NOTE: on v2.1, http status code is set as wsgi_code of API
- # method instead of status_int in a response object.
- if self._api_version == '2.1':
- status_int = self.controller._migrate_live.wsgi_code
- else:
- status_int = res.status_int
- self.assertEqual(202, status_int)
+ live_migrate_method = self.controller._migrate_live
+ live_migrate_method(self.req, instance.uuid,
+ body={'os-migrateLive': param})
+ self.assertEqual(202, live_migrate_method.wsgi_code)
def test_migrate_live_enabled(self):
param = self._get_params(host='hostname')
@@ -204,9 +202,8 @@ class MigrateServerTestsV21(admin_only_action_common.CommonTests):
instance = self._stub_instance_get(uuid=uuid)
self.compute_api.live_migrate(self.context, instance, False,
self.disk_over_commit,
- 'hostname', self.force
+ 'hostname', self.force, self.async
).AndRaise(fake_exc)
-
self.mox.ReplayAll()
body = self._get_migration_body(host='hostname')
@@ -308,8 +305,8 @@ class MigrateServerTestsV225(MigrateServerTestsV21):
method_translations = {'_migrate_live': 'live_migrate'}
body_map = {'_migrate_live': {'os-migrateLive': {'host': 'hostname',
'block_migration': 'auto'}}}
- args_map = {'_migrate_live': ((None, None, 'hostname', self.force),
- {})}
+ args_map = {'_migrate_live': ((None, None, 'hostname', self.force,
+ self.async), {})}
self._test_actions(['_migrate_live'], body_map=body_map,
method_translations=method_translations,
args_map=args_map)
@@ -329,7 +326,6 @@ class MigrateServerTestsV225(MigrateServerTestsV21):
class MigrateServerTestsV230(MigrateServerTestsV225):
-
force = False
def setUp(self):
@@ -346,8 +342,8 @@ class MigrateServerTestsV230(MigrateServerTestsV225):
body_map = {'_migrate_live': {'os-migrateLive': {'host': 'hostname',
'block_migration': 'auto',
'force': litteral_force}}}
- args_map = {'_migrate_live': ((None, None, 'hostname', force),
- {})}
+ args_map = {'_migrate_live': ((None, None, 'hostname', force,
+ self.async), {})}
self._test_actions(['_migrate_live'], body_map=body_map,
method_translations=method_translations,
args_map=args_map)
@@ -366,6 +362,75 @@ class MigrateServerTestsV230(MigrateServerTestsV225):
self.req, fakes.FAKE_UUID, body=body)
+class MigrateServerTestsV234(MigrateServerTestsV230):
+ async = True
+
+ def setUp(self):
+ super(MigrateServerTestsV230, self).setUp()
+ self.req.api_version_request = api_version_request.APIVersionRequest(
+ '2.34')
+
+ # NOTE(tdurakov): for REST API version 2.34 and above, tests below are not
+ # valid, as they are made in background.
+ def test_migrate_live_compute_service_unavailable(self):
+ pass
+
+ def test_migrate_live_invalid_hypervisor_type(self):
+ pass
+
+ def test_migrate_live_invalid_cpu_info(self):
+ pass
+
+ def test_migrate_live_unable_to_migrate_to_self(self):
+ pass
+
+ def test_migrate_live_destination_hypervisor_too_old(self):
+ pass
+
+ def test_migrate_live_no_valid_host(self):
+ pass
+
+ def test_migrate_live_invalid_local_storage(self):
+ pass
+
+ def test_migrate_live_invalid_shared_storage(self):
+ pass
+
+ def test_migrate_live_hypervisor_unavailable(self):
+ pass
+
+ def test_migrate_live_instance_not_active(self):
+ pass
+
+ def test_migrate_live_pre_check_error(self):
+ pass
+
+ def test_migrate_live_migration_precheck_client_exception(self):
+ pass
+
+ def test_migrate_live_migration_with_unexpected_error(self):
+ pass
+
+ def test_migrate_live_migration_with_old_nova_not_supported(self):
+ pass
+
+ def test_migrate_live_unexpected_error(self):
+ exc = exception.NoValidHost(reason="No valid host found")
+ self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
+ instance = self._stub_instance_get()
+ self.compute_api.live_migrate(self.context, instance, None,
+ self.disk_over_commit, 'hostname',
+ self.force, self.async).AndRaise(exc)
+
+ self.mox.ReplayAll()
+ body = {'os-migrateLive':
+ {'host': 'hostname', 'block_migration': 'auto'}}
+
+ self.assertRaises(webob.exc.HTTPInternalServerError,
+ self.controller._migrate_live,
+ self.req, instance.uuid, body=body)
+
+
class MigrateServerPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
diff --git a/nova/tests/unit/api/openstack/compute/test_server_tags.py b/nova/tests/unit/api/openstack/compute/test_server_tags.py
index 057acb5ddd..a5269b7bad 100644
--- a/nova/tests/unit/api/openstack/compute/test_server_tags.py
+++ b/nova/tests/unit/api/openstack/compute/test_server_tags.py
@@ -162,6 +162,7 @@ class ServerTagsTest(test.TestCase):
res = self.controller.update(req, UUID, TAG2, body=None)
self.assertEqual(201, res.status_int)
+ self.assertEqual(0, len(res.body))
self.assertEqual(location, res.headers['Location'])
mock_db_add_inst_tags.assert_called_once_with(context, UUID, TAG2)
mock_db_get_inst_tags.assert_called_once_with(context, UUID)
@@ -177,6 +178,7 @@ class ServerTagsTest(test.TestCase):
res = self.controller.update(req, UUID, TAG1, body=None)
self.assertEqual(204, res.status_int)
+ self.assertEqual(0, len(res.body))
mock_db_get_inst_tags.assert_called_once_with(context, UUID)
@mock.patch('nova.db.instance_tag_get_by_instance_uuid')
diff --git a/nova/tests/unit/api/openstack/test_common.py b/nova/tests/unit/api/openstack/test_common.py
index 246affbd19..24b2247b51 100644
--- a/nova/tests/unit/api/openstack/test_common.py
+++ b/nova/tests/unit/api/openstack/test_common.py
@@ -134,18 +134,20 @@ class LimiterTest(test.NoDBTestCase):
def test_limiter_custom_max_limit(self):
# Test a max_limit other than 1000.
- items = range(2000)
+ max_limit = 2000
+ self.flags(osapi_max_limit=max_limit)
+ items = range(max_limit)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
- common.limited(items, req, max_limit=2000), items[1:4])
+ common.limited(items, req), items[1:4])
req = webob.Request.blank('/?offset=3&limit=0')
self.assertEqual(
- common.limited(items, req, max_limit=2000), items[3:])
+ common.limited(items, req), items[3:])
req = webob.Request.blank('/?offset=3&limit=2500')
self.assertEqual(
- common.limited(items, req, max_limit=2000), items[3:])
+ common.limited(items, req), items[3:])
req = webob.Request.blank('/?offset=3000&limit=10')
- self.assertEqual(0, len(common.limited(items, req, max_limit=2000)))
+ self.assertEqual(0, len(common.limited(items, req)))
def test_limiter_negative_limit(self):
# Test a negative limit.
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 1876c6a126..36890978b1 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -25,10 +25,8 @@ import time
import traceback
import uuid
-from eventlet import greenthread
from itertools import chain
import mock
-from mox3 import mox
from neutronclient.common import exceptions as neutron_exceptions
from oslo_log import log as logging
import oslo_messaging as messaging
@@ -55,7 +53,6 @@ from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
-from nova.conductor import manager as conductor_manager
import nova.conf
from nova.console import type as ctype
from nova import context
@@ -167,7 +164,7 @@ class BaseTestCase(test.TestCase):
self.compute.driver, NODENAME)
self.compute._resource_tracker_dict[NODENAME] = fake_rt
- def fake_get_compute_nodes_in_db(context, use_slave=False):
+ def fake_get_compute_nodes_in_db(self, context, use_slave=False):
fake_compute_nodes = [{'local_gb': 259,
'uuid': uuids.fake_compute_node,
'vcpus_used': 0,
@@ -205,8 +202,9 @@ class BaseTestCase(test.TestCase):
def fake_compute_node_delete(context, compute_node_id):
self.assertEqual(2, compute_node_id)
- self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
- fake_get_compute_nodes_in_db)
+ self.stub_out(
+ 'nova.compute.manager.ComputeManager._get_compute_nodes_in_db',
+ fake_get_compute_nodes_in_db)
self.stub_out('nova.db.compute_node_delete',
fake_compute_node_delete)
@@ -232,10 +230,8 @@ class BaseTestCase(test.TestCase):
raise exception.ImageNotFound(image_id=id)
fake_image.stub_out_image_service(self)
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
-
- fake_taskapi = FakeComputeTaskAPI()
- self.stubs.Set(self.compute, 'compute_task_api', fake_taskapi)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ fake_show)
fake_network.set_stub_network_methods(self)
fake_server_actions.stub_out_action_events(self)
@@ -243,14 +239,14 @@ class BaseTestCase(test.TestCase):
def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
return network_model.NetworkInfo()
- self.stubs.Set(network_api.API, 'get_instance_nw_info',
+ self.stub_out('nova.network.api.API.get_instance_nw_info',
fake_get_nw_info)
def fake_allocate_for_instance(cls, ctxt, instance, *args, **kwargs):
self.assertFalse(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self, 1, 1)
- self.stubs.Set(network_api.API, 'allocate_for_instance',
+ self.stub_out('nova.network.api.API.allocate_for_instance',
fake_allocate_for_instance)
self.compute_api = compute.API()
@@ -338,8 +334,8 @@ class BaseTestCase(test.TestCase):
def _fake_migrate_server(*args, **kwargs):
pass
- self.stubs.Set(conductor_manager.ComputeTaskManager,
- 'migrate_server', _fake_migrate_server)
+ self.stub_out('nova.conductor.manager.ComputeTaskManager'
+ '.migrate_server', _fake_migrate_server)
def _init_aggregate_with_host(self, aggr, aggr_name, zone, host):
if not aggr:
@@ -365,22 +361,20 @@ class ComputeVolumeTestCase(BaseTestCase):
self.instance_object = objects.Instance._from_db_object(
self.context, objects.Instance(),
fake_instance.fake_db_instance())
- self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw:
+ self.stub_out('nova.volume.cinder.API.get', lambda *a, **kw:
{'id': uuids.volume_id, 'size': 4,
'attach_status': 'detached'})
- self.stubs.Set(self.compute.driver, 'get_volume_connector',
+ self.stub_out('nova.virt.fake.FakeDriver.get_volume_connector',
lambda *a, **kw: None)
- self.stubs.Set(self.compute.volume_api, 'initialize_connection',
+ self.stub_out('nova.volume.cinder.API.initialize_connection',
lambda *a, **kw: {})
- self.stubs.Set(self.compute.volume_api, 'terminate_connection',
- lambda *a, **kw: None)
- self.stubs.Set(self.compute.volume_api, 'attach',
+ self.stub_out('nova.volume.cinder.API.terminate_connection',
lambda *a, **kw: None)
- self.stubs.Set(self.compute.volume_api, 'detach',
+ self.stub_out('nova.volume.cinder.API.attach',
lambda *a, **kw: None)
- self.stubs.Set(self.compute.volume_api, 'check_attach',
+ self.stub_out('nova.volume.cinder.API.detach',
lambda *a, **kw: None)
- self.stubs.Set(greenthread, 'sleep',
+ self.stub_out('eventlet.greenthread.sleep',
lambda *a, **kw: None)
def store_cinfo(context, *args, **kwargs):
@@ -445,13 +439,13 @@ class ComputeVolumeTestCase(BaseTestCase):
self.flags(block_device_allocate_retries=2)
self.flags(block_device_allocate_retries_interval=0.1)
- def never_get(context, vol_id):
+ def never_get(self, context, vol_id):
return {
'status': 'creating',
'id': 'blah',
}
- self.stubs.Set(self.compute.volume_api, 'get', never_get)
+ self.stub_out('nova.volume.cinder.API.get', never_get)
self.assertRaises(exception.VolumeNotCreated,
self.compute._await_block_device_map_created,
self.context, '1')
@@ -472,7 +466,7 @@ class ComputeVolumeTestCase(BaseTestCase):
self.flags(block_device_allocate_retries=4)
self.flags(block_device_allocate_retries_interval=0.1)
- def slow_get(context, vol_id):
+ def slow_get(cls, context, vol_id):
if self.fetched_attempts < 2:
self.fetched_attempts += 1
return {
@@ -484,7 +478,7 @@ class ComputeVolumeTestCase(BaseTestCase):
'id': 'blah',
}
- self.stubs.Set(c.volume_api, 'get', slow_get)
+ self.stub_out('nova.volume.cinder.API.get', slow_get)
attempts = c._await_block_device_map_created(self.context, '1')
self.assertEqual(attempts, 3)
@@ -493,13 +487,13 @@ class ComputeVolumeTestCase(BaseTestCase):
self.flags(block_device_allocate_retries=-1)
self.flags(block_device_allocate_retries_interval=0.1)
- def volume_get(context, vol_id):
+ def volume_get(self, context, vol_id):
return {
'status': 'available',
'id': 'blah',
}
- self.stubs.Set(c.volume_api, 'get', volume_get)
+ self.stub_out('nova.volume.cinder.API.get', volume_get)
attempts = c._await_block_device_map_created(self.context, '1')
self.assertEqual(1, attempts)
@@ -508,17 +502,19 @@ class ComputeVolumeTestCase(BaseTestCase):
self.flags(block_device_allocate_retries=0)
self.flags(block_device_allocate_retries_interval=0.1)
- def volume_get(context, vol_id):
+ def volume_get(self, context, vol_id):
return {
'status': 'available',
'id': 'blah',
}
- self.stubs.Set(c.volume_api, 'get', volume_get)
+ self.stub_out('nova.volume.cinder.API.get', volume_get)
attempts = c._await_block_device_map_created(self.context, '1')
self.assertEqual(1, attempts)
def test_boot_volume_serial(self):
+ self.stub_out('nova.volume.cinder.API.check_attach',
+ lambda *a, **kw: None)
with (
mock.patch.object(objects.BlockDeviceMapping, 'save')
) as mock_save:
@@ -557,7 +553,7 @@ class ComputeVolumeTestCase(BaseTestCase):
else:
return {}
- self.stubs.Set(self.compute_api.volume_api, 'get', volume_api_get)
+ self.stub_out('nova.volume.cinder.API.get', volume_api_get)
expected_no_metadata = {'min_disk': 0, 'min_ram': 0, 'properties': {},
'size': 0, 'status': 'active'}
@@ -615,7 +611,7 @@ class ComputeVolumeTestCase(BaseTestCase):
else:
return {}
- self.stubs.Set(self.compute_api.image_api, 'get', image_api_get)
+ self.stub_out('nova.image.api.API.get', image_api_get)
block_device_mapping = [{
'boot_index': 0,
@@ -877,9 +873,9 @@ class ComputeVolumeTestCase(BaseTestCase):
def fake_check_attach(*args, **kwargs):
pass
- self.stubs.Set(cinder.API, 'get', fake_get)
- self.stubs.Set(cinder.API, 'get_snapshot', fake_get)
- self.stubs.Set(cinder.API, 'check_attach',
+ self.stub_out('nova.volume.cinder.API.get', fake_get)
+ self.stub_out('nova.volume.cinder.API.get_snapshot', fake_get)
+ self.stub_out('nova.volume.cinder.API.check_attach',
fake_check_attach)
volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
@@ -1130,7 +1126,7 @@ class ComputeVolumeTestCase(BaseTestCase):
'status': status,
'attach_status': attach_status,
'multiattach': False}
- self.stubs.Set(cinder.API, 'get', fake_volume_get)
+ self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
self.assertRaises(exception.InvalidVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
@@ -1140,7 +1136,7 @@ class ComputeVolumeTestCase(BaseTestCase):
def fake_volume_get_not_found(self, context, volume_id):
raise exception.VolumeNotFound(volume_id)
- self.stubs.Set(cinder.API, 'get', fake_volume_get_not_found)
+ self.stub_out('nova.volume.cinder.API.get', fake_volume_get_not_found)
self.assertRaises(exception.InvalidBDMVolume,
self.compute_api._validate_bdm,
self.context, self.instance,
@@ -1153,7 +1149,7 @@ class ComputeVolumeTestCase(BaseTestCase):
'status': 'available',
'attach_status': 'detached',
'multiattach': False}
- self.stubs.Set(cinder.API, 'get', fake_volume_get_ok)
+ self.stub_out('nova.volume.cinder.API.get', fake_volume_get_ok)
self.compute_api._validate_bdm(self.context, self.instance,
instance_type, bdms)
@@ -1292,7 +1288,7 @@ class ComputeTestCase(BaseTestCase):
def did_it_add_fault(*args):
called['fault_added'] = True
- self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
+ self.stub_out('nova.compute.utils.add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
@@ -1312,7 +1308,7 @@ class ComputeTestCase(BaseTestCase):
def did_it_add_fault(*args):
called['fault_added'] = True
- self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
+ self.stub_out('nova.compute.utils.add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
@@ -1332,7 +1328,7 @@ class ComputeTestCase(BaseTestCase):
def did_it_add_fault(*args):
called['fault_added'] = True
- self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
+ self.stub_out('nova.utils.add_instance_fault_from_exc',
did_it_add_fault)
@compute_manager.wrap_instance_fault
@@ -1470,7 +1466,7 @@ class ComputeTestCase(BaseTestCase):
def fake_is_neutron():
return True
- self.stubs.Set(utils, 'is_neutron', fake_is_neutron)
+ self.stub_out('nova.utils.is_neutron', fake_is_neutron)
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest(port_id=uuids.port_instance)])
self.assertRaises(exception.MultiplePortsNotApplicable,
@@ -1659,13 +1655,14 @@ class ComputeTestCase(BaseTestCase):
# Make sure the access_ip_* updates happen in the same DB
# update as the set to ACTIVE.
- def _instance_update(ctxt, instance_uuid, **kwargs):
+ def _instance_update(self, ctxt, instance_uuid, **kwargs):
if kwargs.get('vm_state', None) == vm_states.ACTIVE:
self.assertEqual(kwargs['access_ip_v4'], '192.168.1.100')
self.assertEqual(kwargs['access_ip_v6'], '2001:db8:0:1::1')
return orig_update(ctxt, instance_uuid, **kwargs)
- self.stubs.Set(self.compute, '_instance_update', _instance_update)
+ self.stub_out('nova.compute.manager.ComputeManager._instance_update',
+ _instance_update)
try:
self.compute.build_and_run_instance(self.context, instance, {},
@@ -1711,8 +1708,8 @@ class ComputeTestCase(BaseTestCase):
"""
def fake(*args, **kwargs):
raise exception.InvalidBDM()
- self.stubs.Set(nova.compute.manager.ComputeManager,
- '_prep_block_device', fake)
+ self.stub_out('nova.compute.manager.ComputeManager'
+ '._prep_block_device', fake)
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(
self.context, instance=instance, image={},
@@ -1902,14 +1899,14 @@ class ComputeTestCase(BaseTestCase):
bdms.append(bdm)
return bdm
- self.stubs.Set(cinder.API, 'get', fake_volume_get)
- self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
- self.stubs.Set(cinder.API, 'reserve_volume',
+ self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
+ self.stub_out('nova.volume.cinder.API.check_attach', fake_check_attach)
+ self.stub_out('nova.volume.cinder.API.reserve_volume',
fake_reserve_volume)
- self.stubs.Set(cinder.API, 'terminate_connection',
+ self.stub_out('nova.volume.cinder.API.terminate_connection',
fake_terminate_connection)
- self.stubs.Set(cinder.API, 'detach', fake_detach)
- self.stubs.Set(compute_rpcapi.ComputeAPI,
+ self.stub_out('nova.volume.cinder.API.detach', fake_detach)
+ self.stub_out('nova.compute.rpcapi.ComputeAPI.'
'reserve_block_device_name',
fake_rpc_reserve_block_device_name)
@@ -1995,8 +1992,8 @@ class ComputeTestCase(BaseTestCase):
def _fake_deallocate_network(*args, **kwargs):
raise test.TestingException()
- self.stubs.Set(self.compute, '_deallocate_network',
- _fake_deallocate_network)
+ self.stub_out('nova.compute.manager.ComputeManager.'
+ '_deallocate_network', _fake_deallocate_network)
self.assertRaises(test.TestingException,
self.compute.terminate_instance,
@@ -2054,7 +2051,8 @@ class ComputeTestCase(BaseTestCase):
self.deleted_image_id = image_id
fake_image.stub_out_image_service(self)
- self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.delete',
+ fake_delete)
instance = self._create_fake_instance_obj()
image = {'id': 'fake_id'}
@@ -2116,12 +2114,12 @@ class ComputeTestCase(BaseTestCase):
rescue_password):
called['rescued'] = True
- self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
+ self.stub_out('nova.virt.fake.FakeDriver.rescue', fake_rescue)
def fake_unrescue(self, instance_ref, network_info):
called['unrescued'] = True
- self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
+ self.stub_out('nova.virt.fake.FakeDriver.unrescue',
fake_unrescue)
instance = self._create_fake_instance_obj()
@@ -2144,7 +2142,7 @@ class ComputeTestCase(BaseTestCase):
def fake_rescue(self, context, instance_ref, network_info, image_meta,
rescue_password):
pass
- self.stubs.Set(nova.virt.fake.FakeDriver, 'rescue', fake_rescue)
+ self.stub_out('nova.virt.fake.FakeDriver.rescue', fake_rescue)
instance = self._create_fake_instance_obj()
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
@@ -2186,7 +2184,7 @@ class ComputeTestCase(BaseTestCase):
# Ensure notifications on instance rescue.
def fake_unrescue(self, instance_ref, network_info):
pass
- self.stubs.Set(nova.virt.fake.FakeDriver, 'unrescue',
+ self.stub_out('nova.virt.fake.FakeDriver.unrescue',
fake_unrescue)
instance = self._create_fake_instance_obj()
@@ -2307,7 +2305,7 @@ class ComputeTestCase(BaseTestCase):
block_device_info):
called['power_on'] = True
- self.stubs.Set(nova.virt.fake.FakeDriver, 'power_on',
+ self.stub_out('nova.virt.fake.FakeDriver.power_on',
fake_driver_power_on)
instance = self._create_fake_instance_obj()
@@ -2332,7 +2330,7 @@ class ComputeTestCase(BaseTestCase):
shutdown_timeout, shutdown_attempts):
called['power_off'] = True
- self.stubs.Set(nova.virt.fake.FakeDriver, 'power_off',
+ self.stub_out('nova.virt.fake.FakeDriver.power_off',
fake_driver_power_off)
instance = self._create_fake_instance_obj()
@@ -2550,7 +2548,7 @@ class ComputeTestCase(BaseTestCase):
# Make sure virt drivers can override default rebuild
called = {'rebuild': False}
- def fake(**kwargs):
+ def fake(*args, **kwargs):
instance = kwargs['instance']
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
@@ -2559,7 +2557,7 @@ class ComputeTestCase(BaseTestCase):
expected_task_state=[task_states.REBUILD_BLOCK_DEVICE_MAPPING])
called['rebuild'] = True
- self.stubs.Set(self.compute.driver, 'rebuild', fake)
+ self.stub_out('nova.virt.fake.FakeDriver.rebuild', fake)
instance = self._create_fake_instance_obj()
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
@@ -2596,7 +2594,7 @@ class ComputeTestCase(BaseTestCase):
# Make sure virt drivers can override default rebuild
called = {'rebuild': False}
- def fake(**kwargs):
+ def fake(*args, **kwargs):
instance = kwargs['instance']
instance.task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance.save(expected_task_state=[task_states.REBUILDING])
@@ -2612,7 +2610,7 @@ class ComputeTestCase(BaseTestCase):
bdms[0].volume_id,
instance, destroy_bdm=False)
- self.stubs.Set(self.compute.driver, 'rebuild', fake)
+ self.stub_out('nova.virt.fake.FakeDriver.rebuild', fake)
instance = self._create_fake_instance_obj()
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
@@ -2683,11 +2681,11 @@ class ComputeTestCase(BaseTestCase):
(b'/a/b/c', b'foobarbaz'),
]
- def _spawn(context, instance, image_meta, injected_files,
+ def _spawn(cls, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info):
self.assertEqual(self.decoded_files, injected_files)
- self.stubs.Set(self.compute.driver, 'spawn', _spawn)
+ self.stub_out('nova.virt.fake.FakeDriver.spawn', _spawn)
instance = self._create_fake_instance_obj()
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
@@ -3064,7 +3062,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_inject_network(self, instance, network_info):
called['inject'] = True
- self.stubs.Set(nova.virt.fake.FakeDriver, 'inject_network_info',
+ self.stub_out('nova.virt.fake.FakeDriver.inject_network_info',
fake_driver_inject_network)
instance = self._create_fake_instance_obj()
@@ -3082,7 +3080,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_reset_network(self, instance):
called['count'] += 1
- self.stubs.Set(nova.virt.fake.FakeDriver, 'reset_network',
+ self.stub_out('nova.virt.fake.FakeDriver.reset_network',
fake_driver_reset_network)
instance = self._create_fake_instance_obj()
@@ -3128,9 +3126,10 @@ class ComputeTestCase(BaseTestCase):
if raise_during_cleanup:
raise Exception()
- self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
+ self.stub_out('nova.virt.fake.FakeDriver.snapshot', fake_snapshot)
fake_image.stub_out_image_service(self)
- self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.delete',
+ fake_delete)
inst_obj = self._get_snapshotting_instance()
if method == 'snapshot':
@@ -3178,18 +3177,20 @@ class ComputeTestCase(BaseTestCase):
'status': status}
return image
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ fake_show)
def fake_delete(self_, context, image_id):
self.fake_image_delete_called = True
self.assertEqual('fakesnap', image_id)
- self.stubs.Set(fake_image._FakeImageService, 'delete', fake_delete)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.delete',
+ fake_delete)
def fake_snapshot(*args, **kwargs):
raise exc
- self.stubs.Set(self.compute.driver, 'snapshot', fake_snapshot)
+ self.stub_out('nova.virt.fake.FakeDriver.snapshot', fake_snapshot)
fake_image.stub_out_image_service(self)
@@ -3292,7 +3293,7 @@ class ComputeTestCase(BaseTestCase):
def fake_not_implemented(*args, **kwargs):
raise NotImplementedError()
- self.stubs.Set(self.compute.driver, 'get_console_output',
+ self.stub_out('nova.virt.fake.FakeDriver.get_console_output',
fake_not_implemented)
instance = self._create_fake_instance_obj()
@@ -3315,7 +3316,7 @@ class ComputeTestCase(BaseTestCase):
def fake_not_found(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake-instance')
- self.stubs.Set(self.compute.driver, 'get_console_output',
+ self.stub_out('nova.virt.fake.FakeDriver.get_console_output',
fake_not_found)
instance = self._create_fake_instance_obj()
@@ -3358,7 +3359,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_get_console(*args, **kwargs):
return ctype.ConsoleVNC(host="fake_host", port=5900)
- self.stubs.Set(self.compute.driver, "get_vnc_console",
+ self.stub_out("nova.virt.fake.FakeDriver.get_vnc_console",
fake_driver_get_console)
self.assertTrue(self.compute.validate_console_port(
@@ -3373,7 +3374,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_get_console(*args, **kwargs):
return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88)
- self.stubs.Set(self.compute.driver, "get_spice_console",
+ self.stub_out("nova.virt.fake.FakeDriver.get_spice_console",
fake_driver_get_console)
self.assertTrue(self.compute.validate_console_port(
@@ -3387,7 +3388,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_get_console(*args, **kwargs):
return ctype.ConsoleRDP(host="fake_host", port=5900)
- self.stubs.Set(self.compute.driver, "get_rdp_console",
+ self.stub_out("nova.virt.fake.FakeDriver.get_rdp_console",
fake_driver_get_console)
self.assertTrue(self.compute.validate_console_port(
@@ -3413,7 +3414,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_get_console(*args, **kwargs):
return ctype.ConsoleSpice(host="fake_host", port=5900, tlsPort=88)
- self.stubs.Set(self.compute.driver, "get_vnc_console",
+ self.stub_out("nova.virt.fake.FakeDriver.get_vnc_console",
fake_driver_get_console)
self.assertFalse(self.compute.validate_console_port(
@@ -3477,7 +3478,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance, [], [])
def test_get_vnc_console_not_implemented(self):
- self.stubs.Set(self.compute.driver, 'get_vnc_console',
+ self.stub_out('nova.virt.fake.FakeDriver.get_vnc_console',
fake_not_implemented)
instance = self._create_fake_instance_obj()
@@ -3534,7 +3535,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance, [], [])
def test_get_spice_console_not_implemented(self):
- self.stubs.Set(self.compute.driver, 'get_spice_console',
+ self.stub_out('nova.virt.fake.FakeDriver.get_spice_console',
fake_not_implemented)
self.flags(enabled=False, group='vnc')
self.flags(enabled=True, group='spice')
@@ -3642,7 +3643,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_get_console(*args, **kwargs):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
- self.stubs.Set(self.compute.driver, "get_vnc_console",
+ self.stub_out("nova.virt.fake.FakeDriver.get_vnc_console",
fake_driver_get_console)
self.compute = utils.ExceptionHelper(self.compute)
@@ -3660,7 +3661,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_get_console(*args, **kwargs):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
- self.stubs.Set(self.compute.driver, "get_spice_console",
+ self.stub_out("nova.virt.fake.FakeDriver.get_spice_console",
fake_driver_get_console)
self.compute = utils.ExceptionHelper(self.compute)
@@ -3678,7 +3679,7 @@ class ComputeTestCase(BaseTestCase):
def fake_driver_get_console(*args, **kwargs):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
- self.stubs.Set(self.compute.driver, "get_rdp_console",
+ self.stub_out("nova.virt.fake.FakeDriver.get_rdp_console",
fake_driver_get_console)
self.compute = utils.ExceptionHelper(self.compute)
@@ -3786,11 +3787,11 @@ class ComputeTestCase(BaseTestCase):
def dummy(*args, **kwargs):
pass
- self.stubs.Set(network_api.API, 'add_fixed_ip_to_instance',
+ self.stub_out('nova.network.api.API.add_fixed_ip_to_instance',
dummy)
- self.stubs.Set(nova.compute.manager.ComputeManager,
+ self.stub_out('nova.compute.manager.ComputeManager.'
'inject_network_info', dummy)
- self.stubs.Set(nova.compute.manager.ComputeManager,
+ self.stub_out('nova.compute.manager.ComputeManager.'
'reset_network', dummy)
instance = self._create_fake_instance_obj()
@@ -3806,11 +3807,11 @@ class ComputeTestCase(BaseTestCase):
def dummy(*args, **kwargs):
pass
- self.stubs.Set(network_api.API, 'remove_fixed_ip_from_instance',
+ self.stub_out('nova.network.api.API.remove_fixed_ip_from_instance',
dummy)
- self.stubs.Set(nova.compute.manager.ComputeManager,
+ self.stub_out('nova.compute.manager.ComputeManager.'
'inject_network_info', dummy)
- self.stubs.Set(nova.compute.manager.ComputeManager,
+ self.stub_out('nova.compute.manager.ComputeManager.'
'reset_network', dummy)
instance = self._create_fake_instance_obj()
@@ -3879,7 +3880,7 @@ class ComputeTestCase(BaseTestCase):
raise exception.BuildAbortException(reason="already deleted",
instance_uuid=instance_uuid)
- self.stubs.Set(self.compute.driver, 'spawn',
+ self.stub_out('nova.virt.fake.FakeDriver.spawn',
build_inst_abort)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
@@ -3904,7 +3905,7 @@ class ComputeTestCase(BaseTestCase):
raise exception.RescheduledException(instance_uuid=instance_uuid,
reason="something bad happened")
- self.stubs.Set(self.compute.driver, 'spawn',
+ self.stub_out('nova.virt.fake.FakeDriver.spawn',
build_inst_fail)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
@@ -3928,7 +3929,7 @@ class ComputeTestCase(BaseTestCase):
def build_inst_fail(*args, **kwargs):
raise test.TestingException("i'm dying")
- self.stubs.Set(self.compute.driver, 'spawn',
+ self.stub_out('nova.virt.fake.FakeDriver.spawn',
build_inst_fail)
self.compute.build_and_run_instance(
@@ -4097,8 +4098,8 @@ class ComputeTestCase(BaseTestCase):
def fake_delete_tokens(*args, **kwargs):
self.tokens_deleted = True
- cauth_rpcapi = self.compute.consoleauth_rpcapi
- self.stubs.Set(cauth_rpcapi, 'delete_tokens_for_instance',
+ self.stub_out('nova.consoleauth.rpcapi.ConsoleAuthAPI.'
+ 'delete_tokens_for_instance',
fake_delete_tokens)
self.compute._delete_instance(self.context, instance, [],
@@ -4116,8 +4117,7 @@ class ComputeTestCase(BaseTestCase):
def fake_delete_tokens(*args, **kwargs):
self.tokens_deleted = True
- cells_rpcapi = self.compute.cells_rpcapi
- self.stubs.Set(cells_rpcapi, 'consoleauth_delete_tokens',
+ self.stub_out('nova.cells.rpcapi.CellsAPI.consoleauth_delete_tokens',
fake_delete_tokens)
self.compute._delete_instance(self.context, instance,
@@ -4138,11 +4138,11 @@ class ComputeTestCase(BaseTestCase):
"""
instance = self._create_fake_instance_obj()
- def fake_delete_instance(context, instance, bdms,
+ def fake_delete_instance(self, context, instance, bdms,
reservations=None):
raise exception.InstanceTerminationFailure(reason='')
- self.stubs.Set(self.compute, '_delete_instance',
+ self.stub_out('nova.compute.manager.ComputeManager._delete_instance',
fake_delete_instance)
self.assertRaises(exception.InstanceTerminationFailure,
@@ -4180,8 +4180,9 @@ class ComputeTestCase(BaseTestCase):
def _get_an_exception(*args, **kwargs):
raise test.TestingException()
- self.stubs.Set(self.context, 'elevated', _get_an_exception)
- self.stubs.Set(self.compute,
+ self.stub_out('nova.context.RequestContext.elevated',
+ _get_an_exception)
+ self.stub_out('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage', _get_an_exception)
func = getattr(self.compute, operation)
@@ -4189,8 +4190,8 @@ class ComputeTestCase(BaseTestCase):
self.assertRaises(test.TestingException,
func, self.context, instance=instance, **kwargs)
# self.context.elevated() is called in tearDown()
- self.stubs.Set(self.context, 'elevated', orig_elevated)
- self.stubs.Set(self.compute,
+ self.stub_out('nova.context.RequestContext.elevated', orig_elevated)
+ self.stub_out('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage', orig_notify)
# Fetch the instance's task_state and make sure it reverted to None.
@@ -4268,7 +4269,8 @@ class ComputeTestCase(BaseTestCase):
def fake_migration_save(*args, **kwargs):
raise test.TestingException()
- self.stubs.Set(migration, 'save', fake_migration_save)
+ self.stub_out('nova.objects.migration.Migration.save',
+ fake_migration_save)
self._test_state_revert(instance, *operation)
def _ensure_quota_reservations(self, instance,
@@ -5298,7 +5300,7 @@ class ComputeTestCase(BaseTestCase):
def fake_finish_revert_migration_driver(*args, **kwargs):
# Confirm the instance uses the old type in finish_revert_resize
- inst = args[1]
+ inst = args[2]
self.assertEqual('1', inst.flavor.flavorid)
old_vm_state = None
@@ -5309,9 +5311,9 @@ class ComputeTestCase(BaseTestCase):
params = {'vm_state': old_vm_state}
instance = self._create_fake_instance_obj(params)
- self.stubs.Set(self.compute.driver, 'finish_migration', fake)
- self.stubs.Set(self.compute.driver, 'finish_revert_migration',
- fake_finish_revert_migration_driver)
+ self.stub_out('nova.virt.fake.FakeDriver.finish_migration', fake)
+ self.stub_out('nova.virt.fake.FakeDriver.finish_revert_migration',
+ fake_finish_revert_migration_driver)
self._stub_out_resize_network_methods()
@@ -6264,7 +6266,8 @@ class ComputeTestCase(BaseTestCase):
return instance_map[instance_uuid]
# NOTE(comstud): Override the stub in setUp()
- def fake_get_instance_nw_info(context, instance, use_slave=False):
+ def fake_get_instance_nw_info(cls, context, instance,
+ use_slave=False):
# Note that this exception gets caught in compute/manager
# and is ignored. However, the below increment of
# 'get_nw_info' won't happen, and you'll get an assert
@@ -6282,7 +6285,7 @@ class ComputeTestCase(BaseTestCase):
fake_instance_get_all_by_host)
self.stub_out('nova.db.instance_get_by_uuid',
fake_instance_get_by_uuid)
- self.stubs.Set(self.compute.network_api, 'get_instance_nw_info',
+ self.stub_out('nova.network.api.API.get_instance_nw_info',
fake_get_instance_nw_info)
# Make an instance appear to be still Building
@@ -6486,7 +6489,7 @@ class ComputeTestCase(BaseTestCase):
migration.update(updates)
return migration
- def fake_confirm_resize(context, instance, migration=None):
+ def fake_confirm_resize(cls, context, instance, migration=None):
# raise exception for uuids.migration_instance_4 to check
# migration status does not get set to 'error' on confirm_resize
# failure.
@@ -6503,8 +6506,8 @@ class ComputeTestCase(BaseTestCase):
self.stub_out('nova.db.migration_get_unconfirmed_by_dest_compute',
fake_migration_get_unconfirmed_by_dest_compute)
self.stub_out('nova.db.migration_update', fake_migration_update)
- self.stubs.Set(self.compute.compute_api, 'confirm_resize',
- fake_confirm_resize)
+ self.stub_out('nova.compute.api.API.confirm_resize',
+ fake_confirm_resize)
def fetch_instance_migration_status(instance_uuid):
for migration in migrations:
@@ -6580,9 +6583,10 @@ class ComputeTestCase(BaseTestCase):
@mock.patch.object(objects.Instance, 'save')
def test_instance_update_host_check(self, mock_save):
# make sure rt usage doesn't happen if the host or node is different
- def fail_get(nodename):
+ def fail_get(self, nodename):
raise test.TestingException("wrong host/node")
- self.stubs.Set(self.compute, '_get_resource_tracker', fail_get)
+ self.stub_out('nova.compute.manager.ComputeManager.'
+ '_get_resource_tracker', fail_get)
instance = self._create_fake_instance_obj({'host': 'someotherhost'})
self.compute._instance_update(self.context, instance, vcpus=4)
@@ -6771,19 +6775,20 @@ class ComputeTestCase(BaseTestCase):
instance.user_id = 'fake-user'
instance.deleted = False
- def fake_destroy():
+ def fake_destroy(self):
instance.deleted = True
- self.stubs.Set(instance, 'destroy', fake_destroy)
+ self.stub_out('nova.objects.instance.Instance.destroy', fake_destroy)
self.stub_out('nova.db.block_device_mapping_get_all_by_instance',
lambda *a, **k: None)
- self.stubs.Set(self.compute,
+ self.stub_out('nova.compute.manager.ComputeManager.'
'_complete_deletion',
lambda *a, **k: None)
- self.stubs.Set(objects.Quotas, 'reserve', lambda *a, **k: None)
+ self.stub_out('nova.objects.quotas.Quotas.reserve',
+ lambda *a, **k: None)
self.compute._complete_partial_deletion(admin_context, instance)
@@ -6837,10 +6842,10 @@ class ComputeTestCase(BaseTestCase):
instance.deleted = False
instance.host = self.compute.host
- def fake_partial_deletion(context, instance):
+ def fake_partial_deletion(self, context, instance):
instance['deleted'] = instance['id']
- self.stubs.Set(self.compute,
+ self.stub_out('nova.compute.manager.ComputeManager.'
'_complete_partial_deletion',
fake_partial_deletion)
self.compute._init_instance(admin_context, instance)
@@ -6866,9 +6871,9 @@ class ComputeTestCase(BaseTestCase):
def _noop(*args, **kwargs):
pass
- self.stubs.Set(self.compute.network_api,
+ self.stub_out('nova.network.api.API.'
'add_fixed_ip_to_instance', _noop)
- self.stubs.Set(self.compute.network_api,
+ self.stub_out('nova.network.api.API.'
'remove_fixed_ip_from_instance', _noop)
instance = self._create_fake_instance_obj()
@@ -7397,7 +7402,7 @@ class ComputeAPITestCase(BaseTestCase):
super(ComputeAPITestCase, self).setUp()
self.useFixture(fixtures.SpawnIsSynchronousFixture())
- self.stubs.Set(network_api.API, 'get_instance_nw_info',
+ self.stub_out('nova.network.api.API.get_instance_nw_info',
fake_get_nw_info)
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
@@ -7449,7 +7454,8 @@ class ComputeAPITestCase(BaseTestCase):
inst_type['memory_mb'] = 1
self.fake_image['min_ram'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.assertRaises(exception.FlavorMemoryTooSmall,
self.compute_api.create, self.context,
@@ -7467,7 +7473,8 @@ class ComputeAPITestCase(BaseTestCase):
inst_type['root_gb'] = 1
self.fake_image['min_disk'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.assertRaises(exception.FlavorDiskSmallerThanMinDisk,
self.compute_api.create, self.context,
@@ -7486,7 +7493,8 @@ class ComputeAPITestCase(BaseTestCase):
self.fake_image['size'] = '1073741825'
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.assertRaises(exception.FlavorDiskSmallerThanImage,
self.compute_api.create, self.context,
@@ -7507,7 +7515,8 @@ class ComputeAPITestCase(BaseTestCase):
self.fake_image['min_ram'] = 2
self.fake_image['min_disk'] = 2
self.fake_image['name'] = 'fake_name'
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
@@ -7519,7 +7528,8 @@ class ComputeAPITestCase(BaseTestCase):
inst_type['root_gb'] = 1
inst_type['memory_mb'] = 1
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, self.fake_image['id'])
@@ -7531,7 +7541,8 @@ class ComputeAPITestCase(BaseTestCase):
}
self._create_instance_type(params=instance_type_params)
inst_type = flavors.get_flavor_by_name('test')
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
(refs, resv_id) = self.compute_api.create(self.context, inst_type,
self.fake_image['id'])
@@ -7554,7 +7565,8 @@ class ComputeAPITestCase(BaseTestCase):
self.fake_image['name'] = 'fake_name'
self.fake_image['status'] = 'DELETED'
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
expected_message = (
exception.ImageNotActive.msg_fmt % {'image_id':
@@ -7661,7 +7673,8 @@ class ComputeAPITestCase(BaseTestCase):
inst_type = flavors.get_default_flavor()
self.fake_image['min_ram'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.assertRaises(exception.InstanceUserDataTooLarge,
self.compute_api.create, self.context, inst_type,
@@ -7673,7 +7686,8 @@ class ComputeAPITestCase(BaseTestCase):
inst_type = flavors.get_default_flavor()
self.fake_image['min_ram'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.assertRaises(exception.InstanceUserDataMalformed,
self.compute_api.create, self.context, inst_type,
@@ -7685,7 +7699,8 @@ class ComputeAPITestCase(BaseTestCase):
inst_type = flavors.get_default_flavor()
self.fake_image['min_ram'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
# NOTE(mikal): a string of length 48510 encodes to 65532 characters of
# base64
@@ -7723,13 +7738,12 @@ class ComputeAPITestCase(BaseTestCase):
orig_populate = self.compute_api._populate_instance_for_create
- def _fake_populate(context, base_options, *args, **kwargs):
+ def _fake_populate(self, context, base_options, *args, **kwargs):
base_options['uuid'] = fake_uuids.pop(0)
return orig_populate(context, base_options, *args, **kwargs)
- self.stubs.Set(self.compute_api,
- '_populate_instance_for_create',
- _fake_populate)
+ self.stub_out('nova.compute.api.API.'
+ '_populate_instance_for_create', _fake_populate)
cases = [(None, 'server-%s' % fake_uuids[0]),
('Hello, Server!', 'hello-server'),
@@ -7744,7 +7758,8 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(ref[0]['hostname'], hostname)
def test_instance_create_adds_to_instance_group(self):
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
group = objects.InstanceGroup(self.context)
group.uuid = str(uuid.uuid4())
@@ -7835,29 +7850,29 @@ class ComputeAPITestCase(BaseTestCase):
info['clean'] = ('progress' not in
kwargs['instance'].obj_what_changed())
- self.stubs.Set(self.compute_api.compute_task_api, 'rebuild_instance',
- fake_rpc_rebuild)
-
- image_ref = instance["image_ref"] + '-new_image_ref'
- password = "new_password"
+ with mock.patch.object(self.compute_api.compute_task_api,
+ 'rebuild_instance', fake_rpc_rebuild):
+ image_ref = instance["image_ref"] + '-new_image_ref'
+ password = "new_password"
- instance.vm_state = vm_state
- instance.save()
+ instance.vm_state = vm_state
+ instance.save()
- self.compute_api.rebuild(self.context, instance, image_ref, password)
- self.assertEqual(info['image_ref'], image_ref)
- self.assertTrue(info['clean'])
+ self.compute_api.rebuild(self.context, instance,
+ image_ref, password)
+ self.assertEqual(info['image_ref'], image_ref)
+ self.assertTrue(info['clean'])
- instance.refresh()
- self.assertEqual(instance.task_state, task_states.REBUILDING)
- sys_meta = {k: v for k, v in instance.system_metadata.items()
- if not k.startswith('instance_type')}
- self.assertEqual(sys_meta,
- {'image_kernel_id': uuids.kernel_id,
- 'image_min_disk': '1',
- 'image_ramdisk_id': uuids.ramdisk_id,
- 'image_something_else': 'meow',
- 'preserved': 'preserve this!'})
+ instance.refresh()
+ self.assertEqual(instance.task_state, task_states.REBUILDING)
+ sys_meta = {k: v for k, v in instance.system_metadata.items()
+ if not k.startswith('instance_type')}
+ self.assertEqual(sys_meta,
+ {'image_kernel_id': uuids.kernel_id,
+ 'image_min_disk': '1',
+ 'image_ramdisk_id': uuids.ramdisk_id,
+ 'image_something_else': 'meow',
+ 'preserved': 'preserve this!'})
def test_rebuild(self):
self._test_rebuild(vm_state=vm_states.ACTIVE)
@@ -7867,7 +7882,8 @@ class ComputeAPITestCase(BaseTestCase):
def test_rebuild_in_error_not_launched(self):
instance = self._create_fake_instance_obj(params={'image_ref': ''})
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
@@ -7887,7 +7903,8 @@ class ComputeAPITestCase(BaseTestCase):
def test_rebuild_no_image(self):
instance = self._create_fake_instance_obj(params={'image_ref': ''})
instance_uuid = instance.uuid
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.compute_api.rebuild(self.context, instance, '', 'new_password')
instance = db.instance_get_by_uuid(self.context, instance_uuid)
@@ -7900,7 +7917,8 @@ class ComputeAPITestCase(BaseTestCase):
params={'image_ref': FAKE_IMAGE_REF})
self.fake_image['name'] = 'fake_name'
self.fake_image['status'] = 'DELETED'
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
expected_message = (
exception.ImageNotActive.msg_fmt % {'image_id':
@@ -7917,7 +7935,8 @@ class ComputeAPITestCase(BaseTestCase):
instance.flavor.root_gb = 1
self.fake_image['min_ram'] = 128
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.assertRaises(exception.FlavorMemoryTooSmall,
self.compute_api.rebuild, self.context,
@@ -7942,11 +7961,12 @@ class ComputeAPITestCase(BaseTestCase):
else:
raise KeyError()
- self.stubs.Set(flavors, 'extract_flavor',
+ self.stub_out('nova.compute.flavors.extract_flavor',
fake_extract_flavor)
self.fake_image['min_disk'] = 2
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.assertRaises(exception.FlavorDiskSmallerThanMinDisk,
self.compute_api.rebuild, self.context,
@@ -7971,12 +7991,13 @@ class ComputeAPITestCase(BaseTestCase):
else:
raise KeyError()
- self.stubs.Set(flavors, 'extract_flavor',
+ self.stub_out('nova.compute.flavors.extract_flavor',
fake_extract_flavor)
self.fake_image['min_ram'] = 64
self.fake_image['min_disk'] = 1
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
@@ -7994,9 +8015,10 @@ class ComputeAPITestCase(BaseTestCase):
else:
raise KeyError()
- self.stubs.Set(flavors, 'extract_flavor',
+ self.stub_out('nova.compute.flavors.extract_flavor',
fake_extract_flavor)
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.compute_api.rebuild(self.context,
instance, self.fake_image['id'], 'new_password')
@@ -8014,11 +8036,12 @@ class ComputeAPITestCase(BaseTestCase):
else:
raise KeyError()
- self.stubs.Set(flavors, 'extract_flavor',
+ self.stub_out('nova.compute.flavors.extract_flavor',
fake_extract_flavor)
self.fake_image['size'] = '1073741825'
- self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ self.fake_show)
self.assertRaises(exception.FlavorDiskSmallerThanImage,
self.compute_api.rebuild, self.context,
@@ -8419,8 +8442,9 @@ class ComputeAPITestCase(BaseTestCase):
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
meta_changes[0] = diff
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
- fake_change_instance_metadata)
+ self.stub_out('nova.compute.rpcapi.ComputeAPI.'
+ 'change_instance_metadata',
+ fake_change_instance_metadata)
_context = context.get_admin_context()
instance = self._create_fake_instance_obj({'metadata':
@@ -8473,7 +8497,8 @@ class ComputeAPITestCase(BaseTestCase):
def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
instance_uuid=None):
pass
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
+ self.stub_out('nova.compute.rpcapi.ComputeAPI.'
+ 'change_instance_metadata',
fake_change_instance_metadata)
instance = self._create_fake_instance_obj(
@@ -8856,7 +8881,7 @@ class ComputeAPITestCase(BaseTestCase):
def test_add_remove_fixed_ip(self):
instance = self._create_fake_instance_obj(params={'host': CONF.host})
- self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ self.stub_out('nova.network.api.API.deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.add_fixed_ip(self.context, instance, '1')
self.compute_api.remove_fixed_ip(self.context,
@@ -8918,9 +8943,9 @@ class ComputeAPITestCase(BaseTestCase):
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
- self.stubs.Set(cinder.API, 'get', fake_volume_get)
- self.stubs.Set(cinder.API, 'check_attach', fake)
- self.stubs.Set(cinder.API, 'reserve_volume', fake)
+ self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
+ self.stub_out('nova.volume.cinder.API.check_attach', fake)
+ self.stub_out('nova.volume.cinder.API.reserve_volume', fake)
instance = fake_instance.fake_instance_obj(None, **{
'uuid': 'f3000000-0000-0000-0000-000000000000', 'locked': False,
@@ -9011,7 +9036,10 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.get_vnc_console,
self.context, instance, 'novnc')
- def test_spice_console(self):
+ @mock.patch.object(compute_api.consoleauth_rpcapi.ConsoleAuthAPI,
+ 'authorize_console')
+ @mock.patch.object(compute_rpcapi.ComputeAPI, 'get_spice_console')
+ def test_spice_console(self, mock_spice, mock_auth):
# Make sure we can a spice console for an instance.
fake_instance = self._fake_instance(
@@ -9025,27 +9053,21 @@ class ComputeAPITestCase(BaseTestCase):
'internal_access_path': 'fake_access_path',
'instance_uuid': fake_instance.uuid,
'access_url': 'fake_console_url'}
+ mock_spice.return_value = fake_connect_info
- rpcapi = compute_rpcapi.ComputeAPI
- self.mox.StubOutWithMock(rpcapi, 'get_spice_console')
- rpcapi.get_spice_console(
- self.context, instance=fake_instance,
- console_type=fake_console_type).AndReturn(fake_connect_info)
-
- self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
- 'authorize_console')
- self.compute_api.consoleauth_rpcapi.authorize_console(
+ console = self.compute_api.get_spice_console(self.context,
+ fake_instance, fake_console_type)
+
+ self.assertEqual(console, {'url': 'fake_console_url'})
+ mock_spice.assert_called_once_with(self.context,
+ instance=fake_instance,
+ console_type=fake_console_type)
+ mock_auth.assert_called_once_with(
self.context, 'fake_token', fake_console_type, 'fake_console_host',
'fake_console_port', 'fake_access_path',
'f3000000-0000-0000-0000-000000000000',
access_url='fake_console_url')
- self.mox.ReplayAll()
-
- console = self.compute_api.get_spice_console(self.context,
- fake_instance, fake_console_type)
- self.assertEqual(console, {'url': 'fake_console_url'})
-
def test_get_spice_console_no_host(self):
instance = self._create_fake_instance_obj(params={'host': ''})
@@ -9053,9 +9075,11 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.get_spice_console,
self.context, instance, 'spice')
- def test_rdp_console(self):
+ @mock.patch.object(compute_api.consoleauth_rpcapi.ConsoleAuthAPI,
+ 'authorize_console')
+ @mock.patch.object(compute_rpcapi.ComputeAPI, 'get_rdp_console')
+ def test_rdp_console(self, mock_rdp, mock_auth):
# Make sure we can a rdp console for an instance.
-
fake_instance = self._fake_instance({
'uuid': 'f3000000-0000-0000-0000-000000000000',
'host': 'fake_compute_host'})
@@ -9067,27 +9091,20 @@ class ComputeAPITestCase(BaseTestCase):
'internal_access_path': 'fake_access_path',
'instance_uuid': fake_instance.uuid,
'access_url': 'fake_console_url'}
+ mock_rdp.return_value = fake_connect_info
- rpcapi = compute_rpcapi.ComputeAPI
- self.mox.StubOutWithMock(rpcapi, 'get_rdp_console')
- rpcapi.get_rdp_console(
- self.context, instance=fake_instance,
- console_type=fake_console_type).AndReturn(fake_connect_info)
-
- self.mox.StubOutWithMock(self.compute_api.consoleauth_rpcapi,
- 'authorize_console')
- self.compute_api.consoleauth_rpcapi.authorize_console(
+ console = self.compute_api.get_rdp_console(self.context,
+ fake_instance, fake_console_type)
+
+ self.assertEqual(console, {'url': 'fake_console_url'})
+ mock_rdp.assert_called_once_with(self.context, instance=fake_instance,
+ console_type=fake_console_type)
+ mock_auth.assert_called_once_with(
self.context, 'fake_token', fake_console_type, 'fake_console_host',
'fake_console_port', 'fake_access_path',
'f3000000-0000-0000-0000-000000000000',
access_url='fake_console_url')
- self.mox.ReplayAll()
-
- console = self.compute_api.get_rdp_console(self.context,
- fake_instance, fake_console_type)
- self.assertEqual(console, {'url': 'fake_console_url'})
-
def test_get_rdp_console_no_host(self):
instance = self._create_fake_instance_obj(params={'host': ''})
@@ -9171,24 +9188,22 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.get_mks_console,
self.context, instance, 'mks')
- def test_console_output(self):
+ @mock.patch.object(compute_rpcapi.ComputeAPI, 'get_console_output')
+ def test_console_output(self, mock_console):
fake_instance = self._fake_instance({
'uuid': 'f3000000-0000-0000-0000-000000000000',
'host': 'fake_compute_host'})
fake_tail_length = 699
fake_console_output = 'fake console output'
-
- rpcapi = compute_rpcapi.ComputeAPI
- self.mox.StubOutWithMock(rpcapi, 'get_console_output')
- rpcapi.get_console_output(
- self.context, instance=fake_instance,
- tail_length=fake_tail_length).AndReturn(fake_console_output)
-
- self.mox.ReplayAll()
+ mock_console.return_value = fake_console_output
output = self.compute_api.get_console_output(self.context,
fake_instance, tail_length=fake_tail_length)
+
self.assertEqual(output, fake_console_output)
+ mock_console.assert_called_once_with(self.context,
+ instance=fake_instance,
+ tail_length=fake_tail_length)
def test_console_output_no_host(self):
instance = self._create_fake_instance_obj(params={'host': ''})
@@ -9197,24 +9212,19 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.get_console_output,
self.context, instance)
- def test_attach_interface(self):
+ @mock.patch.object(network_api.API, 'allocate_port_for_instance')
+ def test_attach_interface(self, mock_allocate):
new_type = flavors.get_flavor_by_flavor_id('4')
-
instance = objects.Instance(image_ref=uuids.image_instance,
system_metadata={},
flavor=new_type,
host='fake-host')
- self.mox.StubOutWithMock(self.compute.network_api,
- 'allocate_port_for_instance')
nwinfo = [fake_network_cache_model.new_vif()]
network_id = nwinfo[0]['network']['id']
port_id = nwinfo[0]['id']
req_ip = '1.2.3.4'
- self.compute.network_api.allocate_port_for_instance(
- self.context, instance, port_id, network_id, req_ip,
- bind_host_id='fake-host'
- ).AndReturn(nwinfo)
- self.mox.ReplayAll()
+ mock_allocate.return_value = nwinfo
+
with mock.patch.dict(self.compute.driver.capabilities,
supports_attach_interface=True):
vif = self.compute.attach_interface(self.context,
@@ -9223,6 +9233,9 @@ class ComputeAPITestCase(BaseTestCase):
port_id,
req_ip)
self.assertEqual(vif['id'], network_id)
+ mock_allocate.assert_called_once_with(
+ self.context, instance, port_id, network_id, req_ip,
+ bind_host_id='fake-host')
return nwinfo, port_id
def test_attach_interface_failed(self):
@@ -9262,9 +9275,9 @@ class ComputeAPITestCase(BaseTestCase):
def test_detach_interface(self):
nwinfo, port_id = self.test_attach_interface()
- self.stubs.Set(self.compute.network_api,
+ self.stub_out('nova.network.api.API.'
'deallocate_port_for_instance',
- lambda a, b, c: [])
+ lambda a, b, c, d: [])
instance = objects.Instance()
instance.info_cache = objects.InstanceInfoCache.new(
self.context, uuids.info_cache_instance)
@@ -9407,14 +9420,14 @@ class ComputeAPITestCase(BaseTestCase):
bdm['device_name'] = '/dev/vdb'
return bdm
- self.stubs.Set(cinder.API, 'get', fake_volume_get)
- self.stubs.Set(cinder.API, 'check_attach', fake_check_attach)
- self.stubs.Set(cinder.API, 'reserve_volume',
+ self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
+ self.stub_out('nova.volume.cinder.API.check_attach', fake_check_attach)
+ self.stub_out('nova.volume.cinder.API.reserve_volume',
fake_reserve_volume)
- self.stubs.Set(compute_rpcapi.ComputeAPI,
+ self.stub_out('nova.compute.rpcapi.ComputeAPI.'
'reserve_block_device_name',
fake_rpc_reserve_block_device_name)
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume',
+ self.stub_out('nova.compute.rpcapi.ComputeAPI.attach_volume',
fake_rpc_attach_volume)
instance = self._create_fake_instance_obj()
@@ -9441,9 +9454,10 @@ class ComputeAPITestCase(BaseTestCase):
def fake_rpc_detach_volume(self, context, **kwargs):
called['fake_rpc_detach_volume'] = True
- self.stubs.Set(cinder.API, 'check_detach', fake_check_detach)
- self.stubs.Set(cinder.API, 'begin_detaching', fake_begin_detaching)
- self.stubs.Set(compute_rpcapi.ComputeAPI, 'detach_volume',
+ self.stub_out('nova.volume.cinder.API.check_detach', fake_check_detach)
+ self.stub_out('nova.volume.cinder.API.begin_detaching',
+ fake_begin_detaching)
+ self.stub_out('nova.compute.rpcapi.ComputeAPI.detach_volume',
fake_rpc_detach_volume)
self.compute_api.detach_volume(self.context,
@@ -9516,9 +9530,10 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.detach_volume, self.context,
fake_instance, volume)
- def test_detach_volume_libvirt_is_down(self):
+ @mock.patch.object(objects.BlockDeviceMapping,
+ 'get_by_volume_and_instance')
+ def test_detach_volume_libvirt_is_down(self, mock_get):
# Ensure rollback during detach if libvirt goes down
-
called = {}
instance = self._create_fake_instance_obj()
@@ -9527,7 +9542,7 @@ class ComputeAPITestCase(BaseTestCase):
'source_type': 'snapshot', 'destination_type': 'volume',
'connection_info': '{"test": "test"}'})
- def fake_libvirt_driver_instance_exists(_instance):
+ def fake_libvirt_driver_instance_exists(self, _instance):
called['fake_libvirt_driver_instance_exists'] = True
return False
@@ -9538,24 +9553,20 @@ class ComputeAPITestCase(BaseTestCase):
def fake_roll_detaching(*args, **kwargs):
called['fake_roll_detaching'] = True
- self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching)
- self.stubs.Set(self.compute.driver, "instance_exists",
+ self.stub_out('nova.volume.cinder.API.roll_detaching',
+ fake_roll_detaching)
+ self.stub_out('nova.virt.fake.FakeDriver.instance_exists',
fake_libvirt_driver_instance_exists)
- self.stubs.Set(self.compute.driver, "detach_volume",
+ self.stub_out('nova.virt.fake.FakeDriver.detach_volume',
fake_libvirt_driver_detach_volume_fails)
-
- self.mox.StubOutWithMock(objects.BlockDeviceMapping,
- 'get_by_volume_and_instance')
- objects.BlockDeviceMapping.get_by_volume_and_instance(
- self.context, 1, instance.uuid).\
- AndReturn(objects.BlockDeviceMapping(
- context=self.context, **fake_bdm))
- self.mox.ReplayAll()
+ mock_get.return_value = objects.BlockDeviceMapping(
+ context=self.context, **fake_bdm)
self.assertRaises(AttributeError, self.compute.detach_volume,
self.context, 1, instance)
self.assertTrue(called.get('fake_libvirt_driver_instance_exists'))
self.assertTrue(called.get('fake_roll_detaching'))
+ mock_get.assert_called_once_with(self.context, 1, instance.uuid)
def test_detach_volume_not_found(self):
# Ensure that a volume can be detached even when it is removed
@@ -9612,18 +9623,18 @@ class ComputeAPITestCase(BaseTestCase):
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
- self.stubs.Set(cinder.API, "get", fake_volume_get)
+ self.stub_out("nova.volume.cinder.API.get", fake_volume_get)
# Stub out and record whether it gets detached
result = {"detached": False}
def fake_detach(self, context, volume_id_param, instance_uuid):
result["detached"] = volume_id_param == volume_id
- self.stubs.Set(cinder.API, "detach", fake_detach)
+ self.stub_out("nova.volume.cinder.API.detach", fake_detach)
def fake_terminate_connection(self, context, volume_id, connector):
return {}
- self.stubs.Set(cinder.API, "terminate_connection",
+ self.stub_out("nova.volume.cinder.API.terminate_connection",
fake_terminate_connection)
# Kill the instance and check that it was detached
@@ -9657,16 +9668,16 @@ class ComputeAPITestCase(BaseTestCase):
bdm_obj = objects.BlockDeviceMapping(**bdm)
bdm_obj.create()
bdms.append(bdm_obj)
-
self.stub_out('nova.volume.cinder.API.terminate_connection',
- mox.MockAnything())
- self.stub_out('nova.volume.cinder.API.detach', mox.MockAnything())
+ mock.MagicMock())
+ self.stub_out('nova.volume.cinder.API.detach', mock.MagicMock())
def fake_volume_get(self, context, volume_id):
return {'id': volume_id}
self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
- self.stubs.Set(self.compute, '_prep_block_device', mox.MockAnything())
+ self.stub_out('nova.compute.manager.ComputeManager._prep_block_device',
+ mock.MagicMock())
self.compute.build_and_run_instance(self.context, instance, {}, {}, {},
block_device_mapping=[])
@@ -9692,13 +9703,13 @@ class ComputeAPITestCase(BaseTestCase):
def test_lock(self):
instance = self._create_fake_instance_obj()
- self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ self.stub_out('nova.network.api.API.deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.lock(self.context, instance)
def test_unlock(self):
instance = self._create_fake_instance_obj()
- self.stubs.Set(self.compute_api.network_api, 'deallocate_for_instance',
+ self.stub_out('nova.network.api.API.deallocate_for_instance',
lambda *a, **kw: None)
self.compute_api.unlock(self.context, instance)
@@ -9717,26 +9728,22 @@ class ComputeAPITestCase(BaseTestCase):
instance,
security_group_name)
- def test_get_diagnostics(self):
+ @mock.patch.object(compute_rpcapi.ComputeAPI, 'get_diagnostics')
+ def test_get_diagnostics(self, mock_get):
instance = self._create_fake_instance_obj()
- rpcapi = compute_rpcapi.ComputeAPI
- self.mox.StubOutWithMock(rpcapi, 'get_diagnostics')
- rpcapi.get_diagnostics(self.context, instance=instance)
- self.mox.ReplayAll()
-
self.compute_api.get_diagnostics(self.context, instance)
- def test_get_instance_diagnostics(self):
- instance = self._create_fake_instance_obj()
+ mock_get.assert_called_once_with(self.context, instance=instance)
- rpcapi = compute_rpcapi.ComputeAPI
- self.mox.StubOutWithMock(rpcapi, 'get_instance_diagnostics')
- rpcapi.get_instance_diagnostics(self.context, instance=instance)
- self.mox.ReplayAll()
+ @mock.patch.object(compute_rpcapi.ComputeAPI, 'get_instance_diagnostics')
+ def test_get_instance_diagnostics(self, mock_get):
+ instance = self._create_fake_instance_obj()
self.compute_api.get_instance_diagnostics(self.context, instance)
+ mock_get.assert_called_once_with(self.context, instance=instance)
+
@mock.patch.object(compute_rpcapi.ComputeAPI,
'refresh_instance_security_rules')
def test_refresh_instance_security_rules(self, mock_refresh):
@@ -9975,8 +9982,8 @@ class ComputeAPITestCase(BaseTestCase):
def fake_service_is_up(*args, **kwargs):
return True
- self.stubs.Set(self.compute_api.servicegroup_api, 'service_is_up',
- fake_service_is_up)
+ self.stub_out('nova.servicegroup.api.API.service_is_up',
+ fake_service_is_up)
self.assertRaises(exception.ComputeServiceInUse,
self.compute_api.evacuate, self.context.elevated(), instance,
@@ -9996,18 +10003,17 @@ class ComputeAPITestCase(BaseTestCase):
host='fake_dest_host', on_shared_storage=True,
admin_password=None)
- def test_get_migrations(self):
+ @mock.patch.object(db, "migration_get_all_by_filters")
+ def test_get_migrations(self, mock_migration):
migration = test_migration.fake_db_migration()
filters = {'host': 'host1'}
- self.mox.StubOutWithMock(db, "migration_get_all_by_filters")
- db.migration_get_all_by_filters(self.context,
- filters).AndReturn([migration])
- self.mox.ReplayAll()
+ mock_migration.return_value = [migration]
migrations = self.compute_api.get_migrations(self.context,
filters)
self.assertEqual(1, len(migrations))
self.assertEqual(migrations[0].id, migration['id'])
+ mock_migration.assert_called_once_with(self.context, filters)
@mock.patch("nova.db.migration_get_in_progress_by_instance")
def test_get_migrations_in_progress_by_instance(self, mock_get):
@@ -10143,7 +10149,7 @@ class ComputeAPIIpFilterTestCase(test.NoDBTestCase):
self.assertEqual(1, kwargs['limit'])
-def fake_rpc_method(context, method, **kwargs):
+def fake_rpc_method(self, context, method, **kwargs):
pass
@@ -10169,8 +10175,8 @@ class ComputeAPIAggrTestCase(BaseTestCase):
super(ComputeAPIAggrTestCase, self).setUp()
self.api = compute_api.AggregateAPI()
self.context = context.get_admin_context()
- self.stubs.Set(self.api.compute_rpcapi.client, 'call', fake_rpc_method)
- self.stubs.Set(self.api.compute_rpcapi.client, 'cast', fake_rpc_method)
+ self.stub_out('oslo_messaging.rpc.client.call', fake_rpc_method)
+ self.stub_out('oslo_messaging.rpc.client.cast', fake_rpc_method)
def test_aggregate_no_zone(self):
# Ensure we can create an aggregate without an availability zone
@@ -10476,7 +10482,9 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertRaises(exception.InvalidAggregateActionDelete,
self.api.delete_aggregate, self.context, aggr.id)
- def test_add_host_to_aggregate(self):
+ @mock.patch.object(availability_zones,
+ 'update_host_availability_zone_cache')
+ def test_add_host_to_aggregate(self, mock_az):
# Ensure we can add a host to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values[0][0]
@@ -10488,16 +10496,9 @@ class ComputeAPIAggrTestCase(BaseTestCase):
hosts = kwargs["aggregate"].hosts
self.assertIn(fake_host, hosts)
- self.stubs.Set(self.api.compute_rpcapi, 'add_aggregate_host',
+ self.stub_out('nova.compute.rpcapi.ComputeAPI.add_aggregate_host',
fake_add_aggregate_host)
- self.mox.StubOutWithMock(availability_zones,
- 'update_host_availability_zone_cache')
-
- availability_zones.update_host_availability_zone_cache(self.context,
- fake_host)
- self.mox.ReplayAll()
-
fake_notifier.NOTIFICATIONS = []
aggr = self.api.add_host_to_aggregate(self.context,
aggr.id, fake_host)
@@ -10509,6 +10510,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertEqual(msg.event_type,
'aggregate.addhost.end')
self.assertEqual(len(aggr.hosts), 1)
+ mock_az.assert_called_once_with(self.context, fake_host)
def test_add_host_to_aggr_with_no_az(self):
values = _create_service_entries(self.context)
@@ -10584,7 +10586,9 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertEqual(fake_notifier.NOTIFICATIONS[1].publisher_id,
'compute.fake-mini')
- def test_remove_host_from_aggregate_active(self):
+ @mock.patch.object(availability_zones,
+ 'update_host_availability_zone_cache')
+ def test_remove_host_from_aggregate_active(self, mock_az):
# Ensure we can remove a host from an aggregate.
values = _create_service_entries(self.context)
fake_zone = values[0][0]
@@ -10599,15 +10603,9 @@ class ComputeAPIAggrTestCase(BaseTestCase):
hosts = kwargs["aggregate"].hosts
self.assertNotIn(host_to_remove, hosts)
- self.stubs.Set(self.api.compute_rpcapi, 'remove_aggregate_host',
+ self.stub_out('nova.compute.rpcapi.ComputeAPI.remove_aggregate_host',
fake_remove_aggregate_host)
- self.mox.StubOutWithMock(availability_zones,
- 'update_host_availability_zone_cache')
- availability_zones.update_host_availability_zone_cache(self.context,
- host_to_remove)
- self.mox.ReplayAll()
-
fake_notifier.NOTIFICATIONS = []
expected = self.api.remove_host_from_aggregate(self.context,
aggr.id,
@@ -10620,6 +10618,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertEqual(msg.event_type,
'aggregate.removehost.end')
self.assertEqual(len(aggr.hosts) - 1, len(expected.hosts))
+ mock_az.assert_called_with(self.context, host_to_remove)
def test_remove_host_from_aggregate_raise_not_found(self):
# Ensure ComputeHostNotFound is raised when removing invalid host.
@@ -10759,10 +10758,11 @@ class ComputeAggrTestCase(BaseTestCase):
self.aggr = db.aggregate_create(self.context, values, metadata=az)
def test_add_aggregate_host(self):
- def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
+ def fake_driver_add_to_aggregate(self, context, aggregate, host,
+ **_ignore):
fake_driver_add_to_aggregate.called = True
return {"foo": "bar"}
- self.stubs.Set(self.compute.driver, "add_to_aggregate",
+ self.stub_out("nova.virt.fake.FakeDriver.add_to_aggregate",
fake_driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, host="host",
@@ -10770,12 +10770,12 @@ class ComputeAggrTestCase(BaseTestCase):
self.assertTrue(fake_driver_add_to_aggregate.called)
def test_remove_aggregate_host(self):
- def fake_driver_remove_from_aggregate(context, aggregate, host,
+ def fake_driver_remove_from_aggregate(cls, context, aggregate, host,
**_ignore):
fake_driver_remove_from_aggregate.called = True
self.assertEqual("host", host, "host")
return {"foo": "bar"}
- self.stubs.Set(self.compute.driver, "remove_from_aggregate",
+ self.stub_out("nova.virt.fake.FakeDriver.remove_from_aggregate",
fake_driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context,
@@ -10784,13 +10784,13 @@ class ComputeAggrTestCase(BaseTestCase):
self.assertTrue(fake_driver_remove_from_aggregate.called)
def test_add_aggregate_host_passes_slave_info_to_driver(self):
- def driver_add_to_aggregate(context, aggregate, host, **kwargs):
+ def driver_add_to_aggregate(cls, context, aggregate, host, **kwargs):
self.assertEqual(self.context, context)
self.assertEqual(aggregate['id'], self.aggr['id'])
self.assertEqual(host, "the_host")
self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
- self.stubs.Set(self.compute.driver, "add_to_aggregate",
+ self.stub_out("nova.virt.fake.FakeDriver.add_to_aggregate",
driver_add_to_aggregate)
self.compute.add_aggregate_host(self.context, host="the_host",
@@ -10798,13 +10798,14 @@ class ComputeAggrTestCase(BaseTestCase):
aggregate=jsonutils.to_primitive(self.aggr))
def test_remove_from_aggregate_passes_slave_info_to_driver(self):
- def driver_remove_from_aggregate(context, aggregate, host, **kwargs):
+ def driver_remove_from_aggregate(cls, context, aggregate, host,
+ **kwargs):
self.assertEqual(self.context, context)
self.assertEqual(aggregate['id'], self.aggr['id'])
self.assertEqual(host, "the_host")
self.assertEqual("SLAVE_INFO", kwargs.get("slave_info"))
- self.stubs.Set(self.compute.driver, "remove_from_aggregate",
+ self.stub_out("nova.virt.fake.FakeDriver.remove_from_aggregate",
driver_remove_from_aggregate)
self.compute.remove_aggregate_host(self.context,
@@ -10853,7 +10854,7 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
instance_type['disabled'] = False
return instance_type
- self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
+ self.stub_out('nova.compute.flavors.get_flavor_by_flavor_id',
fake_get_flavor_by_flavor_id)
self._stub_migrate_server()
@@ -10872,7 +10873,7 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
instance_type['disabled'] = True
return instance_type
- self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
+ self.stub_out('nova.compute.flavors.get_flavor_by_flavor_id',
fake_get_flavor_by_flavor_id)
self.assertRaises(exception.FlavorNotFound,
@@ -10889,22 +10890,25 @@ class ComputeReschedulingTestCase(BaseTestCase):
def fake_update(*args, **kwargs):
self.updated_task_state = kwargs.get('task_state')
- self.stubs.Set(self.compute, '_instance_update', fake_update)
+ self.stub_out('nova.compute.manager.ComputeManager._instance_update',
+ fake_update)
def _reschedule(self, request_spec=None, filter_properties=None,
exc_info=None):
if not filter_properties:
filter_properties = {}
+ fake_taskapi = FakeComputeTaskAPI()
+ with mock.patch.object(self.compute, 'compute_task_api',
+ fake_taskapi):
+ instance = self._create_fake_instance_obj()
- instance = self._create_fake_instance_obj()
-
- scheduler_method = self.compute.compute_task_api.resize_instance
- method_args = (instance, None,
- dict(filter_properties=filter_properties),
- {}, None)
- return self.compute._reschedule(self.context, request_spec,
- filter_properties, instance, scheduler_method,
- method_args, self.expected_task_state, exc_info=exc_info)
+ scheduler_method = self.compute.compute_task_api.resize_instance
+ method_args = (instance, None,
+ dict(filter_properties=filter_properties),
+ {}, None)
+ return self.compute._reschedule(self.context, request_spec,
+ filter_properties, instance, scheduler_method,
+ method_args, self.expected_task_state, exc_info=exc_info)
def test_reschedule_no_filter_properties(self):
# no filter_properties will disable re-scheduling.
@@ -10953,24 +10957,15 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
self.instance_type = flavors.get_flavor_by_name(
"m1.tiny")
- def test_reschedule_resize_or_reraise_called(self):
+ @mock.patch.object(db, 'migration_create')
+ @mock.patch.object(compute_manager.ComputeManager,
+ '_reschedule_resize_or_reraise')
+ def test_reschedule_resize_or_reraise_called(self, mock_res, mock_mig):
"""Verify the rescheduling logic gets called when there is an error
during prep_resize.
"""
inst_obj = self._create_fake_instance_obj()
-
- self.mox.StubOutWithMock(self.compute.db, 'migration_create')
- self.mox.StubOutWithMock(self.compute, '_reschedule_resize_or_reraise')
-
- self.compute.db.migration_create(mox.IgnoreArg(),
- mox.IgnoreArg()).AndRaise(test.TestingException("Original"))
-
- self.compute._reschedule_resize_or_reraise(mox.IgnoreArg(), None,
- inst_obj, mox.IgnoreArg(), self.instance_type,
- mox.IgnoreArg(), {},
- {})
-
- self.mox.ReplayAll()
+ mock_mig.side_effect = test.TestingException("Original")
self.compute.prep_resize(self.context, image=None,
instance=inst_obj,
@@ -10979,7 +10974,12 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
filter_properties={}, node=None,
clean_shutdown=True)
- def test_reschedule_fails_with_exception(self):
+ mock_mig.assert_called_once_with(mock.ANY, mock.ANY)
+ mock_res.assert_called_once_with(mock.ANY, None, inst_obj, mock.ANY,
+ self.instance_type, mock.ANY, {}, {})
+
+ @mock.patch.object(compute_manager.ComputeManager, "_reschedule")
+ def test_reschedule_fails_with_exception(self, mock_res):
"""Original exception should be raised if the _reschedule method
raises another exception
"""
@@ -10987,14 +10987,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
scheduler_hint = dict(filter_properties={})
method_args = (instance, None, scheduler_hint, self.instance_type,
None)
- self.mox.StubOutWithMock(self.compute, "_reschedule")
-
- self.compute._reschedule(
- self.context, None, None, instance,
- self.compute.compute_task_api.resize_instance, method_args,
- task_states.RESIZE_PREP).AndRaise(
- InnerTestingException("Inner"))
- self.mox.ReplayAll()
+ mock_res.side_effect = InnerTestingException("Inner")
try:
raise test.TestingException("Original")
@@ -11005,7 +10998,13 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
None, instance, exc_info, self.instance_type,
self.none_quotas, {}, {})
- def test_reschedule_false(self):
+ mock_res.assert_called_once_with(
+ self.context, {}, {}, instance,
+ self.compute.compute_task_api.resize_instance, method_args,
+ task_states.RESIZE_PREP, exc_info)
+
+ @mock.patch.object(compute_manager.ComputeManager, "_reschedule")
+ def test_reschedule_false(self, mock_res):
"""Original exception should be raised if the resize is not
rescheduled.
"""
@@ -11013,13 +11012,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
scheduler_hint = dict(filter_properties={})
method_args = (instance, None, scheduler_hint, self.instance_type,
None)
- self.mox.StubOutWithMock(self.compute, "_reschedule")
-
- self.compute._reschedule(
- self.context, None, None, instance,
- self.compute.compute_task_api.resize_instance, method_args,
- task_states.RESIZE_PREP).AndReturn(False)
- self.mox.ReplayAll()
+ mock_res.return_value = False
try:
raise test.TestingException("Original")
@@ -11030,7 +11023,14 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
None, instance, exc_info, self.instance_type,
self.none_quotas, {}, {})
- def test_reschedule_true(self):
+ mock_res.assert_called_once_with(
+ self.context, {}, {}, instance,
+ self.compute.compute_task_api.resize_instance, method_args,
+ task_states.RESIZE_PREP, exc_info)
+
+ @mock.patch.object(compute_manager.ComputeManager, "_reschedule")
+ @mock.patch.object(compute_manager.ComputeManager, "_log_original_error")
+ def test_reschedule_true(self, mock_log, mock_res):
# If rescheduled, the original resize exception should be logged.
instance = self._create_fake_instance_obj()
scheduler_hint = dict(filter_properties={})
@@ -11041,21 +11041,17 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
raise test.TestingException("Original")
except Exception:
exc_info = sys.exc_info()
-
- self.mox.StubOutWithMock(self.compute, "_reschedule")
- self.mox.StubOutWithMock(self.compute, "_log_original_error")
- self.compute._reschedule(self.context, {}, {},
- instance,
- self.compute.compute_task_api.resize_instance, method_args,
- task_states.RESIZE_PREP, exc_info).AndReturn(True)
-
- self.compute._log_original_error(exc_info, instance.uuid)
- self.mox.ReplayAll()
+ mock_res.return_value = True
self.compute._reschedule_resize_or_reraise(
self.context, None, instance, exc_info,
self.instance_type, self.none_quotas, {}, {})
+ mock_res.assert_called_once_with(self.context, {}, {},
+ instance, self.compute.compute_task_api.resize_instance,
+ method_args, task_states.RESIZE_PREP, exc_info)
+ mock_log.assert_called_once_with(exc_info, instance.uuid)
+
class ComputeInactiveImageTestCase(BaseTestCase):
def setUp(self):
@@ -11069,7 +11065,8 @@ class ComputeInactiveImageTestCase(BaseTestCase):
'something_else': 'meow'}}
fake_image.stub_out_image_service(self)
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
+ fake_show)
self.compute_api = compute.API()
def test_create_instance_with_deleted_image(self):
@@ -11088,11 +11085,11 @@ class EvacuateHostTestCase(BaseTestCase):
self.inst.task_state = task_states.REBUILDING
self.inst.save()
- def fake_get_compute_info(context, host):
+ def fake_get_compute_info(cls, context, host):
cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename)
return cn
- self.stubs.Set(self.compute, '_get_compute_info',
+ self.stub_out('nova.compute.manager.ComputeManager._get_compute_info',
fake_get_compute_info)
self.useFixture(fixtures.SpawnIsSynchronousFixture())
@@ -11134,42 +11131,45 @@ class EvacuateHostTestCase(BaseTestCase):
def test_rebuild_on_host_updated_target(self):
"""Confirm evacuate scenario updates host and node."""
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
-
def fake_get_compute_info(context, host):
self.assertTrue(context.is_admin)
self.assertEqual('fake-mini', host)
cn = objects.ComputeNode(hypervisor_hostname=self.rt.nodename)
return cn
- self.stubs.Set(self.compute, '_get_compute_info',
- fake_get_compute_info)
- self.mox.ReplayAll()
-
- self._rebuild()
-
- # Should be on destination host
- instance = db.instance_get(self.context, self.inst.id)
- self.assertEqual(instance['host'], self.compute.host)
- self.assertEqual(NODENAME, instance['node'])
+ with test.nested(
+ mock.patch.object(self.compute.driver, 'instance_on_disk',
+ side_effect=lambda x: True),
+ mock.patch.object(self.compute, '_get_compute_info',
+ side_effect=fake_get_compute_info)
+ ) as (mock_inst, mock_get):
+ self._rebuild()
+
+ # Should be on destination host
+ instance = db.instance_get(self.context, self.inst.id)
+ self.assertEqual(instance['host'], self.compute.host)
+ self.assertEqual(NODENAME, instance['node'])
+ self.assertTrue(mock_inst.called)
+ self.assertTrue(mock_get.called)
def test_rebuild_on_host_updated_target_node_not_found(self):
"""Confirm evacuate scenario where compute_node isn't found."""
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
-
def fake_get_compute_info(context, host):
raise exception.ComputeHostNotFound(host=host)
-
- self.stubs.Set(self.compute, '_get_compute_info',
- fake_get_compute_info)
- self.mox.ReplayAll()
-
- self._rebuild()
-
- # Should be on destination host
- instance = db.instance_get(self.context, self.inst.id)
- self.assertEqual(instance['host'], self.compute.host)
- self.assertIsNone(instance['node'])
+ with test.nested(
+ mock.patch.object(self.compute.driver, 'instance_on_disk',
+ side_effect=lambda x: True),
+ mock.patch.object(self.compute, '_get_compute_info',
+ side_effect=fake_get_compute_info)
+ ) as (mock_inst, mock_get):
+ self._rebuild()
+
+ # Should be on destination host
+ instance = db.instance_get(self.context, self.inst.id)
+ self.assertEqual(instance['host'], self.compute.host)
+ self.assertIsNone(instance['node'])
+ self.assertTrue(mock_inst.called)
+ self.assertTrue(mock_get.called)
def test_rebuild_on_host_node_passed(self):
patch_get_info = mock.patch.object(self.compute, '_get_compute_info')
@@ -11193,8 +11193,8 @@ class EvacuateHostTestCase(BaseTestCase):
{"vm_state": vm_states.STOPPED})
self.inst.vm_state = vm_states.STOPPED
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
- self.mox.ReplayAll()
+ self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',
+ lambda *a, **ka: True)
self._rebuild()
@@ -11204,17 +11204,21 @@ class EvacuateHostTestCase(BaseTestCase):
def test_rebuild_with_wrong_shared_storage(self):
"""Confirm evacuate scenario does not update host."""
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
- self.mox.ReplayAll()
-
- self.assertRaises(exception.InvalidSharedStorage,
+ with mock.patch.object(self.compute.driver, 'instance_on_disk',
+ side_effect=lambda x: True) as mock_inst:
+ self.assertRaises(exception.InvalidSharedStorage,
lambda: self._rebuild(on_shared_storage=False))
- # Should remain on original host
- instance = db.instance_get(self.context, self.inst.id)
- self.assertEqual(instance['host'], 'fake_host_2')
+ # Should remain on original host
+ instance = db.instance_get(self.context, self.inst.id)
+ self.assertEqual(instance['host'], 'fake_host_2')
+ self.assertTrue(mock_inst.called)
- def test_rebuild_on_host_with_volumes(self):
+ @mock.patch.object(cinder.API, 'detach')
+ @mock.patch.object(compute_manager.ComputeManager, '_prep_block_device')
+ @mock.patch.object(compute_manager.ComputeManager, '_driver_detach_volume')
+ def test_rebuild_on_host_with_volumes(self, mock_drv_detach, mock_prep,
+ mock_detach):
"""Confirm evacuate scenario reconnects volumes."""
values = {'instance_uuid': self.inst.uuid,
'source_type': 'volume',
@@ -11227,38 +11231,21 @@ class EvacuateHostTestCase(BaseTestCase):
def fake_volume_get(self, context, volume):
return {'id': 'fake_volume_id'}
- self.stubs.Set(cinder.API, "get", fake_volume_get)
+ self.stub_out("nova.volume.cinder.API.get", fake_volume_get)
# Stub out and record whether it gets detached
result = {"detached": False}
- def fake_detach(self, context, volume, instance_uuid, attachment_id):
- result["detached"] = volume["id"] == 'fake_volume_id'
- self.stubs.Set(cinder.API, "detach", fake_detach)
-
- self.mox.StubOutWithMock(self.compute, '_driver_detach_volume')
- self.compute._driver_detach_volume(mox.IsA(self.context),
- mox.IsA(instance_obj.Instance),
- mox.IsA(objects.BlockDeviceMapping))
+ def fake_detach(context, volume, instance_uuid, attachment_id):
+ result["detached"] = volume == 'fake_volume_id'
+ mock_detach.side_effect = fake_detach
def fake_terminate_connection(self, context, volume, connector):
return {}
- self.stubs.Set(cinder.API, "terminate_connection",
- fake_terminate_connection)
-
- # make sure volumes attach, detach are called
- self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
- self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg(),
- mox.IgnoreArg(), None)
-
- self.mox.StubOutWithMock(self.compute, '_prep_block_device')
- self.compute._prep_block_device(mox.IsA(self.context),
- mox.IsA(objects.Instance),
- mox.IgnoreArg())
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
- self.mox.ReplayAll()
-
+ self.stub_out("nova.volume.cinder.API.terminate_connection",
+ fake_terminate_connection)
+ self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',
+ lambda *a, **ka: True)
self._rebuild()
# cleanup
@@ -11269,40 +11256,52 @@ class EvacuateHostTestCase(BaseTestCase):
for bdm in bdms:
db.block_device_mapping_destroy(self.context, bdm['id'])
- def test_rebuild_on_host_with_shared_storage(self):
+ mock_drv_detach.assert_called_once_with(
+ test.MatchType(context.RequestContext),
+ test.MatchType(objects.Instance),
+ test.MatchType(objects.BlockDeviceMapping))
+ # make sure volumes attach, detach are called
+ mock_detach.assert_called_once_with(
+ test.MatchType(context.RequestContext),
+ mock.ANY, mock.ANY, None)
+ mock_prep.assert_called_once_with(
+ test.MatchType(context.RequestContext),
+ test.MatchType(objects.Instance), mock.ANY)
+
+ @mock.patch.object(fake.FakeDriver, 'spawn')
+ def test_rebuild_on_host_with_shared_storage(self, mock_spawn):
"""Confirm evacuate scenario on shared storage."""
- self.mox.StubOutWithMock(self.compute.driver, 'spawn')
- self.compute.driver.spawn(mox.IsA(self.context),
- mox.IsA(objects.Instance),
- mox.IsA(objects.ImageMeta),
- mox.IgnoreArg(), 'newpass',
- network_info=mox.IgnoreArg(),
- block_device_info=mox.IgnoreArg())
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
- self.mox.ReplayAll()
+ self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',
+ lambda *a, **ka: True)
self._rebuild()
- def test_rebuild_on_host_without_shared_storage(self):
+ mock_spawn.assert_called_once_with(
+ test.MatchType(context.RequestContext),
+ test.MatchType(objects.Instance),
+ test.MatchType(objects.ImageMeta),
+ mock.ANY, 'newpass',
+ network_info=mock.ANY,
+ block_device_info=mock.ANY)
+
+ @mock.patch.object(fake.FakeDriver, 'spawn')
+ def test_rebuild_on_host_without_shared_storage(self, mock_spawn):
"""Confirm evacuate scenario without shared storage
(rebuild from image)
"""
-
- self.mox.StubOutWithMock(self.compute.driver, 'spawn')
- self.compute.driver.spawn(mox.IsA(self.context),
- mox.IsA(objects.Instance),
- mox.IsA(objects.ImageMeta),
- mox.IgnoreArg(), mox.IsA('newpass'),
- network_info=mox.IgnoreArg(),
- block_device_info=mox.IgnoreArg())
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk',
- lambda x: False)
- self.mox.ReplayAll()
+ self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',
+ lambda *a, **ka: False)
self._rebuild(on_shared_storage=False)
+ mock_spawn.assert_called_once_with(
+ test.MatchType(context.RequestContext),
+ test.MatchType(objects.Instance),
+ test.MatchType(objects.ImageMeta),
+ mock.ANY, 'newpass',
+ network_info=mock.ANY,
+ block_device_info=mock.ANY)
+
def test_rebuild_on_host_instance_exists(self):
"""Rebuild if instance exists raises an exception."""
db.instance_update(self.context, self.inst.uuid,
@@ -11310,54 +11309,55 @@ class EvacuateHostTestCase(BaseTestCase):
self.compute.build_and_run_instance(self.context,
self.inst, {}, {}, {}, block_device_mapping=[])
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
+ self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',
+ lambda *a, **kw: True)
self.assertRaises(exception.InstanceExists,
lambda: self._rebuild(on_shared_storage=True))
def test_driver_does_not_support_recreate(self):
with mock.patch.dict(self.compute.driver.capabilities,
supports_recreate=False):
- self.stubs.Set(self.compute.driver, 'instance_on_disk',
- lambda x: True)
+ self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',
+ lambda *a, **kw: True)
self.assertRaises(exception.InstanceRecreateNotSupported,
lambda: self._rebuild(on_shared_storage=True))
+ @mock.patch.object(fake.FakeDriver, 'spawn')
@mock.patch('nova.objects.ImageMeta.from_image_ref')
def test_on_shared_storage_not_provided_host_without_shared_storage(self,
- mock_image_meta):
- # 'spawn' should be called with the image_meta from the image_ref
- self.mox.StubOutWithMock(self.compute.driver, 'spawn')
- self.compute.driver.spawn(mox.IsA(self.context),
- mox.IsA(objects.Instance),
- mock_image_meta.return_value,
- mox.IgnoreArg(), mox.IsA('newpass'),
- network_info=mox.IgnoreArg(),
- block_device_info=mox.IgnoreArg())
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk',
- lambda x: False)
- self.mox.ReplayAll()
+ mock_image_meta, mock_spawn):
+ self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',
+ lambda *a, **ka: False)
self._rebuild(on_shared_storage=None)
+ # 'spawn' should be called with the image_meta from the image_ref
+ mock_spawn.assert_called_once_with(
+ test.MatchType(context.RequestContext),
+ test.MatchType(objects.Instance),
+ mock_image_meta.return_value,
+ mock.ANY, 'newpass',
+ network_info=mock.ANY,
+ block_device_info=mock.ANY)
+
+ @mock.patch.object(fake.FakeDriver, 'spawn')
@mock.patch('nova.objects.Instance.image_meta',
new_callable=mock.PropertyMock)
def test_on_shared_storage_not_provided_host_with_shared_storage(self,
- mock_image_meta):
- # 'spawn' should be called with the image_meta from the instance
- self.mox.StubOutWithMock(self.compute.driver, 'spawn')
- self.compute.driver.spawn(mox.IsA(self.context),
- mox.IsA(objects.Instance),
- mock_image_meta.return_value,
- mox.IgnoreArg(), 'newpass',
- network_info=mox.IgnoreArg(),
- block_device_info=mox.IgnoreArg())
-
- self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
- self.mox.ReplayAll()
+ mock_image_meta, mock_spawn):
+ self.stub_out('nova.virt.fake.FakeDriver.instance_on_disk',
+ lambda *a, **ka: True)
self._rebuild(on_shared_storage=None)
+ mock_spawn.assert_called_once_with(
+ test.MatchType(context.RequestContext),
+ test.MatchType(objects.Instance),
+ mock_image_meta.return_value,
+ mock.ANY, 'newpass',
+ network_info=mock.ANY,
+ block_device_info=mock.ANY)
+
def test_rebuild_migration_passed_in(self):
migration = mock.Mock(spec=objects.Migration)
@@ -11446,7 +11446,7 @@ class ComputeInjectedFilesTestCase(BaseTestCase):
def setUp(self):
super(ComputeInjectedFilesTestCase, self).setUp()
self.instance = self._create_fake_instance_obj()
- self.stubs.Set(self.compute.driver, 'spawn', self._spawn)
+ self.stub_out('nova.virt.fake.FakeDriver.spawn', self._spawn)
self.useFixture(fixtures.SpawnIsSynchronousFixture())
def _spawn(self, context, instance, image_meta, injected_files,
diff --git a/nova/tests/unit/compute/test_compute_api.py b/nova/tests/unit/compute/test_compute_api.py
index a9263938c5..e4e959efc2 100644
--- a/nova/tests/unit/compute/test_compute_api.py
+++ b/nova/tests/unit/compute/test_compute_api.py
@@ -1040,7 +1040,7 @@ class _ComputeAPIUnitTestMixIn(object):
self._test_delete_resized_part(inst)
if inst.vm_state == vm_states.SOFT_DELETED:
soft_delete = True
- if not self.compute_api._expect_no_host(inst):
+ if inst.vm_state != vm_states.SHELVED_OFFLOADED:
self.context.elevated().AndReturn(self.context)
objects.Service.get_by_compute_host(self.context,
inst.host).AndReturn(objects.Service())
@@ -1049,7 +1049,8 @@ class _ComputeAPIUnitTestMixIn(object):
inst.host != 'down-host')
if (inst.host == 'down-host' or
- self.compute_api._expect_no_host(inst)):
+ inst.vm_state == vm_states.SHELVED_OFFLOADED):
+
self._test_downed_host_part(inst, updates, delete_time,
delete_type)
cast = False
@@ -3853,54 +3854,6 @@ class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase):
self.assertRaises(exception.CannotResizeToSameFlavor,
self._test_resize, same_flavor=True)
- @mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
- @mock.patch('nova.context.RequestContext.elevated')
- @mock.patch.object(objects.Instance, 'save')
- @mock.patch.object(quota.QUOTAS, 'reserve')
- @mock.patch.object(compute_utils, 'notify_about_instance_usage')
- @mock.patch.object(objects.BlockDeviceMapping, 'destroy')
- @mock.patch.object(objects.Instance, 'destroy')
- def _test_delete_volume_backed_instance(
- self, vm_state, mock_instance_destroy, bdm_destroy,
- notify_about_instance_usage, mock_reserve,
- mock_save, mock_elevated, bdm_get_by_instance_uuid):
- volume_id = uuidutils.generate_uuid()
- bdms = [objects.BlockDeviceMapping(
- **fake_block_device.FakeDbBlockDeviceDict(
- {'id': 42, 'volume_id': volume_id,
- 'source_type': 'volume', 'destination_type': 'volume',
- 'delete_on_termination': False}))]
- reservations = ['fake-resv']
-
- bdm_get_by_instance_uuid.return_value = bdms
- mock_reserve.return_value = reservations
- mock_elevated.return_value = self.context
-
- params = {'host': None, 'vm_state': vm_state}
- inst = self._create_instance_obj(params=params)
- connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
-
- with mock.patch.object(self.compute_api.network_api,
- 'deallocate_for_instance') as mock_deallocate, \
- mock.patch.object(self.compute_api.volume_api,
- 'terminate_connection') as mock_terminate_conn, \
- mock.patch.object(self.compute_api.volume_api,
- 'detach') as mock_detach:
- self.compute_api.delete(self.context, inst)
-
- mock_deallocate.assert_called_once_with(self.context, inst)
- mock_detach.assert_called_once_with(self.context, volume_id,
- inst.uuid)
- mock_terminate_conn.assert_called_once_with(self.context,
- volume_id, connector)
- bdm_destroy.assert_called_once_with()
-
- def test_delete_volume_backed_instance_in_error(self):
- self._test_delete_volume_backed_instance(vm_states.ERROR)
-
- def test_delete_volume_backed_instance_in_shelved_offloaded(self):
- self._test_delete_volume_backed_instance(vm_states.SHELVED_OFFLOADED)
-
class ComputeAPIAPICellUnitTestCase(_ComputeAPIUnitTestMixIn,
test.NoDBTestCase):
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 0621611422..480b4c2f76 100644..100755
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -3214,6 +3214,23 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
+ def test_instance_restore_notification(self):
+ inst_obj = fake_instance.fake_instance_obj(self.context,
+ vm_state=vm_states.SOFT_DELETED)
+ with test.nested(
+ mock.patch.object(nova.compute.utils,
+ 'notify_about_instance_action'),
+ mock.patch.object(self.compute, '_notify_about_instance_usage'),
+ mock.patch.object(objects.Instance, 'save'),
+ mock.patch.object(self.compute.driver, 'restore')
+ ) as (fake_notify, fake_notify_usage, fake_save, fake_restore):
+ self.compute.restore_instance(self.context, inst_obj)
+ fake_notify.assert_has_calls([
+ mock.call(self.context, inst_obj, 'fake-mini',
+ action='restore', phase='start'),
+ mock.call(self.context, inst_obj, 'fake-mini',
+ action='restore', phase='end')])
+
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def setUp(self):
@@ -4129,34 +4146,6 @@ class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
@mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
@mock.patch.object(manager.ComputeManager, '_shutdown_instance')
@mock.patch.object(objects.Instance, 'save')
- def test_build_resources_with_network_info_obj_on_spawn_failure(self,
- mock_save, mock_shutdown, mock_build):
- mock_save.return_value = self.instance
- mock_build.return_value = self.network_info
-
- test_exception = test.TestingException()
-
- def fake_spawn():
- raise test_exception
-
- try:
- with self.compute._build_resources(self.context, self.instance,
- self.requested_networks, self.security_groups,
- self.image, self.block_device_mapping):
- fake_spawn()
- except Exception as e:
- self.assertEqual(test_exception, e)
-
- mock_save.assert_called_once_with()
- mock_build.assert_called_once_with(self.context, self.instance,
- self.requested_networks, self.security_groups)
- mock_shutdown.assert_called_once_with(self.context, self.instance,
- self.block_device_mapping, self.requested_networks,
- try_deallocate_networks=False)
-
- @mock.patch.object(manager.ComputeManager, '_build_networks_for_instance')
- @mock.patch.object(manager.ComputeManager, '_shutdown_instance')
- @mock.patch.object(objects.Instance, 'save')
def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self,
mock_save, mock_shutdown, mock_build):
mock_save.return_value = self.instance
diff --git a/nova/tests/unit/compute/test_keypairs.py b/nova/tests/unit/compute/test_keypairs.py
index f85825a163..528a2a46b2 100644
--- a/nova/tests/unit/compute/test_keypairs.py
+++ b/nova/tests/unit/compute/test_keypairs.py
@@ -53,7 +53,7 @@ class KeypairAPITestCase(test_compute.BaseTestCase):
def _keypair_db_call_stubs(self):
- def db_key_pair_get_all_by_user(context, user_id):
+ def db_key_pair_get_all_by_user(context, user_id, limit, marker):
return [dict(test_keypair.fake_keypair,
name=self.existing_key_name,
public_key=self.pub_key,
diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py
index 83778e6535..b1aa61ee66 100644
--- a/nova/tests/unit/db/test_db_api.py
+++ b/nova/tests/unit/db/test_db_api.py
@@ -6540,6 +6540,20 @@ class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin):
self.assertEqual(len(real_vifs1), 0)
self.assertEqual(len(real_vifs2), 1)
+ def test_virtual_interface_delete(self):
+ values = [dict(address='fake1'), dict(address='fake2'),
+ dict(address='fake3')]
+ vifs = []
+ for vals in values:
+ vifs.append(self._create_virt_interface(
+ dict(vals, instance_uuid=self.instance_uuid)))
+
+ db.virtual_interface_delete(self.ctxt, vifs[0]['id'])
+
+ real_vifs = db.virtual_interface_get_by_instance(self.ctxt,
+ self.instance_uuid)
+ self.assertEqual(2, len(real_vifs))
+
def test_virtual_interface_get_all(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = [dict(address='fake1'), dict(address='fake2'),
@@ -6958,6 +6972,92 @@ class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin):
self._assertEqualListsOfObjects(key_pairs_user_1, real_keys_1)
self._assertEqualListsOfObjects(key_pairs_user_2, real_keys_2)
+ def test_key_pair_get_all_by_user_limit_and_marker(self):
+ params = [
+ {'name': 'test_1', 'user_id': 'test_user_id', 'type': 'ssh'},
+ {'name': 'test_2', 'user_id': 'test_user_id', 'type': 'ssh'},
+ {'name': 'test_3', 'user_id': 'test_user_id', 'type': 'ssh'}
+ ]
+
+ # check all 3 keypairs
+ keys = [self._create_key_pair(p) for p in params]
+ db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id')
+ self._assertEqualListsOfObjects(keys, db_keys)
+
+ # check only 1 keypair
+ expected_keys = [keys[0]]
+ db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id',
+ limit=1)
+ self._assertEqualListsOfObjects(expected_keys, db_keys)
+
+ # check keypairs after 'test_1'
+ expected_keys = [keys[1], keys[2]]
+ db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id',
+ marker='test_1')
+ self._assertEqualListsOfObjects(expected_keys, db_keys)
+
+        # check only 1 keypair after 'test_1'
+ expected_keys = [keys[1]]
+ db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id',
+ limit=1,
+ marker='test_1')
+ self._assertEqualListsOfObjects(expected_keys, db_keys)
+
+ # check non-existing keypair
+ self.assertRaises(exception.MarkerNotFound,
+ db.key_pair_get_all_by_user,
+ self.ctxt, 'test_user_id',
+ limit=1, marker='unknown_kp')
+
+ def test_key_pair_get_all_by_user_different_users(self):
+ params1 = [
+ {'name': 'test_1', 'user_id': 'test_user_1', 'type': 'ssh'},
+ {'name': 'test_2', 'user_id': 'test_user_1', 'type': 'ssh'},
+ {'name': 'test_3', 'user_id': 'test_user_1', 'type': 'ssh'}
+ ]
+ params2 = [
+ {'name': 'test_1', 'user_id': 'test_user_2', 'type': 'ssh'},
+ {'name': 'test_2', 'user_id': 'test_user_2', 'type': 'ssh'},
+ {'name': 'test_3', 'user_id': 'test_user_2', 'type': 'ssh'}
+ ]
+
+ # create keypairs for two users
+ keys1 = [self._create_key_pair(p) for p in params1]
+ keys2 = [self._create_key_pair(p) for p in params2]
+
+        # check all 3 keypairs for test_user_1
+ db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_1')
+ self._assertEqualListsOfObjects(keys1, db_keys)
+
+        # check all 3 keypairs for test_user_2
+ db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_2')
+ self._assertEqualListsOfObjects(keys2, db_keys)
+
+ # check only 1 keypair for test_user_1
+ expected_keys = [keys1[0]]
+ db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_1',
+ limit=1)
+ self._assertEqualListsOfObjects(expected_keys, db_keys)
+
+ # check keypairs after 'test_1' for test_user_2
+ expected_keys = [keys2[1], keys2[2]]
+ db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_2',
+ marker='test_1')
+ self._assertEqualListsOfObjects(expected_keys, db_keys)
+
+        # check only 1 keypair after 'test_1' for test_user_1
+ expected_keys = [keys1[1]]
+ db_keys = db.key_pair_get_all_by_user(self.ctxt, 'test_user_1',
+ limit=1,
+ marker='test_1')
+ self._assertEqualListsOfObjects(expected_keys, db_keys)
+
+ # check non-existing keypair for test_user_2
+ self.assertRaises(exception.MarkerNotFound,
+ db.key_pair_get_all_by_user,
+ self.ctxt, 'test_user_2',
+ limit=1, marker='unknown_kp')
+
def test_key_pair_count_by_user(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1', 'type': 'ssh'},
diff --git a/nova/tests/unit/objects/test_keypair.py b/nova/tests/unit/objects/test_keypair.py
index ce063123bb..a252003f22 100644
--- a/nova/tests/unit/objects/test_keypair.py
+++ b/nova/tests/unit/objects/test_keypair.py
@@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
import mock
from oslo_utils import timeutils
@@ -118,9 +120,11 @@ class _TestKeyPairObject(object):
self.compare_obj(keypairs[1], fake_keypair)
self.assertEqual(2, keypair.KeyPairList.get_count_by_user(self.context,
'fake-user'))
- mock_kp_get.assert_called_once_with(self.context, 'fake-user')
+ mock_kp_get.assert_called_once_with(self.context, 'fake-user',
+ limit=None, marker=None)
mock_kp_count.assert_called_once_with(self.context, 'fake-user')
- mock_api_get.assert_called_once_with(self.context, 'fake-user')
+ mock_api_get.assert_called_once_with(self.context, 'fake-user',
+ limit=None, marker=None)
mock_api_count.assert_called_once_with(self.context, 'fake-user')
def test_obj_make_compatible(self):
@@ -130,6 +134,96 @@ class _TestKeyPairObject(object):
keypair_obj.obj_make_compatible(fake_keypair_copy, '1.1')
self.assertNotIn('type', fake_keypair_copy)
+ @mock.patch('nova.db.key_pair_get_all_by_user')
+ @mock.patch('nova.objects.KeyPairList._get_from_db')
+ def test_get_by_user_limit(self, mock_api_get, mock_kp_get):
+ api_keypair = copy.deepcopy(fake_keypair)
+ api_keypair['name'] = 'api_kp'
+
+ mock_api_get.return_value = [api_keypair]
+ mock_kp_get.return_value = [fake_keypair]
+
+ keypairs = keypair.KeyPairList.get_by_user(self.context, 'fake-user',
+ limit=1)
+ self.assertEqual(1, len(keypairs))
+ self.compare_obj(keypairs[0], api_keypair)
+ mock_api_get.assert_called_once_with(self.context, 'fake-user',
+ limit=1, marker=None)
+ self.assertFalse(mock_kp_get.called)
+
+ @mock.patch('nova.db.key_pair_get_all_by_user')
+ @mock.patch('nova.objects.KeyPairList._get_from_db')
+ def test_get_by_user_marker(self, mock_api_get, mock_kp_get):
+ api_kp_name = 'api_kp'
+ mock_api_get.side_effect = exception.MarkerNotFound(marker=api_kp_name)
+ mock_kp_get.return_value = [fake_keypair]
+
+ keypairs = keypair.KeyPairList.get_by_user(self.context, 'fake-user',
+ marker=api_kp_name)
+ self.assertEqual(1, len(keypairs))
+ self.compare_obj(keypairs[0], fake_keypair)
+ mock_api_get.assert_called_once_with(self.context, 'fake-user',
+ limit=None,
+ marker=api_kp_name)
+ mock_kp_get.assert_called_once_with(self.context, 'fake-user',
+ limit=None,
+ marker=api_kp_name)
+
+ @mock.patch('nova.db.key_pair_get_all_by_user')
+ @mock.patch('nova.objects.KeyPairList._get_from_db')
+ def test_get_by_user_limit_and_marker_api(self, mock_api_get, mock_kp_get):
+ first_api_kp_name = 'first_api_kp'
+ api_keypair = copy.deepcopy(fake_keypair)
+ api_keypair['name'] = 'api_kp'
+
+ mock_api_get.return_value = [api_keypair]
+ mock_kp_get.return_value = [fake_keypair]
+
+ keypairs = keypair.KeyPairList.get_by_user(self.context, 'fake-user',
+ limit=5,
+ marker=first_api_kp_name)
+ self.assertEqual(2, len(keypairs))
+ self.compare_obj(keypairs[0], api_keypair)
+ self.compare_obj(keypairs[1], fake_keypair)
+ mock_api_get.assert_called_once_with(self.context, 'fake-user',
+ limit=5,
+ marker=first_api_kp_name)
+ mock_kp_get.assert_called_once_with(self.context, 'fake-user',
+ limit=4, marker=None)
+
+ @mock.patch('nova.db.key_pair_get_all_by_user')
+ @mock.patch('nova.objects.KeyPairList._get_from_db')
+ def test_get_by_user_limit_and_marker_main(self, mock_api_get,
+ mock_kp_get):
+ first_main_kp_name = 'first_main_kp'
+ mock_api_get.side_effect = exception.MarkerNotFound(
+ marker=first_main_kp_name)
+ mock_kp_get.return_value = [fake_keypair]
+
+ keypairs = keypair.KeyPairList.get_by_user(self.context, 'fake-user',
+ limit=5,
+ marker=first_main_kp_name)
+ self.assertEqual(1, len(keypairs))
+ self.compare_obj(keypairs[0], fake_keypair)
+ mock_api_get.assert_called_once_with(self.context, 'fake-user',
+ limit=5,
+ marker=first_main_kp_name)
+ mock_kp_get.assert_called_once_with(self.context, 'fake-user',
+ limit=5, marker=first_main_kp_name)
+
+ @mock.patch('nova.db.key_pair_get_all_by_user')
+ @mock.patch('nova.objects.KeyPairList._get_from_db')
+ def test_get_by_user_limit_and_marker_invalid_marker(
+ self, mock_api_get, mock_kp_get):
+ kp_name = 'unknown_kp'
+ mock_api_get.side_effect = exception.MarkerNotFound(marker=kp_name)
+ mock_kp_get.side_effect = exception.MarkerNotFound(marker=kp_name)
+
+ self.assertRaises(exception.MarkerNotFound,
+ keypair.KeyPairList.get_by_user,
+ self.context, 'fake-user',
+ limit=5, marker=kp_name)
+
class TestMigrationObject(test_objects._LocalTest,
_TestKeyPairObject):
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index 8865136d42..26aa98c132 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -1157,7 +1157,7 @@ object_data = {
'LibvirtLiveMigrateBDMInfo': '1.0-252aabb723ca79d5469fa56f64b57811',
'LibvirtLiveMigrateData': '1.3-2795e5646ee21e8c7f1c3e64fb6c80a3',
'KeyPair': '1.4-1244e8d1b103cc69d038ed78ab3a8cc6',
- 'KeyPairList': '1.2-58b94f96e776bedaf1e192ddb2a24c4e',
+ 'KeyPairList': '1.3-94aad3ac5c938eef4b5e83da0212f506',
'Migration': '1.4-17979b9f2ae7f28d97043a220b2a8350',
'MigrationContext': '1.1-9fb17b0b521370957a884636499df52d',
'MigrationList': '1.3-55595bfc1a299a5962614d0821a3567e',
@@ -1200,7 +1200,7 @@ object_data = {
'VirtCPUFeature': '1.0-3310718d8c72309259a6e39bdefe83ee',
'VirtCPUModel': '1.0-6a5cc9f322729fc70ddc6733bacd57d3',
'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
- 'VirtualInterface': '1.2-25730967393678bd4da092b98694f971',
+ 'VirtualInterface': '1.3-efd3ca8ebcc5ce65fff5a25f31754c54',
'VirtualInterfaceList': '1.0-9750e2074437b3077e46359102779fc6',
'VolumeUsage': '1.0-6c8190c46ce1469bb3286a1f21c2e475',
'XenapiLiveMigrateData': '1.0-5f982bec68f066e194cd9ce53a24ac4c',
diff --git a/nova/tests/unit/objects/test_virtual_interface.py b/nova/tests/unit/objects/test_virtual_interface.py
index 3cdbefd457..000e6dbdfb 100644
--- a/nova/tests/unit/objects/test_virtual_interface.py
+++ b/nova/tests/unit/objects/test_virtual_interface.py
@@ -124,6 +124,20 @@ class _TestVirtualInterface(object):
'fake-uuid')
delete.assert_called_with(self.context, 'fake-uuid')
+ def test_destroy(self):
+ vif = vif_obj.VirtualInterface(context=self.context)
+ vif.address = '00:00:00:00:00:00'
+ vif.network_id = 123
+ vif.instance_uuid = uuids.instance_uuid
+ vif.uuid = uuids.vif_uuid
+ vif.tag = 'foo'
+ vif.create()
+
+ vif = vif_obj.VirtualInterface.get_by_id(self.context, vif.id)
+ vif.destroy()
+ self.assertIsNone(vif_obj.VirtualInterface.get_by_id(self.context,
+ vif.id))
+
def test_obj_make_compatible_pre_1_1(self):
vif = vif_obj.VirtualInterface(context=self.context)
vif.address = '00:00:00:00:00:00'
diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py
index 23f8b49a37..7de8e7ba7e 100644
--- a/nova/tests/unit/virt/libvirt/test_driver.py
+++ b/nova/tests/unit/virt/libvirt/test_driver.py
@@ -11989,7 +11989,9 @@ class LibvirtConnTestCase(test.NoDBTestCase):
instance_domains = [
DiagFakeDomain("instance0000001"),
- DiagFakeDomain("instance0000002")]
+ DiagFakeDomain("instance0000002"),
+ DiagFakeDomain("instance0000003"),
+ DiagFakeDomain("instance0000004")]
mock_list.return_value = instance_domains
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
@@ -12005,16 +12007,36 @@ class LibvirtConnTestCase(test.NoDBTestCase):
'virt_disk_size': '0',
'backing_file': '/somepath/disk2',
'disk_size': '10737418240',
- 'over_committed_disk_size': '21474836480'}]}
+ 'over_committed_disk_size': '21474836480'}],
+ 'instance0000003':
+ [{'type': 'raw', 'path': '/somepath/disk3',
+ 'virt_disk_size': '0',
+ 'backing_file': '/somepath/disk3',
+ 'disk_size': '21474836480',
+ 'over_committed_disk_size': '32212254720'}],
+ 'instance0000004':
+ [{'type': 'raw', 'path': '/somepath/disk4',
+ 'virt_disk_size': '0',
+ 'backing_file': '/somepath/disk4',
+ 'disk_size': '32212254720',
+ 'over_committed_disk_size': '42949672960'}]}
def side_effect(name, dom, block_device_info):
if name == 'instance0000001':
self.assertEqual('/dev/vda',
block_device_info['root_device_name'])
- raise OSError(errno.EACCES, 'Permission denied')
+ raise OSError(errno.ENOENT, 'No such file or directory')
if name == 'instance0000002':
self.assertEqual('/dev/vdb',
block_device_info['root_device_name'])
+ raise OSError(errno.ESTALE, 'Stale NFS file handle')
+ if name == 'instance0000003':
+ self.assertEqual('/dev/vdc',
+ block_device_info['root_device_name'])
+ raise OSError(errno.EACCES, 'Permission denied')
+ if name == 'instance0000004':
+ self.assertEqual('/dev/vdd',
+ block_device_info['root_device_name'])
return fake_disks.get(name)
get_disk_info = mock.Mock()
get_disk_info.side_effect = side_effect
@@ -12026,14 +12048,20 @@ class LibvirtConnTestCase(test.NoDBTestCase):
root_device_name='/dev/vda'),
objects.Instance(
uuid=instance_uuids[1],
- root_device_name='/dev/vdb')
+ root_device_name='/dev/vdb'),
+ objects.Instance(
+ uuid=instance_uuids[2],
+ root_device_name='/dev/vdc'),
+ objects.Instance(
+ uuid=instance_uuids[3],
+ root_device_name='/dev/vdd')
]
mock_get.return_value = instances
result = drvr._get_disk_over_committed_size_total()
- self.assertEqual(21474836480, result)
+ self.assertEqual(42949672960, result)
mock_list.assert_called_once_with()
- self.assertEqual(2, get_disk_info.call_count)
+ self.assertEqual(4, get_disk_info.call_count)
filters = {'uuid': instance_uuids}
mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True)
mock_bdms.assert_called_with(mock.ANY, instance_uuids)
@@ -14372,17 +14400,18 @@ class LibvirtConnTestCase(test.NoDBTestCase):
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume')
- @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
@mock.patch('nova.objects.block_device.BlockDeviceMapping.'
'get_by_volume_and_instance')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
@mock.patch('nova.virt.libvirt.host.Host.get_guest')
- def test_swap_volume_driver_bdm_save(self, get_guest,
+ def _test_swap_volume_driver_bdm_save(self, get_guest,
connect_volume, get_volume_config,
get_by_volume_and_instance,
- volume_save, swap_volume,
- disconnect_volume):
+ swap_volume,
+ disconnect_volume,
+ volume_save,
+ source_type):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
instance = objects.Instance(**self.test_instance)
old_connection_info = {'driver_volume_type': 'fake',
@@ -14415,7 +14444,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
**fake_block_device.FakeDbBlockDeviceDict(
{'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/vdb',
- 'source_type': 'volume',
+ 'source_type': source_type,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-2',
'boot_index': 0}))
@@ -14432,6 +14461,21 @@ class LibvirtConnTestCase(test.NoDBTestCase):
disconnect_volume.assert_called_once_with(old_connection_info, 'vdb')
volume_save.assert_called_once_with()
+ @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
+ def test_swap_volume_driver_bdm_save_source_is_volume(self, volume_save):
+ self._test_swap_volume_driver_bdm_save(volume_save=volume_save,
+ source_type='volume')
+
+ @mock.patch('nova.virt.block_device.DriverImageBlockDevice.save')
+ def test_swap_volume_driver_bdm_save_source_is_image(self, volume_save):
+ self._test_swap_volume_driver_bdm_save(volume_save=volume_save,
+ source_type='image')
+
+ @mock.patch('nova.virt.block_device.DriverSnapshotBlockDevice.save')
+ def test_swap_volume_driver_bdm_save_source_is_snapshot(self, volume_save):
+ self._test_swap_volume_driver_bdm_save(volume_save=volume_save,
+ source_type='snapshot')
+
def _test_live_snapshot(self, can_quiesce=False, require_quiesce=False):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
mock_dom = mock.MagicMock()
diff --git a/nova/tests/unit/virt/vmwareapi/test_read_write_util.py b/nova/tests/unit/virt/vmwareapi/test_read_write_util.py
deleted file mode 100644
index d58402273f..0000000000
--- a/nova/tests/unit/virt/vmwareapi/test_read_write_util.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2013 IBM Corp.
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import urllib
-
-import mock
-
-from nova import test
-from nova.virt.vmwareapi import read_write_util
-
-
-class ReadWriteUtilTestCase(test.NoDBTestCase):
-
- def test_ipv6_host_read(self):
- ipv6_host = 'fd8c:215d:178e:c51e:200:c9ff:fed1:584c'
- port = 7443
- folder = 'tmp/fake.txt'
- # NOTE(sdague): the VMwareHTTPReadFile makes implicit http
- # call via requests during construction, block that from
- # happening here in the test.
- with mock.patch.object(read_write_util.VMwareHTTPReadFile,
- '_create_read_connection'):
- reader = read_write_util.VMwareHTTPReadFile(ipv6_host,
- port,
- 'fake_dc',
- 'fake_ds',
- dict(),
- folder)
- param_list = {"dcPath": 'fake_dc', "dsName": 'fake_ds'}
- base_url = 'https://[%s]:%s/folder/%s' % (ipv6_host, port, folder)
- base_url += '?' + urllib.urlencode(param_list)
- self.assertEqual(base_url, reader._base_url)
diff --git a/nova/tests/unit/virt/xenapi/plugins/test_partition_utils.py b/nova/tests/unit/virt/xenapi/plugins/test_partition_utils.py
index 3761588d5c..e1fceacfc7 100644
--- a/nova/tests/unit/virt/xenapi/plugins/test_partition_utils.py
+++ b/nova/tests/unit/virt/xenapi/plugins/test_partition_utils.py
@@ -22,6 +22,12 @@ from nova.tests.unit.virt.xenapi.plugins import plugin_test
class PartitionUtils(plugin_test.PluginTestBase):
def setUp(self):
super(PartitionUtils, self).setUp()
+ self.pluginlib = self.load_plugin("pluginlib_nova.py")
+
+ # Prevent any logging to syslog
+ self.mock_patch_object(self.pluginlib,
+ 'configure_logging')
+
self.partition_utils = self.load_plugin("partition_utils.py")
def test_wait_for_dev_ok(self):
diff --git a/nova/virt/block_device.py b/nova/virt/block_device.py
index e86abb72d9..f6ed6a30e9 100644
--- a/nova/virt/block_device.py
+++ b/nova/virt/block_device.py
@@ -42,10 +42,6 @@ class _InvalidType(_NotTransformable):
pass
-class _NoLegacy(Exception):
- pass
-
-
def update_db(method):
@functools.wraps(method)
def wrapped(obj, context, *args, **kwargs):
@@ -527,16 +523,7 @@ def refresh_conn_infos(block_device_mapping, *refresh_args, **refresh_kwargs):
def legacy_block_devices(block_device_mapping):
- def _has_legacy(bdm):
- try:
- bdm.legacy()
- except _NoLegacy:
- return False
- return True
-
- bdms = [bdm.legacy()
- for bdm in block_device_mapping
- if _has_legacy(bdm)]
+ bdms = [bdm.legacy() for bdm in block_device_mapping]
# Re-enumerate ephemeral devices
if all(isinstance(bdm, DriverEphemeralBlockDevice)
diff --git a/nova/virt/libvirt/blockinfo.py b/nova/virt/libvirt/blockinfo.py
index b7e5df053c..8fc7fc7e9f 100644
--- a/nova/virt/libvirt/blockinfo.py
+++ b/nova/virt/libvirt/blockinfo.py
@@ -473,7 +473,7 @@ def default_device_names(virt_type, context, instance, block_device_info,
driver_bdm.save()
-def has_default_ephemeral(instance, disk_bus, block_device_info, mapping):
+def get_default_ephemeral_info(instance, disk_bus, block_device_info, mapping):
ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
if instance.ephemeral_gb <= 0 or ephemerals:
return None
@@ -559,8 +559,8 @@ def get_disk_mapping(virt_type, instance,
# set, nothing is changed.
update_bdm(root_bdm, root_info)
- default_eph = has_default_ephemeral(instance, disk_bus, block_device_info,
- mapping)
+ default_eph = get_default_ephemeral_info(instance, disk_bus,
+ block_device_info, mapping)
if default_eph:
mapping['disk.local'] = default_eph
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index d58841081d..b3f4492569 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -1306,7 +1306,7 @@ class LibvirtDriver(driver.ComputeDriver):
volume_id = new_connection_info.get('serial')
bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
nova_context.get_admin_context(), volume_id, instance.uuid)
- driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
+ driver_bdm = driver_block_device.convert_volume(bdm)
driver_bdm['connection_info'] = new_connection_info
driver_bdm.save()
@@ -6912,7 +6912,7 @@ class LibvirtDriver(driver.ComputeDriver):
'error_code': error_code,
'ex': ex})
except OSError as e:
- if e.errno == errno.ENOENT:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
LOG.warning(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but disk file was removed by concurrent '
diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py
deleted file mode 100644
index 2a3d3f97e5..0000000000
--- a/nova/virt/vmwareapi/read_write_util.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Classes to handle image files
-
-Collection of classes to handle image upload/download to/from Image service
-(like Glance image storage and retrieval service) from/to ESX/ESXi server.
-
-"""
-
-import urllib
-
-from oslo_utils import netutils
-from oslo_vmware import rw_handles
-
-
-class VMwareHTTPReadFile(rw_handles.FileHandle):
- """VMware file read handler class."""
-
- def __init__(self, host, port, data_center_name, datastore_name, cookies,
- file_path, scheme="https"):
- self._base_url = self._get_base_url(scheme, host, port, file_path)
- param_list = {"dcPath": data_center_name, "dsName": datastore_name}
- self._base_url = self._base_url + "?" + urllib.urlencode(param_list)
- self._conn = self._create_read_connection(self._base_url,
- cookies=cookies)
- rw_handles.FileHandle.__init__(self, self._conn)
-
- def read(self, chunk_size):
- return self._file_handle.read(rw_handles.READ_CHUNKSIZE)
-
- def _get_base_url(self, scheme, host, port, file_path):
- if netutils.is_valid_ipv6(host):
- base_url = "%s://[%s]:%s/folder/%s" % (scheme, host, port,
- urllib.pathname2url(file_path))
- else:
- base_url = "%s://%s:%s/folder/%s" % (scheme, host, port,
- urllib.pathname2url(file_path))
- return base_url
-
- def get_size(self):
- """Get size of the file to be read."""
- return self._file_handle.headers.get("Content-Length", -1)
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 113d47ae69..ff49b210be 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -232,31 +232,21 @@ class VMwareVMOps(object):
def _get_instance_metadata(self, context, instance):
flavor = instance.flavor
- return ('name:%s\n'
- 'userid:%s\n'
- 'username:%s\n'
- 'projectid:%s\n'
- 'projectname:%s\n'
- 'flavor:name:%s\n'
- 'flavor:memory_mb:%s\n'
- 'flavor:vcpus:%s\n'
- 'flavor:ephemeral_gb:%s\n'
- 'flavor:root_gb:%s\n'
- 'flavor:swap:%s\n'
- 'imageid:%s\n'
- 'package:%s\n') % (instance.display_name,
- context.user_id,
- context.user_name,
- context.project_id,
- context.project_name,
- flavor.name,
- flavor.memory_mb,
- flavor.vcpus,
- flavor.ephemeral_gb,
- flavor.root_gb,
- flavor.swap,
- instance.image_ref,
- version.version_string_with_package())
+ metadata = [('name', instance.display_name),
+ ('userid', context.user_id),
+ ('username', context.user_name),
+ ('projectid', context.project_id),
+ ('projectname', context.project_name),
+ ('flavor:name', flavor.name),
+ ('flavor:memory_mb', flavor.memory_mb),
+ ('flavor:vcpus', flavor.vcpus),
+ ('flavor:ephemeral_gb', flavor.ephemeral_gb),
+ ('flavor:root_gb', flavor.root_gb),
+ ('flavor:swap', flavor.swap),
+ ('imageid', instance.image_ref),
+ ('package', version.version_string_with_package())]
+ # NOTE: formatted as lines like this: 'name:NAME\nuserid:ID\n...'
+ return ''.join(['%s:%s\n' % (k, v) for k, v in metadata])
def _create_folders(self, parent_folder, folder_path):
folders = folder_path.split('/')
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index 739a9ca47c..b16ddad192 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -51,7 +51,7 @@ def parse_sr_info(connection_data, description=''):
sr_identity = "%s/%s/%s" % (params['target'], params['port'],
params['targetIQN'])
# PY2 can only support taking an ascii string to uuid5
- if six.PY2 and isinstance(sr_identity, unicode):
+ if six.PY2 and isinstance(sr_identity, six.text_type):
sr_identity = sr_identity.encode('utf-8')
sr_uuid = str(uuid.uuid5(SR_NAMESPACE, sr_identity))
else:
diff --git a/releasenotes/notes/async-live-migration-rest-check-675ec309a9ccc28e.yaml b/releasenotes/notes/async-live-migration-rest-check-675ec309a9ccc28e.yaml
new file mode 100644
index 0000000000..4341cb910f
--- /dev/null
+++ b/releasenotes/notes/async-live-migration-rest-check-675ec309a9ccc28e.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - Starting from REST API microversion 2.34 pre-live-migration checks are
+ performed asynchronously. ``instance-actions`` should be used for getting
+    information about the results of these checks. The new approach reduces
+    the number of RPC timeouts, as the previous workflow was fully blocking:
+    pre-live-migration checks made blocking RPC requests to both the source
+    and destination compute nodes.
diff --git a/releasenotes/notes/bp-keypairs-pagination-634c46aaa1058161.yaml b/releasenotes/notes/bp-keypairs-pagination-634c46aaa1058161.yaml
new file mode 100644
index 0000000000..c83c62089e
--- /dev/null
+++ b/releasenotes/notes/bp-keypairs-pagination-634c46aaa1058161.yaml
@@ -0,0 +1,5 @@
+---
+features:
+ - Added microversion v2.35 that adds pagination support for keypairs with
+ the help of new optional parameters 'limit' and 'marker' which were added
+ to GET /os-keypairs request.
diff --git a/releasenotes/notes/notification-transformation-newton-29a9324d1428b7d3.yaml b/releasenotes/notes/notification-transformation-newton-29a9324d1428b7d3.yaml
new file mode 100644
index 0000000000..625002079b
--- /dev/null
+++ b/releasenotes/notes/notification-transformation-newton-29a9324d1428b7d3.yaml
@@ -0,0 +1,20 @@
+---
+features:
+ - |
+
+  The following legacy notifications have been transformed to
+ a new versioned payload:
+
+ * instance.delete
+ * instance.pause
+ * instance.power_on
+ * instance.shelve
+ * instance.suspend
+ * instance.restore
+ * instance.resize
+ * instance.update
+ * compute.exception
+
+ Every versioned notification has a sample file stored under
+ doc/notification_samples directory. Consult
+ http://docs.openstack.org/developer/nova/notifications.html for more information.
diff --git a/releasenotes/notes/remove-config-serial-listen-2660be1c0863ea5a.yaml b/releasenotes/notes/remove-config-serial-listen-2660be1c0863ea5a.yaml
new file mode 100644
index 0000000000..3fe5567cc6
--- /dev/null
+++ b/releasenotes/notes/remove-config-serial-listen-2660be1c0863ea5a.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+ - |
+    The previously deprecated config option ``listen`` of the group
+ ``serial_console`` has been removed, as it was never used in the code.
+