summaryrefslogtreecommitdiff
path: root/nova
diff options
context:
space:
mode:
Diffstat (limited to 'nova')
-rw-r--r--nova/api/openstack/placement/handlers/allocation.py38
-rw-r--r--nova/api/openstack/placement/microversion.py2
-rw-r--r--nova/objects/resource_provider.py92
-rw-r--r--nova/scheduler/client/report.py19
-rw-r--r--nova/tests/fixtures.py2
-rw-r--r--nova/tests/functional/api/openstack/placement/fixtures.py2
-rw-r--r--nova/tests/functional/api/openstack/placement/gabbits/allocations-1-8.yaml152
-rw-r--r--nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml4
-rw-r--r--nova/tests/functional/api/openstack/placement/test_report_client.py2
-rw-r--r--nova/tests/functional/compute/test_resource_tracker.py1
-rw-r--r--nova/tests/functional/db/test_resource_provider.py45
-rw-r--r--nova/tests/unit/api/openstack/placement/test_microversion.py2
-rw-r--r--nova/tests/unit/objects/test_objects.py2
-rw-r--r--nova/tests/unit/scheduler/client/test_report.py47
14 files changed, 386 insertions, 24 deletions
diff --git a/nova/api/openstack/placement/handlers/allocation.py b/nova/api/openstack/placement/handlers/allocation.py
index 607ae92452..acaa456604 100644
--- a/nova/api/openstack/placement/handlers/allocation.py
+++ b/nova/api/openstack/placement/handlers/allocation.py
@@ -12,12 +12,14 @@
"""Placement API handlers for setting and deleting allocations."""
import collections
+import copy
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import webob
+from nova.api.openstack.placement import microversion
from nova.api.openstack.placement import util
from nova.api.openstack.placement import wsgi_wrapper
from nova import exception
@@ -69,6 +71,15 @@ ALLOCATION_SCHEMA = {
"additionalProperties": False
}
+ALLOCATION_SCHEMA_V1_8 = copy.deepcopy(ALLOCATION_SCHEMA)
+ALLOCATION_SCHEMA_V1_8['properties']['project_id'] = {'type': 'string',
+ 'minLength': 1,
+ 'maxLength': 255}
+ALLOCATION_SCHEMA_V1_8['properties']['user_id'] = {'type': 'string',
+ 'minLength': 1,
+ 'maxLength': 255}
+ALLOCATION_SCHEMA_V1_8['required'].extend(['project_id', 'user_id'])
+
def _allocations_dict(allocations, key_fetcher, resource_provider=None):
"""Turn allocations into a dict of resources keyed by key_fetcher."""
@@ -197,12 +208,10 @@ def list_for_resource_provider(req):
return req.response
-@wsgi_wrapper.PlacementWsgify
-@util.require_content('application/json')
-def set_allocations(req):
+def _set_allocations(req, schema):
context = req.environ['placement.context']
consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')
- data = util.extract_json(req.body, ALLOCATION_SCHEMA)
+ data = util.extract_json(req.body, schema)
allocation_data = data['allocations']
# If the body includes an allocation for a resource provider
@@ -229,7 +238,12 @@ def set_allocations(req):
used=resources[resource_class])
allocation_objects.append(allocation)
- allocations = objects.AllocationList(context, objects=allocation_objects)
+ allocations = objects.AllocationList(
+ context,
+ objects=allocation_objects,
+ project_id=data.get('project_id'),
+ user_id=data.get('user_id'),
+ )
try:
allocations.create_all()
@@ -258,6 +272,20 @@ def set_allocations(req):
@wsgi_wrapper.PlacementWsgify
+@microversion.version_handler('1.0', '1.7')
+@util.require_content('application/json')
+def set_allocations(req):
+ return _set_allocations(req, ALLOCATION_SCHEMA)
+
+
+@wsgi_wrapper.PlacementWsgify # noqa
+@microversion.version_handler('1.8')
+@util.require_content('application/json')
+def set_allocations(req):
+ return _set_allocations(req, ALLOCATION_SCHEMA_V1_8)
+
+
+@wsgi_wrapper.PlacementWsgify
def delete_allocations(req):
context = req.environ['placement.context']
consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')
diff --git a/nova/api/openstack/placement/microversion.py b/nova/api/openstack/placement/microversion.py
index fac180853e..2f7ccaa2c8 100644
--- a/nova/api/openstack/placement/microversion.py
+++ b/nova/api/openstack/placement/microversion.py
@@ -44,6 +44,8 @@ VERSIONS = [
'1.6', # Adds /traits and /resource_providers{uuid}/traits resource
# endpoints
'1.7', # PUT /resource_classes/{name} is bodiless create or update
+ '1.8', # Adds 'project_id' and 'user_id' required request parameters to
+ # PUT /allocations
]
diff --git a/nova/objects/resource_provider.py b/nova/objects/resource_provider.py
index 51b24af4fb..d33d027eae 100644
--- a/nova/objects/resource_provider.py
+++ b/nova/objects/resource_provider.py
@@ -44,6 +44,9 @@ _RC_TBL = models.ResourceClass.__table__
_AGG_TBL = models.PlacementAggregate.__table__
_RP_AGG_TBL = models.ResourceProviderAggregate.__table__
_RP_TRAIT_TBL = models.ResourceProviderTrait.__table__
+_PROJECT_TBL = models.Project.__table__
+_USER_TBL = models.User.__table__
+_CONSUMER_TBL = models.Consumer.__table__
_RC_CACHE = None
_TRAIT_LOCK = 'trait_sync'
_TRAITS_SYNCED = False
@@ -1653,15 +1656,66 @@ def _check_capacity_exceeded(conn, allocs):
return list(res_providers.values())
+def _ensure_lookup_table_entry(conn, tbl, external_id):
+ """Ensures the supplied external ID exists in the specified lookup table
+ and if not, adds it. Returns the internal ID.
+
+ :param conn: DB connection object to use
+ :param tbl: The lookup table
+ :param external_id: The external project or user identifier
+ :type external_id: string
+ """
+ # Grab the project internal ID if it exists in the projects table
+ sel = sa.select([tbl.c.id]).where(
+ tbl.c.external_id == external_id
+ )
+ res = conn.execute(sel).fetchall()
+ if not res:
+ try:
+ res = conn.execute(tbl.insert().values(external_id=external_id))
+ return res.inserted_primary_key[0]
+ except db_exc.DBDuplicateEntry:
+ # Another thread added it just before us, so just read the
+ # internal ID that that thread created...
+ res = conn.execute(sel).fetchall()
+
+ return res[0][0]
+
+
+def _ensure_project(conn, external_id):
+ """Ensures the supplied external project ID exists in the projects lookup
+ table and if not, adds it. Returns the internal project ID.
+
+ :param conn: DB connection object to use
+ :param external_id: The external project identifier
+ :type external_id: string
+ """
+ return _ensure_lookup_table_entry(conn, _PROJECT_TBL, external_id)
+
+
+def _ensure_user(conn, external_id):
+ """Ensures the supplied external user ID exists in the users lookup table
+ and if not, adds it. Returns the internal user ID.
+
+ :param conn: DB connection object to use
+ :param external_id: The external user identifier
+ :type external_id: string
+ """
+ return _ensure_lookup_table_entry(conn, _USER_TBL, external_id)
+
+
@base.NovaObjectRegistry.register
class AllocationList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial Version
# Version 1.1: Add create_all() and delete_all()
# Version 1.2: Turn off remotable
- VERSION = '1.2'
+ # Version 1.3: Add project_id and user_id fields
+ VERSION = '1.3'
fields = {
'objects': fields.ListOfObjectsField('Allocation'),
+ 'project_id': fields.StringField(nullable=True),
+ 'user_id': fields.StringField(nullable=True),
}
@staticmethod
@@ -1686,9 +1740,40 @@ class AllocationList(base.ObjectListBase, base.NovaObject):
models.Allocation.consumer_id == consumer_id)
return query.all()
- @staticmethod
+ def _ensure_consumer_project_user(self, conn, consumer_id):
+ """Examines the project_id, user_id of the object along with the
+ supplied consumer_id and ensures that there are records in the
+ consumers, projects, and users table for these entities.
+
+        :param conn: DB connection object to use
+        :param consumer_id: Comes from the Allocation object being processed
+ """
+ if (self.obj_attr_is_set('project_id') and
+ self.project_id is not None and
+ self.obj_attr_is_set('user_id') and
+ self.user_id is not None):
+ # Grab the project internal ID if it exists in the projects table
+ pid = _ensure_project(conn, self.project_id)
+ # Grab the user internal ID if it exists in the users table
+ uid = _ensure_user(conn, self.user_id)
+
+ # Add the consumer if it doesn't already exist
+ sel_stmt = sa.select([_CONSUMER_TBL.c.uuid]).where(
+ _CONSUMER_TBL.c.uuid == consumer_id)
+ result = conn.execute(sel_stmt).fetchall()
+ if not result:
+ try:
+ conn.execute(_CONSUMER_TBL.insert().values(
+ uuid=consumer_id,
+ project_id=pid,
+ user_id=uid))
+ except db_exc.DBDuplicateEntry:
+ # We assume at this time that a consumer project/user can't
+ # change, so if we get here, we raced and should just pass
+ # if the consumer already exists.
+ pass
+
@db_api.api_context_manager.writer
- def _set_allocations(context, allocs):
+ def _set_allocations(self, context, allocs):
"""Write a set of allocations.
We must check that there is capacity for each allocation.
@@ -1723,6 +1808,7 @@ class AllocationList(base.ObjectListBase, base.NovaObject):
# First delete any existing allocations for that rp/consumer combo.
_delete_current_allocs(conn, allocs)
before_gens = _check_capacity_exceeded(conn, allocs)
+ self._ensure_consumer_project_user(conn, allocs[0].consumer_id)
# Now add the allocations that were passed in.
for alloc in allocs:
rp = alloc.resource_provider
diff --git a/nova/scheduler/client/report.py b/nova/scheduler/client/report.py
index cdffa5c0ab..e62eb794d4 100644
--- a/nova/scheduler/client/report.py
+++ b/nova/scheduler/client/report.py
@@ -839,13 +839,15 @@ class SchedulerReportClient(object):
LOG.debug('Sending allocation for instance %s',
my_allocations,
instance=instance)
- res = self.put_allocations(rp_uuid, instance.uuid, my_allocations)
+ res = self.put_allocations(rp_uuid, instance.uuid, my_allocations,
+ instance.project_id, instance.user_id)
if res:
LOG.info(_LI('Submitted allocation for instance'),
instance=instance)
@safe_connect
- def put_allocations(self, rp_uuid, consumer_uuid, alloc_data):
+ def put_allocations(self, rp_uuid, consumer_uuid, alloc_data, project_id,
+ user_id):
"""Creates allocation records for the supplied instance UUID against
the supplied resource provider.
@@ -857,6 +859,8 @@ class SchedulerReportClient(object):
:param consumer_uuid: The instance's UUID.
:param alloc_data: Dict, keyed by resource class, of amounts to
consume.
+ :param project_id: The project_id associated with the allocations.
+ :param user_id: The user_id associated with the allocations.
:returns: True if the allocations were created, False otherwise.
"""
payload = {
@@ -868,9 +872,18 @@ class SchedulerReportClient(object):
'resources': alloc_data,
},
],
+ 'project_id': project_id,
+ 'user_id': user_id,
}
url = '/allocations/%s' % consumer_uuid
- r = self.put(url, payload)
+ r = self.put(url, payload, version='1.8')
+ if r.status_code == 406:
+ # microversion 1.8 not available so try the earlier way
+ # TODO(melwitt): Remove this when we can be sure all placement
+ # servers support version 1.8.
+ payload.pop('project_id')
+ payload.pop('user_id')
+ r = self.put(url, payload)
if r.status_code != 204:
LOG.warning(
'Unable to submit allocation for instance '
diff --git a/nova/tests/fixtures.py b/nova/tests/fixtures.py
index dd375293b7..dc9e1902a9 100644
--- a/nova/tests/fixtures.py
+++ b/nova/tests/fixtures.py
@@ -1426,7 +1426,7 @@ class PlacementFixture(fixtures.Fixture):
headers={'x-auth-token': self.token},
raise_exc=False)
- def _fake_put(self, *args):
+ def _fake_put(self, *args, **kwargs):
(url, data) = args[1:]
# NOTE(sdague): using json= instead of data= sets the
# media type to application/json for us. Placement API is
diff --git a/nova/tests/functional/api/openstack/placement/fixtures.py b/nova/tests/functional/api/openstack/placement/fixtures.py
index 83c8e6b573..3c31f21b47 100644
--- a/nova/tests/functional/api/openstack/placement/fixtures.py
+++ b/nova/tests/functional/api/openstack/placement/fixtures.py
@@ -77,6 +77,8 @@ class APIFixture(fixture.GabbiFixture):
os.environ['RP_UUID'] = uuidutils.generate_uuid()
os.environ['RP_NAME'] = uuidutils.generate_uuid()
os.environ['CUSTOM_RES_CLASS'] = 'CUSTOM_IRON_NFV'
+ os.environ['PROJECT_ID'] = uuidutils.generate_uuid()
+ os.environ['USER_ID'] = uuidutils.generate_uuid()
def stop_fixture(self):
self.api_db_fixture.cleanup()
diff --git a/nova/tests/functional/api/openstack/placement/gabbits/allocations-1-8.yaml b/nova/tests/functional/api/openstack/placement/gabbits/allocations-1-8.yaml
new file mode 100644
index 0000000000..d3a8a6bdc9
--- /dev/null
+++ b/nova/tests/functional/api/openstack/placement/gabbits/allocations-1-8.yaml
@@ -0,0 +1,152 @@
+fixtures:
+ - APIFixture
+
+defaults:
+ request_headers:
+ x-auth-token: admin
+ accept: application/json
+ OpenStack-API-Version: placement 1.8
+
+tests:
+
+- name: put an allocation no project_id or user_id
+ PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
+ request_headers:
+ content-type: application/json
+ data:
+ allocations:
+ - resource_provider:
+ uuid: $ENVIRON['RP_UUID']
+ resources:
+ DISK_GB: 10
+ status: 400
+ response_strings:
+ - Failed validating 'required' in schema
+
+- name: put an allocation no project_id
+ PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
+ request_headers:
+ content-type: application/json
+ data:
+ allocations:
+ - resource_provider:
+ uuid: $ENVIRON['RP_UUID']
+ resources:
+ DISK_GB: 10
+ user_id: $ENVIRON['USER_ID']
+ status: 400
+ response_strings:
+ - Failed validating 'required' in schema
+
+- name: put an allocation no user_id
+ PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
+ request_headers:
+ content-type: application/json
+ data:
+ allocations:
+ - resource_provider:
+ uuid: $ENVIRON['RP_UUID']
+ resources:
+ DISK_GB: 10
+ project_id: $ENVIRON['PROJECT_ID']
+ status: 400
+ response_strings:
+ - Failed validating 'required' in schema
+
+- name: put an allocation project_id less than min length
+ PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
+ request_headers:
+ content-type: application/json
+ data:
+ allocations:
+ - resource_provider:
+ uuid: $ENVIRON['RP_UUID']
+ resources:
+ DISK_GB: 10
+ project_id: ""
+ user_id: $ENVIRON['USER_ID']
+ status: 400
+ response_strings:
+ - "Failed validating 'minLength'"
+
+- name: put an allocation user_id less than min length
+ PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
+ request_headers:
+ content-type: application/json
+ data:
+ allocations:
+ - resource_provider:
+ uuid: $ENVIRON['RP_UUID']
+ resources:
+ DISK_GB: 10
+ project_id: $ENVIRON['PROJECT_ID']
+ user_id: ""
+ status: 400
+ response_strings:
+ - "Failed validating 'minLength'"
+
+- name: put an allocation project_id exceeds max length
+ PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
+ request_headers:
+ content-type: application/json
+ data:
+ allocations:
+ - resource_provider:
+ uuid: $ENVIRON['RP_UUID']
+ resources:
+ DISK_GB: 10
+ project_id: 78725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b1
+ user_id: $ENVIRON['USER_ID']
+ status: 400
+ response_strings:
+ - "Failed validating 'maxLength'"
+
+- name: put an allocation user_id exceeds max length
+ PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
+ request_headers:
+ content-type: application/json
+ data:
+ allocations:
+ - resource_provider:
+ uuid: $ENVIRON['RP_UUID']
+ resources:
+ DISK_GB: 10
+ project_id: $ENVIRON['PROJECT_ID']
+ user_id: 78725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b178725f09-5c01-4c9e-97a5-98d75e1e32b1
+ status: 400
+ response_strings:
+ - "Failed validating 'maxLength'"
+
+- name: create the resource provider
+ POST: /resource_providers
+ request_headers:
+ content-type: application/json
+ data:
+ name: $ENVIRON['RP_NAME']
+ uuid: $ENVIRON['RP_UUID']
+ status: 201
+
+- name: post some inventory
+ POST: /resource_providers/$ENVIRON['RP_UUID']/inventories
+ request_headers:
+ content-type: application/json
+ data:
+ resource_class: DISK_GB
+ total: 2048
+ min_unit: 10
+ max_unit: 1024
+ status: 201
+
+- name: put an allocation
+ PUT: /allocations/599ffd2d-526a-4b2e-8683-f13ad25f9958
+ request_headers:
+ content-type: application/json
+ data:
+ allocations:
+ - resource_provider:
+ uuid: $ENVIRON['RP_UUID']
+ resources:
+ DISK_GB: 10
+ project_id: $ENVIRON['PROJECT_ID']
+ user_id: $ENVIRON['USER_ID']
+ status: 204
diff --git a/nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml b/nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml
index 50a9ec5778..33cdd3338d 100644
--- a/nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml
+++ b/nova/tests/functional/api/openstack/placement/gabbits/microversion.yaml
@@ -39,13 +39,13 @@ tests:
response_json_paths:
$.errors[0].title: Not Acceptable
-- name: latest microversion is 1.7
+- name: latest microversion is 1.8
GET: /
request_headers:
openstack-api-version: placement latest
response_headers:
vary: /OpenStack-API-Version/
- openstack-api-version: placement 1.7
+ openstack-api-version: placement 1.8
- name: other accept header bad version
GET: /
diff --git a/nova/tests/functional/api/openstack/placement/test_report_client.py b/nova/tests/functional/api/openstack/placement/test_report_client.py
index 41c022962b..8811c8209d 100644
--- a/nova/tests/functional/api/openstack/placement/test_report_client.py
+++ b/nova/tests/functional/api/openstack/placement/test_report_client.py
@@ -76,6 +76,8 @@ class SchedulerReportClientTests(test.TestCase):
self.instance_uuid = uuids.inst
self.instance = objects.Instance(
uuid=self.instance_uuid,
+            project_id=uuids.project,
+            user_id=uuids.user,
flavor=objects.Flavor(root_gb=10,
swap=1,
ephemeral_gb=100,
diff --git a/nova/tests/functional/compute/test_resource_tracker.py b/nova/tests/functional/compute/test_resource_tracker.py
index 129698e4d0..5484d46419 100644
--- a/nova/tests/functional/compute/test_resource_tracker.py
+++ b/nova/tests/functional/compute/test_resource_tracker.py
@@ -123,6 +123,7 @@ class IronicResourceTrackerTest(test.TestCase):
task_state=task_states.SPAWNING,
power_state=power_state.RUNNING,
project_id='project',
+ user_id=uuids.user,
),
}
diff --git a/nova/tests/functional/db/test_resource_provider.py b/nova/tests/functional/db/test_resource_provider.py
index c95bc88cf7..9fd9bd2995 100644
--- a/nova/tests/functional/db/test_resource_provider.py
+++ b/nova/tests/functional/db/test_resource_provider.py
@@ -1213,6 +1213,51 @@ class TestAllocationListCreateDelete(ResourceProviderBaseCase):
self._check_create_allocations(inventory_kwargs,
bad_used, good_used)
+ def test_create_all_with_project_user(self):
+ consumer_uuid = uuidsentinel.consumer
+ rp_class = fields.ResourceClass.DISK_GB
+ rp = self._make_rp_and_inventory(resource_class=rp_class,
+ max_unit=500)
+ allocation1 = objects.Allocation(resource_provider=rp,
+ consumer_id=consumer_uuid,
+ resource_class=rp_class,
+ used=100)
+ allocation2 = objects.Allocation(resource_provider=rp,
+ consumer_id=consumer_uuid,
+ resource_class=rp_class,
+ used=200)
+ allocation_list = objects.AllocationList(
+ self.context,
+ objects=[allocation1, allocation2],
+ project_id=self.context.project_id,
+ user_id=self.context.user_id,
+ )
+ allocation_list.create_all()
+
+ # Verify that we have records in the consumers, projects, and users
+ # table for the information used in the above allocation creation
+ with self.api_db.get_engine().connect() as conn:
+ tbl = rp_obj._PROJECT_TBL
+ sel = sa.select([tbl.c.id]).where(
+ tbl.c.external_id == self.context.project_id,
+ )
+ res = conn.execute(sel).fetchall()
+ self.assertEqual(1, len(res), "project lookup not created.")
+
+ tbl = rp_obj._USER_TBL
+ sel = sa.select([tbl.c.id]).where(
+ tbl.c.external_id == self.context.user_id,
+ )
+ res = conn.execute(sel).fetchall()
+ self.assertEqual(1, len(res), "user lookup not created.")
+
+ tbl = rp_obj._CONSUMER_TBL
+ sel = sa.select([tbl.c.id]).where(
+ tbl.c.uuid == consumer_uuid,
+ )
+ res = conn.execute(sel).fetchall()
+ self.assertEqual(1, len(res), "consumer lookup not created.")
+
class UsageListTestCase(ResourceProviderBaseCase):
diff --git a/nova/tests/unit/api/openstack/placement/test_microversion.py b/nova/tests/unit/api/openstack/placement/test_microversion.py
index 22e6317037..2a98fa9049 100644
--- a/nova/tests/unit/api/openstack/placement/test_microversion.py
+++ b/nova/tests/unit/api/openstack/placement/test_microversion.py
@@ -74,7 +74,7 @@ class TestMicroversionIntersection(test.NoDBTestCase):
# if you add two different versions of method 'foobar' the
# number only goes up by one if no other version foobar yet
# exists. This operates as a simple sanity check.
- TOTAL_VERSIONED_METHODS = 12
+ TOTAL_VERSIONED_METHODS = 13
def test_methods_versioned(self):
methods_data = microversion.VERSIONED_METHODS
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index 5032531cef..e64cd73be5 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -1063,7 +1063,7 @@ object_data = {
'Aggregate': '1.3-f315cb68906307ca2d1cca84d4753585',
'AggregateList': '1.2-fb6e19f3c3a3186b04eceb98b5dadbfa',
'Allocation': '1.2-54f99dfa9651922219c205a7fba69e2f',
- 'AllocationList': '1.2-15ecf022a68ddbb8c2a6739cfc9f8f5e',
+ 'AllocationList': '1.3-453a548b961f59804cccd05ae29ba4f7',
'BandwidthUsage': '1.2-c6e4c779c7f40f2407e3d70022e3cd1c',
'BandwidthUsageList': '1.2-5fe7475ada6fe62413cbfcc06ec70746',
'BlockDeviceMapping': '1.18-ad87cece6f84c65f5ec21615755bc6d3',
diff --git a/nova/tests/unit/scheduler/client/test_report.py b/nova/tests/unit/scheduler/client/test_report.py
index 8f70c6cd81..50d684f529 100644
--- a/nova/tests/unit/scheduler/client/test_report.py
+++ b/nova/tests/unit/scheduler/client/test_report.py
@@ -187,9 +187,34 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
- resp = self.client.put_allocations(rp_uuid, consumer_uuid, data)
+ resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
+ mock.sentinel.project_id,
+ mock.sentinel.user_id)
self.assertTrue(resp)
- mock_put.assert_called_once_with(expected_url, mock.ANY)
+ mock_put.assert_called_once_with(expected_url, mock.ANY, version='1.8')
+
+ @mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
+ def test_put_allocations_fail_fallback_succeeds(self, mock_put):
+ not_acceptable = mock.Mock()
+ not_acceptable.status_code = 406
+ not_acceptable.text = 'microversion not supported'
+ ok_request = mock.Mock()
+ ok_request.status_code = 204
+ ok_request.text = 'cool'
+ mock_put.side_effect = [not_acceptable, ok_request]
+ rp_uuid = mock.sentinel.rp
+ consumer_uuid = mock.sentinel.consumer
+ data = {"MEMORY_MB": 1024}
+ expected_url = "/allocations/%s" % consumer_uuid
+ resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
+ mock.sentinel.project_id,
+ mock.sentinel.user_id)
+ self.assertTrue(resp)
+ # Should fall back to earlier way if 1.8 fails.
+ call1 = mock.call(expected_url, mock.ANY, version='1.8')
+ call2 = mock.call(expected_url, mock.ANY)
+ self.assertEqual(2, mock_put.call_count)
+ mock_put.assert_has_calls([call1, call2])
@mock.patch.object(report.LOG, 'warning')
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.put')
@@ -200,9 +225,11 @@ class TestPutAllocations(SchedulerReportClientTestCase):
consumer_uuid = mock.sentinel.consumer
data = {"MEMORY_MB": 1024}
expected_url = "/allocations/%s" % consumer_uuid
- resp = self.client.put_allocations(rp_uuid, consumer_uuid, data)
+ resp = self.client.put_allocations(rp_uuid, consumer_uuid, data,
+ mock.sentinel.project_id,
+ mock.sentinel.user_id)
self.assertFalse(resp)
- mock_put.assert_called_once_with(expected_url, mock.ANY)
+ mock_put.assert_called_once_with(expected_url, mock.ANY, version='1.8')
log_msg = mock_warn.call_args[0][0]
self.assertIn("Unable to submit allocation for instance", log_msg)
@@ -1427,17 +1454,20 @@ class TestAllocations(SchedulerReportClientTestCase):
def test_update_instance_allocation_new(self, mock_a, mock_get,
mock_put):
cn = objects.ComputeNode(uuid=uuids.cn)
- inst = objects.Instance(uuid=uuids.inst)
+ inst = objects.Instance(uuid=uuids.inst, project_id=uuids.project,
+ user_id=uuids.user)
mock_get.return_value.json.return_value = {'allocations': {}}
expected = {
'allocations': [
{'resource_provider': {'uuid': cn.uuid},
- 'resources': mock_a.return_value}]
+ 'resources': mock_a.return_value}],
+ 'project_id': inst.project_id,
+ 'user_id': inst.user_id,
}
self.client.update_instance_allocation(cn, inst, 1)
mock_put.assert_called_once_with(
'/allocations/%s' % inst.uuid,
- expected)
+ expected, version='1.8')
self.assertTrue(mock_get.called)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
@@ -1478,7 +1508,8 @@ class TestAllocations(SchedulerReportClientTestCase):
def test_update_instance_allocation_new_failed(self, mock_warn, mock_a,
mock_put, mock_get):
cn = objects.ComputeNode(uuid=uuids.cn)
- inst = objects.Instance(uuid=uuids.inst)
+ inst = objects.Instance(uuid=uuids.inst, project_id=uuids.project,
+ user_id=uuids.user)
try:
mock_put.return_value.__nonzero__.return_value = False
except AttributeError: