summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--doc/source/conf.py1
-rw-r--r--nova/api/openstack/placement/__init__.py0
-rw-r--r--nova/api/openstack/placement/auth.py102
-rw-r--r--nova/api/openstack/placement/context.py52
-rw-r--r--nova/api/openstack/placement/db_api.py48
-rw-r--r--nova/api/openstack/placement/deploy.py120
-rw-r--r--nova/api/openstack/placement/direct.py94
-rw-r--r--nova/api/openstack/placement/errors.py48
-rw-r--r--nova/api/openstack/placement/exception.py207
-rw-r--r--nova/api/openstack/placement/fault_wrap.py48
-rw-r--r--nova/api/openstack/placement/handler.py231
-rw-r--r--nova/api/openstack/placement/handlers/__init__.py0
-rw-r--r--nova/api/openstack/placement/handlers/aggregate.py133
-rw-r--r--nova/api/openstack/placement/handlers/allocation.py576
-rw-r--r--nova/api/openstack/placement/handlers/allocation_candidate.py332
-rw-r--r--nova/api/openstack/placement/handlers/inventory.py467
-rw-r--r--nova/api/openstack/placement/handlers/reshaper.py129
-rw-r--r--nova/api/openstack/placement/handlers/resource_class.py241
-rw-r--r--nova/api/openstack/placement/handlers/resource_provider.py308
-rw-r--r--nova/api/openstack/placement/handlers/root.py54
-rw-r--r--nova/api/openstack/placement/handlers/trait.py270
-rw-r--r--nova/api/openstack/placement/handlers/usage.py120
-rw-r--r--nova/api/openstack/placement/lib.py53
-rw-r--r--nova/api/openstack/placement/microversion.py172
-rw-r--r--nova/api/openstack/placement/objects/__init__.py0
-rw-r--r--nova/api/openstack/placement/objects/consumer.py257
-rw-r--r--nova/api/openstack/placement/objects/project.py92
-rw-r--r--nova/api/openstack/placement/objects/resource_provider.py4282
-rw-r--r--nova/api/openstack/placement/objects/user.py92
-rw-r--r--nova/api/openstack/placement/policies/__init__.py39
-rw-r--r--nova/api/openstack/placement/policies/aggregate.py53
-rw-r--r--nova/api/openstack/placement/policies/allocation.py92
-rw-r--r--nova/api/openstack/placement/policies/allocation_candidate.py38
-rw-r--r--nova/api/openstack/placement/policies/base.py42
-rw-r--r--nova/api/openstack/placement/policies/inventory.py95
-rw-r--r--nova/api/openstack/placement/policies/reshaper.py38
-rw-r--r--nova/api/openstack/placement/policies/resource_class.py86
-rw-r--r--nova/api/openstack/placement/policies/resource_provider.py86
-rw-r--r--nova/api/openstack/placement/policies/trait.py120
-rw-r--r--nova/api/openstack/placement/policies/usage.py54
-rw-r--r--nova/api/openstack/placement/policy.py94
-rw-r--r--nova/api/openstack/placement/requestlog.py87
-rw-r--r--nova/api/openstack/placement/resource_class_cache.py154
-rw-r--r--nova/api/openstack/placement/rest_api_version_history.rst518
-rw-r--r--nova/api/openstack/placement/schemas/__init__.py0
-rw-r--r--nova/api/openstack/placement/schemas/aggregate.py42
-rw-r--r--nova/api/openstack/placement/schemas/allocation.py169
-rw-r--r--nova/api/openstack/placement/schemas/allocation_candidate.py78
-rw-r--r--nova/api/openstack/placement/schemas/common.py22
-rw-r--r--nova/api/openstack/placement/schemas/inventory.py93
-rw-r--r--nova/api/openstack/placement/schemas/reshaper.py47
-rw-r--r--nova/api/openstack/placement/schemas/resource_class.py33
-rw-r--r--nova/api/openstack/placement/schemas/resource_provider.py106
-rw-r--r--nova/api/openstack/placement/schemas/trait.py56
-rw-r--r--nova/api/openstack/placement/schemas/usage.py33
-rw-r--r--nova/api/openstack/placement/util.py697
-rw-r--r--nova/api/openstack/placement/wsgi.py120
-rw-r--r--nova/api/openstack/placement/wsgi_wrapper.py38
-rw-r--r--nova/cmd/manage.py22
-rw-r--r--nova/conf/database.py54
-rw-r--r--nova/conf/placement.py60
-rw-r--r--nova/config.py2
-rw-r--r--nova/db/sqlalchemy/migration.py9
-rw-r--r--nova/hacking/checks.py9
-rw-r--r--nova/rc_fields.py70
-rw-r--r--nova/tests/functional/test_nova_manage.py7
-rw-r--r--nova/tests/unit/policy_fixture.py30
-rw-r--r--nova/tests/unit/test_conf.py5
-rw-r--r--nova/tests/unit/test_nova_manage.py12
-rw-r--r--releasenotes/notes/placement-deleted-a79ad405f428a5f8.yaml13
-rw-r--r--setup.cfg3
-rw-r--r--tox.ini7
73 files changed, 30 insertions, 11933 deletions
diff --git a/.gitignore b/.gitignore
index 1a38386695..3c64ffabe0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -47,7 +47,6 @@ nova/vcsversion.py
tools/conf/nova.conf*
doc/source/_static/nova.conf.sample
doc/source/_static/nova.policy.yaml.sample
-doc/source/_static/placement.policy.yaml.sample
# Files created by releasenotes build
releasenotes/build
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 1220e0f5cb..c5a97436da 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -55,7 +55,6 @@ sample_config_basename = '_static/nova'
policy_generator_config_file = [
('../../etc/nova/nova-policy-generator.conf', '_static/nova'),
- ('../../etc/nova/placement-policy-generator.conf', '_static/placement')
]
actdiag_html_image_format = 'SVG'
diff --git a/nova/api/openstack/placement/__init__.py b/nova/api/openstack/placement/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/api/openstack/placement/__init__.py
+++ /dev/null
diff --git a/nova/api/openstack/placement/auth.py b/nova/api/openstack/placement/auth.py
deleted file mode 100644
index ff2551e26f..0000000000
--- a/nova/api/openstack/placement/auth.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from keystonemiddleware import auth_token
-from oslo_log import log as logging
-from oslo_middleware import request_id
-import webob.dec
-import webob.exc
-
-from nova.api.openstack.placement import context
-
-LOG = logging.getLogger(__name__)
-
-
-class Middleware(object):
-
- def __init__(self, application, **kwargs):
- self.application = application
-
-
-# NOTE(cdent): Only to be used in tests where auth is being faked.
-class NoAuthMiddleware(Middleware):
- """Require a token if one isn't present."""
-
- def __init__(self, application):
- self.application = application
-
- @webob.dec.wsgify
- def __call__(self, req):
- if req.environ['PATH_INFO'] == '/':
- return self.application
-
- if 'X-Auth-Token' not in req.headers:
- return webob.exc.HTTPUnauthorized()
-
- token = req.headers['X-Auth-Token']
- user_id, _sep, project_id = token.partition(':')
- project_id = project_id or user_id
- if user_id == 'admin':
- roles = ['admin']
- else:
- roles = []
- req.headers['X_USER_ID'] = user_id
- req.headers['X_TENANT_ID'] = project_id
- req.headers['X_ROLES'] = ','.join(roles)
- return self.application
-
-
-class PlacementKeystoneContext(Middleware):
- """Make a request context from keystone headers."""
-
- @webob.dec.wsgify
- def __call__(self, req):
- req_id = req.environ.get(request_id.ENV_REQUEST_ID)
-
- ctx = context.RequestContext.from_environ(
- req.environ, request_id=req_id)
-
- if ctx.user_id is None and req.environ['PATH_INFO'] != '/':
- LOG.debug("Neither X_USER_ID nor X_USER found in request")
- return webob.exc.HTTPUnauthorized()
-
- req.environ['placement.context'] = ctx
- return self.application
-
-
-class PlacementAuthProtocol(auth_token.AuthProtocol):
- """A wrapper on Keystone auth_token middleware.
-
- Does not perform verification of authentication tokens
- for root in the API.
-
- """
- def __init__(self, app, conf):
- self._placement_app = app
- super(PlacementAuthProtocol, self).__init__(app, conf)
-
- def __call__(self, environ, start_response):
- if environ['PATH_INFO'] == '/':
- return self._placement_app(environ, start_response)
-
- return super(PlacementAuthProtocol, self).__call__(
- environ, start_response)
-
-
-def filter_factory(global_conf, **local_conf):
- conf = global_conf.copy()
- conf.update(local_conf)
-
- def auth_filter(app):
- return PlacementAuthProtocol(app, conf)
- return auth_filter
diff --git a/nova/api/openstack/placement/context.py b/nova/api/openstack/placement/context.py
deleted file mode 100644
index ee0786f494..0000000000
--- a/nova/api/openstack/placement/context.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_context import context
-from oslo_db.sqlalchemy import enginefacade
-
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import policy
-
-
-@enginefacade.transaction_context_provider
-class RequestContext(context.RequestContext):
-
- def can(self, action, target=None, fatal=True):
- """Verifies that the given action is valid on the target in this
- context.
-
- :param action: string representing the action to be checked.
- :param target: As much information about the object being operated on
- as possible. The target argument should be a dict instance or an
- instance of a class that fully supports the Mapping abstract base
- class and deep copying. For object creation this should be a
- dictionary representing the location of the object e.g.
- ``{'project_id': context.project_id}``. If None, then this default
- target will be considered::
-
- {'project_id': self.project_id, 'user_id': self.user_id}
- :param fatal: if False, will return False when an
- exception.PolicyNotAuthorized occurs.
- :raises nova.api.openstack.placement.exception.PolicyNotAuthorized:
- if verification fails and fatal is True.
- :return: returns a non-False value (not necessarily "True") if
- authorized and False if not authorized and fatal is False.
- """
- if target is None:
- target = {'project_id': self.project_id,
- 'user_id': self.user_id}
- try:
- return policy.authorize(self, action, target)
- except exception.PolicyNotAuthorized:
- if fatal:
- raise
- return False
diff --git a/nova/api/openstack/placement/db_api.py b/nova/api/openstack/placement/db_api.py
deleted file mode 100644
index 31426cbd6d..0000000000
--- a/nova/api/openstack/placement/db_api.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Database context manager for placement database connection, kept in its
-own file so the nova db_api (which has cascading imports) is not imported.
-"""
-
-from oslo_db.sqlalchemy import enginefacade
-from oslo_log import log as logging
-
-from nova.utils import run_once
-
-placement_context_manager = enginefacade.transaction_context()
-LOG = logging.getLogger(__name__)
-
-
-def _get_db_conf(conf_group):
- return dict(conf_group.items())
-
-
-@run_once("TransactionFactory already started, not reconfiguring.",
- LOG.warning)
-def configure(conf):
- # If [placement_database]/connection is not set in conf, then placement
- # data will be stored in the nova_api database.
- if conf.placement_database.connection is None:
- placement_context_manager.configure(
- **_get_db_conf(conf.api_database))
- else:
- placement_context_manager.configure(
- **_get_db_conf(conf.placement_database))
-
-
-def get_placement_engine():
- return placement_context_manager.writer.get_engine()
-
-
-@enginefacade.transaction_context_provider
-class DbContext(object):
- """Stub class for db session handling outside of web requests."""
diff --git a/nova/api/openstack/placement/deploy.py b/nova/api/openstack/placement/deploy.py
deleted file mode 100644
index 76de333ebb..0000000000
--- a/nova/api/openstack/placement/deploy.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Deployment handling for Placmenent API."""
-
-from microversion_parse import middleware as mp_middleware
-import oslo_middleware
-from oslo_middleware import cors
-
-from nova.api.openstack.placement import auth
-from nova.api.openstack.placement import db_api
-from nova.api.openstack.placement import fault_wrap
-from nova.api.openstack.placement import handler
-from nova.api.openstack.placement import microversion
-from nova.api.openstack.placement.objects import resource_provider
-from nova.api.openstack.placement import requestlog
-from nova.api.openstack.placement import util
-
-
-# TODO(cdent): NAME points to the config project being used, so for
-# now this is "nova" but we probably want "placement" eventually.
-NAME = "nova"
-
-
-def deploy(conf):
- """Assemble the middleware pipeline leading to the placement app."""
- if conf.api.auth_strategy == 'noauth2':
- auth_middleware = auth.NoAuthMiddleware
- else:
- # Do not use 'oslo_config_project' param here as the conf
- # location may have been overridden earlier in the deployment
- # process with OS_PLACEMENT_CONFIG_DIR in wsgi.py.
- auth_middleware = auth.filter_factory(
- {}, oslo_config_config=conf)
-
- # Pass in our CORS config, if any, manually as that's a)
- # explicit, b) makes testing more straightfoward, c) let's
- # us control the use of cors by the presence of its config.
- conf.register_opts(cors.CORS_OPTS, 'cors')
- if conf.cors.allowed_origin:
- cors_middleware = oslo_middleware.CORS.factory(
- {}, **conf.cors)
- else:
- cors_middleware = None
-
- context_middleware = auth.PlacementKeystoneContext
- req_id_middleware = oslo_middleware.RequestId
- microversion_middleware = mp_middleware.MicroversionMiddleware
- fault_middleware = fault_wrap.FaultWrapper
- request_log = requestlog.RequestLog
-
- application = handler.PlacementHandler()
- # configure microversion middleware in the old school way
- application = microversion_middleware(
- application, microversion.SERVICE_TYPE, microversion.VERSIONS,
- json_error_formatter=util.json_error_formatter)
-
- # NOTE(cdent): The ordering here is important. The list is ordered
- # from the inside out. For a single request req_id_middleware is called
- # first and microversion_middleware last. Then the request is finally
- # passed to the application (the PlacementHandler). At that point
- # the response ascends the middleware in the reverse of the
- # order the request went in. This order ensures that log messages
- # all see the same contextual information including request id and
- # authentication information.
- for middleware in (fault_middleware,
- request_log,
- context_middleware,
- auth_middleware,
- cors_middleware,
- req_id_middleware,
- ):
- if middleware:
- application = middleware(application)
-
- # NOTE(mriedem): Ignore scope check UserWarnings from oslo.policy.
- if not conf.oslo_policy.enforce_scope:
- import warnings
- warnings.filterwarnings('ignore',
- message="Policy .* failed scope check",
- category=UserWarning)
-
- return application
-
-
-def update_database():
- """Do any database updates required at process boot time, such as
- updating the traits table.
- """
- ctx = db_api.DbContext()
- resource_provider.ensure_trait_sync(ctx)
- resource_provider.ensure_rc_cache(ctx)
-
-
-# NOTE(cdent): Althought project_name is no longer used because of the
-# resolution of https://bugs.launchpad.net/nova/+bug/1734491, loadapp()
-# is considered a public interface for the creation of a placement
-# WSGI app so must maintain its interface. The canonical placement WSGI
-# app is created by init_application in wsgi.py, but this is not
-# required and in fact can be limiting. loadapp() may be used from
-# fixtures or arbitrary WSGI frameworks and loaders.
-def loadapp(config, project_name=NAME):
- """WSGI application creator for placement.
-
- :param config: An olso_config.cfg.ConfigOpts containing placement
- configuration.
- :param project_name: oslo_config project name. Ignored, preserved for
- backwards compatibility
- """
- application = deploy(config)
- update_database()
- return application
diff --git a/nova/api/openstack/placement/direct.py b/nova/api/openstack/placement/direct.py
deleted file mode 100644
index 66e11e7f62..0000000000
--- a/nova/api/openstack/placement/direct.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Call any URI in the placement service directly without real HTTP.
-
-This is useful for those cases where processes wish to manipulate the
-Placement datastore but do not want to run Placement as a long running
-service. A PlacementDirect context manager is provided. Within that
-HTTP requests may be made as normal but they will not actually traverse
-a real socket.
-"""
-
-from keystoneauth1 import adapter
-from keystoneauth1 import session
-import mock
-from oslo_utils import uuidutils
-import requests
-from wsgi_intercept import interceptor
-
-from nova.api.openstack.placement import deploy
-
-
-class PlacementDirect(interceptor.RequestsInterceptor):
- """Provide access to the placement service without real HTTP.
-
- wsgi-intercept is used to provide a keystoneauth1 Adapter that has access
- to an in-process placement service. This provides access to making changes
- to the placement database without requiring HTTP over the network - it
- remains in-process.
-
- Authentication to the service is turned off; admin access is assumed.
-
- Access is provided via a context manager which is responsible for
- turning the wsgi-intercept on and off, and setting and removing
- mocks required to keystoneauth1 to work around endpoint discovery.
-
- Example::
-
- with PlacementDirect(cfg.CONF, latest_microversion=True) as client:
- allocations = client.get('/allocations/%s' % consumer)
-
- :param conf: An oslo config with the options used to configure
- the placement service (notably database connection
- string).
- :param latest_microversion: If True, API requests will use the latest
- microversion if not otherwise specified. If
- False (the default), the base microversion is
- the default.
- """
-
- def __init__(self, conf, latest_microversion=False):
- conf.set_override('auth_strategy', 'noauth2', group='api')
- app = lambda: deploy.loadapp(conf)
- self.url = 'http://%s/placement' % str(uuidutils.generate_uuid())
- # Supply our own session so the wsgi-intercept can intercept
- # the right thing.
- request_session = requests.Session()
- headers = {
- 'x-auth-token': 'admin',
- }
- # TODO(efried): See below
- if latest_microversion:
- headers['OpenStack-API-Version'] = 'placement latest'
- self.adapter = adapter.Adapter(
- session.Session(auth=None, session=request_session,
- additional_headers=headers),
- service_type='placement', raise_exc=False)
- # TODO(efried): Figure out why this isn't working:
- # default_microversion='latest' if latest_microversion else None)
- self._mocked_endpoint = mock.patch(
- 'keystoneauth1.session.Session.get_endpoint',
- new=mock.Mock(return_value=self.url))
- super(PlacementDirect, self).__init__(app, url=self.url)
-
- def __enter__(self):
- """Start the wsgi-intercept interceptor and keystone endpoint mock.
-
- A no auth ksa Adapter is provided to the context being managed.
- """
- super(PlacementDirect, self).__enter__()
- self._mocked_endpoint.start()
- return self.adapter
-
- def __exit__(self, *exc):
- self._mocked_endpoint.stop()
- return super(PlacementDirect, self).__exit__(*exc)
diff --git a/nova/api/openstack/placement/errors.py b/nova/api/openstack/placement/errors.py
deleted file mode 100644
index 15e4fbc4cd..0000000000
--- a/nova/api/openstack/placement/errors.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Error code symbols to be used in structured JSON error responses.
-
-These are strings to be used in the 'code' attribute, as described by
-the API guideline on `errors`_.
-
-There must be only one instance of any string value and it should have
-only one associated constant SYMBOL.
-
-In a WSGI handler (representing the sole handler for an HTTP method and
-URI) each error condition should get a separate error code. Reusing an
-error code in a different handler is not just acceptable, but useful.
-
-For example 'placement.inventory.inuse' is meaningful and correct in both
-``PUT /resource_providers/{uuid}/inventories`` and ``DELETE`` on the same
-URI.
-
-.. _errors: http://specs.openstack.org/openstack/api-wg/guidelines/errors.html
-"""
-
-# NOTE(cdent): This is the simplest thing that can possibly work, for now.
-# If it turns out we want to automate this, or put different resources in
-# different files, or otherwise change things, that's fine. The only thing
-# that needs to be maintained as the same are the strings that API end
-# users use. How they are created is completely fungible.
-
-
-# Do not change the string values. Once set, they are set.
-# Do not reuse string values. There should be only one symbol for any
-# value.
-DEFAULT = 'placement.undefined_code'
-INVENTORY_INUSE = 'placement.inventory.inuse'
-CONCURRENT_UPDATE = 'placement.concurrent_update'
-DUPLICATE_NAME = 'placement.duplicate_name'
-PROVIDER_IN_USE = 'placement.resource_provider.inuse'
-PROVIDER_CANNOT_DELETE_PARENT = (
- 'placement.resource_provider.cannot_delete_parent')
-RESOURCE_PROVIDER_NOT_FOUND = 'placement.resource_provider.not_found'
diff --git a/nova/api/openstack/placement/exception.py b/nova/api/openstack/placement/exception.py
deleted file mode 100644
index f6fa3ec7e4..0000000000
--- a/nova/api/openstack/placement/exception.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Exceptions for use in the Placement API."""
-
-# NOTE(cdent): The exceptions are copied from nova.exception, where they
-# were originally used. To prepare for extracting placement to its own
-# repository we wish to no longer do that. Instead, exceptions used by
-# placement should be in the placement hierarchy.
-
-from oslo_log import log as logging
-
-from nova.i18n import _
-
-
-LOG = logging.getLogger(__name__)
-
-
-class _BaseException(Exception):
- """Base Exception
-
- To correctly use this class, inherit from it and define
- a 'msg_fmt' property. That msg_fmt will get printf'd
- with the keyword arguments provided to the constructor.
-
- """
- msg_fmt = _("An unknown exception occurred.")
-
- def __init__(self, message=None, **kwargs):
- self.kwargs = kwargs
-
- if not message:
- try:
- message = self.msg_fmt % kwargs
- except Exception:
- # NOTE(melwitt): This is done in a separate method so it can be
- # monkey-patched during testing to make it a hard failure.
- self._log_exception()
- message = self.msg_fmt
-
- self.message = message
- super(_BaseException, self).__init__(message)
-
- def _log_exception(self):
- # kwargs doesn't match a variable in the message
- # log the issue and the kwargs
- LOG.exception('Exception in string format operation')
- for name, value in self.kwargs.items():
- LOG.error("%s: %s" % (name, value)) # noqa
-
- def format_message(self):
- # Use the first argument to the python Exception object which
- # should be our full exception message, (see __init__).
- return self.args[0]
-
-
-class NotFound(_BaseException):
- msg_fmt = _("Resource could not be found.")
-
-
-class Exists(_BaseException):
- msg_fmt = _("Resource already exists.")
-
-
-class InvalidInventory(_BaseException):
- msg_fmt = _("Inventory for '%(resource_class)s' on "
- "resource provider '%(resource_provider)s' invalid.")
-
-
-class CannotDeleteParentResourceProvider(_BaseException):
- msg_fmt = _("Cannot delete resource provider that is a parent of "
- "another. Delete child providers first.")
-
-
-class ConcurrentUpdateDetected(_BaseException):
- msg_fmt = _("Another thread concurrently updated the data. "
- "Please retry your update")
-
-
-class ResourceProviderConcurrentUpdateDetected(ConcurrentUpdateDetected):
- msg_fmt = _("Another thread concurrently updated the resource provider "
- "data. Please retry your update")
-
-
-class InvalidAllocationCapacityExceeded(InvalidInventory):
- msg_fmt = _("Unable to create allocation for '%(resource_class)s' on "
- "resource provider '%(resource_provider)s'. The requested "
- "amount would exceed the capacity.")
-
-
-class InvalidAllocationConstraintsViolated(InvalidInventory):
- msg_fmt = _("Unable to create allocation for '%(resource_class)s' on "
- "resource provider '%(resource_provider)s'. The requested "
- "amount would violate inventory constraints.")
-
-
-class InvalidInventoryCapacity(InvalidInventory):
- msg_fmt = _("Invalid inventory for '%(resource_class)s' on "
- "resource provider '%(resource_provider)s'. "
- "The reserved value is greater than or equal to total.")
-
-
-class InvalidInventoryCapacityReservedCanBeTotal(InvalidInventoryCapacity):
- msg_fmt = _("Invalid inventory for '%(resource_class)s' on "
- "resource provider '%(resource_provider)s'. "
- "The reserved value is greater than total.")
-
-
-# An exception with this name is used on both sides of the placement/
-# nova interaction.
-class InventoryInUse(InvalidInventory):
- # NOTE(mriedem): This message cannot change without impacting the
- # nova.scheduler.client.report._RE_INV_IN_USE regex.
- msg_fmt = _("Inventory for '%(resource_classes)s' on "
- "resource provider '%(resource_provider)s' in use.")
-
-
-class InventoryWithResourceClassNotFound(NotFound):
- msg_fmt = _("No inventory of class %(resource_class)s found.")
-
-
-class MaxDBRetriesExceeded(_BaseException):
- msg_fmt = _("Max retries of DB transaction exceeded attempting to "
- "perform %(action)s.")
-
-
-class ObjectActionError(_BaseException):
- msg_fmt = _('Object action %(action)s failed because: %(reason)s')
-
-
-class PolicyNotAuthorized(_BaseException):
- msg_fmt = _("Policy does not allow %(action)s to be performed.")
-
-
-class ResourceClassCannotDeleteStandard(_BaseException):
- msg_fmt = _("Cannot delete standard resource class %(resource_class)s.")
-
-
-class ResourceClassCannotUpdateStandard(_BaseException):
- msg_fmt = _("Cannot update standard resource class %(resource_class)s.")
-
-
-class ResourceClassExists(_BaseException):
- msg_fmt = _("Resource class %(resource_class)s already exists.")
-
-
-class ResourceClassInUse(_BaseException):
- msg_fmt = _("Cannot delete resource class %(resource_class)s. "
- "Class is in use in inventory.")
-
-
-class ResourceClassNotFound(NotFound):
- msg_fmt = _("No such resource class %(resource_class)s.")
-
-
-# An exception with this name is used on both sides of the placement/
-# nova interaction.
-class ResourceProviderInUse(_BaseException):
- msg_fmt = _("Resource provider has allocations.")
-
-
-class TraitCannotDeleteStandard(_BaseException):
- msg_fmt = _("Cannot delete standard trait %(name)s.")
-
-
-class TraitExists(_BaseException):
- msg_fmt = _("The Trait %(name)s already exists")
-
-
-class TraitInUse(_BaseException):
- msg_fmt = _("The trait %(name)s is in use by a resource provider.")
-
-
-class TraitNotFound(NotFound):
- msg_fmt = _("No such trait(s): %(names)s.")
-
-
-class ProjectNotFound(NotFound):
- msg_fmt = _("No such project(s): %(external_id)s.")
-
-
-class ProjectExists(Exists):
- msg_fmt = _("The project %(external_id)s already exists.")
-
-
-class UserNotFound(NotFound):
- msg_fmt = _("No such user(s): %(external_id)s.")
-
-
-class UserExists(Exists):
- msg_fmt = _("The user %(external_id)s already exists.")
-
-
-class ConsumerNotFound(NotFound):
- msg_fmt = _("No such consumer(s): %(uuid)s.")
-
-
-class ConsumerExists(Exists):
- msg_fmt = _("The consumer %(uuid)s already exists.")
diff --git a/nova/api/openstack/placement/fault_wrap.py b/nova/api/openstack/placement/fault_wrap.py
deleted file mode 100644
index 764d628b49..0000000000
--- a/nova/api/openstack/placement/fault_wrap.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Simple middleware for safely catching unexpected exceptions."""
-
-# NOTE(cdent): This is a super simplified replacement for the nova
-# FaultWrapper, which does more than placement needs.
-
-from oslo_log import log as logging
-import six
-from webob import exc
-
-from nova.api.openstack.placement import util
-
-LOG = logging.getLogger(__name__)
-
-
-class FaultWrapper(object):
- """Turn an uncaught exception into a status 500.
-
- Uncaught exceptions usually shouldn't happen, if it does it
- means there is a bug in the placement service, which should be
- fixed.
- """
-
- def __init__(self, application):
- self.application = application
-
- def __call__(self, environ, start_response):
- try:
- return self.application(environ, start_response)
- except Exception as unexpected_exception:
- LOG.exception('Placement API unexpected error: %s',
- unexpected_exception)
- formatted_exception = exc.HTTPInternalServerError(
- six.text_type(unexpected_exception))
- formatted_exception.json_formatter = util.json_error_formatter
- return formatted_exception.generate_response(
- environ, start_response)
diff --git a/nova/api/openstack/placement/handler.py b/nova/api/openstack/placement/handler.py
deleted file mode 100644
index c714c464c5..0000000000
--- a/nova/api/openstack/placement/handler.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Handlers for placement API.
-
-Individual handlers are associated with URL paths in the
-ROUTE_DECLARATIONS dictionary. At the top level each key is a Routes
-compliant path. The value of that key is a dictionary mapping
-individual HTTP request methods to a Python function representing a
-simple WSGI application for satisfying that request.
-
-The ``make_map`` method processes ROUTE_DECLARATIONS to create a
-Routes.Mapper, including automatic handlers to respond with a
-405 when a request is made against a valid URL with an invalid
-method.
-"""
-
-import routes
-import webob
-
-from oslo_log import log as logging
-
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement.handlers import aggregate
-from nova.api.openstack.placement.handlers import allocation
-from nova.api.openstack.placement.handlers import allocation_candidate
-from nova.api.openstack.placement.handlers import inventory
-from nova.api.openstack.placement.handlers import reshaper
-from nova.api.openstack.placement.handlers import resource_class
-from nova.api.openstack.placement.handlers import resource_provider
-from nova.api.openstack.placement.handlers import root
-from nova.api.openstack.placement.handlers import trait
-from nova.api.openstack.placement.handlers import usage
-from nova.api.openstack.placement import util
-from nova.i18n import _
-
-LOG = logging.getLogger(__name__)
-
-# URLs and Handlers
-# NOTE(cdent): When adding URLs here, do not use regex patterns in
-# the path parameters (e.g. {uuid:[0-9a-zA-Z-]+}) as that will lead
-# to 404s that are controlled outside of the individual resources
-# and thus do not include specific information on the why of the 404.
-ROUTE_DECLARATIONS = {
- '/': {
- 'GET': root.home,
- },
- # NOTE(cdent): This allows '/placement/' and '/placement' to
- # both work as the root of the service, which we probably want
- # for those situations where the service is mounted under a
- # prefix (as it is in devstack). While weird, an empty string is
- # a legit key in a dictionary and matches as desired in Routes.
- '': {
- 'GET': root.home,
- },
- '/resource_classes': {
- 'GET': resource_class.list_resource_classes,
- 'POST': resource_class.create_resource_class
- },
- '/resource_classes/{name}': {
- 'GET': resource_class.get_resource_class,
- 'PUT': resource_class.update_resource_class,
- 'DELETE': resource_class.delete_resource_class,
- },
- '/resource_providers': {
- 'GET': resource_provider.list_resource_providers,
- 'POST': resource_provider.create_resource_provider
- },
- '/resource_providers/{uuid}': {
- 'GET': resource_provider.get_resource_provider,
- 'DELETE': resource_provider.delete_resource_provider,
- 'PUT': resource_provider.update_resource_provider
- },
- '/resource_providers/{uuid}/inventories': {
- 'GET': inventory.get_inventories,
- 'POST': inventory.create_inventory,
- 'PUT': inventory.set_inventories,
- 'DELETE': inventory.delete_inventories
- },
- '/resource_providers/{uuid}/inventories/{resource_class}': {
- 'GET': inventory.get_inventory,
- 'PUT': inventory.update_inventory,
- 'DELETE': inventory.delete_inventory
- },
- '/resource_providers/{uuid}/usages': {
- 'GET': usage.list_usages
- },
- '/resource_providers/{uuid}/aggregates': {
- 'GET': aggregate.get_aggregates,
- 'PUT': aggregate.set_aggregates
- },
- '/resource_providers/{uuid}/allocations': {
- 'GET': allocation.list_for_resource_provider,
- },
- '/allocations': {
- 'POST': allocation.set_allocations,
- },
- '/allocations/{consumer_uuid}': {
- 'GET': allocation.list_for_consumer,
- 'PUT': allocation.set_allocations_for_consumer,
- 'DELETE': allocation.delete_allocations,
- },
- '/allocation_candidates': {
- 'GET': allocation_candidate.list_allocation_candidates,
- },
- '/traits': {
- 'GET': trait.list_traits,
- },
- '/traits/{name}': {
- 'GET': trait.get_trait,
- 'PUT': trait.put_trait,
- 'DELETE': trait.delete_trait,
- },
- '/resource_providers/{uuid}/traits': {
- 'GET': trait.list_traits_for_resource_provider,
- 'PUT': trait.update_traits_for_resource_provider,
- 'DELETE': trait.delete_traits_for_resource_provider
- },
- '/usages': {
- 'GET': usage.get_total_usages,
- },
- '/reshaper': {
- 'POST': reshaper.reshape,
- },
-}
-
-
-def dispatch(environ, start_response, mapper):
- """Find a matching route for the current request.
-
- If no match is found, raise a 404 response.
- If there is a matching route, but no matching handler
- for the given method, raise a 405.
- """
- result = mapper.match(environ=environ)
- if result is None:
- raise webob.exc.HTTPNotFound(
- json_formatter=util.json_error_formatter)
- # We can't reach this code without action being present.
- handler = result.pop('action')
- environ['wsgiorg.routing_args'] = ((), result)
- return handler(environ, start_response)
-
-
-def handle_405(environ, start_response):
- """Return a 405 response when method is not allowed.
-
- If _methods are in routing_args, send an allow header listing
- the methods that are possible on the provided URL.
- """
- _methods = util.wsgi_path_item(environ, '_methods')
- headers = {}
- if _methods:
- # Ensure allow header is a python 2 or 3 native string (thus
- # not unicode in python 2 but stay a string in python 3)
- # In the process done by Routes to save the allowed methods
- # to its routing table they become unicode in py2.
- headers['allow'] = str(_methods)
- # Use Exception class as WSGI Application. We don't want to raise here.
- response = webob.exc.HTTPMethodNotAllowed(
- _('The method specified is not allowed for this resource.'),
- headers=headers, json_formatter=util.json_error_formatter)
- return response(environ, start_response)
-
-
-def make_map(declarations):
- """Process route declarations to create a Route Mapper."""
- mapper = routes.Mapper()
- for route, targets in declarations.items():
- allowed_methods = []
- for method in targets:
- mapper.connect(route, action=targets[method],
- conditions=dict(method=[method]))
- allowed_methods.append(method)
- allowed_methods = ', '.join(allowed_methods)
- mapper.connect(route, action=handle_405, _methods=allowed_methods)
- return mapper
-
-
-class PlacementHandler(object):
- """Serve Placement API.
-
- Dispatch to handlers defined in ROUTE_DECLARATIONS.
- """
-
- def __init__(self, **local_config):
- # NOTE(cdent): Local config currently unused.
- self._map = make_map(ROUTE_DECLARATIONS)
-
- def __call__(self, environ, start_response):
- # Check that an incoming request with a content-length header
- # that is an integer > 0 and not empty, also has a content-type
- # header that is not empty. If not raise a 400.
- clen = environ.get('CONTENT_LENGTH')
- try:
- if clen and (int(clen) > 0) and not environ.get('CONTENT_TYPE'):
- raise webob.exc.HTTPBadRequest(
- _('content-type header required when content-length > 0'),
- json_formatter=util.json_error_formatter)
- except ValueError as exc:
- raise webob.exc.HTTPBadRequest(
- _('content-length header must be an integer'),
- json_formatter=util.json_error_formatter)
- try:
- return dispatch(environ, start_response, self._map)
- # Trap the NotFound exceptions raised by the objects used
- # with the API and transform them into webob.exc.HTTPNotFound.
- except exception.NotFound as exc:
- raise webob.exc.HTTPNotFound(
- exc, json_formatter=util.json_error_formatter)
- except exception.PolicyNotAuthorized as exc:
- raise webob.exc.HTTPForbidden(
- exc.format_message(),
- json_formatter=util.json_error_formatter)
- # Remaining uncaught exceptions will rise first to the Microversion
- # middleware, where any WebOb generated exceptions will be caught and
- # transformed into legit HTTP error responses (with microversion
- # headers added), and then to the FaultWrapper middleware which will
- # catch anything else and transform them into 500 responses.
- # NOTE(cdent): There should be very few uncaught exceptions which are
- # not WebOb exceptions at this stage as the handlers are contained by
- # the wsgify decorator which will transform those exceptions to
- # responses itself.
diff --git a/nova/api/openstack/placement/handlers/__init__.py b/nova/api/openstack/placement/handlers/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/api/openstack/placement/handlers/__init__.py
+++ /dev/null
diff --git a/nova/api/openstack/placement/handlers/aggregate.py b/nova/api/openstack/placement/handlers/aggregate.py
deleted file mode 100644
index a26839c373..0000000000
--- a/nova/api/openstack/placement/handlers/aggregate.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Aggregate handlers for Placement API."""
-
-from oslo_db import exception as db_exc
-from oslo_serialization import jsonutils
-from oslo_utils import encodeutils
-from oslo_utils import timeutils
-import webob
-
-from nova.api.openstack.placement import errors
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import microversion
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova.api.openstack.placement.policies import aggregate as policies
-from nova.api.openstack.placement.schemas import aggregate as schema
-from nova.api.openstack.placement import util
-from nova.api.openstack.placement import wsgi_wrapper
-from nova.i18n import _
-
-
-_INCLUDE_GENERATION_VERSION = (1, 19)
-
-
-def _send_aggregates(req, resource_provider, aggregate_uuids):
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- response = req.response
- response.status = 200
- payload = _serialize_aggregates(aggregate_uuids)
- if want_version.matches(min_version=_INCLUDE_GENERATION_VERSION):
- payload['resource_provider_generation'] = resource_provider.generation
- response.body = encodeutils.to_utf8(
- jsonutils.dumps(payload))
- response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- req.response.cache_control = 'no-cache'
- # We never get an aggregate itself, we get the list of aggregates
- # that are associated with a resource provider. We don't record the
- # time when that association was made and the time when an aggregate
- # uuid was created is not relevant, so here we punt and use utcnow.
- req.response.last_modified = timeutils.utcnow(with_timezone=True)
- return response
-
-
-def _serialize_aggregates(aggregate_uuids):
- return {'aggregates': aggregate_uuids}
-
-
-def _set_aggregates(resource_provider, aggregate_uuids,
- increment_generation=False):
- """Set aggregates for the resource provider.
-
- If increment generation is true, the resource provider generation
- will be incremented if possible. If that fails (because something
- else incremented the generation in another thread), a
- ConcurrentUpdateDetected will be raised.
- """
- # NOTE(cdent): It's not clear what the DBDuplicateEntry handling
- # is doing here, set_aggregates already handles that, but I'm leaving
- # it here because it was already there.
- try:
- resource_provider.set_aggregates(
- aggregate_uuids, increment_generation=increment_generation)
- except exception.ConcurrentUpdateDetected as exc:
- raise webob.exc.HTTPConflict(
- _('Update conflict: %(error)s') % {'error': exc},
- comment=errors.CONCURRENT_UPDATE)
- except db_exc.DBDuplicateEntry as exc:
- raise webob.exc.HTTPConflict(
- _('Update conflict: %(error)s') % {'error': exc})
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.check_accept('application/json')
-@microversion.version_handler('1.1')
-def get_aggregates(req):
- """GET a list of aggregates associated with a resource provider.
-
- If the resource provider does not exist return a 404.
-
- On success return a 200 with an application/json body containing a
- list of aggregate uuids.
- """
- context = req.environ['placement.context']
- context.can(policies.LIST)
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
- aggregate_uuids = resource_provider.get_aggregates()
-
- return _send_aggregates(req, resource_provider, aggregate_uuids)
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.require_content('application/json')
-@microversion.version_handler('1.1')
-def set_aggregates(req):
- context = req.environ['placement.context']
- context.can(policies.UPDATE)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- consider_generation = want_version.matches(
- min_version=_INCLUDE_GENERATION_VERSION)
- put_schema = schema.PUT_AGGREGATES_SCHEMA_V1_1
- if consider_generation:
- put_schema = schema.PUT_AGGREGATES_SCHEMA_V1_19
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
- data = util.extract_json(req.body, put_schema)
- if consider_generation:
- # Check for generation conflict
- rp_gen = data['resource_provider_generation']
- if resource_provider.generation != rp_gen:
- raise webob.exc.HTTPConflict(
- _("Resource provider's generation already changed. Please "
- "update the generation and try again."),
- comment=errors.CONCURRENT_UPDATE)
- aggregate_uuids = data['aggregates']
- else:
- aggregate_uuids = data
- _set_aggregates(resource_provider, aggregate_uuids,
- increment_generation=consider_generation)
-
- return _send_aggregates(req, resource_provider, aggregate_uuids)
diff --git a/nova/api/openstack/placement/handlers/allocation.py b/nova/api/openstack/placement/handlers/allocation.py
deleted file mode 100644
index 9b2f5d8f3a..0000000000
--- a/nova/api/openstack/placement/handlers/allocation.py
+++ /dev/null
@@ -1,576 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Placement API handlers for setting and deleting allocations."""
-
-import collections
-import uuid
-
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-from oslo_utils import encodeutils
-from oslo_utils import excutils
-from oslo_utils import timeutils
-from oslo_utils import uuidutils
-import webob
-
-from nova.api.openstack.placement import errors
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import microversion
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova.api.openstack.placement.policies import allocation as policies
-from nova.api.openstack.placement.schemas import allocation as schema
-from nova.api.openstack.placement import util
-from nova.api.openstack.placement import wsgi_wrapper
-from nova.i18n import _
-
-
-LOG = logging.getLogger(__name__)
-
-
-def _last_modified_from_allocations(allocations, want_version):
- """Given a set of allocation objects, returns the last modified timestamp.
- """
- # NOTE(cdent): The last_modified for an allocation will always be
- # based off the created_at column because allocations are only
- # ever inserted, never updated.
- last_modified = None
- # Only calculate last-modified if we are using a microversion that
- # supports it.
- get_last_modified = want_version and want_version.matches((1, 15))
- for allocation in allocations:
- if get_last_modified:
- last_modified = util.pick_last_modified(last_modified, allocation)
-
- last_modified = last_modified or timeutils.utcnow(with_timezone=True)
- return last_modified
-
-
-def _serialize_allocations_for_consumer(allocations, want_version):
- """Turn a list of allocations into a dict by resource provider uuid.
-
- {
- 'allocations': {
- RP_UUID_1: {
- 'generation': GENERATION,
- 'resources': {
- 'DISK_GB': 4,
- 'VCPU': 2
- }
- },
- RP_UUID_2: {
- 'generation': GENERATION,
- 'resources': {
- 'DISK_GB': 6,
- 'VCPU': 3
- }
- }
- },
- # project_id and user_id are added with microverion 1.12
- 'project_id': PROJECT_ID,
- 'user_id': USER_ID,
- # Generation for consumer >= 1.28
- 'consumer_generation': 1
- }
- """
- allocation_data = collections.defaultdict(dict)
- for allocation in allocations:
- key = allocation.resource_provider.uuid
- if 'resources' not in allocation_data[key]:
- allocation_data[key]['resources'] = {}
-
- resource_class = allocation.resource_class
- allocation_data[key]['resources'][resource_class] = allocation.used
- generation = allocation.resource_provider.generation
- allocation_data[key]['generation'] = generation
-
- result = {'allocations': allocation_data}
- if allocations and want_version.matches((1, 12)):
- # We're looking at a list of allocations by consumer id so project and
- # user are consistent across the list
- consumer = allocations[0].consumer
- project_id = consumer.project.external_id
- user_id = consumer.user.external_id
- result['project_id'] = project_id
- result['user_id'] = user_id
- show_consumer_gen = want_version.matches((1, 28))
- if show_consumer_gen:
- result['consumer_generation'] = consumer.generation
-
- return result
-
-
-def _serialize_allocations_for_resource_provider(allocations,
- resource_provider,
- want_version):
- """Turn a list of allocations into a dict by consumer id.
-
- {'resource_provider_generation': GENERATION,
- 'allocations':
- CONSUMER_ID_1: {
- 'resources': {
- 'DISK_GB': 4,
- 'VCPU': 2
- },
- # Generation for consumer >= 1.28
- 'consumer_generation': 0
- },
- CONSUMER_ID_2: {
- 'resources': {
- 'DISK_GB': 6,
- 'VCPU': 3
- },
- # Generation for consumer >= 1.28
- 'consumer_generation': 0
- }
- }
- """
- show_consumer_gen = want_version.matches((1, 28))
- allocation_data = collections.defaultdict(dict)
- for allocation in allocations:
- key = allocation.consumer.uuid
- if 'resources' not in allocation_data[key]:
- allocation_data[key]['resources'] = {}
-
- resource_class = allocation.resource_class
- allocation_data[key]['resources'][resource_class] = allocation.used
-
- if show_consumer_gen:
- consumer_gen = None
- if allocation.consumer is not None:
- consumer_gen = allocation.consumer.generation
- allocation_data[key]['consumer_generation'] = consumer_gen
-
- result = {'allocations': allocation_data}
- result['resource_provider_generation'] = resource_provider.generation
- return result
-
-
-# TODO(cdent): Extracting this is useful, for reuse by reshaper code,
-# but having it in this file seems wrong, however, since it uses
-# _new_allocations it's being left here for now. We need a place for shared
-# handler code, but util.py is already too big and too diverse.
-def create_allocation_list(context, data, consumers):
- """Create an AllocationList based on provided data.
-
- :param context: The placement context.
- :param data: A dictionary of multiple allocations by consumer uuid.
- :param consumers: A dictionary, keyed by consumer UUID, of Consumer objects
- :return: An AllocationList.
- :raises: `webob.exc.HTTPBadRequest` if a resource provider included in the
- allocations does not exist.
- """
- allocation_objects = []
-
- for consumer_uuid in data:
- allocations = data[consumer_uuid]['allocations']
- consumer = consumers[consumer_uuid]
- if allocations:
- rp_objs = _resource_providers_by_uuid(context, allocations.keys())
- for resource_provider_uuid in allocations:
- resource_provider = rp_objs[resource_provider_uuid]
- resources = allocations[resource_provider_uuid]['resources']
- new_allocations = _new_allocations(context,
- resource_provider,
- consumer,
- resources)
- allocation_objects.extend(new_allocations)
- else:
- # The allocations are empty, which means wipe them out.
- # Internal to the allocation object this is signalled by a
- # used value of 0.
- allocations = rp_obj.AllocationList.get_all_by_consumer_id(
- context, consumer_uuid)
- for allocation in allocations:
- allocation.used = 0
- allocation_objects.append(allocation)
-
- return rp_obj.AllocationList(context, objects=allocation_objects)
-
-
-def inspect_consumers(context, data, want_version):
- """Look at consumer data in allocations and create consumers as needed.
-
- Keep a record of the consumers that are created in case they need
- to be removed later.
-
- If an exception is raised by ensure_consumer, commonly HTTPConflict but
- also anything else, the newly created consumers will be deleted and the
- exception reraised to the caller.
-
- :param context: The placement context.
- :param data: A dictionary of multiple allocations by consumer uuid.
- :param want_version: the microversion matcher.
- :return: A tuple of a dict of all consumer objects (by consumer uuid)
- and a list of those consumer objects which are new.
- """
- # First, ensure that all consumers referenced in the payload actually
- # exist. And if not, create them. Keep a record of auto-created consumers
- # so we can clean them up if the end allocation replace_all() fails.
- consumers = {} # dict of Consumer objects, keyed by consumer UUID
- new_consumers_created = []
- for consumer_uuid in data:
- project_id = data[consumer_uuid]['project_id']
- user_id = data[consumer_uuid]['user_id']
- consumer_generation = data[consumer_uuid].get('consumer_generation')
- try:
- consumer, new_consumer_created = util.ensure_consumer(
- context, consumer_uuid, project_id, user_id,
- consumer_generation, want_version)
- if new_consumer_created:
- new_consumers_created.append(consumer)
- consumers[consumer_uuid] = consumer
- except Exception:
- # If any errors (for instance, a consumer generation conflict)
- # occur when ensuring consumer records above, make sure we delete
- # any auto-created consumers.
- with excutils.save_and_reraise_exception():
- delete_consumers(new_consumers_created)
- return consumers, new_consumers_created
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.check_accept('application/json')
-def list_for_consumer(req):
- """List allocations associated with a consumer."""
- context = req.environ['placement.context']
- context.can(policies.ALLOC_LIST)
- consumer_id = util.wsgi_path_item(req.environ, 'consumer_uuid')
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
-
- # NOTE(cdent): There is no way for a 404 to be returned here,
- # only an empty result. We do not have a way to validate a
- # consumer id.
- allocations = rp_obj.AllocationList.get_all_by_consumer_id(
- context, consumer_id)
-
- output = _serialize_allocations_for_consumer(allocations, want_version)
- last_modified = _last_modified_from_allocations(allocations, want_version)
- allocations_json = jsonutils.dumps(output)
-
- response = req.response
- response.status = 200
- response.body = encodeutils.to_utf8(allocations_json)
- response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- response.last_modified = last_modified
- response.cache_control = 'no-cache'
- return response
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.check_accept('application/json')
-def list_for_resource_provider(req):
- """List allocations associated with a resource provider."""
- # TODO(cdent): On a shared resource provider (for example a
- # giant disk farm) this list could get very long. At the moment
- # we have no facility for limiting the output. Given that we are
- # using a dict of dicts for the output we are potentially limiting
- # ourselves in terms of sorting and filtering.
- context = req.environ['placement.context']
- context.can(policies.RP_ALLOC_LIST)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- uuid = util.wsgi_path_item(req.environ, 'uuid')
-
- # confirm existence of resource provider so we get a reasonable
- # 404 instead of empty list
- try:
- rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
- except exception.NotFound as exc:
- raise webob.exc.HTTPNotFound(
- _("Resource provider '%(rp_uuid)s' not found: %(error)s") %
- {'rp_uuid': uuid, 'error': exc})
-
- allocs = rp_obj.AllocationList.get_all_by_resource_provider(context, rp)
-
- output = _serialize_allocations_for_resource_provider(
- allocs, rp, want_version)
- last_modified = _last_modified_from_allocations(allocs, want_version)
- allocations_json = jsonutils.dumps(output)
-
- response = req.response
- response.status = 200
- response.body = encodeutils.to_utf8(allocations_json)
- response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- response.last_modified = last_modified
- response.cache_control = 'no-cache'
- return response
-
-
-def _resource_providers_by_uuid(ctx, rp_uuids):
- """Helper method that returns a dict, keyed by resource provider UUID, of
- ResourceProvider objects.
-
- :param ctx: The placement context.
- :param rp_uuids: iterable of UUIDs for providers to fetch.
- :raises: `webob.exc.HTTPBadRequest` if any of the UUIDs do not refer to
- an existing resource provider.
- """
- res = {}
- for rp_uuid in rp_uuids:
- # TODO(jaypipes): Clearly, this is not efficient to do one query for
- # each resource provider UUID in the allocations instead of doing a
- # single query for all the UUIDs. However, since
- # ResourceProviderList.get_all_by_filters() is way too complicated for
- # this purpose and doesn't raise NotFound anyway, we'll do this.
- # Perhaps consider adding a ResourceProviderList.get_all_by_uuids()
- # later on?
- try:
- res[rp_uuid] = rp_obj.ResourceProvider.get_by_uuid(ctx, rp_uuid)
- except exception.NotFound:
- raise webob.exc.HTTPBadRequest(
- _("Allocation for resource provider '%(rp_uuid)s' "
- "that does not exist.") %
- {'rp_uuid': rp_uuid})
- return res
-
-
-def _new_allocations(context, resource_provider, consumer, resources):
- """Create new allocation objects for a set of resources
-
- Returns a list of Allocation objects
-
- :param context: The placement context.
- :param resource_provider: The resource provider that has the resources.
- :param consumer: The Consumer object consuming the resources.
- :param resources: A dict of resource classes and values.
- """
- allocations = []
- for resource_class in resources:
- allocation = rp_obj.Allocation(
- resource_provider=resource_provider,
- consumer=consumer,
- resource_class=resource_class,
- used=resources[resource_class])
- allocations.append(allocation)
- return allocations
-
-
-def delete_consumers(consumers):
- """Helper function that deletes any consumer object supplied to it
-
- :param consumers: iterable of Consumer objects to delete
- """
- for consumer in consumers:
- try:
- consumer.delete()
- LOG.debug("Deleted auto-created consumer with consumer UUID "
- "%s after failed allocation", consumer.uuid)
- except Exception as err:
- LOG.warning("Got an exception when deleting auto-created "
- "consumer with UUID %s: %s", consumer.uuid, err)
-
-
-def _set_allocations_for_consumer(req, schema):
- context = req.environ['placement.context']
- context.can(policies.ALLOC_UPDATE)
- consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')
- if not uuidutils.is_uuid_like(consumer_uuid):
- raise webob.exc.HTTPBadRequest(
- _('Malformed consumer_uuid: %(consumer_uuid)s') %
- {'consumer_uuid': consumer_uuid})
- consumer_uuid = str(uuid.UUID(consumer_uuid))
- data = util.extract_json(req.body, schema)
- allocation_data = data['allocations']
-
- # Normalize allocation data to dict.
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- if not want_version.matches((1, 12)):
- allocations_dict = {}
- # Allocation are list-ish, transform to dict-ish
- for allocation in allocation_data:
- resource_provider_uuid = allocation['resource_provider']['uuid']
- allocations_dict[resource_provider_uuid] = {
- 'resources': allocation['resources']
- }
- allocation_data = allocations_dict
-
- allocation_objects = []
- # Consumer object saved in case we need to delete the auto-created consumer
- # record
- consumer = None
- # Whether we created a new consumer record
- created_new_consumer = False
- if not allocation_data:
- # The allocations are empty, which means wipe them out. Internal
- # to the allocation object this is signalled by a used value of 0.
- # We still need to verify the consumer's generation, though, which
- # we do in _ensure_consumer()
- # NOTE(jaypipes): This will only occur 1.28+. The JSONSchema will
- # prevent an empty allocations object from being passed when there is
- # no consumer generation, so this is safe to do.
- util.ensure_consumer(context, consumer_uuid, data.get('project_id'),
- data.get('user_id'), data.get('consumer_generation'),
- want_version)
- allocations = rp_obj.AllocationList.get_all_by_consumer_id(
- context, consumer_uuid)
- for allocation in allocations:
- allocation.used = 0
- allocation_objects.append(allocation)
- else:
- # If the body includes an allocation for a resource provider
- # that does not exist, raise a 400.
- rp_objs = _resource_providers_by_uuid(context, allocation_data.keys())
- consumer, created_new_consumer = util.ensure_consumer(
- context, consumer_uuid, data.get('project_id'),
- data.get('user_id'), data.get('consumer_generation'),
- want_version)
- for resource_provider_uuid, allocation in allocation_data.items():
- resource_provider = rp_objs[resource_provider_uuid]
- new_allocations = _new_allocations(context,
- resource_provider,
- consumer,
- allocation['resources'])
- allocation_objects.extend(new_allocations)
-
- allocations = rp_obj.AllocationList(
- context, objects=allocation_objects)
-
- def _create_allocations(alloc_list):
- try:
- alloc_list.replace_all()
- LOG.debug("Successfully wrote allocations %s", alloc_list)
- except Exception:
- if created_new_consumer:
- delete_consumers([consumer])
- raise
-
- try:
- _create_allocations(allocations)
- # InvalidInventory is a parent for several exceptions that
- # indicate either that Inventory is not present, or that
- # capacity limits have been exceeded.
- except exception.NotFound as exc:
- raise webob.exc.HTTPBadRequest(
- _("Unable to allocate inventory for consumer "
- "%(consumer_uuid)s: %(error)s") %
- {'consumer_uuid': consumer_uuid, 'error': exc})
- except exception.InvalidInventory as exc:
- raise webob.exc.HTTPConflict(
- _('Unable to allocate inventory: %(error)s') % {'error': exc})
- except exception.ConcurrentUpdateDetected as exc:
- raise webob.exc.HTTPConflict(
- _('Inventory and/or allocations changed while attempting to '
- 'allocate: %(error)s') % {'error': exc},
- comment=errors.CONCURRENT_UPDATE)
-
- req.response.status = 204
- req.response.content_type = None
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.0', '1.7')
-@util.require_content('application/json')
-def set_allocations_for_consumer(req):
- return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA)
-
-
-@wsgi_wrapper.PlacementWsgify # noqa
-@microversion.version_handler('1.8', '1.11')
-@util.require_content('application/json')
-def set_allocations_for_consumer(req):
- return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA_V1_8)
-
-
-@wsgi_wrapper.PlacementWsgify # noqa
-@microversion.version_handler('1.12', '1.27')
-@util.require_content('application/json')
-def set_allocations_for_consumer(req):
- return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA_V1_12)
-
-
-@wsgi_wrapper.PlacementWsgify # noqa
-@microversion.version_handler('1.28')
-@util.require_content('application/json')
-def set_allocations_for_consumer(req):
- return _set_allocations_for_consumer(req, schema.ALLOCATION_SCHEMA_V1_28)
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.13')
-@util.require_content('application/json')
-def set_allocations(req):
- context = req.environ['placement.context']
- context.can(policies.ALLOC_MANAGE)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- want_schema = schema.POST_ALLOCATIONS_V1_13
- if want_version.matches((1, 28)):
- want_schema = schema.POST_ALLOCATIONS_V1_28
- data = util.extract_json(req.body, want_schema)
-
- consumers, new_consumers_created = inspect_consumers(
- context, data, want_version)
- # Create a sequence of allocation objects to be used in one
- # AllocationList.replace_all() call, which will mean all the changes
- # happen within a single transaction and with resource provider
- # and consumer generations (if applicable) check all in one go.
- allocations = create_allocation_list(context, data, consumers)
-
- def _create_allocations(alloc_list):
- try:
- alloc_list.replace_all()
- LOG.debug("Successfully wrote allocations %s", alloc_list)
- except Exception:
- delete_consumers(new_consumers_created)
- raise
-
- try:
- _create_allocations(allocations)
- except exception.NotFound as exc:
- raise webob.exc.HTTPBadRequest(
- _("Unable to allocate inventory %(error)s") % {'error': exc})
- except exception.InvalidInventory as exc:
- # InvalidInventory is a parent for several exceptions that
- # indicate either that Inventory is not present, or that
- # capacity limits have been exceeded.
- raise webob.exc.HTTPConflict(
- _('Unable to allocate inventory: %(error)s') % {'error': exc})
- except exception.ConcurrentUpdateDetected as exc:
- raise webob.exc.HTTPConflict(
- _('Inventory and/or allocations changed while attempting to '
- 'allocate: %(error)s') % {'error': exc},
- comment=errors.CONCURRENT_UPDATE)
-
- req.response.status = 204
- req.response.content_type = None
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-def delete_allocations(req):
- context = req.environ['placement.context']
- context.can(policies.ALLOC_DELETE)
- consumer_uuid = util.wsgi_path_item(req.environ, 'consumer_uuid')
-
- allocations = rp_obj.AllocationList.get_all_by_consumer_id(
- context, consumer_uuid)
- if allocations:
- try:
- allocations.delete_all()
- # NOTE(pumaranikar): Following NotFound exception added in the case
- # when allocation is deleted from allocations list by some other
- # activity. In that case, delete_all() will throw a NotFound exception.
- except exception.NotFound as exc:
- raise webob.exc.HTTPNotFound(
- _("Allocation for consumer with id %(id)s not found."
- "error: %(error)s") %
- {'id': consumer_uuid, 'error': exc})
- else:
- raise webob.exc.HTTPNotFound(
- _("No allocations for consumer '%(consumer_uuid)s'") %
- {'consumer_uuid': consumer_uuid})
- LOG.debug("Successfully deleted allocations %s", allocations)
-
- req.response.status = 204
- req.response.content_type = None
- return req.response
diff --git a/nova/api/openstack/placement/handlers/allocation_candidate.py b/nova/api/openstack/placement/handlers/allocation_candidate.py
deleted file mode 100644
index f5425cdf4f..0000000000
--- a/nova/api/openstack/placement/handlers/allocation_candidate.py
+++ /dev/null
@@ -1,332 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Placement API handlers for getting allocation candidates."""
-
-import collections
-
-from oslo_serialization import jsonutils
-from oslo_utils import encodeutils
-from oslo_utils import timeutils
-import six
-import webob
-
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import microversion
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova.api.openstack.placement.policies import allocation_candidate as \
- policies
-from nova.api.openstack.placement.schemas import allocation_candidate as schema
-from nova.api.openstack.placement import util
-from nova.api.openstack.placement import wsgi_wrapper
-from nova.i18n import _
-
-
-def _transform_allocation_requests_dict(alloc_reqs):
- """Turn supplied list of AllocationRequest objects into a list of
- allocations dicts keyed by resource provider uuid of resources involved
- in the allocation request. The returned results are intended to be used
- as the body of a PUT /allocations/{consumer_uuid} HTTP request at
- micoversion 1.12 (and beyond). The JSON objects look like the following:
-
- [
- {
- "allocations": {
- $rp_uuid1: {
- "resources": {
- "MEMORY_MB": 512
- ...
- }
- },
- $rp_uuid2: {
- "resources": {
- "DISK_GB": 1024
- ...
- }
- }
- },
- },
- ...
- ]
- """
- results = []
-
- for ar in alloc_reqs:
- # A default dict of {$rp_uuid: "resources": {})
- rp_resources = collections.defaultdict(lambda: dict(resources={}))
- for rr in ar.resource_requests:
- res_dict = rp_resources[rr.resource_provider.uuid]['resources']
- res_dict[rr.resource_class] = rr.amount
- results.append(dict(allocations=rp_resources))
-
- return results
-
-
-def _transform_allocation_requests_list(alloc_reqs):
- """Turn supplied list of AllocationRequest objects into a list of dicts of
- resources involved in the allocation request. The returned results is
- intended to be able to be used as the body of a PUT
- /allocations/{consumer_uuid} HTTP request, prior to microversion 1.12,
- so therefore we return a list of JSON objects that looks like the
- following:
-
- [
- {
- "allocations": [
- {
- "resource_provider": {
- "uuid": $rp_uuid,
- }
- "resources": {
- $resource_class: $requested_amount, ...
- },
- }, ...
- ],
- }, ...
- ]
- """
- results = []
- for ar in alloc_reqs:
- provider_resources = collections.defaultdict(dict)
- for rr in ar.resource_requests:
- res_dict = provider_resources[rr.resource_provider.uuid]
- res_dict[rr.resource_class] = rr.amount
-
- allocs = [
- {
- "resource_provider": {
- "uuid": rp_uuid,
- },
- "resources": resources,
- } for rp_uuid, resources in provider_resources.items()
- ]
- alloc = {
- "allocations": allocs
- }
- results.append(alloc)
- return results
-
-
-def _transform_provider_summaries(p_sums, requests, want_version):
- """Turn supplied list of ProviderSummary objects into a dict, keyed by
- resource provider UUID, of dicts of provider and inventory information.
- The traits only show up when `want_version` is 1.17 or newer. All the
- resource classes are shown when `want_version` is 1.27 or newer while
- only requested resources are included in the `provider_summaries`
- for older versions. The parent and root provider uuids only show up
- when `want_version` is 1.29 or newer.
-
- {
- RP_UUID_1: {
- 'resources': {
- 'DISK_GB': {
- 'capacity': 100,
- 'used': 0,
- },
- 'VCPU': {
- 'capacity': 4,
- 'used': 0,
- }
- },
- # traits shows up from microversion 1.17
- 'traits': [
- 'HW_CPU_X86_AVX512F',
- 'HW_CPU_X86_AVX512CD'
- ]
- # parent/root provider uuids show up from microversion 1.29
- parent_provider_uuid: null,
- root_provider_uuid: RP_UUID_1
- },
- RP_UUID_2: {
- 'resources': {
- 'DISK_GB': {
- 'capacity': 100,
- 'used': 0,
- },
- 'VCPU': {
- 'capacity': 4,
- 'used': 0,
- }
- },
- # traits shows up from microversion 1.17
- 'traits': [
- 'HW_NIC_OFFLOAD_TSO',
- 'HW_NIC_OFFLOAD_GRO'
- ],
- # parent/root provider uuids show up from microversion 1.29
- parent_provider_uuid: null,
- root_provider_uuid: RP_UUID_2
- }
- }
- """
- include_traits = want_version.matches((1, 17))
- include_all_resources = want_version.matches((1, 27))
- enable_nested_providers = want_version.matches((1, 29))
-
- ret = {}
- requested_resources = set()
-
- for requested_group in requests.values():
- requested_resources |= set(requested_group.resources)
-
- # if include_all_resources is false, only requested resources are
- # included in the provider_summaries.
- for ps in p_sums:
- resources = {
- psr.resource_class: {
- 'capacity': psr.capacity,
- 'used': psr.used,
- } for psr in ps.resources if (
- include_all_resources or
- psr.resource_class in requested_resources)
- }
-
- ret[ps.resource_provider.uuid] = {'resources': resources}
-
- if include_traits:
- ret[ps.resource_provider.uuid]['traits'] = [
- t.name for t in ps.traits]
-
- if enable_nested_providers:
- ret[ps.resource_provider.uuid]['parent_provider_uuid'] = (
- ps.resource_provider.parent_provider_uuid)
- ret[ps.resource_provider.uuid]['root_provider_uuid'] = (
- ps.resource_provider.root_provider_uuid)
-
- return ret
-
-
-def _exclude_nested_providers(alloc_cands):
- """Exclude allocation requests and provider summaries for old microversions
- if they involve more than one provider from the same tree.
- """
- # Build a temporary dict, keyed by root RP UUID of sets of UUIDs of all RPs
- # in that tree.
- tree_rps_by_root = collections.defaultdict(set)
- for ps in alloc_cands.provider_summaries:
- rp_uuid = ps.resource_provider.uuid
- root_uuid = ps.resource_provider.root_provider_uuid
- tree_rps_by_root[root_uuid].add(rp_uuid)
- # We use this to get a list of sets of providers in each tree
- tree_sets = list(tree_rps_by_root.values())
-
- for a_req in alloc_cands.allocation_requests[:]:
- alloc_rp_uuids = set([
- arr.resource_provider.uuid for arr in a_req.resource_requests])
- # If more than one allocation is provided by the same tree, kill
- # that allocation request.
- if any(len(tree_set & alloc_rp_uuids) > 1 for tree_set in tree_sets):
- alloc_cands.allocation_requests.remove(a_req)
-
- # Exclude eliminated providers from the provider summaries.
- all_rp_uuids = set()
- for a_req in alloc_cands.allocation_requests:
- all_rp_uuids |= set(
- arr.resource_provider.uuid for arr in a_req.resource_requests)
- for ps in alloc_cands.provider_summaries[:]:
- if ps.resource_provider.uuid not in all_rp_uuids:
- alloc_cands.provider_summaries.remove(ps)
-
- return alloc_cands
-
-
-def _transform_allocation_candidates(alloc_cands, requests, want_version):
- """Turn supplied AllocationCandidates object into a dict containing
- allocation requests and provider summaries.
-
- {
- 'allocation_requests': <ALLOC_REQUESTS>,
- 'provider_summaries': <PROVIDER_SUMMARIES>,
- }
- """
- # exclude nested providers with old microversions
- if not want_version.matches((1, 29)):
- alloc_cands = _exclude_nested_providers(alloc_cands)
-
- if want_version.matches((1, 12)):
- a_reqs = _transform_allocation_requests_dict(
- alloc_cands.allocation_requests)
- else:
- a_reqs = _transform_allocation_requests_list(
- alloc_cands.allocation_requests)
-
- p_sums = _transform_provider_summaries(
- alloc_cands.provider_summaries, requests, want_version)
-
- return {
- 'allocation_requests': a_reqs,
- 'provider_summaries': p_sums,
- }
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.10')
-@util.check_accept('application/json')
-def list_allocation_candidates(req):
- """GET a JSON object with a list of allocation requests and a JSON object
- of provider summary objects
-
- On success return a 200 and an application/json body representing
- a collection of allocation requests and provider summaries
- """
- context = req.environ['placement.context']
- context.can(policies.LIST)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- get_schema = schema.GET_SCHEMA_1_10
- if want_version.matches((1, 25)):
- get_schema = schema.GET_SCHEMA_1_25
- elif want_version.matches((1, 21)):
- get_schema = schema.GET_SCHEMA_1_21
- elif want_version.matches((1, 17)):
- get_schema = schema.GET_SCHEMA_1_17
- elif want_version.matches((1, 16)):
- get_schema = schema.GET_SCHEMA_1_16
- util.validate_query_params(req, get_schema)
-
- requests = util.parse_qs_request_groups(req)
- limit = req.GET.getall('limit')
- # JSONschema has already confirmed that limit has the form
- # of an integer.
- if limit:
- limit = int(limit[0])
-
- group_policy = req.GET.getall('group_policy') or None
- # Schema ensures we get either "none" or "isolate"
- if group_policy:
- group_policy = group_policy[0]
- else:
- # group_policy is required if more than one numbered request group was
- # specified.
- if len([rg for rg in requests.values() if rg.use_same_provider]) > 1:
- raise webob.exc.HTTPBadRequest(
- _('The "group_policy" parameter is required when specifying '
- 'more than one "resources{N}" parameter.'))
-
- try:
- cands = rp_obj.AllocationCandidates.get_by_requests(
- context, requests, limit=limit, group_policy=group_policy)
- except exception.ResourceClassNotFound as exc:
- raise webob.exc.HTTPBadRequest(
- _('Invalid resource class in resources parameter: %(error)s') %
- {'error': exc})
- except exception.TraitNotFound as exc:
- raise webob.exc.HTTPBadRequest(six.text_type(exc))
-
- response = req.response
- trx_cands = _transform_allocation_candidates(cands, requests, want_version)
- json_data = jsonutils.dumps(trx_cands)
- response.body = encodeutils.to_utf8(json_data)
- response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- response.cache_control = 'no-cache'
- response.last_modified = timeutils.utcnow(with_timezone=True)
- return response
diff --git a/nova/api/openstack/placement/handlers/inventory.py b/nova/api/openstack/placement/handlers/inventory.py
deleted file mode 100644
index 019ada01aa..0000000000
--- a/nova/api/openstack/placement/handlers/inventory.py
+++ /dev/null
@@ -1,467 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Inventory handlers for Placement API."""
-
-import copy
-import operator
-
-from oslo_db import exception as db_exc
-from oslo_serialization import jsonutils
-from oslo_utils import encodeutils
-import webob
-
-from nova.api.openstack.placement import errors
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import microversion
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova.api.openstack.placement.policies import inventory as policies
-from nova.api.openstack.placement.schemas import inventory as schema
-from nova.api.openstack.placement import util
-from nova.api.openstack.placement import wsgi_wrapper
-from nova.db import constants as db_const
-from nova.i18n import _
-
-
-# NOTE(cdent): We keep our own representation of inventory defaults
-# and output fields, separate from the versioned object to avoid
-# inadvertent API changes when the object defaults are changed.
-OUTPUT_INVENTORY_FIELDS = [
- 'total',
- 'reserved',
- 'min_unit',
- 'max_unit',
- 'step_size',
- 'allocation_ratio',
-]
-INVENTORY_DEFAULTS = {
- 'reserved': 0,
- 'min_unit': 1,
- 'max_unit': db_const.MAX_INT,
- 'step_size': 1,
- 'allocation_ratio': 1.0
-}
-
-
-def _extract_inventory(body, schema):
- """Extract and validate inventory from JSON body."""
- data = util.extract_json(body, schema)
-
- inventory_data = copy.copy(INVENTORY_DEFAULTS)
- inventory_data.update(data)
-
- return inventory_data
-
-
-def _extract_inventories(body, schema):
- """Extract and validate multiple inventories from JSON body."""
- data = util.extract_json(body, schema)
-
- inventories = {}
- for res_class, raw_inventory in data['inventories'].items():
- inventory_data = copy.copy(INVENTORY_DEFAULTS)
- inventory_data.update(raw_inventory)
- inventories[res_class] = inventory_data
-
- data['inventories'] = inventories
- return data
-
-
-def make_inventory_object(resource_provider, resource_class, **data):
- """Single place to catch malformed Inventories."""
- # TODO(cdent): Some of the validation checks that are done here
- # could be done via JSONschema (using, for example, "minimum":
- # 0) for non-negative integers. It's not clear if that is
- # duplication or decoupling so leaving it as this for now.
- try:
- inventory = rp_obj.Inventory(
- resource_provider=resource_provider,
- resource_class=resource_class, **data)
- except (ValueError, TypeError) as exc:
- raise webob.exc.HTTPBadRequest(
- _('Bad inventory %(class)s for resource provider '
- '%(rp_uuid)s: %(error)s') % {'class': resource_class,
- 'rp_uuid': resource_provider.uuid,
- 'error': exc})
- return inventory
-
-
-def _send_inventories(req, resource_provider, inventories):
- """Send a JSON representation of a list of inventories."""
- response = req.response
- response.status = 200
- output, last_modified = _serialize_inventories(
- inventories, resource_provider.generation)
- response.body = encodeutils.to_utf8(jsonutils.dumps(output))
- response.content_type = 'application/json'
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- if want_version.matches((1, 15)):
- response.last_modified = last_modified
- response.cache_control = 'no-cache'
- return response
-
-
-def _send_inventory(req, resource_provider, inventory, status=200):
- """Send a JSON representation of one single inventory."""
- response = req.response
- response.status = status
- response.body = encodeutils.to_utf8(jsonutils.dumps(_serialize_inventory(
- inventory, generation=resource_provider.generation)))
- response.content_type = 'application/json'
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- if want_version.matches((1, 15)):
- modified = util.pick_last_modified(None, inventory)
- response.last_modified = modified
- response.cache_control = 'no-cache'
- return response
-
-
-def _serialize_inventory(inventory, generation=None):
- """Turn a single inventory into a dictionary."""
- data = {
- field: getattr(inventory, field)
- for field in OUTPUT_INVENTORY_FIELDS
- }
- if generation:
- data['resource_provider_generation'] = generation
- return data
-
-
-def _serialize_inventories(inventories, generation):
- """Turn a list of inventories in a dict by resource class."""
- inventories_by_class = {inventory.resource_class: inventory
- for inventory in inventories}
- inventories_dict = {}
- last_modified = None
- for resource_class, inventory in inventories_by_class.items():
- last_modified = util.pick_last_modified(last_modified, inventory)
- inventories_dict[resource_class] = _serialize_inventory(
- inventory, generation=None)
- return ({'resource_provider_generation': generation,
- 'inventories': inventories_dict}, last_modified)
-
-
-def _validate_inventory_capacity(version, inventories):
- """Validate inventory capacity.
-
- :param version: request microversion.
- :param inventories: Inventory or InventoryList to validate capacities of.
- :raises: exception.InvalidInventoryCapacityReservedCanBeTotal if request
- microversion is 1.26 or higher and any inventory has capacity < 0.
- :raises: exception.InvalidInventoryCapacity if request
- microversion is lower than 1.26 and any inventory has capacity <= 0.
- """
- if not version.matches((1, 26)):
- op = operator.le
- exc_class = exception.InvalidInventoryCapacity
- else:
- op = operator.lt
- exc_class = exception.InvalidInventoryCapacityReservedCanBeTotal
- if isinstance(inventories, rp_obj.Inventory):
- inventories = rp_obj.InventoryList(objects=[inventories])
- for inventory in inventories:
- if op(inventory.capacity, 0):
- raise exc_class(
- resource_class=inventory.resource_class,
- resource_provider=inventory.resource_provider.uuid)
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.require_content('application/json')
-def create_inventory(req):
- """POST to create one inventory.
-
- On success return a 201 response, a location header pointing
- to the newly created inventory and an application/json representation
- of the inventory.
- """
- context = req.environ['placement.context']
- context.can(policies.CREATE)
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
- data = _extract_inventory(req.body, schema.POST_INVENTORY_SCHEMA)
- resource_class = data.pop('resource_class')
-
- inventory = make_inventory_object(resource_provider,
- resource_class,
- **data)
-
- try:
- _validate_inventory_capacity(
- req.environ[microversion.MICROVERSION_ENVIRON], inventory)
- resource_provider.add_inventory(inventory)
- except (exception.ConcurrentUpdateDetected,
- db_exc.DBDuplicateEntry) as exc:
- raise webob.exc.HTTPConflict(
- _('Update conflict: %(error)s') % {'error': exc},
- comment=errors.CONCURRENT_UPDATE)
- except (exception.InvalidInventoryCapacity,
- exception.NotFound) as exc:
- raise webob.exc.HTTPBadRequest(
- _('Unable to create inventory for resource provider '
- '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
- 'error': exc})
-
- response = req.response
- response.location = util.inventory_url(
- req.environ, resource_provider, resource_class)
- return _send_inventory(req, resource_provider, inventory,
- status=201)
-
-
-@wsgi_wrapper.PlacementWsgify
-def delete_inventory(req):
- """DELETE to destroy a single inventory.
-
- If the inventory is in use or resource provider generation is out
- of sync return a 409.
-
- On success return a 204 and an empty body.
- """
- context = req.environ['placement.context']
- context.can(policies.DELETE)
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- resource_class = util.wsgi_path_item(req.environ, 'resource_class')
-
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
- try:
- resource_provider.delete_inventory(resource_class)
- except (exception.ConcurrentUpdateDetected,
- exception.InventoryInUse) as exc:
- raise webob.exc.HTTPConflict(
- _('Unable to delete inventory of class %(class)s: %(error)s') %
- {'class': resource_class, 'error': exc},
- comment=errors.CONCURRENT_UPDATE)
- except exception.NotFound as exc:
- raise webob.exc.HTTPNotFound(
- _('No inventory of class %(class)s found for delete: %(error)s') %
- {'class': resource_class, 'error': exc})
-
- response = req.response
- response.status = 204
- response.content_type = None
- return response
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.check_accept('application/json')
-def get_inventories(req):
- """GET a list of inventories.
-
- On success return a 200 with an application/json body representing
- a collection of inventories.
- """
- context = req.environ['placement.context']
- context.can(policies.LIST)
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- try:
- rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
- except exception.NotFound as exc:
- raise webob.exc.HTTPNotFound(
- _("No resource provider with uuid %(uuid)s found : %(error)s") %
- {'uuid': uuid, 'error': exc})
-
- inv_list = rp_obj.InventoryList.get_all_by_resource_provider(context, rp)
-
- return _send_inventories(req, rp, inv_list)
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.check_accept('application/json')
-def get_inventory(req):
- """GET one inventory.
-
- On success return a 200 an application/json body representing one
- inventory.
- """
- context = req.environ['placement.context']
- context.can(policies.SHOW)
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- resource_class = util.wsgi_path_item(req.environ, 'resource_class')
- try:
- rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
- except exception.NotFound as exc:
- raise webob.exc.HTTPNotFound(
- _("No resource provider with uuid %(uuid)s found : %(error)s") %
- {'uuid': uuid, 'error': exc})
-
- inv_list = rp_obj.InventoryList.get_all_by_resource_provider(context, rp)
- inventory = inv_list.find(resource_class)
-
- if not inventory:
- raise webob.exc.HTTPNotFound(
- _('No inventory of class %(class)s for %(rp_uuid)s') %
- {'class': resource_class, 'rp_uuid': uuid})
-
- return _send_inventory(req, rp, inventory)
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.require_content('application/json')
-def set_inventories(req):
- """PUT to set all inventory for a resource provider.
-
- Create, update and delete inventory as required to reset all
- the inventory.
-
- If the resource generation is out of sync, return a 409.
- If an inventory to be deleted is in use, return a 409.
- If any inventory to be created or updated has settings which are
- invalid (for example reserved exceeds capacity), return a 400.
-
- On success return a 200 with an application/json body representing
- the inventories.
- """
- context = req.environ['placement.context']
- context.can(policies.UPDATE)
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
-
- data = _extract_inventories(req.body, schema.PUT_INVENTORY_SCHEMA)
- if data['resource_provider_generation'] != resource_provider.generation:
- raise webob.exc.HTTPConflict(
- _('resource provider generation conflict'),
- comment=errors.CONCURRENT_UPDATE)
-
- inv_list = []
- for res_class, inventory_data in data['inventories'].items():
- inventory = make_inventory_object(
- resource_provider, res_class, **inventory_data)
- inv_list.append(inventory)
- inventories = rp_obj.InventoryList(objects=inv_list)
-
- try:
- _validate_inventory_capacity(
- req.environ[microversion.MICROVERSION_ENVIRON], inventories)
- resource_provider.set_inventory(inventories)
- except exception.ResourceClassNotFound as exc:
- raise webob.exc.HTTPBadRequest(
- _('Unknown resource class in inventory for resource provider '
- '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
- 'error': exc})
- except exception.InventoryWithResourceClassNotFound as exc:
- raise webob.exc.HTTPConflict(
- _('Race condition detected when setting inventory. No inventory '
- 'record with resource class for resource provider '
- '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
- 'error': exc})
- except (exception.ConcurrentUpdateDetected,
- db_exc.DBDuplicateEntry) as exc:
- raise webob.exc.HTTPConflict(
- _('update conflict: %(error)s') % {'error': exc},
- comment=errors.CONCURRENT_UPDATE)
- except exception.InventoryInUse as exc:
- raise webob.exc.HTTPConflict(
- _('update conflict: %(error)s') % {'error': exc},
- comment=errors.INVENTORY_INUSE)
- except exception.InvalidInventoryCapacity as exc:
- raise webob.exc.HTTPBadRequest(
- _('Unable to update inventory for resource provider '
- '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
- 'error': exc})
-
- return _send_inventories(req, resource_provider, inventories)
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.5', status_code=405)
-def delete_inventories(req):
- """DELETE all inventory for a resource provider.
-
- Delete inventory as required to reset all the inventory.
- If an inventory to be deleted is in use, return a 409 Conflict.
- On success return a 204 No content.
- Return 405 Method Not Allowed if the wanted microversion does not match.
- """
- context = req.environ['placement.context']
- context.can(policies.DELETE)
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
-
- inventories = rp_obj.InventoryList(objects=[])
-
- try:
- resource_provider.set_inventory(inventories)
- except exception.ConcurrentUpdateDetected:
- raise webob.exc.HTTPConflict(
- _('Unable to delete inventory for resource provider '
- '%(rp_uuid)s because the inventory was updated by '
- 'another process. Please retry your request.')
- % {'rp_uuid': resource_provider.uuid},
- comment=errors.CONCURRENT_UPDATE)
- except exception.InventoryInUse as ex:
- # NOTE(mriedem): This message cannot change without impacting the
- # nova.scheduler.client.report._RE_INV_IN_USE regex.
- raise webob.exc.HTTPConflict(ex.format_message(),
- comment=errors.INVENTORY_INUSE)
-
- response = req.response
- response.status = 204
- response.content_type = None
-
- return response
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.require_content('application/json')
-def update_inventory(req):
- """PUT to update one inventory.
-
- If the resource generation is out of sync, return a 409.
- If the inventory has settings which are invalid (for example
- reserved exceeds capacity), return a 400.
-
- On success return a 200 with an application/json body representing
- the inventory.
- """
- context = req.environ['placement.context']
- context.can(policies.UPDATE)
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- resource_class = util.wsgi_path_item(req.environ, 'resource_class')
-
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
-
- data = _extract_inventory(req.body, schema.BASE_INVENTORY_SCHEMA)
- if data['resource_provider_generation'] != resource_provider.generation:
- raise webob.exc.HTTPConflict(
- _('resource provider generation conflict'),
- comment=errors.CONCURRENT_UPDATE)
-
- inventory = make_inventory_object(resource_provider,
- resource_class,
- **data)
-
- try:
- _validate_inventory_capacity(
- req.environ[microversion.MICROVERSION_ENVIRON], inventory)
- resource_provider.update_inventory(inventory)
- except (exception.ConcurrentUpdateDetected,
- db_exc.DBDuplicateEntry) as exc:
- raise webob.exc.HTTPConflict(
- _('update conflict: %(error)s') % {'error': exc},
- comment=errors.CONCURRENT_UPDATE)
- except exception.InventoryWithResourceClassNotFound as exc:
- raise webob.exc.HTTPBadRequest(
- _('No inventory record with resource class for resource provider '
- '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
- 'error': exc})
- except exception.InvalidInventoryCapacity as exc:
- raise webob.exc.HTTPBadRequest(
- _('Unable to update inventory for resource provider '
- '%(rp_uuid)s: %(error)s') % {'rp_uuid': resource_provider.uuid,
- 'error': exc})
-
- return _send_inventory(req, resource_provider, inventory)
diff --git a/nova/api/openstack/placement/handlers/reshaper.py b/nova/api/openstack/placement/handlers/reshaper.py
deleted file mode 100644
index 0351ffd286..0000000000
--- a/nova/api/openstack/placement/handlers/reshaper.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Placement API handler for the reshaper.
-
-The reshaper provides for atomically migrating resource provider inventories
-and associated allocations when some of the inventory moves from one resource
-provider to another, such as when a class of inventory moves from a parent
-provider to a new child provider.
-"""
-
-import copy
-
-from oslo_utils import excutils
-import webob
-
-from nova.api.openstack.placement import errors
-from nova.api.openstack.placement import exception
-# TODO(cdent): That we are doing this suggests that there's stuff to be
-# extracted from the handler to a shared module.
-from nova.api.openstack.placement.handlers import allocation
-from nova.api.openstack.placement.handlers import inventory
-from nova.api.openstack.placement import microversion
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova.api.openstack.placement.policies import reshaper as policies
-from nova.api.openstack.placement.schemas import reshaper as schema
-from nova.api.openstack.placement import util
-from nova.api.openstack.placement import wsgi_wrapper
-# TODO(cdent): placement needs its own version of this
-from nova.i18n import _
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.30')
-@util.require_content('application/json')
-def reshape(req):
- context = req.environ['placement.context']
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- context.can(policies.RESHAPE)
- data = util.extract_json(req.body, schema.POST_RESHAPER_SCHEMA)
- inventories = data['inventories']
- allocations = data['allocations']
- # We're going to create several InventoryList, by rp uuid.
- inventory_by_rp = {}
-
- # TODO(cdent): this has overlaps with inventory:set_inventories
- # and is a mess of bad names and lack of method extraction.
- for rp_uuid, inventory_data in inventories.items():
- try:
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, rp_uuid)
- except exception.NotFound as exc:
- raise webob.exc.HTTPBadRequest(
- _('Resource provider %(rp_uuid)s in inventories not found: '
- '%(error)s') % {'rp_uuid': rp_uuid, 'error': exc},
- comment=errors.RESOURCE_PROVIDER_NOT_FOUND)
-
- # Do an early generation check.
- generation = inventory_data['resource_provider_generation']
- if generation != resource_provider.generation:
- raise webob.exc.HTTPConflict(
- _('resource provider generation conflict for provider %(rp)s: '
- 'actual: %(actual)s, given: %(given)s') %
- {'rp': rp_uuid,
- 'actual': resource_provider.generation,
- 'given': generation},
- comment=errors.CONCURRENT_UPDATE)
-
- inv_list = []
- for res_class, raw_inventory in inventory_data['inventories'].items():
- inv_data = copy.copy(inventory.INVENTORY_DEFAULTS)
- inv_data.update(raw_inventory)
- inv_obj = inventory.make_inventory_object(
- resource_provider, res_class, **inv_data)
- inv_list.append(inv_obj)
- inventory_by_rp[resource_provider] = rp_obj.InventoryList(
- objects=inv_list)
-
- # Make the consumer objects associated with the allocations.
- consumers, new_consumers_created = allocation.inspect_consumers(
- context, allocations, want_version)
-
- # Nest exception handling so that any exception results in new consumer
- # objects being deleted, then reraise for translating to HTTP exceptions.
- try:
- try:
- # When these allocations are created they get resource provider
- # objects which are different instances (usually with the same
- # data) from those loaded above when creating inventory objects.
- # The reshape method below is responsible for ensuring that the
- # resource providers and their generations do not conflict.
- allocation_objects = allocation.create_allocation_list(
- context, allocations, consumers)
-
- rp_obj.reshape(context, inventory_by_rp, allocation_objects)
- except Exception:
- with excutils.save_and_reraise_exception():
- allocation.delete_consumers(new_consumers_created)
- # Generation conflict is a (rare) possibility in a few different
- # places in reshape().
- except exception.ConcurrentUpdateDetected as exc:
- raise webob.exc.HTTPConflict(
- _('update conflict: %(error)s') % {'error': exc},
- comment=errors.CONCURRENT_UPDATE)
- # A NotFound here means a resource class that does not exist was named
- except exception.NotFound as exc:
- raise webob.exc.HTTPBadRequest(
- _('malformed reshaper data: %(error)s') % {'error': exc})
- # Distinguish inventory in use (has allocations on it)...
- except exception.InventoryInUse as exc:
- raise webob.exc.HTTPConflict(
- _('update conflict: %(error)s') % {'error': exc},
- comment=errors.INVENTORY_INUSE)
- # ...from allocations which won't fit for a variety of reasons.
- except exception.InvalidInventory as exc:
- raise webob.exc.HTTPConflict(
- _('Unable to allocate inventory: %(error)s') % {'error': exc})
-
- req.response.status = 204
- req.response.content_type = None
- return req.response
diff --git a/nova/api/openstack/placement/handlers/resource_class.py b/nova/api/openstack/placement/handlers/resource_class.py
deleted file mode 100644
index b8b0324a9e..0000000000
--- a/nova/api/openstack/placement/handlers/resource_class.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Placement API handlers for resource classes."""
-
-from oslo_serialization import jsonutils
-from oslo_utils import encodeutils
-from oslo_utils import timeutils
-import webob
-
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import microversion
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova.api.openstack.placement.policies import resource_class as policies
-from nova.api.openstack.placement.schemas import resource_class as schema
-from nova.api.openstack.placement import util
-from nova.api.openstack.placement import wsgi_wrapper
-from nova.i18n import _
-
-
-def _serialize_links(environ, rc):
- url = util.resource_class_url(environ, rc)
- links = [{'rel': 'self', 'href': url}]
- return links
-
-
-def _serialize_resource_class(environ, rc):
- data = {
- 'name': rc.name,
- 'links': _serialize_links(environ, rc)
- }
- return data
-
-
-def _serialize_resource_classes(environ, rcs, want_version):
- output = []
- last_modified = None
- get_last_modified = want_version.matches((1, 15))
- for rc in rcs:
- if get_last_modified:
- last_modified = util.pick_last_modified(last_modified, rc)
- data = _serialize_resource_class(environ, rc)
- output.append(data)
- last_modified = last_modified or timeutils.utcnow(with_timezone=True)
- return ({"resource_classes": output}, last_modified)
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.2')
-@util.require_content('application/json')
-def create_resource_class(req):
- """POST to create a resource class.
-
- On success return a 201 response with an empty body and a location
- header pointing to the newly created resource class.
- """
- context = req.environ['placement.context']
- context.can(policies.CREATE)
- data = util.extract_json(req.body, schema.POST_RC_SCHEMA_V1_2)
-
- try:
- rc = rp_obj.ResourceClass(context, name=data['name'])
- rc.create()
- except exception.ResourceClassExists:
- raise webob.exc.HTTPConflict(
- _('Conflicting resource class already exists: %(name)s') %
- {'name': data['name']})
- except exception.MaxDBRetriesExceeded:
- raise webob.exc.HTTPConflict(
- _('Max retries of DB transaction exceeded attempting '
- 'to create resource class: %(name)s, please '
- 'try again.') %
- {'name': data['name']})
-
- req.response.location = util.resource_class_url(req.environ, rc)
- req.response.status = 201
- req.response.content_type = None
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.2')
-def delete_resource_class(req):
- """DELETE to destroy a single resource class.
-
- On success return a 204 and an empty body.
- """
- name = util.wsgi_path_item(req.environ, 'name')
- context = req.environ['placement.context']
- context.can(policies.DELETE)
- # The containing application will catch a not found here.
- rc = rp_obj.ResourceClass.get_by_name(context, name)
- try:
- rc.destroy()
- except exception.ResourceClassCannotDeleteStandard as exc:
- raise webob.exc.HTTPBadRequest(
- _('Error in delete resource class: %(error)s') % {'error': exc})
- except exception.ResourceClassInUse as exc:
- raise webob.exc.HTTPConflict(
- _('Error in delete resource class: %(error)s') % {'error': exc})
- req.response.status = 204
- req.response.content_type = None
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.2')
-@util.check_accept('application/json')
-def get_resource_class(req):
- """Get a single resource class.
-
- On success return a 200 with an application/json body representing
- the resource class.
- """
- name = util.wsgi_path_item(req.environ, 'name')
- context = req.environ['placement.context']
- context.can(policies.SHOW)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- # The containing application will catch a not found here.
- rc = rp_obj.ResourceClass.get_by_name(context, name)
-
- req.response.body = encodeutils.to_utf8(jsonutils.dumps(
- _serialize_resource_class(req.environ, rc))
- )
- req.response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- req.response.cache_control = 'no-cache'
- # Non-custom resource classes will return None from pick_last_modified,
- # so the 'or' causes utcnow to be used.
- last_modified = util.pick_last_modified(None, rc) or timeutils.utcnow(
- with_timezone=True)
- req.response.last_modified = last_modified
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.2')
-@util.check_accept('application/json')
-def list_resource_classes(req):
- """GET a list of resource classes.
-
- On success return a 200 and an application/json body representing
- a collection of resource classes.
- """
- context = req.environ['placement.context']
- context.can(policies.LIST)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- rcs = rp_obj.ResourceClassList.get_all(context)
-
- response = req.response
- output, last_modified = _serialize_resource_classes(
- req.environ, rcs, want_version)
- response.body = encodeutils.to_utf8(jsonutils.dumps(output))
- response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- response.last_modified = last_modified
- response.cache_control = 'no-cache'
- return response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.2', '1.6')
-@util.require_content('application/json')
-def update_resource_class(req):
- """PUT to update a single resource class.
-
- On success return a 200 response with a representation of the updated
- resource class.
- """
- name = util.wsgi_path_item(req.environ, 'name')
- context = req.environ['placement.context']
- context.can(policies.UPDATE)
-
- data = util.extract_json(req.body, schema.PUT_RC_SCHEMA_V1_2)
-
- # The containing application will catch a not found here.
- rc = rp_obj.ResourceClass.get_by_name(context, name)
-
- rc.name = data['name']
-
- try:
- rc.save()
- except exception.ResourceClassExists:
- raise webob.exc.HTTPConflict(
- _('Resource class already exists: %(name)s') %
- {'name': rc.name})
- except exception.ResourceClassCannotUpdateStandard:
- raise webob.exc.HTTPBadRequest(
- _('Cannot update standard resource class %(rp_name)s') %
- {'rp_name': name})
-
- req.response.body = encodeutils.to_utf8(jsonutils.dumps(
- _serialize_resource_class(req.environ, rc))
- )
- req.response.status = 200
- req.response.content_type = 'application/json'
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify # noqa
-@microversion.version_handler('1.7')
-def update_resource_class(req):
- """PUT to create or validate the existence of single resource class.
-
- On a successful create return 201. Return 204 if the class already
- exists. If the resource class is not a custom resource class, return
- a 400. 409 might be a better choice, but 400 aligns with previous code.
- """
- name = util.wsgi_path_item(req.environ, 'name')
- context = req.environ['placement.context']
- context.can(policies.UPDATE)
-
- # Use JSON validation to validation resource class name.
- util.extract_json('{"name": "%s"}' % name, schema.PUT_RC_SCHEMA_V1_2)
-
- status = 204
- try:
- rc = rp_obj.ResourceClass.get_by_name(context, name)
- except exception.NotFound:
- try:
- rc = rp_obj.ResourceClass(context, name=name)
- rc.create()
- status = 201
- # We will not see ResourceClassCannotUpdateStandard because
- # that was already caught when validating the {name}.
- except exception.ResourceClassExists:
- # Someone just now created the class, so stick with 204
- pass
-
- req.response.status = status
- req.response.content_type = None
- req.response.location = util.resource_class_url(req.environ, rc)
- return req.response
diff --git a/nova/api/openstack/placement/handlers/resource_provider.py b/nova/api/openstack/placement/handlers/resource_provider.py
deleted file mode 100644
index 8ac38ef635..0000000000
--- a/nova/api/openstack/placement/handlers/resource_provider.py
+++ /dev/null
@@ -1,308 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Placement API handlers for resource providers."""
-
-import uuid as uuidlib
-
-from oslo_db import exception as db_exc
-from oslo_serialization import jsonutils
-from oslo_utils import encodeutils
-from oslo_utils import timeutils
-from oslo_utils import uuidutils
-import webob
-
-from nova.api.openstack.placement import errors
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import microversion
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova.api.openstack.placement.policies import resource_provider as policies
-from nova.api.openstack.placement.schemas import resource_provider as rp_schema
-from nova.api.openstack.placement import util
-from nova.api.openstack.placement import wsgi_wrapper
-from nova.i18n import _
-
-
-def _serialize_links(environ, resource_provider):
- url = util.resource_provider_url(environ, resource_provider)
- links = [{'rel': 'self', 'href': url}]
- rel_types = ['inventories', 'usages']
- want_version = environ[microversion.MICROVERSION_ENVIRON]
- if want_version >= (1, 1):
- rel_types.append('aggregates')
- if want_version >= (1, 6):
- rel_types.append('traits')
- if want_version >= (1, 11):
- rel_types.append('allocations')
- for rel in rel_types:
- links.append({'rel': rel, 'href': '%s/%s' % (url, rel)})
- return links
-
-
-def _serialize_provider(environ, resource_provider, want_version):
- data = {
- 'uuid': resource_provider.uuid,
- 'name': resource_provider.name,
- 'generation': resource_provider.generation,
- 'links': _serialize_links(environ, resource_provider)
- }
- if want_version.matches((1, 14)):
- data['parent_provider_uuid'] = resource_provider.parent_provider_uuid
- data['root_provider_uuid'] = resource_provider.root_provider_uuid
- return data
-
-
-def _serialize_providers(environ, resource_providers, want_version):
- output = []
- last_modified = None
- get_last_modified = want_version.matches((1, 15))
- for provider in resource_providers:
- if get_last_modified:
- last_modified = util.pick_last_modified(last_modified, provider)
- provider_data = _serialize_provider(environ, provider, want_version)
- output.append(provider_data)
- last_modified = last_modified or timeutils.utcnow(with_timezone=True)
- return ({"resource_providers": output}, last_modified)
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.require_content('application/json')
-def create_resource_provider(req):
- """POST to create a resource provider.
-
- On success return a 201 response with an empty body
- (microversions 1.0 - 1.19) or a 200 response with a
- payload representing the newly created resource provider
- (microversions 1.20 - latest), and a location header
- pointing to the resource provider.
- """
- context = req.environ['placement.context']
- context.can(policies.CREATE)
- schema = rp_schema.POST_RESOURCE_PROVIDER_SCHEMA
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- if want_version.matches((1, 14)):
- schema = rp_schema.POST_RP_SCHEMA_V1_14
- data = util.extract_json(req.body, schema)
-
- try:
- if data.get('uuid'):
- # Normalize UUID with no proper dashes into dashed one
- # with format {8}-{4}-{4}-{4}-{12}
- data['uuid'] = str(uuidlib.UUID(data['uuid']))
- else:
- data['uuid'] = uuidutils.generate_uuid()
-
- resource_provider = rp_obj.ResourceProvider(context, **data)
- resource_provider.create()
- except db_exc.DBDuplicateEntry as exc:
- # Whether exc.columns has one or two entries (in the event
- # of both fields being duplicates) appears to be database
- # dependent, so going with the complete solution here.
- duplicate = ', '.join(['%s: %s' % (column, data[column])
- for column in exc.columns])
- raise webob.exc.HTTPConflict(
- _('Conflicting resource provider %(duplicate)s already exists.') %
- {'duplicate': duplicate},
- comment=errors.DUPLICATE_NAME)
- except exception.ObjectActionError as exc:
- raise webob.exc.HTTPBadRequest(
- _('Unable to create resource provider "%(name)s", %(rp_uuid)s: '
- '%(error)s') %
- {'name': data['name'], 'rp_uuid': data['uuid'], 'error': exc})
-
- req.response.location = util.resource_provider_url(
- req.environ, resource_provider)
- if want_version.matches(min_version=(1, 20)):
- req.response.body = encodeutils.to_utf8(jsonutils.dumps(
- _serialize_provider(req.environ, resource_provider, want_version)))
- req.response.content_type = 'application/json'
- modified = util.pick_last_modified(None, resource_provider)
- req.response.last_modified = modified
- req.response.cache_control = 'no-cache'
- else:
- req.response.status = 201
- req.response.content_type = None
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-def delete_resource_provider(req):
- """DELETE to destroy a single resource provider.
-
- On success return a 204 and an empty body.
- """
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- context = req.environ['placement.context']
- context.can(policies.DELETE)
- # The containing application will catch a not found here.
- try:
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
- resource_provider.destroy()
- except exception.ResourceProviderInUse as exc:
- raise webob.exc.HTTPConflict(
- _('Unable to delete resource provider %(rp_uuid)s: %(error)s') %
- {'rp_uuid': uuid, 'error': exc},
- comment=errors.PROVIDER_IN_USE)
- except exception.NotFound as exc:
- raise webob.exc.HTTPNotFound(
- _("No resource provider with uuid %s found for delete") % uuid)
- except exception.CannotDeleteParentResourceProvider as exc:
- raise webob.exc.HTTPConflict(
- _("Unable to delete parent resource provider %(rp_uuid)s: "
- "It has child resource providers.") % {'rp_uuid': uuid},
- comment=errors.PROVIDER_CANNOT_DELETE_PARENT)
- req.response.status = 204
- req.response.content_type = None
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.check_accept('application/json')
-def get_resource_provider(req):
- """Get a single resource provider.
-
- On success return a 200 with an application/json body representing
- the resource provider.
- """
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- context = req.environ['placement.context']
- context.can(policies.SHOW)
-
- # The containing application will catch a not found here.
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
-
- response = req.response
- response.body = encodeutils.to_utf8(jsonutils.dumps(
- _serialize_provider(req.environ, resource_provider, want_version)))
- response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- modified = util.pick_last_modified(None, resource_provider)
- response.last_modified = modified
- response.cache_control = 'no-cache'
- return response
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.check_accept('application/json')
-def list_resource_providers(req):
- """GET a list of resource providers.
-
- On success return a 200 and an application/json body representing
- a collection of resource providers.
- """
- context = req.environ['placement.context']
- context.can(policies.LIST)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
-
- schema = rp_schema.GET_RPS_SCHEMA_1_0
- if want_version.matches((1, 18)):
- schema = rp_schema.GET_RPS_SCHEMA_1_18
- elif want_version.matches((1, 14)):
- schema = rp_schema.GET_RPS_SCHEMA_1_14
- elif want_version.matches((1, 4)):
- schema = rp_schema.GET_RPS_SCHEMA_1_4
- elif want_version.matches((1, 3)):
- schema = rp_schema.GET_RPS_SCHEMA_1_3
-
- allow_forbidden = want_version.matches((1, 22))
-
- util.validate_query_params(req, schema)
-
- filters = {}
- # special handling of member_of qparam since we allow multiple member_of
- # params at microversion 1.24.
- if 'member_of' in req.GET:
- filters['member_of'] = util.normalize_member_of_qs_params(req)
-
- qpkeys = ('uuid', 'name', 'in_tree', 'resources', 'required')
- for attr in qpkeys:
- if attr in req.GET:
- value = req.GET[attr]
- if attr == 'resources':
- value = util.normalize_resources_qs_param(value)
- elif attr == 'required':
- value = util.normalize_traits_qs_param(
- value, allow_forbidden=allow_forbidden)
- filters[attr] = value
- try:
- resource_providers = rp_obj.ResourceProviderList.get_all_by_filters(
- context, filters)
- except exception.ResourceClassNotFound as exc:
- raise webob.exc.HTTPBadRequest(
- _('Invalid resource class in resources parameter: %(error)s') %
- {'error': exc})
- except exception.TraitNotFound as exc:
- raise webob.exc.HTTPBadRequest(
- _('Invalid trait(s) in "required" parameter: %(error)s') %
- {'error': exc})
-
- response = req.response
- output, last_modified = _serialize_providers(
- req.environ, resource_providers, want_version)
- response.body = encodeutils.to_utf8(jsonutils.dumps(output))
- response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- response.last_modified = last_modified
- response.cache_control = 'no-cache'
- return response
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.require_content('application/json')
-def update_resource_provider(req):
- """PUT to update a single resource provider.
-
- On success return a 200 response with a representation of the updated
- resource provider.
- """
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- context = req.environ['placement.context']
- context.can(policies.UPDATE)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
-
- # The containing application will catch a not found here.
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
-
- schema = rp_schema.PUT_RESOURCE_PROVIDER_SCHEMA
- if want_version.matches((1, 14)):
- schema = rp_schema.PUT_RP_SCHEMA_V1_14
-
- data = util.extract_json(req.body, schema)
-
- for field in rp_obj.ResourceProvider.SETTABLE_FIELDS:
- if field in data:
- setattr(resource_provider, field, data[field])
-
- try:
- resource_provider.save()
- except db_exc.DBDuplicateEntry as exc:
- raise webob.exc.HTTPConflict(
- _('Conflicting resource provider %(name)s already exists.') %
- {'name': data['name']},
- comment=errors.DUPLICATE_NAME)
- except exception.ObjectActionError as exc:
- raise webob.exc.HTTPBadRequest(
- _('Unable to save resource provider %(rp_uuid)s: %(error)s') %
- {'rp_uuid': uuid, 'error': exc})
-
- response = req.response
- response.status = 200
- response.body = encodeutils.to_utf8(jsonutils.dumps(
- _serialize_provider(req.environ, resource_provider, want_version)))
- response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- response.last_modified = resource_provider.updated_at
- response.cache_control = 'no-cache'
- return response
diff --git a/nova/api/openstack/placement/handlers/root.py b/nova/api/openstack/placement/handlers/root.py
deleted file mode 100644
index 298dab3816..0000000000
--- a/nova/api/openstack/placement/handlers/root.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Handler for the root of the Placement API."""
-
-from oslo_serialization import jsonutils
-from oslo_utils import encodeutils
-from oslo_utils import timeutils
-
-
-from nova.api.openstack.placement import microversion
-from nova.api.openstack.placement import wsgi_wrapper
-
-
-@wsgi_wrapper.PlacementWsgify
-def home(req):
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- min_version = microversion.min_version_string()
- max_version = microversion.max_version_string()
- # NOTE(cdent): As sections of the api are added, links can be
- # added to this output to align with the guidelines at
- # http://specs.openstack.org/openstack/api-wg/guidelines/microversion_specification.html#version-discovery
- version_data = {
- 'id': 'v%s' % min_version,
- 'max_version': max_version,
- 'min_version': min_version,
- # for now there is only ever one version, so it must be CURRENT
- 'status': 'CURRENT',
- 'links': [{
- # Point back to this same URL as the root of this version.
- # NOTE(cdent): We explicitly want this to be a relative-URL
- # representation of "this same URL", otherwise placement needs
- # to keep track of proxy addresses and the like, which we have
- # avoided thus far, in order to construct full URLs. Placement
- # is much easier to scale if we never track that stuff.
- 'rel': 'self',
- 'href': '',
- }],
- }
- version_json = jsonutils.dumps({'versions': [version_data]})
- req.response.body = encodeutils.to_utf8(version_json)
- req.response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- req.response.cache_control = 'no-cache'
- req.response.last_modified = timeutils.utcnow(with_timezone=True)
- return req.response
diff --git a/nova/api/openstack/placement/handlers/trait.py b/nova/api/openstack/placement/handlers/trait.py
deleted file mode 100644
index b76907f1ad..0000000000
--- a/nova/api/openstack/placement/handlers/trait.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Traits handlers for Placement API."""
-
-import jsonschema
-from oslo_serialization import jsonutils
-from oslo_utils import encodeutils
-from oslo_utils import timeutils
-import webob
-
-from nova.api.openstack.placement import errors
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import microversion
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova.api.openstack.placement.policies import trait as policies
-from nova.api.openstack.placement.schemas import trait as schema
-from nova.api.openstack.placement import util
-from nova.api.openstack.placement import wsgi_wrapper
-from nova.i18n import _
-
-
-def _normalize_traits_qs_param(qs):
- try:
- op, value = qs.split(':', 1)
- except ValueError:
- msg = _('Badly formatted name parameter. Expected name query string '
- 'parameter in form: '
- '?name=[in|startswith]:[name1,name2|prefix]. Got: "%s"')
- msg = msg % qs
- raise webob.exc.HTTPBadRequest(msg)
-
- filters = {}
- if op == 'in':
- filters['name_in'] = value.split(',')
- elif op == 'startswith':
- filters['prefix'] = value
-
- return filters
-
-
-def _serialize_traits(traits, want_version):
- last_modified = None
- get_last_modified = want_version.matches((1, 15))
- trait_names = []
- for trait in traits:
- if get_last_modified:
- last_modified = util.pick_last_modified(last_modified, trait)
- trait_names.append(trait.name)
-
- # If there were no traits, set last_modified to now
- last_modified = last_modified or timeutils.utcnow(with_timezone=True)
-
- return {'traits': trait_names}, last_modified
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.6')
-def put_trait(req):
- context = req.environ['placement.context']
- context.can(policies.TRAITS_UPDATE)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- name = util.wsgi_path_item(req.environ, 'name')
-
- try:
- jsonschema.validate(name, schema.CUSTOM_TRAIT)
- except jsonschema.ValidationError:
- raise webob.exc.HTTPBadRequest(
- _('The trait is invalid. A valid trait must be no longer than '
- '255 characters, start with the prefix "CUSTOM_" and use '
- 'following characters: "A"-"Z", "0"-"9" and "_"'))
-
- trait = rp_obj.Trait(context)
- trait.name = name
-
- try:
- trait.create()
- req.response.status = 201
- except exception.TraitExists:
- # Get the trait that already exists to get last-modified time.
- if want_version.matches((1, 15)):
- trait = rp_obj.Trait.get_by_name(context, name)
- req.response.status = 204
-
- req.response.content_type = None
- req.response.location = util.trait_url(req.environ, trait)
- if want_version.matches((1, 15)):
- req.response.last_modified = trait.created_at
- req.response.cache_control = 'no-cache'
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.6')
-def get_trait(req):
- context = req.environ['placement.context']
- context.can(policies.TRAITS_SHOW)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- name = util.wsgi_path_item(req.environ, 'name')
-
- try:
- trait = rp_obj.Trait.get_by_name(context, name)
- except exception.TraitNotFound as ex:
- raise webob.exc.HTTPNotFound(ex.format_message())
-
- req.response.status = 204
- req.response.content_type = None
- if want_version.matches((1, 15)):
- req.response.last_modified = trait.created_at
- req.response.cache_control = 'no-cache'
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.6')
-def delete_trait(req):
- context = req.environ['placement.context']
- context.can(policies.TRAITS_DELETE)
- name = util.wsgi_path_item(req.environ, 'name')
-
- try:
- trait = rp_obj.Trait.get_by_name(context, name)
- trait.destroy()
- except exception.TraitNotFound as ex:
- raise webob.exc.HTTPNotFound(ex.format_message())
- except exception.TraitCannotDeleteStandard as ex:
- raise webob.exc.HTTPBadRequest(ex.format_message())
- except exception.TraitInUse as ex:
- raise webob.exc.HTTPConflict(ex.format_message())
-
- req.response.status = 204
- req.response.content_type = None
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.6')
-@util.check_accept('application/json')
-def list_traits(req):
- context = req.environ['placement.context']
- context.can(policies.TRAITS_LIST)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- filters = {}
-
- util.validate_query_params(req, schema.LIST_TRAIT_SCHEMA)
-
- if 'name' in req.GET:
- filters = _normalize_traits_qs_param(req.GET['name'])
- if 'associated' in req.GET:
- if req.GET['associated'].lower() not in ['true', 'false']:
- raise webob.exc.HTTPBadRequest(
- _('The query parameter "associated" only accepts '
- '"true" or "false"'))
- filters['associated'] = (
- True if req.GET['associated'].lower() == 'true' else False)
-
- traits = rp_obj.TraitList.get_all(context, filters)
- req.response.status = 200
- output, last_modified = _serialize_traits(traits, want_version)
- if want_version.matches((1, 15)):
- req.response.last_modified = last_modified
- req.response.cache_control = 'no-cache'
- req.response.body = encodeutils.to_utf8(jsonutils.dumps(output))
- req.response.content_type = 'application/json'
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.6')
-@util.check_accept('application/json')
-def list_traits_for_resource_provider(req):
- context = req.environ['placement.context']
- context.can(policies.RP_TRAIT_LIST)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- uuid = util.wsgi_path_item(req.environ, 'uuid')
-
- # Resource provider object is needed for two things: If it is
- # NotFound we'll get a 404 here, which needs to happen because
- # get_all_by_resource_provider can return an empty list.
- # It is also needed for the generation, used in the outgoing
- # representation.
- try:
- rp = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
- except exception.NotFound as exc:
- raise webob.exc.HTTPNotFound(
- _("No resource provider with uuid %(uuid)s found: %(error)s") %
- {'uuid': uuid, 'error': exc})
-
- traits = rp_obj.TraitList.get_all_by_resource_provider(context, rp)
- response_body, last_modified = _serialize_traits(traits, want_version)
- response_body["resource_provider_generation"] = rp.generation
-
- if want_version.matches((1, 15)):
- req.response.last_modified = last_modified
- req.response.cache_control = 'no-cache'
-
- req.response.status = 200
- req.response.body = encodeutils.to_utf8(jsonutils.dumps(response_body))
- req.response.content_type = 'application/json'
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.6')
-@util.require_content('application/json')
-def update_traits_for_resource_provider(req):
- context = req.environ['placement.context']
- context.can(policies.RP_TRAIT_UPDATE)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- data = util.extract_json(req.body, schema.SET_TRAITS_FOR_RP_SCHEMA)
- rp_gen = data['resource_provider_generation']
- traits = data['traits']
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
-
- if resource_provider.generation != rp_gen:
- raise webob.exc.HTTPConflict(
- _("Resource provider's generation already changed. Please update "
- "the generation and try again."),
- json_formatter=util.json_error_formatter,
- comment=errors.CONCURRENT_UPDATE)
-
- trait_objs = rp_obj.TraitList.get_all(
- context, filters={'name_in': traits})
- traits_name = set([obj.name for obj in trait_objs])
- non_existed_trait = set(traits) - set(traits_name)
- if non_existed_trait:
- raise webob.exc.HTTPBadRequest(
- _("No such trait %s") % ', '.join(non_existed_trait))
-
- resource_provider.set_traits(trait_objs)
-
- response_body, last_modified = _serialize_traits(trait_objs, want_version)
- response_body[
- 'resource_provider_generation'] = resource_provider.generation
- if want_version.matches((1, 15)):
- req.response.last_modified = last_modified
- req.response.cache_control = 'no-cache'
- req.response.status = 200
- req.response.body = encodeutils.to_utf8(jsonutils.dumps(response_body))
- req.response.content_type = 'application/json'
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.6')
-def delete_traits_for_resource_provider(req):
- context = req.environ['placement.context']
- context.can(policies.RP_TRAIT_DELETE)
- uuid = util.wsgi_path_item(req.environ, 'uuid')
-
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(context, uuid)
- try:
- resource_provider.set_traits(rp_obj.TraitList(objects=[]))
- except exception.ConcurrentUpdateDetected as e:
- raise webob.exc.HTTPConflict(e.format_message(),
- comment=errors.CONCURRENT_UPDATE)
-
- req.response.status = 204
- req.response.content_type = None
- return req.response
diff --git a/nova/api/openstack/placement/handlers/usage.py b/nova/api/openstack/placement/handlers/usage.py
deleted file mode 100644
index 85213302d4..0000000000
--- a/nova/api/openstack/placement/handlers/usage.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Placement API handlers for usage information."""
-
-from oslo_serialization import jsonutils
-from oslo_utils import encodeutils
-from oslo_utils import timeutils
-import webob
-
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import microversion
-from nova.api.openstack.placement.objects import resource_provider as rp_obj
-from nova.api.openstack.placement.policies import usage as policies
-from nova.api.openstack.placement.schemas import usage as schema
-from nova.api.openstack.placement import util
-from nova.api.openstack.placement import wsgi_wrapper
-from nova.i18n import _
-
-
-def _serialize_usages(resource_provider, usage):
- usage_dict = {resource.resource_class: resource.usage
- for resource in usage}
- return {'resource_provider_generation': resource_provider.generation,
- 'usages': usage_dict}
-
-
-@wsgi_wrapper.PlacementWsgify
-@util.check_accept('application/json')
-def list_usages(req):
- """GET a dictionary of resource provider usage by resource class.
-
- If the resource provider does not exist return a 404.
-
- On success return a 200 with an application/json representation of
- the usage dictionary.
- """
- context = req.environ['placement.context']
- context.can(policies.PROVIDER_USAGES)
- uuid = util.wsgi_path_item(req.environ, 'uuid')
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
-
- # Resource provider object needed for two things: If it is
- # NotFound we'll get a 404 here, which needs to happen because
- # get_all_by_resource_provider_uuid can return an empty list.
- # It is also needed for the generation, used in the outgoing
- # representation.
- try:
- resource_provider = rp_obj.ResourceProvider.get_by_uuid(
- context, uuid)
- except exception.NotFound as exc:
- raise webob.exc.HTTPNotFound(
- _("No resource provider with uuid %(uuid)s found: %(error)s") %
- {'uuid': uuid, 'error': exc})
-
- usage = rp_obj.UsageList.get_all_by_resource_provider_uuid(
- context, uuid)
-
- response = req.response
- response.body = encodeutils.to_utf8(jsonutils.dumps(
- _serialize_usages(resource_provider, usage)))
- req.response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- req.response.cache_control = 'no-cache'
- # While it would be possible to generate a last-modified time
- # based on the collection of allocations that result in a usage
- # value (with some spelunking in the SQL) that doesn't align with
- # the question that is being asked in a request for usages: What
- # is the usage, now? So the last-modified time is set to utcnow.
- req.response.last_modified = timeutils.utcnow(with_timezone=True)
- return req.response
-
-
-@wsgi_wrapper.PlacementWsgify
-@microversion.version_handler('1.9')
-@util.check_accept('application/json')
-def get_total_usages(req):
- """GET the sum of usages for a project or a project/user.
-
- On success return a 200 and an application/json body representing the
- sum/total of usages.
- Return 404 Not Found if the wanted microversion does not match.
- """
- context = req.environ['placement.context']
- # TODO(mriedem): When we support non-admins to use GET /usages we
- # should pass the project_id (and user_id?) from the query parameters
- # into context.can() for the target.
- context.can(policies.TOTAL_USAGES)
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
-
- util.validate_query_params(req, schema.GET_USAGES_SCHEMA_1_9)
-
- project_id = req.GET.get('project_id')
- user_id = req.GET.get('user_id')
-
- usages = rp_obj.UsageList.get_all_by_project_user(context, project_id,
- user_id=user_id)
-
- response = req.response
- usages_dict = {'usages': {resource.resource_class: resource.usage
- for resource in usages}}
- response.body = encodeutils.to_utf8(jsonutils.dumps(usages_dict))
- req.response.content_type = 'application/json'
- if want_version.matches((1, 15)):
- req.response.cache_control = 'no-cache'
- # While it would be possible to generate a last-modified time
- # based on the collection of allocations that result in a usage
- # value (with some spelunking in the SQL) that doesn't align with
- # the question that is being asked in a request for usages: What
- # is the usage, now? So the last-modified time is set to utcnow.
- req.response.last_modified = timeutils.utcnow(with_timezone=True)
- return req.response
diff --git a/nova/api/openstack/placement/lib.py b/nova/api/openstack/placement/lib.py
deleted file mode 100644
index 0518027c69..0000000000
--- a/nova/api/openstack/placement/lib.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Symbols intended to be imported by both placement code and placement API
-consumers. When placement is separated out, this module should be part of a
-common library that both placement and its consumers can require."""
-
-
-class RequestGroup(object):
- def __init__(self, use_same_provider=True, resources=None,
- required_traits=None, forbidden_traits=None, member_of=None):
- """Create a grouping of resource and trait requests.
-
- :param use_same_provider:
- If True, (the default) this RequestGroup represents requests for
- resources and traits which must be satisfied by a single resource
- provider. If False, represents a request for resources and traits
- in any resource provider in the same tree, or a sharing provider.
- :param resources: A dict of { resource_class: amount, ... }
- :param required_traits: A set of { trait_name, ... }
- :param forbidden_traits: A set of { trait_name, ... }
- :param member_of: A list of [ [aggregate_UUID],
- [aggregate_UUID, aggregate_UUID] ... ]
- """
- self.use_same_provider = use_same_provider
- self.resources = resources or {}
- self.required_traits = required_traits or set()
- self.forbidden_traits = forbidden_traits or set()
- self.member_of = member_of or []
-
- def __str__(self):
- ret = 'RequestGroup(use_same_provider=%s' % str(self.use_same_provider)
- ret += ', resources={%s}' % ', '.join(
- '%s:%d' % (rc, amount)
- for rc, amount in sorted(list(self.resources.items())))
- ret += ', traits=[%s]' % ', '.join(
- sorted(self.required_traits) +
- ['!%s' % ft for ft in sorted(self.forbidden_traits)])
- ret += ', aggregates=[%s]' % ', '.join(
- sorted('[%s]' % ', '.join(agglist)
- for agglist in sorted(self.member_of)))
- ret += ')'
- return ret
diff --git a/nova/api/openstack/placement/microversion.py b/nova/api/openstack/placement/microversion.py
deleted file mode 100644
index c832a1a4ed..0000000000
--- a/nova/api/openstack/placement/microversion.py
+++ /dev/null
@@ -1,172 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Microversion handling."""
-
-# NOTE(cdent): This code is taken from enamel:
-# https://github.com/jaypipes/enamel and was the original source of
-# the code now used in microversion_parse library.
-
-import collections
-import inspect
-
-import microversion_parse
-import webob
-
-
-SERVICE_TYPE = 'placement'
-MICROVERSION_ENVIRON = '%s.microversion' % SERVICE_TYPE
-VERSIONED_METHODS = collections.defaultdict(list)
-
-# The Canonical Version List
-VERSIONS = [
- '1.0',
- '1.1', # initial support for aggregate.get_aggregates and set_aggregates
- '1.2', # Adds /resource_classes resource endpoint
- '1.3', # Adds 'member_of' query parameter to get resource providers
- # that are members of any of the listed aggregates
- '1.4', # Adds resources query string parameter in GET /resource_providers
- '1.5', # Adds DELETE /resource_providers/{uuid}/inventories
- '1.6', # Adds /traits and /resource_providers{uuid}/traits resource
- # endpoints
- '1.7', # PUT /resource_classes/{name} is bodiless create or update
- '1.8', # Adds 'project_id' and 'user_id' required request parameters to
- # PUT /allocations
- '1.9', # Adds GET /usages
- '1.10', # Adds GET /allocation_candidates resource endpoint
- '1.11', # Adds 'allocations' link to the GET /resource_providers response
- '1.12', # Add project_id and user_id to GET /allocations/{consumer_uuid}
- # and PUT to /allocations/{consumer_uuid} in the same dict form
- # as GET. The 'allocation_requests' format in GET
- # /allocation_candidates is updated to be the same as well.
- '1.13', # Adds POST /allocations to set allocations for multiple consumers
- '1.14', # Adds parent and root provider UUID on resource provider
- # representation and 'in_tree' filter on GET /resource_providers
- '1.15', # Include last-modified and cache-control headers
- '1.16', # Add 'limit' query parameter to GET /allocation_candidates
- '1.17', # Add 'required' query parameter to GET /allocation_candidates and
- # return traits in the provider summary.
- '1.18', # Support ?required=<traits> queryparam on GET /resource_providers
- '1.19', # Include generation and conflict detection in provider aggregates
- # APIs
- '1.20', # Return 200 with provider payload from POST /resource_providers
- '1.21', # Support ?member_of=in:<agg UUIDs> queryparam on
- # GET /allocation_candidates
- '1.22', # Support forbidden traits in the required parameter of
- # GET /resource_providers and GET /allocation_candidates
- '1.23', # Add support for error codes in error response JSON
- '1.24', # Support multiple ?member_of=<agg UUIDs> queryparams on
- # GET /resource_providers
- '1.25', # Adds support for granular resource requests via numbered
- # querystring groups in GET /allocation_candidates
- '1.26', # Add ability to specify inventory with reserved value equal to
- # total.
- '1.27', # Include all resource class inventories in `provider_summaries`
- # field in response of `GET /allocation_candidates` API even if
- # the resource class is not in the requested resources.
- '1.28', # Add support for consumer generation
- '1.29', # Support nested providers in GET /allocation_candidates API.
- '1.30', # Add POST /reshaper for atomically migrating resource provider
- # inventories and allocations.
-]
-
-
-def max_version_string():
- return VERSIONS[-1]
-
-
-def min_version_string():
- return VERSIONS[0]
-
-
-# From twisted
-# https://github.com/twisted/twisted/blob/trunk/twisted/python/deprecate.py
-def _fully_qualified_name(obj):
- """Return the fully qualified name of a module, class, method or function.
-
- Classes and functions need to be module level ones to be correctly
- qualified.
- """
- try:
- name = obj.__qualname__
- except AttributeError:
- name = obj.__name__
-
- if inspect.isclass(obj) or inspect.isfunction(obj):
- moduleName = obj.__module__
- return "%s.%s" % (moduleName, name)
- elif inspect.ismethod(obj):
- try:
- cls = obj.im_class
- except AttributeError:
- # Python 3 eliminates im_class, substitutes __module__ and
- # __qualname__ to provide similar information.
- return "%s.%s" % (obj.__module__, obj.__qualname__)
- else:
- className = _fully_qualified_name(cls)
- return "%s.%s" % (className, name)
- return name
-
-
-def _find_method(f, version, status_code):
- """Look in VERSIONED_METHODS for method with right name matching version.
-
- If no match is found a HTTPError corresponding to status_code will
- be returned.
- """
- qualified_name = _fully_qualified_name(f)
- # A KeyError shouldn't be possible here, but let's be robust
- # just in case.
- method_list = VERSIONED_METHODS.get(qualified_name, [])
- for min_version, max_version, func in method_list:
- if min_version <= version <= max_version:
- return func
-
- raise webob.exc.status_map[status_code]
-
-
-def version_handler(min_ver, max_ver=None, status_code=404):
- """Decorator for versioning API methods.
-
- Add as a decorator to a placement API handler to constrain
- the microversions at which it will run. Add after the
- ``wsgify`` decorator.
-
- This does not check for version intersections. That's the
- domain of tests.
-
- :param min_ver: A string of two numerals, X.Y indicating the
- minimum version allowed for the decorated method.
- :param max_ver: A string of two numerals, X.Y, indicating the
- maximum version allowed for the decorated method.
- :param status_code: A status code to indicate error, 404 by default
- """
- def decorator(f):
- min_version = microversion_parse.parse_version_string(min_ver)
- if max_ver:
- max_version = microversion_parse.parse_version_string(max_ver)
- else:
- max_version = microversion_parse.parse_version_string(
- max_version_string())
- qualified_name = _fully_qualified_name(f)
- VERSIONED_METHODS[qualified_name].append(
- (min_version, max_version, f))
-
- def decorated_func(req, *args, **kwargs):
- version = req.environ[MICROVERSION_ENVIRON]
- return _find_method(f, version, status_code)(req, *args, **kwargs)
-
- # Sort highest min version to beginning of list.
- VERSIONED_METHODS[qualified_name].sort(key=lambda x: x[0],
- reverse=True)
- return decorated_func
- return decorator
diff --git a/nova/api/openstack/placement/objects/__init__.py b/nova/api/openstack/placement/objects/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/api/openstack/placement/objects/__init__.py
+++ /dev/null
diff --git a/nova/api/openstack/placement/objects/consumer.py b/nova/api/openstack/placement/objects/consumer.py
deleted file mode 100644
index 9d88a83adc..0000000000
--- a/nova/api/openstack/placement/objects/consumer.py
+++ /dev/null
@@ -1,257 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_db import exception as db_exc
-from oslo_versionedobjects import base
-from oslo_versionedobjects import fields
-import sqlalchemy as sa
-
-from nova.api.openstack.placement import db_api
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement.objects import project as project_obj
-from nova.api.openstack.placement.objects import user as user_obj
-from nova.db.sqlalchemy import api_models as models
-
-CONSUMER_TBL = models.Consumer.__table__
-_ALLOC_TBL = models.Allocation.__table__
-
-
-@db_api.placement_context_manager.writer
-def create_incomplete_consumers(ctx, batch_size):
- """Finds all the consumer records that are missing for allocations and
- creates consumer records for them, using the "incomplete consumer" project
- and user CONF options.
-
- Returns a tuple containing two identical elements with the number of
- consumer records created, since this is the expected return format for data
- migration routines.
- """
- # Create a record in the projects table for our incomplete project
- incomplete_proj_id = project_obj.ensure_incomplete_project(ctx)
-
- # Create a record in the users table for our incomplete user
- incomplete_user_id = user_obj.ensure_incomplete_user(ctx)
-
- # Create a consumer table record for all consumers where
- # allocations.consumer_id doesn't exist in the consumers table. Use the
- # incomplete consumer project and user ID.
- alloc_to_consumer = sa.outerjoin(
- _ALLOC_TBL, CONSUMER_TBL,
- _ALLOC_TBL.c.consumer_id == CONSUMER_TBL.c.uuid)
- cols = [
- _ALLOC_TBL.c.consumer_id,
- incomplete_proj_id,
- incomplete_user_id,
- ]
- sel = sa.select(cols)
- sel = sel.select_from(alloc_to_consumer)
- sel = sel.where(CONSUMER_TBL.c.id.is_(None))
- # NOTE(mnaser): It is possible to have multiple consumers having many
- # allocations to the same resource provider, which would
- # make the INSERT FROM SELECT fail due to duplicates.
- sel = sel.group_by(_ALLOC_TBL.c.consumer_id)
- sel = sel.limit(batch_size)
- target_cols = ['uuid', 'project_id', 'user_id']
- ins_stmt = CONSUMER_TBL.insert().from_select(target_cols, sel)
- res = ctx.session.execute(ins_stmt)
- return res.rowcount, res.rowcount
-
-
-@db_api.placement_context_manager.writer
-def delete_consumers_if_no_allocations(ctx, consumer_uuids):
- """Looks to see if any of the supplied consumers has any allocations and if
- not, deletes the consumer record entirely.
-
- :param ctx: `nova.api.openstack.placement.context.RequestContext` that
- contains an oslo_db Session
- :param consumer_uuids: UUIDs of the consumers to check and maybe delete
- """
- # Delete consumers that are not referenced in the allocations table
- cons_to_allocs_join = sa.outerjoin(
- CONSUMER_TBL, _ALLOC_TBL,
- CONSUMER_TBL.c.uuid == _ALLOC_TBL.c.consumer_id)
- subq = sa.select([CONSUMER_TBL.c.uuid]).select_from(cons_to_allocs_join)
- subq = subq.where(sa.and_(
- _ALLOC_TBL.c.consumer_id.is_(None),
- CONSUMER_TBL.c.uuid.in_(consumer_uuids)))
- no_alloc_consumers = [r[0] for r in ctx.session.execute(subq).fetchall()]
- del_stmt = CONSUMER_TBL.delete()
- del_stmt = del_stmt.where(CONSUMER_TBL.c.uuid.in_(no_alloc_consumers))
- ctx.session.execute(del_stmt)
-
-
-@db_api.placement_context_manager.reader
-def _get_consumer_by_uuid(ctx, uuid):
- # The SQL for this looks like the following:
- # SELECT
- # c.id, c.uuid,
- # p.id AS project_id, p.external_id AS project_external_id,
- # u.id AS user_id, u.external_id AS user_external_id,
- # c.updated_at, c.created_at
- # FROM consumers c
- # INNER JOIN projects p
- # ON c.project_id = p.id
- # INNER JOIN users u
- # ON c.user_id = u.id
- # WHERE c.uuid = $uuid
- consumers = sa.alias(CONSUMER_TBL, name="c")
- projects = sa.alias(project_obj.PROJECT_TBL, name="p")
- users = sa.alias(user_obj.USER_TBL, name="u")
- cols = [
- consumers.c.id,
- consumers.c.uuid,
- projects.c.id.label("project_id"),
- projects.c.external_id.label("project_external_id"),
- users.c.id.label("user_id"),
- users.c.external_id.label("user_external_id"),
- consumers.c.generation,
- consumers.c.updated_at,
- consumers.c.created_at
- ]
- c_to_p_join = sa.join(
- consumers, projects, consumers.c.project_id == projects.c.id)
- c_to_u_join = sa.join(
- c_to_p_join, users, consumers.c.user_id == users.c.id)
- sel = sa.select(cols).select_from(c_to_u_join)
- sel = sel.where(consumers.c.uuid == uuid)
- res = ctx.session.execute(sel).fetchone()
- if not res:
- raise exception.ConsumerNotFound(uuid=uuid)
-
- return dict(res)
-
-
-@db_api.placement_context_manager.writer
-def _increment_consumer_generation(ctx, consumer):
- """Increments the supplied consumer's generation value, supplying the
- consumer object which contains the currently-known generation. Returns the
- newly-incremented generation.
-
- :param ctx: `nova.context.RequestContext` that contains an oslo_db Session
- :param consumer: `Consumer` whose generation should be updated.
- :returns: The newly-incremented generation.
- :raises nova.exception.ConcurrentUpdateDetected: if another thread updated
- the same consumer's view of its allocations in between the time
- when this object was originally read and the call which modified
- the consumer's state (e.g. replacing allocations for a consumer)
- """
- consumer_gen = consumer.generation
- new_generation = consumer_gen + 1
- upd_stmt = CONSUMER_TBL.update().where(sa.and_(
- CONSUMER_TBL.c.id == consumer.id,
- CONSUMER_TBL.c.generation == consumer_gen)).values(
- generation=new_generation)
-
- res = ctx.session.execute(upd_stmt)
- if res.rowcount != 1:
- raise exception.ConcurrentUpdateDetected
- return new_generation
-
-
-@db_api.placement_context_manager.writer
-def _delete_consumer(ctx, consumer):
- """Deletes the supplied consumer.
-
- :param ctx: `nova.context.RequestContext` that contains an oslo_db Session
- :param consumer: `Consumer` whose generation should be updated.
- """
- del_stmt = CONSUMER_TBL.delete().where(CONSUMER_TBL.c.id == consumer.id)
- ctx.session.execute(del_stmt)
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class Consumer(base.VersionedObject, base.TimestampedObject):
-
- fields = {
- 'id': fields.IntegerField(read_only=True),
- 'uuid': fields.UUIDField(nullable=False),
- 'project': fields.ObjectField('Project', nullable=False),
- 'user': fields.ObjectField('User', nullable=False),
- 'generation': fields.IntegerField(nullable=False),
- }
-
- @staticmethod
- def _from_db_object(ctx, target, source):
- target.id = source['id']
- target.uuid = source['uuid']
- target.generation = source['generation']
- target.created_at = source['created_at']
- target.updated_at = source['updated_at']
-
- target.project = project_obj.Project(
- ctx, id=source['project_id'],
- external_id=source['project_external_id'])
- target.user = user_obj.User(
- ctx, id=source['user_id'],
- external_id=source['user_external_id'])
-
- target._context = ctx
- target.obj_reset_changes()
- return target
-
- @classmethod
- def get_by_uuid(cls, ctx, uuid):
- res = _get_consumer_by_uuid(ctx, uuid)
- return cls._from_db_object(ctx, cls(ctx), res)
-
- def create(self):
- @db_api.placement_context_manager.writer
- def _create_in_db(ctx):
- db_obj = models.Consumer(
- uuid=self.uuid, project_id=self.project.id,
- user_id=self.user.id)
- try:
- db_obj.save(ctx.session)
- # NOTE(jaypipes): We don't do the normal _from_db_object()
- # thing here because models.Consumer doesn't have a
- # project_external_id or user_external_id attribute.
- self.id = db_obj.id
- self.generation = db_obj.generation
- except db_exc.DBDuplicateEntry:
- raise exception.ConsumerExists(uuid=self.uuid)
- _create_in_db(self._context)
- self.obj_reset_changes()
-
- def update(self):
- """Used to update the consumer's project and user information without
- incrementing the consumer's generation.
- """
- @db_api.placement_context_manager.writer
- def _update_in_db(ctx):
- upd_stmt = CONSUMER_TBL.update().values(
- project_id=self.project.id, user_id=self.user.id)
- # NOTE(jaypipes): We add the generation check to the WHERE clause
- # above just for safety. We don't need to check that the statement
- # actually updated a single row. If it did not, then the
- # consumer.increment_generation() call that happens in
- # AllocationList.replace_all() will end up raising
- # ConcurrentUpdateDetected anyway
- upd_stmt = upd_stmt.where(sa.and_(
- CONSUMER_TBL.c.id == self.id,
- CONSUMER_TBL.c.generation == self.generation))
- ctx.session.execute(upd_stmt)
- _update_in_db(self._context)
- self.obj_reset_changes()
-
- def increment_generation(self):
- """Increments the consumer's generation.
-
- :raises nova.exception.ConcurrentUpdateDetected: if another thread
- updated the same consumer's view of its allocations in between the
- time when this object was originally read and the call which
- modified the consumer's state (e.g. replacing allocations for a
- consumer)
- """
- self.generation = _increment_consumer_generation(self._context, self)
-
- def delete(self):
- _delete_consumer(self._context, self)
diff --git a/nova/api/openstack/placement/objects/project.py b/nova/api/openstack/placement/objects/project.py
deleted file mode 100644
index a6742da2fd..0000000000
--- a/nova/api/openstack/placement/objects/project.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_versionedobjects import base
-from oslo_versionedobjects import fields
-import sqlalchemy as sa
-
-from nova.api.openstack.placement import db_api
-from nova.api.openstack.placement import exception
-from nova.db.sqlalchemy import api_models as models
-
-CONF = cfg.CONF
-PROJECT_TBL = models.Project.__table__
-
-
-@db_api.placement_context_manager.writer
-def ensure_incomplete_project(ctx):
- """Ensures that a project record is created for the "incomplete consumer
- project". Returns the internal ID of that record.
- """
- incomplete_id = CONF.placement.incomplete_consumer_project_id
- sel = sa.select([PROJECT_TBL.c.id]).where(
- PROJECT_TBL.c.external_id == incomplete_id)
- res = ctx.session.execute(sel).fetchone()
- if res:
- return res[0]
- ins = PROJECT_TBL.insert().values(external_id=incomplete_id)
- res = ctx.session.execute(ins)
- return res.inserted_primary_key[0]
-
-
-@db_api.placement_context_manager.reader
-def _get_project_by_external_id(ctx, external_id):
- projects = sa.alias(PROJECT_TBL, name="p")
- cols = [
- projects.c.id,
- projects.c.external_id,
- projects.c.updated_at,
- projects.c.created_at
- ]
- sel = sa.select(cols)
- sel = sel.where(projects.c.external_id == external_id)
- res = ctx.session.execute(sel).fetchone()
- if not res:
- raise exception.ProjectNotFound(external_id=external_id)
-
- return dict(res)
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class Project(base.VersionedObject):
-
- fields = {
- 'id': fields.IntegerField(read_only=True),
- 'external_id': fields.StringField(nullable=False),
- }
-
- @staticmethod
- def _from_db_object(ctx, target, source):
- for field in target.fields:
- setattr(target, field, source[field])
-
- target._context = ctx
- target.obj_reset_changes()
- return target
-
- @classmethod
- def get_by_external_id(cls, ctx, external_id):
- res = _get_project_by_external_id(ctx, external_id)
- return cls._from_db_object(ctx, cls(ctx), res)
-
- def create(self):
- @db_api.placement_context_manager.writer
- def _create_in_db(ctx):
- db_obj = models.Project(external_id=self.external_id)
- try:
- db_obj.save(ctx.session)
- except db_exc.DBDuplicateEntry:
- raise exception.ProjectExists(external_id=self.external_id)
- self._from_db_object(ctx, self, db_obj)
- _create_in_db(self._context)
diff --git a/nova/api/openstack/placement/objects/resource_provider.py b/nova/api/openstack/placement/objects/resource_provider.py
deleted file mode 100644
index 87e75ec653..0000000000
--- a/nova/api/openstack/placement/objects/resource_provider.py
+++ /dev/null
@@ -1,4282 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-import copy
-import itertools
-import random
-
-# NOTE(cdent): The resource provider objects are designed to never be
-# used over RPC. Remote manipulation is done with the placement HTTP
-# API. The 'remotable' decorators should not be used, the objects should
-# not be registered and there is no need to express VERSIONs nor handle
-# obj_make_compatible.
-
-import os_traits
-from oslo_concurrency import lockutils
-from oslo_config import cfg
-from oslo_db import api as oslo_db_api
-from oslo_db import exception as db_exc
-from oslo_log import log as logging
-from oslo_utils import encodeutils
-from oslo_versionedobjects import base
-from oslo_versionedobjects import fields
-import six
-import sqlalchemy as sa
-from sqlalchemy import exc as sqla_exc
-from sqlalchemy import func
-from sqlalchemy import sql
-from sqlalchemy.sql import null
-
-from nova.api.openstack.placement import db_api
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement.objects import consumer as consumer_obj
-from nova.api.openstack.placement.objects import project as project_obj
-from nova.api.openstack.placement.objects import user as user_obj
-from nova.api.openstack.placement import resource_class_cache as rc_cache
-from nova.db.sqlalchemy import api_models as models
-from nova.i18n import _
-from nova import rc_fields
-
-_TRAIT_TBL = models.Trait.__table__
-_ALLOC_TBL = models.Allocation.__table__
-_INV_TBL = models.Inventory.__table__
-_RP_TBL = models.ResourceProvider.__table__
-# Not used in this file but used in tests.
-_RC_TBL = models.ResourceClass.__table__
-_AGG_TBL = models.PlacementAggregate.__table__
-_RP_AGG_TBL = models.ResourceProviderAggregate.__table__
-_RP_TRAIT_TBL = models.ResourceProviderTrait.__table__
-_PROJECT_TBL = models.Project.__table__
-_USER_TBL = models.User.__table__
-_CONSUMER_TBL = models.Consumer.__table__
-_RC_CACHE = None
-_TRAIT_LOCK = 'trait_sync'
-_TRAITS_SYNCED = False
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-
-@db_api.placement_context_manager.reader
-def ensure_rc_cache(ctx):
- """Ensures that a singleton resource class cache has been created in the
- module's scope.
-
- :param ctx: `nova.context.RequestContext` that may be used to grab a DB
- connection.
- """
- global _RC_CACHE
- if _RC_CACHE is not None:
- return
- _RC_CACHE = rc_cache.ResourceClassCache(ctx)
-
-
-@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
-# Bug #1760322: If the caller raises an exception, we don't want the trait
-# sync rolled back; so use an .independent transaction
-@db_api.placement_context_manager.writer.independent
-def _trait_sync(ctx):
- """Sync the os_traits symbols to the database.
-
- Reads all symbols from the os_traits library, checks if any of them do
- not exist in the database and bulk-inserts those that are not. This is
- done once per process using this code if either Trait.get_by_name or
- TraitList.get_all is called.
-
- :param ctx: `nova.context.RequestContext` that may be used to grab a DB
- connection.
- """
- # Create a set of all traits in the os_traits library.
- std_traits = set(os_traits.get_traits())
- sel = sa.select([_TRAIT_TBL.c.name])
- res = ctx.session.execute(sel).fetchall()
- # Create a set of all traits in the db that are not custom
- # traits.
- db_traits = set(
- r[0] for r in res
- if not os_traits.is_custom(r[0])
- )
- # Determine those traits which are in os_traits but not
- # currently in the database, and insert them.
- need_sync = std_traits - db_traits
- ins = _TRAIT_TBL.insert()
- batch_args = [
- {'name': six.text_type(trait)}
- for trait in need_sync
- ]
- if batch_args:
- try:
- ctx.session.execute(ins, batch_args)
- LOG.info("Synced traits from os_traits into API DB: %s",
- need_sync)
- except db_exc.DBDuplicateEntry:
- pass # some other process sync'd, just ignore
-
-
-def ensure_trait_sync(ctx):
- """Ensures that the os_traits library is synchronized to the traits db.
-
- If _TRAITS_SYNCED is False then this process has not tried to update the
- traits db. Do so by calling _trait_sync. Since the placement API server
- could be multi-threaded, lock around testing _TRAITS_SYNCED to avoid
- duplicating work.
-
- Different placement API server processes that talk to the same database
- will avoid issues through the power of transactions.
-
- :param ctx: `nova.context.RequestContext` that may be used to grab a DB
- connection.
- """
- global _TRAITS_SYNCED
- # If another thread is doing this work, wait for it to complete.
- # When that thread is done _TRAITS_SYNCED will be true in this
- # thread and we'll simply return.
- with lockutils.lock(_TRAIT_LOCK):
- if not _TRAITS_SYNCED:
- _trait_sync(ctx)
- _TRAITS_SYNCED = True
-
-
-def _get_current_inventory_resources(ctx, rp):
- """Returns a set() containing the resource class IDs for all resources
- currently having an inventory record for the supplied resource provider.
-
- :param ctx: `nova.context.RequestContext` that may be used to grab a DB
- connection.
- :param rp: Resource provider to query inventory for.
- """
- cur_res_sel = sa.select([_INV_TBL.c.resource_class_id]).where(
- _INV_TBL.c.resource_provider_id == rp.id)
- existing_resources = ctx.session.execute(cur_res_sel).fetchall()
- return set([r[0] for r in existing_resources])
-
-
-def _delete_inventory_from_provider(ctx, rp, to_delete):
- """Deletes any inventory records from the supplied provider and set() of
- resource class identifiers.
-
- If there are allocations for any of the inventories to be deleted raise
- InventoryInUse exception.
-
- :param ctx: `nova.context.RequestContext` that contains an oslo_db Session
- :param rp: Resource provider from which to delete inventory.
- :param to_delete: set() containing resource class IDs for records to
- delete.
- """
- allocation_query = sa.select(
- [_ALLOC_TBL.c.resource_class_id.label('resource_class')]).where(
- sa.and_(_ALLOC_TBL.c.resource_provider_id == rp.id,
- _ALLOC_TBL.c.resource_class_id.in_(to_delete))
- ).group_by(_ALLOC_TBL.c.resource_class_id)
- allocations = ctx.session.execute(allocation_query).fetchall()
- if allocations:
- resource_classes = ', '.join([_RC_CACHE.string_from_id(alloc[0])
- for alloc in allocations])
- raise exception.InventoryInUse(resource_classes=resource_classes,
- resource_provider=rp.uuid)
-
- del_stmt = _INV_TBL.delete().where(sa.and_(
- _INV_TBL.c.resource_provider_id == rp.id,
- _INV_TBL.c.resource_class_id.in_(to_delete)))
- res = ctx.session.execute(del_stmt)
- return res.rowcount
-
-
-def _add_inventory_to_provider(ctx, rp, inv_list, to_add):
- """Inserts new inventory records for the supplied resource provider.
-
- :param ctx: `nova.context.RequestContext` that contains an oslo_db Session
- :param rp: Resource provider to add inventory to.
- :param inv_list: InventoryList object
- :param to_add: set() containing resource class IDs to search inv_list for
- adding to resource provider.
- """
- for rc_id in to_add:
- rc_str = _RC_CACHE.string_from_id(rc_id)
- inv_record = inv_list.find(rc_str)
- ins_stmt = _INV_TBL.insert().values(
- resource_provider_id=rp.id,
- resource_class_id=rc_id,
- total=inv_record.total,
- reserved=inv_record.reserved,
- min_unit=inv_record.min_unit,
- max_unit=inv_record.max_unit,
- step_size=inv_record.step_size,
- allocation_ratio=inv_record.allocation_ratio)
- ctx.session.execute(ins_stmt)
-
-
-def _update_inventory_for_provider(ctx, rp, inv_list, to_update):
- """Updates existing inventory records for the supplied resource provider.
-
- :param ctx: `nova.context.RequestContext` that contains an oslo_db Session
- :param rp: Resource provider on which to update inventory.
- :param inv_list: InventoryList object
- :param to_update: set() containing resource class IDs to search inv_list
- for updating in resource provider.
- :returns: A list of (uuid, class) tuples that have exceeded their
- capacity after this inventory update.
- """
- exceeded = []
- for rc_id in to_update:
- rc_str = _RC_CACHE.string_from_id(rc_id)
- inv_record = inv_list.find(rc_str)
- allocation_query = sa.select(
- [func.sum(_ALLOC_TBL.c.used).label('usage')]).\
- where(sa.and_(
- _ALLOC_TBL.c.resource_provider_id == rp.id,
- _ALLOC_TBL.c.resource_class_id == rc_id))
- allocations = ctx.session.execute(allocation_query).first()
- if (allocations
- and allocations['usage'] is not None
- and allocations['usage'] > inv_record.capacity):
- exceeded.append((rp.uuid, rc_str))
- upd_stmt = _INV_TBL.update().where(sa.and_(
- _INV_TBL.c.resource_provider_id == rp.id,
- _INV_TBL.c.resource_class_id == rc_id)).values(
- total=inv_record.total,
- reserved=inv_record.reserved,
- min_unit=inv_record.min_unit,
- max_unit=inv_record.max_unit,
- step_size=inv_record.step_size,
- allocation_ratio=inv_record.allocation_ratio)
- res = ctx.session.execute(upd_stmt)
- if not res.rowcount:
- raise exception.InventoryWithResourceClassNotFound(
- resource_class=rc_str)
- return exceeded
-
-
-def _increment_provider_generation(ctx, rp):
- """Increments the supplied provider's generation value, supplying the
- currently-known generation. Returns whether the increment succeeded.
-
- :param ctx: `nova.context.RequestContext` that contains an oslo_db Session
- :param rp: `ResourceProvider` whose generation should be updated.
- :returns: The new resource provider generation value if successful.
- :raises nova.exception.ConcurrentUpdateDetected: if another thread updated
- the same resource provider's view of its inventory or allocations
- in between the time when this object was originally read
- and the call to set the inventory.
- """
- rp_gen = rp.generation
- new_generation = rp_gen + 1
- upd_stmt = _RP_TBL.update().where(sa.and_(
- _RP_TBL.c.id == rp.id,
- _RP_TBL.c.generation == rp_gen)).values(
- generation=(new_generation))
-
- res = ctx.session.execute(upd_stmt)
- if res.rowcount != 1:
- raise exception.ResourceProviderConcurrentUpdateDetected()
- return new_generation
-
-
-@db_api.placement_context_manager.writer
-def _add_inventory(context, rp, inventory):
- """Add one Inventory that wasn't already on the provider.
-
- :raises `exception.ResourceClassNotFound` if inventory.resource_class
- cannot be found in either the standard classes or the DB.
- """
- rc_id = _RC_CACHE.id_from_string(inventory.resource_class)
- inv_list = InventoryList(objects=[inventory])
- _add_inventory_to_provider(
- context, rp, inv_list, set([rc_id]))
- rp.generation = _increment_provider_generation(context, rp)
-
-
-@db_api.placement_context_manager.writer
-def _update_inventory(context, rp, inventory):
- """Update an inventory already on the provider.
-
- :raises `exception.ResourceClassNotFound` if inventory.resource_class
- cannot be found in either the standard classes or the DB.
- """
- rc_id = _RC_CACHE.id_from_string(inventory.resource_class)
- inv_list = InventoryList(objects=[inventory])
- exceeded = _update_inventory_for_provider(
- context, rp, inv_list, set([rc_id]))
- rp.generation = _increment_provider_generation(context, rp)
- return exceeded
-
-
-@db_api.placement_context_manager.writer
-def _delete_inventory(context, rp, resource_class):
- """Delete up to one Inventory of the given resource_class string.
-
- :raises `exception.ResourceClassNotFound` if resource_class
- cannot be found in either the standard classes or the DB.
- """
- rc_id = _RC_CACHE.id_from_string(resource_class)
- if not _delete_inventory_from_provider(context, rp, [rc_id]):
- raise exception.NotFound(
- 'No inventory of class %s found for delete'
- % resource_class)
- rp.generation = _increment_provider_generation(context, rp)
-
-
-@db_api.placement_context_manager.writer
-def _set_inventory(context, rp, inv_list):
- """Given an InventoryList object, replaces the inventory of the
- resource provider in a safe, atomic fashion using the resource
- provider's generation as a consistent view marker.
-
- :param context: Nova RequestContext.
- :param rp: `ResourceProvider` object upon which to set inventory.
- :param inv_list: `InventoryList` object to save to backend storage.
- :returns: A list of (uuid, class) tuples that have exceeded their
- capacity after this inventory update.
- :raises nova.exception.ConcurrentUpdateDetected: if another thread updated
- the same resource provider's view of its inventory or allocations
- in between the time when this object was originally read
- and the call to set the inventory.
- :raises `exception.ResourceClassNotFound` if any resource class in any
- inventory in inv_list cannot be found in either the standard
- classes or the DB.
- :raises `exception.InventoryInUse` if we attempt to delete inventory
- from a provider that has allocations for that resource class.
- """
- existing_resources = _get_current_inventory_resources(context, rp)
- these_resources = set([_RC_CACHE.id_from_string(r.resource_class)
- for r in inv_list.objects])
-
- # Determine which resources we should be adding, deleting and/or
- # updating in the resource provider's inventory by comparing sets
- # of resource class identifiers.
- to_add = these_resources - existing_resources
- to_delete = existing_resources - these_resources
- to_update = these_resources & existing_resources
- exceeded = []
-
- if to_delete:
- _delete_inventory_from_provider(context, rp, to_delete)
- if to_add:
- _add_inventory_to_provider(context, rp, inv_list, to_add)
- if to_update:
- exceeded = _update_inventory_for_provider(context, rp, inv_list,
- to_update)
-
- # Here is where we update the resource provider's generation value. If
- # this update updates zero rows, that means that another thread has updated
- # the inventory for this resource provider between the time the caller
- # originally read the resource provider record and inventory information
- # and this point. We raise an exception here which will rollback the above
- # transaction and return an error to the caller to indicate that they can
- # attempt to retry the inventory save after reverifying any capacity
- # conditions and re-reading the existing inventory information.
- rp.generation = _increment_provider_generation(context, rp)
-
- return exceeded
-
-
-@db_api.placement_context_manager.reader
-def _get_provider_by_uuid(context, uuid):
- """Given a UUID, return a dict of information about the resource provider
- from the database.
-
- :raises: NotFound if no such provider was found
- :param uuid: The UUID to look up
- """
- rpt = sa.alias(_RP_TBL, name="rp")
- parent = sa.alias(_RP_TBL, name="parent")
- root = sa.alias(_RP_TBL, name="root")
- # TODO(jaypipes): Change this to an inner join when we are sure all
- # root_provider_id values are NOT NULL
- rp_to_root = sa.outerjoin(rpt, root, rpt.c.root_provider_id == root.c.id)
- rp_to_parent = sa.outerjoin(rp_to_root, parent,
- rpt.c.parent_provider_id == parent.c.id)
- cols = [
- rpt.c.id,
- rpt.c.uuid,
- rpt.c.name,
- rpt.c.generation,
- root.c.uuid.label("root_provider_uuid"),
- parent.c.uuid.label("parent_provider_uuid"),
- rpt.c.updated_at,
- rpt.c.created_at,
- ]
- sel = sa.select(cols).select_from(rp_to_parent).where(rpt.c.uuid == uuid)
- res = context.session.execute(sel).fetchone()
- if not res:
- raise exception.NotFound(
- 'No resource provider with uuid %s found' % uuid)
- return dict(res)
-
-
-@db_api.placement_context_manager.reader
-def _get_aggregates_by_provider_id(context, rp_id):
- """Returns a dict, keyed by internal aggregate ID, of aggregate UUIDs
- associated with the supplied internal resource provider ID.
- """
- join_statement = sa.join(
- _AGG_TBL, _RP_AGG_TBL, sa.and_(
- _AGG_TBL.c.id == _RP_AGG_TBL.c.aggregate_id,
- _RP_AGG_TBL.c.resource_provider_id == rp_id))
- sel = sa.select([_AGG_TBL.c.id, _AGG_TBL.c.uuid]).select_from(
- join_statement)
- return {r[0]: r[1] for r in context.session.execute(sel).fetchall()}
-
-
-@db_api.placement_context_manager.reader
-def _anchors_for_sharing_providers(context, rp_ids, get_id=False):
- """Given a list of internal IDs of sharing providers, returns a set of
- tuples of (sharing provider UUID, anchor provider UUID), where each of
- anchor is the unique root provider of a tree associated with the same
- aggregate as the sharing provider. (These are the providers that can
- "anchor" a single AllocationRequest.)
-
- The sharing provider may or may not itself be part of a tree; in either
- case, an entry for this root provider is included in the result.
-
- If the sharing provider is not part of any aggregate, the empty list is
- returned.
-
- If get_id is True, it returns a set of tuples of (sharing provider ID,
- anchor provider ID) instead.
- """
- # SELECT sps.uuid, COALESCE(rps.uuid, shr_with_sps.uuid)
- # FROM resource_providers AS sps
- # INNER JOIN resource_provider_aggregates AS shr_aggs
- # ON sps.id = shr_aggs.resource_provider_id
- # INNER JOIN resource_provider_aggregates AS shr_with_sps_aggs
- # ON shr_aggs.aggregate_id = shr_with_sps_aggs.aggregate_id
- # INNER JOIN resource_providers AS shr_with_sps
- # ON shr_with_sps_aggs.resource_provider_id = shr_with_sps.id
- # LEFT JOIN resource_providers AS rps
- # ON shr_with_sps.root_provider_id = rps.id
- # WHERE sps.id IN $(RP_IDs)
- rps = sa.alias(_RP_TBL, name='rps')
- sps = sa.alias(_RP_TBL, name='sps')
- shr_aggs = sa.alias(_RP_AGG_TBL, name='shr_aggs')
- shr_with_sps_aggs = sa.alias(_RP_AGG_TBL, name='shr_with_sps_aggs')
- shr_with_sps = sa.alias(_RP_TBL, name='shr_with_sps')
- join_chain = sa.join(
- sps, shr_aggs, sps.c.id == shr_aggs.c.resource_provider_id)
- join_chain = sa.join(
- join_chain, shr_with_sps_aggs,
- shr_aggs.c.aggregate_id == shr_with_sps_aggs.c.aggregate_id)
- join_chain = sa.join(
- join_chain, shr_with_sps,
- shr_with_sps_aggs.c.resource_provider_id == shr_with_sps.c.id)
- if get_id:
- # TODO(yikun): Change `func.coalesce(shr_with_sps.c.root_provider_id,
- # shr_with_sps.c.id)` to `shr_with_sps.c.root_provider_id` when we are
- # sure all root_provider_id values are NOT NULL
- sel = sa.select([sps.c.id, func.coalesce(
- shr_with_sps.c.root_provider_id, shr_with_sps.c.id)])
- else:
- # TODO(efried): Change this to an inner join and change
- # 'func.coalesce(rps.c.uuid, shr_with_sps.c.uuid)' to `rps.c.uuid`
- # when we are sure all root_provider_id values are NOT NULL
- join_chain = sa.outerjoin(
- join_chain, rps, shr_with_sps.c.root_provider_id == rps.c.id)
- sel = sa.select([sps.c.uuid, func.coalesce(rps.c.uuid,
- shr_with_sps.c.uuid)])
- sel = sel.select_from(join_chain)
- sel = sel.where(sps.c.id.in_(rp_ids))
- return set([(r[0], r[1]) for r in context.session.execute(sel).fetchall()])
-
-
-@db_api.placement_context_manager.writer
-def _ensure_aggregate(ctx, agg_uuid):
- """Finds an aggregate and returns its internal ID. If not found, creates
- the aggregate and returns the new aggregate's internal ID.
- """
- sel = sa.select([_AGG_TBL.c.id]).where(_AGG_TBL.c.uuid == agg_uuid)
- res = ctx.session.execute(sel).fetchone()
- if res:
- return res[0]
-
- LOG.debug("_ensure_aggregate() did not find aggregate %s. "
- "Creating it.", agg_uuid)
- try:
- ins_stmt = _AGG_TBL.insert().values(uuid=agg_uuid)
- res = ctx.session.execute(ins_stmt)
- agg_id = res.inserted_primary_key[0]
- LOG.debug("_ensure_aggregate() created new aggregate %s (id=%d).",
- agg_uuid, agg_id)
- return agg_id
- except db_exc.DBDuplicateEntry:
- # Something else added this agg_uuid in between our initial
- # fetch above and when we tried flushing this session, so let's
- # grab whatever that other thing added.
- LOG.debug("_ensure_provider() failed to create new aggregate %s. "
- "Another thread already created an aggregate record. "
- "Looking up that aggregate record.",
- agg_uuid)
- return _ensure_aggregate(ctx, agg_uuid)
-
-
-@db_api.placement_context_manager.writer
-def _set_aggregates(context, resource_provider, provided_aggregates,
- increment_generation=False):
- rp_id = resource_provider.id
- # When aggregate uuids are persisted no validation is done
- # to ensure that they refer to something that has meaning
- # elsewhere. It is assumed that code which makes use of the
- # aggregates, later, will validate their fitness.
- # TODO(cdent): At the moment we do not delete
- # a PlacementAggregate that no longer has any associations
- # with at least one resource provider. We may wish to do that
- # to avoid bloat if it turns out we're creating a lot of noise.
- # Not doing now to move things along.
- provided_aggregates = set(provided_aggregates)
- existing_aggregates = _get_aggregates_by_provider_id(context, rp_id)
- agg_uuids_to_add = provided_aggregates - set(existing_aggregates.values())
- # A dict, keyed by internal aggregate ID, of aggregate UUIDs that will be
- # associated with the provider
- aggs_to_associate = {}
- # Same dict for those aggregates to remove the association with this
- # provider
- aggs_to_disassociate = {
- agg_id: agg_uuid for agg_id, agg_uuid in existing_aggregates.items()
- if agg_uuid not in provided_aggregates
- }
-
- # Create any aggregates that do not yet exist in
- # PlacementAggregates. This is different from
- # the set in existing_aggregates; those are aggregates for
- # which there are associations for the resource provider
- # at rp_id. The following loop checks for the existence of any
- # aggregate with the provided uuid. In this way we only
- # create a new row in the PlacementAggregate table if the
- # aggregate uuid has never been seen before. Code further
- # below will update the associations.
- for agg_uuid in agg_uuids_to_add:
- agg_id = _ensure_aggregate(context, agg_uuid)
- aggs_to_associate[agg_id] = agg_uuid
-
- for agg_id, agg_uuid in aggs_to_associate.items():
- try:
- ins_stmt = _RP_AGG_TBL.insert().values(
- resource_provider_id=rp_id, aggregate_id=agg_id)
- context.session.execute(ins_stmt)
- LOG.debug("Setting aggregates for provider %s. Successfully "
- "associated aggregate %s.",
- resource_provider.uuid, agg_uuid)
- except db_exc.DBDuplicateEntry:
- LOG.debug("Setting aggregates for provider %s. Another thread "
- "already associated aggregate %s. Skipping.",
- resource_provider.uuid, agg_uuid)
- pass
-
- for agg_id, agg_uuid in aggs_to_disassociate.items():
- del_stmt = _RP_AGG_TBL.delete().where(sa.and_(
- _RP_AGG_TBL.c.resource_provider_id == rp_id,
- _RP_AGG_TBL.c.aggregate_id == agg_id))
- context.session.execute(del_stmt)
- LOG.debug("Setting aggregates for provider %s. Successfully "
- "disassociated aggregate %s.",
- resource_provider.uuid, agg_uuid)
-
- if increment_generation:
- resource_provider.generation = _increment_provider_generation(
- context, resource_provider)
-
-
-@db_api.placement_context_manager.reader
-def _get_traits_by_provider_id(context, rp_id):
- t = sa.alias(_TRAIT_TBL, name='t')
- rpt = sa.alias(_RP_TRAIT_TBL, name='rpt')
-
- join_cond = sa.and_(t.c.id == rpt.c.trait_id,
- rpt.c.resource_provider_id == rp_id)
- join = sa.join(t, rpt, join_cond)
- sel = sa.select([t.c.id, t.c.name,
- t.c.created_at, t.c.updated_at]).select_from(join)
- return [dict(r) for r in context.session.execute(sel).fetchall()]
-
-
-def _add_traits_to_provider(ctx, rp_id, to_add):
- """Adds trait associations to the provider with the supplied ID.
-
- :param ctx: `nova.context.RequestContext` that has an oslo_db Session
- :param rp_id: Internal ID of the resource provider on which to add
- trait associations
- :param to_add: set() containing internal trait IDs for traits to add
- """
- for trait_id in to_add:
- try:
- ins_stmt = _RP_TRAIT_TBL.insert().values(
- resource_provider_id=rp_id,
- trait_id=trait_id)
- ctx.session.execute(ins_stmt)
- except db_exc.DBDuplicateEntry:
- # Another thread already set this trait for this provider. Ignore
- # this for now (but ConcurrentUpdateDetected will end up being
- # raised almost assuredly when we go to increment the resource
- # provider's generation later, but that's also fine)
- pass
-
-
-def _delete_traits_from_provider(ctx, rp_id, to_delete):
- """Deletes trait associations from the provider with the supplied ID and
- set() of internal trait IDs.
-
- :param ctx: `nova.context.RequestContext` that has an oslo_db Session
- :param rp_id: Internal ID of the resource provider from which to delete
- trait associations
- :param to_delete: set() containing internal trait IDs for traits to
- delete
- """
- del_stmt = _RP_TRAIT_TBL.delete().where(
- sa.and_(
- _RP_TRAIT_TBL.c.resource_provider_id == rp_id,
- _RP_TRAIT_TBL.c.trait_id.in_(to_delete)))
- ctx.session.execute(del_stmt)
-
-
-@db_api.placement_context_manager.writer
-def _set_traits(context, rp, traits):
- """Given a ResourceProvider object and a TraitList object, replaces the set
- of traits associated with the resource provider.
-
- :raises: ConcurrentUpdateDetected if the resource provider's traits or
- inventory was changed in between the time when we first started to
- set traits and the end of this routine.
-
- :param rp: The ResourceProvider object to set traits against
- :param traits: A TraitList object or list of Trait objects
- """
- # Get the internal IDs of our existing traits
- existing_traits = _get_traits_by_provider_id(context, rp.id)
- existing_traits = set(rec['id'] for rec in existing_traits)
- want_traits = set(trait.id for trait in traits)
-
- to_add = want_traits - existing_traits
- to_delete = existing_traits - want_traits
-
- if not to_add and not to_delete:
- return
-
- if to_delete:
- _delete_traits_from_provider(context, rp.id, to_delete)
- if to_add:
- _add_traits_to_provider(context, rp.id, to_add)
- rp.generation = _increment_provider_generation(context, rp)
-
-
-@db_api.placement_context_manager.reader
-def _has_child_providers(context, rp_id):
- """Returns True if the supplied resource provider has any child providers,
- False otherwise
- """
- child_sel = sa.select([_RP_TBL.c.id])
- child_sel = child_sel.where(_RP_TBL.c.parent_provider_id == rp_id)
- child_res = context.session.execute(child_sel.limit(1)).fetchone()
- if child_res:
- return True
- return False
-
-
-@db_api.placement_context_manager.writer
-def _set_root_provider_id(context, rp_id, root_id):
- """Simply sets the root_provider_id value for a provider identified by
- rp_id. Used in online data migration.
-
- :param rp_id: Internal ID of the provider to update
- :param root_id: Value to set root provider to
- """
- upd = _RP_TBL.update().where(_RP_TBL.c.id == rp_id)
- upd = upd.values(root_provider_id=root_id)
- context.session.execute(upd)
-
-
-ProviderIds = collections.namedtuple(
- 'ProviderIds', 'id uuid parent_id parent_uuid root_id root_uuid')
-
-
-def _provider_ids_from_rp_ids(context, rp_ids):
- """Given an iterable of internal resource provider IDs, returns a dict,
- keyed by internal provider Id, of ProviderIds namedtuples describing those
- providers.
-
- :returns: dict, keyed by internal provider Id, of ProviderIds namedtuples
- :param rp_ids: iterable of internal provider IDs to look up
- """
- # SELECT
- # rp.id, rp.uuid,
- # parent.id AS parent_id, parent.uuid AS parent_uuid,
- # root.id AS root_id, root.uuid AS root_uuid
- # FROM resource_providers AS rp
- # LEFT JOIN resource_providers AS parent
- # ON rp.parent_provider_id = parent.id
- # LEFT JOIN resource_providers AS root
- # ON rp.root_provider_id = root.id
- # WHERE rp.id IN ($rp_ids)
- me = sa.alias(_RP_TBL, name="me")
- parent = sa.alias(_RP_TBL, name="parent")
- root = sa.alias(_RP_TBL, name="root")
- cols = [
- me.c.id,
- me.c.uuid,
- parent.c.id.label('parent_id'),
- parent.c.uuid.label('parent_uuid'),
- root.c.id.label('root_id'),
- root.c.uuid.label('root_uuid'),
- ]
- # TODO(jaypipes): Change this to an inner join when we are sure all
- # root_provider_id values are NOT NULL
- me_to_root = sa.outerjoin(me, root, me.c.root_provider_id == root.c.id)
- me_to_parent = sa.outerjoin(me_to_root, parent,
- me.c.parent_provider_id == parent.c.id)
- sel = sa.select(cols).select_from(me_to_parent)
- sel = sel.where(me.c.id.in_(rp_ids))
-
- ret = {}
- for r in context.session.execute(sel):
- # Use its id/uuid for the root id/uuid if the root id/uuid is None
- # TODO(tetsuro): Remove this to when we are sure all root_provider_id
- # values are NOT NULL
- d = dict(r)
- if d['root_id'] is None:
- d['root_id'] = d['id']
- d['root_uuid'] = d['uuid']
- ret[d['id']] = ProviderIds(**d)
- return ret
-
-
-def _provider_ids_from_uuid(context, uuid):
- """Given the UUID of a resource provider, returns a namedtuple
- (ProviderIds) with the internal ID, the UUID, the parent provider's
- internal ID, parent provider's UUID, the root provider's internal ID and
- the root provider UUID.
-
- :returns: ProviderIds object containing the internal IDs and UUIDs of the
- provider identified by the supplied UUID
- :param uuid: The UUID of the provider to look up
- """
- # SELECT
- # rp.id, rp.uuid,
- # parent.id AS parent_id, parent.uuid AS parent_uuid,
- # root.id AS root_id, root.uuid AS root_uuid
- # FROM resource_providers AS rp
- # LEFT JOIN resource_providers AS parent
- # ON rp.parent_provider_id = parent.id
- # LEFT JOIN resource_providers AS root
- # ON rp.root_provider_id = root.id
- me = sa.alias(_RP_TBL, name="me")
- parent = sa.alias(_RP_TBL, name="parent")
- root = sa.alias(_RP_TBL, name="root")
- cols = [
- me.c.id,
- me.c.uuid,
- parent.c.id.label('parent_id'),
- parent.c.uuid.label('parent_uuid'),
- root.c.id.label('root_id'),
- root.c.uuid.label('root_uuid'),
- ]
- # TODO(jaypipes): Change this to an inner join when we are sure all
- # root_provider_id values are NOT NULL
- me_to_root = sa.outerjoin(me, root, me.c.root_provider_id == root.c.id)
- me_to_parent = sa.outerjoin(me_to_root, parent,
- me.c.parent_provider_id == parent.c.id)
- sel = sa.select(cols).select_from(me_to_parent)
- sel = sel.where(me.c.uuid == uuid)
- res = context.session.execute(sel).fetchone()
- if not res:
- return None
- return ProviderIds(**dict(res))
-
-
def _provider_ids_matching_aggregates(context, member_of, rp_ids=None):
    """Given a list of lists of aggregate UUIDs, return the internal IDs of all
    resource providers associated with the aggregates.

    :param context: Request context holding the placement DB session.
    :param member_of: A list containing lists of aggregate UUIDs. Each item in
        the outer list is to be AND'd together. If that item contains multiple
        values, they are OR'd together.

        For example, if member_of is::

            [
                ['agg1'],
                ['agg2', 'agg3'],
            ]

        we will return all the resource providers that are
        associated with agg1 as well as either (agg2 or agg3)
    :param rp_ids: When present, returned resource providers are limited
        to only those in this value

    :returns: A set of internal resource provider IDs having all required
        aggregate associations. Empty when any member_of item refers only to
        unknown aggregates, since no provider can satisfy that constraint.
    """
    # Given a request for the following:
    #
    # member_of = [
    #   [agg1],
    #   [agg2],
    #   [agg3, agg4]
    # ]
    #
    # we need to produce the following SQL expression:
    #
    # SELECT
    #   rp.id
    # FROM resource_providers AS rp
    # JOIN resource_provider_aggregates AS rpa1
    #   ON rp.id = rpa1.resource_provider_id
    #   AND rpa1.aggregate_id IN ($AGG1_ID)
    # JOIN resource_provider_aggregates AS rpa2
    #   ON rp.id = rpa2.resource_provider_id
    #   AND rpa2.aggregate_id IN ($AGG2_ID)
    # JOIN resource_provider_aggregates AS rpa3
    #   ON rp.id = rpa3.resource_provider_id
    #   AND rpa3.aggregate_id IN ($AGG3_ID, $AGG4_ID)
    # # Only if we have rp_ids...
    # WHERE rp.id IN ($RP_IDs)

    # First things first, get a map of all the aggregate UUID to internal
    # aggregate IDs
    agg_uuids = set()
    for members in member_of:
        for member in members:
            agg_uuids.add(member)
    agg_tbl = sa.alias(_AGG_TBL, name='aggs')
    agg_sel = sa.select([agg_tbl.c.uuid, agg_tbl.c.id])
    agg_sel = agg_sel.where(agg_tbl.c.uuid.in_(agg_uuids))
    agg_uuid_map = {
        r[0]: r[1] for r in context.session.execute(agg_sel).fetchall()
    }

    rp_tbl = sa.alias(_RP_TBL, name='rp')
    join_chain = rp_tbl

    for x, members in enumerate(member_of):
        rpa_tbl = sa.alias(_RP_AGG_TBL, name='rpa%d' % x)

        agg_ids = [agg_uuid_map[member] for member in members
                   if member in agg_uuid_map]
        if not agg_ids:
            # This member_of list contains only non-existent aggregate UUIDs
            # and therefore we will always return 0 results, so short-circuit.
            # Return a set (not a list) to match the documented return type
            # and the type produced by the normal path below.
            return set()

        join_cond = sa.and_(
            rp_tbl.c.id == rpa_tbl.c.resource_provider_id,
            rpa_tbl.c.aggregate_id.in_(agg_ids))
        join_chain = sa.join(join_chain, rpa_tbl, join_cond)
    sel = sa.select([rp_tbl.c.id]).select_from(join_chain)
    if rp_ids:
        sel = sel.where(rp_tbl.c.id.in_(rp_ids))
    return set(r[0] for r in context.session.execute(sel))
-
-
@db_api.placement_context_manager.writer
def _delete_rp_record(context, _id):
    """Delete the resource provider row with the supplied internal ID.

    :returns: The number of rows deleted (0 if no such provider exists).
    """
    rp_model = models.ResourceProvider
    query = context.session.query(rp_model).filter(rp_model.id == _id)
    return query.delete(synchronize_session=False)
-
-
@base.VersionedObjectRegistry.register_if(False)
class ResourceProvider(base.VersionedObject, base.TimestampedObject):
    """A provider of consumable resources, optionally part of a tree of
    providers linked by parent/root relationships.
    """

    # The only fields that may be changed after creation via save(); all
    # other fields are immutable once the provider exists.
    SETTABLE_FIELDS = ('name', 'parent_provider_uuid')

    fields = {
        'id': fields.IntegerField(read_only=True),
        'uuid': fields.UUIDField(nullable=False),
        'name': fields.StringField(nullable=False),
        'generation': fields.IntegerField(nullable=False),
        # UUID of the root provider in a hierarchy of providers. Will be equal
        # to the uuid field if this provider is the root provider of a
        # hierarchy. This field is never manually set by the user. Instead, it
        # is automatically set to either the root provider UUID of the parent
        # or the UUID of the provider itself if there is no parent. This field
        # is an optimization field that allows us to very quickly query for all
        # providers within a particular tree without doing any recursive
        # querying.
        'root_provider_uuid': fields.UUIDField(nullable=False),
        # UUID of the direct parent provider, or None if this provider is a
        # "root" provider.
        'parent_provider_uuid': fields.UUIDField(nullable=True, default=None),
    }

    def create(self):
        """Create this provider in the database.

        :raises ObjectActionError: if the object was already created, is
            missing uuid or name, or has root_provider_uuid manually set.
        """
        if 'id' in self:
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        if 'uuid' not in self:
            raise exception.ObjectActionError(action='create',
                                              reason='uuid is required')
        if 'name' not in self:
            raise exception.ObjectActionError(action='create',
                                              reason='name is required')
        if 'root_provider_uuid' in self:
            raise exception.ObjectActionError(
                action='create',
                reason=_('root provider UUID cannot be manually set.'))

        self.obj_set_defaults()
        updates = self.obj_get_changes()
        self._create_in_db(self._context, updates)
        self.obj_reset_changes()

    def destroy(self):
        """Delete this provider from the database.

        :raises CannotDeleteParentResourceProvider: if the provider has
            child providers.
        :raises ResourceProviderInUse: if the provider has allocations.
        :raises NotFound: if the provider record no longer exists.
        """
        self._delete(self._context, self.id)

    def save(self):
        """Persist changed fields; only SETTABLE_FIELDS may change.

        :raises ObjectActionError: if any immutable field was modified.
        """
        updates = self.obj_get_changes()
        if updates and any(k not in self.SETTABLE_FIELDS
                           for k in updates.keys()):
            raise exception.ObjectActionError(
                action='save',
                reason='Immutable fields changed')
        self._update_in_db(self._context, self.id, updates)
        self.obj_reset_changes()

    @classmethod
    def get_by_uuid(cls, context, uuid):
        """Returns a new ResourceProvider object with the supplied UUID.

        :raises NotFound if no such provider could be found
        :param uuid: UUID of the provider to search for
        """
        rp_rec = _get_provider_by_uuid(context, uuid)
        return cls._from_db_object(context, cls(), rp_rec)

    def add_inventory(self, inventory):
        """Add one new Inventory to the resource provider.

        Fails if Inventory of the provided resource class is
        already present.
        """
        _add_inventory(self._context, self, inventory)
        self.obj_reset_changes()

    def delete_inventory(self, resource_class):
        """Delete Inventory of provided resource_class."""
        _delete_inventory(self._context, self, resource_class)
        self.obj_reset_changes()

    def set_inventory(self, inv_list):
        """Set all resource provider Inventory to be the provided list."""
        exceeded = _set_inventory(self._context, self, inv_list)
        for uuid, rclass in exceeded:
            LOG.warning('Resource provider %(uuid)s is now over-'
                        'capacity for %(resource)s',
                        {'uuid': uuid, 'resource': rclass})
        self.obj_reset_changes()

    def update_inventory(self, inventory):
        """Update one existing Inventory of the same resource class.

        Fails if no Inventory of the same class is present.
        """
        exceeded = _update_inventory(self._context, self, inventory)
        for uuid, rclass in exceeded:
            LOG.warning('Resource provider %(uuid)s is now over-'
                        'capacity for %(resource)s',
                        {'uuid': uuid, 'resource': rclass})
        self.obj_reset_changes()

    def get_aggregates(self):
        """Get the aggregate uuids associated with this resource provider."""
        return list(
            _get_aggregates_by_provider_id(self._context, self.id).values())

    def set_aggregates(self, aggregate_uuids, increment_generation=False):
        """Set the aggregate uuids associated with this resource provider.

        If an aggregate does not exist, one will be created using the
        provided uuid.

        The resource provider generation is incremented if and only if the
        increment_generation parameter is True.
        """
        _set_aggregates(self._context, self, aggregate_uuids,
                        increment_generation=increment_generation)

    def set_traits(self, traits):
        """Replaces the set of traits associated with the resource provider
        with the given list of Trait objects.

        :param traits: A list of Trait objects representing the traits to
                       associate with the provider.
        """
        _set_traits(self._context, self, traits)
        self.obj_reset_changes()

    @db_api.placement_context_manager.writer
    def _create_in_db(self, context, updates):
        """Insert the provider row, resolving parent/root linkage.

        :raises ObjectActionError: if the parent UUID equals this provider's
            own UUID or refers to a non-existent provider.
        """
        parent_id = None
        root_id = None
        # User supplied a parent, let's make sure it exists
        parent_uuid = updates.pop('parent_provider_uuid')
        if parent_uuid is not None:
            # Setting parent to ourselves doesn't make any sense
            if parent_uuid == self.uuid:
                raise exception.ObjectActionError(
                    action='create',
                    reason=_('parent provider UUID cannot be same as '
                             'UUID. Please set parent provider UUID to '
                             'None if there is no parent.'))

            parent_ids = _provider_ids_from_uuid(context, parent_uuid)
            if parent_ids is None:
                raise exception.ObjectActionError(
                    action='create',
                    reason=_('parent provider UUID does not exist.'))

            parent_id = parent_ids.id
            root_id = parent_ids.root_id
            updates['root_provider_id'] = root_id
            updates['parent_provider_id'] = parent_id
            self.root_provider_uuid = parent_ids.root_uuid

        db_rp = models.ResourceProvider()
        db_rp.update(updates)
        context.session.add(db_rp)
        context.session.flush()

        self.id = db_rp.id
        self.generation = db_rp.generation

        if root_id is None:
            # User did not specify a parent when creating this provider, so the
            # root_provider_id needs to be set to this provider's newly-created
            # internal ID
            db_rp.root_provider_id = db_rp.id
            context.session.add(db_rp)
            context.session.flush()
            self.root_provider_uuid = self.uuid

    @staticmethod
    @db_api.placement_context_manager.writer
    def _delete(context, _id):
        """Delete the provider row and its inventory/aggregate/trait links."""
        # Do a quick check to see if the provider is a parent. If it is, don't
        # allow deleting the provider. Note that the foreign key constraint on
        # resource_providers.parent_provider_id will prevent deletion of the
        # parent within the transaction below. This is just a quick
        # short-circuit outside of the transaction boundary.
        if _has_child_providers(context, _id):
            raise exception.CannotDeleteParentResourceProvider()

        # Don't delete the resource provider if it has allocations.
        rp_allocations = context.session.query(models.Allocation).\
            filter(models.Allocation.resource_provider_id == _id).\
            count()
        if rp_allocations:
            raise exception.ResourceProviderInUse()
        # Delete any inventory associated with the resource provider
        context.session.query(models.Inventory).\
            filter(models.Inventory.resource_provider_id == _id).\
            delete(synchronize_session=False)
        # Delete any aggregate associations for the resource provider
        # The name substitution on the next line is needed to satisfy pep8
        RPA_model = models.ResourceProviderAggregate
        context.session.query(RPA_model).\
            filter(RPA_model.resource_provider_id == _id).delete()
        # delete any trait associations for the resource provider
        RPT_model = models.ResourceProviderTrait
        context.session.query(RPT_model).\
            filter(RPT_model.resource_provider_id == _id).delete()
        # set root_provider_id to null to make deletion possible
        context.session.query(models.ResourceProvider).\
            filter(models.ResourceProvider.id == _id,
                   models.ResourceProvider.root_provider_id == _id).\
            update({'root_provider_id': None})
        # Now delete the RP record
        try:
            result = _delete_rp_record(context, _id)
        except sqla_exc.IntegrityError:
            # NOTE(jaypipes): Another thread snuck in and parented this
            # resource provider in between the above check for
            # _has_child_providers() and our attempt to delete the record
            raise exception.CannotDeleteParentResourceProvider()
        if not result:
            raise exception.NotFound()

    @db_api.placement_context_manager.writer
    def _update_in_db(self, context, id, updates):
        """Apply the supplied updates, validating any parent change.

        Re-parenting and un-parenting are rejected; setting a first-time
        parent is validated against loop creation in the tree.
        """
        # A list of resource providers in the same tree with the
        # resource provider to update
        same_tree = []
        if 'parent_provider_uuid' in updates:
            # TODO(jaypipes): For now, "re-parenting" and "un-parenting" are
            # not possible. If the provider already had a parent, we don't
            # allow changing that parent due to various issues, including:
            #
            # * if the new parent is a descendant of this resource provider, we
            #   introduce the possibility of a loop in the graph, which would
            #   be very bad
            # * potentially orphaning heretofore-descendants
            #
            # So, for now, let's just prevent re-parenting...
            my_ids = _provider_ids_from_uuid(context, self.uuid)
            parent_uuid = updates.pop('parent_provider_uuid')
            if parent_uuid is not None:
                parent_ids = _provider_ids_from_uuid(context, parent_uuid)
                # User supplied a parent, let's make sure it exists
                if parent_ids is None:
                    # NOTE: this is an update operation; the action label
                    # previously said 'create' (copy-paste from
                    # _create_in_db), which produced a misleading error.
                    raise exception.ObjectActionError(
                        action='update',
                        reason=_('parent provider UUID does not exist.'))
                if (my_ids.parent_id is not None and
                        my_ids.parent_id != parent_ids.id):
                    raise exception.ObjectActionError(
                        action='update',
                        reason=_('re-parenting a provider is not '
                                 'currently allowed.'))
                if my_ids.parent_uuid is None:
                    # So the user specifies a parent for an RP that doesn't
                    # have one. We have to check that by this new parent we
                    # don't create a loop in the tree. Basically the new parent
                    # cannot be the RP itself or one of its descendants.
                    # However as the RP's current parent is None the above
                    # condition is the same as "the new parent cannot be any RP
                    # from the current RP tree".
                    same_tree = ResourceProviderList.get_all_by_filters(
                        context,
                        filters={'in_tree': self.uuid})
                    rp_uuids_in_the_same_tree = [rp.uuid for rp in same_tree]
                    if parent_uuid in rp_uuids_in_the_same_tree:
                        raise exception.ObjectActionError(
                            action='update',
                            reason=_('creating loop in the provider tree is '
                                     'not allowed.'))

                updates['root_provider_id'] = parent_ids.root_id
                updates['parent_provider_id'] = parent_ids.id
                self.root_provider_uuid = parent_ids.root_uuid
            else:
                if my_ids.parent_id is not None:
                    raise exception.ObjectActionError(
                        action='update',
                        reason=_('un-parenting a provider is not '
                                 'currently allowed.'))

        db_rp = context.session.query(models.ResourceProvider).filter_by(
            id=id).first()
        db_rp.update(updates)
        context.session.add(db_rp)

        # We should also update the root providers of resource providers
        # originally in the same tree. If re-parenting is supported,
        # this logic should be changed to update only descendents of the
        # re-parented resource providers, not all the providers in the tree.
        for rp in same_tree:
            # If the parent is not updated, this clause is skipped since the
            # `same_tree` has no element.
            rp.root_provider_uuid = parent_ids.root_uuid
            db_rp = context.session.query(
                models.ResourceProvider).filter_by(id=rp.id).first()
            data = {'root_provider_id': parent_ids.root_id}
            db_rp.update(data)
            context.session.add(db_rp)

        try:
            context.session.flush()
        except sqla_exc.IntegrityError:
            # NOTE(jaypipes): Another thread snuck in and deleted the parent
            # for this resource provider in between the above check for a valid
            # parent provider and here...
            raise exception.ObjectActionError(
                action='update',
                reason=_('parent provider UUID does not exist.'))

    @staticmethod
    @db_api.placement_context_manager.writer  # For online data migration
    def _from_db_object(context, resource_provider, db_resource_provider):
        """Populate the object from a DB row, migrating root IDs lazily."""
        # Online data migration to populate root_provider_id
        # TODO(jaypipes): Remove when all root_provider_id values are NOT NULL
        if db_resource_provider['root_provider_uuid'] is None:
            rp_id = db_resource_provider['id']
            uuid = db_resource_provider['uuid']
            db_resource_provider['root_provider_uuid'] = uuid
            _set_root_provider_id(context, rp_id, rp_id)
        for field in resource_provider.fields:
            setattr(resource_provider, field, db_resource_provider[field])
        resource_provider._context = context
        resource_provider.obj_reset_changes()
        return resource_provider
-
-
@db_api.placement_context_manager.reader
def _get_providers_with_shared_capacity(ctx, rc_id, amount, member_of=None):
    """Returns a list of resource provider IDs (internal IDs, not UUIDs)
    that have capacity for a requested amount of a resource and indicate that
    they share resource via an aggregate association.

    Shared resource providers are marked with a standard trait called
    MISC_SHARES_VIA_AGGREGATE. This indicates that the provider allows its
    inventory to be consumed by other resource providers associated via an
    aggregate link.

    For example, assume we have two compute nodes, CN_1 and CN_2, each with
    inventory of VCPU and MEMORY_MB but not DISK_GB (in other words, these are
    compute nodes with no local disk). There is a resource provider called
    "NFS_SHARE" that has an inventory of DISK_GB and has the
    MISC_SHARES_VIA_AGGREGATE trait. Both the "CN_1" and "CN_2" compute node
    resource providers and the "NFS_SHARE" resource provider are associated
    with an aggregate called "AGG_1".

    The scheduler needs to determine the resource providers that can fulfill a
    request for 2 VCPU, 1024 MEMORY_MB and 100 DISK_GB.

    Clearly, no single provider can satisfy the request for all three
    resources, since neither compute node has DISK_GB inventory and the
    NFS_SHARE provider has no VCPU or MEMORY_MB inventories.

    However, if we consider the NFS_SHARE resource provider as providing
    inventory of DISK_GB for both CN_1 and CN_2, we can include CN_1 and CN_2
    as potential fits for the requested set of resources.

    To facilitate that matching query, this function returns all providers that
    indicate they share their inventory with providers in some aggregate and
    have enough capacity for the requested amount of a resource.

    To follow the example above, if we were to call
    _get_providers_with_shared_capacity(ctx, "DISK_GB", 100), we would want to
    get back the ID for the NFS_SHARE resource provider.

    :param ctx: Request context holding the placement DB session.
    :param rc_id: Internal ID of the requested resource class.
    :param amount: Amount of the requested resource.
    :param member_of: When present, contains a list of lists of aggregate
                      uuids that are used to filter the returned list of
                      resource providers that *directly* belong to the
                      aggregates referenced.
    :returns: A list of internal resource provider IDs, empty when member_of
              was given but matched no providers.
    """
    # The SQL we need to generate here looks like this:
    #
    # SELECT rp.id
    # FROM resource_providers AS rp
    #   INNER JOIN resource_provider_traits AS rpt
    #     ON rp.id = rpt.resource_provider_id
    #   INNER JOIN traits AS t
    #     ON rpt.trait_id = t.id
    #     AND t.name = "MISC_SHARES_VIA_AGGREGATE"
    #   INNER JOIN inventories AS inv
    #     ON rp.id = inv.resource_provider_id
    #     AND inv.resource_class_id = $rc_id
    #   LEFT JOIN (
    #     SELECT resource_provider_id, SUM(used) as used
    #     FROM allocations
    #     WHERE resource_class_id = $rc_id
    #     GROUP BY resource_provider_id
    #   ) AS usage
    #     ON rp.id = usage.resource_provider_id
    # WHERE COALESCE(usage.used, 0) + $amount <= (
    #   inv.total - inv.reserved) * inv.allocation_ratio
    # ) AND
    #   inv.min_unit <= $amount AND
    #   inv.max_unit >= $amount AND
    #   $amount % inv.step_size = 0
    # GROUP BY rp.id

    rp_tbl = sa.alias(_RP_TBL, name='rp')
    inv_tbl = sa.alias(_INV_TBL, name='inv')
    t_tbl = sa.alias(_TRAIT_TBL, name='t')
    rpt_tbl = sa.alias(_RP_TRAIT_TBL, name='rpt')

    rp_to_rpt_join = sa.join(
        rp_tbl, rpt_tbl,
        rp_tbl.c.id == rpt_tbl.c.resource_provider_id,
    )

    rpt_to_t_join = sa.join(
        rp_to_rpt_join, t_tbl,
        sa.and_(
            rpt_tbl.c.trait_id == t_tbl.c.id,
            # The traits table wants unicode trait names, but os_traits
            # presents native str, so we need to cast.
            t_tbl.c.name == six.text_type(os_traits.MISC_SHARES_VIA_AGGREGATE),
        ),
    )

    rp_to_inv_join = sa.join(
        rpt_to_t_join, inv_tbl,
        sa.and_(
            rpt_tbl.c.resource_provider_id == inv_tbl.c.resource_provider_id,
            inv_tbl.c.resource_class_id == rc_id,
        ),
    )

    # Derived table summing current usage per provider for this resource
    # class; LEFT JOINed so providers with no allocations still appear.
    usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
                       sql.func.sum(_ALLOC_TBL.c.used).label('used')])
    usage = usage.where(_ALLOC_TBL.c.resource_class_id == rc_id)
    usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id)
    usage = sa.alias(usage, name='usage')

    inv_to_usage_join = sa.outerjoin(
        rp_to_inv_join, usage,
        inv_tbl.c.resource_provider_id == usage.c.resource_provider_id,
    )

    # Capacity check plus the min_unit/max_unit/step_size constraints.
    where_conds = sa.and_(
        func.coalesce(usage.c.used, 0) + amount <= (
            inv_tbl.c.total - inv_tbl.c.reserved) * inv_tbl.c.allocation_ratio,
        inv_tbl.c.min_unit <= amount,
        inv_tbl.c.max_unit >= amount,
        amount % inv_tbl.c.step_size == 0)

    # If 'member_of' has values, do a separate lookup to identify the
    # resource providers that meet the member_of constraints.
    if member_of:
        rps_in_aggs = _provider_ids_matching_aggregates(ctx, member_of)
        if not rps_in_aggs:
            # Short-circuit. The user either asked for a non-existing
            # aggregate or there were no resource providers that matched
            # the requirements...
            return []
        # NOTE(review): relies on SQLAlchemy 1.x BooleanClauseList.append
        # to AND in another condition — confirm if upgrading SQLAlchemy.
        where_conds.append(rp_tbl.c.id.in_(rps_in_aggs))

    sel = sa.select([rp_tbl.c.id]).select_from(inv_to_usage_join)
    sel = sel.where(where_conds)
    sel = sel.group_by(rp_tbl.c.id)

    return [r[0] for r in ctx.session.execute(sel)]
-
-
@base.VersionedObjectRegistry.register_if(False)
class ResourceProviderList(base.ObjectListBase, base.VersionedObject):
    """A filterable list of ResourceProvider objects."""

    fields = {
        'objects': fields.ListOfObjectsField('ResourceProvider'),
    }

    @staticmethod
    @db_api.placement_context_manager.reader
    def _get_all_by_filters_from_db(context, filters):
        """Query provider rows matching the supplied filters.

        :param context: Request context holding the placement DB session.
        :param filters: Optional dict of filters; see the example below and
            get_all_by_filters() for the supported keys.
        :returns: A list of dicts, one per matching provider row.
        :raises TraitNotFound: if a required/forbidden trait name is unknown.
        """
        # Eg. filters can be:
        #  filters = {
        #      'name': <name>,
        #      'uuid': <uuid>,
        #      'member_of': [[<aggregate_uuid>, <aggregate_uuid>],
        #                    [<aggregate_uuid>]]
        #      'resources': {
        #          'VCPU': 1,
        #          'MEMORY_MB': 1024
        #      },
        #      'in_tree': <uuid>,
        #      'required': [<trait_name>, ...]
        #  }
        if not filters:
            filters = {}
        else:
            # Since we modify the filters, copy them so that we don't modify
            # them in the calling program.
            filters = copy.deepcopy(filters)
        name = filters.pop('name', None)
        uuid = filters.pop('uuid', None)
        member_of = filters.pop('member_of', [])
        required = set(filters.pop('required', []))
        # Trait names prefixed with '!' are forbidden rather than required.
        forbidden = set([trait for trait in required
                         if trait.startswith('!')])
        required = required - forbidden
        forbidden = set([trait.lstrip('!') for trait in forbidden])

        resources = filters.pop('resources', {})
        # NOTE(sbauza): We want to key the dict by the resource class IDs
        # and we want to make sure those class names aren't incorrect.
        resources = {_RC_CACHE.id_from_string(r_name): amount
                     for r_name, amount in resources.items()}
        rp = sa.alias(_RP_TBL, name="rp")
        root_rp = sa.alias(_RP_TBL, name="root_rp")
        parent_rp = sa.alias(_RP_TBL, name="parent_rp")

        cols = [
            rp.c.id,
            rp.c.uuid,
            rp.c.name,
            rp.c.generation,
            rp.c.updated_at,
            rp.c.created_at,
            root_rp.c.uuid.label("root_provider_uuid"),
            parent_rp.c.uuid.label("parent_provider_uuid"),
        ]

        # TODO(jaypipes): Convert this to an inner join once all
        # root_provider_id values are NOT NULL
        rp_to_root = sa.outerjoin(rp, root_rp,
                                  rp.c.root_provider_id == root_rp.c.id)
        rp_to_parent = sa.outerjoin(rp_to_root, parent_rp,
                                    rp.c.parent_provider_id == parent_rp.c.id)

        query = sa.select(cols).select_from(rp_to_parent)

        if name:
            query = query.where(rp.c.name == name)
        if uuid:
            query = query.where(rp.c.uuid == uuid)
        if 'in_tree' in filters:
            # The 'in_tree' parameter is the UUID of a resource provider that
            # the caller wants to limit the returned providers to only those
            # within its "provider tree". So, we look up the resource provider
            # having the UUID specified by the 'in_tree' parameter and grab the
            # root_provider_id value of that record. We can then ask for only
            # those resource providers having a root_provider_id of that value.
            tree_uuid = filters.pop('in_tree')
            tree_ids = _provider_ids_from_uuid(context, tree_uuid)
            if tree_ids is None:
                # List operations should simply return an empty list when a
                # non-existing resource provider UUID is given.
                return []
            root_id = tree_ids.root_id
            # TODO(jaypipes): Remove this OR condition when root_provider_id
            # is not nullable in the database and all resource provider records
            # have populated the root provider ID.
            where_cond = sa.or_(rp.c.id == root_id,
                                rp.c.root_provider_id == root_id)
            query = query.where(where_cond)

        # If 'member_of' has values, do a separate lookup to identify the
        # resource providers that meet the member_of constraints.
        if member_of:
            rps_in_aggs = _provider_ids_matching_aggregates(context, member_of)
            if not rps_in_aggs:
                # Short-circuit. The user either asked for a non-existing
                # aggregate or there were no resource providers that matched
                # the requirements...
                return []
            query = query.where(rp.c.id.in_(rps_in_aggs))

        # If 'required' has values, add a filter to limit results to providers
        # possessing *all* of the listed traits.
        if required:
            trait_map = _trait_ids_from_names(context, required)
            if len(trait_map) != len(required):
                missing = required - set(trait_map)
                raise exception.TraitNotFound(names=', '.join(missing))
            rp_ids = _get_provider_ids_having_all_traits(context, trait_map)
            if not rp_ids:
                # If no providers have the required traits, we're done
                return []
            query = query.where(rp.c.id.in_(rp_ids))

        # If 'forbidden' has values, filter out those providers that have
        # that trait as one their traits.
        if forbidden:
            trait_map = _trait_ids_from_names(context, forbidden)
            if len(trait_map) != len(forbidden):
                missing = forbidden - set(trait_map)
                raise exception.TraitNotFound(names=', '.join(missing))
            rp_ids = _get_provider_ids_having_any_trait(context, trait_map)
            if rp_ids:
                query = query.where(~rp.c.id.in_(rp_ids))

        if not resources:
            # Returns quickly the list in case we don't need to check the
            # resource usage
            res = context.session.execute(query).fetchall()
            return [dict(r) for r in res]

        # NOTE(sbauza): In case we want to look at the resource criteria, then
        # the SQL generated from this case looks something like:
        # SELECT
        #   rp.*
        # FROM resource_providers AS rp
        # JOIN inventories AS inv
        # ON rp.id = inv.resource_provider_id
        # LEFT JOIN (
        #    SELECT resource_provider_id, resource_class_id, SUM(used) AS used
        #    FROM allocations
        #    WHERE resource_class_id IN ($RESOURCE_CLASSES)
        #    GROUP BY resource_provider_id, resource_class_id
        # ) AS usage
        #     ON inv.resource_provider_id = usage.resource_provider_id
        #     AND inv.resource_class_id = usage.resource_class_id
        # AND (inv.resource_class_id = $X AND (used + $AMOUNT_X <= (
        #        total - reserved) * inv.allocation_ratio) AND
        #        inv.min_unit <= $AMOUNT_X AND inv.max_unit >= $AMOUNT_X AND
        #        $AMOUNT_X % inv.step_size == 0)
        #      OR (inv.resource_class_id = $Y AND (used + $AMOUNT_Y <= (
        #        total - reserved) * inv.allocation_ratio) AND
        #        inv.min_unit <= $AMOUNT_Y AND inv.max_unit >= $AMOUNT_Y AND
        #        $AMOUNT_Y % inv.step_size == 0)
        #      OR (inv.resource_class_id = $Z AND (used + $AMOUNT_Z <= (
        #        total - reserved) * inv.allocation_ratio) AND
        #        inv.min_unit <= $AMOUNT_Z AND inv.max_unit >= $AMOUNT_Z AND
        #        $AMOUNT_Z % inv.step_size == 0))
        # GROUP BY rp.id
        # HAVING
        #  COUNT(DISTINCT(inv.resource_class_id)) == len($RESOURCE_CLASSES)
        #
        # with a possible additional WHERE clause for the name and uuid that
        # comes from the above filters

        # First JOIN between inventories and RPs is here
        inv_join = sa.join(rp_to_parent, _INV_TBL,
                           rp.c.id == _INV_TBL.c.resource_provider_id)

        # Now, below is the LEFT JOIN for getting the allocations usage
        usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
                           _ALLOC_TBL.c.resource_class_id,
                           sql.func.sum(_ALLOC_TBL.c.used).label('used')])
        usage = usage.where(_ALLOC_TBL.c.resource_class_id.in_(resources))
        usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id,
                               _ALLOC_TBL.c.resource_class_id)
        usage = sa.alias(usage, name='usage')
        usage_join = sa.outerjoin(inv_join, usage,
            sa.and_(
                usage.c.resource_provider_id == (
                    _INV_TBL.c.resource_provider_id),
                usage.c.resource_class_id == _INV_TBL.c.resource_class_id))

        # And finally, we verify for each resource class if the requested
        # amount isn't more than the left space (considering the allocation
        # ratio, the reserved space and the min and max amount possible sizes)
        where_clauses = [
            sa.and_(
                _INV_TBL.c.resource_class_id == r_idx,
                (func.coalesce(usage.c.used, 0) + amount <= (
                    _INV_TBL.c.total - _INV_TBL.c.reserved
                ) * _INV_TBL.c.allocation_ratio),
                _INV_TBL.c.min_unit <= amount,
                _INV_TBL.c.max_unit >= amount,
                amount % _INV_TBL.c.step_size == 0
            )
            for (r_idx, amount) in resources.items()]
        query = query.select_from(usage_join)
        query = query.where(sa.or_(*where_clauses))
        query = query.group_by(rp.c.id, root_rp.c.uuid, parent_rp.c.uuid)
        # NOTE(sbauza): Only RPs having all the asked resources can be provided
        query = query.having(sql.func.count(
            sa.distinct(_INV_TBL.c.resource_class_id)) == len(resources))

        res = context.session.execute(query).fetchall()
        return [dict(r) for r in res]

    @classmethod
    def get_all_by_filters(cls, context, filters=None):
        """Returns a list of `ResourceProvider` objects that have sufficient
        resources in their inventories to satisfy the amounts specified in the
        `filters` parameter.

        If no resource providers can be found, the function will return an
        empty list.

        :param context: `nova.context.RequestContext` that may be used to grab
                        a DB connection.
        :param filters: Can be `name`, `uuid`, `member_of`, `in_tree` or
                        `resources` where `member_of` is a list of list of
                        aggregate UUIDs, `in_tree` is a UUID of a resource
                        provider that we can use to find the root provider ID
                        of the tree of providers to filter results by and
                        `resources` is a dict of amounts keyed by resource
                        classes.
        :type filters: dict
        """
        resource_providers = cls._get_all_by_filters_from_db(context, filters)
        return base.obj_make_list(context, cls(context),
                                  ResourceProvider, resource_providers)
-
-
@base.VersionedObjectRegistry.register_if(False)
class Inventory(base.VersionedObject, base.TimestampedObject):
    """A quantity of one resource class exposed by a resource provider."""

    fields = {
        'id': fields.IntegerField(read_only=True),
        'resource_provider': fields.ObjectField('ResourceProvider'),
        'resource_class': rc_fields.ResourceClassField(read_only=True),
        'total': fields.NonNegativeIntegerField(),
        'reserved': fields.NonNegativeIntegerField(default=0),
        'min_unit': fields.NonNegativeIntegerField(default=1),
        'max_unit': fields.NonNegativeIntegerField(default=1),
        'step_size': fields.NonNegativeIntegerField(default=1),
        'allocation_ratio': fields.NonNegativeFloatField(default=1.0),
    }

    @property
    def capacity(self):
        """Inventory capacity, adjusted by allocation_ratio."""
        usable = self.total - self.reserved
        return int(usable * self.allocation_ratio)
-
-
@db_api.placement_context_manager.reader
def _get_inventory_by_provider_id(ctx, rp_id):
    """Return a list of dicts of inventory records for one provider.

    :param ctx: Request context holding the placement DB session.
    :param rp_id: Internal ID of the resource provider.
    """
    inv = sa.alias(_INV_TBL, name="i")
    sel = sa.select([
        inv.c.resource_class_id,
        inv.c.total,
        inv.c.reserved,
        inv.c.min_unit,
        inv.c.max_unit,
        inv.c.step_size,
        inv.c.allocation_ratio,
        inv.c.updated_at,
        inv.c.created_at,
    ]).where(inv.c.resource_provider_id == rp_id)

    return [dict(row) for row in ctx.session.execute(sel)]
-
-
@base.VersionedObjectRegistry.register_if(False)
class InventoryList(base.ObjectListBase, base.VersionedObject):
    """An ordered collection of Inventory objects for one provider."""

    fields = {
        'objects': fields.ListOfObjectsField('Inventory'),
    }

    def find(self, res_class):
        """Return the inventory record from the list of Inventory records that
        matches the supplied resource class, or None.

        :param res_class: A string name of a resource class. NOTE: integer
            identifiers are NOT accepted; callers must resolve IDs to names
            before calling this method.
        :raises ValueError: if res_class is not a string.
        """
        if not isinstance(res_class, six.string_types):
            # The previous bare ValueError gave callers no hint about what
            # went wrong; include the offending type in the message.
            raise ValueError(
                'Expected string resource class name, got: %s' %
                type(res_class))

        for inv_rec in self.objects:
            if inv_rec.resource_class == res_class:
                return inv_rec

    @classmethod
    def get_all_by_resource_provider(cls, context, rp):
        """Return an InventoryList for all inventory of the given provider."""
        db_inv = _get_inventory_by_provider_id(context, rp.id)
        # Build up a list of Inventory objects, setting the Inventory object
        # fields to the same-named database record field we got from
        # _get_inventory_by_provider_id(). We already have the ResourceProvider
        # object so we just pass that object to the Inventory object
        # constructor as-is
        objs = [
            Inventory(
                context, resource_provider=rp,
                resource_class=_RC_CACHE.string_from_id(
                    rec['resource_class_id']),
                **rec)
            for rec in db_inv
        ]
        inv_list = cls(context, objects=objs)
        return inv_list
-
-
@base.VersionedObjectRegistry.register_if(False)
class Allocation(base.VersionedObject, base.TimestampedObject):
    """A record of an amount of one resource class consumed from one
    resource provider by one consumer.
    """

    fields = {
        # Internal database identifier of the allocation record.
        'id': fields.IntegerField(),
        # The provider from which the resource is consumed.
        'resource_provider': fields.ObjectField('ResourceProvider'),
        # The consumer holding this allocation; never None.
        'consumer': fields.ObjectField('Consumer', nullable=False),
        # The class of resource being consumed.
        'resource_class': rc_fields.ResourceClassField(),
        # Amount of the resource class consumed by the consumer.
        'used': fields.IntegerField(),
    }
-
-
@db_api.placement_context_manager.writer
def _delete_allocations_for_consumer(ctx, consumer_id):
    """Remove all allocation rows belonging to the given consumer.

    Runs inside the writer transaction, so if a subsequent write of
    replacement allocations fails, this deletion is rolled back too.
    """
    ctx.session.execute(
        _ALLOC_TBL.delete().where(_ALLOC_TBL.c.consumer_id == consumer_id))
-
-
@db_api.placement_context_manager.writer
def _delete_allocations_by_ids(ctx, alloc_ids):
    """Remove the allocation rows whose internal IDs are in alloc_ids."""
    stmt = _ALLOC_TBL.delete().where(_ALLOC_TBL.c.id.in_(alloc_ids))
    ctx.session.execute(stmt)
-
-
def _check_capacity_exceeded(ctx, allocs):
    """Checks to see if the supplied allocation records would result in any of
    the inventories involved having their capacity exceeded.

    Raises an InvalidAllocationCapacityExceeded exception if any inventory
    would be exhausted by the allocation. Raises an
    InvalidAllocationConstraintsViolated exception if any of the `step_size`,
    `min_unit` or `max_unit` constraints in an inventory will be violated
    by any one of the allocations. Raises an InvalidInventory exception if
    a provider has no inventory at all for a requested resource class.

    If no inventories would be exceeded or violated by the allocations, the
    function returns a dict, keyed by resource provider UUID, of
    `ResourceProvider` objects that contain the generation at the time of
    the check.

    :param ctx: `nova.context.RequestContext` that has an oslo_db Session
    :param allocs: List of `Allocation` objects to check
    """
    # The SQL generated below looks like this:
    # SELECT
    #   rp.id,
    #   rp.uuid,
    #   rp.generation,
    #   inv.resource_class_id,
    #   inv.total,
    #   inv.reserved,
    #   inv.allocation_ratio,
    #   allocs.used
    # FROM resource_providers AS rp
    # JOIN inventories AS inv
    #  ON rp.id = inv.resource_provider_id
    # LEFT JOIN (
    #   SELECT resource_provider_id, resource_class_id, SUM(used) AS used
    #   FROM allocations
    #   WHERE resource_class_id IN ($RESOURCE_CLASSES)
    #   AND resource_provider_id IN ($RESOURCE_PROVIDERS)
    #   GROUP BY resource_provider_id, resource_class_id
    # ) AS allocs
    #  ON inv.resource_provider_id = allocs.resource_provider_id
    #  AND inv.resource_class_id = allocs.resource_class_id
    # WHERE rp.id IN ($RESOURCE_PROVIDERS)
    # AND inv.resource_class_id IN ($RESOURCE_CLASSES)
    #
    # We then take the results of the above and determine if any of the
    # inventory will have its capacity exceeded.
    rc_ids = {_RC_CACHE.id_from_string(a.resource_class) for a in allocs}
    provider_uuids = {a.resource_provider.uuid for a in allocs}
    provider_ids = {a.resource_provider.id for a in allocs}
    # NOTE: `usage` is successively rebound below: first a SELECT of summed
    # allocation amounts, then an alias of that subquery for joining.
    usage = sa.select([_ALLOC_TBL.c.resource_provider_id,
                       _ALLOC_TBL.c.resource_class_id,
                       sql.func.sum(_ALLOC_TBL.c.used).label('used')])
    usage = usage.where(
        sa.and_(_ALLOC_TBL.c.resource_class_id.in_(rc_ids),
                _ALLOC_TBL.c.resource_provider_id.in_(provider_ids)))
    usage = usage.group_by(_ALLOC_TBL.c.resource_provider_id,
                           _ALLOC_TBL.c.resource_class_id)
    usage = sa.alias(usage, name='usage')

    inv_join = sql.join(_RP_TBL, _INV_TBL,
            sql.and_(_RP_TBL.c.id == _INV_TBL.c.resource_provider_id,
                     _INV_TBL.c.resource_class_id.in_(rc_ids)))
    primary_join = sql.outerjoin(inv_join, usage,
        sql.and_(
            _INV_TBL.c.resource_provider_id == usage.c.resource_provider_id,
            _INV_TBL.c.resource_class_id == usage.c.resource_class_id)
    )
    cols_in_output = [
        _RP_TBL.c.id.label('resource_provider_id'),
        _RP_TBL.c.uuid,
        _RP_TBL.c.generation,
        _INV_TBL.c.resource_class_id,
        _INV_TBL.c.total,
        _INV_TBL.c.reserved,
        _INV_TBL.c.allocation_ratio,
        _INV_TBL.c.min_unit,
        _INV_TBL.c.max_unit,
        _INV_TBL.c.step_size,
        usage.c.used,
    ]

    sel = sa.select(cols_in_output).select_from(primary_join)
    sel = sel.where(
        sa.and_(_RP_TBL.c.id.in_(provider_ids),
                _INV_TBL.c.resource_class_id.in_(rc_ids)))
    records = ctx.session.execute(sel)
    # Create a map keyed by (rp_uuid, res_class) for the records in the DB
    usage_map = {}
    provs_with_inv = set()
    for record in records:
        map_key = (record['uuid'], record['resource_class_id'])
        if map_key in usage_map:
            raise KeyError("%s already in usage_map, bad query" % str(map_key))
        usage_map[map_key] = record
        provs_with_inv.add(record["uuid"])
    # Ensure that all providers have existing inventory
    missing_provs = provider_uuids - provs_with_inv
    if missing_provs:
        class_str = ', '.join([_RC_CACHE.string_from_id(rc_id)
                               for rc_id in rc_ids])
        provider_str = ', '.join(missing_provs)
        raise exception.InvalidInventory(resource_class=class_str,
                resource_provider=provider_str)

    res_providers = {}
    rp_resource_class_sum = collections.defaultdict(
        lambda: collections.defaultdict(int))
    for alloc in allocs:
        rc_id = _RC_CACHE.id_from_string(alloc.resource_class)
        rp_uuid = alloc.resource_provider.uuid
        if rp_uuid not in res_providers:
            res_providers[rp_uuid] = alloc.resource_provider
        amount_needed = alloc.used
        # Track the aggregate request per (provider, class) so a set of
        # allocations that individually fit but jointly exceed capacity is
        # still rejected.
        rp_resource_class_sum[rp_uuid][rc_id] += amount_needed
        # No use checking usage if we're not asking for anything
        if amount_needed == 0:
            continue
        key = (rp_uuid, rc_id)
        try:
            usage = usage_map[key]
        except KeyError:
            # The resource class at rc_id is not in the usage map.
            raise exception.InvalidInventory(
                resource_class=alloc.resource_class,
                resource_provider=rp_uuid)
        allocation_ratio = usage['allocation_ratio']
        min_unit = usage['min_unit']
        max_unit = usage['max_unit']
        step_size = usage['step_size']

        # check min_unit, max_unit, step_size
        if (amount_needed < min_unit or amount_needed > max_unit or
                amount_needed % step_size != 0):
            LOG.warning(
                "Allocation for %(rc)s on resource provider %(rp)s "
                "violates min_unit, max_unit, or step_size. "
                "Requested: %(requested)s, min_unit: %(min_unit)s, "
                "max_unit: %(max_unit)s, step_size: %(step_size)s",
                {'rc': alloc.resource_class,
                 'rp': rp_uuid,
                 'requested': amount_needed,
                 'min_unit': min_unit,
                 'max_unit': max_unit,
                 'step_size': step_size})
            raise exception.InvalidAllocationConstraintsViolated(
                resource_class=alloc.resource_class,
                resource_provider=rp_uuid)

        # usage["used"] can be returned as None
        used = usage['used'] or 0
        capacity = (usage['total'] - usage['reserved']) * allocation_ratio
        if (capacity < (used + amount_needed) or
                capacity < (used + rp_resource_class_sum[rp_uuid][rc_id])):
            LOG.warning(
                "Over capacity for %(rc)s on resource provider %(rp)s. "
                "Needed: %(needed)s, Used: %(used)s, Capacity: %(cap)s",
                {'rc': alloc.resource_class,
                 'rp': rp_uuid,
                 'needed': amount_needed,
                 'used': used,
                 'cap': capacity})
            raise exception.InvalidAllocationCapacityExceeded(
                resource_class=alloc.resource_class,
                resource_provider=rp_uuid)
    return res_providers
-
-
@db_api.placement_context_manager.reader
def _get_allocations_by_provider_id(ctx, rp_id):
    """Return a list of dicts of allocation data for every allocation
    against the resource provider with the supplied internal ID, joined
    with its consumer, project and user records.
    """
    alloc = sa.alias(_ALLOC_TBL, name="a")
    consumer = sa.alias(_CONSUMER_TBL, name="c")
    project = sa.alias(_PROJECT_TBL, name="p")
    user = sa.alias(_USER_TBL, name="u")
    # TODO(jaypipes): change this join to be on ID not UUID
    join_chain = sa.join(
        alloc, consumer, alloc.c.consumer_id == consumer.c.uuid)
    join_chain = sa.join(
        join_chain, project, consumer.c.project_id == project.c.id)
    join_chain = sa.join(
        join_chain, user, consumer.c.user_id == user.c.id)
    sel = sa.select([
        alloc.c.id,
        alloc.c.resource_class_id,
        alloc.c.used,
        alloc.c.updated_at,
        alloc.c.created_at,
        consumer.c.id.label("consumer_id"),
        consumer.c.generation.label("consumer_generation"),
        sql.func.coalesce(
            alloc.c.consumer_id).label("consumer_uuid"),
        project.c.id.label("project_id"),
        project.c.external_id.label("project_external_id"),
        user.c.id.label("user_id"),
        user.c.external_id.label("user_external_id"),
    ])
    sel = sel.select_from(join_chain)
    sel = sel.where(alloc.c.resource_provider_id == rp_id)

    return [dict(row) for row in ctx.session.execute(sel)]
-
-
@db_api.placement_context_manager.reader
def _get_allocations_by_consumer_uuid(ctx, consumer_uuid):
    """Return a list of dicts of allocation data for every allocation held
    by the supplied consumer UUID, joined with its resource provider,
    consumer, project and user records.

    :param ctx: `nova.context.RequestContext` that has an oslo_db Session
    :param consumer_uuid: UUID of the consumer whose allocations to fetch
    """
    allocs = sa.alias(_ALLOC_TBL, name="a")
    rp = sa.alias(_RP_TBL, name="rp")
    consumer = sa.alias(_CONSUMER_TBL, name="c")
    project = sa.alias(_PROJECT_TBL, name="p")
    user = sa.alias(_USER_TBL, name="u")
    cols = [
        allocs.c.id,
        allocs.c.resource_provider_id,
        rp.c.name.label("resource_provider_name"),
        rp.c.uuid.label("resource_provider_uuid"),
        rp.c.generation.label("resource_provider_generation"),
        allocs.c.resource_class_id,
        allocs.c.used,
        consumer.c.id.label("consumer_id"),
        consumer.c.generation.label("consumer_generation"),
        # Fall back to the allocation's consumer_id for allocations written
        # before a consumer record existed ("incomplete" consumers).
        sql.func.coalesce(
            consumer.c.uuid, allocs.c.consumer_id).label("consumer_uuid"),
        project.c.id.label("project_id"),
        project.c.external_id.label("project_external_id"),
        user.c.id.label("user_id"),
        user.c.external_id.label("user_external_id"),
    ]
    # Build up the joins of the five tables we need to interact with.
    rp_join = sa.join(allocs, rp, allocs.c.resource_provider_id == rp.c.id)
    consumer_join = sa.join(rp_join, consumer,
                            allocs.c.consumer_id == consumer.c.uuid)
    project_join = sa.join(consumer_join, project,
                           consumer.c.project_id == project.c.id)
    user_join = sa.join(project_join, user,
                        consumer.c.user_id == user.c.id)

    sel = sa.select(cols).select_from(user_join)
    sel = sel.where(allocs.c.consumer_id == consumer_uuid)

    return [dict(r) for r in ctx.session.execute(sel)]
-
-
@db_api.placement_context_manager.writer.independent
def _create_incomplete_consumers_for_provider(ctx, rp_id):
    # TODO(jaypipes): Remove in Stein after a blocker migration is added.
    """Creates consumer record if consumer relationship between allocations ->
    consumers table is missing for any allocation on the supplied provider
    internal ID, using the "incomplete consumer" project and user CONF options.

    NOTE(review): the ``.independent`` decorator appears to run this in its
    own transaction, separate from any caller's — per oslo.db enginefacade
    semantics; confirm before relying on commit ordering.
    """
    # Find allocations on this provider that have no matching consumer row.
    alloc_to_consumer = sa.outerjoin(
        _ALLOC_TBL, consumer_obj.CONSUMER_TBL,
        _ALLOC_TBL.c.consumer_id == consumer_obj.CONSUMER_TBL.c.uuid)
    sel = sa.select([_ALLOC_TBL.c.consumer_id])
    sel = sel.select_from(alloc_to_consumer)
    sel = sel.where(
        sa.and_(
            _ALLOC_TBL.c.resource_provider_id == rp_id,
            consumer_obj.CONSUMER_TBL.c.id.is_(None)))
    missing = ctx.session.execute(sel).fetchall()
    if missing:
        # Do a single INSERT for all missing consumer relationships for the
        # provider
        incomplete_proj_id = project_obj.ensure_incomplete_project(ctx)
        incomplete_user_id = user_obj.ensure_incomplete_user(ctx)

        # Re-select the orphaned consumer UUIDs, now paired with the
        # placeholder project/user ids, to feed INSERT ... FROM SELECT.
        cols = [
            _ALLOC_TBL.c.consumer_id,
            incomplete_proj_id,
            incomplete_user_id,
        ]
        sel = sa.select(cols)
        sel = sel.select_from(alloc_to_consumer)
        sel = sel.where(
            sa.and_(
                _ALLOC_TBL.c.resource_provider_id == rp_id,
                consumer_obj.CONSUMER_TBL.c.id.is_(None)))
        # NOTE(mnaser): It is possible to have multiple consumers having many
        #               allocations to the same resource provider, which would
        #               make the INSERT FROM SELECT fail due to duplicates.
        sel = sel.group_by(_ALLOC_TBL.c.consumer_id)
        target_cols = ['uuid', 'project_id', 'user_id']
        ins_stmt = consumer_obj.CONSUMER_TBL.insert().from_select(
            target_cols, sel)
        res = ctx.session.execute(ins_stmt)
        if res.rowcount > 0:
            LOG.info("Online data migration to fix incomplete consumers "
                     "for resource provider %s has been run. Migrated %d "
                     "incomplete consumer records on the fly.", rp_id,
                     res.rowcount)
-
-
@db_api.placement_context_manager.writer.independent
def _create_incomplete_consumer(ctx, consumer_id):
    # TODO(jaypipes): Remove in Stein after a blocker migration is added.
    """Creates consumer record if consumer relationship between allocations ->
    consumers table is missing for the supplied consumer UUID, using the
    "incomplete consumer" project and user CONF options.
    """
    # Find allocations for this consumer that have no matching consumer row.
    alloc_to_consumer = sa.outerjoin(
        _ALLOC_TBL, consumer_obj.CONSUMER_TBL,
        _ALLOC_TBL.c.consumer_id == consumer_obj.CONSUMER_TBL.c.uuid)
    sel = sa.select([_ALLOC_TBL.c.consumer_id])
    sel = sel.select_from(alloc_to_consumer)
    sel = sel.where(
        sa.and_(
            _ALLOC_TBL.c.consumer_id == consumer_id,
            consumer_obj.CONSUMER_TBL.c.id.is_(None)))
    missing = ctx.session.execute(sel).fetchall()
    if missing:
        # Insert a single placeholder consumer row owned by the configured
        # "incomplete" project and user.
        incomplete_proj_id = project_obj.ensure_incomplete_project(ctx)
        incomplete_user_id = user_obj.ensure_incomplete_user(ctx)

        ins_stmt = consumer_obj.CONSUMER_TBL.insert().values(
            uuid=consumer_id, project_id=incomplete_proj_id,
            user_id=incomplete_user_id)
        res = ctx.session.execute(ins_stmt)
        if res.rowcount > 0:
            LOG.info("Online data migration to fix incomplete consumers "
                     "for consumer %s has been run. Migrated %d incomplete "
                     "consumer records on the fly.", consumer_id, res.rowcount)
-
-
@base.VersionedObjectRegistry.register_if(False)
class AllocationList(base.ObjectListBase, base.VersionedObject):
    """A list of Allocation objects plus the logic to atomically replace
    or delete the allocations of one or more consumers.
    """

    # The number of times to retry set_allocations if there has
    # been a resource provider (not consumer) generation conflict.
    RP_CONFLICT_RETRY_COUNT = 10

    fields = {
        'objects': fields.ListOfObjectsField('Allocation'),
    }

    @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
    @db_api.placement_context_manager.writer
    def _set_allocations(self, context, allocs):
        """Write a set of allocations.

        We must check that there is capacity for each allocation.
        If there is not we roll back the entire set.

        :raises `exception.ResourceClassNotFound` if any resource class in any
                allocation in allocs cannot be found in either the standard
                classes or the DB.
        :raises `exception.InvalidAllocationCapacityExceeded` if any inventory
                would be exhausted by the allocation.
        :raises `InvalidAllocationConstraintsViolated` if any of the
                `step_size`, `min_unit` or `max_unit` constraints in an
                inventory will be violated by any one of the allocations.
        :raises `ConcurrentUpdateDetected` if a generation for a resource
                provider or consumer failed its increment check.
        """
        # First delete any existing allocations for any consumers. This
        # provides a clean slate for the consumers mentioned in the list of
        # allocations being manipulated.
        consumer_ids = set(alloc.consumer.uuid for alloc in allocs)
        for consumer_id in consumer_ids:
            _delete_allocations_for_consumer(context, consumer_id)

        # Before writing any allocation records, we check that the submitted
        # allocations do not cause any inventory capacity to be exceeded for
        # any resource provider and resource class involved in the allocation
        # transaction. _check_capacity_exceeded() raises an exception if any
        # inventory capacity is exceeded. If capacity is not exceeded, the
        # function returns a list of ResourceProvider objects containing the
        # generation of the resource provider at the time of the check. These
        # objects are used at the end of the allocation transaction as a guard
        # against concurrent updates.
        #
        # Don't check capacity when alloc.used is zero. Zero is not a valid
        # amount when making an allocation (the minimum consumption of a
        # resource is one) but is used in this method to indicate a need for
        # removal. Providing 0 is controlled at the HTTP API layer where PUT
        # /allocations does not allow empty allocations. When POST /allocations
        # is implemented it will for the special case of atomically setting and
        # removing different allocations in the same request.
        # _check_capacity_exceeded will raise a ResourceClassNotFound if any
        # allocation is using a resource class that does not exist.
        visited_consumers = {}
        visited_rps = _check_capacity_exceeded(context, allocs)
        for alloc in allocs:
            if alloc.consumer.id not in visited_consumers:
                visited_consumers[alloc.consumer.id] = alloc.consumer

            # If alloc.used is set to zero that is a signal that we don't want
            # to (re-)create any allocations for this resource class.
            # _delete_allocations_for_consumer above has already wiped out
            # allocations so just continue
            if alloc.used == 0:
                continue
            consumer_id = alloc.consumer.uuid
            rp = alloc.resource_provider
            rc_id = _RC_CACHE.id_from_string(alloc.resource_class)
            ins_stmt = _ALLOC_TBL.insert().values(
                resource_provider_id=rp.id,
                resource_class_id=rc_id,
                consumer_id=consumer_id,
                used=alloc.used)
            res = context.session.execute(ins_stmt)
            alloc.id = res.lastrowid
            alloc.obj_reset_changes()

        # Generation checking happens here. If the inventory for this resource
        # provider changed out from under us, this will raise a
        # ConcurrentUpdateDetected which can be caught by the caller to choose
        # to try again. It will also rollback the transaction so that these
        # changes always happen atomically.
        for rp in visited_rps.values():
            rp.generation = _increment_provider_generation(context, rp)
        for consumer in visited_consumers.values():
            consumer.increment_generation()
        # If any consumers involved in this transaction ended up having no
        # allocations, delete the consumer records. Exclude consumers that had
        # *some resource* in the allocation list with a total > 0 since clearly
        # those consumers have allocations...
        cons_with_allocs = set(a.consumer.uuid for a in allocs if a.used > 0)
        all_cons = set(c.uuid for c in visited_consumers.values())
        consumers_to_check = all_cons - cons_with_allocs
        consumer_obj.delete_consumers_if_no_allocations(
            context, consumers_to_check)

    @classmethod
    def get_all_by_resource_provider(cls, context, rp):
        """Return an AllocationList of all allocations against the supplied
        ResourceProvider object, migrating any incomplete consumer records
        on the fly.
        """
        _create_incomplete_consumers_for_provider(context, rp.id)
        db_allocs = _get_allocations_by_provider_id(context, rp.id)
        # Build up a list of Allocation objects, setting the Allocation object
        # fields to the same-named database record field we got from
        # _get_allocations_by_provider_id(). We already have the
        # ResourceProvider object so we just pass that object to the Allocation
        # object constructor as-is
        objs = []
        for rec in db_allocs:
            consumer = consumer_obj.Consumer(
                context, id=rec['consumer_id'],
                uuid=rec['consumer_uuid'],
                generation=rec['consumer_generation'],
                project=project_obj.Project(
                    context, id=rec['project_id'],
                    external_id=rec['project_external_id']),
                user=user_obj.User(
                    context, id=rec['user_id'],
                    external_id=rec['user_external_id']))
            objs.append(
                Allocation(
                    context, id=rec['id'], resource_provider=rp,
                    resource_class=_RC_CACHE.string_from_id(
                        rec['resource_class_id']),
                    consumer=consumer,
                    used=rec['used']))
        alloc_list = cls(context, objects=objs)
        return alloc_list

    @classmethod
    def get_all_by_consumer_id(cls, context, consumer_id):
        """Return an AllocationList of all allocations held by the supplied
        consumer UUID, migrating an incomplete consumer record on the fly.
        """
        _create_incomplete_consumer(context, consumer_id)
        db_allocs = _get_allocations_by_consumer_uuid(context, consumer_id)

        if db_allocs:
            # Build up the Consumer object (it's the same for all allocations
            # since we looked up by consumer ID)
            db_first = db_allocs[0]
            consumer = consumer_obj.Consumer(
                context, id=db_first['consumer_id'],
                uuid=db_first['consumer_uuid'],
                generation=db_first['consumer_generation'],
                project=project_obj.Project(
                    context, id=db_first['project_id'],
                    external_id=db_first['project_external_id']),
                user=user_obj.User(
                    context, id=db_first['user_id'],
                    external_id=db_first['user_external_id']))

        # Build up a list of Allocation objects, setting the Allocation object
        # fields to the same-named database record field we got from
        # _get_allocations_by_consumer_id().
        #
        # NOTE(jaypipes): Unlike with get_all_by_resource_provider(), we do
        # NOT already have the ResourceProvider object so we construct a new
        # ResourceProvider object below by looking at the resource provider
        # fields returned by _get_allocations_by_consumer_id().
        objs = [
            Allocation(
                context, id=rec['id'],
                resource_provider=ResourceProvider(
                    context,
                    id=rec['resource_provider_id'],
                    uuid=rec['resource_provider_uuid'],
                    name=rec['resource_provider_name'],
                    generation=rec['resource_provider_generation']),
                resource_class=_RC_CACHE.string_from_id(
                    rec['resource_class_id']),
                consumer=consumer,
                used=rec['used'])
            for rec in db_allocs
        ]
        alloc_list = cls(context, objects=objs)
        return alloc_list

    def replace_all(self):
        """Replace the supplied allocations.

        :note: This method always deletes all allocations for all consumers
               referenced in the list of Allocation objects and then replaces
               the consumer's allocations with the Allocation objects. In doing
               so, it will end up setting the Allocation.id attribute of each
               Allocation object.
        """
        # Retry _set_allocations server side if there is a
        # ResourceProviderConcurrentUpdateDetected. We don't care about
        # sleeping, we simply want to reset the resource provider objects
        # and try again. For sake of simplicity (and because we don't have
        # easy access to the information) we reload all the resource
        # providers that may be present.
        retries = self.RP_CONFLICT_RETRY_COUNT
        while retries:
            retries -= 1
            try:
                self._set_allocations(self._context, self.objects)
                break
            except exception.ResourceProviderConcurrentUpdateDetected:
                LOG.debug('Retrying allocations write on resource provider '
                          'generation conflict')
                # We only want to reload each unique resource provider once.
                alloc_rp_uuids = set(
                    alloc.resource_provider.uuid for alloc in self.objects)
                seen_rps = {}
                for rp_uuid in alloc_rp_uuids:
                    seen_rps[rp_uuid] = ResourceProvider.get_by_uuid(
                        self._context, rp_uuid)
                for alloc in self.objects:
                    rp_uuid = alloc.resource_provider.uuid
                    alloc.resource_provider = seen_rps[rp_uuid]
        else:
            # We ran out of retries so we need to raise again.
            # The log will automatically have request id info associated with
            # it that will allow tracing back to specific allocations.
            # Attempting to extract specific consumer or resource provider
            # information from the allocations is not coherent as this
            # could be multiple consumers and providers.
            LOG.warning('Exceeded retry limit of %d on allocations write',
                        self.RP_CONFLICT_RETRY_COUNT)
            raise exception.ResourceProviderConcurrentUpdateDetected()

    def delete_all(self):
        """Delete all allocations in this list, then clean up any consumers
        that are left without allocations.
        """
        consumer_uuids = set(alloc.consumer.uuid for alloc in self.objects)
        alloc_ids = [alloc.id for alloc in self.objects]
        _delete_allocations_by_ids(self._context, alloc_ids)
        consumer_obj.delete_consumers_if_no_allocations(
            self._context, consumer_uuids)

    def __repr__(self):
        strings = [repr(x) for x in self.objects]
        return "AllocationList[" + ", ".join(strings) + "]"
-
-
@base.VersionedObjectRegistry.register_if(False)
class Usage(base.VersionedObject):
    """The total amount of a single resource class consumed, as reported
    by the usages handlers.
    """

    fields = {
        'resource_class': rc_fields.ResourceClassField(read_only=True),
        'usage': fields.NonNegativeIntegerField(),
    }

    @staticmethod
    def _from_db_object(context, target, source):
        """Populate ``target`` from the DB record ``source``.

        :param context: request context stashed on the object
        :param target: Usage object to populate
        :param source: dict-like DB row containing a ``usage`` value and a
            ``resource_class_id``
        :returns: the populated ``target``
        """
        for field in target.fields:
            # NOTE: this must be a one-element *tuple*. The previous code
            # used ``field not in ('resource_class')``, which is a substring
            # test against the string 'resource_class' and only worked by
            # accident for the current field names.
            if field not in ('resource_class',):
                setattr(target, field, source[field])

        if 'resource_class' not in target:
            # Translate the internal class id to its string name lazily.
            rc_str = _RC_CACHE.string_from_id(source['resource_class_id'])
            target.resource_class = rc_str

        target._context = context
        target.obj_reset_changes()
        return target
-
-
@base.VersionedObjectRegistry.register_if(False)
class UsageList(base.ObjectListBase, base.VersionedObject):
    """A list of Usage objects, queryable by resource provider or by
    project/user.
    """

    fields = {
        'objects': fields.ListOfObjectsField('Usage'),
    }

    @staticmethod
    @db_api.placement_context_manager.reader
    def _get_all_by_resource_provider_uuid(context, rp_uuid):
        """Return a list of dicts of summed usage per resource class for the
        provider with the given UUID. Classes with inventory but no
        allocations report a usage of 0 (via the outer join + coalesce).
        """
        query = (context.session.query(models.Inventory.resource_class_id,
                 func.coalesce(func.sum(models.Allocation.used), 0))
                 .join(models.ResourceProvider,
                       models.Inventory.resource_provider_id ==
                       models.ResourceProvider.id)
                 .outerjoin(models.Allocation,
                            sql.and_(models.Inventory.resource_provider_id ==
                                     models.Allocation.resource_provider_id,
                                     models.Inventory.resource_class_id ==
                                     models.Allocation.resource_class_id))
                 .filter(models.ResourceProvider.uuid == rp_uuid)
                 .group_by(models.Inventory.resource_class_id))
        result = [dict(resource_class_id=item[0], usage=item[1])
                  for item in query.all()]
        return result

    @staticmethod
    @db_api.placement_context_manager.reader
    def _get_all_by_project_user(context, project_id, user_id=None):
        """Return a list of dicts of summed usage per resource class for the
        given project external id, optionally narrowed to one user.
        """
        query = (context.session.query(models.Allocation.resource_class_id,
                 func.coalesce(func.sum(models.Allocation.used), 0))
                 .join(models.Consumer,
                       models.Allocation.consumer_id == models.Consumer.uuid)
                 .join(models.Project,
                       models.Consumer.project_id == models.Project.id)
                 .filter(models.Project.external_id == project_id))
        if user_id:
            query = query.join(models.User,
                               models.Consumer.user_id == models.User.id)
            query = query.filter(models.User.external_id == user_id)
        query = query.group_by(models.Allocation.resource_class_id)
        result = [dict(resource_class_id=item[0], usage=item[1])
                  for item in query.all()]
        return result

    @classmethod
    def get_all_by_resource_provider_uuid(cls, context, rp_uuid):
        """Return a UsageList of usages for the given provider UUID."""
        usage_list = cls._get_all_by_resource_provider_uuid(context, rp_uuid)
        return base.obj_make_list(context, cls(context), Usage, usage_list)

    @classmethod
    def get_all_by_project_user(cls, context, project_id, user_id=None):
        """Return a UsageList of usages for the given project (and user)."""
        usage_list = cls._get_all_by_project_user(context, project_id,
                                                  user_id=user_id)
        return base.obj_make_list(context, cls(context), Usage, usage_list)

    def __repr__(self):
        strings = [repr(x) for x in self.objects]
        return "UsageList[" + ", ".join(strings) + "]"
-
-
@base.VersionedObjectRegistry.register_if(False)
class ResourceClass(base.VersionedObject, base.TimestampedObject):
    """A resource class: either one of the standard classes or a
    user-defined CUSTOM_* class stored in the database.
    """

    MIN_CUSTOM_RESOURCE_CLASS_ID = 10000
    """Any user-defined resource classes must have an identifier greater than
    or equal to this number.
    """

    # Retry count for handling possible race condition in creating resource
    # class. We don't ever want to hit this, as it is simply a race when
    # creating these classes, but this is just a stopgap to prevent a potential
    # infinite loop.
    RESOURCE_CREATE_RETRY_COUNT = 100

    fields = {
        'id': fields.IntegerField(read_only=True),
        'name': rc_fields.ResourceClassField(nullable=False),
    }

    @staticmethod
    def _from_db_object(context, target, source):
        """Copy every field from DB record ``source`` onto ``target`` and
        mark the object clean.
        """
        for field in target.fields:
            setattr(target, field, source[field])

        target._context = context
        target.obj_reset_changes()
        return target

    @classmethod
    def get_by_name(cls, context, name):
        """Return a ResourceClass object with the given string name.

        :param name: String name of the resource class to find

        :raises: ResourceClassNotFound if no such resource class was found
        """
        rc = _RC_CACHE.all_from_string(name)
        obj = cls(context, id=rc['id'], name=rc['name'],
                  updated_at=rc['updated_at'], created_at=rc['created_at'])
        obj.obj_reset_changes()
        return obj

    @staticmethod
    @db_api.placement_context_manager.reader
    def _get_next_id(context):
        """Utility method to grab the next resource class identifier to use for
        user-defined resource classes.
        """
        query = context.session.query(func.max(models.ResourceClass.id))
        max_id = query.one()[0]
        if not max_id:
            # No custom classes exist yet; start at the reserved floor.
            return ResourceClass.MIN_CUSTOM_RESOURCE_CLASS_ID
        else:
            return max_id + 1

    def create(self):
        """Insert this custom resource class into the database.

        :raises: ObjectActionError if already created, name missing, or name
            lacks the CUSTOM_ prefix
        :raises: ResourceClassExists if the name is a standard class or
            already present in the DB
        :raises: MaxDBRetriesExceeded if the id-generation race retry limit
            is hit
        """
        if 'id' in self:
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        if 'name' not in self:
            raise exception.ObjectActionError(action='create',
                                              reason='name is required')
        if self.name in rc_fields.ResourceClass.STANDARD:
            raise exception.ResourceClassExists(resource_class=self.name)

        if not self.name.startswith(rc_fields.ResourceClass.CUSTOM_NAMESPACE):
            raise exception.ObjectActionError(
                action='create',
                reason='name must start with ' +
                        rc_fields.ResourceClass.CUSTOM_NAMESPACE)

        updates = self.obj_get_changes()
        # There is the possibility of a race when adding resource classes, as
        # the ID is generated locally. This loop catches that exception, and
        # retries until either it succeeds, or a different exception is
        # encountered.
        retries = self.RESOURCE_CREATE_RETRY_COUNT
        while retries:
            retries -= 1
            try:
                rc = self._create_in_db(self._context, updates)
                self._from_db_object(self._context, self, rc)
                break
            except db_exc.DBDuplicateEntry as e:
                if 'id' in e.columns:
                    # Race condition for ID creation; try again
                    continue
                # The duplication is on the other unique column, 'name'. So do
                # not retry; raise the exception immediately.
                raise exception.ResourceClassExists(resource_class=self.name)
        else:
            # We have no idea how common it will be in practice for the retry
            # limit to be exceeded. We set it high in the hope that we never
            # hit this point, but added this log message so we know that this
            # specific situation occurred.
            LOG.warning("Exceeded retry limit on ID generation while "
                        "creating ResourceClass %(name)s",
                        {'name': self.name})
            msg = _("creating resource class %s") % self.name
            raise exception.MaxDBRetriesExceeded(action=msg)

    @staticmethod
    @db_api.placement_context_manager.writer
    def _create_in_db(context, updates):
        """Insert a new resource class row with a locally generated id."""
        next_id = ResourceClass._get_next_id(context)
        rc = models.ResourceClass()
        rc.update(updates)
        rc.id = next_id
        context.session.add(rc)
        return rc

    def destroy(self):
        """Delete this custom resource class from the database.

        :raises: ObjectActionError if the object has no id
        :raises: ResourceClassCannotDeleteStandard for standard classes
        :raises: ResourceClassInUse if any inventory references the class
        """
        if 'id' not in self:
            raise exception.ObjectActionError(action='destroy',
                                              reason='ID attribute not found')
        # Never delete any standard resource class, since the standard resource
        # classes don't even exist in the database table anyway.
        if self.id in (rc['id'] for rc in _RC_CACHE.STANDARDS):
            raise exception.ResourceClassCannotDeleteStandard(
                    resource_class=self.name)

        self._destroy(self._context, self.id, self.name)
        # Invalidate the cache so the deleted class is no longer served.
        _RC_CACHE.clear()

    @staticmethod
    @db_api.placement_context_manager.writer
    def _destroy(context, _id, name):
        """Delete the resource class row, guarding against in-use classes."""
        # Don't delete the resource class if it is referred to in the
        # inventories table.
        num_inv = context.session.query(models.Inventory).filter(
                models.Inventory.resource_class_id == _id).count()
        if num_inv:
            raise exception.ResourceClassInUse(resource_class=name)

        res = context.session.query(models.ResourceClass).filter(
                models.ResourceClass.id == _id).delete()
        if not res:
            raise exception.NotFound()

    def save(self):
        """Persist changed fields of this custom resource class.

        :raises: ObjectActionError if the object has no id
        :raises: ResourceClassCannotUpdateStandard for standard classes
        :raises: ResourceClassExists if renaming collides with another class
        """
        if 'id' not in self:
            raise exception.ObjectActionError(action='save',
                                              reason='ID attribute not found')
        updates = self.obj_get_changes()
        # Never update any standard resource class, since the standard resource
        # classes don't even exist in the database table anyway.
        if self.id in (rc['id'] for rc in _RC_CACHE.STANDARDS):
            raise exception.ResourceClassCannotUpdateStandard(
                    resource_class=self.name)
        self._save(self._context, self.id, self.name, updates)
        # Invalidate the cache so the renamed class is re-read.
        _RC_CACHE.clear()

    @staticmethod
    @db_api.placement_context_manager.writer
    def _save(context, id, name, updates):
        """Apply ``updates`` to the resource class row with the given id."""
        db_rc = context.session.query(models.ResourceClass).filter_by(
            id=id).first()
        db_rc.update(updates)
        try:
            db_rc.save(context.session)
        except db_exc.DBDuplicateEntry:
            raise exception.ResourceClassExists(resource_class=name)
-
-
@base.VersionedObjectRegistry.register_if(False)
class ResourceClassList(base.ObjectListBase, base.VersionedObject):
    """A list of ResourceClass objects covering the standard classes plus
    any custom classes stored in the database.
    """

    fields = {
        'objects': fields.ListOfObjectsField('ResourceClass'),
    }

    @staticmethod
    @db_api.placement_context_manager.reader
    def _get_all(context):
        # Standard classes come from the cache; custom ones from the DB.
        db_customs = context.session.query(models.ResourceClass).all()
        return _RC_CACHE.STANDARDS + list(db_customs)

    @classmethod
    def get_all(cls, context):
        return base.obj_make_list(
            context, cls(context), ResourceClass, cls._get_all(context))

    def __repr__(self):
        inner = ", ".join(repr(rc) for rc in self.objects)
        return "ResourceClassList[" + inner + "]"
-
-
@base.VersionedObjectRegistry.register_if(False)
class Trait(base.VersionedObject, base.TimestampedObject):
    """A string capability tag that can be associated with a resource
    provider (a standard trait or a user-defined CUSTOM_* trait).
    """

    # All the user-defined traits must begin with this prefix.
    CUSTOM_NAMESPACE = 'CUSTOM_'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'name': fields.StringField(nullable=False)
    }

    @staticmethod
    def _from_db_object(context, trait, db_trait):
        """Copy every field from DB record ``db_trait`` onto ``trait`` and
        mark the object clean.
        """
        for key in trait.fields:
            setattr(trait, key, db_trait[key])
        trait.obj_reset_changes()
        trait._context = context
        return trait

    @staticmethod
    @db_api.placement_context_manager.writer
    def _create_in_db(context, updates):
        """Insert a new trait row from the supplied field updates."""
        trait = models.Trait()
        trait.update(updates)
        context.session.add(trait)
        return trait

    def create(self):
        """Insert this trait into the database.

        :raises: ObjectActionError if already created or name missing
        :raises: TraitExists if a trait with this name already exists
        """
        if 'id' in self:
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        if 'name' not in self:
            raise exception.ObjectActionError(action='create',
                                              reason='name is required')

        updates = self.obj_get_changes()

        try:
            db_trait = self._create_in_db(self._context, updates)
        except db_exc.DBDuplicateEntry:
            raise exception.TraitExists(name=self.name)

        self._from_db_object(self._context, self, db_trait)

    @staticmethod
    @db_api.placement_context_manager.writer  # trait sync can cause a write
    def _get_by_name_from_db(context, name):
        """Return the trait row with the given name.

        :raises: TraitNotFound if no such trait exists
        """
        result = context.session.query(models.Trait).filter_by(
            name=name).first()
        if not result:
            raise exception.TraitNotFound(names=name)
        return result

    @classmethod
    def get_by_name(cls, context, name):
        """Return a Trait object with the given string name."""
        db_trait = cls._get_by_name_from_db(context, six.text_type(name))
        return cls._from_db_object(context, cls(), db_trait)

    @staticmethod
    @db_api.placement_context_manager.writer
    def _destroy_in_db(context, _id, name):
        """Delete the trait row, guarding against traits still in use.

        :raises: TraitInUse if any provider is associated with the trait
        :raises: TraitNotFound if no row was deleted
        """
        num = context.session.query(models.ResourceProviderTrait).filter(
            models.ResourceProviderTrait.trait_id == _id).count()
        if num:
            raise exception.TraitInUse(name=name)

        res = context.session.query(models.Trait).filter_by(
            name=name).delete()
        if not res:
            raise exception.TraitNotFound(names=name)

    def destroy(self):
        """Delete this custom trait from the database.

        :raises: ObjectActionError if name or id is missing
        :raises: TraitCannotDeleteStandard for non-CUSTOM_ traits
        """
        if 'name' not in self:
            raise exception.ObjectActionError(action='destroy',
                                              reason='name is required')

        if not self.name.startswith(self.CUSTOM_NAMESPACE):
            raise exception.TraitCannotDeleteStandard(name=self.name)

        if 'id' not in self:
            raise exception.ObjectActionError(action='destroy',
                                              reason='ID attribute not found')

        self._destroy_in_db(self._context, self.id, self.name)
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class TraitList(base.ObjectListBase, base.VersionedObject):
-
- fields = {
- 'objects': fields.ListOfObjectsField('Trait')
- }
-
- @staticmethod
- @db_api.placement_context_manager.writer # trait sync can cause a write
- def _get_all_from_db(context, filters):
- if not filters:
- filters = {}
-
- query = context.session.query(models.Trait)
- if 'name_in' in filters:
- query = query.filter(models.Trait.name.in_(
- [six.text_type(n) for n in filters['name_in']]
- ))
- if 'prefix' in filters:
- query = query.filter(
- models.Trait.name.like(six.text_type(filters['prefix'] + '%')))
- if 'associated' in filters:
- if filters['associated']:
- query = query.join(models.ResourceProviderTrait,
- models.Trait.id == models.ResourceProviderTrait.trait_id
- ).distinct()
- else:
- query = query.outerjoin(models.ResourceProviderTrait,
- models.Trait.id == models.ResourceProviderTrait.trait_id
- ).filter(models.ResourceProviderTrait.trait_id == null())
-
- return query.all()
-
- @base.remotable_classmethod
- def get_all(cls, context, filters=None):
- db_traits = cls._get_all_from_db(context, filters)
- return base.obj_make_list(context, cls(context), Trait, db_traits)
-
- @classmethod
- def get_all_by_resource_provider(cls, context, rp):
- """Returns a TraitList containing Trait objects for any trait
- associated with the supplied resource provider.
- """
- db_traits = _get_traits_by_provider_id(context, rp.id)
- return base.obj_make_list(context, cls(context), Trait, db_traits)
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class AllocationRequestResource(base.VersionedObject):
-
- fields = {
- 'resource_provider': fields.ObjectField('ResourceProvider'),
- 'resource_class': rc_fields.ResourceClassField(read_only=True),
- 'amount': fields.NonNegativeIntegerField(),
- }
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class AllocationRequest(base.VersionedObject):
-
- fields = {
- # UUID of (the root of the tree including) the non-sharing resource
- # provider associated with this AllocationRequest. Internal use only,
- # not included when the object is serialized for output.
- 'anchor_root_provider_uuid': fields.UUIDField(),
- # Whether all AllocationRequestResources in this AllocationRequest are
- # required to be satisfied by the same provider (based on the
- # corresponding RequestGroup's use_same_provider attribute). Internal
- # use only, not included when the object is serialized for output.
- 'use_same_provider': fields.BooleanField(),
- 'resource_requests': fields.ListOfObjectsField(
- 'AllocationRequestResource'
- ),
- }
-
- def __repr__(self):
- anchor = (self.anchor_root_provider_uuid[-8:]
- if 'anchor_root_provider_uuid' in self else '<?>')
- usp = self.use_same_provider if 'use_same_provider' in self else '<?>'
- repr_str = ('%s(anchor=...%s, same_provider=%s, '
- 'resource_requests=[%s])' %
- (self.obj_name(), anchor, usp,
- ', '.join([str(arr) for arr in self.resource_requests])))
- if six.PY2:
- repr_str = encodeutils.safe_encode(repr_str, incoming='utf-8')
- return repr_str
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class ProviderSummaryResource(base.VersionedObject):
-
- fields = {
- 'resource_class': rc_fields.ResourceClassField(read_only=True),
- 'capacity': fields.NonNegativeIntegerField(),
- 'used': fields.NonNegativeIntegerField(),
- # Internal use only; not included when the object is serialized for
- # output.
- 'max_unit': fields.NonNegativeIntegerField(),
- }
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class ProviderSummary(base.VersionedObject):
-
- fields = {
- 'resource_provider': fields.ObjectField('ResourceProvider'),
- 'resources': fields.ListOfObjectsField('ProviderSummaryResource'),
- 'traits': fields.ListOfObjectsField('Trait'),
- }
-
- @property
- def resource_class_names(self):
- """Helper property that returns a set() of resource class string names
- that are included in the provider summary.
- """
- return set(res.resource_class for res in self.resources)
-
-
-@db_api.placement_context_manager.reader
-def _get_usages_by_provider_tree(ctx, root_ids):
- """Returns a row iterator of usage records grouped by provider ID
- for all resource providers in all trees indicated in the ``root_ids``.
- """
- # We build up a SQL expression that looks like this:
- # SELECT
- # rp.id as resource_provider_id
- # , rp.uuid as resource_provider_uuid
- # , inv.resource_class_id
- # , inv.total
- # , inv.reserved
- # , inv.allocation_ratio
- # , inv.max_unit
- # , usage.used
- # FROM resource_providers AS rp
- # LEFT JOIN inventories AS inv
- # ON rp.id = inv.resource_provider_id
- # LEFT JOIN (
- # SELECT resource_provider_id, resource_class_id, SUM(used) as used
- # FROM allocations
- # JOIN resource_providers
- # ON allocations.resource_provider_id = resource_providers.id
- # AND (resource_providers.root_provider_id IN($root_ids)
- # OR resource_providers.id IN($root_ids))
- # GROUP BY resource_provider_id, resource_class_id
- # )
- # AS usage
- # ON inv.resource_provider_id = usage.resource_provider_id
- # AND inv.resource_class_id = usage.resource_class_id
- # WHERE (rp.root_provider_id IN ($root_ids)
- # OR resource_providers.id IN($root_ids))
- rpt = sa.alias(_RP_TBL, name="rp")
- inv = sa.alias(_INV_TBL, name="inv")
- # Build our derived table (subquery in the FROM clause) that sums used
- # amounts for resource provider and resource class
- derived_alloc_to_rp = sa.join(
- _ALLOC_TBL, _RP_TBL,
- sa.and_(_ALLOC_TBL.c.resource_provider_id == _RP_TBL.c.id,
- # TODO(tetsuro): Remove this OR condition when all
- # root_provider_id values are NOT NULL
- sa.or_(_RP_TBL.c.root_provider_id.in_(root_ids),
- _RP_TBL.c.id.in_(root_ids))
- )
- )
- usage = sa.alias(
- sa.select([
- _ALLOC_TBL.c.resource_provider_id,
- _ALLOC_TBL.c.resource_class_id,
- sql.func.sum(_ALLOC_TBL.c.used).label('used'),
- ]).select_from(derived_alloc_to_rp).group_by(
- _ALLOC_TBL.c.resource_provider_id,
- _ALLOC_TBL.c.resource_class_id
- ),
- name='usage')
- # Build a join between the resource providers and inventories table
- rpt_inv_join = sa.outerjoin(rpt, inv,
- rpt.c.id == inv.c.resource_provider_id)
- # And then join to the derived table of usages
- usage_join = sa.outerjoin(
- rpt_inv_join,
- usage,
- sa.and_(
- usage.c.resource_provider_id == inv.c.resource_provider_id,
- usage.c.resource_class_id == inv.c.resource_class_id,
- ),
- )
- query = sa.select([
- rpt.c.id.label("resource_provider_id"),
- rpt.c.uuid.label("resource_provider_uuid"),
- inv.c.resource_class_id,
- inv.c.total,
- inv.c.reserved,
- inv.c.allocation_ratio,
- inv.c.max_unit,
- usage.c.used,
- ]).select_from(usage_join).where(
- # TODO(tetsuro): Remove this or condition when all
- # root_provider_id values are NOT NULL
- sa.or_(
- rpt.c.root_provider_id.in_(root_ids),
- rpt.c.id.in_(root_ids)
- )
- )
- return ctx.session.execute(query).fetchall()
-
-
-@db_api.placement_context_manager.reader
-def _get_provider_ids_having_any_trait(ctx, traits):
- """Returns a set of resource provider internal IDs that have ANY of the
- supplied traits.
-
- :param ctx: Session context to use
- :param traits: A map, keyed by trait string name, of trait internal IDs, at
- least one of which each provider must have associated with
- it.
- :raise ValueError: If traits is empty or None.
- """
- if not traits:
- raise ValueError(_('traits must not be empty'))
-
- rptt = sa.alias(_RP_TRAIT_TBL, name="rpt")
- sel = sa.select([rptt.c.resource_provider_id])
- sel = sel.where(rptt.c.trait_id.in_(traits.values()))
- sel = sel.group_by(rptt.c.resource_provider_id)
- return set(r[0] for r in ctx.session.execute(sel))
-
-
-@db_api.placement_context_manager.reader
-def _get_provider_ids_having_all_traits(ctx, required_traits):
- """Returns a set of resource provider internal IDs that have ALL of the
- required traits.
-
- NOTE: Don't call this method with no required_traits.
-
- :param ctx: Session context to use
- :param required_traits: A map, keyed by trait string name, of required
- trait internal IDs that each provider must have
- associated with it
- :raise ValueError: If required_traits is empty or None.
- """
- if not required_traits:
- raise ValueError(_('required_traits must not be empty'))
-
- rptt = sa.alias(_RP_TRAIT_TBL, name="rpt")
- sel = sa.select([rptt.c.resource_provider_id])
- sel = sel.where(rptt.c.trait_id.in_(required_traits.values()))
- sel = sel.group_by(rptt.c.resource_provider_id)
- # Only get the resource providers that have ALL the required traits, so we
- # need to GROUP BY the resource provider and ensure that the
- # COUNT(trait_id) is equal to the number of traits we are requiring
- num_traits = len(required_traits)
- cond = sa.func.count(rptt.c.trait_id) == num_traits
- sel = sel.having(cond)
- return set(r[0] for r in ctx.session.execute(sel))
-
-
-@db_api.placement_context_manager.reader
-def _has_provider_trees(ctx):
- """Simple method that returns whether provider trees (i.e. nested resource
- providers) are in use in the deployment at all. This information is used to
- switch code paths when attempting to retrieve allocation candidate
- information. The code paths are eminently easier to execute and follow for
- non-nested scenarios...
-
- NOTE(jaypipes): The result of this function can be cached extensively.
- """
- sel = sa.select([_RP_TBL.c.id])
- sel = sel.where(_RP_TBL.c.parent_provider_id.isnot(None))
- sel = sel.limit(1)
- res = ctx.session.execute(sel).fetchall()
- return len(res) > 0
-
-
-@db_api.placement_context_manager.reader
-def _get_provider_ids_matching(ctx, resources, required_traits,
- forbidden_traits, member_of=None):
- """Returns a list of tuples of (internal provider ID, root provider ID)
- that have available inventory to satisfy all the supplied requests for
- resources.
-
- :note: This function is used for scenarios that do NOT involve sharing
- providers.
-
- :param ctx: Session context to use
- :param resources: A dict, keyed by resource class ID, of the amount
- requested of that resource class.
- :param required_traits: A map, keyed by trait string name, of required
- trait internal IDs that each provider must have
- associated with it
- :param forbidden_traits: A map, keyed by trait string name, of forbidden
- trait internal IDs that each provider must not
- have associated with it
- :param member_of: An optional list of list of aggregate UUIDs. If provided,
- the allocation_candidates returned will only be for
- resource providers that are members of one or more of the
- supplied aggregates of each aggregate UUID list.
- """
- # The iteratively filtered set of resource provider internal IDs that match
- # all the constraints in the request
- filtered_rps = set()
- if required_traits:
- trait_rps = _get_provider_ids_having_all_traits(ctx, required_traits)
- filtered_rps = trait_rps
- LOG.debug("found %d providers after applying required traits filter "
- "(%s)",
- len(filtered_rps), list(required_traits))
- if not filtered_rps:
- return []
-
- # If 'member_of' has values, do a separate lookup to identify the
- # resource providers that meet the member_of constraints.
- if member_of:
- rps_in_aggs = _provider_ids_matching_aggregates(ctx, member_of)
- if filtered_rps:
- filtered_rps &= set(rps_in_aggs)
- else:
- filtered_rps = set(rps_in_aggs)
- LOG.debug("found %d providers after applying aggregates filter (%s)",
- len(filtered_rps), member_of)
- if not filtered_rps:
- return []
-
- forbidden_rp_ids = set()
- if forbidden_traits:
- forbidden_rp_ids = _get_provider_ids_having_any_trait(
- ctx, forbidden_traits)
- if filtered_rps:
- filtered_rps -= forbidden_rp_ids
- LOG.debug("found %d providers after applying forbidden traits "
- "filter (%s)", len(filtered_rps),
- list(forbidden_traits))
- if not filtered_rps:
- return []
-
- # Instead of constructing a giant complex SQL statement that joins multiple
- # copies of derived usage tables and inventory tables to each other, we do
- # one query for each requested resource class. This allows us to log a
- # rough idea of which resource class query returned no results (for
- # purposes of rough debugging of a single allocation candidates request) as
- # well as reduce the necessary knowledge of SQL in order to understand the
- # queries being executed here.
- #
- # NOTE(jaypipes): The efficiency of this operation may be improved by
- # passing the trait_rps and/or forbidden_ip_ids iterables to the
- # _get_providers_with_resource() function so that we don't have to process
- # as many records inside the loop below to remove providers from the
- # eventual results list
- provs_with_resource = set()
- first = True
- for rc_id, amount in resources.items():
- rc_name = _RC_CACHE.string_from_id(rc_id)
- provs_with_resource = _get_providers_with_resource(ctx, rc_id, amount)
- LOG.debug("found %d providers with available %d %s",
- len(provs_with_resource), amount, rc_name)
- if not provs_with_resource:
- return []
-
- rc_rp_ids = set(p[0] for p in provs_with_resource)
- # The branching below could be collapsed code-wise, but is in place to
- # make the debug logging clearer.
- if first:
- first = False
- if filtered_rps:
- filtered_rps &= rc_rp_ids
- LOG.debug("found %d providers after applying initial "
- "aggregate and trait filters", len(filtered_rps))
- else:
- filtered_rps = rc_rp_ids
- # The following condition is not necessary for the logic; just
- # prevents the message from being logged unnecessarily.
- if forbidden_rp_ids:
- # Forbidden trait filters only need to be applied
- # a) on the first iteration; and
- # b) if not already set up before the loop
- # ...since any providers in the resulting set are the basis
- # for intersections, and providers with forbidden traits
- # are already absent from that set after we've filtered
- # them once.
- filtered_rps -= forbidden_rp_ids
- LOG.debug("found %d providers after applying forbidden "
- "traits", len(filtered_rps))
- else:
- filtered_rps &= rc_rp_ids
- LOG.debug("found %d providers after filtering by previous result",
- len(filtered_rps))
-
- if not filtered_rps:
- return []
-
- # provs_with_resource will contain a superset of providers with IDs still
- # in our filtered_rps set. We return the list of tuples of
- # (internal provider ID, root internal provider ID)
- return [rpids for rpids in provs_with_resource if rpids[0] in filtered_rps]
-
-
-@db_api.placement_context_manager.reader
-def _provider_aggregates(ctx, rp_ids):
- """Given a list of resource provider internal IDs, returns a dict,
- keyed by those provider IDs, of sets of aggregate ids associated
- with that provider.
-
- :raises: ValueError when rp_ids is empty.
-
- :param ctx: nova.context.RequestContext object
- :param rp_ids: list of resource provider IDs
- """
- if not rp_ids:
- raise ValueError(_("Expected rp_ids to be a list of resource provider "
- "internal IDs, but got an empty list."))
-
- rpat = sa.alias(_RP_AGG_TBL, name='rpat')
- sel = sa.select([rpat.c.resource_provider_id,
- rpat.c.aggregate_id])
- sel = sel.where(rpat.c.resource_provider_id.in_(rp_ids))
- res = collections.defaultdict(set)
- for r in ctx.session.execute(sel):
- res[r[0]].add(r[1])
- return res
-
-
-@db_api.placement_context_manager.reader
-def _get_providers_with_resource(ctx, rc_id, amount):
- """Returns a set of tuples of (provider ID, root provider ID) of providers
- that satisfy the request for a single resource class.
-
- :param ctx: Session context to use
- :param rc_id: Internal ID of resource class to check inventory for
- :param amount: Amount of resource being requested
- """
- # SELECT rp.id, rp.root_provider_id
- # FROM resource_providers AS rp
- # JOIN inventories AS inv
- # ON rp.id = inv.resource_provider_id
- # AND inv.resource_class_id = $RC_ID
- # LEFT JOIN (
- # SELECT
- # alloc.resource_provider_id,
- # SUM(allocs.used) AS used
- # FROM allocations AS alloc
- # WHERE allocs.resource_class_id = $RC_ID
- # GROUP BY allocs.resource_provider_id
- # ) AS usage
- # ON inv.resource_provider_id = usage.resource_provider_id
- # WHERE
- # used + $AMOUNT <= ((total - reserved) * inv.allocation_ratio)
- # AND inv.min_unit <= $AMOUNT
- # AND inv.max_unit >= $AMOUNT
- # AND $AMOUNT % inv.step_size == 0
- rpt = sa.alias(_RP_TBL, name="rp")
- inv = sa.alias(_INV_TBL, name="inv")
- allocs = sa.alias(_ALLOC_TBL, name="alloc")
- usage = sa.select([
- allocs.c.resource_provider_id,
- sql.func.sum(allocs.c.used).label('used')])
- usage = usage.where(allocs.c.resource_class_id == rc_id)
- usage = usage.group_by(allocs.c.resource_provider_id)
- usage = sa.alias(usage, name="usage")
- where_conds = [
- sql.func.coalesce(usage.c.used, 0) + amount <= (
- (inv.c.total - inv.c.reserved) * inv.c.allocation_ratio),
- inv.c.min_unit <= amount,
- inv.c.max_unit >= amount,
- amount % inv.c.step_size == 0,
- ]
- rp_to_inv = sa.join(
- rpt, inv, sa.and_(
- rpt.c.id == inv.c.resource_provider_id,
- inv.c.resource_class_id == rc_id))
- inv_to_usage = sa.outerjoin(
- rp_to_inv, usage,
- inv.c.resource_provider_id == usage.c.resource_provider_id)
- sel = sa.select([rpt.c.id, rpt.c.root_provider_id])
- sel = sel.select_from(inv_to_usage)
- sel = sel.where(sa.and_(*where_conds))
- res = ctx.session.execute(sel).fetchall()
- res = set((r[0], r[1]) for r in res)
- # TODO(tetsuro): Bug#1799892: We could have old providers with no root
- # provider set and they haven't undergone a data migration yet,
- # so we need to set the root_id explicitly here. We remove
- # this and when all root_provider_id values are NOT NULL
- ret = []
- for rp_tuple in res:
- rp_id = rp_tuple[0]
- root_id = rp_id if rp_tuple[1] is None else rp_tuple[1]
- ret.append((rp_id, root_id))
- return ret
-
-
-@db_api.placement_context_manager.reader
-def _get_trees_with_traits(ctx, rp_ids, required_traits, forbidden_traits):
- """Given a list of provider IDs, filter them to return a set of tuples of
- (provider ID, root provider ID) of providers which belong to a tree that
- can satisfy trait requirements.
-
- :param ctx: Session context to use
- :param rp_ids: a set of resource provider IDs
- :param required_traits: A map, keyed by trait string name, of required
- trait internal IDs that each provider TREE must
- COLLECTIVELY have associated with it
- :param forbidden_traits: A map, keyed by trait string name, of trait
- internal IDs that a resource provider must
- not have.
- """
- # We now want to restrict the returned providers to only those provider
- # trees that have all our required traits.
- #
- # The SQL we want looks like this:
- #
- # SELECT outer_rp.id, outer_rp.root_provider_id
- # FROM resource_providers AS outer_rp
- # JOIN (
- # SELECT rp.root_provider_id
- # FROM resource_providers AS rp
- # # Only if we have required traits...
- # INNER JOIN resource_provider_traits AS rptt
- # ON rp.id = rptt.resource_provider_id
- # AND rptt.trait_id IN ($REQUIRED_TRAIT_IDS)
- # # Only if we have forbidden_traits...
- # LEFT JOIN resource_provider_traits AS rptt_forbid
- # ON rp.id = rptt_forbid.resource_provider_id
- # AND rptt_forbid.trait_id IN ($FORBIDDEN_TRAIT_IDS)
- # WHERE rp.id IN ($RP_IDS)
- # # Only if we have forbidden traits...
- # AND rptt_forbid.resource_provider_id IS NULL
- # GROUP BY rp.root_provider_id
- # # Only if have required traits...
- # HAVING COUNT(DISTINCT rptt.trait_id) == $NUM_REQUIRED_TRAITS
- # ) AS trees_with_traits
- # ON outer_rp.root_provider_id = trees_with_traits.root_provider_id
- rpt = sa.alias(_RP_TBL, name="rp")
- cond = [rpt.c.id.in_(rp_ids)]
- subq = sa.select([rpt.c.root_provider_id])
- subq_join = None
- if required_traits:
- rptt = sa.alias(_RP_TRAIT_TBL, name="rptt")
- rpt_to_rptt = sa.join(
- rpt, rptt, sa.and_(
- rpt.c.id == rptt.c.resource_provider_id,
- rptt.c.trait_id.in_(required_traits.values())))
- subq_join = rpt_to_rptt
- # Only get the resource providers that have ALL the required traits,
- # so we need to GROUP BY the root provider and ensure that the
- # COUNT(trait_id) is equal to the number of traits we are requiring
- num_traits = len(required_traits)
- having_cond = sa.func.count(sa.distinct(rptt.c.trait_id)) == num_traits
- subq = subq.having(having_cond)
-
- # Tack on an additional LEFT JOIN clause inside the derived table if we've
- # got forbidden traits in the mix.
- if forbidden_traits:
- rptt_forbid = sa.alias(_RP_TRAIT_TBL, name="rptt_forbid")
- join_to = rpt
- if subq_join is not None:
- join_to = subq_join
- rpt_to_rptt_forbid = sa.outerjoin(
- join_to, rptt_forbid, sa.and_(
- rpt.c.id == rptt_forbid.c.resource_provider_id,
- rptt_forbid.c.trait_id.in_(forbidden_traits.values())))
- cond.append(rptt_forbid.c.resource_provider_id == sa.null())
- subq_join = rpt_to_rptt_forbid
-
- subq = subq.select_from(subq_join)
- subq = subq.where(sa.and_(*cond))
- subq = subq.group_by(rpt.c.root_provider_id)
- trees_with_traits = sa.alias(subq, name="trees_with_traits")
-
- outer_rps = sa.alias(_RP_TBL, name="outer_rps")
- outer_to_subq = sa.join(
- outer_rps, trees_with_traits,
- outer_rps.c.root_provider_id == trees_with_traits.c.root_provider_id)
- sel = sa.select([outer_rps.c.id, outer_rps.c.root_provider_id])
- sel = sel.select_from(outer_to_subq)
- res = ctx.session.execute(sel).fetchall()
-
- return [(rp_id, root_id) for rp_id, root_id in res]
-
-
-@db_api.placement_context_manager.reader
-def _get_trees_matching_all(ctx, resources, required_traits, forbidden_traits,
- sharing, member_of):
- """Returns a list of two-tuples (provider internal ID, root provider
- internal ID) for providers that satisfy the request for resources.
-
- If traits are also required, this function only returns results where the
- set of providers within a tree that satisfy the resource request
- collectively have all the required traits associated with them. This means
- that given the following provider tree:
-
- cn1
- |
- --> pf1 (SRIOV_NET_VF:2)
- |
- --> pf2 (SRIOV_NET_VF:1, HW_NIC_OFFLOAD_GENEVE)
-
- If a user requests 1 SRIOV_NET_VF resource and no required traits will
- return both pf1 and pf2. However, a request for 2 SRIOV_NET_VF and required
- trait of HW_NIC_OFFLOAD_GENEVE will return no results (since pf1 is the
- only provider with enough inventory of SRIOV_NET_VF but it does not have
- the required HW_NIC_OFFLOAD_GENEVE trait).
-
- :note: This function is used for scenarios to get results for a
- RequestGroup with use_same_provider=False. In this scenario, we are able
- to use multiple providers within the same provider tree including sharing
- providers to satisfy different resources involved in a single RequestGroup.
-
- :param ctx: Session context to use
- :param resources: A dict, keyed by resource class ID, of the amount
- requested of that resource class.
- :param required_traits: A map, keyed by trait string name, of required
- trait internal IDs that each provider TREE must
- COLLECTIVELY have associated with it
- :param forbidden_traits: A map, keyed by trait string name, of trait
- internal IDs that a resource provider must
- not have.
- :param sharing: dict, keyed by resource class ID, of lists of resource
- provider IDs that share that resource class and can
- contribute to the overall allocation request
- :param member_of: An optional list of lists of aggregate UUIDs. If
- provided, the allocation_candidates returned will only be
- for resource providers that are members of one or more of
- the supplied aggregates in each aggregate UUID list.
- """
- # We first grab the provider trees that have nodes that meet the request
- # for each resource class. Once we have this information, we'll then do a
- # followup query to winnow the set of resource providers to only those
- # provider *trees* that have all of the required traits.
- provs_with_inv = set()
- # provs_with_inv is a list of three-tuples with the second element being
- # the root provider ID and the third being resource class ID. Get the list
- # of root provider IDs and get all trees that collectively have all
- # required traits.
- trees_with_inv = set()
-
- for rc_id, amount in resources.items():
- rc_provs_with_inv = _get_providers_with_resource(ctx, rc_id, amount)
- if not rc_provs_with_inv:
- # If there's no providers that have one of the resource classes,
- # then we can short-circuit
- return []
- rc_trees = set(p[1] for p in rc_provs_with_inv)
- provs_with_inv |= set((p[0], p[1], rc_id) for p in rc_provs_with_inv)
-
- sharing_providers = sharing.get(rc_id)
- if sharing_providers:
- # There are sharing providers for this resource class, so we
- # should also get combinations of (sharing provider, anchor root)
- # in addition to (non-sharing provider, anchor root) we already
- # have.
- rc_provs_with_inv = _anchors_for_sharing_providers(
- ctx, sharing_providers, get_id=True)
- rc_provs_with_inv = set(
- (p[0], p[1], rc_id) for p in rc_provs_with_inv)
- rc_trees |= set(p[1] for p in rc_provs_with_inv)
- provs_with_inv |= rc_provs_with_inv
-
- # Filter trees_with_inv to have only trees with enough inventories
- # for this resource class. Here "tree" includes sharing providers
- # in its terminology
- if trees_with_inv:
- trees_with_inv &= rc_trees
- else:
- trees_with_inv = rc_trees
-
- if not trees_with_inv:
- return []
-
- # Select only those tuples where there are providers for all requested
- # resource classes (trees_with_inv contains the root provider IDs of those
- # trees that contain all our requested resources)
- provs_with_inv = set(p for p in provs_with_inv if p[1] in trees_with_inv)
-
- if not provs_with_inv:
- return []
-
- # If 'member_of' has values, do a separate lookup to identify the
- # resource providers that meet the member_of constraints.
- if member_of:
- rps_in_aggs = _provider_ids_matching_aggregates(ctx, member_of,
- rp_ids=trees_with_inv)
- if not rps_in_aggs:
- # Short-circuit. The user either asked for a non-existing
- # aggregate or there were no resource providers that matched
- # the requirements...
- return []
- provs_with_inv = set(p for p in provs_with_inv if p[1] in rps_in_aggs)
-
- if (not required_traits and not forbidden_traits) or (
- any(sharing.values())):
- # If there were no traits required, there's no difference in how we
- # calculate allocation requests between nested and non-nested
- # environments, so just short-circuit and return. Or if sharing
- # providers are in play, we check the trait constraints later
- # in _alloc_candidates_multiple_providers(), so skip.
- return list(provs_with_inv)
-
- # Return the providers where the providers have the available inventory
- # capacity and that set of providers (grouped by their tree) have all
- # of the required traits and none of the forbidden traits
- rp_ids_with_inv = set(p[0] for p in provs_with_inv)
- rp_tuples_with_trait = _get_trees_with_traits(
- ctx, rp_ids_with_inv, required_traits, forbidden_traits)
-
- ret = [rp_tuple for rp_tuple in provs_with_inv if (
- rp_tuple[0], rp_tuple[1]) in rp_tuples_with_trait]
-
- return ret
-
-
-def _build_provider_summaries(context, usages, prov_traits):
- """Given a list of dicts of usage information and a map of providers to
- their associated string traits, returns a dict, keyed by resource provider
- ID, of ProviderSummary objects.
-
- :param context: nova.context.RequestContext object
- :param usages: A list of dicts with the following format:
-
- {
- 'resource_provider_id': <internal resource provider ID>,
- 'resource_provider_uuid': <UUID>,
- 'resource_class_id': <internal resource class ID>,
- 'total': integer,
- 'reserved': integer,
- 'allocation_ratio': float,
- }
- :param prov_traits: A dict, keyed by internal resource provider ID, of
- string trait names associated with that provider
- """
- # Before we go creating provider summary objects, first grab all the
- # provider information (including root, parent and UUID information) for
- # all providers involved in our operation
- rp_ids = set(usage['resource_provider_id'] for usage in usages)
- provider_ids = _provider_ids_from_rp_ids(context, rp_ids)
-
- # Build up a dict, keyed by internal resource provider ID, of
- # ProviderSummary objects containing one or more ProviderSummaryResource
- # objects representing the resources the provider has inventory for.
- summaries = {}
- for usage in usages:
- rp_id = usage['resource_provider_id']
- summary = summaries.get(rp_id)
- if not summary:
- pids = provider_ids[rp_id]
- summary = ProviderSummary(
- context,
- resource_provider=ResourceProvider(
- context, id=pids.id, uuid=pids.uuid,
- root_provider_uuid=pids.root_uuid,
- parent_provider_uuid=pids.parent_uuid),
- resources=[],
- )
- summaries[rp_id] = summary
-
- traits = prov_traits[rp_id]
- summary.traits = [Trait(context, name=tname) for tname in traits]
-
- rc_id = usage['resource_class_id']
- if rc_id is None:
- # NOTE(tetsuro): This provider doesn't have any inventory itself.
- # But we include this provider in summaries since another
- # provider in the same tree will be in the "allocation_request".
- # Let's skip the following and leave "ProviderSummary.resources"
- # field empty.
- continue
- # NOTE(jaypipes): usage['used'] may be None due to the LEFT JOIN of
- # the usages subquery, so we coerce NULL values to 0 here.
- used = usage['used'] or 0
- allocation_ratio = usage['allocation_ratio']
- cap = int((usage['total'] - usage['reserved']) * allocation_ratio)
- rc_name = _RC_CACHE.string_from_id(rc_id)
- rpsr = ProviderSummaryResource(
- context,
- resource_class=rc_name,
- capacity=cap,
- used=used,
- max_unit=usage['max_unit'],
- )
- summary.resources.append(rpsr)
- return summaries
-
-
-def _aggregates_associated_with_providers(a, b, prov_aggs):
- """quickly check if the two rps are in the same aggregates
-
- :param a: resource provider ID for first provider
- :param b: resource provider ID for second provider
- :param prov_aggs: a dict keyed by resource provider IDs, of sets
- of aggregate ids associated with that provider
- """
- a_aggs = prov_aggs[a]
- b_aggs = prov_aggs[b]
- return a_aggs & b_aggs
-
-
def _shared_allocation_request_resources(ctx, ns_rp_id, requested_resources,
                                         sharing, summaries, prov_aggs):
    """Returns a dict, keyed by resource class ID, of lists of
    AllocationRequestResource objects that represent resources that are
    provided by a sharing provider.

    Only sharing providers that are associated via aggregate with the
    supplied non-sharing provider are included; each such provider is
    asked for the *full* requested amount of its resource class.

    :param ctx: nova.context.RequestContext object
    :param ns_rp_id: an internal ID of a non-sharing resource provider
    :param requested_resources: dict, keyed by resource class ID, of amounts
                                being requested for that resource class
    :param sharing: dict, keyed by resource class ID, of lists of resource
                    provider IDs that share that resource class and can
                    contribute to the overall allocation request
    :param summaries: dict, keyed by resource provider ID, of ProviderSummary
                      objects containing usage and trait information for
                      resource providers involved in the overall request
    :param prov_aggs: dict, keyed by resource provider ID, of sets of
                      aggregate ids associated with that provider.
    :return: dict, keyed by resource class ID, of lists of
             AllocationRequestResource objects.
    """
    res_requests = collections.defaultdict(list)
    for rc_id in sharing:
        for rp_id in sharing[rc_id]:
            # A sharing provider can only contribute if it is in at least
            # one aggregate with the non-sharing provider.
            aggs_in_both = _aggregates_associated_with_providers(
                ns_rp_id, rp_id, prov_aggs)
            if not aggs_in_both:
                continue
            summary = summaries[rp_id]
            rp_uuid = summary.resource_provider.uuid
            # The sharing provider supplies the entire requested amount for
            # this resource class.
            res_req = AllocationRequestResource(
                ctx,
                resource_provider=ResourceProvider(ctx, uuid=rp_uuid),
                resource_class=_RC_CACHE.string_from_id(rc_id),
                amount=requested_resources[rc_id],
            )
            res_requests[rc_id].append(res_req)
    return res_requests
-
-
def _allocation_request_for_provider(ctx, requested_resources, provider):
    """Build an AllocationRequest that sources every requested resource
    class from a single provider.

    :param ctx: nova.context.RequestContext object
    :param requested_resources: dict, keyed by resource class ID, of amounts
                                being requested for that resource class
    :param provider: ResourceProvider object representing the provider of the
                     resources.
    :return: An AllocationRequest anchored at the provider's own root.
    """
    resource_requests = []
    for rc_id, amount in requested_resources.items():
        resource_requests.append(
            AllocationRequestResource(
                ctx, resource_provider=provider,
                resource_class=_RC_CACHE.string_from_id(rc_id),
                amount=amount))
    # NOTE(efried): This method only produces an AllocationRequest with its
    # anchor in its own tree. If the provider is a sharing provider, the
    # caller needs to identify the other anchors with which it might be
    # associated.
    return AllocationRequest(
        ctx, resource_requests=resource_requests,
        anchor_root_provider_uuid=provider.root_provider_uuid)
-
-
-def _check_traits_for_alloc_request(res_requests, summaries, prov_traits,
- required_traits, forbidden_traits):
- """Given a list of AllocationRequestResource objects, check if that
- combination can provide trait constraints. If it can, returns all
- resource provider internal IDs in play, else return an empty list.
-
- TODO(tetsuro): For optimization, we should move this logic to SQL in
- _get_trees_matching_all().
-
- :param res_requests: a list of AllocationRequestResource objects that have
- resource providers to be checked if they collectively
- satisfy trait constraints in the required_traits and
- forbidden_traits parameters.
- :param summaries: dict, keyed by resource provider ID, of ProviderSummary
- objects containing usage and trait information for
- resource providers involved in the overall request
- :param prov_traits: A dict, keyed by internal resource provider ID, of
- string trait names associated with that provider
- :param required_traits: A map, keyed by trait string name, of required
- trait internal IDs that each *allocation request's
- set of providers* must *collectively* have
- associated with them
- :param forbidden_traits: A map, keyed by trait string name, of trait
- internal IDs that a resource provider must
- not have.
- """
- all_prov_ids = []
- all_traits = set()
- for res_req in res_requests:
- rp_uuid = res_req.resource_provider.uuid
- for rp_id, summary in summaries.items():
- if summary.resource_provider.uuid == rp_uuid:
- break
- rp_traits = set(prov_traits.get(rp_id, []))
-
- # Check if there are forbidden_traits
- conflict_traits = set(forbidden_traits) & set(rp_traits)
- if conflict_traits:
- LOG.debug('Excluding resource provider %s, it has '
- 'forbidden traits: (%s).',
- rp_id, ', '.join(conflict_traits))
- return []
-
- all_prov_ids.append(rp_id)
- all_traits |= rp_traits
-
- # Check if there are missing traits
- missing_traits = set(required_traits) - all_traits
- if missing_traits:
- LOG.debug('Excluding a set of allocation candidate %s : '
- 'missing traits %s are not satisfied.',
- all_prov_ids, ','.join(missing_traits))
- return []
-
- return all_prov_ids
-
-
def _alloc_candidates_single_provider(ctx, requested_resources, rp_tuples):
    """Returns a tuple of (allocation requests, provider summaries) for a
    supplied set of requested resource amounts and resource providers. The
    supplied resource providers have capacity to satisfy ALL of the resources
    in the requested resources as well as ALL required traits that were
    requested by the user.

    This is used in two circumstances:
    - To get results for a RequestGroup with use_same_provider=True.
    - As an optimization when no sharing providers satisfy any of the requested
      resources, and nested providers are not in play.
    In these scenarios, we can more efficiently build the list of
    AllocationRequest and ProviderSummary objects due to not having to
    determine requests across multiple providers.

    :param ctx: nova.context.RequestContext object
    :param requested_resources: dict, keyed by resource class ID, of amounts
                                being requested for that resource class
    :param rp_tuples: List of two-tuples of (provider ID, root provider ID)s
                      for providers that matched the requested resources
    :return: tuple of (list of AllocationRequest,
             list of ProviderSummary) objects.
    """
    if not rp_tuples:
        return [], []

    # Get all root resource provider IDs.
    root_ids = set(p[1] for p in rp_tuples)

    # Grab usage summaries for each provider
    usages = _get_usages_by_provider_tree(ctx, root_ids)

    # Get a dict, keyed by resource provider internal ID, of trait string names
    # that provider has associated with it
    prov_traits = _get_traits_by_provider_tree(ctx, root_ids)

    # Get a dict, keyed by resource provider internal ID, of ProviderSummary
    # objects for all providers
    summaries = _build_provider_summaries(ctx, usages, prov_traits)

    # Next, build up a list of allocation requests. These allocation requests
    # are AllocationRequest objects, containing resource provider UUIDs,
    # resource class names and amounts to consume from that resource provider
    alloc_requests = []
    for rp_id, root_id in rp_tuples:
        rp_summary = summaries[rp_id]
        # Every requested resource comes from this one provider (that is
        # the contract of this code path).
        req_obj = _allocation_request_for_provider(
            ctx, requested_resources, rp_summary.resource_provider)
        alloc_requests.append(req_obj)
        # If this is a sharing provider, we have to include an extra
        # AllocationRequest for every possible anchor.
        traits = [trait.name for trait in rp_summary.traits]
        if os_traits.MISC_SHARES_VIA_AGGREGATE in traits:
            anchors = set([p[1] for p in _anchors_for_sharing_providers(
                ctx, [rp_summary.resource_provider.id])])
            for anchor in anchors:
                # We already added self
                if anchor == rp_summary.resource_provider.root_provider_uuid:
                    continue
                # Deep copy so each anchored request is an independent
                # object before we repoint its anchor.
                req_obj = copy.deepcopy(req_obj)
                req_obj.anchor_root_provider_uuid = anchor
                alloc_requests.append(req_obj)
    return alloc_requests, list(summaries.values())
-
-
def _alloc_candidates_multiple_providers(ctx, requested_resources,
                                         required_traits, forbidden_traits,
                                         rp_tuples):
    """Returns a tuple of (allocation requests, provider summaries) for a
    supplied set of requested resource amounts and tuples of
    (rp_id, root_id, rc_id). The supplied resource provider trees have
    capacity to satisfy ALL of the resources in the requested resources as
    well as ALL required traits that were requested by the user.

    This is a code path to get results for a RequestGroup with
    use_same_provider=False. In this scenario, we are able to use multiple
    providers within the same provider tree including sharing providers to
    satisfy different resources involved in a single request group.

    :param ctx: nova.context.RequestContext object
    :param requested_resources: dict, keyed by resource class ID, of amounts
                                being requested for that resource class
    :param required_traits: A map, keyed by trait string name, of required
                            trait internal IDs that each *allocation request's
                            set of providers* must *collectively* have
                            associated with them
    :param forbidden_traits: A map, keyed by trait string name, of trait
                             internal IDs that a resource provider must
                             not have.
    :param rp_tuples: List of tuples of (provider ID, anchor root provider ID,
                      resource class ID)s for providers that matched the
                      requested resources
    :return: tuple of (list of AllocationRequest,
             list of ProviderSummary) objects.
    """
    if not rp_tuples:
        return [], []

    # Get all the root resource provider IDs. We should include the first
    # values of rp_tuples because while sharing providers are root providers,
    # they have their "anchor" providers for the second value.
    root_ids = set(p[0] for p in rp_tuples) | set(p[1] for p in rp_tuples)

    # Grab usage summaries for each provider in the trees
    usages = _get_usages_by_provider_tree(ctx, root_ids)

    # Get a dict, keyed by resource provider internal ID, of trait string names
    # that provider has associated with it
    prov_traits = _get_traits_by_provider_tree(ctx, root_ids)

    # Get a dict, keyed by resource provider internal ID, of ProviderSummary
    # objects for all providers
    summaries = _build_provider_summaries(ctx, usages, prov_traits)

    # Get a dict, keyed by root provider internal ID, of a dict, keyed by
    # resource class internal ID, of lists of AllocationRequestResource objects
    tree_dict = collections.defaultdict(lambda: collections.defaultdict(list))

    for rp_id, root_id, rc_id in rp_tuples:
        rp_summary = summaries[rp_id]
        tree_dict[root_id][rc_id].append(
            AllocationRequestResource(
                ctx, resource_provider=rp_summary.resource_provider,
                resource_class=_RC_CACHE.string_from_id(rc_id),
                amount=requested_resources[rc_id]))

    # Next, build up a list of allocation requests. These allocation requests
    # are AllocationRequest objects, containing resource provider UUIDs,
    # resource class names and amounts to consume from that resource provider
    alloc_requests = []

    # Build a list of lists of provider internal IDs that end up in
    # allocation request objects. This is used to ensure we don't end up
    # having allocation requests with duplicate sets of resource providers.
    alloc_prov_ids = []

    # Let's look into each tree
    for root_id, alloc_dict in tree_dict.items():
        # Get request_groups, which is a list of lists of
        # AllocationRequestResource(ARR) per requested resource class(rc).
        # For example, if we have the alloc_dict:
        # {rc1_id: [ARR(rc1, rp1), ARR(rc1, rp2)],
        #  rc2_id: [ARR(rc2, rp1), ARR(rc2, rp2)],
        #  rc3_id: [ARR(rc3, rp1)]}
        # then the request_groups would be something like
        # [[ARR(rc1, rp1), ARR(rc1, rp2)],
        #  [ARR(rc2, rp1), ARR(rc2, rp2)],
        #  [ARR(rc3, rp1)]]
        # , which should be ordered by the resource class id.
        request_groups = [val for key, val in sorted(alloc_dict.items())]

        root_summary = summaries[root_id]
        root_uuid = root_summary.resource_provider.uuid

        # Using itertools.product, we get all the combinations of resource
        # providers in a tree.
        # For example, the sample in the comment above becomes:
        # [(ARR(rc1, ss1), ARR(rc2, ss1), ARR(rc3, ss1)),
        #  (ARR(rc1, ss1), ARR(rc2, ss2), ARR(rc3, ss1)),
        #  (ARR(rc1, ss2), ARR(rc2, ss1), ARR(rc3, ss1)),
        #  (ARR(rc1, ss2), ARR(rc2, ss2), ARR(rc3, ss1))]
        for res_requests in itertools.product(*request_groups):
            all_prov_ids = _check_traits_for_alloc_request(res_requests,
                summaries, prov_traits, required_traits, forbidden_traits)
            if (not all_prov_ids) or (all_prov_ids in alloc_prov_ids):
                # This combination doesn't satisfy trait constraints,
                # ...or we already have this permutation, which happens
                # when multiple sharing providers with different resource
                # classes are in one request.
                continue
            alloc_prov_ids.append(all_prov_ids)
            alloc_requests.append(
                AllocationRequest(ctx, resource_requests=list(res_requests),
                                  anchor_root_provider_uuid=root_uuid)
            )
    return alloc_requests, list(summaries.values())
-
-
@db_api.placement_context_manager.reader
def _get_traits_by_provider_tree(ctx, root_ids):
    """Returns a dict, keyed by provider IDs for all resource providers
    in all trees indicated in the ``root_ids``, of string trait names
    associated with that provider.

    Providers with no traits do not appear in the result; because the
    returned mapping is a defaultdict(list), looking them up yields an
    empty list.

    :raises: ValueError when root_ids is empty.

    :param ctx: nova.context.RequestContext object
    :param root_ids: list of root resource provider IDs
    """
    if not root_ids:
        raise ValueError(_("Expected root_ids to be a list of root resource "
                           "provider internal IDs, but got an empty list."))

    # Join resource_providers -> provider/trait association -> traits so we
    # can filter on each provider's root and select the trait string names.
    rpt = sa.alias(_RP_TBL, name='rpt')
    rptt = sa.alias(_RP_TRAIT_TBL, name='rptt')
    tt = sa.alias(_TRAIT_TBL, name='t')
    rpt_rptt = sa.join(rpt, rptt, rpt.c.id == rptt.c.resource_provider_id)
    j = sa.join(rpt_rptt, tt, rptt.c.trait_id == tt.c.id)
    sel = sa.select([rptt.c.resource_provider_id, tt.c.name]).select_from(j)
    # Restrict to every provider belonging to a tree rooted at root_ids.
    sel = sel.where(rpt.c.root_provider_id.in_(root_ids))
    res = collections.defaultdict(list)
    for r in ctx.session.execute(sel):
        res[r[0]].append(r[1])
    return res
-
-
@db_api.placement_context_manager.reader
def _trait_ids_from_names(ctx, names):
    """Given a list of string trait names, returns a dict, keyed by those
    string names, of the corresponding internal integer trait ID.

    Names with no matching trait row are simply absent from the returned
    dict; callers that care must compare the result size against the
    input size (see AllocationCandidates._get_by_one_request).

    :raises: ValueError when names is empty.

    :param ctx: nova.context.RequestContext object
    :param names: list of string trait names
    """
    if not names:
        raise ValueError(_("Expected names to be a list of string trait "
                           "names, but got an empty list."))

    # Avoid SAWarnings about unicode types...
    unames = map(six.text_type, names)
    tt = sa.alias(_TRAIT_TBL, name='t')
    sel = sa.select([tt.c.name, tt.c.id]).where(tt.c.name.in_(unames))
    return {r[0]: r[1] for r in ctx.session.execute(sel)}
-
-
-def _rp_rc_key(rp, rc):
- """Creates hashable key unique to a provider + resource class."""
- return rp.uuid, rc
-
-
def _consolidate_allocation_requests(areqs):
    """Collapse a list of AllocationRequest into a single one.

    :param areqs: A list containing one AllocationRequest for each input
            RequestGroup. This may mean that multiple resource_requests
            contain resource amounts of the same class from the same provider.
    :return: A single consolidated AllocationRequest, containing no
            resource_requests with duplicated (resource_provider,
            resource_class).
    """
    # areqs must have at least one element. Save the anchor to populate the
    # returned AllocationRequest.
    anchor_rp_uuid = areqs[0].anchor_root_provider_uuid
    # Accumulate AllocationRequestResource keyed by provider + resource
    # class, folding amounts together when a key repeats.
    arrs_by_rp_rc = {}
    for areq in areqs:
        # Sanity check: the anchor should be the same for every areq
        if areq.anchor_root_provider_uuid != anchor_rp_uuid:
            # This should never happen. If it does, it's a dev bug.
            raise ValueError(
                _("Expected every AllocationRequest in "
                  "`_consolidate_allocation_requests` to have the same "
                  "anchor!"))
        for arr in areq.resource_requests:
            key = _rp_rc_key(arr.resource_provider, arr.resource_class)
            existing = arrs_by_rp_rc.get(key)
            if existing is None:
                # Deep copy so the merged amounts don't mutate the input.
                arrs_by_rp_rc[key] = copy.deepcopy(arr)
            else:
                existing.amount += arr.amount
    return AllocationRequest(
        resource_requests=list(arrs_by_rp_rc.values()),
        anchor_root_provider_uuid=anchor_rp_uuid)
-
-
-def _satisfies_group_policy(areqs, group_policy, num_granular_groups):
- """Applies group_policy to a list of AllocationRequest.
-
- Returns True or False, indicating whether this list of
- AllocationRequest satisfies group_policy, as follows:
-
- * "isolate": Each AllocationRequest with use_same_provider=True
- is satisfied by a single resource provider. If the "isolate"
- policy is in effect, each such AllocationRequest must be
- satisfied by a *unique* resource provider.
- * "none" or None: Always returns True.
-
- :param areqs: A list containing one AllocationRequest for each input
- RequestGroup.
- :param group_policy: String indicating how RequestGroups should interact
- with each other. If the value is "isolate", we will return False
- if AllocationRequests that came from RequestGroups keyed by
- nonempty suffixes are satisfied by the same provider.
- :param num_granular_groups: The number of granular (use_same_provider=True)
- RequestGroups in the request.
- :return: True if areqs satisfies group_policy; False otherwise.
- """
- if group_policy != 'isolate':
- # group_policy="none" means no filtering
- return True
-
- # The number of unique resource providers referenced in the request groups
- # having use_same_provider=True must be equal to the number of granular
- # groups.
- num_granular_groups_in_areqs = len(set(
- # We can reliably use the first resource_request's provider: all the
- # resource_requests are satisfied by the same provider by definition
- # because use_same_provider is True.
- areq.resource_requests[0].resource_provider.uuid
- for areq in areqs
- if areq.use_same_provider))
- if num_granular_groups == num_granular_groups_in_areqs:
- return True
- LOG.debug('Excluding the following set of AllocationRequest because '
- 'group_policy=isolate and the number of granular groups in the '
- 'set (%d) does not match the number of granular groups in the '
- 'request (%d): %s',
- num_granular_groups_in_areqs, num_granular_groups, str(areqs))
- return False
-
-
def _exceeds_capacity(areq, psum_res_by_rp_rc):
    """Checks a (consolidated) AllocationRequest against the provider summaries
    to ensure that it does not exceed capacity.

    Exceeding capacity means either that existing usage plus this
    allocation would go past the total inventory amount, or that this
    allocation alone is larger than the inventory's max_unit.

    :param areq: An AllocationRequest produced by the
            `_consolidate_allocation_requests` method.
    :param psum_res_by_rp_rc: A dict, keyed by provider + resource class via
            _rp_rc_key, of ProviderSummaryResource.
    :return: True if areq exceeds capacity; False otherwise.
    """
    for res_req in areq.resource_requests:
        cap_info = psum_res_by_rp_rc[
            _rp_rc_key(res_req.resource_provider, res_req.resource_class)]
        over_capacity = cap_info.used + res_req.amount > cap_info.capacity
        if over_capacity:
            LOG.debug('Excluding the following AllocationRequest because used '
                      '(%d) + amount (%d) > capacity (%d) for resource class '
                      '%s: %s',
                      cap_info.used, res_req.amount, cap_info.capacity,
                      res_req.resource_class, str(areq))
            return True
        if res_req.amount > cap_info.max_unit:
            LOG.debug('Excluding the following AllocationRequest because '
                      'amount (%d) > max_unit (%d) for resource class %s: %s',
                      res_req.amount, cap_info.max_unit,
                      res_req.resource_class, str(areq))
            return True
    return False
-
-
def _merge_candidates(candidates, group_policy=None):
    """Given a dict, keyed by RequestGroup suffix, of tuples of
    (allocation_requests, provider_summaries), produce a single tuple of
    (allocation_requests, provider_summaries) that appropriately incorporates
    the elements from each.

    Each (alloc_reqs, prov_sums) in `candidates` satisfies one RequestGroup.
    This method creates a list of alloc_reqs, *each* of which satisfies *all*
    of the RequestGroups.

    For that merged list of alloc_reqs, a corresponding provider_summaries is
    produced.

    :param candidates: A dict, keyed by integer suffix or '', of tuples of
            (allocation_requests, provider_summaries) to be merged.
    :param group_policy: String indicating how RequestGroups should interact
            with each other. If the value is "isolate", we will filter out
            candidates where AllocationRequests that came from RequestGroups
            keyed by nonempty suffixes are satisfied by the same provider.
    :return: A tuple of (allocation_requests, provider_summaries).
    """
    # Build a dict, keyed by anchor root provider UUID, of dicts, keyed by
    # suffix, of nonempty lists of AllocationRequest. Each inner dict must
    # possess all of the suffix keys to be viable (i.e. contains at least
    # one AllocationRequest per RequestGroup).
    #
    # areq_lists_by_anchor =
    #   { anchor_root_provider_uuid: {
    #         '': [AllocationRequest, ...],   \  This dict must contain
    #         '1': [AllocationRequest, ...],   \ exactly one nonempty list per
    #         ...                              / suffix to be viable. That
    #         '42': [AllocationRequest, ...], /  filtering is done later.
    #     },
    #     ...
    #   }
    areq_lists_by_anchor = collections.defaultdict(
        lambda: collections.defaultdict(list))
    # Save off all the provider summaries lists - we'll use 'em later.
    all_psums = []
    # Construct a dict, keyed by resource provider + resource class, of
    # ProviderSummaryResource. This will be used to do a final capacity
    # check/filter on each merged AllocationRequest.
    psum_res_by_rp_rc = {}
    for suffix, (areqs, psums) in candidates.items():
        for areq in areqs:
            anchor = areq.anchor_root_provider_uuid
            areq_lists_by_anchor[anchor][suffix].append(areq)
        for psum in psums:
            all_psums.append(psum)
            for psum_res in psum.resources:
                key = _rp_rc_key(
                    psum.resource_provider, psum_res.resource_class)
                psum_res_by_rp_rc[key] = psum_res

    # Create all combinations picking one AllocationRequest from each list
    # for each anchor.
    areqs = []
    all_suffixes = set(candidates)
    # The unsuffixed group ('') is not granular, so it is excluded from the
    # granular group count used for group_policy=isolate.
    num_granular_groups = len(all_suffixes - set(['']))
    for areq_lists_by_suffix in areq_lists_by_anchor.values():
        # Filter out any entries that don't have allocation requests for
        # *all* suffixes (i.e. all RequestGroups)
        if set(areq_lists_by_suffix) != all_suffixes:
            continue
        # We're using itertools.product to go from this:
        # areq_lists_by_suffix = {
        #     '': [areq__A, areq__B, ...],
        #     '1': [areq_1_A, areq_1_B, ...],
        #     ...
        #     '42': [areq_42_A, areq_42_B, ...],
        # }
        # to this:
        # [ [areq__A, areq_1_A, ..., areq_42_A],  Each of these lists is one
        #   [areq__A, areq_1_A, ..., areq_42_B],  areq_list in the loop below.
        #   [areq__A, areq_1_B, ..., areq_42_A],  each areq_list contains one
        #   [areq__A, areq_1_B, ..., areq_42_B],  AllocationRequest from each
        #   [areq__B, areq_1_A, ..., areq_42_A],  RequestGroup. So taken as a
        #   [areq__B, areq_1_A, ..., areq_42_B],  whole, each list is a viable
        #   [areq__B, areq_1_B, ..., areq_42_A],  (preliminary) candidate to
        #   [areq__B, areq_1_B, ..., areq_42_B],  return.
        #   ...,
        # ]
        for areq_list in itertools.product(
                *list(areq_lists_by_suffix.values())):
            # At this point, each AllocationRequest in areq_list is still
            # marked as use_same_provider. This is necessary to filter by group
            # policy, which enforces how these interact with each other.
            if not _satisfies_group_policy(
                    areq_list, group_policy, num_granular_groups):
                continue
            # Now we go from this (where 'arr' is AllocationRequestResource):
            # [ areq__B(arrX, arrY, arrZ),
            #   areq_1_A(arrM, arrN),
            #   ...,
            #   areq_42_B(arrQ)
            # ]
            # to this:
            # areq_combined(arrX, arrY, arrZ, arrM, arrN, arrQ)
            # Note that this discards the information telling us which
            # RequestGroup led to which piece of the final AllocationRequest.
            # We needed that to be present for the previous filter; we need it
            # to be *absent* for the next one (and for the final output).
            areq = _consolidate_allocation_requests(areq_list)
            # Since we sourced this AllocationRequest from multiple
            # *independent* queries, it's possible that the combined result
            # now exceeds capacity where amounts of the same RP+RC were
            # folded together. So do a final capacity check/filter.
            if _exceeds_capacity(areq, psum_res_by_rp_rc):
                continue
            areqs.append(areq)

    # It's possible we've filtered out everything. If so, short out.
    if not areqs:
        return [], []

    # Now we have to produce provider summaries. The provider summaries in
    # the candidates input contain all the information; we just need to
    # filter it down to only the providers in trees represented by our merged
    # list of allocation requests.
    tree_uuids = set()
    for areq in areqs:
        for arr in areq.resource_requests:
            tree_uuids.add(arr.resource_provider.root_provider_uuid)
    psums = [psum for psum in all_psums if
             psum.resource_provider.root_provider_uuid in tree_uuids]

    return areqs, psums
-
-
@base.VersionedObjectRegistry.register_if(False)
class AllocationCandidates(base.VersionedObject):
    """The AllocationCandidates object is a collection of possible allocations
    that match some request for resources, along with some summary information
    about the resource providers involved in these allocation candidates.
    """

    fields = {
        # A collection of allocation possibilities that can be attempted by the
        # caller that would, at the time of calling, meet the requested
        # resource constraints
        'allocation_requests': fields.ListOfObjectsField('AllocationRequest'),
        # Information about usage and inventory that relate to any provider
        # contained in any of the AllocationRequest objects in the
        # allocation_requests field
        'provider_summaries': fields.ListOfObjectsField('ProviderSummary'),
    }

    @classmethod
    def get_by_requests(cls, context, requests, limit=None, group_policy=None):
        """Returns an AllocationCandidates object containing all resource
        providers matching a set of supplied resource constraints, with a set
        of allocation requests constructed from that list of resource
        providers. If CONF.placement.randomize_allocation_candidates is True
        (default is False) then the order of the allocation requests will
        be randomized.

        :param context: Nova RequestContext.
        :param requests: Dict, keyed by suffix, of
                         nova.api.openstack.placement.util.RequestGroup
        :param limit: An integer, N, representing the maximum number of
                      allocation candidates to return. If
                      CONF.placement.randomize_allocation_candidates is True
                      this will be a random sampling of N of the available
                      results. If False then the first N results, in whatever
                      order the database picked them, will be returned. In
                      either case if there are fewer than N total results,
                      all the results will be returned.
        :param group_policy: String indicating how RequestGroups with
                             use_same_provider=True should interact with each
                             other. If the value is "isolate", we will filter
                             out allocation requests where any such
                             RequestGroups are satisfied by the same RP.
        :return: An instance of AllocationCandidates with allocation_requests
                 and provider_summaries satisfying `requests`, limited
                 according to `limit`.
        """
        alloc_reqs, provider_summaries = cls._get_by_requests(
            context, requests, limit=limit, group_policy=group_policy)
        return cls(
            context,
            allocation_requests=alloc_reqs,
            provider_summaries=provider_summaries,
        )

    @staticmethod
    def _get_by_one_request(context, request, sharing_providers, has_trees):
        """Get allocation candidates for one RequestGroup.

        Must be called from within a placement_context_manager.reader
        (or writer) context.

        :param context: Nova RequestContext.
        :param request: One nova.api.openstack.placement.util.RequestGroup
        :param sharing_providers: dict, keyed by resource class internal ID, of
                                  the set of provider IDs containing shared
                                  inventory of that resource class
        :param has_trees: bool indicating there is some level of nesting in the
                          environment (if there isn't, we take faster, simpler
                          code paths)
        :return: A tuple of (allocation_requests, provider_summaries)
                 satisfying `request`.
        """
        # Transform resource string names to internal integer IDs
        resources = {
            _RC_CACHE.id_from_string(key): value
            for key, value in request.resources.items()
        }

        # maps the trait name to the trait internal ID
        required_trait_map = {}
        forbidden_trait_map = {}
        for trait_map, traits in (
                (required_trait_map, request.required_traits),
                (forbidden_trait_map, request.forbidden_traits)):
            if traits:
                trait_map.update(_trait_ids_from_names(context, traits))
                # Double-check that we found a trait ID for each requested name
                # (_trait_ids_from_names silently drops unknown names).
                if len(trait_map) != len(traits):
                    missing = traits - set(trait_map)
                    raise exception.TraitNotFound(names=', '.join(missing))

        member_of = request.member_of

        any_sharing = any(sharing_providers.values())
        if not request.use_same_provider and (has_trees or any_sharing):
            # TODO(jaypipes): The check/callout to handle trees goes here.
            # Build a dict, keyed by resource class internal ID, of lists of
            # internal IDs of resource providers that share some inventory for
            # each resource class requested.
            # If there aren't any providers that have any of the
            # required traits, just exit early...
            if required_trait_map:
                # TODO(cdent): Now that there is also a forbidden_trait_map
                # it should be possible to further optimize this attempt at
                # a quick return, but we leave that to future patches for
                # now.
                trait_rps = _get_provider_ids_having_any_trait(
                    context, required_trait_map)
                if not trait_rps:
                    return [], []
            rp_tuples = _get_trees_matching_all(context, resources,
                required_trait_map, forbidden_trait_map,
                sharing_providers, member_of)
            return _alloc_candidates_multiple_providers(context, resources,
                required_trait_map, forbidden_trait_map, rp_tuples)

        # Either we are processing a single-RP request group, or there are no
        # sharing providers that (help) satisfy the request. Get a list of
        # tuples of (internal provider ID, root provider ID) that have ALL
        # the requested resources and more efficiently construct the
        # allocation requests.
        rp_tuples = _get_provider_ids_matching(context, resources,
                                               required_trait_map,
                                               forbidden_trait_map, member_of)
        return _alloc_candidates_single_provider(context, resources, rp_tuples)

    @classmethod
    # TODO(efried): This is only a writer context because it accesses the
    # resource_providers table via ResourceProvider.get_by_uuid, which does
    # data migration to populate the root_provider_uuid. Change this back to a
    # reader when that migration is no longer happening.
    @db_api.placement_context_manager.writer
    def _get_by_requests(cls, context, requests, limit=None,
                         group_policy=None):
        # TODO(jaypipes): Make a RequestGroupContext object and put these
        # pieces of information in there, passing the context to the various
        # internal functions handling that part of the request.
        sharing = {}
        for request in requests.values():
            member_of = request.member_of
            for rc_name, amount in request.resources.items():
                rc_id = _RC_CACHE.id_from_string(rc_name)
                # Only compute the sharing providers once per resource class.
                if rc_id not in sharing:
                    sharing[rc_id] = _get_providers_with_shared_capacity(
                        context, rc_id, amount, member_of)
        has_trees = _has_provider_trees(context)

        candidates = {}
        for suffix, request in requests.items():
            alloc_reqs, summaries = cls._get_by_one_request(
                context, request, sharing, has_trees)
            LOG.debug("%s (suffix '%s') returned %d matches",
                      str(request), str(suffix), len(alloc_reqs))
            if not alloc_reqs:
                # Shortcut: If any one request resulted in no candidates, the
                # whole operation is shot.
                return [], []
            # Mark each allocation request according to whether its
            # corresponding RequestGroup required it to be restricted to a
            # single provider. We'll need this later to evaluate group_policy.
            for areq in alloc_reqs:
                areq.use_same_provider = request.use_same_provider
            candidates[suffix] = alloc_reqs, summaries

        # At this point, each (alloc_requests, summary_obj) in `candidates` is
        # independent of the others. We need to fold them together such that
        # each allocation request satisfies *all* the incoming `requests`. The
        # `candidates` dict is guaranteed to contain entries for all suffixes,
        # or we would have short-circuited above.
        alloc_request_objs, summary_objs = _merge_candidates(
            candidates, group_policy=group_policy)

        # Limit the number of allocation request objects. We do this after
        # creating all of them so that we can do a random slice without
        # needing to mess with the complex sql above or add additional
        # columns to the DB.
        if limit and limit <= len(alloc_request_objs):
            if CONF.placement.randomize_allocation_candidates:
                alloc_request_objs = random.sample(alloc_request_objs, limit)
            else:
                alloc_request_objs = alloc_request_objs[:limit]
        elif CONF.placement.randomize_allocation_candidates:
            random.shuffle(alloc_request_objs)

        # Limit summaries to only those mentioned in the allocation requests.
        # This condition is only true when the limit above was actually
        # applied (after sampling/slicing, len(alloc_request_objs) == limit).
        if limit and limit <= len(alloc_request_objs):
            kept_summary_objs = []
            alloc_req_rp_uuids = set()
            # Extract resource provider uuids from the resource requests.
            for aro in alloc_request_objs:
                for arr in aro.resource_requests:
                    alloc_req_rp_uuids.add(arr.resource_provider.uuid)
            for summary in summary_objs:
                rp_uuid = summary.resource_provider.uuid
                # Skip a summary if we are limiting and haven't selected an
                # allocation request that uses the resource provider.
                if rp_uuid not in alloc_req_rp_uuids:
                    continue
                kept_summary_objs.append(summary)
        else:
            kept_summary_objs = summary_objs

        return alloc_request_objs, kept_summary_objs
-
-
-@db_api.placement_context_manager.writer
-def reshape(ctx, inventories, allocations):
- """The 'replace the world' strategy that is executed when we want to
- completely replace a set of provider inventory, allocation and consumer
- information in a single transaction.
-
- :note: The reason this has to be done in a single monolithic function is so
- we have a single top-level function on which to decorate with the
- @db_api.placement_context_manager.writer transaction context
- manager. Each time a top-level function that is decorated with this
- exits, the transaction is either COMMIT'd or ROLLBACK'd. We need to
- avoid calling two functions that are already decorated with a
- transaction context manager from a function that *isn't* decorated
- with the transaction context manager if we want all changes involved
- in the sub-functions to operate within a single DB transaction.
-
- :param ctx: `nova.api.openstack.placement.context.RequestContext` object
- containing the DB transaction context.
- :param inventories: dict, keyed by ResourceProvider, of `InventoryList`
- objects representing the replaced inventory information
- for the provider.
- :param allocations: `AllocationList` object containing all allocations for
- all consumers being modified by the reshape operation.
- :raises: `exception.ConcurrentUpdateDetected` when any resource provider or
- consumer generation increment fails due to concurrent changes to
- the same objects.
- """
- # The resource provider objects, keyed by provider UUID, that are involved
- # in this transaction. We keep a cache of these because as we perform the
- # various operations on the providers, their generations increment and we
- # want to "inject" the changed resource provider objects into the
- # AllocationList's objects before calling AllocationList.replace_all().
- # We start with the providers in the allocation objects, but only use one
- # if we don't find it in the inventories.
- affected_providers = {alloc.resource_provider.uuid: alloc.resource_provider
- for alloc in allocations}
- # We have to do the inventory changes in two steps because:
- # - we can't delete inventories with allocations; and
- # - we can't create allocations on nonexistent inventories.
- # So in the first step we create a kind of "union" inventory for each
- # provider. It contains all the inventories that the request wishes to
- # exist in the end, PLUS any inventories that the request wished to remove
- # (in their original form).
- # Note that this can cause us to end up with an interim situation where we
- # have modified an inventory to have less capacity than is currently
- # allocated, but that's allowed by the code. If the final picture is
- # overcommitted, we'll get an appropriate exception when we replace the
- # allocations at the end.
- for rp, new_inv_list in inventories.items():
- LOG.debug("reshaping: *interim* inventory replacement for provider %s",
- rp.uuid)
- # Update the cache. This may be replacing an entry that came from
- # allocations, or adding a new entry from inventories.
- affected_providers[rp.uuid] = rp
-
- # Optimization: If the new inventory is empty, the below would be
- # replacing it with itself (and incrementing the generation)
- # unnecessarily.
- if not new_inv_list:
- continue
-
- # A dict, keyed by resource class, of the Inventory objects. We start
- # with the original inventory list.
- inv_by_rc = {inv.resource_class: inv for inv in
- InventoryList.get_all_by_resource_provider(ctx, rp)}
- # Now add each inventory in the new inventory list. If an inventory for
- # that resource class existed in the original inventory list, it is
- # overwritten.
- for inv in new_inv_list:
- inv_by_rc[inv.resource_class] = inv
- # Set the interim inventory structure.
- rp.set_inventory(InventoryList(objects=list(inv_by_rc.values())))
-
- # NOTE(jaypipes): The above inventory replacements will have
- # incremented the resource provider generations, so we need to look in
- # the AllocationList and swap the resource provider object with the one we
- # saved above that has the updated provider generation in it.
- for alloc in allocations:
- rp_uuid = alloc.resource_provider.uuid
- if rp_uuid in affected_providers:
- alloc.resource_provider = affected_providers[rp_uuid]
-
- # Now we can replace all the allocations
- LOG.debug("reshaping: attempting allocation replacement")
- allocations.replace_all()
-
- # And finally, we can set the inventories to their actual desired state.
- for rp, new_inv_list in inventories.items():
- LOG.debug("reshaping: *final* inventory replacement for provider %s",
- rp.uuid)
- rp.set_inventory(new_inv_list)
diff --git a/nova/api/openstack/placement/objects/user.py b/nova/api/openstack/placement/objects/user.py
deleted file mode 100644
index 8d5af8473d..0000000000
--- a/nova/api/openstack/placement/objects/user.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_db import exception as db_exc
-from oslo_versionedobjects import base
-from oslo_versionedobjects import fields
-import sqlalchemy as sa
-
-from nova.api.openstack.placement import db_api
-from nova.api.openstack.placement import exception
-from nova.db.sqlalchemy import api_models as models
-
-CONF = cfg.CONF
-USER_TBL = models.User.__table__
-
-
-@db_api.placement_context_manager.writer
-def ensure_incomplete_user(ctx):
- """Ensures that a user record is created for the "incomplete consumer
- user". Returns the internal ID of that record.
- """
- incomplete_id = CONF.placement.incomplete_consumer_user_id
- sel = sa.select([USER_TBL.c.id]).where(
- USER_TBL.c.external_id == incomplete_id)
- res = ctx.session.execute(sel).fetchone()
- if res:
- return res[0]
- ins = USER_TBL.insert().values(external_id=incomplete_id)
- res = ctx.session.execute(ins)
- return res.inserted_primary_key[0]
-
-
-@db_api.placement_context_manager.reader
-def _get_user_by_external_id(ctx, external_id):
- users = sa.alias(USER_TBL, name="u")
- cols = [
- users.c.id,
- users.c.external_id,
- users.c.updated_at,
- users.c.created_at
- ]
- sel = sa.select(cols)
- sel = sel.where(users.c.external_id == external_id)
- res = ctx.session.execute(sel).fetchone()
- if not res:
- raise exception.UserNotFound(external_id=external_id)
-
- return dict(res)
-
-
-@base.VersionedObjectRegistry.register_if(False)
-class User(base.VersionedObject):
-
- fields = {
- 'id': fields.IntegerField(read_only=True),
- 'external_id': fields.StringField(nullable=False),
- }
-
- @staticmethod
- def _from_db_object(ctx, target, source):
- for field in target.fields:
- setattr(target, field, source[field])
-
- target._context = ctx
- target.obj_reset_changes()
- return target
-
- @classmethod
- def get_by_external_id(cls, ctx, external_id):
- res = _get_user_by_external_id(ctx, external_id)
- return cls._from_db_object(ctx, cls(ctx), res)
-
- def create(self):
- @db_api.placement_context_manager.writer
- def _create_in_db(ctx):
- db_obj = models.User(external_id=self.external_id)
- try:
- db_obj.save(ctx.session)
- except db_exc.DBDuplicateEntry:
- raise exception.UserExists(external_id=self.external_id)
- self._from_db_object(ctx, self, db_obj)
- _create_in_db(self._context)
diff --git a/nova/api/openstack/placement/policies/__init__.py b/nova/api/openstack/placement/policies/__init__.py
deleted file mode 100644
index be0496d23b..0000000000
--- a/nova/api/openstack/placement/policies/__init__.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import itertools
-
-from nova.api.openstack.placement.policies import aggregate
-from nova.api.openstack.placement.policies import allocation
-from nova.api.openstack.placement.policies import allocation_candidate
-from nova.api.openstack.placement.policies import base
-from nova.api.openstack.placement.policies import inventory
-from nova.api.openstack.placement.policies import reshaper
-from nova.api.openstack.placement.policies import resource_class
-from nova.api.openstack.placement.policies import resource_provider
-from nova.api.openstack.placement.policies import trait
-from nova.api.openstack.placement.policies import usage
-
-
-def list_rules():
- return itertools.chain(
- base.list_rules(),
- resource_provider.list_rules(),
- resource_class.list_rules(),
- inventory.list_rules(),
- aggregate.list_rules(),
- usage.list_rules(),
- trait.list_rules(),
- allocation.list_rules(),
- allocation_candidate.list_rules(),
- reshaper.list_rules(),
- )
diff --git a/nova/api/openstack/placement/policies/aggregate.py b/nova/api/openstack/placement/policies/aggregate.py
deleted file mode 100644
index 8e2bd8c3ab..0000000000
--- a/nova/api/openstack/placement/policies/aggregate.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_policy import policy
-
-from nova.api.openstack.placement.policies import base
-
-
-PREFIX = 'placement:resource_providers:aggregates:%s'
-LIST = PREFIX % 'list'
-UPDATE = PREFIX % 'update'
-BASE_PATH = '/resource_providers/{uuid}/aggregates'
-
-rules = [
- policy.DocumentedRuleDefault(
- LIST,
- base.RULE_ADMIN_API,
- "List resource provider aggregates.",
- [
- {
- 'method': 'GET',
- 'path': BASE_PATH
- }
- ],
- scope_types=['system']
- ),
- policy.DocumentedRuleDefault(
- UPDATE,
- base.RULE_ADMIN_API,
- "Update resource provider aggregates.",
- [
- {
- 'method': 'PUT',
- 'path': BASE_PATH
- }
- ],
- scope_types=['system']
- ),
-]
-
-
-def list_rules():
- return rules
diff --git a/nova/api/openstack/placement/policies/allocation.py b/nova/api/openstack/placement/policies/allocation.py
deleted file mode 100644
index a5f1c2e001..0000000000
--- a/nova/api/openstack/placement/policies/allocation.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_policy import policy
-
-from nova.api.openstack.placement.policies import base
-
-
-RP_ALLOC_LIST = 'placement:resource_providers:allocations:list'
-
-ALLOC_PREFIX = 'placement:allocations:%s'
-ALLOC_LIST = ALLOC_PREFIX % 'list'
-ALLOC_MANAGE = ALLOC_PREFIX % 'manage'
-ALLOC_UPDATE = ALLOC_PREFIX % 'update'
-ALLOC_DELETE = ALLOC_PREFIX % 'delete'
-
-rules = [
- policy.DocumentedRuleDefault(
- ALLOC_MANAGE,
- base.RULE_ADMIN_API,
- "Manage allocations.",
- [
- {
- 'method': 'POST',
- 'path': '/allocations'
- }
- ],
- scope_types=['system'],
- ),
- policy.DocumentedRuleDefault(
- ALLOC_LIST,
- base.RULE_ADMIN_API,
- "List allocations.",
- [
- {
- 'method': 'GET',
- 'path': '/allocations/{consumer_uuid}'
- }
- ],
- scope_types=['system']
- ),
- policy.DocumentedRuleDefault(
- ALLOC_UPDATE,
- base.RULE_ADMIN_API,
- "Update allocations.",
- [
- {
- 'method': 'PUT',
- 'path': '/allocations/{consumer_uuid}'
- }
- ],
- scope_types=['system'],
- ),
- policy.DocumentedRuleDefault(
- ALLOC_DELETE,
- base.RULE_ADMIN_API,
- "Delete allocations.",
- [
- {
- 'method': 'DELETE',
- 'path': '/allocations/{consumer_uuid}'
- }
- ],
- scope_types=['system'],
- ),
- policy.DocumentedRuleDefault(
- RP_ALLOC_LIST,
- base.RULE_ADMIN_API,
- "List resource provider allocations.",
- [
- {
- 'method': 'GET',
- 'path': '/resource_providers/{uuid}/allocations'
- }
- ],
- scope_types=['system'],
- ),
-]
-
-
-def list_rules():
- return rules
diff --git a/nova/api/openstack/placement/policies/allocation_candidate.py b/nova/api/openstack/placement/policies/allocation_candidate.py
deleted file mode 100644
index e2ae655370..0000000000
--- a/nova/api/openstack/placement/policies/allocation_candidate.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_policy import policy
-
-from nova.api.openstack.placement.policies import base
-
-
-LIST = 'placement:allocation_candidates:list'
-
-rules = [
- policy.DocumentedRuleDefault(
- LIST,
- base.RULE_ADMIN_API,
- "List allocation candidates.",
- [
- {
- 'method': 'GET',
- 'path': '/allocation_candidates'
- }
- ],
- scope_types=['system'],
- )
-]
-
-
-def list_rules():
- return rules
diff --git a/nova/api/openstack/placement/policies/base.py b/nova/api/openstack/placement/policies/base.py
deleted file mode 100644
index 1e728a37fa..0000000000
--- a/nova/api/openstack/placement/policies/base.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_policy import policy
-
-RULE_ADMIN_API = 'rule:admin_api'
-
-rules = [
- # "placement" is the default rule (action) used for all routes that do
- # not yet have granular policy rules. It is used in
- # PlacementHandler.__call__ and can be dropped once all routes have
- # granular policy handling.
- policy.RuleDefault(
- "placement",
- "role:admin",
- description="This rule is used for all routes that do not yet "
- "have granular policy rules. It will be replaced "
- "with rule:admin_api.",
- deprecated_for_removal=True,
- deprecated_reason="This was a catch-all rule hard-coded into "
- "the placement service and has been superseded by "
- "granular policy rules per operation.",
- deprecated_since="18.0.0"),
- policy.RuleDefault(
- "admin_api",
- "role:admin",
- description="Default rule for most placement APIs.",
- scope_types=['system']),
-]
-
-
-def list_rules():
- return rules
diff --git a/nova/api/openstack/placement/policies/inventory.py b/nova/api/openstack/placement/policies/inventory.py
deleted file mode 100644
index 1f3d38f413..0000000000
--- a/nova/api/openstack/placement/policies/inventory.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_policy import policy
-
-from nova.api.openstack.placement.policies import base
-
-
-PREFIX = 'placement:resource_providers:inventories:%s'
-LIST = PREFIX % 'list'
-CREATE = PREFIX % 'create'
-SHOW = PREFIX % 'show'
-UPDATE = PREFIX % 'update'
-DELETE = PREFIX % 'delete'
-BASE_PATH = '/resource_providers/{uuid}/inventories'
-
-rules = [
- policy.DocumentedRuleDefault(
- LIST,
- base.RULE_ADMIN_API,
- "List resource provider inventories.",
- [
- {
- 'method': 'GET',
- 'path': BASE_PATH
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- CREATE,
- base.RULE_ADMIN_API,
- "Create one resource provider inventory.",
- [
- {
- 'method': 'POST',
- 'path': BASE_PATH
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- SHOW,
- base.RULE_ADMIN_API,
- "Show resource provider inventory.",
- [
- {
- 'method': 'GET',
- 'path': BASE_PATH + '/{resource_class}'
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- UPDATE,
- base.RULE_ADMIN_API,
- "Update resource provider inventory.",
- [
- {
- 'method': 'PUT',
- 'path': BASE_PATH
- },
- {
- 'method': 'PUT',
- 'path': BASE_PATH + '/{resource_class}'
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- DELETE,
- base.RULE_ADMIN_API,
- "Delete resource provider inventory.",
- [
- {
- 'method': 'DELETE',
- 'path': BASE_PATH
- },
- {
- 'method': 'DELETE',
- 'path': BASE_PATH + '/{resource_class}'
- }
- ],
- scope_types=['system']),
-]
-
-
-def list_rules():
- return rules
diff --git a/nova/api/openstack/placement/policies/reshaper.py b/nova/api/openstack/placement/policies/reshaper.py
deleted file mode 100644
index a6615ac487..0000000000
--- a/nova/api/openstack/placement/policies/reshaper.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_policy import policy
-
-from nova.api.openstack.placement.policies import base
-
-
-PREFIX = 'placement:reshaper:%s'
-RESHAPE = PREFIX % 'reshape'
-
-rules = [
- policy.DocumentedRuleDefault(
- RESHAPE,
- base.RULE_ADMIN_API,
- "Reshape Inventory and Allocations.",
- [
- {
- 'method': 'POST',
- 'path': '/reshaper'
- }
- ],
- scope_types=['system']),
-]
-
-
-def list_rules():
- return rules
diff --git a/nova/api/openstack/placement/policies/resource_class.py b/nova/api/openstack/placement/policies/resource_class.py
deleted file mode 100644
index 75acab9d3b..0000000000
--- a/nova/api/openstack/placement/policies/resource_class.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_policy import policy
-
-from nova.api.openstack.placement.policies import base
-
-
-PREFIX = 'placement:resource_classes:%s'
-LIST = PREFIX % 'list'
-CREATE = PREFIX % 'create'
-SHOW = PREFIX % 'show'
-UPDATE = PREFIX % 'update'
-DELETE = PREFIX % 'delete'
-
-rules = [
- policy.DocumentedRuleDefault(
- LIST,
- base.RULE_ADMIN_API,
- "List resource classes.",
- [
- {
- 'method': 'GET',
- 'path': '/resource_classes'
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- CREATE,
- base.RULE_ADMIN_API,
- "Create resource class.",
- [
- {
- 'method': 'POST',
- 'path': '/resource_classes'
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- SHOW,
- base.RULE_ADMIN_API,
- "Show resource class.",
- [
- {
- 'method': 'GET',
- 'path': '/resource_classes/{name}'
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- UPDATE,
- base.RULE_ADMIN_API,
- "Update resource class.",
- [
- {
- 'method': 'PUT',
- 'path': '/resource_classes/{name}'
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- DELETE,
- base.RULE_ADMIN_API,
- "Delete resource class.",
- [
- {
- 'method': 'DELETE',
- 'path': '/resource_classes/{name}'
- }
- ],
- scope_types=['system']),
-]
-
-
-def list_rules():
- return rules
diff --git a/nova/api/openstack/placement/policies/resource_provider.py b/nova/api/openstack/placement/policies/resource_provider.py
deleted file mode 100644
index 7c4826bd70..0000000000
--- a/nova/api/openstack/placement/policies/resource_provider.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_policy import policy
-
-from nova.api.openstack.placement.policies import base
-
-
-PREFIX = 'placement:resource_providers:%s'
-LIST = PREFIX % 'list'
-CREATE = PREFIX % 'create'
-SHOW = PREFIX % 'show'
-UPDATE = PREFIX % 'update'
-DELETE = PREFIX % 'delete'
-
-rules = [
- policy.DocumentedRuleDefault(
- LIST,
- base.RULE_ADMIN_API,
- "List resource providers.",
- [
- {
- 'method': 'GET',
- 'path': '/resource_providers'
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- CREATE,
- base.RULE_ADMIN_API,
- "Create resource provider.",
- [
- {
- 'method': 'POST',
- 'path': '/resource_providers'
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- SHOW,
- base.RULE_ADMIN_API,
- "Show resource provider.",
- [
- {
- 'method': 'GET',
- 'path': '/resource_providers/{uuid}'
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- UPDATE,
- base.RULE_ADMIN_API,
- "Update resource provider.",
- [
- {
- 'method': 'PUT',
- 'path': '/resource_providers/{uuid}'
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- DELETE,
- base.RULE_ADMIN_API,
- "Delete resource provider.",
- [
- {
- 'method': 'DELETE',
- 'path': '/resource_providers/{uuid}'
- }
- ],
- scope_types=['system']),
-]
-
-
-def list_rules():
- return rules
diff --git a/nova/api/openstack/placement/policies/trait.py b/nova/api/openstack/placement/policies/trait.py
deleted file mode 100644
index 6b35a703de..0000000000
--- a/nova/api/openstack/placement/policies/trait.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_policy import policy
-
-from nova.api.openstack.placement.policies import base
-
-
-RP_TRAIT_PREFIX = 'placement:resource_providers:traits:%s'
-RP_TRAIT_LIST = RP_TRAIT_PREFIX % 'list'
-RP_TRAIT_UPDATE = RP_TRAIT_PREFIX % 'update'
-RP_TRAIT_DELETE = RP_TRAIT_PREFIX % 'delete'
-
-TRAITS_PREFIX = 'placement:traits:%s'
-TRAITS_LIST = TRAITS_PREFIX % 'list'
-TRAITS_SHOW = TRAITS_PREFIX % 'show'
-TRAITS_UPDATE = TRAITS_PREFIX % 'update'
-TRAITS_DELETE = TRAITS_PREFIX % 'delete'
-
-
-rules = [
- policy.DocumentedRuleDefault(
- TRAITS_LIST,
- base.RULE_ADMIN_API,
- "List traits.",
- [
- {
- 'method': 'GET',
- 'path': '/traits'
- }
- ],
- scope_types=['system']
- ),
- policy.DocumentedRuleDefault(
- TRAITS_SHOW,
- base.RULE_ADMIN_API,
- "Show trait.",
- [
- {
- 'method': 'GET',
- 'path': '/traits/{name}'
- }
- ],
- scope_types=['system'],
- ),
- policy.DocumentedRuleDefault(
- TRAITS_UPDATE,
- base.RULE_ADMIN_API,
- "Update trait.",
- [
- {
- 'method': 'PUT',
- 'path': '/traits/{name}'
- }
- ],
- scope_types=['system'],
- ),
- policy.DocumentedRuleDefault(
- TRAITS_DELETE,
- base.RULE_ADMIN_API,
- "Delete trait.",
- [
- {
- 'method': 'DELETE',
- 'path': '/traits/{name}'
- }
- ],
- scope_types=['system'],
- ),
- policy.DocumentedRuleDefault(
- RP_TRAIT_LIST,
- base.RULE_ADMIN_API,
- "List resource provider traits.",
- [
- {
- 'method': 'GET',
- 'path': '/resource_providers/{uuid}/traits'
- }
- ],
- scope_types=['system'],
- ),
- policy.DocumentedRuleDefault(
- RP_TRAIT_UPDATE,
- base.RULE_ADMIN_API,
- "Update resource provider traits.",
- [
- {
- 'method': 'PUT',
- 'path': '/resource_providers/{uuid}/traits'
- }
- ],
- scope_types=['system'],
- ),
- policy.DocumentedRuleDefault(
- RP_TRAIT_DELETE,
- base.RULE_ADMIN_API,
- "Delete resource provider traits.",
- [
- {
- 'method': 'DELETE',
- 'path': '/resource_providers/{uuid}/traits'
- }
- ],
- scope_types=['system'],
- ),
-]
-
-
-def list_rules():
- return rules
diff --git a/nova/api/openstack/placement/policies/usage.py b/nova/api/openstack/placement/policies/usage.py
deleted file mode 100644
index 6543fa4359..0000000000
--- a/nova/api/openstack/placement/policies/usage.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from oslo_policy import policy
-
-from nova.api.openstack.placement.policies import base
-
-
-PROVIDER_USAGES = 'placement:resource_providers:usages'
-TOTAL_USAGES = 'placement:usages'
-
-
-rules = [
- policy.DocumentedRuleDefault(
- PROVIDER_USAGES,
- base.RULE_ADMIN_API,
- "List resource provider usages.",
- [
- {
- 'method': 'GET',
- 'path': '/resource_providers/{uuid}/usages'
- }
- ],
- scope_types=['system']),
- policy.DocumentedRuleDefault(
- # TODO(mriedem): At some point we might set scope_types=['project']
- # so that non-admin project-scoped token users can query usages for
- # their project. The context.can() target will need to change as well
- # in the actual policy enforcement check in the handler code.
- TOTAL_USAGES,
- base.RULE_ADMIN_API,
- "List total resource usages for a given project.",
- [
- {
- 'method': 'GET',
- 'path': '/usages'
- }
- ],
- scope_types=['system'])
-]
-
-
-def list_rules():
- return rules
diff --git a/nova/api/openstack/placement/policy.py b/nova/api/openstack/placement/policy.py
deleted file mode 100644
index cad6fdf838..0000000000
--- a/nova/api/openstack/placement/policy.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Policy Enforcement for placement API."""
-
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_policy import policy
-from oslo_utils import excutils
-
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import policies
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-_ENFORCER_PLACEMENT = None
-
-
-def reset():
- """Used to reset the global _ENFORCER_PLACEMENT between test runs."""
- global _ENFORCER_PLACEMENT
- if _ENFORCER_PLACEMENT:
- _ENFORCER_PLACEMENT.clear()
- _ENFORCER_PLACEMENT = None
-
-
-def init():
- """Init an Enforcer class. Sets the _ENFORCER_PLACEMENT global."""
- global _ENFORCER_PLACEMENT
- if not _ENFORCER_PLACEMENT:
- # NOTE(mriedem): We have to explicitly pass in the
- # [placement]/policy_file path because otherwise oslo_policy defaults
- # to read the policy file from config option [oslo_policy]/policy_file
- # which is used by nova. In other words, to have separate policy files
- # for placement and nova, we have to use separate policy_file options.
- _ENFORCER_PLACEMENT = policy.Enforcer(
- CONF, policy_file=CONF.placement.policy_file)
- _ENFORCER_PLACEMENT.register_defaults(policies.list_rules())
- _ENFORCER_PLACEMENT.load_rules()
-
-
-def get_enforcer():
- # This method is used by oslopolicy CLI scripts in order to generate policy
- # files from overrides on disk and defaults in code. We can just pass an
- # empty list and let oslo do the config lifting for us.
- # TODO(mriedem): Change the project kwarg value to "placement" once
- # this code is extracted from nova.
- cfg.CONF([], project='nova')
- init()
- return _ENFORCER_PLACEMENT
-
-
-def authorize(context, action, target, do_raise=True):
- """Verifies that the action is valid on the target in this context.
-
- :param context: instance of
- nova.api.openstack.placement.context.RequestContext
- :param action: string representing the action to be checked
- this should be colon separated for clarity, i.e.
- ``placement:resource_providers:list``
- :param target: dictionary representing the object of the action;
- for object creation this should be a dictionary representing the
- owner of the object e.g. ``{'project_id': context.project_id}``.
- :param do_raise: if True (the default), raises PolicyNotAuthorized;
- if False, returns False
- :raises nova.api.openstack.placement.exception.PolicyNotAuthorized: if
- verification fails and do_raise is True.
- :returns: non-False value (not necessarily "True") if authorized, and the
- exact value False if not authorized and do_raise is False.
- """
- init()
- credentials = context.to_policy_values()
- try:
- # NOTE(mriedem): The "action" kwarg is for the PolicyNotAuthorized exc.
- return _ENFORCER_PLACEMENT.authorize(
- action, target, credentials, do_raise=do_raise,
- exc=exception.PolicyNotAuthorized, action=action)
- except policy.PolicyNotRegistered:
- with excutils.save_and_reraise_exception():
- LOG.exception('Policy not registered')
- except Exception:
- with excutils.save_and_reraise_exception():
- LOG.debug('Policy check for %(action)s failed with credentials '
- '%(credentials)s',
- {'action': action, 'credentials': credentials})
diff --git a/nova/api/openstack/placement/requestlog.py b/nova/api/openstack/placement/requestlog.py
deleted file mode 100644
index da7be6a37f..0000000000
--- a/nova/api/openstack/placement/requestlog.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Simple middleware for request logging."""
-
-from oslo_log import log as logging
-
-from nova.api.openstack.placement import microversion
-
-LOG = logging.getLogger(__name__)
-
-
-class RequestLog(object):
- """WSGI Middleware to write a simple request log to.
-
- Borrowed from Paste Translogger
- """
-
- format = ('%(REMOTE_ADDR)s "%(REQUEST_METHOD)s %(REQUEST_URI)s" '
- 'status: %(status)s len: %(bytes)s '
- 'microversion: %(microversion)s')
-
- def __init__(self, application):
- self.application = application
-
- def __call__(self, environ, start_response):
- LOG.debug('Starting request: %s "%s %s"',
- environ['REMOTE_ADDR'], environ['REQUEST_METHOD'],
- self._get_uri(environ))
- # Set the accept header if it is not otherwise set or is '*/*'. This
- # ensures that error responses will be in JSON.
- accept = environ.get('HTTP_ACCEPT')
- if not accept or accept == '*/*':
- environ['HTTP_ACCEPT'] = 'application/json'
- if LOG.isEnabledFor(logging.INFO):
- return self._log_app(environ, start_response)
- else:
- return self.application(environ, start_response)
-
- @staticmethod
- def _get_uri(environ):
- req_uri = (environ.get('SCRIPT_NAME', '')
- + environ.get('PATH_INFO', ''))
- if environ.get('QUERY_STRING'):
- req_uri += '?' + environ['QUERY_STRING']
- return req_uri
-
- def _log_app(self, environ, start_response):
- req_uri = self._get_uri(environ)
-
- def replacement_start_response(status, headers, exc_info=None):
- """We need to gaze at the content-length, if set, to
- write log info.
- """
- size = None
- for name, value in headers:
- if name.lower() == 'content-length':
- size = value
- self.write_log(environ, req_uri, status, size)
- return start_response(status, headers, exc_info)
-
- return self.application(environ, replacement_start_response)
-
- def write_log(self, environ, req_uri, status, size):
- """Write the log info out in a formatted form to ``LOG.info``.
- """
- if size is None:
- size = '-'
- log_format = {
- 'REMOTE_ADDR': environ.get('REMOTE_ADDR', '-'),
- 'REQUEST_METHOD': environ['REQUEST_METHOD'],
- 'REQUEST_URI': req_uri,
- 'status': status.split(None, 1)[0],
- 'bytes': size,
- 'microversion': environ.get(
- microversion.MICROVERSION_ENVIRON, '-'),
- }
- LOG.info(self.format, log_format)
diff --git a/nova/api/openstack/placement/resource_class_cache.py b/nova/api/openstack/placement/resource_class_cache.py
deleted file mode 100644
index a72b4177ea..0000000000
--- a/nova/api/openstack/placement/resource_class_cache.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_concurrency import lockutils
-import sqlalchemy as sa
-
-from nova.api.openstack.placement import db_api
-from nova.api.openstack.placement import exception
-from nova.db.sqlalchemy import api_models as models
-from nova import rc_fields as fields
-
-_RC_TBL = models.ResourceClass.__table__
-_LOCKNAME = 'rc_cache'
-
-
-@db_api.placement_context_manager.reader
-def _refresh_from_db(ctx, cache):
- """Grabs all custom resource classes from the DB table and populates the
- supplied cache object's internal integer and string identifier dicts.
-
- :param cache: ResourceClassCache object to refresh.
- """
- with db_api.placement_context_manager.reader.connection.using(ctx) as conn:
- sel = sa.select([_RC_TBL.c.id, _RC_TBL.c.name, _RC_TBL.c.updated_at,
- _RC_TBL.c.created_at])
- res = conn.execute(sel).fetchall()
- cache.id_cache = {r[1]: r[0] for r in res}
- cache.str_cache = {r[0]: r[1] for r in res}
- cache.all_cache = {r[1]: r for r in res}
-
-
-class ResourceClassCache(object):
- """A cache of integer and string lookup values for resource classes."""
-
- # List of dict of all standard resource classes, where every list item
- # have a form {'id': <ID>, 'name': <NAME>}
- STANDARDS = [{'id': fields.ResourceClass.STANDARD.index(s), 'name': s,
- 'updated_at': None, 'created_at': None}
- for s in fields.ResourceClass.STANDARD]
-
- def __init__(self, ctx):
- """Initialize the cache of resource class identifiers.
-
- :param ctx: `nova.context.RequestContext` from which we can grab a
- `SQLAlchemy.Connection` object to use for any DB lookups.
- """
- self.ctx = ctx
- self.id_cache = {}
- self.str_cache = {}
- self.all_cache = {}
-
- def clear(self):
- with lockutils.lock(_LOCKNAME):
- self.id_cache = {}
- self.str_cache = {}
- self.all_cache = {}
-
- def id_from_string(self, rc_str):
- """Given a string representation of a resource class -- e.g. "DISK_GB"
- or "IRON_SILVER" -- return the integer code for the resource class. For
- standard resource classes, this integer code will match the list of
- resource classes on the fields.ResourceClass field type. Other custom
- resource classes will cause a DB lookup into the resource_classes
- table, however the results of these DB lookups are cached since the
- lookups are so frequent.
-
- :param rc_str: The string representation of the resource class to look
- up a numeric identifier for.
- :returns integer identifier for the resource class, or None, if no such
- resource class was found in the list of standard resource
- classes or the resource_classes database table.
- :raises `exception.ResourceClassNotFound` if rc_str cannot be found in
- either the standard classes or the DB.
- """
- # First check the standard resource classes
- if rc_str in fields.ResourceClass.STANDARD:
- return fields.ResourceClass.STANDARD.index(rc_str)
-
- with lockutils.lock(_LOCKNAME):
- if rc_str in self.id_cache:
- return self.id_cache[rc_str]
- # Otherwise, check the database table
- _refresh_from_db(self.ctx, self)
- if rc_str in self.id_cache:
- return self.id_cache[rc_str]
- raise exception.ResourceClassNotFound(resource_class=rc_str)
-
- def all_from_string(self, rc_str):
- """Given a string representation of a resource class -- e.g. "DISK_GB"
- or "CUSTOM_IRON_SILVER" -- return all the resource class info.
-
- :param rc_str: The string representation of the resource class for
- which to look up a resource_class.
- :returns: dict representing the resource class fields, if the
- resource class was found in the list of standard
- resource classes or the resource_classes database table.
- :raises: `exception.ResourceClassNotFound` if rc_str cannot be found in
- either the standard classes or the DB.
- """
- # First check the standard resource classes
- if rc_str in fields.ResourceClass.STANDARD:
- return {'id': fields.ResourceClass.STANDARD.index(rc_str),
- 'name': rc_str,
- 'updated_at': None,
- 'created_at': None}
-
- with lockutils.lock(_LOCKNAME):
- if rc_str in self.all_cache:
- return self.all_cache[rc_str]
- # Otherwise, check the database table
- _refresh_from_db(self.ctx, self)
- if rc_str in self.all_cache:
- return self.all_cache[rc_str]
- raise exception.ResourceClassNotFound(resource_class=rc_str)
-
- def string_from_id(self, rc_id):
- """The reverse of the id_from_string() method. Given a supplied numeric
- identifier for a resource class, we look up the corresponding string
- representation, either in the list of standard resource classes or via
- a DB lookup. The results of these DB lookups are cached since the
- lookups are so frequent.
-
- :param rc_id: The numeric representation of the resource class to look
- up a string identifier for.
- :returns: string identifier for the resource class, or None, if no such
- resource class was found in the list of standard resource
- classes or the resource_classes database table.
- :raises `exception.ResourceClassNotFound` if rc_id cannot be found in
- either the standard classes or the DB.
- """
- # First check the fields.ResourceClass.STANDARD values
- try:
- return fields.ResourceClass.STANDARD[rc_id]
- except IndexError:
- pass
-
- with lockutils.lock(_LOCKNAME):
- if rc_id in self.str_cache:
- return self.str_cache[rc_id]
-
- # Otherwise, check the database table
- _refresh_from_db(self.ctx, self)
- if rc_id in self.str_cache:
- return self.str_cache[rc_id]
- raise exception.ResourceClassNotFound(resource_class=rc_id)
diff --git a/nova/api/openstack/placement/rest_api_version_history.rst b/nova/api/openstack/placement/rest_api_version_history.rst
deleted file mode 100644
index 10bd180195..0000000000
--- a/nova/api/openstack/placement/rest_api_version_history.rst
+++ /dev/null
@@ -1,518 +0,0 @@
-REST API Version History
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-This documents the changes made to the REST API with every microversion change.
-The description for each version should be a verbose one which has enough
-information to be suitable for use in user documentation.
-
-.. _1.0 (Maximum in Newton):
-
-1.0 Initial Version (Maximum in Newton)
----------------------------------------
-
-.. versionadded:: Newton
-
-This is the initial version of the placement REST API that was released in
-Nova 14.0.0 (Newton). This contains the following routes:
-
-* ``/resource_providers``
-* ``/resource_providers/allocations``
-* ``/resource_providers/inventories``
-* ``/resource_providers/usages``
-* ``/allocations``
-
-1.1 Resource provider aggregates
---------------------------------
-
-.. versionadded:: Ocata
-
-The 1.1 version adds support for associating aggregates with resource
-providers.
-
-The following new operations are added:
-
-``GET /resource_providers/{uuid}/aggregates``
- Return all aggregates associated with a resource provider
-
-``PUT /resource_providers/{uuid}/aggregates``
- Update the aggregates associated with a resource provider
-
-1.2 Add custom resource classes
--------------------------------
-
-.. versionadded:: Ocata
-
-Placement API version 1.2 adds basic operations allowing an admin to create,
-list and delete custom resource classes.
-
-The following new routes are added:
-
-``GET /resource_classes``
- Return all resource classes
-
-``POST /resource_classes``
- Create a new custom resource class
-
-``PUT /resource_classes/{name}``
- Update the name of a custom resource class
-
-``DELETE /resource_classes/{name}``
- Delete a custom resource class
-
-``GET /resource_classes/{name}``
- Get a single resource class
-
-Custom resource classes must begin with the prefix ``CUSTOM_`` and contain only
-the letters A through Z, the numbers 0 through 9 and the underscore ``_``
-character.
-
-1.3 member_of query parameter
------------------------------
-
-.. versionadded:: Ocata
-
-Version 1.3 adds support for listing resource providers that are members of any
-of the list of aggregates provided using a ``member_of`` query parameter::
-
- ?member_of=in:{agg1_uuid},{agg2_uuid},{agg3_uuid}
-
-1.4 Filter resource providers by requested resource capacity (Maximum in Ocata)
--------------------------------------------------------------------------------
-
-.. versionadded:: Ocata
-
-The 1.4 version adds support for querying resource providers that have the
-ability to serve a requested set of resources. A new "resources" query string
-parameter is now accepted to the ``GET /resource_providers`` API call. This
-parameter indicates the requested amounts of various resources that a provider
-must have the capacity to serve. The "resources" query string parameter takes
-the form::
-
- ?resources=$RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT
-
-For instance, if the user wishes to see resource providers that can service a
-request for 2 vCPUs, 1024 MB of RAM and 50 GB of disk space, the user can issue
-a request to::
-
- GET /resource_providers?resources=VCPU:2,MEMORY_MB:1024,DISK_GB:50
-
-If the resource class does not exist, then it will return a HTTP 400.
-
-.. note:: The resources filtering is also based on the `min_unit`, `max_unit`
- and `step_size` of the inventory record. For example, if the `max_unit` is
- 512 for the DISK_GB inventory for a particular resource provider and a
- GET request is made for `DISK_GB:1024`, that resource provider will not be
- returned. The `min_unit` is the minimum amount of resource that can be
- requested for a given inventory and resource provider. The `step_size` is
- the increment of resource that can be requested for a given resource on a
- given provider.
-
-1.5 DELETE all inventory for a resource provider
-------------------------------------------------
-
-.. versionadded:: Pike
-
-Placement API version 1.5 adds DELETE method for deleting all inventory for a
-resource provider. The following new method is supported:
-
-``DELETE /resource_providers/{uuid}/inventories``
-
- Delete all inventories for a given resource provider
-
-1.6 Traits API
---------------
-
-.. versionadded:: Pike
-
-The 1.6 version adds basic operations allowing an admin to create, list, and
-delete custom traits, also adds basic operations allowing an admin to attach
-traits to a resource provider.
-
-The following new routes are added:
-
-``GET /traits``
- Return all resource classes.
-
-``PUT /traits/{name}``
- Insert a single custom trait.
-
-``GET /traits/{name}``
- Check if a trait name exists.
-
-``DELETE /traits/{name}``
- Delete the specified trait.
-
-``GET /resource_providers/{uuid}/traits``
- Return all traits associated with a specific resource provider.
-
-``PUT /resource_providers/{uuid}/traits``
- Update all traits for a specific resource provider.
-
-``DELETE /resource_providers/{uuid}/traits``
- Remove any existing trait associations for a specific resource provider
-
-Custom traits must begin with the prefix ``CUSTOM_`` and contain only the
-letters A through Z, the numbers 0 through 9 and the underscore ``_``
-character.
-
-1.7 Idempotent PUT /resource_classes/{name}
--------------------------------------------
-
-.. versionadded:: Pike
-
-The 1.7 version changes handling of ``PUT /resource_classes/{name}`` to be a
-create or verification of the resource class with ``{name}``. If the resource
-class is a custom resource class and does not already exist it will be created
-and a ``201`` response code returned. If the class already exists the response
-code will be ``204``. This makes it possible to check or create a resource
-class in one request.
-
-1.8 Require placement 'project_id', 'user_id' in PUT /allocations
------------------------------------------------------------------
-
-.. versionadded:: Pike
-
-The 1.8 version adds ``project_id`` and ``user_id`` required request parameters
-to ``PUT /allocations``.
-
-1.9 Add GET /usages
---------------------
-
-.. versionadded:: Pike
-
-The 1.9 version adds usages that can be queried by a project or project/user.
-
-The following new routes are added:
-
-``GET /usages?project_id=<project_id>``
- Return all usages for a given project.
-
-``GET /usages?project_id=<project_id>&user_id=<user_id>``
- Return all usages for a given project and user.
-
-1.10 Allocation candidates (Maximum in Pike)
---------------------------------------------
-
-.. versionadded:: Pike
-
-The 1.10 version brings a new REST resource endpoint for getting a list of
-allocation candidates. Allocation candidates are collections of possible
-allocations against resource providers that can satisfy a particular request
-for resources.
-
-1.11 Add 'allocations' link to the ``GET /resource_providers`` response
------------------------------------------------------------------------
-
-.. versionadded:: Queens
-
-The ``/resource_providers/{rp_uuid}/allocations`` endpoint has been available
-since version 1.0, but was not listed in the ``links`` section of the
-``GET /resource_providers`` response. The link is included as of version 1.11.
-
-1.12 PUT dict format to /allocations/{consumer_uuid}
-----------------------------------------------------
-
-.. versionadded:: Queens
-
-In version 1.12 the request body of a ``PUT /allocations/{consumer_uuid}``
-is expected to have an ``object`` for the ``allocations`` property, not as
-``array`` as with earlier microversions. This puts the request body more in
-alignment with the structure of the ``GET /allocations/{consumer_uuid}``
-response body. Because the ``PUT`` request requires ``user_id`` and
-``project_id`` in the request body, these fields are added to the ``GET``
-response. In addition, the response body for ``GET /allocation_candidates``
-is updated so the allocations in the ``alocation_requests`` object work
-with the new ``PUT`` format.
-
-1.13 POST multiple allocations to /allocations
-----------------------------------------------
-
-.. versionadded:: Queens
-
-Version 1.13 gives the ability to set or clear allocations for more than
-one consumer UUID with a request to ``POST /allocations``.
-
-1.14 Add nested resource providers
-----------------------------------
-
-.. versionadded:: Queens
-
-The 1.14 version introduces the concept of nested resource providers. The
-resource provider resource now contains two new attributes:
-
-* ``parent_provider_uuid`` indicates the provider's direct parent, or null if
- there is no parent. This attribute can be set in the call to ``POST
- /resource_providers`` and ``PUT /resource_providers/{uuid}`` if the attribute
- has not already been set to a non-NULL value (i.e. we do not support
- "reparenting" a provider)
-* ``root_provider_uuid`` indicates the UUID of the root resource provider in
- the provider's tree. This is a read-only attribute
-
-A new ``in_tree=<UUID>`` parameter is now available in the ``GET
-/resource-providers`` API call. Supplying a UUID value for the ``in_tree``
-parameter will cause all resource providers within the "provider tree" of the
-provider matching ``<UUID>`` to be returned.
-
-1.15 Add 'last-modified' and 'cache-control' headers
-----------------------------------------------------
-
-.. versionadded:: Queens
-
-Throughout the API, 'last-modified' headers have been added to GET responses
-and those PUT and POST responses that have bodies. The value is either the
-actual last modified time of the most recently modified associated database
-entity or the current time if there is no direct mapping to the database. In
-addition, 'cache-control: no-cache' headers are added where the 'last-modified'
-header has been added to prevent inadvertent caching of resources.
-
-1.16 Limit allocation candidates
---------------------------------
-
-.. versionadded:: Queens
-
-Add support for a ``limit`` query parameter when making a
-``GET /allocation_candidates`` request. The parameter accepts an integer
-value, ``N``, which limits the maximum number of candidates returned.
-
-1.17 Add 'required' parameter to the allocation candidates (Maximum in Queens)
-------------------------------------------------------------------------------
-
-.. versionadded:: Queens
-
-Add the ``required`` parameter to the ``GET /allocation_candidates`` API. It
-accepts a list of traits separated by ``,``. The provider summary in the
-response will include the attached traits also.
-
-1.18 Support ?required=<traits> queryparam on GET /resource_providers
----------------------------------------------------------------------
-
-.. versionadded:: Rocky
-
-Add support for the ``required`` query parameter to the ``GET
-/resource_providers`` API. It accepts a comma-separated list of string trait
-names. When specified, the API results will be filtered to include only
-resource providers marked with all the specified traits. This is in addition to
-(logical AND) any filtering based on other query parameters.
-
-Trait names which are empty, do not exist, or are otherwise invalid will result
-in a 400 error.
-
-1.19 Include generation and conflict detection in provider aggregates APIs
---------------------------------------------------------------------------
-
-.. versionadded:: Rocky
-
-Enhance the payloads for the ``GET /resource_providers/{uuid}/aggregates``
-response and the ``PUT /resource_providers/{uuid}/aggregates`` request and
-response to be identical, and to include the ``resource_provider_generation``.
-As with other generation-aware APIs, if the ``resource_provider_generation``
-specified in the ``PUT`` request does not match the generation known by the
-server, a 409 Conflict error is returned.
-
-1.20 Return 200 with provider payload from POST /resource_providers
--------------------------------------------------------------------
-
-.. versionadded:: Rocky
-
-The ``POST /resource_providers`` API, on success, returns 200 with a payload
-representing the newly-created resource provider, in the same format as the
-corresponding ``GET /resource_providers/{uuid}`` call. This is to allow the
-caller to glean automatically-set fields, such as UUID and generation, without
-a subsequent GET.
-
-1.21 Support ?member_of=<aggregates> queryparam on GET /allocation_candidates
------------------------------------------------------------------------------
-
-.. versionadded:: Rocky
-
-Add support for the ``member_of`` query parameter to the ``GET
-/allocation_candidates`` API. It accepts a comma-separated list of UUIDs for
-aggregates. Note that if more than one aggregate UUID is passed, the
-comma-separated list must be prefixed with the "in:" operator. If this
-parameter is provided, the only resource providers returned will be those in
-one of the specified aggregates that meet the other parts of the request.
-
-1.22 Support forbidden traits on resource providers and allocations candidates
-------------------------------------------------------------------------------
-
-.. versionadded:: Rocky
-
-Add support for expressing traits which are forbidden when filtering
-``GET /resource_providers`` or ``GET /allocation_candidates``. A forbidden
-trait is a properly formatted trait in the existing ``required`` parameter,
-prefixed by a ``!``. For example ``required=!STORAGE_DISK_SSD`` asks that the
-results not include any resource providers that provide solid state disk.
-
-1.23 Include code attribute in JSON error responses
----------------------------------------------------
-
-.. versionadded:: Rocky
-
-JSON formatted error responses gain a new attribute, ``code``, with a value
-that identifies the type of this error. This can be used to distinguish errors
-that are different but use the same HTTP status code. Any error response which
-does not specifically define a code will have the code
-``placement.undefined_code``.
-
-1.24 Support multiple ?member_of queryparams
---------------------------------------------
-
-.. versionadded:: Rocky
-
-Add support for specifying multiple ``member_of`` query parameters to the ``GET
-/resource_providers`` API. When multiple ``member_of`` query parameters are
-found, they are AND'd together in the final query. For example, issuing a
-request for ``GET /resource_providers?member_of=agg1&member_of=agg2`` means get
-the resource providers that are associated with BOTH agg1 and agg2. Issuing a
-request for ``GET /resource_providers?member_of=in:agg1,agg2&member_of=agg3``
-means get the resource providers that are associated with agg3 and are also
-associated with *any of* (agg1, agg2).
-
-1.25 Granular resource requests to ``GET /allocation_candidates``
------------------------------------------------------------------
-
-.. versionadded:: Rocky
-
-``GET /allocation_candidates`` is enhanced to accept numbered groupings of
-resource, required/forbidden trait, and aggregate association requests. A
-``resources`` query parameter key with a positive integer suffix (e.g.
-``resources42``) will be logically associated with ``required`` and/or
-``member_of`` query parameter keys with the same suffix (e.g. ``required42``,
-``member_of42``). The resources, required/forbidden traits, and aggregate
-associations in that group will be satisfied by the same resource provider in
-the response. When more than one numbered grouping is supplied, the
-``group_policy`` query parameter is required to indicate how the groups should
-interact. With ``group_policy=none``, separate groupings - numbered or
-unnumbered - may or may not be satisfied by the same provider. With
-``group_policy=isolate``, numbered groups are guaranteed to be satisfied by
-*different* providers - though there may still be overlap with the unnumbered
-group. In all cases, each ``allocation_request`` will be satisfied by providers
-in a single non-sharing provider tree and/or sharing providers associated via
-aggregate with any of the providers in that tree.
-
-The ``required`` and ``member_of`` query parameters for a given group are
-optional. That is, you may specify ``resources42=XXX`` without a corresponding
-``required42=YYY`` or ``member_of42=ZZZ``. However, the reverse (specifying
-``required42=YYY`` or ``member_of42=ZZZ`` without ``resources42=XXX``) will
-result in an error.
-
-The semantic of the (unnumbered) ``resources``, ``required``, and ``member_of``
-query parameters is unchanged: the resources, traits, and aggregate
-associations specified thereby may be satisfied by any provider in the same
-non-sharing tree or associated via the specified aggregate(s).
-
-1.26 Allow inventories to have reserved value equal to total
-------------------------------------------------------------
-
-.. versionadded:: Rocky
-
-Starting with this version, it is allowed to set the reserved value of the
-resource provider inventory to be equal to total.
-
-1.27 Include all resource class inventories in provider_summaries
------------------------------------------------------------------
-
-.. versionadded:: Rocky
-
-Include all resource class inventories in the ``provider_summaries`` field in
-response of the ``GET /allocation_candidates`` API even if the resource class
-is not in the requested resources.
-
-1.28 Consumer generation support
---------------------------------
-
-.. versionadded:: Rocky
-
-A new generation field has been added to the consumer concept. Consumers are
-the actors that are allocated resources in the placement API. When an
-allocation is created, a consumer UUID is specified. Starting with microversion
-1.8, a project and user ID are also required. If using microversions prior to
-1.8, these are populated from the ``incomplete_consumer_project_id`` and
-``incomplete_consumer_user_id`` config options from the ``[placement]``
-section.
-
-The consumer generation facilitates safe concurrent modification of an
-allocation.
-
-A consumer generation is now returned from the following URIs:
-
-``GET /resource_providers/{uuid}/allocations``
-
-The response continues to be a dict with a key of ``allocations``, which itself
-is a dict, keyed by consumer UUID, of allocations against the resource
-provider. For each of those dicts, a ``consumer_generation`` field will now be
-shown.
-
-``GET /allocations/{consumer_uuid}``
-
-The response continues to be a dict with a key of ``allocations``, which
-itself is a dict, keyed by resource provider UUID, of allocations being
-consumed by the consumer with the ``{consumer_uuid}``. The top-level dict will
-also now contain a ``consumer_generation`` field.
-
-The value of the ``consumer_generation`` field is opaque and should only be
-used to send back to subsequent operations on the consumer's allocations.
-
-The ``PUT /allocations/{consumer_uuid}`` URI has been modified to now require a
-``consumer_generation`` field in the request payload. This field is required to
-be ``null`` if the caller expects that there are no allocations already
-existing for the consumer. Otherwise, it should contain the generation that the
-caller understands the consumer to be at the time of the call.
-
-A ``409 Conflict`` will be returned from ``PUT /allocations/{consumer_uuid}``
-if there was a mismatch between the supplied generation and the consumer's
-generation as known by the server. Similarly, a ``409 Conflict`` will be
-returned if during the course of replacing the consumer's allocations another
-process concurrently changed the consumer's allocations. This allows the caller
-to react to the concurrent write by re-reading the consumer's allocations and
-re-issuing the call to replace allocations as needed.
-
-The ``PUT /allocations/{consumer_uuid}`` URI has also been modified to accept
-an empty allocations object, thereby bringing it to parity with the behaviour
-of ``POST /allocations``, which uses an empty allocations object to indicate
-that the allocations for a particular consumer should be removed. Passing an
-empty allocations object along with a ``consumer_generation`` makes ``PUT
-/allocations/{consumer_uuid}`` a **safe** way to delete allocations for a
-consumer. The ``DELETE /allocations/{consumer_uuid}`` URI remains unsafe to
-call in deployments where multiple callers may simultaneously be attempting to
-modify a consumer's allocations.
-
-The ``POST /allocations`` URI variant has also been changed to require a
-``consumer_generation`` field in the request payload **for each consumer
-involved in the request**. Similar responses to ``PUT
-/allocations/{consumer_uuid}`` are returned when any of the consumers
-generations conflict with the server's view of those consumers or if any of the
-consumers involved in the request are modified by another process.
-
-.. warning:: In all cases, it is absolutely **NOT SAFE** to create and modify
- allocations for a consumer using different microversions where one
- of the microversions is prior to 1.28. The only way to safely
- modify allocations for a consumer and satisfy expectations you
- have regarding the prior existence (or lack of existence) of those
- allocations is to always use microversion 1.28+ when calling
- allocations API endpoints.
-
-1.29 Support allocation candidates with nested resource providers
------------------------------------------------------------------
-
-.. versionadded:: Rocky
-
-Add support for nested resource providers with the following two features.
-1) ``GET /allocation_candidates`` is aware of nested providers. Namely, when
-provider trees are present, ``allocation_requests`` in the response of
-``GET /allocation_candidates`` can include allocations on combinations of
-multiple resource providers in the same tree.
-2) ``root_provider_uuid`` and ``parent_provider_uuid`` are added to
-``provider_summaries`` in the response of ``GET /allocation_candidates``.
-
-1.30 Provide a /reshaper resource
----------------------------------
-
-Add support for a ``POST /reshaper`` resource that provides for atomically
-migrating resource provider inventories and associated allocations when some of
-the inventory moves from one resource provider to another, such as when a class
-of inventory moves from a parent provider to a new child provider.
-
-.. note:: This is a special operation that should only be used in rare cases
- of resource provider topology changing when inventory is in use.
- Only use this if you are really sure of what you are doing.
diff --git a/nova/api/openstack/placement/schemas/__init__.py b/nova/api/openstack/placement/schemas/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/nova/api/openstack/placement/schemas/__init__.py
+++ /dev/null
diff --git a/nova/api/openstack/placement/schemas/aggregate.py b/nova/api/openstack/placement/schemas/aggregate.py
deleted file mode 100644
index dc5d949216..0000000000
--- a/nova/api/openstack/placement/schemas/aggregate.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Aggregate schemas for Placement API."""
-import copy
-
-
-_AGGREGATES_LIST_SCHEMA = {
- "type": "array",
- "items": {
- "type": "string",
- "format": "uuid"
- },
- "uniqueItems": True
-}
-
-
-PUT_AGGREGATES_SCHEMA_V1_1 = copy.deepcopy(_AGGREGATES_LIST_SCHEMA)
-
-
-PUT_AGGREGATES_SCHEMA_V1_19 = {
- "type": "object",
- "properties": {
- "aggregates": copy.deepcopy(_AGGREGATES_LIST_SCHEMA),
- "resource_provider_generation": {
- "type": "integer",
- }
- },
- "required": [
- "aggregates",
- "resource_provider_generation",
- ],
- "additionalProperties": False,
-}
diff --git a/nova/api/openstack/placement/schemas/allocation.py b/nova/api/openstack/placement/schemas/allocation.py
deleted file mode 100644
index 169a953f58..0000000000
--- a/nova/api/openstack/placement/schemas/allocation.py
+++ /dev/null
@@ -1,169 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Placement API schemas for setting and deleting allocations."""
-
-import copy
-
-from nova.api.openstack.placement.schemas import common
-
-
-ALLOCATION_SCHEMA = {
- "type": "object",
- "properties": {
- "allocations": {
- "type": "array",
- "minItems": 1,
- "items": {
- "type": "object",
- "properties": {
- "resource_provider": {
- "type": "object",
- "properties": {
- "uuid": {
- "type": "string",
- "format": "uuid"
- }
- },
- "additionalProperties": False,
- "required": ["uuid"]
- },
- "resources": {
- "type": "object",
- "minProperties": 1,
- "patternProperties": {
- common.RC_PATTERN: {
- "type": "integer",
- "minimum": 1,
- }
- },
- "additionalProperties": False
- }
- },
- "required": [
- "resource_provider",
- "resources"
- ],
- "additionalProperties": False
- }
- }
- },
- "required": ["allocations"],
- "additionalProperties": False
-}
-
-ALLOCATION_SCHEMA_V1_8 = copy.deepcopy(ALLOCATION_SCHEMA)
-ALLOCATION_SCHEMA_V1_8['properties']['project_id'] = {'type': 'string',
- 'minLength': 1,
- 'maxLength': 255}
-ALLOCATION_SCHEMA_V1_8['properties']['user_id'] = {'type': 'string',
- 'minLength': 1,
- 'maxLength': 255}
-ALLOCATION_SCHEMA_V1_8['required'].extend(['project_id', 'user_id'])
-
-# Update the allocation schema to achieve symmetry with the representation
-# used when GET /allocations/{consumer_uuid} is called.
-# NOTE(cdent): Explicit duplication here for sake of comprehensibility.
-ALLOCATION_SCHEMA_V1_12 = {
- "type": "object",
- "properties": {
- "allocations": {
- "type": "object",
- "minProperties": 1,
- # resource provider uuid
- "patternProperties": {
- common.UUID_PATTERN: {
- "type": "object",
- "properties": {
- # generation is optional
- "generation": {
- "type": "integer",
- },
- "resources": {
- "type": "object",
- "minProperties": 1,
- # resource class
- "patternProperties": {
- common.RC_PATTERN: {
- "type": "integer",
- "minimum": 1,
- }
- },
- "additionalProperties": False
- }
- },
- "required": ["resources"],
- "additionalProperties": False
- }
- },
- "additionalProperties": False
- },
- "project_id": {
- "type": "string",
- "minLength": 1,
- "maxLength": 255
- },
- "user_id": {
- "type": "string",
- "minLength": 1,
- "maxLength": 255
- }
- },
- "additionalProperties": False,
- "required": [
- "allocations",
- "project_id",
- "user_id"
- ]
-}
-
-
-# POST to /allocations, added in microversion 1.13, uses the
-# POST_ALLOCATIONS_V1_13 schema to allow multiple allocations
-# from multiple consumers in one request. It is a dict, keyed by
-# consumer uuid, using the form of PUT allocations from microversion
-# 1.12. In POST the allocations can be empty, so DELETABLE_ALLOCATIONS
-# modifies ALLOCATION_SCHEMA_V1_12 accordingly.
-DELETABLE_ALLOCATIONS = copy.deepcopy(ALLOCATION_SCHEMA_V1_12)
-DELETABLE_ALLOCATIONS['properties']['allocations']['minProperties'] = 0
-POST_ALLOCATIONS_V1_13 = {
- "type": "object",
- "minProperties": 1,
- "additionalProperties": False,
- "patternProperties": {
- common.UUID_PATTERN: DELETABLE_ALLOCATIONS
- }
-}
-
-# A required consumer generation was added to the top-level dict in this
-# version of PUT /allocations/{consumer_uuid}. In addition, the PUT
-# /allocations/{consumer_uuid}/now allows for empty allocations (indicating the
-# allocations are being removed)
-ALLOCATION_SCHEMA_V1_28 = copy.deepcopy(DELETABLE_ALLOCATIONS)
-ALLOCATION_SCHEMA_V1_28['properties']['consumer_generation'] = {
- "type": ["integer", "null"],
- "additionalProperties": False
-}
-ALLOCATION_SCHEMA_V1_28['required'].append("consumer_generation")
-
-# A required consumer generation was added to the allocations dicts in this
-# version of POST /allocations
-REQUIRED_GENERATION_ALLOCS_POST = copy.deepcopy(DELETABLE_ALLOCATIONS)
-alloc_props = REQUIRED_GENERATION_ALLOCS_POST['properties']
-alloc_props['consumer_generation'] = {
- "type": ["integer", "null"],
- "additionalProperties": False
-}
-REQUIRED_GENERATION_ALLOCS_POST['required'].append("consumer_generation")
-POST_ALLOCATIONS_V1_28 = copy.deepcopy(POST_ALLOCATIONS_V1_13)
-POST_ALLOCATIONS_V1_28["patternProperties"] = {
- common.UUID_PATTERN: REQUIRED_GENERATION_ALLOCS_POST
-}
diff --git a/nova/api/openstack/placement/schemas/allocation_candidate.py b/nova/api/openstack/placement/schemas/allocation_candidate.py
deleted file mode 100644
index d418366ff6..0000000000
--- a/nova/api/openstack/placement/schemas/allocation_candidate.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Placement API schemas for getting allocation candidates."""
-
-import copy
-
-
-# Represents the allowed query string parameters to the GET
-# /allocation_candidates API call
-GET_SCHEMA_1_10 = {
- "type": "object",
- "properties": {
- "resources": {
- "type": "string"
- },
- },
- "required": [
- "resources",
- ],
- "additionalProperties": False,
-}
-
-
-# Add limit query parameter.
-GET_SCHEMA_1_16 = copy.deepcopy(GET_SCHEMA_1_10)
-GET_SCHEMA_1_16['properties']['limit'] = {
- # A query parameter is always a string in webOb, but
- # we'll handle integer here as well.
- "type": ["integer", "string"],
- "pattern": "^[1-9][0-9]*$",
- "minimum": 1,
- "minLength": 1
-}
-
-# Add required parameter.
-GET_SCHEMA_1_17 = copy.deepcopy(GET_SCHEMA_1_16)
-GET_SCHEMA_1_17['properties']['required'] = {
- "type": ["string"]
-}
-
-# Add member_of parameter.
-GET_SCHEMA_1_21 = copy.deepcopy(GET_SCHEMA_1_17)
-GET_SCHEMA_1_21['properties']['member_of'] = {
- "type": ["string"]
-}
-
-GET_SCHEMA_1_25 = copy.deepcopy(GET_SCHEMA_1_21)
-# We're going to *replace* 'resources', 'required', and 'member_of'.
-del GET_SCHEMA_1_25["properties"]["resources"]
-del GET_SCHEMA_1_25["required"]
-del GET_SCHEMA_1_25["properties"]["required"]
-del GET_SCHEMA_1_25["properties"]["member_of"]
-# Pattern property key format for a numbered or un-numbered grouping
-_GROUP_PAT_FMT = "^%s([1-9][0-9]*)?$"
-GET_SCHEMA_1_25["patternProperties"] = {
- _GROUP_PAT_FMT % "resources": {
- "type": "string",
- },
- _GROUP_PAT_FMT % "required": {
- "type": "string",
- },
- _GROUP_PAT_FMT % "member_of": {
- "type": "string",
- },
-}
-GET_SCHEMA_1_25["properties"]["group_policy"] = {
- "type": "string",
- "enum": ["none", "isolate"],
-}
diff --git a/nova/api/openstack/placement/schemas/common.py b/nova/api/openstack/placement/schemas/common.py
deleted file mode 100644
index 51d3ee925c..0000000000
--- a/nova/api/openstack/placement/schemas/common.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-_UUID_CHAR = "[0-9a-fA-F-]"
-# TODO(efried): Use this stricter pattern, and replace string/uuid with it:
-# UUID_PATTERN = "^%s{8}-%s{4}-%s{4}-%s{4}-%s{12}$" % ((_UUID_CHAR,) * 5)
-UUID_PATTERN = "^%s{36}$" % _UUID_CHAR
-
-_RC_TRAIT_CHAR = "[A-Z0-9_]"
-_RC_TRAIT_PATTERN = "^%s+$" % _RC_TRAIT_CHAR
-RC_PATTERN = _RC_TRAIT_PATTERN
-_CUSTOM_RC_TRAIT_PATTERN = "^CUSTOM_%s+$" % _RC_TRAIT_CHAR
-CUSTOM_RC_PATTERN = _CUSTOM_RC_TRAIT_PATTERN
-CUSTOM_TRAIT_PATTERN = _CUSTOM_RC_TRAIT_PATTERN
diff --git a/nova/api/openstack/placement/schemas/inventory.py b/nova/api/openstack/placement/schemas/inventory.py
deleted file mode 100644
index cddea13064..0000000000
--- a/nova/api/openstack/placement/schemas/inventory.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Inventory schemas for Placement API."""
-
-import copy
-
-from nova.api.openstack.placement.schemas import common
-from nova.db import constants as db_const
-
-
-BASE_INVENTORY_SCHEMA = {
- "type": "object",
- "properties": {
- "resource_provider_generation": {
- "type": "integer"
- },
- "total": {
- "type": "integer",
- "maximum": db_const.MAX_INT,
- "minimum": 1,
- },
- "reserved": {
- "type": "integer",
- "maximum": db_const.MAX_INT,
- "minimum": 0,
- },
- "min_unit": {
- "type": "integer",
- "maximum": db_const.MAX_INT,
- "minimum": 1
- },
- "max_unit": {
- "type": "integer",
- "maximum": db_const.MAX_INT,
- "minimum": 1
- },
- "step_size": {
- "type": "integer",
- "maximum": db_const.MAX_INT,
- "minimum": 1
- },
- "allocation_ratio": {
- "type": "number",
- "maximum": db_const.SQL_SP_FLOAT_MAX
- },
- },
- "required": [
- "total",
- "resource_provider_generation"
- ],
- "additionalProperties": False
-}
-
-
-POST_INVENTORY_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA)
-POST_INVENTORY_SCHEMA['properties']['resource_class'] = {
- "type": "string",
- "pattern": common.RC_PATTERN,
-}
-POST_INVENTORY_SCHEMA['required'].append('resource_class')
-POST_INVENTORY_SCHEMA['required'].remove('resource_provider_generation')
-
-
-PUT_INVENTORY_RECORD_SCHEMA = copy.deepcopy(BASE_INVENTORY_SCHEMA)
-PUT_INVENTORY_RECORD_SCHEMA['required'].remove('resource_provider_generation')
-PUT_INVENTORY_SCHEMA = {
- "type": "object",
- "properties": {
- "resource_provider_generation": {
- "type": "integer"
- },
- "inventories": {
- "type": "object",
- "patternProperties": {
- common.RC_PATTERN: PUT_INVENTORY_RECORD_SCHEMA,
- }
- }
- },
- "required": [
- "resource_provider_generation",
- "inventories"
- ],
- "additionalProperties": False
-}
diff --git a/nova/api/openstack/placement/schemas/reshaper.py b/nova/api/openstack/placement/schemas/reshaper.py
deleted file mode 100644
index 1658d92515..0000000000
--- a/nova/api/openstack/placement/schemas/reshaper.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Reshaper schema for Placement API."""
-
-import copy
-
-from nova.api.openstack.placement.schemas import allocation
-from nova.api.openstack.placement.schemas import common
-from nova.api.openstack.placement.schemas import inventory
-
-
-ALLOCATIONS = copy.deepcopy(allocation.POST_ALLOCATIONS_V1_28)
-# In the reshaper we need to allow allocations to be an empty dict
-# because it may be the case that there simply are no allocations
-# (now) for any of the inventory being moved.
-ALLOCATIONS['minProperties'] = 0
-POST_RESHAPER_SCHEMA = {
- "type": "object",
- "properties": {
- "inventories": {
- "type": "object",
- "patternProperties": {
- # resource provider uuid
- common.UUID_PATTERN: inventory.PUT_INVENTORY_SCHEMA,
- },
- # We expect at least one inventories, otherwise there is no reason
- # to call the reshaper.
- "minProperties": 1,
- "additionalProperties": False,
- },
- "allocations": ALLOCATIONS,
- },
- "required": [
- "inventories",
- "allocations",
- ],
- "additionalProperties": False,
-}
diff --git a/nova/api/openstack/placement/schemas/resource_class.py b/nova/api/openstack/placement/schemas/resource_class.py
deleted file mode 100644
index 32f75bc880..0000000000
--- a/nova/api/openstack/placement/schemas/resource_class.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Placement API schemas for resource classes."""
-
-import copy
-
-from nova.api.openstack.placement.schemas import common
-
-
-POST_RC_SCHEMA_V1_2 = {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "pattern": common.CUSTOM_RC_PATTERN,
- "maxLength": 255,
- },
- },
- "required": [
- "name"
- ],
- "additionalProperties": False,
-}
-PUT_RC_SCHEMA_V1_2 = copy.deepcopy(POST_RC_SCHEMA_V1_2)
diff --git a/nova/api/openstack/placement/schemas/resource_provider.py b/nova/api/openstack/placement/schemas/resource_provider.py
deleted file mode 100644
index 7ca43ef69a..0000000000
--- a/nova/api/openstack/placement/schemas/resource_provider.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Placement API schemas for resource providers."""
-
-import copy
-
-
-POST_RESOURCE_PROVIDER_SCHEMA = {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "maxLength": 200
- },
- "uuid": {
- "type": "string",
- "format": "uuid"
- }
- },
- "required": [
- "name"
- ],
- "additionalProperties": False,
-}
-# Remove uuid to create the schema for PUTting a resource provider
-PUT_RESOURCE_PROVIDER_SCHEMA = copy.deepcopy(POST_RESOURCE_PROVIDER_SCHEMA)
-PUT_RESOURCE_PROVIDER_SCHEMA['properties'].pop('uuid')
-
-# Placement API microversion 1.14 adds an optional parent_provider_uuid field
-# to the POST and PUT request schemas
-POST_RP_SCHEMA_V1_14 = copy.deepcopy(POST_RESOURCE_PROVIDER_SCHEMA)
-POST_RP_SCHEMA_V1_14["properties"]["parent_provider_uuid"] = {
- "anyOf": [
- {
- "type": "string",
- "format": "uuid",
- },
- {
- "type": "null",
- }
- ]
-}
-PUT_RP_SCHEMA_V1_14 = copy.deepcopy(POST_RP_SCHEMA_V1_14)
-PUT_RP_SCHEMA_V1_14['properties'].pop('uuid')
-
-# Represents the allowed query string parameters to the GET /resource_providers
-# API call
-GET_RPS_SCHEMA_1_0 = {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "uuid": {
- "type": "string",
- "format": "uuid"
- }
- },
- "additionalProperties": False,
-}
-
-# Placement API microversion 1.3 adds support for a member_of attribute
-GET_RPS_SCHEMA_1_3 = copy.deepcopy(GET_RPS_SCHEMA_1_0)
-GET_RPS_SCHEMA_1_3['properties']['member_of'] = {
- "type": "string"
-}
-
-# Placement API microversion 1.4 adds support for requesting resource providers
-# having some set of capacity for some resources. The query string is a
-# comma-delimited set of "$RESOURCE_CLASS_NAME:$AMOUNT" strings. The validation
-# of the string is left up to the helper code in the
-# normalize_resources_qs_param() function.
-GET_RPS_SCHEMA_1_4 = copy.deepcopy(GET_RPS_SCHEMA_1_3)
-GET_RPS_SCHEMA_1_4['properties']['resources'] = {
- "type": "string"
-}
-
-# Placement API microversion 1.14 adds support for requesting resource
-# providers within a tree of providers. The 'in_tree' query string parameter
-# should be the UUID of a resource provider. The result of the GET call will
-# include only those resource providers in the same "provider tree" as the
-# provider with the UUID represented by 'in_tree'
-GET_RPS_SCHEMA_1_14 = copy.deepcopy(GET_RPS_SCHEMA_1_4)
-GET_RPS_SCHEMA_1_14['properties']['in_tree'] = {
- "type": "string",
- "format": "uuid",
-}
-
-# Microversion 1.18 adds support for the `required` query parameter to the
-# `GET /resource_providers` API. It accepts a comma-separated list of string
-# trait names. When specified, the API results will be filtered to include only
-# resource providers marked with all the specified traits. This is in addition
-# to (logical AND) any filtering based on other query parameters.
-GET_RPS_SCHEMA_1_18 = copy.deepcopy(GET_RPS_SCHEMA_1_14)
-GET_RPS_SCHEMA_1_18['properties']['required'] = {
- "type": "string",
-}
diff --git a/nova/api/openstack/placement/schemas/trait.py b/nova/api/openstack/placement/schemas/trait.py
deleted file mode 100644
index b9c04e54de..0000000000
--- a/nova/api/openstack/placement/schemas/trait.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Trait schemas for Placement API."""
-
-import copy
-
-from nova.api.openstack.placement.schemas import common
-
-
-TRAIT = {
- "type": "string",
- 'minLength': 1, 'maxLength': 255,
-}
-
-CUSTOM_TRAIT = copy.deepcopy(TRAIT)
-CUSTOM_TRAIT.update({"pattern": common.CUSTOM_TRAIT_PATTERN})
-
-PUT_TRAITS_SCHEMA = {
- "type": "object",
- "properties": {
- "traits": {
- "type": "array",
- "items": CUSTOM_TRAIT,
- }
- },
- 'required': ['traits'],
- 'additionalProperties': False
-}
-
-SET_TRAITS_FOR_RP_SCHEMA = copy.deepcopy(PUT_TRAITS_SCHEMA)
-SET_TRAITS_FOR_RP_SCHEMA['properties']['traits']['items'] = TRAIT
-SET_TRAITS_FOR_RP_SCHEMA['properties'][
- 'resource_provider_generation'] = {'type': 'integer'}
-SET_TRAITS_FOR_RP_SCHEMA['required'].append('resource_provider_generation')
-
-LIST_TRAIT_SCHEMA = {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "associated": {
- "type": "string",
- }
- },
- "additionalProperties": False
-}
diff --git a/nova/api/openstack/placement/schemas/usage.py b/nova/api/openstack/placement/schemas/usage.py
deleted file mode 100644
index 3b1a184504..0000000000
--- a/nova/api/openstack/placement/schemas/usage.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Placement API schemas for usage information."""
-
-# Represents the allowed query string parameters to GET /usages
-GET_USAGES_SCHEMA_1_9 = {
- "type": "object",
- "properties": {
- "project_id": {
- "type": "string",
- "minLength": 1,
- "maxLength": 255,
- },
- "user_id": {
- "type": "string",
- "minLength": 1,
- "maxLength": 255,
- },
- },
- "required": [
- "project_id"
- ],
- "additionalProperties": False,
-}
diff --git a/nova/api/openstack/placement/util.py b/nova/api/openstack/placement/util.py
deleted file mode 100644
index 6b3ae052f6..0000000000
--- a/nova/api/openstack/placement/util.py
+++ /dev/null
@@ -1,697 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Utility methods for placement API."""
-
-import functools
-import re
-
-import jsonschema
-from oslo_config import cfg
-from oslo_log import log as logging
-from oslo_middleware import request_id
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
-from oslo_utils import uuidutils
-import webob
-
-from nova.api.openstack.placement import errors
-from nova.api.openstack.placement import exception
-from nova.api.openstack.placement import lib as placement_lib
-# NOTE(cdent): avoid cyclical import conflict between util and
-# microversion
-import nova.api.openstack.placement.microversion
-from nova.api.openstack.placement.objects import consumer as consumer_obj
-from nova.api.openstack.placement.objects import project as project_obj
-from nova.api.openstack.placement.objects import user as user_obj
-from nova.i18n import _
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-# Error code handling constants
-ENV_ERROR_CODE = 'placement.error_code'
-ERROR_CODE_MICROVERSION = (1, 23)
-
-# Querystring-related constants
-_QS_RESOURCES = 'resources'
-_QS_REQUIRED = 'required'
-_QS_MEMBER_OF = 'member_of'
-_QS_KEY_PATTERN = re.compile(
- r"^(%s)([1-9][0-9]*)?$" % '|'.join(
- (_QS_RESOURCES, _QS_REQUIRED, _QS_MEMBER_OF)))
-
-
-# NOTE(cdent): This registers a FormatChecker on the jsonschema
-# module. Do not delete this code! Although it appears that nothing
-# is using the decorated method it is being used in JSON schema
-# validations to check uuid formatted strings.
-@jsonschema.FormatChecker.cls_checks('uuid')
-def _validate_uuid_format(instance):
- return uuidutils.is_uuid_like(instance)
-
-
-def check_accept(*types):
- """If accept is set explicitly, try to follow it.
-
- If there is no match for the incoming accept header
- send a 406 response code.
-
- If accept is not set send our usual content-type in
- response.
- """
- def decorator(f):
- @functools.wraps(f)
- def decorated_function(req):
- if req.accept:
- best_matches = req.accept.acceptable_offers(types)
- if not best_matches:
- type_string = ', '.join(types)
- raise webob.exc.HTTPNotAcceptable(
- _('Only %(type)s is provided') % {'type': type_string},
- json_formatter=json_error_formatter)
- return f(req)
- return decorated_function
- return decorator
-
-
-def extract_json(body, schema):
- """Extract JSON from a body and validate with the provided schema."""
- try:
- data = jsonutils.loads(body)
- except ValueError as exc:
- raise webob.exc.HTTPBadRequest(
- _('Malformed JSON: %(error)s') % {'error': exc},
- json_formatter=json_error_formatter)
- try:
- jsonschema.validate(data, schema,
- format_checker=jsonschema.FormatChecker())
- except jsonschema.ValidationError as exc:
- raise webob.exc.HTTPBadRequest(
- _('JSON does not validate: %(error)s') % {'error': exc},
- json_formatter=json_error_formatter)
- return data
-
-
-def inventory_url(environ, resource_provider, resource_class=None):
- url = '%s/inventories' % resource_provider_url(environ, resource_provider)
- if resource_class:
- url = '%s/%s' % (url, resource_class)
- return url
-
-
-def json_error_formatter(body, status, title, environ):
- """A json_formatter for webob exceptions.
-
- Follows API-WG guidelines at
- http://specs.openstack.org/openstack/api-wg/guidelines/errors.html
- """
- # Shortcut to microversion module, to avoid wraps below.
- microversion = nova.api.openstack.placement.microversion
-
- # Clear out the html that webob sneaks in.
- body = webob.exc.strip_tags(body)
- # Get status code out of status message. webob's error formatter
- # only passes entire status string.
- status_code = int(status.split(None, 1)[0])
- error_dict = {
- 'status': status_code,
- 'title': title,
- 'detail': body
- }
-
- # Version may not be set if we have experienced an error before it
- # is set.
- want_version = environ.get(microversion.MICROVERSION_ENVIRON)
- if want_version and want_version.matches(ERROR_CODE_MICROVERSION):
- error_dict['code'] = environ.get(ENV_ERROR_CODE, errors.DEFAULT)
-
- # If the request id middleware has had a chance to add an id,
- # put it in the error response.
- if request_id.ENV_REQUEST_ID in environ:
- error_dict['request_id'] = environ[request_id.ENV_REQUEST_ID]
-
- # When there is a no microversion in the environment and a 406,
- # microversion parsing failed so we need to include microversion
- # min and max information in the error response.
- if status_code == 406 and microversion.MICROVERSION_ENVIRON not in environ:
- error_dict['max_version'] = microversion.max_version_string()
- error_dict['min_version'] = microversion.min_version_string()
-
- return {'errors': [error_dict]}
-
-
-def pick_last_modified(last_modified, obj):
- """Choose max of last_modified and obj.updated_at or obj.created_at.
-
- If updated_at is not implemented in `obj` use the current time in UTC.
- """
- try:
- current_modified = (obj.updated_at or obj.created_at)
- except NotImplementedError:
- # If updated_at is not implemented, we are looking at objects that
- # have not come from the database, so "now" is the right modified
- # time.
- current_modified = timeutils.utcnow(with_timezone=True)
- if last_modified:
- last_modified = max(last_modified, current_modified)
- else:
- last_modified = current_modified
- return last_modified
-
-
-def require_content(content_type):
- """Decorator to require a content type in a handler."""
- def decorator(f):
- @functools.wraps(f)
- def decorated_function(req):
- if req.content_type != content_type:
- # webob's unset content_type is the empty string so
- # set it the error message content to 'None' to make
- # a useful message in that case. This also avoids a
- # KeyError raised when webob.exc eagerly fills in a
- # Template for output we will never use.
- if not req.content_type:
- req.content_type = 'None'
- raise webob.exc.HTTPUnsupportedMediaType(
- _('The media type %(bad_type)s is not supported, '
- 'use %(good_type)s') %
- {'bad_type': req.content_type,
- 'good_type': content_type},
- json_formatter=json_error_formatter)
- else:
- return f(req)
- return decorated_function
- return decorator
-
-
-def resource_class_url(environ, resource_class):
- """Produce the URL for a resource class.
-
- If SCRIPT_NAME is present, it is the mount point of the placement
- WSGI app.
- """
- prefix = environ.get('SCRIPT_NAME', '')
- return '%s/resource_classes/%s' % (prefix, resource_class.name)
-
-
-def resource_provider_url(environ, resource_provider):
- """Produce the URL for a resource provider.
-
- If SCRIPT_NAME is present, it is the mount point of the placement
- WSGI app.
- """
- prefix = environ.get('SCRIPT_NAME', '')
- return '%s/resource_providers/%s' % (prefix, resource_provider.uuid)
-
-
-def trait_url(environ, trait):
- """Produce the URL for a trait.
-
- If SCRIPT_NAME is present, it is the mount point of the placement
- WSGI app.
- """
- prefix = environ.get('SCRIPT_NAME', '')
- return '%s/traits/%s' % (prefix, trait.name)
-
-
-def validate_query_params(req, schema):
- try:
- # NOTE(Kevin_Zheng): The webob package throws UnicodeError when
- # param cannot be decoded. Catch this and raise HTTP 400.
- jsonschema.validate(dict(req.GET), schema,
- format_checker=jsonschema.FormatChecker())
- except (jsonschema.ValidationError, UnicodeDecodeError) as exc:
- raise webob.exc.HTTPBadRequest(
- _('Invalid query string parameters: %(exc)s') %
- {'exc': exc})
-
-
-def wsgi_path_item(environ, name):
- """Extract the value of a named field in a URL.
-
- Return None if the name is not present or there are no path items.
- """
- # NOTE(cdent): For the time being we don't need to urldecode
- # the value as the entire placement API has paths that accept no
- # encoded values.
- try:
- return environ['wsgiorg.routing_args'][1][name]
- except (KeyError, IndexError):
- return None
-
-
-def normalize_resources_qs_param(qs):
- """Given a query string parameter for resources, validate it meets the
- expected format and return a dict of amounts, keyed by resource class name.
-
- The expected format of the resources parameter looks like so:
-
- $RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT
-
- So, if the user was looking for resource providers that had room for an
- instance that will consume 2 vCPUs, 1024 MB of RAM and 50GB of disk space,
- they would use the following query string:
-
- ?resources=VCPU:2,MEMORY_MB:1024,DISK_GB:50
-
- The returned value would be:
-
- {
- "VCPU": 2,
- "MEMORY_MB": 1024,
- "DISK_GB": 50,
- }
-
- :param qs: The value of the 'resources' query string parameter
- :raises `webob.exc.HTTPBadRequest` if the parameter's value isn't in the
- expected format.
- """
- if qs.strip() == "":
- msg = _('Badly formed resources parameter. Expected resources '
- 'query string parameter in form: '
- '?resources=VCPU:2,MEMORY_MB:1024. Got: empty string.')
- raise webob.exc.HTTPBadRequest(msg)
-
- result = {}
- resource_tuples = qs.split(',')
- for rt in resource_tuples:
- try:
- rc_name, amount = rt.split(':')
- except ValueError:
- msg = _('Badly formed resources parameter. Expected resources '
- 'query string parameter in form: '
- '?resources=VCPU:2,MEMORY_MB:1024. Got: %s.')
- msg = msg % rt
- raise webob.exc.HTTPBadRequest(msg)
- try:
- amount = int(amount)
- except ValueError:
- msg = _('Requested resource %(resource_name)s expected positive '
- 'integer amount. Got: %(amount)s.')
- msg = msg % {
- 'resource_name': rc_name,
- 'amount': amount,
- }
- raise webob.exc.HTTPBadRequest(msg)
- if amount < 1:
- msg = _('Requested resource %(resource_name)s requires '
- 'amount >= 1. Got: %(amount)d.')
- msg = msg % {
- 'resource_name': rc_name,
- 'amount': amount,
- }
- raise webob.exc.HTTPBadRequest(msg)
- result[rc_name] = amount
- return result
-
-
-def valid_trait(trait, allow_forbidden):
- """Return True if the provided trait is the expected form.
-
- When allow_forbidden is True, then a leading '!' is acceptable.
- """
- if trait.startswith('!') and not allow_forbidden:
- return False
- return True
-
-
-def normalize_traits_qs_param(val, allow_forbidden=False):
- """Parse a traits query string parameter value.
-
- Note that this method doesn't know or care about the query parameter key,
- which may currently be of the form `required`, `required123`, etc., but
- which may someday also include `preferred`, etc.
-
- This method currently does no format validation of trait strings, other
- than to ensure they're not zero-length.
-
- :param val: A traits query parameter value: a comma-separated string of
- trait names.
- :param allow_forbidden: If True, accept forbidden traits (that is, traits
- prefixed by '!') as a valid form when notifying
- the caller that the provided value is not properly
- formed.
- :return: A set of trait names.
- :raises `webob.exc.HTTPBadRequest` if the val parameter is not in the
- expected format.
- """
- ret = set(substr.strip() for substr in val.split(','))
- expected_form = 'HW_CPU_X86_VMX,CUSTOM_MAGIC'
- if allow_forbidden:
- expected_form = 'HW_CPU_X86_VMX,!CUSTOM_MAGIC'
- if not all(trait and valid_trait(trait, allow_forbidden) for trait in ret):
- msg = _("Invalid query string parameters: Expected 'required' "
- "parameter value of the form: %(form)s. "
- "Got: %(val)s") % {'form': expected_form, 'val': val}
- raise webob.exc.HTTPBadRequest(msg)
- return ret
-
-
-def normalize_member_of_qs_params(req, suffix=''):
- """Given a webob.Request object, validate that the member_of querystring
- parameters are correct. We begin supporting multiple member_of params in
- microversion 1.24.
-
- :param req: webob.Request object
- :return: A list containing sets of UUIDs of aggregates to filter on
- :raises `webob.exc.HTTPBadRequest` if the microversion requested is <1.24
- and the request contains multiple member_of querystring params
- :raises `webob.exc.HTTPBadRequest` if the val parameter is not in the
- expected format.
- """
- microversion = nova.api.openstack.placement.microversion
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- multi_member_of = want_version.matches((1, 24))
- if not multi_member_of and len(req.GET.getall('member_of' + suffix)) > 1:
- raise webob.exc.HTTPBadRequest(
- _('Multiple member_of%s parameters are not supported') % suffix)
- values = []
- for value in req.GET.getall('member_of' + suffix):
- values.append(normalize_member_of_qs_param(value))
- return values
-
-
-def normalize_member_of_qs_param(value):
- """Parse a member_of query string parameter value.
-
- Valid values are either a single UUID, or the prefix 'in:' followed by two
- or more comma-separated UUIDs.
-
- :param value: A member_of query parameter of either a single UUID, or a
- comma-separated string of two or more UUIDs, prefixed with
- the "in:" operator
- :return: A set of UUIDs
- :raises `webob.exc.HTTPBadRequest` if the value parameter is not in the
- expected format.
- """
- if "," in value and not value.startswith("in:"):
- msg = _("Multiple values for 'member_of' must be prefixed with the "
- "'in:' keyword. Got: %s") % value
- raise webob.exc.HTTPBadRequest(msg)
- if value.startswith('in:'):
- value = set(value[3:].split(','))
- else:
- value = set([value])
- # Make sure the values are actually UUIDs.
- for aggr_uuid in value:
- if not uuidutils.is_uuid_like(aggr_uuid):
- msg = _("Invalid query string parameters: Expected 'member_of' "
- "parameter to contain valid UUID(s). Got: %s") % aggr_uuid
- raise webob.exc.HTTPBadRequest(msg)
- return value
-
-
-def parse_qs_request_groups(req):
- """Parse numbered resources, traits, and member_of groupings out of a
- querystring dict.
-
- The input qsdict represents a query string of the form:
-
- ?resources=$RESOURCE_CLASS_NAME:$AMOUNT,$RESOURCE_CLASS_NAME:$AMOUNT
- &required=$TRAIT_NAME,$TRAIT_NAME&member_of=in:$AGG1_UUID,$AGG2_UUID
- &resources1=$RESOURCE_CLASS_NAME:$AMOUNT,RESOURCE_CLASS_NAME:$AMOUNT
- &required1=$TRAIT_NAME,$TRAIT_NAME&member_of1=$AGG_UUID
- &resources2=$RESOURCE_CLASS_NAME:$AMOUNT,RESOURCE_CLASS_NAME:$AMOUNT
- &required2=$TRAIT_NAME,$TRAIT_NAME&member_of2=$AGG_UUID
-
- These are parsed in groups according to the numeric suffix of the key.
- For each group, a RequestGroup instance is created containing that group's
- resources, required traits, and member_of. For the (single) group with no
- suffix, the RequestGroup.use_same_provider attribute is False; for the
- numbered groups it is True.
-
- If a trait in the required parameter is prefixed with ``!`` this
- indicates that that trait must not be present on the resource
- providers in the group. That is, the trait is forbidden. Forbidden traits
- are only processed if ``allow_forbidden`` is True. This allows the
- caller to control processing based on microversion handling.
-
- The return is a dict, keyed by the numeric suffix of these RequestGroup
- instances (or the empty string for the unnumbered group).
-
- As an example, if qsdict represents the query string:
-
- ?resources=VCPU:2,MEMORY_MB:1024,DISK_GB=50
- &required=HW_CPU_X86_VMX,CUSTOM_STORAGE_RAID
- &member_of=in:9323b2b1-82c9-4e91-bdff-e95e808ef954,8592a199-7d73-4465-8df6-ab00a6243c82 # noqa
- &resources1=SRIOV_NET_VF:2
- &required1=CUSTOM_PHYSNET_PUBLIC,CUSTOM_SWITCH_A
- &resources2=SRIOV_NET_VF:1
- &required2=!CUSTOM_PHYSNET_PUBLIC
-
- ...the return value will be:
-
- { '': RequestGroup(
- use_same_provider=False,
- resources={
- "VCPU": 2,
- "MEMORY_MB": 1024,
- "DISK_GB" 50,
- },
- required_traits=[
- "HW_CPU_X86_VMX",
- "CUSTOM_STORAGE_RAID",
- ],
- member_of=[
- [9323b2b1-82c9-4e91-bdff-e95e808ef954],
- [8592a199-7d73-4465-8df6-ab00a6243c82,
- ddbd9226-d6a6-475e-a85f-0609914dd058],
- ],
- ),
- '1': RequestGroup(
- use_same_provider=True,
- resources={
- "SRIOV_NET_VF": 2,
- },
- required_traits=[
- "CUSTOM_PHYSNET_PUBLIC",
- "CUSTOM_SWITCH_A",
- ],
- ),
- '2': RequestGroup(
- use_same_provider=True,
- resources={
- "SRIOV_NET_VF": 1,
- },
- forbidden_traits=[
- "CUSTOM_PHYSNET_PUBLIC",
- ],
- ),
- }
-
- :param req: webob.Request object
- :return: A list of RequestGroup instances.
- :raises `webob.exc.HTTPBadRequest` if any value is malformed, or if a
- trait list is given without corresponding resources.
- """
- microversion = nova.api.openstack.placement.microversion
- want_version = req.environ[microversion.MICROVERSION_ENVIRON]
- # Control whether we handle forbidden traits.
- allow_forbidden = want_version.matches((1, 22))
- # Temporary dict of the form: { suffix: RequestGroup }
- by_suffix = {}
-
- def get_request_group(suffix):
- if suffix not in by_suffix:
- rq_grp = placement_lib.RequestGroup(use_same_provider=bool(suffix))
- by_suffix[suffix] = rq_grp
- return by_suffix[suffix]
-
- for key, val in req.GET.items():
- match = _QS_KEY_PATTERN.match(key)
- if not match:
- continue
- # `prefix` is 'resources', 'required', or 'member_of'
- # `suffix` is an integer string, or None
- prefix, suffix = match.groups()
- suffix = suffix or ''
- request_group = get_request_group(suffix)
- if prefix == _QS_RESOURCES:
- request_group.resources = normalize_resources_qs_param(val)
- elif prefix == _QS_REQUIRED:
- request_group.required_traits = normalize_traits_qs_param(
- val, allow_forbidden=allow_forbidden)
- elif prefix == _QS_MEMBER_OF:
- # special handling of member_of qparam since we allow multiple
- # member_of params at microversion 1.24.
- # NOTE(jaypipes): Yes, this is inefficient to do this when there
- # are multiple member_of query parameters, but we do this so we can
- # error out if someone passes an "orphaned" member_of request
- # group.
- # TODO(jaypipes): Do validation of query parameters using
- # JSONSchema
- request_group.member_of = normalize_member_of_qs_params(
- req, suffix)
-
- # Ensure any group with 'required' or 'member_of' also has 'resources'.
- orphans = [('required%s' % suff) for suff, group in by_suffix.items()
- if group.required_traits and not group.resources]
- if orphans:
- msg = _('All traits parameters must be associated with resources. '
- 'Found the following orphaned traits keys: %s')
- raise webob.exc.HTTPBadRequest(msg % ', '.join(orphans))
- orphans = [('member_of%s' % suff) for suff, group in by_suffix.items()
- if group.member_of and not group.resources]
- if orphans:
- msg = _('All member_of parameters must be associated with '
- 'resources. Found the following orphaned member_of '
- 'keys: %s')
- raise webob.exc.HTTPBadRequest(msg % ', '.join(orphans))
- # All request groups must have resources (which is almost, but not quite,
- # verified by the orphan checks above).
- if not all(grp.resources for grp in by_suffix.values()):
- msg = _("All request groups must specify resources.")
- raise webob.exc.HTTPBadRequest(msg)
- # The above would still pass if there were no request groups
- if not by_suffix:
- msg = _("At least one request group (`resources` or `resources{N}`) "
- "is required.")
- raise webob.exc.HTTPBadRequest(msg)
-
- # Make adjustments for forbidden traits by stripping forbidden out
- # of required.
- if allow_forbidden:
- conflicting_traits = []
- for suff, group in by_suffix.items():
- forbidden = [trait for trait in group.required_traits
- if trait.startswith('!')]
- group.required_traits = (group.required_traits - set(forbidden))
- group.forbidden_traits = set([trait.lstrip('!') for trait in
- forbidden])
- conflicts = group.forbidden_traits & group.required_traits
- if conflicts:
- conflicting_traits.append('required%s: (%s)'
- % (suff, ', '.join(conflicts)))
- if conflicting_traits:
- msg = _('Conflicting required and forbidden traits found in the '
- 'following traits keys: %s')
- raise webob.exc.HTTPBadRequest(msg % ', '.join(conflicting_traits))
-
- return by_suffix
-
-
-def ensure_consumer(ctx, consumer_uuid, project_id, user_id,
- consumer_generation, want_version):
- """Ensures there are records in the consumers, projects and users table for
- the supplied external identifiers.
-
- Returns a tuple containing the populated Consumer object containing Project
- and User sub-objects and a boolean indicating whether a new Consumer object
- was created (as opposed to an existing consumer record retrieved)
-
- :note: If the supplied project or user external identifiers do not match an
- existing consumer's project and user identifiers, the existing
- consumer's project and user IDs are updated to reflect the supplied
- ones.
-
- :param ctx: The request context.
- :param consumer_uuid: The uuid of the consumer of the resources.
- :param project_id: The external ID of the project consuming the resources.
- :param user_id: The external ID of the user consuming the resources.
- :param consumer_generation: The generation provided by the user for this
- consumer.
- :param want_version: the microversion matcher.
- :raises webob.exc.HTTPConflict if consumer generation is required and there
- was a mismatch
- """
- created_new_consumer = False
- requires_consumer_generation = want_version.matches((1, 28))
- if project_id is None:
- project_id = CONF.placement.incomplete_consumer_project_id
- user_id = CONF.placement.incomplete_consumer_user_id
- try:
- proj = project_obj.Project.get_by_external_id(ctx, project_id)
- except exception.NotFound:
- # Auto-create the project if we found no record of it...
- try:
- proj = project_obj.Project(ctx, external_id=project_id)
- proj.create()
- except exception.ProjectExists:
- # No worries, another thread created this project already
- proj = project_obj.Project.get_by_external_id(ctx, project_id)
- try:
- user = user_obj.User.get_by_external_id(ctx, user_id)
- except exception.NotFound:
- # Auto-create the user if we found no record of it...
- try:
- user = user_obj.User(ctx, external_id=user_id)
- user.create()
- except exception.UserExists:
- # No worries, another thread created this user already
- user = user_obj.User.get_by_external_id(ctx, user_id)
-
- try:
- consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_uuid)
- if requires_consumer_generation:
- if consumer.generation != consumer_generation:
- raise webob.exc.HTTPConflict(
- _('consumer generation conflict - '
- 'expected %(expected_gen)s but got %(got_gen)s') %
- {
- 'expected_gen': consumer.generation,
- 'got_gen': consumer_generation,
- },
- comment=errors.CONCURRENT_UPDATE)
- # NOTE(jaypipes): The user may have specified a different project and
- # user external ID than the one that we had for the consumer. If this
- # is the case, go ahead and modify the consumer record with the
- # newly-supplied project/user information, but do not bump the consumer
- # generation (since it will be bumped in the
- # AllocationList.replace_all() method).
- #
- # TODO(jaypipes): This means that there may be a partial update.
- # Imagine a scenario where a user calls POST /allocations, and the
- # payload references two consumers. The first consumer is a new
- # consumer and is auto-created. The second consumer is an existing
- # consumer, but contains a different project or user ID than the
- # existing consumer's record. If the eventual call to
- # AllocationList.replace_all() fails for whatever reason (say, a
- # resource provider generation conflict or out of resources failure),
- # we will end up deleting the auto-created consumer but we MAY not undo
- # the changes to the second consumer's project and user ID. I say MAY
- # and not WILL NOT because I'm not sure that the exception that gets
- # raised from AllocationList.replace_all() will cause the context
- # manager's transaction to rollback automatically. I believe that the
- # same transaction context is used for both util.ensure_consumer() and
- # AllocationList.replace_all() within the same HTTP request, but need
- # to test this to be 100% certain...
- if (project_id != consumer.project.external_id or
- user_id != consumer.user.external_id):
- LOG.debug("Supplied project or user ID for consumer %s was "
- "different than existing record. Updating consumer "
- "record.", consumer_uuid)
- consumer.project = proj
- consumer.user = user
- consumer.update()
- except exception.NotFound:
- # If we are attempting to modify or create allocations after 1.26, we
- # need a consumer generation specified. The user must have specified
- # None for the consumer generation if we get here, since there was no
- # existing consumer with this UUID and therefore the user should be
- # indicating that they expect the consumer did not exist.
- if requires_consumer_generation:
- if consumer_generation is not None:
- raise webob.exc.HTTPConflict(
- _('consumer generation conflict - '
- 'expected null but got %s') % consumer_generation,
- comment=errors.CONCURRENT_UPDATE)
- # No such consumer. This is common for new allocations. Create the
- # consumer record
- try:
- consumer = consumer_obj.Consumer(
- ctx, uuid=consumer_uuid, project=proj, user=user)
- consumer.create()
- created_new_consumer = True
- except exception.ConsumerExists:
- # No worries, another thread created this user already
- consumer = consumer_obj.Consumer.get_by_uuid(ctx, consumer_uuid)
- return consumer, created_new_consumer
diff --git a/nova/api/openstack/placement/wsgi.py b/nova/api/openstack/placement/wsgi.py
deleted file mode 100644
index a37e5954eb..0000000000
--- a/nova/api/openstack/placement/wsgi.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""WSGI script for Placement API
-
-WSGI handler for running Placement API under Apache2, nginx, gunicorn etc.
-"""
-
-import logging as py_logging
-import os
-import os.path
-
-from oslo_log import log as logging
-from oslo_middleware import cors
-from oslo_policy import opts as policy_opts
-from oslo_utils import importutils
-import pbr.version
-
-from nova.api.openstack.placement import db_api
-from nova.api.openstack.placement import deploy
-from nova import conf
-
-
-profiler = importutils.try_import('osprofiler.opts')
-
-
-CONFIG_FILE = 'nova.conf'
-
-
-version_info = pbr.version.VersionInfo('nova')
-
-
-def setup_logging(config):
- # Any dependent libraries that have unhelp debug levels should be
- # pinned to a higher default.
- extra_log_level_defaults = [
- 'routes=INFO',
- ]
- logging.set_defaults(default_log_levels=logging.get_default_log_levels() +
- extra_log_level_defaults)
- logging.setup(config, 'nova')
- py_logging.captureWarnings(True)
-
-
-def _get_config_file(env=None):
- if env is None:
- env = os.environ
-
- dirname = env.get('OS_PLACEMENT_CONFIG_DIR', '/etc/nova').strip()
- return os.path.join(dirname, CONFIG_FILE)
-
-
-def _parse_args(argv, default_config_files):
- logging.register_options(conf.CONF)
-
- if profiler:
- profiler.set_defaults(conf.CONF)
-
- _set_middleware_defaults()
-
- # This is needed so we can check [oslo_policy]/enforce_scope in the
- # deploy module.
- policy_opts.set_defaults(conf.CONF)
-
- conf.CONF(argv[1:], project='nova', version=version_info.version_string(),
- default_config_files=default_config_files)
-
-
-def _set_middleware_defaults():
- """Update default configuration options for oslo.middleware."""
- cors.set_defaults(
- allow_headers=['X-Auth-Token',
- 'X-Openstack-Request-Id',
- 'X-Identity-Status',
- 'X-Roles',
- 'X-Service-Catalog',
- 'X-User-Id',
- 'X-Tenant-Id'],
- expose_headers=['X-Auth-Token',
- 'X-Openstack-Request-Id',
- 'X-Subject-Token',
- 'X-Service-Token'],
- allow_methods=['GET',
- 'PUT',
- 'POST',
- 'DELETE',
- 'PATCH']
- )
-
-
-def init_application():
- # initialize the config system
- conffile = _get_config_file()
-
- # NOTE(lyarwood): Call reset to ensure the ConfigOpts object doesn't
- # already contain registered options if the app is reloaded.
- conf.CONF.reset()
-
- _parse_args([], default_config_files=[conffile])
- db_api.configure(conf.CONF)
-
- # initialize the logging system
- setup_logging(conf.CONF)
-
- # dump conf at debug if log_options
- if conf.CONF.log_options:
- conf.CONF.log_opt_values(
- logging.getLogger(__name__),
- logging.DEBUG)
-
- # build and return our WSGI app
- return deploy.loadapp(conf.CONF)
diff --git a/nova/api/openstack/placement/wsgi_wrapper.py b/nova/api/openstack/placement/wsgi_wrapper.py
deleted file mode 100644
index fcb6551d3e..0000000000
--- a/nova/api/openstack/placement/wsgi_wrapper.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Extend functionality from webob.dec.wsgify for Placement API."""
-
-import webob
-
-from oslo_log import log as logging
-from webob.dec import wsgify
-
-from nova.api.openstack.placement import util
-
-LOG = logging.getLogger(__name__)
-
-
-class PlacementWsgify(wsgify):
-
- def call_func(self, req, *args, **kwargs):
- """Add json_error_formatter to any webob HTTPExceptions."""
- try:
- super(PlacementWsgify, self).call_func(req, *args, **kwargs)
- except webob.exc.HTTPException as exc:
- LOG.debug("Placement API returning an error response: %s", exc)
- exc.json_formatter = util.json_error_formatter
- # The exception itself is not passed to json_error_formatter
- # but environ is, so set the environ.
- if exc.comment:
- req.environ[util.ENV_ERROR_CODE] = exc.comment
- exc.comment = None
- raise
diff --git a/nova/cmd/manage.py b/nova/cmd/manage.py
index a2cebb8e1e..a060d77f95 100644
--- a/nova/cmd/manage.py
+++ b/nova/cmd/manage.py
@@ -45,8 +45,6 @@ import six
import six.moves.urllib.parse as urlparse
from sqlalchemy.engine import url as sqla_url
-# FIXME(cdent): This is a speedbump in the extraction process
-from nova.api.openstack.placement.objects import consumer as consumer_obj
from nova.cmd import common as cmd_common
from nova.compute import api as compute_api
import nova.conf
@@ -400,9 +398,6 @@ class DbCommands(object):
# need to be populated if it was not specified during boot time.
instance_obj.populate_missing_availability_zones,
# Added in Rocky
- # FIXME(cdent): This is a factor that needs to be addressed somehow
- consumer_obj.create_incomplete_consumers,
- # Added in Rocky
instance_mapping_obj.populate_queued_for_delete,
# Added in Stein
compute_node_obj.migrate_empty_ratio,
@@ -864,11 +859,7 @@ class ApiDbCommands(object):
@args('--version', metavar='<version>', help=argparse.SUPPRESS)
@args('version2', metavar='VERSION', nargs='?', help='Database version')
def sync(self, version=None, version2=None):
- """Sync the database up to the most recent version.
-
- If placement_database.connection is not None, sync that
- database using the API database migrations.
- """
+ """Sync the database up to the most recent version."""
if version and not version2:
print(_("DEPRECATED: The '--version' parameter was deprecated in "
"the Pike cycle and will not be supported in future "
@@ -876,15 +867,7 @@ class ApiDbCommands(object):
"instead"))
version2 = version
- # NOTE(cdent): At the moment, the migration code deep in the belly
- # of the migration package doesn't actually return anything, so
- # returning the result of db_sync is not particularly meaningful
- # here. But, in case that changes, we store the result from the
- # the placement sync to and with the api sync.
- result = True
- if CONF.placement_database.connection is not None:
- result = migration.db_sync(version2, database='placement')
- return migration.db_sync(version2, database='api') and result
+ return migration.db_sync(version2, database='api')
def version(self):
"""Print the current database version."""
@@ -1844,7 +1827,6 @@ class PlacementCommands(object):
return num_processed
- # FIXME(cdent): This needs to be addressed as part of extraction.
@action_description(
_("Iterates over non-cell0 cells looking for instances which do "
"not have allocations in the Placement service, or have incomplete "
diff --git a/nova/conf/database.py b/nova/conf/database.py
index 253f418159..90e88b81cb 100644
--- a/nova/conf/database.py
+++ b/nova/conf/database.py
@@ -106,61 +106,9 @@ def enrich_help_text(alt_db_opts):
alt_db_opt.help = db_opt.help + alt_db_opt.help
-# NOTE(cdent): See the note above on api_db_group. The same issues
-# apply here.
-
-placement_db_group = cfg.OptGroup('placement_database',
- title='Placement API database options',
- help="""
-The *Placement API Database* is a separate database which can be used with the
-placement service. This database is optional: if the connection option is not
-set, the nova api database will be used instead.
-""")
-
-placement_db_opts = [
- cfg.StrOpt('connection',
- help='',
- secret=True),
- cfg.StrOpt('connection_parameters',
- default='',
- help=''),
- cfg.BoolOpt('sqlite_synchronous',
- default=True,
- help=''),
- cfg.StrOpt('slave_connection',
- secret=True,
- help=''),
- cfg.StrOpt('mysql_sql_mode',
- default='TRADITIONAL',
- help=''),
- cfg.IntOpt('connection_recycle_time',
- default=3600,
- help=''),
- cfg.IntOpt('max_pool_size',
- help=''),
- cfg.IntOpt('max_retries',
- default=10,
- help=''),
- cfg.IntOpt('retry_interval',
- default=10,
- help=''),
- cfg.IntOpt('max_overflow',
- help=''),
- cfg.IntOpt('connection_debug',
- default=0,
- help=''),
- cfg.BoolOpt('connection_trace',
- default=False,
- help=''),
- cfg.IntOpt('pool_timeout',
- help=''),
-] # noqa
-
-
def register_opts(conf):
oslo_db_options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION)
conf.register_opts(api_db_opts, group=api_db_group)
- conf.register_opts(placement_db_opts, group=placement_db_group)
def list_opts():
@@ -174,9 +122,7 @@ def list_opts():
global _ENRICHED
if not _ENRICHED:
enrich_help_text(api_db_opts)
- enrich_help_text(placement_db_opts)
_ENRICHED = True
return {
api_db_group: api_db_opts,
- placement_db_group: placement_db_opts,
}
diff --git a/nova/conf/placement.py b/nova/conf/placement.py
index 31eb3b403a..625c48f070 100644
--- a/nova/conf/placement.py
+++ b/nova/conf/placement.py
@@ -17,80 +17,22 @@ from nova.conf import utils as confutils
DEFAULT_SERVICE_TYPE = 'placement'
-DEFAULT_CONSUMER_MISSING_ID = '00000000-0000-0000-0000-000000000000'
+
placement_group = cfg.OptGroup(
'placement',
title='Placement Service Options',
help="Configuration options for connecting to the placement API service")
-placement_opts = [
- cfg.BoolOpt(
- 'randomize_allocation_candidates',
- default=False,
- help="""
-If True, when limiting allocation candidate results, the results will be
-a random sampling of the full result set. If False, allocation candidates
-are returned in a deterministic but undefined order. That is, all things
-being equal, two requests for allocation candidates will return the same
-results in the same order; but no guarantees are made as to how that order
-is determined.
-"""),
- # TODO(mriedem): When placement is split out of nova, this should be
- # deprecated since then [oslo_policy]/policy_file can be used.
- cfg.StrOpt(
- 'policy_file',
- # This default matches what is in
- # etc/nova/placement-policy-generator.conf
- default='placement-policy.yaml',
- help='The file that defines placement policies. This can be an '
- 'absolute path or relative to the configuration file.'),
- cfg.StrOpt(
- 'incomplete_consumer_project_id',
- default=DEFAULT_CONSUMER_MISSING_ID,
- help="""
-Early API microversions (<1.8) allowed creating allocations and not specifying
-a project or user identifier for the consumer. In cleaning up the data
-modeling, we no longer allow missing project and user information. If an older
-client makes an allocation, we'll use this in place of the information it
-doesn't provide.
-"""),
- cfg.StrOpt(
- 'incomplete_consumer_user_id',
- default=DEFAULT_CONSUMER_MISSING_ID,
- help="""
-Early API microversions (<1.8) allowed creating allocations and not specifying
-a project or user identifier for the consumer. In cleaning up the data
-modeling, we no longer allow missing project and user information. If an older
-client makes an allocation, we'll use this in place of the information it
-doesn't provide.
-"""),
-]
-
-
-# Duplicate log_options from oslo_service so that we don't have to import
-# that package into placement.
-# NOTE(cdent): Doing so ends up requiring eventlet and other unnecessary
-# packages for just this one setting.
-service_opts = [
- cfg.BoolOpt('log_options',
- default=True,
- help='Enables or disables logging values of all registered '
- 'options when starting a service (at DEBUG level).'),
-]
-
def register_opts(conf):
conf.register_group(placement_group)
- conf.register_opts(placement_opts, group=placement_group)
- conf.register_opts(service_opts)
confutils.register_ksa_opts(conf, placement_group, DEFAULT_SERVICE_TYPE)
def list_opts():
return {
placement_group.name: (
- placement_opts +
ks_loading.get_session_conf_options() +
ks_loading.get_auth_common_conf_options() +
ks_loading.get_auth_plugin_conf_options('password') +
diff --git a/nova/config.py b/nova/config.py
index de2d10c826..b5064b4783 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -18,7 +18,6 @@
from oslo_log import log
from oslo_utils import importutils
-from nova.api.openstack.placement import db_api as placement_db
from nova.common import config
import nova.conf
from nova.db.sqlalchemy import api as sqlalchemy_api
@@ -62,4 +61,3 @@ def parse_args(argv, default_config_files=None, configure_db=True,
if configure_db:
sqlalchemy_api.configure(CONF)
- placement_db.configure(CONF)
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index 79d8423a07..a8a544999f 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -24,7 +24,6 @@ from oslo_log import log as logging
import sqlalchemy
from sqlalchemy.sql import null
-from nova.api.openstack.placement import db_api as placement_db
from nova.db.sqlalchemy import api as db_session
from nova import exception
from nova.i18n import _
@@ -32,7 +31,6 @@ from nova.i18n import _
INIT_VERSION = {}
INIT_VERSION['main'] = 215
INIT_VERSION['api'] = 0
-INIT_VERSION['placement'] = 0
_REPOSITORY = {}
LOG = logging.getLogger(__name__)
@@ -43,8 +41,6 @@ def get_engine(database='main', context=None):
return db_session.get_engine(context=context)
if database == 'api':
return db_session.get_api_engine()
- if database == 'placement':
- return placement_db.get_placement_engine()
def db_sync(version=None, database='main', context=None):
@@ -173,10 +169,7 @@ def _find_migrate_repo(database='main'):
"""Get the path for the migrate repository."""
global _REPOSITORY
rel_path = 'migrate_repo'
- if database == 'api' or database == 'placement':
- # NOTE(cdent): For the time being the placement database (if
- # it is being used) is a replica (in structure) of the api
- # database.
+ if database == 'api':
rel_path = os.path.join('api_migrations', 'migrate_repo')
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
rel_path)
diff --git a/nova/hacking/checks.py b/nova/hacking/checks.py
index b567d5c44d..0f3cf06572 100644
--- a/nova/hacking/checks.py
+++ b/nova/hacking/checks.py
@@ -626,15 +626,12 @@ def check_config_option_in_central_place(logical_line, filename):
def check_policy_registration_in_central_place(logical_line, filename):
msg = ('N350: Policy registration should be in the central location(s) '
- '"/nova/policies/*" or "nova/api/openstack/placement/policies/*".')
+ '"/nova/policies/*"')
# This is where registration should happen
- if ("nova/policies/" in filename or
- "nova/api/openstack/placement/policies/" in filename):
+ if "nova/policies/" in filename:
return
# A couple of policy tests register rules
- if ("nova/tests/unit/test_policy.py" in filename or
- "nova/tests/unit/api/openstack/placement/test_policy.py" in
- filename):
+ if "nova/tests/unit/test_policy.py" in filename:
return
if rule_default_re.match(logical_line):
diff --git a/nova/rc_fields.py b/nova/rc_fields.py
deleted file mode 100644
index 5c525e9214..0000000000
--- a/nova/rc_fields.py
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Standard Resource Class Fields."""
-
-# NOTE(cdent): This file is only used by the placement code within
-# nova. Other uses of resource classes in nova make use of the
-# os-resource-classes library. The placement code within nova
-# continues to use this so that that code can remain unchanged.
-
-import re
-
-from oslo_versionedobjects import fields
-
-
-class ResourceClass(fields.StringField):
- """Classes of resources provided to consumers."""
-
- CUSTOM_NAMESPACE = 'CUSTOM_'
- """All non-standard resource classes must begin with this string."""
-
- VCPU = 'VCPU'
- MEMORY_MB = 'MEMORY_MB'
- DISK_GB = 'DISK_GB'
- PCI_DEVICE = 'PCI_DEVICE'
- SRIOV_NET_VF = 'SRIOV_NET_VF'
- NUMA_SOCKET = 'NUMA_SOCKET'
- NUMA_CORE = 'NUMA_CORE'
- NUMA_THREAD = 'NUMA_THREAD'
- NUMA_MEMORY_MB = 'NUMA_MEMORY_MB'
- IPV4_ADDRESS = 'IPV4_ADDRESS'
- VGPU = 'VGPU'
- VGPU_DISPLAY_HEAD = 'VGPU_DISPLAY_HEAD'
- # Standard resource class for network bandwidth egress measured in
- # kilobits per second.
- NET_BW_EGR_KILOBIT_PER_SEC = 'NET_BW_EGR_KILOBIT_PER_SEC'
- # Standard resource class for network bandwidth ingress measured in
- # kilobits per second.
- NET_BW_IGR_KILOBIT_PER_SEC = 'NET_BW_IGR_KILOBIT_PER_SEC'
-
- # The ordering here is relevant. If you must add a value, only
- # append.
- STANDARD = (VCPU, MEMORY_MB, DISK_GB, PCI_DEVICE, SRIOV_NET_VF,
- NUMA_SOCKET, NUMA_CORE, NUMA_THREAD, NUMA_MEMORY_MB,
- IPV4_ADDRESS, VGPU, VGPU_DISPLAY_HEAD,
- NET_BW_EGR_KILOBIT_PER_SEC, NET_BW_IGR_KILOBIT_PER_SEC)
-
- @classmethod
- def normalize_name(cls, rc_name):
- if rc_name is None:
- return None
- # Replace non-alphanumeric characters with underscores
- norm_name = re.sub('[^0-9A-Za-z]+', '_', rc_name)
- # Bug #1762789: Do .upper after replacing non alphanumerics.
- norm_name = norm_name.upper()
- norm_name = cls.CUSTOM_NAMESPACE + norm_name
- return norm_name
-
-
-class ResourceClassField(fields.AutoTypedField):
- AUTO_TYPE = ResourceClass()
diff --git a/nova/tests/functional/test_nova_manage.py b/nova/tests/functional/test_nova_manage.py
index f5c2038af7..444db78a41 100644
--- a/nova/tests/functional/test_nova_manage.py
+++ b/nova/tests/functional/test_nova_manage.py
@@ -24,6 +24,7 @@ from nova import test
from nova.tests.functional import integrated_helpers
CONF = config.CONF
+INCOMPLETE_CONSUMER_ID = '00000000-0000-0000-0000-000000000000'
class NovaManageDBIronicTest(test.TestCase):
@@ -626,10 +627,8 @@ class TestNovaManagePlacementHealAllocations(
# the project_id and user_id are based on the sentinel values.
allocations = self.placement_api.get(
'/allocations/%s' % server['id'], version='1.12').body
- self.assertEqual(CONF.placement.incomplete_consumer_project_id,
- allocations['project_id'])
- self.assertEqual(CONF.placement.incomplete_consumer_user_id,
- allocations['user_id'])
+ self.assertEqual(INCOMPLETE_CONSUMER_ID, allocations['project_id'])
+ self.assertEqual(INCOMPLETE_CONSUMER_ID, allocations['user_id'])
allocations = allocations['allocations']
self.assertIn(rp_uuid, allocations)
self.assertFlavorMatchesAllocation(self.flavor, server['id'], rp_uuid)
diff --git a/nova/tests/unit/policy_fixture.py b/nova/tests/unit/policy_fixture.py
index 651f096bcb..a076afa93d 100644
--- a/nova/tests/unit/policy_fixture.py
+++ b/nova/tests/unit/policy_fixture.py
@@ -18,7 +18,6 @@ import fixtures
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
-from nova.api.openstack.placement import policy as placement_policy
import nova.conf
from nova.conf import paths
from nova import policies
@@ -127,32 +126,3 @@ class RoleBasedPolicyFixture(RealPolicyFixture):
self.policy_file = os.path.join(self.policy_dir.path, 'policy.json')
with open(self.policy_file, 'w') as f:
jsonutils.dump(policy, f)
-
-
-class PlacementPolicyFixture(fixtures.Fixture):
- """Load the default placement policy for tests.
-
- This fixture requires nova.tests.unit.conf_fixture.ConfFixture.
- """
- def setUp(self):
- super(PlacementPolicyFixture, self).setUp()
- policy_file = paths.state_path_def('etc/nova/placement-policy.yaml')
- CONF.set_override('policy_file', policy_file, group='placement')
- placement_policy.reset()
- placement_policy.init()
- self.addCleanup(placement_policy.reset)
-
- @staticmethod
- def set_rules(rules, overwrite=True):
- """Set placement policy rules.
-
- .. note:: The rules must first be registered via the
- Enforcer.register_defaults method.
-
- :param rules: dict of action=rule mappings to set
- :param overwrite: Whether to overwrite current rules or update them
- with the new rules.
- """
- enforcer = placement_policy.get_enforcer()
- enforcer.set_rules(oslo_policy.Rules.from_dict(rules),
- overwrite=overwrite)
diff --git a/nova/tests/unit/test_conf.py b/nova/tests/unit/test_conf.py
index 95f3ec41e9..21e5f730b4 100644
--- a/nova/tests/unit/test_conf.py
+++ b/nova/tests/unit/test_conf.py
@@ -91,9 +91,6 @@ class TestParseArgs(test.NoDBTestCase):
m = mock.patch('nova.db.sqlalchemy.api.configure')
self.nova_db_config_mock = m.start()
self.addCleanup(self.nova_db_config_mock.stop)
- m = mock.patch('nova.api.openstack.placement.db_api.configure')
- self.placement_db_config_mock = m.start()
- self.addCleanup(self.placement_db_config_mock.stop)
@mock.patch.object(config.log, 'register_options')
def test_parse_args_glance_debug_false(self, register_options):
@@ -101,7 +98,6 @@ class TestParseArgs(test.NoDBTestCase):
config.parse_args([], configure_db=False, init_rpc=False)
self.assertIn('glanceclient=WARN', config.CONF.default_log_levels)
self.nova_db_config_mock.assert_not_called()
- self.placement_db_config_mock.assert_not_called()
@mock.patch.object(config.log, 'register_options')
def test_parse_args_glance_debug_true(self, register_options):
@@ -109,4 +105,3 @@ class TestParseArgs(test.NoDBTestCase):
config.parse_args([], configure_db=True, init_rpc=False)
self.assertIn('glanceclient=DEBUG', config.CONF.default_log_levels)
self.nova_db_config_mock.assert_called_once_with(config.CONF)
- self.placement_db_config_mock.assert_called_once_with(config.CONF)
diff --git a/nova/tests/unit/test_nova_manage.py b/nova/tests/unit/test_nova_manage.py
index a00e4b45bd..393616bb22 100644
--- a/nova/tests/unit/test_nova_manage.py
+++ b/nova/tests/unit/test_nova_manage.py
@@ -2562,7 +2562,7 @@ class TestNovaManagePlacement(test.NoDBTestCase):
new_callable=mock.NonCallableMock) # assert not called
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.put',
return_value=fake_requests.FakeResponse(204))
- def test_heal_allocations_sentinel_consumer(
+ def test_heal_allocations(
self, mock_put, mock_get_compute_node, mock_get_allocs,
mock_get_instances, mock_get_all_cells):
"""Tests the scenario that there are allocations created using
@@ -2584,8 +2584,8 @@ class TestNovaManagePlacement(test.NoDBTestCase):
}
}
},
- "project_id": CONF.placement.incomplete_consumer_project_id,
- "user_id": CONF.placement.incomplete_consumer_user_id
+ "project_id": uuidsentinel.project_id,
+ "user_id": uuidsentinel.user_id
}
self.assertEqual(0, self.cli.heal_allocations(verbose=True))
self.assertIn('Processed 1 instances.', self.output.getvalue())
@@ -2614,7 +2614,7 @@ class TestNovaManagePlacement(test.NoDBTestCase):
return_value=fake_requests.FakeResponse(
409, content='Inventory and/or allocations changed while '
'attempting to allocate'))
- def test_heal_allocations_sentinel_consumer_put_fails(
+ def test_heal_allocations_put_fails(
self, mock_put, mock_get_allocs, mock_get_instances,
mock_get_all_cells):
"""Tests the scenario that there are allocations created using
@@ -2634,8 +2634,8 @@ class TestNovaManagePlacement(test.NoDBTestCase):
}
}
},
- "project_id": CONF.placement.incomplete_consumer_project_id,
- "user_id": CONF.placement.incomplete_consumer_user_id
+ "project_id": uuidsentinel.project_id,
+ "user_id": uuidsentinel.user_id
}
self.assertEqual(3, self.cli.heal_allocations(verbose=True))
self.assertIn(
diff --git a/releasenotes/notes/placement-deleted-a79ad405f428a5f8.yaml b/releasenotes/notes/placement-deleted-a79ad405f428a5f8.yaml
new file mode 100644
index 0000000000..416c8f2fa0
--- /dev/null
+++ b/releasenotes/notes/placement-deleted-a79ad405f428a5f8.yaml
@@ -0,0 +1,13 @@
+---
+other:
+ - |
+ The code for the `placement service
+ <https://docs.openstack.org/placement>`_ was moved to its own
+ `repository <https://git.openstack.org/cgit/openstack/placement>`_ in
+ Stein. The placement code in nova has been deleted.
+upgrade:
+ - |
+ If you upgraded your OpenStack deployment to Stein without switching to use
+ the now independent placement service, you must do so before upgrading to
+ Train. `Instructions <https://docs.openstack.org/placement/latest/upgrade/to-stein.html>`_
+ for one way to do this are available.
diff --git a/setup.cfg b/setup.cfg
index b9481bc445..985191dc82 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -40,7 +40,6 @@ oslo.config.opts.defaults =
oslo.policy.enforcer =
nova = nova.policy:get_enforcer
- placement = nova.api.openstack.placement.policy:get_enforcer
oslo.policy.policies =
# The sample policies will be ordered by entry point and then by list
@@ -48,7 +47,6 @@ oslo.policy.policies =
# list_rules method into a separate entry point rather than using the
# aggregate method.
nova = nova.policies:list_rules
- placement = nova.api.openstack.placement.policies:list_rules
nova.compute.monitors.cpu =
virt_driver = nova.compute.monitors.cpu.virt_driver:Monitor
@@ -74,7 +72,6 @@ console_scripts =
nova-status = nova.cmd.status:main
nova-xvpvncproxy = nova.cmd.xvpvncproxy:main
wsgi_scripts =
- nova-placement-api = nova.api.openstack.placement.wsgi:init_application
nova-api-wsgi = nova.api.openstack.compute.wsgi:init_application
nova-metadata-wsgi = nova.api.metadata.wsgi:init_application
diff --git a/tox.ini b/tox.ini
index 2e1af69786..65effb9b17 100644
--- a/tox.ini
+++ b/tox.ini
@@ -93,7 +93,7 @@ commands =
# special way. See the following for more details.
# http://stestr.readthedocs.io/en/latest/MANUAL.html#grouping-tests
# https://gabbi.readthedocs.io/en/latest/#purpose
- stestr --test-path=./nova/tests/functional --group_regex=nova\.tests\.functional\.api\.openstack\.placement\.test_placement_api(?:\.|_)([^_]+) run {posargs}
+ stestr --test-path=./nova/tests/functional run {posargs}
stestr slowest
# TODO(gcb) Merge this into [testenv:functional] when functional tests are gating
@@ -132,11 +132,6 @@ envdir = {toxworkdir}/shared
commands =
oslopolicy-sample-generator --config-file=etc/nova/nova-policy-generator.conf
-[testenv:genplacementpolicy]
-envdir = {toxworkdir}/shared
-commands =
- oslopolicy-sample-generator --config-file=etc/nova/placement-policy-generator.conf
-
[testenv:cover]
# TODO(stephenfin): Remove the PYTHON hack below in favour of a [coverage]
# section once we rely on coverage 4.3+