Diffstat (limited to 'nova/compute/api.py')
-rw-r--r--  nova/compute/api.py  | 169
1 file changed, 119 insertions(+), 50 deletions(-)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 3ec0a5267c..26ae3cf0f3 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -22,7 +22,6 @@ networking and storage of VMs, and compute hosts on which they run)."""
 import collections
 import functools
 import re
-import string
 import typing as ty
 
 from castellan import key_manager
@@ -75,6 +74,7 @@ from nova.objects import quotas as quotas_obj
 from nova.objects import service as service_obj
 from nova.pci import request as pci_request
 from nova.policies import servers as servers_policies
+from nova.policies import shelve as shelve_policies
 import nova.policy
 from nova import profiler
 from nova import rpc
@@ -380,6 +380,8 @@ def block_extended_resource_request(function):
 class API:
     """API for interacting with the compute manager."""
 
+    _sentinel = object()
+
     def __init__(self, image_api=None, network_api=None, volume_api=None):
         self.image_api = image_api or glance.API()
         self.network_api = network_api or neutron.API()
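The _sentinel class attribute added above exists so that unshelve() (below) can distinguish three caller intents for new_az: omitted, explicitly None, and a concrete AZ. A plain default of None cannot separate the first two cases. A minimal standalone sketch of this sentinel-default pattern; the names _MISSING and set_zone are illustrative, not Nova's:

    # Sentinel-default sketch: distinguish "omitted" from "explicitly None".
    _MISSING = object()

    def set_zone(record, zone=_MISSING):
        if zone is _MISSING:
            return record          # parameter omitted: keep the current value
        record['zone'] = zone      # None clears the pin; a string sets it
        return record

    assert set_zone({'zone': 'az1'}) == {'zone': 'az1'}
    assert set_zone({'zone': 'az1'}, zone=None) == {'zone': None}
    assert set_zone({'zone': 'az1'}, zone='az2') == {'zone': 'az2'}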
@@ -4391,31 +4393,45 @@ class API:
             context, instance=instance,
             clean_shutdown=clean_shutdown, accel_uuids=accel_uuids)
 
+    def _check_offloaded(self, context, instance):
+        """Check that the status of the instance is SHELVED_OFFLOADED,
+        if not raise an exception.
+        """
+        if instance.vm_state != vm_states.SHELVED_OFFLOADED:
+            # NOTE(brinzhang): If the server status is 'SHELVED', it still
+            # belongs to a host, so the availability_zone should not change.
+            # Unshelving a shelved offloaded server will go through the
+            # scheduler to find a new host.
+            raise exception.UnshelveInstanceInvalidState(
+                state=instance.vm_state, instance_uuid=instance.uuid)
+
+    def _ensure_host_in_az(self, context, host, availability_zone):
+        """Ensure that the provided host belongs to the availability zone,
+        if not raise an exception.
+        """
+        if availability_zone is not None:
+            host_az = availability_zones.get_host_availability_zone(
+                context,
+                host
+            )
+            if host_az != availability_zone:
+                raise exception.UnshelveHostNotInAZ(
+                    host=host, availability_zone=availability_zone)
+
     def _validate_unshelve_az(self, context, instance, availability_zone):
         """Verify the specified availability_zone during unshelve.
 
-        Verifies that the server is shelved offloaded, the AZ exists and
-        if [cinder]/cross_az_attach=False, that any attached volumes are in
-        the same AZ.
+        Verifies the AZ exists and, if [cinder]/cross_az_attach=False, that
+        any attached volumes are in the same AZ.
 
         :param context: nova auth RequestContext for the unshelve action
         :param instance: Instance object for the server being unshelved
         :param availability_zone: The user-requested availability zone in
             which to unshelve the server.
-        :raises: UnshelveInstanceInvalidState if the server is not shelved
-            offloaded
         :raises: InvalidRequest if the requested AZ does not exist
         :raises: MismatchVolumeAZException if [cinder]/cross_az_attach=False
            and any attached volumes are not in the requested AZ
         """
-        if instance.vm_state != vm_states.SHELVED_OFFLOADED:
-            # NOTE(brinzhang): If the server status is 'SHELVED', it still
-            # belongs to a host, the availability_zone has not changed.
-            # Unshelving a shelved offloaded server will go through the
-            # scheduler to find a new host.
-            raise exception.UnshelveInstanceInvalidState(
-                state=instance.vm_state, instance_uuid=instance.uuid)
-
         available_zones = availability_zones.get_availability_zones(
             context, self.host_api, get_only_available=True)
         if availability_zone not in available_zones:
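Both new helpers follow the same guard-clause shape: validate a single precondition and raise a domain-specific exception, which lets unshelve() below compose them linearly. A standalone sketch of that shape under illustrative names (HostNotInAZ and lookup_az are stand-ins; Nova raises exception.UnshelveHostNotInAZ via its own host-to-AZ lookup):

    # Guard-clause sketch: one precondition, one domain-specific exception.
    class HostNotInAZ(Exception):
        pass

    def ensure_host_in_az(lookup_az, host, availability_zone):
        # availability_zone=None means "no AZ restriction": nothing to check.
        if availability_zone is not None:
            if lookup_az(host) != availability_zone:
                raise HostNotInAZ(
                    '%s is not in %s' % (host, availability_zone))

    az_by_host = {'compute1': 'az1'}   # stand-in for Nova's host->AZ mapping
    ensure_host_in_az(az_by_host.get, 'compute1', 'az1')  # passes silently
    ensure_host_in_az(az_by_host.get, 'compute1', None)   # passes: unrestricted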
@@ -4443,31 +4459,96 @@ class API:
 
     @block_extended_resource_request
     @check_instance_lock
-    @check_instance_state(vm_state=[vm_states.SHELVED,
-                                    vm_states.SHELVED_OFFLOADED])
-    def unshelve(self, context, instance, new_az=None):
-        """Restore a shelved instance."""
+    @check_instance_state(
+        vm_state=[vm_states.SHELVED, vm_states.SHELVED_OFFLOADED])
+    def unshelve(
+            self, context, instance, new_az=_sentinel, host=None):
+        """Restore a shelved instance.
+
+        :param context: the nova request context
+        :param instance: nova.objects.instance.Instance object
+        :param new_az: (optional) target AZ.
+                       If None is provided, the current AZ restriction
+                       will be removed from the instance.
+                       If the parameter is not provided, the current
+                       AZ restriction will not be changed.
+        :param host: (optional) a host to target
+        """
+        # Unshelving a shelved offloaded server will go through the
+        # scheduler to pick a new host, so we update the
+        # RequestSpec.availability_zone here. Note that if scheduling
+        # fails the RequestSpec will remain updated, which is not great.
+        # A bug is open to track this:
+        # https://bugs.launchpad.net/nova/+bug/1978573
+
+        az_passed = new_az is not self._sentinel
+
         request_spec = objects.RequestSpec.get_by_instance_uuid(
             context, instance.uuid)
-        if new_az:
+
+        # We need to check a list of preconditions and validate inputs first
+
+        # Ensure the instance is shelved offloaded
+        if az_passed or host:
+            self._check_offloaded(context, instance)
+
+        if az_passed and new_az:
+            # We have to ensure that the new AZ is valid
             self._validate_unshelve_az(context, instance, new_az)
-            LOG.debug("Replace the old AZ %(old_az)s in RequestSpec "
-                      "with a new AZ %(new_az)s of the instance.",
-                      {"old_az": request_spec.availability_zone,
-                       "new_az": new_az}, instance=instance)
-            # Unshelving a shelved offloaded server will go through the
-            # scheduler to pick a new host, so we update the
-            # RequestSpec.availability_zone here. Note that if scheduling
-            # fails the RequestSpec will remain updated, which is not great,
-            # but if we want to change that we need to defer updating the
-            # RequestSpec until conductor which probably means RPC changes to
-            # pass the new_az variable to conductor. This is likely low
-            # priority since the RequestSpec.availability_zone on a shelved
-            # offloaded server does not mean much anyway and clearly the user
-            # is trying to put the server in the target AZ.
-            request_spec.availability_zone = new_az
-            request_spec.save()
+
+        # This will be the AZ of the instance after the unshelve. It can be
+        # None, indicating that the instance is not pinned to any AZ after
+        # the unshelve.
+        expected_az_after_unshelve = (
+            request_spec.availability_zone
+            if not az_passed else new_az
+        )
+
+        # If a host is requested, we have to check that it exists and does
+        # not contradict the AZ of the instance
+        if host:
+            # Make sure only admin can unshelve to a specific host.
+            context.can(
+                shelve_policies.POLICY_ROOT % 'unshelve_to_host',
+                target={
+                    'user_id': instance.user_id,
+                    'project_id': instance.project_id
+                }
+            )
+            # Ensure that the requested host exists; otherwise raise
+            # a ComputeHostNotFound exception
+            objects.ComputeNode.get_first_node_by_host_for_old_compat(
+                context, host, use_slave=True)
+            # A specific host is requested, so we need to make sure that
+            # it does not contradict the AZ of the instance
+            self._ensure_host_in_az(
+                context, host, expected_az_after_unshelve)
+
+        if new_az is None:
+            LOG.debug(
+                'Unpin instance from AZ "%(old_az)s".',
+                {'old_az': request_spec.availability_zone},
+                instance=instance
+            )
+
+        LOG.debug(
+            'Unshelving instance with old availability_zone "%(old_az)s" to '
+            'new availability_zone "%(new_az)s" and host "%(host)s".',
+            {
+                'old_az': request_spec.availability_zone,
+                'new_az': new_az if az_passed else 'not provided',
+                'host': host,
+            },
+            instance=instance,
+        )
+
+        # OK, every precondition checks out; we just need to tell the
+        # scheduler where to put the instance.
+
+        # We have the expected AZ already calculated, so we just need to
+        # set it in the request_spec to drive the scheduling.
+        request_spec.availability_zone = expected_az_after_unshelve
+
+        # If a host is requested, we also need to tell the scheduler that.
+        if host:
+            request_spec.requested_destination = objects.Destination(
+                host=host)
+
+        request_spec.save()
 
         instance.task_state = task_states.UNSHELVING
         instance.save(expected_task_state=[None])
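Taken together, the sentinel default and the new host parameter give the rewritten unshelve() four distinct call shapes. An illustrative summary, not a verbatim Nova test; api, ctxt and instance stand for objects a caller would already have:

    api.unshelve(ctxt, instance)                   # keep the current AZ pinning
    api.unshelve(ctxt, instance, new_az=None)      # unpin the instance from its AZ
    api.unshelve(ctxt, instance, new_az='az2')     # pin to az2 (must be offloaded)
    api.unshelve(ctxt, instance, host='compute1')  # admin-only: target a host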
@@ -6612,19 +6693,7 @@ class KeypairAPI:
         }
         self.notifier.info(context, 'keypair.%s' % event_suffix, payload)
 
-    def _validate_new_key_pair(self, context, user_id, key_name, key_type):
-        safe_chars = "_- " + string.digits + string.ascii_letters
-        clean_value = "".join(x for x in key_name if x in safe_chars)
-        if clean_value != key_name:
-            raise exception.InvalidKeypair(
-                reason=_("Keypair name contains unsafe characters"))
-
-        try:
-            utils.check_string_length(key_name, min_length=1, max_length=255)
-        except exception.InvalidInput:
-            raise exception.InvalidKeypair(
-                reason=_('Keypair name must be string and between '
-                         '1 and 255 characters long'))
+    def _check_key_pair_quotas(self, context, user_id, key_name, key_type):
         try:
             objects.Quotas.check_deltas(context, {'key_pairs': 1}, user_id)
             local_limit.enforce_db_limit(context, local_limit.KEY_PAIRS,
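The removed name validation is not lost functionality: checks of this shape are typically enforced before this method runs (presumably at the API request-schema layer), leaving only the quota check here. For reference, the deleted logic amounts to the following standalone check, reconstructed from the removed lines; InvalidKeypair is a plain stand-in for exception.InvalidKeypair:

    import string

    class InvalidKeypair(Exception):
        pass

    def validate_keypair_name(key_name):
        if not isinstance(key_name, str) or not 1 <= len(key_name) <= 255:
            raise InvalidKeypair('Keypair name must be string and between '
                                 '1 and 255 characters long')
        safe_chars = '_- ' + string.digits + string.ascii_letters
        if any(ch not in safe_chars for ch in key_name):
            raise InvalidKeypair('Keypair name contains unsafe characters')

    validate_keypair_name('my-key-01')  # passes silently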
@@ -6638,7 +6707,7 @@ class KeypairAPI:
     def import_key_pair(self, context, user_id, key_name, public_key,
                         key_type=keypair_obj.KEYPAIR_TYPE_SSH):
         """Import a key pair using an existing public key."""
-        self._validate_new_key_pair(context, user_id, key_name, key_type)
+        self._check_key_pair_quotas(context, user_id, key_name, key_type)
 
         self._notify(context, 'import.start', key_name)
@@ -6673,7 +6742,7 @@ class KeypairAPI:
     def create_key_pair(self, context, user_id, key_name,
                         key_type=keypair_obj.KEYPAIR_TYPE_SSH):
         """Create a new key pair."""
-        self._validate_new_key_pair(context, user_id, key_name, key_type)
+        self._check_key_pair_quotas(context, user_id, key_name, key_type)
 
         keypair = objects.KeyPair(context)
         keypair.user_id = user_id