author    Sandy Walsh <sandy@sandywalsh.com>  2012-01-30 13:10:50 -0800
committer Chris Behrens <cbehrens@codestud.com>  2012-02-16 17:53:00 +0000
commit    26227b79e9246a87eeb83766cfcc8e96d294d28b (patch)
tree      cba29636bf65611a2816acfd455b503d79c43309
parent    f5e17bbc155203feb8bba4f34ed93d22b1b8e95b (diff)
download  nova-26227b79e9246a87eeb83766cfcc8e96d294d28b.tar.gz
Removed zones from api and distributed scheduler
There is a new Zones implementation coming that will use AMQP-to-AMQP
channels vs. the public API. This is being done for three reasons:

1. remove complications in the OpenStack API (and possibly allow EC2 Zones)
2. remove dependencies on keystone and novaclient
3. faster scheduling (fewer chances for race conditions)

Learn more here: http://wiki.openstack.org/EssexSchedulerImprovements

Change-Id: I6fe538923dd5ae19276afac2ac3311a285fd5c99
-rw-r--r--  doc/source/devref/distributed_scheduler.rst | 118
-rw-r--r--  doc/source/devref/zone.rst | 125
-rwxr-xr-x  doc/source/image_src/zones_distsched_illustrations.odp | bin 182810 -> 0 bytes
-rwxr-xr-x  doc/source/images/nova.compute.api.create_all_at_once.png | bin 62263 -> 0 bytes
-rw-r--r--  etc/nova/policy.json | 1
-rw-r--r--  nova/api/openstack/compute/contrib/admin_actions.py | 20
-rw-r--r--  nova/api/openstack/compute/contrib/console_output.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/consoles.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/disk_config.py | 6
-rw-r--r--  nova/api/openstack/compute/contrib/extended_status.py | 2
-rw-r--r--  nova/api/openstack/compute/contrib/server_diagnostics.py | 3
-rw-r--r--  nova/api/openstack/compute/contrib/zones.py | 228
-rw-r--r--  nova/api/openstack/compute/servers.py | 58
-rw-r--r--  nova/compute/api.py | 88
-rw-r--r--  nova/exception.py | 13
-rw-r--r--  nova/flags.py | 9
-rw-r--r--  nova/scheduler/api.py | 372
-rw-r--r--  nova/scheduler/distributed_scheduler.py | 184
-rw-r--r--  nova/scheduler/driver.py | 22
-rw-r--r--  nova/scheduler/least_cost.py | 18
-rw-r--r--  nova/scheduler/manager.py | 14
-rw-r--r--  nova/scheduler/multi.py | 4
-rw-r--r--  nova/scheduler/vsa.py | 2
-rw-r--r--  nova/scheduler/zone_manager.py | 168
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_extendedstatus.py | 4
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_zones.py | 283
-rw-r--r--  nova/tests/api/openstack/compute/test_extensions.py | 3
-rw-r--r--  nova/tests/api/openstack/compute/test_server_actions.py | 25
-rw-r--r--  nova/tests/api/openstack/compute/test_servers.py | 120
-rw-r--r--  nova/tests/scheduler/fakes.py | 2
-rw-r--r--  nova/tests/scheduler/test_distributed_scheduler.py | 201
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 32
-rw-r--r--  nova/tests/scheduler/test_zone_manager.py | 189
-rw-r--r--  nova/tests/test_compute.py | 31
34 files changed, 99 insertions, 2250 deletions
diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst
index a185095be2..ec72564d41 100644
--- a/doc/source/devref/distributed_scheduler.rst
+++ b/doc/source/devref/distributed_scheduler.rst
@@ -21,20 +21,14 @@
Distributed Scheduler
=====================
-The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Chance Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
+The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Chance Scheduler, which randomly selects a Host from the available pool. Or, if you need something a little fancier, you may want to use the Distributed Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
.. image:: /images/dating_service.png
-But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understand how to pass instance requests from Zone to Zone.
-
-This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capabilities of a Zone and its component services to make informed decisions on where a new instance should be created. When making this decision it consults not only all the Compute nodes in the current Zone, but the Compute nodes in each Child Zone. This continues recursively until the ideal host is found.
+The Distributed Scheduler (DS) supports filtering and weighing to make informed decisions on where a new instance should be created.
So, how does this all work?
-This document will explain the strategy employed by the `BaseScheduler`, which is the base for all schedulers designed to work across zones, and its derivations. You should read the :doc:`devguide/zones` documentation before reading this.
-
- .. image:: /images/base_scheduler.png
-
Costs & Weights
---------------
When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.
@@ -52,88 +46,12 @@ This Weight is computed for each Instance requested. If the customer asked for 1
.. image:: /images/costs_weights.png
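+
+As a toy illustration of the weighing described above (a sketch only; the
+function and attribute names here are illustrative, not the exact cost
+functions shipped in `nova.scheduler.least_cost`)::
+
+    def fill_first_cost_fn(host_state, weighing_properties):
+        # Favour hosts with less free RAM so instances pack tightly;
+        # a lower Cost means a more desirable host.
+        return host_state.free_ram_mb
+
+    def weigh_host(cost_fns, host_state, weighing_properties):
+        # The Weight is currently just the sum of the individual Costs.
+        return sum(fn(host_state, weighing_properties) for fn in cost_fns)
+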
-nova.scheduler.base_scheduler.BaseScheduler
-------------------------------------------------------
-As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `BaseScheduler` uses this information to make its decisions.
-
-Here is how it works:
-
- 1. The compute nodes are filtered and the nodes remaining are weighed.
- 2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
- 3. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
- 4. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
- 5. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
- 6. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.
-
- .. image:: /images/zone_overview.png
-
-`BaseScheduler` by itself is not capable of handling all the provisioning itself. You should also specify the filter classes and weighting classes to be used in determining which host is selected for new instance creation.
-
Filtering and Weighing
----------------------
-The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `BaseScheduler` are flexible and extensible.
+The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules are very subjective by nature: Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `DistributedScheduler` are flexible and extensible.
.. image:: /images/filtering.png
-Requesting a new instance
--------------------------
-(Note: The information below is out of date, as the `nova.compute.api.create_all_at_once()` functionality has merged into `nova.compute.api.create()` and the non-zone aware schedulers have been updated.)
-
-Prior to the `BaseScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.
-
-`nova.compute.api.create()` performed the following actions:
- 1. it validated all the fields passed into it.
- 2. it created an entry in the `Instance` table for each instance requested
- 3. it put one `run_instance` message in the scheduler queue for each instance requested
- 4. the schedulers picked off the messages and decided which compute node should handle the request.
- 5. the `run_instance` message was forwarded to the compute node for processing and the instance is created.
- 6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_ids` are valid.
-
- .. image:: /images/nova.compute.api.create.png
-
-Generally, the simplest schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones.
-
-The problem with this approach is each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possability of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.
-
-For the `BaseScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
- 1. it validates all the fields passed into it.
- 2. it creates a single `reservation_id` for all of instances created. This is a UUID.
- 3. it creates a single `run_instance` request in the scheduler queue
- 4. a scheduler picks the message off the queue and works on it.
- 5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
- 6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
- 7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
- 8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
- 9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
- 10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`.
-
- .. image:: /images/nova.compute.api.create_all_at_once.png
-
-The Catch
----------
-This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.
-
-When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many, many `select` calls issued to child Zones asking for estimates.
-
-Instead, we take a rather innovative approach to the problem. We encrypt all the child Zone internal details and pass them back the to parent Zone. In the case of a nested Zone layout, each nesting layer will encrypt the data from all of its children and pass that to its parent Zone. In the case of nested child Zones, each Zone re-encrypts the weighted list results and passes those values to the parent. Every Zone interface adds another layer of encryption, using its unique key.
-
-Once a host is selected, it will either be local to the Zone that received the initial API call, or one of its child Zones. In the latter case, the parent Zone it simply passes the encrypted data for the selected host back to each of its child Zones during the `POST /servers` call as an extra parameter. If the child Zone can decrypt the data, then it is the correct Zone for the selected host; all other Zones will not be able to decrypt the data and will discard the request. This is why it is critical that each Zone has a unique value specified in its config in `--build_plan_encryption_key`: it controls the ability to locate the selected host without having to hard-code path information or other identifying information. The child Zone can then act on the decrypted data and either go directly to the Compute node previously selected if it is located in that Zone, or repeat the process with its child Zones until the target Zone containing the selected host is reached.
-
-Throughout the `nova.api.openstack.servers`, `nova.api.openstack.zones`, `nova.compute.api.create*` and `nova.scheduler.base_scheduler` code you'll see references to `blob` and `child_blob`. These are the encrypted hints about which Compute node to use.
-
-Reservation IDs
----------------
-
-The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created.
-
-NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would not be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
-
-We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new command `POST /zones/boot` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches.
-
-Finally, we need to give the user a way to get information on each of the instances created under this `reservation_id`. Fortunately, this is still possible with the existing `GET /servers` command, so long as we add a new optional `reservation_id` parameter.
-
-`python-novaclient` will be extended to support both of these changes.
-
Host Filter
-----------
@@ -141,39 +59,23 @@ As we mentioned earlier, filtering hosts is a very deployment-specific process.
The filters used are determined by the `--default_host_filters` flag, which specifies a list of Python classes. By default this flag is set to `[AllHostsFilter]`, which simply returns all available hosts. But there are others:
- * `InstanceTypeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.
-
- * `JSONFilter` filters hosts based on simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `InstanceType` specifies. See `nova.tests.test_host_filter` for examples.
-
-To create your own `HostFilter` the user simply has to derive from `nova.scheduler.filters.AbstractHostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of available hosts is in the `host_list` parameter passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be. By default, it is the capabilities reported by the host.
-
-Cost Scheduler Weighing
------------------------
-Every `BaseScheduler` subclass should also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `BaseScheduler` when all the results have been assembled.
+ * `ComputeFilter` provides host filtering based on the memory and disk size specified in the `InstanceType` record passed into `run_instance`.
-Simple Scheduling Across Zones
-----------------------------
-The `BaseScheduler` uses the default `filter_hosts` method, which will use either any filters specified in the request's `filter` parameter, or, if that is not specified, the filters specified in the `FLAGS.default_host_filters` setting. Its `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
+ * `JSONFilter` filters hosts based on a simple JSON expression grammar. Using a LISP-like JSON structure the caller can request instances based on criteria well beyond what `ComputeFilter` specifies. See `nova.tests.scheduler.test_host_filters` for examples, and the sample query below.
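+
+   As an illustration only (the `$`-prefixed names are assumed host state
+   attributes), a query that accepts hosts with at least 1 GB of free RAM
+   and 200 GB of free disk might look like::
+
+       ['and',
+           ['>=', '$free_ram_mb', 1024],
+           ['>=', '$free_disk_mb', 200 * 1024]]
+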
-The `--scheduler_driver` flag is how you specify the scheduler class name.
+To create your own `HostFilter`, simply derive from `nova.scheduler.filters.AbstractHostFilter` and implement one method: `host_passes`. This method accepts a `HostState` instance describing a host as well as a `filter_properties` dictionary. Host capabilities can be found in `HostState.capabilities`, and other properties, such as `instance_type`, can be found in `filter_properties`. Your method should return True if the host passes the filter.
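+
+A minimal sketch of such a filter (the class name and the `free_ram_mb`
+attribute are illustrative assumptions, not shipped code)::
+
+    from nova.scheduler import filters
+
+    class MinimumRamFilter(filters.AbstractHostFilter):
+        """Only pass hosts with enough free RAM for the requested instance."""
+
+        def host_passes(self, host_state, filter_properties):
+            instance_type = filter_properties.get('instance_type') or {}
+            requested_ram = instance_type.get('memory_mb', 0)
+            return host_state.free_ram_mb >= requested_ram
+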
Flags
-----
-All this Zone and Distributed Scheduler stuff can seem a little daunting to configure, but it's actually not too bad. Here are some of the main flags you should set in your `nova.conf` file:
+Here are some of the main flags you should set in your `nova.conf` file:
::
- --enable_zone_routing=true
- --zone_name=zone1
- --build_plan_encryption_key=c286696d887c9aa0611bbb3e2025a45b
- --scheduler_driver=nova.scheduler.base_scheduler.BaseScheduler
- --default_host_filter=nova.scheduler.filters.AllHostsFilter
+ --scheduler_driver=nova.scheduler.distributed_scheduler.DistributedScheduler
+ --default_host_filters=nova.scheduler.filters.AllHostsFilter
-`--enable_zone_routing` must be set for OS API commands such as `create()`, `pause()` and `delete()` to get routed from Zone to Zone when looking for instances.
-`--zone_name` is only required in child Zones. The default Zone name is `nova`, but you may want to name your child Zones something useful. Duplicate Zone names are not an issue.
-`build_plan_encryption_key` is the SHA-256 key for encrypting/decrypting the Host information when it leaves a Zone. Be sure to change this key for each Zone you create. Do not duplicate keys.
-`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.base_scheduler.BaseScheduler`.
+`scheduler_driver` is the real workhorse of the operation. For Distributed Scheduler, you need to specify a class derived from `nova.scheduler.distributed_scheduler.DistributedScheduler`.
`default_host_filters` is the list of host filters to be used for filtering candidate Compute nodes.
Some optional flags which are handy for debugging are:
diff --git a/doc/source/devref/zone.rst b/doc/source/devref/zone.rst
deleted file mode 100644
index 241cbf46c7..0000000000
--- a/doc/source/devref/zone.rst
+++ /dev/null
@@ -1,125 +0,0 @@
-..
- Copyright 2010-2011 OpenStack LLC
- All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may
- not use this file except in compliance with the License. You may obtain
- a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- License for the specific language governing permissions and limitations
- under the License.
-
-Zones
-=====
-
-A Nova deployment is called a Zone. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers.
-
-The idea behind Zones is, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion.
-
-Zones only know about their immediate children, they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones and, in those cases, the grandchild's internal structure would not be known to the grand-parent.
-
-Zones share nothing. They communicate via the public OpenStack API only. No database, queue, user or project definition is shared between Zones.
-
-
-Capabilities
-------------
-Routing between Zones is based on the Capabilities of that Zone. Capabilities are nothing more than key/value pairs. Values are multi-value, with each value separated with a semicolon (`;`). When expressed as a string they take the form:
-
-::
-
- key=value;value;value, key=value;value;value
-
-Zones have Capabilities which are general to the Zone and are set via `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag.
-
-Flow within a Zone
-------------------
-The brunt of the work within a Zone is done in the Scheduler Service. The Scheduler is responsible for:
-- collecting capability messages from the Compute, Volume and Network nodes,
-- polling the child Zones for their status and
-- providing data to the Distributed Scheduler for performing load balancing calculations
-
-Inter-service communication within a Zone is done with RabbitMQ. Each class of Service (Compute, Volume and Network) has both a named message exchange (particular to that host) and a general message exchange (particular to that class of service). Messages sent to these exchanges are picked off in round-robin fashion. Zones introduce a new fan-out exchange per service. Messages sent to the fan-out exchange are picked up by all services of a particular class. This fan-out exchange is used by the Scheduler services to receive capability messages from the Compute, Volume and Network nodes.
-
-These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing.
-
-The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova").
-
-Zone administrative functions
------------------------------
-Zone administrative operations are usually done using python-novaclient_
-
-.. _python-novaclient: https://github.com/rackspace/python-novaclient
-
-Finally you need to enable Zone Forwarding. This will be used by the Distributed Scheduler initiative currently underway. Set `--enable_zone_routing=true` to enable this feature.
-
-Find out about this Zone
-------------------------
-In any Zone you can find the Zone's name and capabilities with the ``nova zone-info`` command.
-
-::
-
- alice@novadev:~$ nova zone-info
- +-----------------+---------------+
- | Property | Value |
- +-----------------+---------------+
- | compute_cpu | 0.7,0.7 |
- | compute_disk | 123000,123000 |
- | compute_network | 800,800 |
- | hypervisor | xenserver |
- | name | nova |
- | network_cpu | 0.7,0.7 |
- | network_disk | 123000,123000 |
- | network_network | 800,800 |
- | os | linux |
- +-----------------+---------------+
-
-This equates to a GET operation on `.../zones/info`. If you have no child Zones defined you'll usually only get back the default `name`, `hypervisor` and `os` capabilities. Otherwise you'll get back a tuple of min, max values for each capabilities of all the hosts of all the services running in the child zone. These take the `<service>_<capability> = <min>,<max>` format.
-
-Adding a child Zone
--------------------
-Any Zone can be a parent Zone. Children are associated to a Zone. The Zone where this command originates from is known as the Parent Zone. Routing is only ever conducted from a Zone to its children, never the other direction. From a parent zone you can add a child zone with the following command:
-
-::
-
- nova zone-add <child zone api url> <username> <nova api key>
-
-You can get the `child zone api url`, `nova api key` and `username` from the `novarc` file in the child zone. For example:
-
-::
-
- export NOVA_API_KEY="3bd1af06-6435-4e23-a827-413b2eb86934"
- export NOVA_USERNAME="alice"
- export NOVA_URL="http://192.168.2.120:8774/v1.0/"
-
-
-This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done with this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information.
-
-Getting a list of child Zones
------------------------------
-
-::
-
- nova zone-list
-
- alice@novadev:~$ nova zone-list
- +----+-------+-----------+--------------------------------------------+---------------------------------+
- | ID | Name | Is Active | Capabilities | API URL |
- +----+-------+-----------+--------------------------------------------+---------------------------------+
- | 2 | zone1 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.108:8774/v1.0/ |
- | 3 | zone2 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.115:8774/v1.0/ |
- +----+-------+-----------+--------------------------------------------+---------------------------------+
-
-This equates to a GET operation to `.../zones`.
-
-Removing a child Zone
----------------------
-::
-
- nova zone-delete <N>
-
-This equates to a DELETE call to `.../zones/N`. The Zone with ID=N will be removed. This will only remove the zone entry from the current (parent) Zone, no child Zones are affected. Removing a Child Zone doesn't affect any other part of the hierarchy.
diff --git a/doc/source/image_src/zones_distsched_illustrations.odp b/doc/source/image_src/zones_distsched_illustrations.odp
deleted file mode 100755
index 8762a183b4..0000000000
--- a/doc/source/image_src/zones_distsched_illustrations.odp
+++ /dev/null
Binary files differ
diff --git a/doc/source/images/nova.compute.api.create_all_at_once.png b/doc/source/images/nova.compute.api.create_all_at_once.png
deleted file mode 100755
index c3ce86d039..0000000000
--- a/doc/source/images/nova.compute.api.create_all_at_once.png
+++ /dev/null
Binary files differ
diff --git a/etc/nova/policy.json b/etc/nova/policy.json
index 25272c3681..f0457fd9f4 100644
--- a/etc/nova/policy.json
+++ b/etc/nova/policy.json
@@ -53,7 +53,6 @@
"compute_extension:virtual_storage_arrays": [],
"compute_extension:volumes": [],
"compute_extension:volumetypes": [],
- "compute_extension:zones": [],
"volume:create": [],
diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py
index 69340648bb..b1bfaf6acc 100644
--- a/nova/api/openstack/compute/contrib/admin_actions.py
+++ b/nova/api/openstack/compute/contrib/admin_actions.py
@@ -45,8 +45,6 @@ class AdminActionsController(wsgi.Controller):
# TODO(bcwaldon): These action names should be prefixed with 'os-'
@wsgi.action('pause')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _pause(self, req, id, body):
"""Permit Admins to pause the server"""
ctxt = req.environ['nova.context']
@@ -64,8 +62,6 @@ class AdminActionsController(wsgi.Controller):
return webob.Response(status_int=202)
@wsgi.action('unpause')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _unpause(self, req, id, body):
"""Permit Admins to unpause the server"""
ctxt = req.environ['nova.context']
@@ -83,8 +79,6 @@ class AdminActionsController(wsgi.Controller):
return webob.Response(status_int=202)
@wsgi.action('suspend')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _suspend(self, req, id, body):
"""Permit admins to suspend the server"""
context = req.environ['nova.context']
@@ -102,8 +96,6 @@ class AdminActionsController(wsgi.Controller):
return webob.Response(status_int=202)
@wsgi.action('resume')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _resume(self, req, id, body):
"""Permit admins to resume the server from suspend"""
context = req.environ['nova.context']
@@ -121,8 +113,6 @@ class AdminActionsController(wsgi.Controller):
return webob.Response(status_int=202)
@wsgi.action('migrate')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _migrate(self, req, id, body):
"""Permit admins to migrate a server to a new host"""
context = req.environ['nova.context']
@@ -139,8 +129,6 @@ class AdminActionsController(wsgi.Controller):
return webob.Response(status_int=202)
@wsgi.action('resetNetwork')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _reset_network(self, req, id, body):
"""Permit admins to reset networking on an server"""
context = req.environ['nova.context']
@@ -155,8 +143,6 @@ class AdminActionsController(wsgi.Controller):
return webob.Response(status_int=202)
@wsgi.action('injectNetworkInfo')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _inject_network_info(self, req, id, body):
"""Permit admins to inject network info into a server"""
context = req.environ['nova.context']
@@ -173,8 +159,6 @@ class AdminActionsController(wsgi.Controller):
return webob.Response(status_int=202)
@wsgi.action('lock')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _lock(self, req, id, body):
"""Permit admins to lock a server"""
context = req.environ['nova.context']
@@ -191,8 +175,6 @@ class AdminActionsController(wsgi.Controller):
return webob.Response(status_int=202)
@wsgi.action('unlock')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _unlock(self, req, id, body):
"""Permit admins to lock a server"""
context = req.environ['nova.context']
@@ -277,8 +259,6 @@ class AdminActionsController(wsgi.Controller):
return resp
@wsgi.action('os-migrateLive')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _migrate_live(self, req, id, body):
"""Permit admins to (live) migrate a server to a new host"""
context = req.environ["nova.context"]
diff --git a/nova/api/openstack/compute/contrib/console_output.py b/nova/api/openstack/compute/contrib/console_output.py
index 0752c3c70b..6fc7fcbf01 100644
--- a/nova/api/openstack/compute/contrib/console_output.py
+++ b/nova/api/openstack/compute/contrib/console_output.py
@@ -41,7 +41,7 @@ class ConsoleOutputController(wsgi.Controller):
authorize(context)
try:
- instance = self.compute_api.routing_get(context, id)
+ instance = self.compute_api.get(context, id)
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Instance not found'))
diff --git a/nova/api/openstack/compute/contrib/consoles.py b/nova/api/openstack/compute/contrib/consoles.py
index f308c67174..833eb0f395 100644
--- a/nova/api/openstack/compute/contrib/consoles.py
+++ b/nova/api/openstack/compute/contrib/consoles.py
@@ -44,7 +44,7 @@ class ConsolesController(wsgi.Controller):
raise webob.exc.HTTPBadRequest(_('Missing type specification'))
try:
- instance = self.compute_api.routing_get(context, id)
+ instance = self.compute_api.get(context, id)
except exception.NotFound:
raise webob.exc.HTTPNotFound(_('Instance not found'))
diff --git a/nova/api/openstack/compute/contrib/disk_config.py b/nova/api/openstack/compute/contrib/disk_config.py
index 5b249478a4..e338fa5ee6 100644
--- a/nova/api/openstack/compute/contrib/disk_config.py
+++ b/nova/api/openstack/compute/contrib/disk_config.py
@@ -103,12 +103,6 @@ class ServersDiskConfigTemplate(xmlutil.TemplateBuilder):
class ServerDiskConfigController(wsgi.Controller):
def _add_disk_config(self, context, servers):
- # Filter out any servers that already have the key set
- # (most likely from a remote zone)
- servers = [s for s in servers if API_DISK_CONFIG not in s]
- if not servers:
- return
-
# Get DB information for servers
uuids = [server['id'] for server in servers]
db_servers = db.instance_get_all_by_filters(context,
diff --git a/nova/api/openstack/compute/contrib/extended_status.py b/nova/api/openstack/compute/contrib/extended_status.py
index 9447bf3000..88a5f12a4e 100644
--- a/nova/api/openstack/compute/contrib/extended_status.py
+++ b/nova/api/openstack/compute/contrib/extended_status.py
@@ -56,7 +56,7 @@ class ExtendedStatusController(wsgi.Controller):
resp_obj.attach(xml=ExtendedStatusTemplate())
try:
- instance = self.compute_api.routing_get(context, id)
+ instance = self.compute_api.get(context, id)
except exception.NotFound:
explanation = _("Server not found.")
raise exc.HTTPNotFound(explanation=explanation)
diff --git a/nova/api/openstack/compute/contrib/server_diagnostics.py b/nova/api/openstack/compute/contrib/server_diagnostics.py
index 49afcac01a..c03cd2d470 100644
--- a/nova/api/openstack/compute/contrib/server_diagnostics.py
+++ b/nova/api/openstack/compute/contrib/server_diagnostics.py
@@ -20,7 +20,6 @@ from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
-from nova.scheduler import api as scheduler_api
authorize = extensions.extension_authorizer('compute', 'server_diagnostics')
@@ -38,8 +37,6 @@ class ServerDiagnosticsTemplate(xmlutil.TemplateBuilder):
class ServerDiagnosticsController(object):
@wsgi.serializers(xml=ServerDiagnosticsTemplate)
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
diff --git a/nova/api/openstack/compute/contrib/zones.py b/nova/api/openstack/compute/contrib/zones.py
deleted file mode 100644
index 8db16f235a..0000000000
--- a/nova/api/openstack/compute/contrib/zones.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""The zones extension."""
-
-import json
-
-from nova.api.openstack import common
-from nova.api.openstack.compute import servers
-from nova.api.openstack import extensions
-from nova.api.openstack import xmlutil
-from nova.api.openstack import wsgi
-from nova.compute import api as compute
-from nova import crypto
-from nova import exception
-from nova import flags
-from nova import log as logging
-import nova.scheduler.api
-
-
-LOG = logging.getLogger(__name__)
-FLAGS = flags.FLAGS
-authorize = extensions.extension_authorizer('compute', 'zones')
-
-
-class CapabilitySelector(object):
- def __call__(self, obj, do_raise=False):
- return [(k, v) for k, v in obj.items()
- if k not in ('id', 'api_url', 'name', 'capabilities')]
-
-
-def make_zone(elem):
- elem.set('id')
- elem.set('api_url')
- elem.set('name')
- elem.set('capabilities')
-
- cap = xmlutil.SubTemplateElement(elem, xmlutil.Selector(0),
- selector=CapabilitySelector())
- cap.text = 1
-
-
-zone_nsmap = {None: wsgi.XMLNS_V10}
-
-
-class ZoneTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('zone', selector='zone')
- make_zone(root)
- return xmlutil.MasterTemplate(root, 1, nsmap=zone_nsmap)
-
-
-class ZonesTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('zones')
- elem = xmlutil.SubTemplateElement(root, 'zone', selector='zones')
- make_zone(elem)
- return xmlutil.MasterTemplate(root, 1, nsmap=zone_nsmap)
-
-
-class WeightsTemplate(xmlutil.TemplateBuilder):
- def construct(self):
- root = xmlutil.TemplateElement('weights')
- weight = xmlutil.SubTemplateElement(root, 'weight', selector='weights')
- blob = xmlutil.SubTemplateElement(weight, 'blob')
- blob.text = 'blob'
- inner_weight = xmlutil.SubTemplateElement(weight, 'weight')
- inner_weight.text = 'weight'
- return xmlutil.MasterTemplate(root, 1, nsmap=zone_nsmap)
-
-
-def _filter_keys(item, keys):
- """
- Filters all model attributes except for keys
- item is a dict
-
- """
- return dict((k, v) for k, v in item.iteritems() if k in keys)
-
-
-def _exclude_keys(item, keys):
- return dict((k, v) for k, v in item.iteritems() if k and (k not in keys))
-
-
-def _scrub_zone(zone):
- return _exclude_keys(zone, ('username', 'password', 'created_at',
- 'deleted', 'deleted_at', 'updated_at'))
-
-
-def check_encryption_key(func):
- def wrapped(*args, **kwargs):
- if not FLAGS.build_plan_encryption_key:
- raise exception.Error(_("--build_plan_encryption_key not set"))
- return func(*args, **kwargs)
- return wrapped
-
-
-class Controller(object):
- """Controller for Zone resources."""
-
- def __init__(self):
- self.compute_api = compute.API()
-
- @wsgi.serializers(xml=ZonesTemplate)
- def index(self, req):
- """Return all zones in brief"""
- authorize(req.environ['nova.context'])
- # Ask the ZoneManager in the Scheduler for most recent data,
- # or fall-back to the database ...
- items = nova.scheduler.api.get_zone_list(req.environ['nova.context'])
- items = common.limited(items, req)
- items = [_scrub_zone(item) for item in items]
- return dict(zones=items)
-
- @wsgi.serializers(xml=ZonesTemplate)
- def detail(self, req):
- """Return all zones in detail"""
- return self.index(req)
-
- @wsgi.serializers(xml=ZoneTemplate)
- def info(self, req):
- """Return name and capabilities for this zone."""
- context = req.environ['nova.context']
- authorize(context)
- zone_capabs = nova.scheduler.api.get_zone_capabilities(context)
- # NOTE(comstud): This should probably return, instead:
- # {'zone': {'name': FLAGS.zone_name,
- # 'capabilities': zone_capabs}}
- zone_capabs['name'] = FLAGS.zone_name
- return dict(zone=zone_capabs)
-
- @wsgi.serializers(xml=ZoneTemplate)
- def show(self, req, id):
- """Return data about the given zone id"""
- context = req.environ['nova.context']
- authorize(context)
- zone_id = int(id)
- zone = nova.scheduler.api.zone_get(context, zone_id)
- return dict(zone=_scrub_zone(zone))
-
- def delete(self, req, id):
- """Delete a child zone entry."""
- authorize(req.environ['nova.context'])
- zone_id = int(id)
- nova.scheduler.api.zone_delete(req.environ['nova.context'], zone_id)
- return {}
-
- @wsgi.serializers(xml=ZoneTemplate)
- @wsgi.deserializers(xml=servers.CreateDeserializer)
- def create(self, req, body):
- """Create a child zone entry."""
- context = req.environ['nova.context']
- authorize(context)
- zone = nova.scheduler.api.zone_create(context, body["zone"])
- return dict(zone=_scrub_zone(zone))
-
- @wsgi.serializers(xml=ZoneTemplate)
- def update(self, req, id, body):
- """Update a child zone entry."""
- context = req.environ['nova.context']
- authorize(context)
- zone_id = int(id)
- zone = nova.scheduler.api.zone_update(context, zone_id, body["zone"])
- return dict(zone=_scrub_zone(zone))
-
- @wsgi.serializers(xml=WeightsTemplate)
- @check_encryption_key
- def select(self, req, body):
- """Returns a weighted list of costs to create instances
- of desired capabilities."""
- context = req.environ['nova.context']
- authorize(context)
- specs = json.loads(body)
- build_plan = nova.scheduler.api.select(context, specs=specs)
- cooked = self._scrub_build_plan(build_plan)
- return {"weights": cooked}
-
- def _scrub_build_plan(self, build_plan):
- """Remove all the confidential data and return a sanitized
- version of the build plan. Include an encrypted full version
- of the weighting entry so we can get back to it later."""
- encryptor = crypto.encryptor(FLAGS.build_plan_encryption_key)
- cooked = []
- for entry in build_plan:
- json_entry = json.dumps(entry)
- cipher_text = encryptor(json_entry)
- cooked.append(dict(weight=entry['weight'],
- blob=cipher_text))
- return cooked
-
-
-class Zones(extensions.ExtensionDescriptor):
- """Enables zones-related functionality such as adding child zones,
- listing child zones, getting the capabilities of the local zone,
- and returning build plans to parent zones' schedulers
- """
-
- name = "Zones"
- alias = "os-zones"
- namespace = "http://docs.openstack.org/compute/ext/zones/api/v1.1"
- updated = "2011-09-21T00:00:00+00:00"
-
- def get_resources(self):
- #NOTE(bcwaldon): This resource should be prefixed with 'os-'
- coll_actions = {
- 'detail': 'GET',
- 'info': 'GET',
- 'select': 'POST',
- }
-
- res = extensions.ResourceExtension('zones',
- Controller(),
- collection_actions=coll_actions)
- return [res]
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index 649566ec93..87e66b71fa 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -33,7 +33,6 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova.rpc import common as rpc_common
-from nova.scheduler import api as scheduler_api
from nova import utils
@@ -417,18 +416,14 @@ class Controller(wsgi.Controller):
remove_invalid_options(context, search_opts,
self._get_server_search_options())
- # Convert local_zone_only into a boolean
- search_opts['local_zone_only'] = utils.bool_from_str(
- search_opts.get('local_zone_only', False))
-
- # If search by 'status', we need to convert it to 'vm_state'
- # to pass on to child zones.
- if 'status' in search_opts:
- status = search_opts['status']
+ # Verify search by 'status' contains a valid status.
+ # Convert it to filter by vm_state for compute_api.
+ status = search_opts.pop('status', None)
+ if status is not None:
state = common.vm_state_from_status(status)
if state is None:
- reason = _('Invalid server status: %(status)s') % locals()
- raise exception.InvalidInput(reason=reason)
+ msg = _('Invalid server status: %(status)s') % locals()
+ raise exc.HTTPBadRequest(explanation=msg)
search_opts['vm_state'] = state
if 'changes-since' in search_opts:
@@ -474,7 +469,7 @@ class Controller(wsgi.Controller):
def _get_server(self, context, instance_uuid):
"""Utility function for looking up an instance by uuid"""
try:
- return self.compute_api.routing_get(context, instance_uuid)
+ return self.compute_api.get(context, instance_uuid)
except exception.NotFound:
raise exc.HTTPNotFound()
@@ -602,13 +597,11 @@ class Controller(wsgi.Controller):
raise exc.HTTPBadRequest(explanation=expl)
@wsgi.serializers(xml=ServerTemplate)
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def show(self, req, id):
""" Returns server details by server id """
try:
context = req.environ['nova.context']
- instance = self.compute_api.routing_get(context, id)
+ instance = self.compute_api.get(context, id)
self._add_instance_faults(context, [instance])
return self._view_builder.show(req, instance)
except exception.NotFound:
@@ -684,8 +677,6 @@ class Controller(wsgi.Controller):
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
- zone_blob = server_dict.get('blob')
-
# optional openstack extensions:
key_name = server_dict.get('key_name')
user_data = server_dict.get('user_data')
@@ -698,14 +689,6 @@ class Controller(wsgi.Controller):
block_device_mapping = self._get_block_device_mapping(server_dict)
- # Only allow admins to specify their own reservation_ids
- # This is really meant to allow zones to work.
- reservation_id = server_dict.get('reservation_id')
- if all([reservation_id is not None,
- reservation_id != '',
- not context.is_admin]):
- reservation_id = None
-
ret_resv_id = server_dict.get('return_reservation_id', False)
min_count = server_dict.get('min_count')
@@ -736,8 +719,6 @@ class Controller(wsgi.Controller):
access_ip_v6=access_ip_v6,
injected_files=injected_files,
admin_password=password,
- zone_blob=zone_blob,
- reservation_id=reservation_id,
min_count=min_count,
max_count=max_count,
requested_networks=requested_networks,
@@ -795,7 +776,6 @@ class Controller(wsgi.Controller):
self.compute_api.delete(context, instance)
@wsgi.serializers(xml=ServerTemplate)
- @scheduler_api.redirect_handler
def update(self, req, id, body):
"""Update server then pass on to version-specific controller"""
if len(req.body) == 0:
@@ -827,7 +807,7 @@ class Controller(wsgi.Controller):
body['server']['auto_disk_config'])
update_dict['auto_disk_config'] = auto_disk_config
- instance = self.compute_api.routing_get(ctxt, id)
+ instance = self.compute_api.get(ctxt, id)
try:
self.compute_api.update(ctxt, instance, **update_dict)
@@ -843,8 +823,6 @@ class Controller(wsgi.Controller):
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('confirmResize')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, id)
@@ -865,8 +843,6 @@ class Controller(wsgi.Controller):
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('revertResize')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, id)
@@ -887,8 +863,6 @@ class Controller(wsgi.Controller):
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('reboot')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
valid_reboot_types = ['HARD', 'SOFT']
@@ -935,8 +909,6 @@ class Controller(wsgi.Controller):
return webob.Response(status_int=202)
@wsgi.response(204)
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def delete(self, req, id):
""" Destroys a server """
try:
@@ -975,8 +947,6 @@ class Controller(wsgi.Controller):
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('changePassword')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _action_change_password(self, req, id, body):
context = req.environ['nova.context']
if (not 'changePassword' in body
@@ -1007,8 +977,6 @@ class Controller(wsgi.Controller):
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('resize')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _action_resize(self, req, id, body):
""" Resizes a given instance to the flavor size requested """
try:
@@ -1030,8 +998,6 @@ class Controller(wsgi.Controller):
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('rebuild')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes"""
try:
@@ -1115,8 +1081,6 @@ class Controller(wsgi.Controller):
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('createImage')
- @exception.novaclient_converter
- @scheduler_api.redirect_handler
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
@@ -1173,8 +1137,8 @@ class Controller(wsgi.Controller):
def _get_server_search_options(self):
"""Return server search options allowed by non-admin"""
- return ('reservation_id', 'name', 'local_zone_only',
- 'status', 'image', 'flavor', 'changes-since')
+ return ('reservation_id', 'name', 'status', 'image', 'flavor',
+ 'changes-since')
def create_resource():
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 514f0c133d..637be01af8 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -56,7 +56,6 @@ find_host_timeout_opt = cfg.StrOpt('find_host_timeout',
FLAGS = flags.FLAGS
FLAGS.register_opt(find_host_timeout_opt)
-flags.DECLARE('enable_zone_routing', 'nova.scheduler.api')
flags.DECLARE('consoleauth_topic', 'nova.consoleauth')
@@ -179,11 +178,12 @@ class API(base.Base):
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
- injected_files, admin_password, zone_blob,
- reservation_id, access_ip_v4, access_ip_v6,
+ injected_files, admin_password,
+ access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
- create_instance_here=False, scheduler_hints=None):
+ reservation_id=None, create_instance_here=False,
+ scheduler_hints=None):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation."""
@@ -360,7 +360,7 @@ class API(base.Base):
instances = self._schedule_run_instance(
rpc_method,
context, base_options,
- instance_type, zone_blob,
+ instance_type,
availability_zone, injected_files,
admin_password, image,
num_instances, requested_networks,
@@ -532,7 +532,7 @@ class API(base.Base):
def _schedule_run_instance(self,
rpc_method,
context, base_options,
- instance_type, zone_blob,
+ instance_type,
availability_zone, injected_files,
admin_password, image,
num_instances,
@@ -552,7 +552,6 @@ class API(base.Base):
'image': image,
'instance_properties': base_options,
'instance_type': instance_type,
- 'blob': zone_blob,
'num_instances': num_instances,
'block_device_mapping': block_device_mapping,
'security_group': security_group,
@@ -574,10 +573,9 @@ class API(base.Base):
display_name=None, display_description=None,
key_name=None, key_data=None, security_group=None,
availability_zone=None, user_data=None, metadata=None,
- injected_files=None, admin_password=None, zone_blob=None,
- reservation_id=None, block_device_mapping=None,
- access_ip_v4=None, access_ip_v6=None,
- requested_networks=None, config_drive=None,
+ injected_files=None, admin_password=None,
+ block_device_mapping=None, access_ip_v4=None,
+ access_ip_v6=None, requested_networks=None, config_drive=None,
auto_disk_config=None, scheduler_hints=None):
"""
Provision instances, sending instance information to the
@@ -600,11 +598,10 @@ class API(base.Base):
check_policy(context, 'create:attach_volume', target)
# We can create the DB entry for the instance here if we're
- # only going to create 1 instance and we're in a single
- # zone deployment. This speeds up API responses for builds
+ # only going to create 1 instance.
+ # This speeds up API responses for builds
# as we don't need to wait for the scheduler.
- create_instance_here = (max_count == 1 and
- not FLAGS.enable_zone_routing)
+ create_instance_here = max_count == 1
(instances, reservation_id) = self._create_instance(
context, instance_type,
@@ -613,8 +610,8 @@ class API(base.Base):
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
- injected_files, admin_password, zone_blob,
- reservation_id, access_ip_v4, access_ip_v6,
+ injected_files, admin_password,
+ access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
create_instance_here=create_instance_here,
@@ -805,7 +802,6 @@ class API(base.Base):
params=params)
@wrap_check_policy
- @scheduler_api.reroute_compute("update")
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
@@ -823,7 +819,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.ERROR])
- @scheduler_api.reroute_compute("soft_delete")
def soft_delete(self, context, instance):
"""Terminate an instance."""
instance_uuid = instance["uuid"]
@@ -870,7 +865,6 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.BUILDING,
vm_states.ERROR, vm_states.RESCUED,
vm_states.SHUTOFF, vm_states.STOPPED])
- @scheduler_api.reroute_compute("delete")
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate %s"), instance["uuid"])
@@ -882,7 +876,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.SOFT_DELETE])
- @scheduler_api.reroute_compute("restore")
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
self.update(context,
@@ -901,7 +894,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.SOFT_DELETE])
- @scheduler_api.reroute_compute("force_delete")
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete(context, instance)
@@ -910,7 +902,6 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.RESCUED],
task_state=[None, task_states.RESIZE_VERIFY])
- @scheduler_api.reroute_compute("stop")
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
instance_uuid = instance["uuid"]
@@ -982,16 +973,6 @@ class API(base.Base):
inst['name'] = instance['name']
return inst
- @scheduler_api.reroute_compute("get")
- def routing_get(self, context, instance_id):
- """A version of get with special routing characteristics.
-
- Use this method instead of get() if this is the only operation you
-        intend to do. It will route to novaclient.get if the instance is not
- found.
- """
- return self.get(context, instance_id)
-
def get_all(self, context, search_opts=None):
"""Get all instances filtered by one of the given parameters.
@@ -1065,8 +1046,6 @@ class API(base.Base):
except ValueError:
return []
- local_zone_only = search_opts.get('local_zone_only', False)
-
inst_models = self._get_instances_by_filters(context, filters)
# Convert the models to dictionaries
@@ -1077,25 +1056,6 @@ class API(base.Base):
instance['name'] = inst_model['name']
instances.append(instance)
- if local_zone_only:
- return instances
-
- # Recurse zones. Send along the un-modified search options we received.
- children = scheduler_api.call_zone_method(context,
- "list",
- errors_to_ignore=[novaclient.exceptions.NotFound],
- novaclient_collection_name="servers",
- search_opts=search_opts)
-
- for zone, servers in children:
- # 'servers' can be None if a 404 was returned by a zone
- if servers is None:
- continue
- for server in servers:
- # Results are ready to send to user. No need to scrub.
- server._info['_is_precooked'] = True
- instances.append(server._info)
-
return instances
def _get_instances_by_filters(self, context, filters):
@@ -1155,7 +1115,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[None, task_states.RESIZE_VERIFY])
- @scheduler_api.reroute_compute("backup")
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
@@ -1175,7 +1134,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[None, task_states.RESIZE_VERIFY])
- @scheduler_api.reroute_compute("snapshot")
def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
@@ -1257,7 +1215,6 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.RESCUED],
task_state=[None, task_states.RESIZE_VERIFY])
- @scheduler_api.reroute_compute("reboot")
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
state = {'SOFT': task_states.REBOOTING,
@@ -1280,7 +1237,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[None, task_states.RESIZE_VERIFY])
- @scheduler_api.reroute_compute("rebuild")
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
"""Rebuild the given instance with the provided attributes."""
@@ -1313,7 +1269,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[task_states.RESIZE_VERIFY])
- @scheduler_api.reroute_compute("revert_resize")
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
context = context.elevated()
@@ -1340,7 +1295,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[task_states.RESIZE_VERIFY])
- @scheduler_api.reroute_compute("confirm_resize")
def confirm_resize(self, context, instance):
"""Confirms a migration/resize and deletes the 'old' instance."""
context = context.elevated()
@@ -1369,7 +1323,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF],
task_state=[None])
- @scheduler_api.reroute_compute("resize")
def resize(self, context, instance, flavor_id=None, **kwargs):
"""Resize (ie, migrate) a running instance.
@@ -1411,7 +1364,7 @@ class API(base.Base):
'num_instances': 1,
'instance_properties': instance}
- filter_properties = {'local_zone_only': True, 'ignore_hosts': []}
+ filter_properties = {'ignore_hosts': []}
if not FLAGS.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
@@ -1426,7 +1379,6 @@ class API(base.Base):
"filter_properties": filter_properties}})
@wrap_check_policy
- @scheduler_api.reroute_compute("add_fixed_ip")
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self._cast_compute_message('add_fixed_ip_to_instance',
@@ -1435,7 +1387,6 @@ class API(base.Base):
params=dict(network_id=network_id))
@wrap_check_policy
- @scheduler_api.reroute_compute("remove_fixed_ip")
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self._cast_compute_message('remove_fixed_ip_from_instance',
@@ -1447,7 +1398,6 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.RESCUED],
task_state=[None, task_states.RESIZE_VERIFY])
- @scheduler_api.reroute_compute("pause")
def pause(self, context, instance):
"""Pause the given instance."""
self.update(context,
@@ -1458,7 +1408,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.PAUSED])
- @scheduler_api.reroute_compute("unpause")
def unpause(self, context, instance):
"""Unpause the given instance."""
self.update(context,
@@ -1482,7 +1431,6 @@ class API(base.Base):
host=host, params={"action": action})
@wrap_check_policy
- @scheduler_api.reroute_compute("diagnostics")
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self._call_compute_message("get_diagnostics",
@@ -1498,7 +1446,6 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.RESCUED],
task_state=[None, task_states.RESIZE_VERIFY])
- @scheduler_api.reroute_compute("suspend")
def suspend(self, context, instance):
"""Suspend the given instance."""
self.update(context,
@@ -1509,7 +1456,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.SUSPENDED])
- @scheduler_api.reroute_compute("resume")
def resume(self, context, instance):
"""Resume the given instance."""
self.update(context,
@@ -1522,7 +1468,6 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.SHUTOFF,
vm_states.STOPPED],
task_state=[None, task_states.RESIZE_VERIFY])
- @scheduler_api.reroute_compute("rescue")
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
self.update(context,
@@ -1539,7 +1484,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.RESCUED])
- @scheduler_api.reroute_compute("unrescue")
def unrescue(self, context, instance):
"""Unrescue the given instance."""
self.update(context,
@@ -1551,7 +1495,6 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE])
- @scheduler_api.reroute_compute("set_admin_password")
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
self.update(context,
@@ -1564,7 +1507,6 @@ class API(base.Base):
params=params)
@wrap_check_policy
- @scheduler_api.reroute_compute("inject_file")
def inject_file(self, context, instance, path, file_contents):
"""Write a file to the given instance."""
params = {'path': path, 'file_contents': file_contents}
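
The net effect of the compute/api.py changes above is a simpler create path: with zone routing gone, the only remaining question is whether a single instance was requested. A minimal, self-contained sketch of that fast-path decision (the helper functions are stand-ins, not Nova's actual API):

    def create_db_entry(context, request):
        """Stand-in for the API-side DB insert; returns a fake instance dict."""
        return {'uuid': 'fake-uuid', 'request': request}

    def cast_to_scheduler(context, method, **kwargs):
        """Stand-in for the RPC cast to the scheduler service."""
        print('cast %s(%s)' % (method, kwargs))

    def create(context, request, max_count=1):
        # Previously: max_count == 1 and not FLAGS.enable_zone_routing.
        # With zones removed, any single-instance build takes the fast path.
        create_instance_here = (max_count == 1)
        instances = []
        if create_instance_here:
            # The API writes the record itself, so the caller gets a
            # response without waiting for the scheduler round trip.
            instances.append(create_db_entry(context, request))
        cast_to_scheduler(context, 'run_instance',
                          request_spec=request, num_instances=max_count)
        return instances

    create({'user': 'demo'}, {'flavor': 'm1.small'})
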
diff --git a/nova/exception.py b/nova/exception.py
index 979703072f..dc8f7e19d3 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -43,19 +43,6 @@ class ConvertedException(webob.exc.WSGIHTTPException):
super(ConvertedException, self).__init__()
-def novaclient_converter(f):
- """Convert novaclient ClientException HTTP codes to webob exceptions.
- Has to be the outer-most decorator.
- """
- def new_f(*args, **kwargs):
- try:
- ret = f(*args, **kwargs)
- return ret
- except novaclient.exceptions.ClientException, e:
- raise ConvertedException(e.code, e.message, e.details)
- return new_f
-
-
class ProcessExecutionError(IOError):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
diff --git a/nova/flags.py b/nova/flags.py
index ea6176e9c8..3b06760de5 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -369,15 +369,6 @@ global_opts = [
cfg.ListOpt('memcached_servers',
default=None,
help='Memcached servers or None for in process cache.'),
- cfg.StrOpt('zone_name',
- default='nova',
- help='name of this zone'),
- cfg.ListOpt('zone_capabilities',
- default=['hypervisor=xenserver;kvm', 'os=linux;windows'],
- help='Key/Multi-value list with the capabilities of the zone'),
- cfg.StrOpt('build_plan_encryption_key',
- default=None,
- help='128bit (hex) encryption key for scheduler build plans.'),
cfg.StrOpt('instance_usage_audit_period',
default='month',
help='time period to generate instance usages for.'),
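
For reference, the zone_capabilities flag removed above used a key/multi-value encoding such as 'hypervisor=xenserver;kvm'. A self-contained sketch of how that format unpacks into a dict, mirroring the split('=') loop deleted from nova/scheduler/api.py below:

    def parse_capabilities(caps):
        """Turn ['hypervisor=xenserver;kvm', ...] into a capabilities dict."""
        parsed = {}
        for cap in caps:
            key, value = cap.split('=')
            parsed[key] = value
        return parsed

    print(parse_capabilities(['hypervisor=xenserver;kvm', 'os=linux;windows']))
    # -> {'hypervisor': 'xenserver;kvm', 'os': 'linux;windows'}
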
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 68d4c29e71..30a26f9b38 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -17,29 +17,11 @@
Handles all requests relating to schedulers.
"""
-import functools
-
-from novaclient import v1_1 as novaclient
-from novaclient import exceptions as novaclient_exceptions
-
-from nova import db
-from nova import exception
from nova import flags
from nova import log as logging
-from nova.openstack.common import cfg
from nova import rpc
-from nova import utils
-
-from eventlet import greenpool
-
-
-enable_zone_routing_opt = cfg.BoolOpt('enable_zone_routing',
- default=False,
- help='When True, routing to child zones will occur.')
FLAGS = flags.FLAGS
-FLAGS.register_opt(enable_zone_routing_opt)
-
LOG = logging.getLogger(__name__)
@@ -63,54 +45,9 @@ def get_host_list(context):
return _call_scheduler('get_host_list', context)
-def get_zone_list(context):
- """Return a list of zones associated with this zone."""
- items = _call_scheduler('get_zone_list', context)
- for item in items:
- item['api_url'] = item['api_url'].replace('\\/', '/')
- if not items:
- items = db.zone_get_all(context.elevated())
- return items
-
-
-def zone_get(context, zone_id):
- return db.zone_get(context, zone_id)
-
-
-def zone_delete(context, zone_id):
- return db.zone_delete(context, zone_id)
-
-
-def zone_create(context, data):
- return db.zone_create(context, data)
-
-
-def zone_update(context, zone_id, data):
- return db.zone_update(context, zone_id, data)
-
-
-def get_zone_capabilities(context):
- """Returns a dict of key, value capabilities for this zone."""
-
- zone_capabs = {}
-
- # First grab the capabilities of combined services.
- service_capabs = _call_scheduler('get_service_capabilities', context)
- for item, (min_value, max_value) in service_capabs.iteritems():
- zone_capabs[item] = "%s,%s" % (min_value, max_value)
-
- # Add the capabilities defined by FLAGS
- caps = FLAGS.zone_capabilities
- for cap in caps:
- key, value = cap.split('=')
- zone_capabs[key] = value
- return zone_capabs
-
-
-def select(context, specs=None):
- """Returns a list of hosts."""
- return _call_scheduler('select', context=context,
- params={"request_spec": specs})
+def get_service_capabilities(context):
+ """Return aggregated capabilities for all services."""
+ return _call_scheduler('get_service_capabilities', context)
def update_service_capabilities(context, service_name, host, capabilities):
@@ -122,309 +59,6 @@ def update_service_capabilities(context, service_name, host, capabilities):
return rpc.fanout_cast(context, 'scheduler', kwargs)
-def call_zone_method(context, method_name, errors_to_ignore=None,
- novaclient_collection_name='zones', zones=None,
- *args, **kwargs):
- """Returns a list of (zone, call_result) objects."""
- if not isinstance(errors_to_ignore, (list, tuple)):
- # This will also handle the default None
- errors_to_ignore = [errors_to_ignore]
-
- pool = greenpool.GreenPool()
- results = []
- if zones is None:
- zones = db.zone_get_all(context.elevated())
- for zone in zones:
- try:
- # Do this on behalf of the user ...
- nova = novaclient.Client(zone.username, zone.password, None,
- zone.api_url, region_name=zone.name,
- token=context.auth_token)
- nova.authenticate()
- except novaclient_exceptions.BadRequest, e:
- url = zone.api_url
- name = zone.name
- LOG.warn(_("Authentication failed to zone "
- "'%(name)s' URL=%(url)s: %(e)s") % locals())
- #TODO (dabo) - add logic for failure counts per zone,
- # with escalation after a given number of failures.
- continue
- novaclient_collection = getattr(nova, novaclient_collection_name)
- collection_method = getattr(novaclient_collection, method_name)
-
- def _error_trap(*args, **kwargs):
- try:
- return collection_method(*args, **kwargs)
- except Exception as e:
- if type(e) in errors_to_ignore:
- return None
- raise
-
- res = pool.spawn(_error_trap, *args, **kwargs)
- results.append((zone, res))
- pool.waitall()
- return [(zone.id, res.wait()) for zone, res in results]
-
-
-def child_zone_helper(context, zone_list, func):
- """Fire off a command to each zone in the list.
- The return is [novaclient return objects] from each child zone.
- For example, if you are calling server.pause(), the list will
- be whatever the response from server.pause() is. One entry
- per child zone called."""
-
- def _process(func, context, zone):
- """Worker stub for green thread pool. Give the worker
- an authenticated nova client and zone info."""
- try:
- nova = novaclient.Client(zone.username, zone.password, None,
- zone.api_url, region_name=zone.name,
- token=context.auth_token)
- nova.authenticate()
- except novaclient_exceptions.BadRequest, e:
- url = zone.api_url
- LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s")
- % locals())
- # This is being returned instead of raised, so that when
-            # results are processed in unmarshall_result() after the
- # greenpool.imap completes, the exception can be raised
- # there if no other zones had a response.
- return exception.ZoneRequestError()
- else:
- try:
- answer = func(nova, zone)
- return answer
- except Exception, e:
- return e
-
- green_pool = greenpool.GreenPool()
- return [result for result in green_pool.imap(
- functools.partial(_process, func, context), zone_list)]
-
-
-def _issue_novaclient_command(nova, zone, collection,
- method_name, *args, **kwargs):
- """Use novaclient to issue command to a single child zone.
- One of these will be run in parallel for each child zone.
- """
- manager = getattr(nova, collection)
-
- # NOTE(comstud): This is not ideal, but we have to do this based on
- # how novaclient is implemented right now.
- # 'find' is special cased as novaclient requires kwargs for it to
- # filter on a 'get_all'.
- # Every other method first needs to do a 'get' on the first argument
- # passed, which should be a UUID. If it's 'get' itself that we want,
- # we just return the result. Otherwise, we next call the real method
- # that's wanted... passing other arguments that may or may not exist.
- if method_name in ['find', 'findall']:
- try:
- return getattr(manager, method_name)(**kwargs)
- except novaclient_exceptions.NotFound:
- url = zone.api_url
- LOG.debug(_("%(collection)s.%(method_name)s didn't find "
- "anything matching '%(kwargs)s' on '%(url)s'" %
- locals()))
- return None
-
- args = list(args)
- # pop off the UUID to look up
- item = args.pop(0)
- try:
- result = manager.get(item)
- except novaclient_exceptions.NotFound, e:
- url = zone.api_url
- LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" %
- locals()))
- raise e
-
- if method_name.lower() != 'get':
- # if we're doing something other than 'get', call it passing args.
- result = getattr(result, method_name)(*args, **kwargs)
- return result
-
-
-def wrap_novaclient_function(f, collection, method_name, *args, **kwargs):
- """Appends collection, method_name and arguments to the incoming
- (nova, zone) call from child_zone_helper."""
- def inner(nova, zone):
- return f(nova, zone, collection, method_name, *args, **kwargs)
-
- return inner
-
-
-class RedirectResult(exception.Error):
- """Used to the HTTP API know that these results are pre-cooked
- and they can be returned to the caller directly."""
- def __init__(self, results):
- self.results = results
- super(RedirectResult, self).__init__(
- _("Uncaught Zone redirection exception"))
-
-
-class reroute_compute(object):
- """
- reroute_compute is responsible for trying to lookup a resource in the
- current zone and if it's not found there, delegating the call to the
- child zones.
-
- Since reroute_compute will be making 'cross-zone' calls, the ID for the
- object must come in as a UUID-- if we receive an integer ID, we bail.
-
- The steps involved are:
-
- 1. Validate that item_id is UUID like
-
- 2. Lookup item by UUID in the zone local database
-
- 3. If the item was found, then extract integer ID, and pass that to
- the wrapped method. (This ensures that zone-local code can
- continue to use integer IDs).
-
- 4. If the item was not found, we delegate the call to a child zone
- using the UUID.
- """
- def __init__(self, method_name):
- self.method_name = method_name
-
- def _route_to_child_zones(self, context, collection, item_uuid):
- if not FLAGS.enable_zone_routing:
- raise exception.InstanceNotFound(instance_id=item_uuid)
-
- self.item_uuid = item_uuid
-
- zones = db.zone_get_all(context)
- if not zones:
- raise exception.InstanceNotFound(instance_id=item_uuid)
-
- # Ask the children to provide an answer ...
- LOG.debug(_("Asking child zones ..."))
- result = self._call_child_zones(context, zones,
- wrap_novaclient_function(_issue_novaclient_command,
- collection, self.method_name, item_uuid))
- # Scrub the results and raise another exception
- # so the API layers can bail out gracefully ...
- raise RedirectResult(self.unmarshall_result(result))
-
- def __call__(self, f):
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- _collection_info = self.get_collection_context_and_id(args, kwargs)
- collection, context, item_id_or_uuid = _collection_info
-
- attempt_reroute = False
- if utils.is_uuid_like(item_id_or_uuid):
- item_uuid = item_id_or_uuid
- try:
- instance = db.instance_get_by_uuid(context, item_uuid)
- except exception.InstanceNotFound, e:
- # NOTE(sirp): since a UUID was passed in, we can attempt
- # to reroute to a child zone
- attempt_reroute = True
- LOG.debug(_("Instance %(item_uuid)s not found "
- "locally: '%(e)s'" % locals()))
- else:
- # NOTE(sirp): since we're not re-routing in this case, and
-                # we were passed a UUID, we need to replace that UUID
- # with an integer ID in the argument list so that the
- # zone-local code can continue to use integer IDs.
- item_id = instance['id']
- args = list(args) # needs to be mutable to replace
- self.replace_uuid_with_id(args, kwargs, item_id)
-
- if attempt_reroute:
- return self._route_to_child_zones(context, collection,
- item_uuid)
- else:
- return f(*args, **kwargs)
-
- return wrapped_f
-
- def _call_child_zones(self, context, zones, function):
- """Ask the child zones to perform this operation.
- Broken out for testing."""
- return child_zone_helper(context, zones, function)
-
- def get_collection_context_and_id(self, args, kwargs):
- """Returns a tuple of (novaclient collection name, security
-        context and resource id). Derived class should override this."""
- context = kwargs.get('context', None)
- instance_id = kwargs.get('instance_id', None)
-
- #NOTE(blamar): This is going to get worse before it gets better...
- instance = kwargs.get('instance', None)
- if instance is not None:
- instance_id = instance['uuid']
-
- if len(args) > 0 and not context:
- context = args[1]
- if len(args) > 1 and not instance_id:
- instance_id = args[2]
- return ("servers", context, instance_id)
-
- @staticmethod
- def replace_uuid_with_id(args, kwargs, replacement_id):
- """
- Extracts the UUID parameter from the arg or kwarg list and replaces
- it with an integer ID.
- """
- if 'instance_id' in kwargs:
- kwargs['instance_id'] = replacement_id
- elif len(args) > 2:
- args.pop(2)
- args.insert(2, replacement_id)
-
- def unmarshall_result(self, zone_responses):
- """Result is a list of responses from each child zone.
-        Each decorator derivation is responsible for turning this
- into a format expected by the calling method. For
- example, this one is expected to return a single Server
- dict {'server':{k:v}}. Others may return a list of them, like
- {'servers':[{k,v}]}"""
- reduced_response = []
- found_exception = None
- for zone_response in zone_responses:
- if not zone_response:
- continue
- if isinstance(zone_response, BaseException):
- found_exception = zone_response
- continue
-
- server = zone_response.__dict__
-
- for k in server.keys():
- if k[0] == '_' or k == 'manager':
- del server[k]
-
- reduced_response.append(dict(server=server))
-
- # Boil the responses down to a single response.
- #
- # If we get a happy response use that, ignore all the
-        # complaint responses ...
- if reduced_response:
- return reduced_response[0] # first for now.
- elif found_exception:
- return found_exception
-
- # Some operations, like delete(), don't send back any results
- # on success. We'll do the same.
- return None
-
-
-def redirect_handler(f):
- def new_f(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except RedirectResult, e:
- # Remember: exceptions are returned, not thrown, in the decorator.
- # At this point it's safe to throw it.
- if isinstance(e.results, BaseException):
- raise e.results
- return e.results
- return new_f
-
-
def live_migration(context, block_migration, disk_over_commit,
instance_id, dest, topic):
"""Migrate a server to a new host"""
diff --git a/nova/scheduler/distributed_scheduler.py b/nova/scheduler/distributed_scheduler.py
index ac488b0861..d11e2318f4 100644
--- a/nova/scheduler/distributed_scheduler.py
+++ b/nova/scheduler/distributed_scheduler.py
@@ -14,24 +14,17 @@
# under the License.
"""
-The DistributedScheduler is for creating instances locally or across zones.
+The DistributedScheduler is for creating instances locally.
You can customize this scheduler by specifying your own Host Filters and
Weighing Functions.
"""
-import json
import operator
-from novaclient import v1_1 as novaclient
-from novaclient import exceptions as novaclient_exceptions
-from nova import crypto
-from nova import db
from nova import exception
from nova import flags
from nova import log as logging
-from nova.scheduler import api
from nova.scheduler import driver
-from nova.scheduler import host_manager
from nova.scheduler import least_cost
from nova.scheduler import scheduler_options
from nova import utils
@@ -42,9 +35,7 @@ LOG = logging.getLogger(__name__)
class DistributedScheduler(driver.Scheduler):
- """Scheduler that can work across any nova deployment, from simple
- deployments to multiple nested zones.
- """
+ """Scheduler that can be used for filtering and weighing."""
def __init__(self, *args, **kwargs):
super(DistributedScheduler, self).__init__(*args, **kwargs)
self.cost_function_cache = {}
@@ -61,14 +52,10 @@ class DistributedScheduler(driver.Scheduler):
def schedule_run_instance(self, context, request_spec, *args, **kwargs):
"""This method is called from nova.compute.api to provision
- an instance. However we need to look at the parameters being
- passed in to see if this is a request to:
- 1. Create build plan (a list of WeightedHosts) and then provision, or
- 2. Use the WeightedHost information in the request parameters
- to simply create the instance (either in this zone or
- a child zone).
-
- returns a list of the instances created.
+ an instance. We first create a build plan (a list of WeightedHosts)
+ and then provision.
+
+ Returns a list of the instances created.
"""
elevated = context.elevated()
@@ -76,16 +63,7 @@ class DistributedScheduler(driver.Scheduler):
LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
locals())
- weighted_hosts = []
-
- # Having a 'blob' hint means we've already provided a build plan.
- # We need to turn this back into a WeightedHost object.
- blob = request_spec.get('blob', None)
- if blob:
- weighted_hosts.append(self._make_weighted_host_from_blob(blob))
- else:
- # No plan ... better make one.
- weighted_hosts = self._schedule(context, "compute", request_spec,
+ weighted_hosts = self._schedule(context, "compute", request_spec,
*args, **kwargs)
if not weighted_hosts:
@@ -101,13 +79,8 @@ class DistributedScheduler(driver.Scheduler):
break
weighted_host = weighted_hosts.pop(0)
- instance = None
- if weighted_host.zone:
- instance = self._ask_child_zone_to_create_instance(elevated,
- weighted_host, request_spec, kwargs)
- else:
- instance = self._provision_resource_locally(elevated,
- weighted_host, request_spec, kwargs)
+ instance = self._provision_resource(elevated, weighted_host,
+ request_spec, kwargs)
if instance:
instances.append(instance)
@@ -121,17 +94,6 @@ class DistributedScheduler(driver.Scheduler):
the prep_resize operation to it.
"""
- # We need the new instance type ID...
- instance_type_id = kwargs['instance_type_id']
-
- elevated = context.elevated()
- LOG.debug(_("Attempting to determine target host for resize to "
- "instance type %(instance_type_id)s") % locals())
-
- # Convert it to an actual instance type
- instance_type = db.instance_type_get(elevated, instance_type_id)
-
- # Now let's grab a possibility
hosts = self._schedule(context, 'compute', request_spec,
*args, **kwargs)
if not hosts:
@@ -146,22 +108,8 @@ class DistributedScheduler(driver.Scheduler):
driver.cast_to_compute_host(context, host.host_state.host,
'prep_resize', **kwargs)
- def select(self, context, request_spec, *args, **kwargs):
- """Select returns a list of weights and zone/host information
- corresponding to the best hosts to service the request. Any
- internal zone information will be encrypted so as not to reveal
- anything about our inner layout.
- """
- weighted_hosts = self._schedule(context, "compute", request_spec,
- *args, **kwargs)
- return [weighted_host.to_dict() for weighted_host in weighted_hosts]
-
- def _call_zone_method(self, context, method, specs, zones):
- """Call novaclient zone method. Broken out for testing."""
- return api.call_zone_method(context, method, specs=specs, zones=zones)
-
- def _provision_resource_locally(self, context, weighted_host, request_spec,
- kwargs):
+ def _provision_resource(self, context, weighted_host, request_spec,
+ kwargs):
"""Create the requested resource in this Zone."""
instance = self.create_instance_db_entry(context, request_spec)
driver.cast_to_compute_host(context, weighted_host.host_state.host,
@@ -173,104 +121,6 @@ class DistributedScheduler(driver.Scheduler):
del request_spec['instance_properties']['uuid']
return inst
- def _make_weighted_host_from_blob(self, blob):
- """Returns the decrypted blob as a WeightedHost object
- or None if invalid. Broken out for testing.
- """
- decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
- json_entry = decryptor(blob)
-
- # Extract our WeightedHost values
- wh_dict = json.loads(json_entry)
- host = wh_dict.get('host', None)
- blob = wh_dict.get('blob', None)
- zone = wh_dict.get('zone', None)
- return least_cost.WeightedHost(wh_dict['weight'],
- host_state=host_manager.HostState(host, 'compute'),
- blob=blob, zone=zone)
-
- def _ask_child_zone_to_create_instance(self, context, weighted_host,
- request_spec, kwargs):
- """Once we have determined that the request should go to one
- of our children, we need to fabricate a new POST /servers/
- call with the same parameters that were passed into us.
- This request is always for a single instance.
-
- Note that we have to reverse engineer from our args to get back the
- image, flavor, ipgroup, etc. since the original call could have
- come in from EC2 (which doesn't use these things).
- """
- instance_type = request_spec['instance_type']
- instance_properties = request_spec['instance_properties']
-
- name = instance_properties['display_name']
- image_ref = instance_properties['image_ref']
- meta = instance_properties['metadata']
- flavor_id = instance_type['flavorid']
- reservation_id = instance_properties['reservation_id']
- files = kwargs['injected_files']
-
- zone = db.zone_get(context.elevated(), weighted_host.zone)
- zone_name = zone.name
- url = zone.api_url
- LOG.debug(_("Forwarding instance create call to zone '%(zone_name)s'. "
- "ReservationID=%(reservation_id)s") % locals())
- nova = None
- try:
- # This operation is done as the caller, not the zone admin.
- nova = novaclient.Client(zone.username, zone.password, None, url,
- token=context.auth_token,
- region_name=zone_name)
- nova.authenticate()
- except novaclient_exceptions.BadRequest, e:
- raise exception.NotAuthorized(_("Bad credentials attempting "
- "to talk to zone at %(url)s.") % locals())
- # NOTE(Vek): Novaclient has two different calling conventions
- # for this call, depending on whether you're using
- # 1.0 or 1.1 API: in 1.0, there's an ipgroups
- # argument after flavor_id which isn't present in
- # 1.1. To work around this, all the extra
- # arguments are passed as keyword arguments
- # (there's a reasonable default for ipgroups in the
- # novaclient call).
- instance = nova.servers.create(name, image_ref, flavor_id,
- meta=meta, files=files,
- zone_blob=weighted_host.blob,
- reservation_id=reservation_id)
- return driver.encode_instance(instance._info, local=False)
-
- def _adjust_child_weights(self, child_results, zones):
- """Apply the Scale and Offset values from the Zone definition
- to adjust the weights returned from the child zones. Returns
- a list of WeightedHost objects: [WeightedHost(), ...]
- """
- weighted_hosts = []
- for zone_id, result in child_results:
- if not result:
- continue
-
- for zone_rec in zones:
- if zone_rec['id'] != zone_id:
- continue
- for item in result:
- try:
- offset = zone_rec['weight_offset']
- scale = zone_rec['weight_scale']
- raw_weight = item['weight']
- cooked_weight = offset + scale * raw_weight
-
- weighted_hosts.append(least_cost.WeightedHost(
- cooked_weight, zone=zone_id,
- blob=item['blob']))
- except KeyError:
- LOG.exception(_("Bad child zone scaling values "
- "for Zone: %(zone_id)s") % locals())
- return weighted_hosts
-
- def _zone_get_all(self, context):
- """Broken out for testing."""
- return db.zone_get_all(context)
-
def _get_configuration_options(self):
"""Fetch options dictionary. Broken out for testing."""
return self.options.get_configuration()
@@ -292,10 +142,6 @@ class DistributedScheduler(driver.Scheduler):
instance_properties = request_spec['instance_properties']
instance_type = request_spec.get("instance_type", None)
- if not instance_type:
- raise NotImplementedError(_("Scheduler only understands "
- "InstanceType-based "
- "provisioning."))
cost_functions = self.get_cost_functions()
config_options = self._get_configuration_options()
@@ -347,14 +193,6 @@ class DistributedScheduler(driver.Scheduler):
weighted_host.host_state.consume_from_instance(
instance_properties)
- # Next, tack on the host weights from the child zones
- if not filter_properties.get('local_zone_only', False):
- json_spec = json.dumps(request_spec)
- all_zones = self._zone_get_all(elevated)
- child_results = self._call_zone_method(elevated, "select",
- specs=json_spec, zones=all_zones)
- selected_hosts.extend(self._adjust_child_weights(
- child_results, all_zones))
selected_hosts.sort(key=operator.attrgetter('weight'))
return selected_hosts[:num_instances]
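
With the blob and child-zone branches gone, schedule_run_instance reduces to: build a weighted host list, pop one host per requested instance, provision locally. A self-contained sketch of that loop (WeightedHost is trimmed to a weight plus a host name; lower weight wins, as in least_cost):

    import operator

    class WeightedHost(object):
        def __init__(self, weight, host):
            self.weight = weight
            self.host = host

    def _schedule(num_instances):
        """Stand-in for filtering and weighing; returns best hosts first."""
        hosts = [WeightedHost(3.0, 'node3'), WeightedHost(1.0, 'node1'),
                 WeightedHost(2.0, 'node2')]
        hosts.sort(key=operator.attrgetter('weight'))
        return hosts[:num_instances]

    def schedule_run_instance(num_instances):
        instances = []
        weighted_hosts = _schedule(num_instances)
        for _ in range(num_instances):
            if not weighted_hosts:
                break
            weighted_host = weighted_hosts.pop(0)
            # _provision_resource(): create the DB entry, cast to the host.
            instances.append({'host': weighted_host.host})
        return instances

    print(schedule_run_instance(2))  # [{'host': 'node1'}, {'host': 'node2'}]
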
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 1552555f70..5ee4ed343f 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -31,8 +31,6 @@ from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import rpc
-from nova.scheduler import host_manager
-from nova.scheduler import zone_manager
from nova import utils
@@ -42,9 +40,6 @@ scheduler_driver_opts = [
cfg.StrOpt('scheduler_host_manager',
default='nova.scheduler.host_manager.HostManager',
help='The scheduler host manager class to use'),
- cfg.StrOpt('scheduler_zone_manager',
- default='nova.scheduler.zone_manager.ZoneManager',
- help='The scheduler zone manager class to use'),
]
FLAGS = flags.FLAGS
@@ -135,8 +130,6 @@ class Scheduler(object):
"""The base class that all Scheduler classes should inherit from."""
def __init__(self):
- self.zone_manager = utils.import_object(
- FLAGS.scheduler_zone_manager)
self.host_manager = utils.import_object(
FLAGS.scheduler_host_manager)
self.compute_api = compute_api.API()
@@ -145,13 +138,8 @@ class Scheduler(object):
"""Get a list of hosts from the HostManager."""
return self.host_manager.get_host_list()
- def get_zone_list(self):
- """Get a list of zones from the ZoneManager."""
- return self.zone_manager.get_zone_list()
-
def get_service_capabilities(self):
- """Get the normalized set of capabilities for the services
- in this zone.
+ """Get the normalized set of capabilities for the services.
"""
return self.host_manager.get_service_capabilities()
@@ -160,10 +148,6 @@ class Scheduler(object):
self.host_manager.update_service_capabilities(service_name,
host, capabilities)
- def poll_child_zones(self, context):
- """Poll child zones periodically to get status."""
- return self.zone_manager.update(context)
-
def hosts_up(self, context, topic):
"""Return the list of hosts that have a running service for topic."""
@@ -196,10 +180,6 @@ class Scheduler(object):
"""Must override at least this method for scheduler to work."""
raise NotImplementedError(_("Must implement a fallback schedule"))
- def select(self, context, topic, method, *_args, **_kwargs):
- """Must override this for zones to work."""
- raise NotImplementedError(_("Must implement 'select' method"))
-
def schedule_live_migration(self, context, instance_id, dest,
block_migration=False,
disk_over_commit=False):
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index 3fbef5bc5c..f54f8fe461 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -23,8 +23,8 @@ is then selected for provisioning.
"""
from nova import flags
-from nova.openstack.common import cfg
from nova import log as logging
+from nova.openstack.common import cfg
LOG = logging.getLogger(__name__)
@@ -55,24 +55,10 @@ class WeightedHost(object):
This is an attempt to remove some of the ad-hoc dict structures
previously used."""
- def __init__(self, weight, host_state=None, blob=None, zone=None):
+ def __init__(self, weight, host_state=None):
self.weight = weight
- self.blob = blob
- self.zone = zone
-
- # Local members. These are not returned outside of the Zone.
self.host_state = host_state
- def to_dict(self):
- x = dict(weight=self.weight)
- if self.blob:
- x['blob'] = self.blob
- if self.host_state:
- x['host'] = self.host_state.host
- if self.zone:
- x['zone'] = self.zone
- return x
-
def noop_cost_fn(host_state, weighing_properties):
"""Return a pre-weight cost of 1 for each host"""
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 1a94a6b0bc..b3391186b5 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -30,7 +30,6 @@ from nova import flags
from nova import log as logging
from nova import manager
from nova.openstack.common import cfg
-from nova import rpc
from nova import utils
@@ -57,19 +56,10 @@ class SchedulerManager(manager.Manager):
"""Converts all method calls to use the schedule method"""
return functools.partial(self._schedule, key)
- @manager.periodic_task
- def _poll_child_zones(self, context):
- """Poll child zones periodically to get status."""
- self.driver.poll_child_zones(context)
-
def get_host_list(self, context):
"""Get a list of hosts from the HostManager."""
return self.driver.get_host_list()
- def get_zone_list(self, context):
- """Get a list of zones from the ZoneManager."""
- return self.driver.get_zone_list()
-
def get_service_capabilities(self, context):
"""Get the normalized set of capabilities for this zone."""
return self.driver.get_service_capabilities()
@@ -82,10 +72,6 @@ class SchedulerManager(manager.Manager):
self.driver.update_service_capabilities(service_name, host,
capabilities)
- def select(self, context, *args, **kwargs):
- """Select a list of hosts best matching the provided specs."""
- return self.driver.select(context, *args, **kwargs)
-
def _schedule(self, method, context, topic, *args, **kwargs):
"""Tries to call schedule_* method on the driver to retrieve host.
diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py
index 43adf0b1ea..fb8692ffa9 100644
--- a/nova/scheduler/multi.py
+++ b/nova/scheduler/multi.py
@@ -71,10 +71,6 @@ class MultiScheduler(driver.Scheduler):
raise AttributeError(key)
return getattr(self.drivers[_METHOD_MAP[method]], key)
- def set_zone_manager(self, zone_manager):
- for k, v in self.drivers.iteritems():
- v.set_zone_manager(zone_manager)
-
def schedule(self, context, topic, method, *_args, **_kwargs):
return self.drivers[topic].schedule(context, topic,
method, *_args, **_kwargs)
diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py
index f7a8efc124..fc888dc113 100644
--- a/nova/scheduler/vsa.py
+++ b/nova/scheduler/vsa.py
@@ -21,12 +21,12 @@ VSA Simple Scheduler
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import rpc
from nova import utils
-from nova import exception
from nova.scheduler import driver
from nova.scheduler import simple
from nova.vsa.api import VsaState
diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
deleted file mode 100644
index b9d021c756..0000000000
--- a/nova/scheduler/zone_manager.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Copyright (c) 2011 Openstack, LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Manage communication with child zones and keep state for them.
-"""
-
-import datetime
-import traceback
-
-from eventlet import greenpool
-from novaclient import v1_1 as novaclient
-
-from nova import db
-from nova import flags
-from nova import log as logging
-from nova.openstack.common import cfg
-from nova import utils
-
-
-zone_manager_opts = [
- cfg.IntOpt('zone_db_check_interval',
- default=60,
- help='Seconds between getting fresh zone info from db.'),
- cfg.IntOpt('zone_failures_to_offline',
- default=3,
- help='Number of consecutive errors before offlining a zone'),
- ]
-
-FLAGS = flags.FLAGS
-FLAGS.register_opts(zone_manager_opts)
-
-LOG = logging.getLogger(__name__)
-
-
-class ZoneState(object):
- """Holds state for a particular zone."""
- def __init__(self):
- self.is_active = True
- self.capabilities = {}
- self.attempt = 0
- self.last_seen = datetime.datetime.min
- self.last_exception = None
- self.last_exception_time = None
- self.zone_info = {}
-
- def update_zone_info(self, zone):
- """Update zone credentials from db"""
- self.zone_info = dict(zone.iteritems())
-
- def update_metadata(self, zone_metadata):
- """Update zone metadata after successful communications with
- child zone."""
- self.last_seen = utils.utcnow()
- self.attempt = 0
- self.capabilities = dict(
- [(k, v) for k, v in zone_metadata.iteritems() if k != 'name'])
- self.is_active = True
-
- def get_zone_info(self):
- db_fields_to_return = ['api_url', 'id', 'weight_scale',
- 'weight_offset']
- zone_info = dict(is_active=self.is_active,
- capabilities=self.capabilities)
- for field in db_fields_to_return:
- zone_info[field] = self.zone_info[field]
- return zone_info
-
- def log_error(self, exception):
- """Something went wrong. Check to see if zone should be
- marked as offline."""
- self.last_exception = exception
- self.last_exception_time = utils.utcnow()
- api_url = self.zone_info['api_url']
- LOG.warning(_("'%(exception)s' error talking to "
- "zone %(api_url)s") % locals())
-
- max_errors = FLAGS.zone_failures_to_offline
- self.attempt += 1
- if self.attempt >= max_errors:
- self.is_active = False
- LOG.error(_("No answer from zone %(api_url)s "
- "after %(max_errors)d "
- "attempts. Marking inactive.") % locals())
-
- def call_novaclient(self):
- """Call novaclient. Broken out for testing purposes. Note that
- we have to use the admin credentials for this since there is no
- available context."""
- username = self.zone_info['username']
- password = self.zone_info['password']
- api_url = self.zone_info['api_url']
- region_name = self.zone_info['name']
- client = novaclient.Client(username, password, None, api_url,
- region_name)
- return client.zones.info()._info
-
- def poll(self):
- """Eventlet worker to poll a self."""
- if 'api_url' not in self.zone_info:
- return
- name = self.zone_info['name']
- api_url = self.zone_info['api_url']
- LOG.debug(_("Polling zone: %(name)s @ %(api_url)s") % locals())
- try:
- self.update_metadata(self.call_novaclient())
- except Exception, e:
- self.log_error(traceback.format_exc())
-
-
-class ZoneManager(object):
- """Keeps the zone states updated."""
- def __init__(self):
- self.last_zone_db_check = datetime.datetime.min
- self.zone_states = {} # { <zone_id> : ZoneState }
- self.green_pool = greenpool.GreenPool()
-
- def get_zone_list(self):
- """Return the list of zones we know about."""
- return [zone.get_zone_info() for zone in self.zone_states.values()]
-
- def _refresh_from_db(self, context):
- """Make our zone state map match the db."""
- # Add/update existing zones ...
- zones = db.zone_get_all(context)
- existing = self.zone_states.keys()
- db_keys = []
- for zone in zones:
- zone_id = zone['id']
- db_keys.append(zone_id)
- if zone_id not in existing:
- self.zone_states[zone_id] = ZoneState()
- self.zone_states[zone_id].update_zone_info(zone)
-
- # Cleanup zones removed from db ...
- keys = self.zone_states.keys() # since we're deleting
- for zone_id in keys:
- if zone_id not in db_keys:
- del self.zone_states[zone_id]
-
- def _poll_zones(self):
- """Try to connect to each child zone and get update."""
- def _worker(zone_state):
- zone_state.poll()
- self.green_pool.imap(_worker, self.zone_states.values())
-
- def update(self, context):
- """Update status for all zones. This should be called
- periodically to refresh the zone states.
- """
- diff = utils.utcnow() - self.last_zone_db_check
- if diff.seconds >= FLAGS.zone_db_check_interval:
- LOG.debug(_("Updating zone cache from db."))
- self.last_zone_db_check = utils.utcnow()
- self._refresh_from_db(context)
- self._poll_zones()
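
One pattern in the deleted ZoneManager is worth noting on its way out: consecutive-failure escalation, controlled by zone_failures_to_offline. A self-contained sketch of that offlining logic:

    class EndpointState(object):
        """Tracks health of one polled endpoint, as ZoneState did above."""
        def __init__(self, max_errors=3):
            self.is_active = True
            self.attempt = 0
            self.max_errors = max_errors

        def record_success(self):
            # A good poll resets the failure counter (see update_metadata).
            self.attempt = 0
            self.is_active = True

        def record_error(self):
            self.attempt += 1
            if self.attempt >= self.max_errors:
                # Mirrors FLAGS.zone_failures_to_offline semantics.
                self.is_active = False

    state = EndpointState()
    for _ in range(3):
        state.record_error()
    print(state.is_active)  # False after three consecutive errors
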
diff --git a/nova/tests/api/openstack/compute/contrib/test_extendedstatus.py b/nova/tests/api/openstack/compute/contrib/test_extendedstatus.py
index 1edec954c6..ba832cfe78 100644
--- a/nova/tests/api/openstack/compute/contrib/test_extendedstatus.py
+++ b/nova/tests/api/openstack/compute/contrib/test_extendedstatus.py
@@ -51,7 +51,7 @@ class ExtendedStatusTest(test.TestCase):
def setUp(self):
super(ExtendedStatusTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
- self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
def _make_request(self, url):
@@ -94,7 +94,7 @@ class ExtendedStatusTest(test.TestCase):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound()
- self.stubs.Set(compute.api.API, 'routing_get', fake_compute_get)
+ self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v2/fake/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
diff --git a/nova/tests/api/openstack/compute/contrib/test_zones.py b/nova/tests/api/openstack/compute/contrib/test_zones.py
deleted file mode 100644
index 9f887cb0d0..0000000000
--- a/nova/tests/api/openstack/compute/contrib/test_zones.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import json
-
-from lxml import etree
-
-from nova.api.openstack.compute.contrib import zones
-from nova.api.openstack import xmlutil
-from nova import crypto
-import nova.db
-from nova import flags
-from nova.scheduler import api
-from nova import test
-from nova.tests.api.openstack import fakes
-
-
-FLAGS = flags.FLAGS
-
-
-def zone_get(context, zone_id):
- return dict(id=1, api_url='http://example.com', username='bob',
- password='xxx', weight_scale=1.0, weight_offset=0.0,
- name='darksecret')
-
-
-def zone_create(context, values):
- zone = dict(id=1)
- zone.update(values)
- return zone
-
-
-def zone_update(context, zone_id, values):
- zone = dict(id=zone_id, api_url='http://example.com', username='bob',
- password='xxx')
- zone.update(values)
- return zone
-
-
-def zone_delete(context, zone_id):
- pass
-
-
-def zone_get_all_scheduler(*args):
- return [
- dict(id=1, api_url='http://example.com', username='bob',
- password='xxx', weight_scale=1.0, weight_offset=0.0),
- dict(id=2, api_url='http://example.org', username='alice',
- password='qwerty', weight_scale=1.0, weight_offset=0.0),
- ]
-
-
-def zone_get_all_scheduler_empty(*args):
- return []
-
-
-def zone_get_all_db(context):
- return [
- dict(id=1, api_url='http://example.com', username='bob',
- password='xxx', weight_scale=1.0, weight_offset=0.0),
- dict(id=2, api_url='http://example.org', username='alice',
- password='qwerty', weight_scale=1.0, weight_offset=0.0),
- ]
-
-
-def zone_capabilities(method, context):
- return dict()
-
-
-GLOBAL_BUILD_PLAN = [
- dict(name='host1', weight=10, ip='10.0.0.1', zone='zone1'),
- dict(name='host2', weight=9, ip='10.0.0.2', zone='zone2'),
- dict(name='host3', weight=8, ip='10.0.0.3', zone='zone3'),
- dict(name='host4', weight=7, ip='10.0.0.4', zone='zone4'),
- ]
-
-
-def zone_select(context, specs):
- return GLOBAL_BUILD_PLAN
-
-
-class ZonesTest(test.TestCase):
- def setUp(self):
- super(ZonesTest, self).setUp()
- fakes.stub_out_networking(self.stubs)
- fakes.stub_out_rate_limiting(self.stubs)
-
- self.stubs.Set(nova.db, 'zone_get', zone_get)
- self.stubs.Set(nova.db, 'zone_update', zone_update)
- self.stubs.Set(nova.db, 'zone_create', zone_create)
- self.stubs.Set(nova.db, 'zone_delete', zone_delete)
-
- self.controller = zones.Controller()
-
- def test_get_zone_list_scheduler(self):
- self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler)
-
- req = fakes.HTTPRequest.blank('/v2/fake/zones')
- res_dict = self.controller.index(req)
-
- self.assertEqual(len(res_dict['zones']), 2)
-
- def test_get_zone_list_db(self):
- self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler_empty)
- self.stubs.Set(nova.db, 'zone_get_all', zone_get_all_db)
-
- req = fakes.HTTPRequest.blank('/v2/fake/zones')
- res_dict = self.controller.index(req)
-
- self.assertEqual(len(res_dict['zones']), 2)
-
- def test_get_zone_by_id(self):
- req = fakes.HTTPRequest.blank('/v2/fake/zones/1')
- res_dict = self.controller.show(req, 1)
-
- self.assertEqual(res_dict['zone']['id'], 1)
- self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
- self.assertFalse('password' in res_dict['zone'])
-
- def test_zone_delete(self):
- req = fakes.HTTPRequest.blank('/v2/fake/zones/1')
- self.controller.delete(req, 1)
-
- def test_zone_create(self):
- body = dict(zone=dict(api_url='http://example.com', username='fred',
- password='fubar'))
-
- req = fakes.HTTPRequest.blank('/v2/fake/zones')
- res_dict = self.controller.create(req, body)
-
- self.assertEqual(res_dict['zone']['id'], 1)
- self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
- self.assertFalse('username' in res_dict['zone'])
-
- def test_zone_update(self):
- body = dict(zone=dict(username='zeb', password='sneaky'))
-
- req = fakes.HTTPRequest.blank('/v2/fake/zones/1')
- res_dict = self.controller.update(req, 1, body)
-
- self.assertEqual(res_dict['zone']['id'], 1)
- self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
- self.assertFalse('username' in res_dict['zone'])
-
- def test_zone_info(self):
- caps = ['cap1=a;b', 'cap2=c;d']
- self.flags(zone_name='darksecret', zone_capabilities=caps)
- self.stubs.Set(api, '_call_scheduler', zone_capabilities)
-
- req = fakes.HTTPRequest.blank('/v2/fake/zones/info')
- res_dict = self.controller.info(req)
-
- self.assertEqual(res_dict['zone']['name'], 'darksecret')
- self.assertEqual(res_dict['zone']['cap1'], 'a;b')
- self.assertEqual(res_dict['zone']['cap2'], 'c;d')
-
- def test_zone_select(self):
- key = 'c286696d887c9aa0611bbb3e2025a45a'
- self.flags(build_plan_encryption_key=key)
- self.stubs.Set(api, 'select', zone_select)
-
- # Select queries end up being JSON encoded twice.
- # Once to a string and again as an HTTP POST Body
- body = json.dumps({})
-
- req = fakes.HTTPRequest.blank('/v2/fake/zones/select')
- res_dict = self.controller.select(req, body)
-
- self.assertTrue('weights' in res_dict)
-
- for item in res_dict['weights']:
- blob = item['blob']
- decrypt = crypto.decryptor(FLAGS.build_plan_encryption_key)
- secret_item = json.loads(decrypt(blob))
- found = False
- for original_item in GLOBAL_BUILD_PLAN:
- if original_item['name'] != secret_item['name']:
- continue
- found = True
- for key in ('weight', 'ip', 'zone'):
- self.assertEqual(secret_item[key], original_item[key])
-
- self.assertTrue(found)
- self.assertEqual(len(item), 2)
- self.assertTrue('weight' in item)
-
-
-class TestZonesXMLSerializer(test.TestCase):
-
- def test_select(self):
- serializer = zones.WeightsTemplate()
-
- key = 'c286696d887c9aa0611bbb3e2025a45a'
-
- encrypt = crypto.encryptor(key)
- decrypt = crypto.decryptor(key)
-
- item = GLOBAL_BUILD_PLAN[0]
- fixture = {'weights': {'blob': encrypt(json.dumps(item)),
- 'weight': item['weight']}}
-
- output = serializer.serialize(fixture)
- res_tree = etree.XML(output)
-
- self.assertEqual(res_tree.tag, '{%s}weights' % xmlutil.XMLNS_V10)
-
- for item in res_tree:
- self.assertEqual(item.tag, '{%s}weight' % xmlutil.XMLNS_V10)
- blob = None
- weight = None
- for chld in item:
- if chld.tag.endswith('blob'):
- blob = chld.text
- elif chld.tag.endswith('weight'):
- weight = chld.text
-
- secret_item = json.loads(decrypt(blob))
- found = False
- for original_item in GLOBAL_BUILD_PLAN:
- if original_item['name'] != secret_item['name']:
- continue
- found = True
- for key in ('weight', 'ip', 'zone'):
- self.assertEqual(secret_item[key], original_item[key])
-
- self.assertTrue(found)
- self.assertEqual(len(item), 2)
- self.assertTrue(weight)
-
- def test_index(self):
- serializer = zones.ZonesTemplate()
-
- fixture = {'zones': zone_get_all_scheduler()}
-
- output = serializer.serialize(fixture)
- res_tree = etree.XML(output)
-
- self.assertEqual(res_tree.tag, '{%s}zones' % xmlutil.XMLNS_V10)
- self.assertEqual(len(res_tree), 2)
- self.assertEqual(res_tree[0].tag, '{%s}zone' % xmlutil.XMLNS_V10)
- self.assertEqual(res_tree[1].tag, '{%s}zone' % xmlutil.XMLNS_V10)
-
- def test_show(self):
- serializer = zones.ZoneTemplate()
-
- zone = {'id': 1,
- 'api_url': 'http://example.com',
- 'name': 'darksecret',
- 'cap1': 'a;b',
- 'cap2': 'c;d'}
- fixture = {'zone': zone}
-
- output = serializer.serialize(fixture)
- print repr(output)
- res_tree = etree.XML(output)
-
- self.assertEqual(res_tree.tag, '{%s}zone' % xmlutil.XMLNS_V10)
- self.assertEqual(res_tree.get('id'), '1')
- self.assertEqual(res_tree.get('api_url'), 'http://example.com')
- self.assertEqual(res_tree.get('password'), None)
-
- self.assertEqual(res_tree.get('name'), 'darksecret')
- for elem in res_tree:
- self.assertEqual(elem.tag in ('{%s}cap1' % xmlutil.XMLNS_V10,
- '{%s}cap2' % xmlutil.XMLNS_V10),
- True)
- if elem.tag == '{%s}cap1' % xmlutil.XMLNS_V10:
- self.assertEqual(elem.text, 'a;b')
- elif elem.tag == '{%s}cap2' % xmlutil.XMLNS_V10:
- self.assertEqual(elem.text, 'c;d')
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index 7341cd4832..bd292c5332 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -172,6 +172,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"Hosts",
"Keypairs",
"Multinic",
+ "Networks",
"Quotas",
"Rescue",
"SchedulerHints",
@@ -185,8 +186,6 @@ class ExtensionControllerTest(ExtensionTestCase):
"VirtualInterfaces",
"Volumes",
"VolumeTypes",
- "Zones",
- "Networks",
]
self.ext_list.sort()
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index 82dcf65e49..7a1f3c4940 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -52,8 +52,12 @@ def instance_update(context, instance_id, kwargs):
return stub_instance(instance_id)
-def return_server_with_attributes(**kwargs):
- def _return_server(context, id):
+def return_server_with_attributes(id, **kwargs):
+ """NOTE: This won't work unless you stub out both
+ nova.db.instance_get() and nova.db.instance_get_by_uuid()
+    to be safe. Almost all tests only require instance_get_by_uuid().
+ """
+ def _return_server(context, id_or_uuid):
return stub_instance(id, **kwargs)
return _return_server
@@ -280,8 +284,9 @@ class ServerActionsControllerTest(test.TestCase):
req, FAKE_UUID, body)
def test_rebuild_accepted_minimum(self):
- new_return_server = return_server_with_attributes(image_ref='2')
+ new_return_server = return_server_with_attributes(id=1, image_ref='2')
self.stubs.Set(nova.db, 'instance_get', new_return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
body = {
@@ -305,8 +310,9 @@ class ServerActionsControllerTest(test.TestCase):
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
- new_return_server = return_server_with_attributes(image_ref='2')
+ new_return_server = return_server_with_attributes(id=1, image_ref='2')
self.stubs.Set(nova.db, 'instance_get', new_return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
self_href = 'http://localhost/v2/fake/servers/%s' % FAKE_UUID
body = {
@@ -344,8 +350,10 @@ class ServerActionsControllerTest(test.TestCase):
def test_rebuild_accepted_with_metadata(self):
metadata = {'new': 'metadata'}
- new_return_server = return_server_with_attributes(metadata=metadata)
+ new_return_server = return_server_with_attributes(id=1,
+ metadata=metadata)
self.stubs.Set(nova.db, 'instance_get', new_return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
body = {
"rebuild": {
@@ -417,8 +425,9 @@ class ServerActionsControllerTest(test.TestCase):
self.assertTrue('personality' not in body['server'])
def test_rebuild_admin_pass(self):
- new_return_server = return_server_with_attributes(image_ref='2')
+ new_return_server = return_server_with_attributes(id=1, image_ref='2')
self.stubs.Set(nova.db, 'instance_get', new_return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
body = {
"rebuild": {
@@ -438,8 +447,9 @@ class ServerActionsControllerTest(test.TestCase):
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
- new_return_server = return_server_with_attributes(image_ref='2')
+ new_return_server = return_server_with_attributes(id=1, image_ref='2')
self.stubs.Set(nova.db, 'instance_get', new_return_server)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
body = {
"rebuild": {
@@ -458,6 +468,7 @@ class ServerActionsControllerTest(test.TestCase):
def server_not_found(self, instance_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(nova.db, 'instance_get', server_not_found)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid', server_not_found)
body = {
"rebuild": {
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index 3540a006ca..45a726f39e 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -38,7 +38,6 @@ from nova.db.sqlalchemy.models import InstanceMetadata
from nova import flags
import nova.image.fake
import nova.rpc
-import nova.scheduler.api
from nova import test
from nova.tests import fake_network
from nova.tests.api.openstack import fakes
@@ -120,28 +119,6 @@ def return_servers_by_reservation_empty(context, reservation_id=""):
return []
-def return_servers_from_child_zones_empty(*args, **kwargs):
- return []
-
-
-def return_servers_from_child_zones(*args, **kwargs):
- class Server(object):
- pass
-
- zones = []
- for zone in xrange(3):
- servers_list = []
- for server_id in xrange(5):
- server = Server()
- server._info = fakes.stub_instance(
- server_id, reservation_id="child",
- project_id='fake_project')
- servers_list.append(server)
-
- zones.append(("Zone%d" % zone, servers_list))
- return zones
-
-
def return_security_group(context, instance_id, security_group_id):
pass
@@ -192,28 +169,6 @@ class ServersControllerTest(test.TestCase):
spectacular=True)
def test_get_server_by_uuid(self):
- """
- The steps involved with resolving a UUID are pretty complicated;
- here's what's happening in this scenario:
-
- 1. Show is calling `routing_get`
-
- 2. `routing_get` is wrapped by `reroute_compute` which does the work
- of resolving requests to child zones.
-
- 3. `reroute_compute` looks up the UUID by hitting the stub
- (returns_server_by_uuid)
-
-    4. Since the stub returns that the record exists, `reroute_compute`
-       considers the request to be 'zone local', so it replaces the UUID
-       in the argument list with an integer ID and then calls the inner
-       function ('get').
-
-    5. The call to `get` hits the other stub `returns_server_by_id` which
-       has the UUID set to FAKE_UUID
-
- So, counterintuitively, we call `get` twice on the `show` command.
- """
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
res_dict = self.controller.show(req, FAKE_UUID)
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
@@ -312,6 +267,9 @@ class ServersControllerTest(test.TestCase):
new_return_server = return_server_with_attributes(
vm_state=vm_states.ACTIVE, progress=100)
self.stubs.Set(nova.db, 'instance_get', new_return_server)
+ new_return_server = return_server_with_attributes_by_uuid(
+ vm_state=vm_states.ACTIVE, progress=100)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
uuid = FAKE_UUID
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
@@ -383,6 +341,10 @@ class ServersControllerTest(test.TestCase):
vm_state=vm_states.ACTIVE, image_ref=image_ref,
flavor_id=flavor_id, progress=100)
self.stubs.Set(nova.db, 'instance_get', new_return_server)
+ new_return_server = return_server_with_attributes_by_uuid(
+ vm_state=vm_states.ACTIVE, image_ref=image_ref,
+ flavor_id=flavor_id, progress=100)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
uuid = FAKE_UUID
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
@@ -615,8 +577,6 @@ class ServersControllerTest(test.TestCase):
def test_get_server_list_with_reservation_id(self):
self.stubs.Set(nova.db, 'instance_get_all_by_reservation',
return_servers_by_reservation)
- self.stubs.Set(nova.scheduler.api, 'call_zone_method',
- return_servers_from_child_zones)
req = fakes.HTTPRequest.blank('/v2/fake/servers?reservation_id=foo')
res_dict = self.controller.index(req)
@@ -633,8 +593,6 @@ class ServersControllerTest(test.TestCase):
def test_get_server_list_with_reservation_id_empty(self):
self.stubs.Set(nova.db, 'instance_get_all_by_reservation',
return_servers_by_reservation_empty)
- self.stubs.Set(nova.scheduler.api, 'call_zone_method',
- return_servers_from_child_zones_empty)
req = fakes.HTTPRequest.blank('/v2/fake/servers/detail?'
'reservation_id=foo')
@@ -651,8 +609,6 @@ class ServersControllerTest(test.TestCase):
def test_get_server_list_with_reservation_id_details(self):
self.stubs.Set(nova.db, 'instance_get_all_by_reservation',
return_servers_by_reservation)
- self.stubs.Set(nova.scheduler.api, 'call_zone_method',
- return_servers_from_child_zones)
req = fakes.HTTPRequest.blank('/v2/fake/servers/detail?'
'reservation_id=foo')
@@ -963,7 +919,8 @@ class ServersControllerTest(test.TestCase):
self.assertNotEqual(search_opts, None)
# Allowed by user
self.assertTrue('name' in search_opts)
- self.assertTrue('status' in search_opts)
+ # OSAPI converts status to vm_state
+ self.assertTrue('vm_state' in search_opts)
# Allowed only by admins with admin API on
self.assertFalse('ip' in search_opts)
self.assertFalse('unknown_option' in search_opts)
@@ -989,7 +946,8 @@ class ServersControllerTest(test.TestCase):
self.assertNotEqual(search_opts, None)
# Allowed by user
self.assertTrue('name' in search_opts)
- self.assertTrue('status' in search_opts)
+ # OSAPI converts status to vm_state
+ self.assertTrue('vm_state' in search_opts)
# Allowed only by admins with admin API on
self.assertTrue('ip' in search_opts)
self.assertTrue('unknown_option' in search_opts)
@@ -1340,6 +1298,9 @@ class ServersControllerTest(test.TestCase):
new_return_server = return_server_with_attributes(
vm_state=vm_states.RESIZING)
self.stubs.Set(nova.db, 'instance_get', new_return_server)
+ new_return_server = return_server_with_attributes_by_uuid(
+ vm_state=vm_states.RESIZING)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
def instance_destroy_mock(context, id):
self.server_delete_called = True
@@ -1638,59 +1599,6 @@ class ServersControllerCreateTest(test.TestCase):
self.assertNotEqual(reservation_id, None)
self.assertTrue(len(reservation_id) > 1)
- def test_create_instance_with_user_supplied_reservation_id(self):
- """Non-admin supplied reservation_id should be ignored."""
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
- body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {'hello': 'world',
- 'open': 'stack'},
- 'personality': [],
- 'reservation_id': 'myresid',
- 'return_reservation_id': True
- }
- }
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.create(req, body)
-
- self.assertIn('reservation_id', res)
- self.assertNotEqual(res['reservation_id'], 'myresid')
-
- def test_create_instance_with_admin_supplied_reservation_id(self):
- """Admin supplied reservation_id should be honored."""
- image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
- flavor_ref = 'http://localhost/123/flavors/3'
- body = {
- 'server': {
- 'name': 'server_test',
- 'imageRef': image_href,
- 'flavorRef': flavor_ref,
- 'metadata': {'hello': 'world',
- 'open': 'stack'},
- 'personality': [],
- 'reservation_id': 'myresid',
- 'return_reservation_id': True
- }
- }
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers',
- use_admin_context=True)
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- res = self.controller.create(req, body)
-
- reservation_id = res['reservation_id']
- self.assertEqual(reservation_id, "myresid")
-
def test_create_instance_image_ref_is_bookmark(self):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/fake/images/%s' % image_uuid
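The two assertion flips above ('status' to 'vm_state') capture the behavior the comments name: the OSAPI layer translates the user-facing status filter into the internal vm_state before the search options reach the DB. A rough sketch of that translation; the mapping pairs below are assumptions for illustration, not the canonical OSAPI table:

    # Illustrative mapping only -- the real table lives in the OSAPI code.
    _STATUS_TO_VM_STATE = {
        'ACTIVE': 'active',
        'BUILD': 'building',
        'RESIZE': 'resizing',
        'ERROR': 'error',
    }

    def translate_status(search_opts):
        # Rewrite a user-facing ?status= filter into the vm_state
        # value the DB layer actually filters on.
        status = search_opts.pop('status', None)
        if status is not None:
            search_opts['vm_state'] = _STATUS_TO_VM_STATE[status.upper()]
        return search_opts

    print(translate_status({'name': 'x', 'status': 'active'}))
    # {'vm_state': 'active', 'name': 'x'}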
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index d2de4ba404..fb514c5402 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -23,7 +23,6 @@ from nova.compute import instance_types
from nova.compute import vm_states
from nova.scheduler import distributed_scheduler
from nova.scheduler import host_manager
-from nova.scheduler import zone_manager
COMPUTE_NODES = [
@@ -60,7 +59,6 @@ INSTANCES = [
class FakeDistributedScheduler(distributed_scheduler.DistributedScheduler):
def __init__(self, *args, **kwargs):
super(FakeDistributedScheduler, self).__init__(*args, **kwargs)
- self.zone_manager = zone_manager.ZoneManager()
self.host_manager = host_manager.HostManager()
diff --git a/nova/tests/scheduler/test_distributed_scheduler.py b/nova/tests/scheduler/test_distributed_scheduler.py
index f385ef7b73..a0deffe7bf 100644
--- a/nova/tests/scheduler/test_distributed_scheduler.py
+++ b/nova/tests/scheduler/test_distributed_scheduler.py
@@ -16,55 +16,14 @@
Tests For Distributed Scheduler.
"""
-import json
-
from nova import context
-from nova import db
from nova import exception
-from nova.scheduler import distributed_scheduler
from nova.scheduler import least_cost
from nova.scheduler import host_manager
from nova import test
from nova.tests.scheduler import fakes
-def fake_call_zone_method(context, method, specs, zones):
- return [
- (1, [
- dict(weight=2, blob='AAAAAAA'),
- dict(weight=4, blob='BBBBBBB'),
- dict(weight=6, blob='CCCCCCC'),
- dict(weight=8, blob='DDDDDDD'),
- ]),
- (2, [
- dict(weight=10, blob='EEEEEEE'),
- dict(weight=12, blob='FFFFFFF'),
- dict(weight=14, blob='GGGGGGG'),
- dict(weight=16, blob='HHHHHHH'),
- ]),
- (3, [
- dict(weight=18, blob='IIIIIII'),
- dict(weight=20, blob='JJJJJJJ'),
- dict(weight=22, blob='KKKKKKK'),
- dict(weight=24, blob='LLLLLLL'),
- ]),
- ]
-
-
-def fake_zone_get_all(context):
- return [
- dict(id=1, api_url='zone1',
- username='admin', password='password',
- weight_offset=0.0, weight_scale=1.0),
- dict(id=2, api_url='zone2',
- username='admin', password='password',
- weight_offset=1000.0, weight_scale=1.0),
- dict(id=3, api_url='zone3',
- username='admin', password='password',
- weight_offset=0.0, weight_scale=1000.0),
- ]
-
-
def fake_filter_hosts(hosts, filter_properties):
return list(hosts)
@@ -72,25 +31,6 @@ def fake_filter_hosts(hosts, filter_properties):
class DistributedSchedulerTestCase(test.TestCase):
"""Test case for Distributed Scheduler."""
- def test_adjust_child_weights(self):
- """Make sure the weights returned by child zones are
- properly adjusted based on the scale/offset in the zone
- db entries.
- """
- sched = fakes.FakeDistributedScheduler()
- child_results = fake_call_zone_method(None, None, None, None)
- zones = fake_zone_get_all(None)
- weighted_hosts = sched._adjust_child_weights(child_results, zones)
- scaled = [130000, 131000, 132000, 3000]
- for weighted_host in weighted_hosts:
- w = weighted_host.weight
- if weighted_host.zone == 'zone1': # No change
- self.assertTrue(w < 1000.0)
- if weighted_host.zone == 'zone2': # Offset +1000
- self.assertTrue(w >= 1000.0 and w < 2000)
- if weighted_host.zone == 'zone3': # Scale x1000
- self.assertEqual(scaled.pop(0), w)
-
def test_run_instance_no_hosts(self):
"""
        Ensure empty hosts & child_zones result in NoValidHost exception.
@@ -99,9 +39,6 @@ class DistributedSchedulerTestCase(test.TestCase):
return []
sched = fakes.FakeDistributedScheduler()
- self.stubs.Set(sched, '_call_zone_method',
- _fake_empty_call_zone_method)
- self.stubs.Set(db, 'zone_get_all', fake_zone_get_all)
fake_context = context.RequestContext('user', 'project')
request_spec = {'instance_type': {'memory_mb': 1, 'root_gb': 1,
@@ -110,59 +47,6 @@ class DistributedSchedulerTestCase(test.TestCase):
self.assertRaises(exception.NoValidHost, sched.schedule_run_instance,
fake_context, request_spec)
- def test_run_instance_with_blob_hint(self):
- """
- Check the local/child zone routing in the run_instance() call.
- If the zone_blob hint was passed in, don't re-schedule.
- """
- self.schedule_called = False
- self.from_blob_called = False
- self.locally_called = False
- self.child_zone_called = False
-
- def _fake_schedule(*args, **kwargs):
- self.schedule_called = True
- return least_cost.WeightedHost(1, host='x')
-
- def _fake_make_weighted_host_from_blob(*args, **kwargs):
- self.from_blob_called = True
- return least_cost.WeightedHost(1, zone='x', blob='y')
-
- def _fake_provision_resource_locally(*args, **kwargs):
- # Tests that filter_properties is stripped
- self.assertNotIn('filter_properties', kwargs)
- self.locally_called = True
- return 1
-
- def _fake_ask_child_zone_to_create_instance(*args, **kwargs):
- self.child_zone_called = True
- return 2
-
- sched = fakes.FakeDistributedScheduler()
- self.stubs.Set(sched, '_schedule', _fake_schedule)
- self.stubs.Set(sched, '_make_weighted_host_from_blob',
- _fake_make_weighted_host_from_blob)
- self.stubs.Set(sched, '_provision_resource_locally',
- _fake_provision_resource_locally)
- self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
- _fake_ask_child_zone_to_create_instance)
- request_spec = {
- 'instance_properties': {},
- 'instance_type': {},
- 'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
- 'blob': "Non-None blob data",
- }
-
- fake_context = context.RequestContext('user', 'project')
- instances = sched.schedule_run_instance(fake_context, request_spec,
- filter_properties={})
- self.assertTrue(instances)
- self.assertFalse(self.schedule_called)
- self.assertTrue(self.from_blob_called)
- self.assertTrue(self.child_zone_called)
- self.assertFalse(self.locally_called)
- self.assertEquals(instances, [2])
-
def test_run_instance_non_admin(self):
"""Test creating an instance locally using run_instance, passing
a non-admin context. DB actions should work."""
@@ -192,14 +76,6 @@ class DistributedSchedulerTestCase(test.TestCase):
self.assertRaises(NotImplementedError, sched._schedule, fake_context,
"foo", {})
- def test_schedule_no_instance_type(self):
- """Parameter checking."""
- sched = fakes.FakeDistributedScheduler()
- request_spec = {'instance_properties': {}}
- fake_context = context.RequestContext('user', 'project')
- self.assertRaises(NotImplementedError, sched._schedule, fake_context,
- "compute", request_spec=request_spec)
-
def test_schedule_happy_day(self):
"""Make sure there's nothing glaringly wrong with _schedule()
by doing a happy day pass through."""
@@ -208,9 +84,9 @@ class DistributedSchedulerTestCase(test.TestCase):
def _fake_weighted_sum(functions, hosts, options):
self.next_weight += 2.0
- host, hostinfo = hosts[0]
- return least_cost.WeightedHost(self.next_weight, host=host,
- hostinfo=hostinfo)
+ host_state = hosts[0]
+ return least_cost.WeightedHost(self.next_weight,
+ host_state=host_state)
sched = fakes.FakeDistributedScheduler()
fake_context = context.RequestContext('user', 'project',
@@ -219,87 +95,24 @@ class DistributedSchedulerTestCase(test.TestCase):
self.stubs.Set(sched.host_manager, 'filter_hosts',
fake_filter_hosts)
self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum)
- self.stubs.Set(db, 'zone_get_all', fake_zone_get_all)
- self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
-
- request_spec = {'num_instances': 10,
- 'instance_type': {'memory_mb': 512, 'root_gb': 512,
- 'ephemeral_gb': 0},
- 'instance_properties': {'project_id': 1}}
- self.mox.ReplayAll()
- weighted_hosts = sched._schedule(fake_context, 'compute',
- request_spec)
- self.mox.VerifyAll()
- self.assertEquals(len(weighted_hosts), 10)
- for weighted_host in weighted_hosts:
- # We set this up so remote hosts have even weights ...
- if int(weighted_host.weight) % 2 == 0:
- self.assertTrue(weighted_host.zone is not None)
- self.assertTrue(weighted_host.host_state is None)
- else:
- self.assertTrue(weighted_host.host_state is not None)
- self.assertTrue(weighted_host.zone is None)
-
- def test_schedule_local_zone(self):
- """Test to make sure _schedule makes no call out to zones if
- local_zone_only in the filter_properties is True.
- """
-
- self.next_weight = 1.0
-
- def _fake_weighted_sum(functions, hosts, options):
- self.next_weight += 2.0
- host = hosts[0]
- return least_cost.WeightedHost(self.next_weight, host_state=host)
-
- sched = fakes.FakeDistributedScheduler()
- fake_context = context.RequestContext('user', 'project',
- is_admin=True)
-
fakes.mox_host_manager_db_calls(self.mox, fake_context)
- self.stubs.Set(sched.host_manager, 'filter_hosts',
- fake_filter_hosts)
- self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum)
- self.stubs.Set(db, 'zone_get_all', fake_zone_get_all)
- self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
-
request_spec = {'num_instances': 10,
'instance_type': {'memory_mb': 512, 'root_gb': 512,
- 'ephemeral_gb': 256},
+ 'ephemeral_gb': 0,
+ 'vcpus': 1},
'instance_properties': {'project_id': 1,
- 'memory_mb': 512,
'root_gb': 512,
+ 'memory_mb': 512,
'ephemeral_gb': 0,
'vcpus': 1}}
- filter_properties = {'local_zone_only': True}
self.mox.ReplayAll()
weighted_hosts = sched._schedule(fake_context, 'compute',
- request_spec, filter_properties=filter_properties)
+ request_spec)
self.mox.VerifyAll()
self.assertEquals(len(weighted_hosts), 10)
for weighted_host in weighted_hosts:
- # There should be no remote hosts
self.assertTrue(weighted_host.host_state is not None)
- self.assertTrue(weighted_host.zone is None)
-
- def test_decrypt_blob(self):
- """Test that the decrypt method works."""
-
- fixture = fakes.FakeDistributedScheduler()
- test_data = {'weight': 1, 'host': 'x', 'blob': 'y', 'zone': 'z'}
-
- class StubDecryptor(object):
- def decryptor(self, key):
- return lambda blob: blob
-
- self.stubs.Set(distributed_scheduler, 'crypto', StubDecryptor())
-
- weighted_host = fixture._make_weighted_host_from_blob(
- json.dumps(test_data))
- self.assertTrue(isinstance(weighted_host, least_cost.WeightedHost))
- self.assertEqual(weighted_host.to_dict(), dict(weight=1, host='x',
- blob='y', zone='z'))
def test_get_cost_functions(self):
self.flags(reserved_host_memory_mb=128)
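Most of what this file loses is child-zone plumbing: the fake_zone_get_all() rows with weight_offset/weight_scale and the test_adjust_child_weights case that checked them. For the record, the adjustment those fixtures exercised was linear; the exact form below is inferred from the deleted zone2 assertion (offset +1000 keeps weights in [1000, 2000)) and is an assumption, not code from the tree:

    def adjust_child_weight(raw_weight, zone):
        # Inferred linear form: cooked = offset + scale * raw.
        return zone['weight_offset'] + zone['weight_scale'] * raw_weight

    zone2 = dict(weight_offset=1000.0, weight_scale=1.0)
    for raw in (10, 12, 14, 16):           # zone2 weights from the old fake
        cooked = adjust_child_weight(raw, zone2)
        assert 1000.0 <= cooked < 2000.0   # mirrors the deleted assertion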
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 7f8645db88..67f607b073 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -72,16 +72,6 @@ class SchedulerManagerTestCase(test.TestCase):
result = self.manager.get_host_list(self.context)
self.assertEqual(result, expected)
- def test_get_zone_list(self):
- expected = 'fake_zones'
-
- self.mox.StubOutWithMock(self.manager.driver, 'get_zone_list')
- self.manager.driver.get_zone_list().AndReturn(expected)
-
- self.mox.ReplayAll()
- result = self.manager.get_zone_list(self.context)
- self.assertEqual(result, expected)
-
def test_get_service_capabilities(self):
expected = 'fake_service_capabs'
@@ -142,18 +132,6 @@ class SchedulerManagerTestCase(test.TestCase):
self.manager.noexist(self.context, self.topic,
*self.fake_args, **self.fake_kwargs)
- def test_select(self):
- expected = 'fake_select'
-
- self.mox.StubOutWithMock(self.manager.driver, 'select')
- self.manager.driver.select(self.context,
- *self.fake_args, **self.fake_kwargs).AndReturn(expected)
-
- self.mox.ReplayAll()
- result = self.manager.select(self.context, *self.fake_args,
- **self.fake_kwargs)
- self.assertEqual(result, expected)
-
def test_show_host_resources(self):
host = 'fake_host'
@@ -260,16 +238,6 @@ class SchedulerTestCase(test.TestCase):
result = self.driver.get_host_list()
self.assertEqual(result, expected)
- def test_get_zone_list(self):
- expected = 'fake_zones'
-
- self.mox.StubOutWithMock(self.driver.zone_manager, 'get_zone_list')
- self.driver.zone_manager.get_zone_list().AndReturn(expected)
-
- self.mox.ReplayAll()
- result = self.driver.get_zone_list()
- self.assertEqual(result, expected)
-
def test_get_service_capabilities(self):
expected = 'fake_service_capabs'
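The deleted get_zone_list and select cases follow the same mox rhythm as every surviving test in this file: stub a method, record the expected call and return value, ReplayAll(), exercise, VerifyAll(). A standalone sketch of that rhythm; FakeDriver is invented for the example, while the mox calls are the library's real API:

    import mox

    class FakeDriver(object):
        def get_host_list(self):
            return ['real-host']

    m = mox.Mox()
    driver = FakeDriver()
    m.StubOutWithMock(driver, 'get_host_list')    # record phase
    driver.get_host_list().AndReturn('fake_hosts')
    m.ReplayAll()                                 # switch to replay
    assert driver.get_host_list() == 'fake_hosts'
    m.VerifyAll()                                 # every expectation met
    m.UnsetStubs()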
diff --git a/nova/tests/scheduler/test_zone_manager.py b/nova/tests/scheduler/test_zone_manager.py
deleted file mode 100644
index 364384c1c3..0000000000
--- a/nova/tests/scheduler/test_zone_manager.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright 2011 OpenStack LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For ZoneManager
-"""
-
-import mox
-
-from nova import db
-from nova import flags
-from nova.scheduler import zone_manager
-from nova import test
-
-FLAGS = flags.FLAGS
-
-
-def _create_zone(zone_id=1, name=None, api_url=None, username=None):
- if api_url is None:
- api_url = "http://foo.com"
- if username is None:
- username = "user1"
- if name is None:
- name = "child1"
- return dict(id=zone_id, name=name, api_url=api_url,
- username=username, password="pass1", weight_offset=0.0,
- weight_scale=1.0)
-
-
-def exploding_novaclient(zone):
- """Used when we want to simulate a novaclient call failing."""
- raise Exception("kaboom")
-
-
-class ZoneManagerTestCase(test.TestCase):
- """Test case for zone manager"""
-
- zone_manager_cls = zone_manager.ZoneManager
- zone_state_cls = zone_manager.ZoneState
-
- def setUp(self):
- super(ZoneManagerTestCase, self).setUp()
- self.zone_manager = self.zone_manager_cls()
-
- def _create_zone_state(self, zone_id=1, name=None, api_url=None,
- username=None):
- zone = self.zone_state_cls()
- zone.zone_info = _create_zone(zone_id, name, api_url, username)
- return zone
-
- def test_update(self):
- zm = self.zone_manager
- self.mox.StubOutWithMock(zm, '_refresh_from_db')
- self.mox.StubOutWithMock(zm, '_poll_zones')
- zm._refresh_from_db(mox.IgnoreArg())
- zm._poll_zones()
-
- self.mox.ReplayAll()
- zm.update(None)
- self.mox.VerifyAll()
-
- def test_refresh_from_db_new(self):
- zone = _create_zone(zone_id=1, username='user1')
- self.mox.StubOutWithMock(db, 'zone_get_all')
- db.zone_get_all(mox.IgnoreArg()).AndReturn([zone])
-
- zm = self.zone_manager
- self.assertEquals(len(zm.zone_states), 0)
-
- self.mox.ReplayAll()
- zm._refresh_from_db(None)
- self.mox.VerifyAll()
-
- self.assertEquals(len(zm.zone_states), 1)
- self.assertIn(1, zm.zone_states)
- self.assertEquals(zm.zone_states[1].zone_info['username'], 'user1')
-
- def test_refresh_from_db_replace_existing(self):
- zone_state = self._create_zone_state(zone_id=1, username='user1')
- zm = self.zone_manager
- zm.zone_states[1] = zone_state
-
- zone = _create_zone(zone_id=1, username='user2')
- self.mox.StubOutWithMock(db, 'zone_get_all')
- db.zone_get_all(mox.IgnoreArg()).AndReturn([zone])
- self.assertEquals(len(zm.zone_states), 1)
-
- self.mox.ReplayAll()
- zm._refresh_from_db(None)
- self.mox.VerifyAll()
-
- self.assertEquals(len(zm.zone_states), 1)
- self.assertEquals(zm.zone_states[1].zone_info['username'], 'user2')
-
- def test_refresh_from_db_missing(self):
- zone_state = self._create_zone_state(zone_id=1, username='user1')
- zm = self.zone_manager
- zm.zone_states[1] = zone_state
-
- self.mox.StubOutWithMock(db, 'zone_get_all')
- db.zone_get_all(mox.IgnoreArg()).AndReturn([])
-
- self.assertEquals(len(zm.zone_states), 1)
-
- self.mox.ReplayAll()
- zm._refresh_from_db(None)
- self.mox.VerifyAll()
-
- self.assertEquals(len(zm.zone_states), 0)
-
- def test_refresh_from_db_add(self):
- zone_state = self._create_zone_state(zone_id=1, username='user1')
- zm = self.zone_manager
- zm.zone_states[1] = zone_state
-
- zone1 = _create_zone(zone_id=1, username='user1')
- zone2 = _create_zone(zone_id=2, username='user2')
- self.mox.StubOutWithMock(db, 'zone_get_all')
- db.zone_get_all(mox.IgnoreArg()).AndReturn([zone1, zone2])
-
- self.mox.ReplayAll()
- zm._refresh_from_db(None)
- self.mox.VerifyAll()
-
- self.assertEquals(len(zm.zone_states), 2)
- self.assertIn(1, zm.zone_states)
- self.assertIn(2, zm.zone_states)
- self.assertEquals(zm.zone_states[1].zone_info['username'], 'user1')
- self.assertEquals(zm.zone_states[2].zone_info['username'], 'user2')
-
- def test_refresh_from_db_add_and_delete(self):
- zone_state = self._create_zone_state(zone_id=1, username='user1')
- zm = self.zone_manager
- zm.zone_states[1] = zone_state
-
- zone2 = _create_zone(zone_id=2, username='user2')
- self.mox.StubOutWithMock(db, 'zone_get_all')
- db.zone_get_all(mox.IgnoreArg()).AndReturn([zone2])
-
- self.mox.ReplayAll()
- zm._refresh_from_db(None)
- self.mox.VerifyAll()
-
- self.assertEquals(len(zm.zone_states), 1)
- self.assertIn(2, zm.zone_states)
- self.assertEquals(zm.zone_states[2].zone_info['username'], 'user2')
-
- def test_poll_zone(self):
- zone_state = self._create_zone_state(zone_id=1, name='child1')
- zone_state.attempt = 1
-
- self.mox.StubOutWithMock(zone_state, 'call_novaclient')
- zone_state.call_novaclient().AndReturn(
- dict(name=zone_state.zone_info['name'],
- hairdresser='dietz'))
- self.assertDictMatch(zone_state.capabilities, {})
-
- self.mox.ReplayAll()
- zone_state.poll()
- self.mox.VerifyAll()
- self.assertEquals(zone_state.attempt, 0)
- self.assertDictMatch(zone_state.capabilities,
- dict(hairdresser='dietz'))
- self.assertTrue(zone_state.is_active)
-
- def test_poll_zones_with_failure(self):
- zone_state = self._create_zone_state(zone_id=1)
- zone_state.attempt = FLAGS.zone_failures_to_offline - 1
-
- self.mox.StubOutWithMock(zone_state, 'call_novaclient')
- zone_state.call_novaclient().AndRaise(Exception('foo'))
-
- self.mox.ReplayAll()
- zone_state.poll()
- self.mox.VerifyAll()
- self.assertEquals(zone_state.attempt, 3)
- self.assertFalse(zone_state.is_active)
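With test_zone_manager.py gone, the contract its poll tests pinned down deserves a one-screen summary: each failed call_novaclient() bumped a counter, FLAGS.zone_failures_to_offline consecutive failures marked the zone inactive, and a single success reset the counter and refreshed capabilities (minus the 'name' key). The toy model below is assumed from those deleted assertions, not lifted from the removed ZoneState class:

    class PollState(object):
        failures_to_offline = 3   # stand-in for FLAGS.zone_failures_to_offline

        def __init__(self):
            self.attempt = 0
            self.is_active = True
            self.capabilities = {}

        def poll(self, call_novaclient):
            try:
                info = call_novaclient()
            except Exception:
                self.attempt += 1
                if self.attempt >= self.failures_to_offline:
                    self.is_active = False
                return
            # Success: reset the failure count and refresh capabilities.
            self.attempt = 0
            self.is_active = True
            self.capabilities = dict((k, v) for k, v in info.items()
                                     if k != 'name')

    def exploding():
        raise Exception('kaboom')

    zs = PollState()
    zs.attempt = PollState.failures_to_offline - 1
    zs.poll(exploding)
    assert zs.attempt == 3 and not zs.is_active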
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index c6e3201d33..6cbfb5344e 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -2904,37 +2904,6 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['id'])
- def test_create_with_specified_reservation_id(self):
- """Verify building instances with a specified
- reservation_id results in the correct reservation_id
- being set
- """
-
- # We need admin context to be able to specify our own
- # reservation_ids.
- context = self.context.elevated()
- # 1 instance
- (refs, resv_id) = self.compute_api.create(context,
- instance_types.get_default_instance_type(), None,
- min_count=1, max_count=1, reservation_id='meow')
- try:
- self.assertEqual(len(refs), 1)
- self.assertEqual(resv_id, 'meow')
- finally:
- self.assertEqual(refs[0]['reservation_id'], resv_id)
-
- # 2 instances
- (refs, resv_id) = self.compute_api.create(context,
- instance_types.get_default_instance_type(), None,
- min_count=2, max_count=2, reservation_id='woof')
- try:
- self.assertEqual(len(refs), 2)
- self.assertEqual(resv_id, 'woof')
- finally:
- for instance in refs:
- self.assertEqual(instance['reservation_id'], resv_id)
- db.instance_destroy(self.context, refs[0]['id'])
-
def test_instance_name_template(self):
"""Test the instance_name template"""
self.flags(instance_name_template='instance-%d')
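The reservation_id tests deleted here and in test_servers.py above encoded one small policy that leaves with zones: only an admin (elevated) context could pin its own reservation_id, while anyone else got a generated one. A sketch of that rule with invented names, not the compute API's actual signature:

    import uuid

    def effective_reservation_id(is_admin, requested_id=None):
        # Admin-supplied ids are honored; everything else is replaced
        # with a freshly generated reservation id.
        if is_admin and requested_id:
            return requested_id
        return 'r-%s' % uuid.uuid4().hex[:10]

    assert effective_reservation_id(True, 'meow') == 'meow'
    assert effective_reservation_id(False, 'myresid') != 'myresid'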