author     Soren Hansen <soren.hansen@rackspace.com>  2010-12-14 18:11:30 +0100
committer  Soren Hansen <soren.hansen@rackspace.com>  2010-12-14 18:11:30 +0100
commit     31c1407f7fe39b5a366e75b3131769efbfb50b57 (patch)
tree       44a5e436b947934676cd1e3b7f1d0b1f28a062a8
parent     bfe019e0de486eea09e4702262cd228791a4694c (diff)
parent     911f11139926a772a5d90b8ec65cc4cb7ad850e9 (diff)
download   nova-31c1407f7fe39b5a366e75b3131769efbfb50b57.tar.gz
Merge trunk
-rw-r--r--  .bzrignore  10
-rw-r--r--  Authors  4
-rwxr-xr-x  bin/nova-compute  2
-rwxr-xr-x  bin/nova-instancemonitor  2
-rwxr-xr-x  bin/nova-manage  11
-rwxr-xr-x  bin/nova-network  2
-rwxr-xr-x  bin/nova-objectstore  2
-rwxr-xr-x  bin/nova-scheduler  2
-rwxr-xr-x  bin/nova-volume  2
-rwxr-xr-x  contrib/nova.sh  11
-rw-r--r--  doc/source/adminguide/index.rst  6
-rw-r--r--  doc/source/adminguide/managing.networks.rst  17
-rw-r--r--  doc/source/adminguide/multi.node.install.rst  43
-rw-r--r--  doc/source/adminguide/network.vlan.rst  8
-rw-r--r--  doc/source/adminguide/single.node.install.rst  24
-rw-r--r--  doc/source/cloud101.rst  9
-rw-r--r--  doc/source/devref/cloudpipe.rst  2
-rw-r--r--  doc/source/images/novascreens.png  bin 0 -> 27949 bytes
-rw-r--r--  doc/source/images/novashvirtually.png  bin 0 -> 39000 bytes
-rw-r--r--  doc/source/index.rst  4
-rw-r--r--  doc/source/livecd.rst  46
-rw-r--r--  doc/source/nova.concepts.rst  38
-rw-r--r--  nova/api/cloud.py  58
-rw-r--r--  nova/api/ec2/cloud.py  282
-rw-r--r--  nova/api/openstack/__init__.py  19
-rw-r--r--  nova/api/openstack/auth.py  26
-rw-r--r--  nova/api/openstack/images.py  9
-rw-r--r--  nova/api/openstack/servers.py  214
-rw-r--r--  nova/auth/ldapdriver.py  138
-rw-r--r--  nova/auth/manager.py  4
-rw-r--r--  nova/auth/nova_openldap.schema  84
-rw-r--r--  nova/auth/nova_sun.schema  16
-rwxr-xr-x  nova/auth/opendj.sh  119
-rw-r--r--  nova/auth/openssh-lpk_openldap.schema  19
-rw-r--r--  nova/auth/openssh-lpk_sun.schema  10
-rwxr-xr-x  nova/auth/slap.sh  112
-rw-r--r--  nova/compute/api.py  310
-rw-r--r--  nova/compute/instance_types.py  20
-rw-r--r--  nova/compute/manager.py  51
-rw-r--r--  nova/db/base.py  36
-rw-r--r--  nova/db/sqlalchemy/api.py  9
-rw-r--r--  nova/db/sqlalchemy/models.py  3
-rw-r--r--  nova/flags.py  4
-rw-r--r--  nova/image/local.py  2
-rw-r--r--  nova/manager.py  10
-rw-r--r--  nova/network/linux_net.py  2
-rw-r--r--  nova/quota.py  5
-rw-r--r--  nova/tests/api/openstack/fakes.py  19
-rw-r--r--  nova/tests/api/openstack/test_adminapi.py  61
-rw-r--r--  nova/tests/api/openstack/test_auth.py  7
-rw-r--r--  nova/tests/api/openstack/test_servers.py  31
-rw-r--r--  nova/tests/cloud_unittest.py  13
-rw-r--r--  nova/tests/compute_unittest.py  29
-rw-r--r--  nova/tests/misc_unittest.py  26
-rw-r--r--  nova/tests/quota_unittest.py  16
-rw-r--r--  nova/tests/virt_unittest.py  39
-rw-r--r--  nova/virt/connection.py  4
-rw-r--r--  nova/virt/libvirt_conn.py  18
-rw-r--r--  nova/virt/xenapi.py  444
-rw-r--r--  nova/virt/xenapi/__init__.py  15
-rw-r--r--  nova/virt/xenapi/network_utils.py  45
-rw-r--r--  nova/virt/xenapi/vm_utils.py  268
-rw-r--r--  nova/virt/xenapi/vmops.py  146
-rw-r--r--  nova/virt/xenapi/volumeops.py  32
-rw-r--r--  nova/virt/xenapi_conn.py  242
-rw-r--r--  setup.py  1
-rw-r--r--  smoketests/admin_smoketests.py  92
-rw-r--r--  smoketests/base.py (renamed from smoketests/novatestcase.py)  114
-rw-r--r--  smoketests/flags.py  13
-rw-r--r--  smoketests/smoketest.py  566
-rw-r--r--  smoketests/user_smoketests.py  326
-rw-r--r--  tools/pip-requires  1
72 files changed, 2468 insertions, 1907 deletions
diff --git a/.bzrignore b/.bzrignore
index ab099d3e32..82db46fa22 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -1,3 +1,13 @@
run_tests.err.log
.nova-venv
ChangeLog
+_trial_temp
+keys
+networks
+nova.sqlite
+CA/cacert.pem
+CA/index.txt*
+CA/openssl.cnf
+CA/serial*
+CA/newcerts/*.pem
+CA/private/cakey.pem
diff --git a/Authors b/Authors
index ef1a535ca9..4a526d8491 100644
--- a/Authors
+++ b/Authors
@@ -3,6 +3,7 @@ Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Chris Behrens <cbehrens@codestud.com>
+Chmouel Boudjnah <chmouel@chmouel.com>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
Eric Day <eday@oddments.org>
@@ -20,8 +21,11 @@ Michael Gundlach <michael.gundlach@rackspace.com>
Monty Taylor <mordred@inaugust.com>
Paul Voccio <paul@openstack.org>
Rick Clark <rick@openstack.org>
+Ryan Lucio <rlucio@internap.com>
+Sandy Walsh <sandy.walsh@rackspace.com>
Soren Hansen <soren.hansen@rackspace.com>
Todd Willey <todd@ansolabs.com>
+Trey Morris <trey.morris@rackspace.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
diff --git a/bin/nova-compute b/bin/nova-compute
index a66477af53..ac6378f754 100755
--- a/bin/nova-compute
+++ b/bin/nova-compute
@@ -38,8 +38,8 @@ from nova import utils
if __name__ == '__main__':
+ utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
- utils.default_flagfile()
application = service.Service.create() # pylint: disable=C0103
diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor
index a7b7fb0c68..9b6c40e820 100755
--- a/bin/nova-instancemonitor
+++ b/bin/nova-instancemonitor
@@ -42,10 +42,10 @@ logging.getLogger('boto').setLevel(logging.WARN)
if __name__ == '__main__':
+ utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
- utils.default_flagfile()
logging.warn('Starting instance monitor')
# pylint: disable-msg=C0103
monitor = monitor.InstanceMonitor()
diff --git a/bin/nova-manage b/bin/nova-manage
index eb7c6b87b5..62eec8353f 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -359,9 +359,14 @@ class ProjectCommands(object):
def zipfile(self, project_id, user_id, filename='nova.zip'):
"""Exports credentials for project to a zip file
arguments: project_id user_id [filename='nova.zip]"""
- zip_file = self.manager.get_credentials(user_id, project_id)
- with open(filename, 'w') as f:
- f.write(zip_file)
+ try:
+ zip_file = self.manager.get_credentials(user_id, project_id)
+ with open(filename, 'w') as f:
+ f.write(zip_file)
+ except db.api.NoMoreNetworks:
+ print ('No more networks available. If this is a new '
+ 'installation, you need\nto call something like this:\n\n'
+ ' nova-manage network create 10.0.0.0/8 10 64\n\n')
class FloatingIpCommands(object):
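
The new error path above assumes the operator can recover by creating a network and then re-running the export. A minimal sketch of that sequence, using the hypothetical project irt and user anne from the installation guide::

    # Create a fixed-IP network first, as the error hint suggests
    # (10.0.0.0/8 carved into 10 networks of 64 addresses each).
    sudo nova-manage network create 10.0.0.0/8 10 64

    # Then export the project credentials to a zip file
    # (arguments: project_id user_id [filename]).
    sudo nova-manage project zipfile irt anne nova.zip
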
diff --git a/bin/nova-network b/bin/nova-network
index 342a63058c..d1fb552612 100755
--- a/bin/nova-network
+++ b/bin/nova-network
@@ -38,8 +38,8 @@ from nova import utils
if __name__ == '__main__':
+ utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
- utils.default_flagfile()
application = service.Service.create() # pylint: disable-msg=C0103
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index 728f2ee5bc..00ae27af93 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -42,8 +42,8 @@ FLAGS = flags.FLAGS
if __name__ == '__main__':
+ utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
- utils.default_flagfile()
application = handler.get_application() # pylint: disable-msg=C0103
diff --git a/bin/nova-scheduler b/bin/nova-scheduler
index 069b5a6fa9..4d1a40cf10 100755
--- a/bin/nova-scheduler
+++ b/bin/nova-scheduler
@@ -38,8 +38,8 @@ from nova import utils
if __name__ == '__main__':
+ utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
- utils.default_flagfile()
application = service.Service.create()
diff --git a/bin/nova-volume b/bin/nova-volume
index 26148b0ecc..e7281d6c0b 100755
--- a/bin/nova-volume
+++ b/bin/nova-volume
@@ -38,8 +38,8 @@ from nova import utils
if __name__ == '__main__':
+ utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
- utils.default_flagfile()
application = service.Service.create() # pylint: disable-msg=C0103
diff --git a/contrib/nova.sh b/contrib/nova.sh
index 7407422b3d..7eb934eca9 100755
--- a/contrib/nova.sh
+++ b/contrib/nova.sh
@@ -22,6 +22,8 @@ USE_MYSQL=${USE_MYSQL:-0}
MYSQL_PASS=${MYSQL_PASS:-nova}
TEST=${TEST:-0}
USE_LDAP=${USE_LDAP:-0}
+# Use OpenDJ instead of OpenLDAP when using LDAP
+USE_OPENDJ=${USE_OPENDJ:-0}
LIBVIRT_TYPE=${LIBVIRT_TYPE:-qemu}
NET_MAN=${NET_MAN:-VlanManager}
# NOTE(vish): If you are using FlatDHCP on multiple hosts, set the interface
@@ -46,7 +48,6 @@ cat >$NOVA_DIR/bin/nova.conf << NOVA_CONF_EOF
--verbose
--nodaemon
--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf
---FAKE_subdomain=ec2
--network_manager=nova.network.manager.$NET_MAN
--cc_host=$HOST_IP
--routing_source_ip=$HOST_IP
@@ -114,7 +115,13 @@ if [ "$CMD" == "run" ]; then
rm $NOVA_DIR/nova.sqlite
fi
if [ "$USE_LDAP" == 1 ]; then
- sudo $NOVA_DIR/nova/auth/slap.sh
+ if [ "$USE_OPENDJ" == 1 ]; then
+ echo '--ldap_user_dn=cn=Directory Manager' >> \
+ /etc/nova/nova-manage.conf
+ sudo $NOVA_DIR/nova/auth/opendj.sh
+ else
+ sudo $NOVA_DIR/nova/auth/slap.sh
+ fi
fi
rm -rf $NOVA_DIR/instances
mkdir -p $NOVA_DIR/instances
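
The USE_OPENDJ switch above only takes effect when LDAP is enabled. A hedged sketch of exercising both paths through the script's environment variables (the run command and variable names come from the script itself)::

    # Default: no LDAP server is set up.
    ./contrib/nova.sh run

    # LDAP via OpenLDAP (runs nova/auth/slap.sh).
    USE_LDAP=1 ./contrib/nova.sh run

    # LDAP via OpenDJ (runs nova/auth/opendj.sh and appends
    # --ldap_user_dn=cn=Directory Manager to /etc/nova/nova-manage.conf).
    USE_LDAP=1 USE_OPENDJ=1 ./contrib/nova.sh run
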
diff --git a/doc/source/adminguide/index.rst b/doc/source/adminguide/index.rst
index 51228b3191..736a154b21 100644
--- a/doc/source/adminguide/index.rst
+++ b/doc/source/adminguide/index.rst
@@ -38,14 +38,14 @@ There are two main tools that a system administrator will find useful to manage
nova.manage
euca2ools
-nova-manage may only be run by users with admin priviledges. euca2ools can be used by all users, though specific commands may be restricted by Role Based Access Control. You can read more about creating and managing users in :doc:`managing.users`
+The nova-manage command may only be run by users with admin privileges. Commands for euca2ools can be used by all users, though specific commands may be restricted by Role Based Access Control. You can read more about creating and managing users in :doc:`managing.users`
User and Resource Management
----------------------------
-nova-manage and euca2ools provide the basic interface to perform a broad range of administration functions. In this section, you can read more about how to accomplish specific administration tasks.
+The nova-manage and euca2ools commands provide the basic interface to perform a broad range of administration functions. In this section, you can read more about how to accomplish specific administration tasks.
-For background on the core objects refenced in this section, see :doc:`../object.model`
+For background on the core objects referenced in this section, see :doc:`../object.model`
.. toctree::
:maxdepth: 1
diff --git a/doc/source/adminguide/managing.networks.rst b/doc/source/adminguide/managing.networks.rst
index c8df471e84..38c1cba78e 100644
--- a/doc/source/adminguide/managing.networks.rst
+++ b/doc/source/adminguide/managing.networks.rst
@@ -20,25 +20,10 @@ Networking Overview
===================
In Nova, users organize their cloud resources in projects. A Nova project consists of a number of VM instances created by a user. For each VM instance, Nova assigns to it a private IP address. (Currently, Nova only supports Linux bridge networking that allows the virtual interfaces to connect to the outside network through the physical interface. Other virtual network technologies, such as Open vSwitch, could be supported in the future.) The Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network.
-..
- (perhaps some of this should be moved elsewhere)
- Introduction
- ------------
-
- Nova consists of seven main components, with the Cloud Controller component representing the global state and interacting with all other components. API Server acts as the Web services front end for the cloud controller. Compute Controller provides compute server resources, and the Object Store component provides storage services. Auth Manager provides authentication and authorization services. Volume Controller provides fast and permanent block-level storage for the comput servers. Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network. Scheduler selects the most suitable compute controller to host an instance.
-
- .. todo:: Insert Figure 1 image from "An OpenStack Network Overview" contributed by Citrix
-
- Nova is built on a shared-nothing, messaging-based architecture. All of the major components, that is Compute Controller, Volume Controller, Network Controller, and Object Store can be run on multiple servers. Cloud Controller communicates with Object Store via HTTP (Hyper Text Transfer Protocol), but it communicates with Scheduler, Network Controller, and Volume Controller via AMQP (Advanced Message Queue Protocol). To avoid blocking each component while waiting for a response, Nova uses asynchronous calls, with a call-back that gets triggered when a response is received.
-
- To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)
-
- .. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>_`.
-
Nova Network Strategies
-----------------------
-Currently, Nova supports three kinds of networks, implemented in three "Network Manager" types respectively: Flat Network Manager, Flat DHCP Network Manager, and VLAN Network Manager. The three kinds of networks can c-exist in a cloud system. However, the scheduler for selecting the type of network for a given project is not yet implemented. Here is a brief description of each of the different network strategies, with a focus on the VLAN Manager in a separate section.
+Currently, Nova supports three kinds of networks, implemented in three "Network Manager" types respectively: Flat Network Manager, Flat DHCP Network Manager, and VLAN Network Manager. The three kinds of networks can co-exist in a cloud system. However, the scheduler for selecting the type of network for a given project is not yet implemented. Here is a brief description of each of the different network strategies, with a focus on the VLAN Manager in a separate section.
Read more about Nova network strategies here:
diff --git a/doc/source/adminguide/multi.node.install.rst b/doc/source/adminguide/multi.node.install.rst
index dcceb539bf..fcb76c5e53 100644
--- a/doc/source/adminguide/multi.node.install.rst
+++ b/doc/source/adminguide/multi.node.install.rst
@@ -19,7 +19,7 @@ Installing Nova on Multiple Servers
===================================
When you move beyond evaluating the technology and into building an actual
-production environemnt, you will need to know how to configure your datacenter
+production environment, you will need to know how to configure your datacenter
and how to deploy components across your clusters. This guide should help you
through that process.
@@ -35,7 +35,6 @@ Requirements for a multi-node installation
* For a recommended HA setup, consider a MySQL master/slave replication, with as many slaves as you like, and probably a heartbeat to kick one of the slaves into being a master if it dies.
* For performance optimization, split reads and writes to the database. MySQL proxy is the easiest way to make this work if running MySQL.
-
Assumptions
^^^^^^^^^^^
@@ -69,14 +68,14 @@ Step 1 Use apt-get to get the latest code
It is highly likely that there will be errors when the nova services come up since they are not yet configured. Don't worry, you're only at step 1!
-Step 2 Setup configuration files (installed in /etc/nova)
+Step 2 Setup configuration file (installed in /etc/nova)
---------------------------------------------------------
Note: CC_ADDR=<the external IP address of your cloud controller>
-1. These need to be defined in EACH configuration file
+Nova development has consolidated all .conf files to nova.conf as of November 2010. References to specific .conf files may be ignored.
-::
+#. These need to be defined in the nova.conf configuration file::
--sql_connection=mysql://root:nova@$CC_ADDR/nova # location of nova sql db
--s3_host=$CC_ADDR # This is where nova is hosting the objectstore service, which
@@ -87,31 +86,14 @@ Note: CC_ADDR=<the external IP address of your cloud controller>
--ec2_url=http://$CC_ADDR:8773/services/Cloud
--network_manager=nova.network.manager.FlatManager # simple, no-vlan networking type
-
-2. nova-manage specific flags
-
-::
-
- --fixed_range=<network/prefix> # ip network to use for VM guests, ex 192.168.2.64/26
- --network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64
-
-
-3. nova-network specific flags
-
-::
-
--fixed_range=<network/prefix> # ip network to use for VM guests, ex 192.168.2.64/26
--network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64
-4. Create a nova group
-
-::
+#. Create a nova group::
sudo addgroup nova
-5. nova-objectstore specific flags < no specific config needed >
-
-Config files should be have their owner set to root:nova, and mode set to 0640, since they contain your MySQL server's root password.
+The Nova config file should have its owner set to root:nova, and mode set to 0640, since it contains your MySQL server's root password.
::
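
As a concrete sketch of the consolidated Step 2, a single /etc/nova/nova.conf for a flat-network setup might look like this (the addresses and ranges are placeholders; substitute your cloud controller's external IP for 192.168.0.10)::

    --sql_connection=mysql://root:nova@192.168.0.10/nova
    --s3_host=192.168.0.10
    --ec2_url=http://192.168.0.10:8773/services/Cloud
    --network_manager=nova.network.manager.FlatManager
    --fixed_range=192.168.2.64/26
    --network_size=64

Because the file holds the MySQL root password, lock it down as described::

    sudo chown root:nova /etc/nova/nova.conf
    sudo chmod 0640 /etc/nova/nova.conf
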
@@ -121,7 +103,7 @@ Config files should be have their owner set to root:nova, and mode set to 0640,
Step 3 Setup the sql db
-----------------------
-1. First you 'preseed' (using vishy's :doc:`../quickstart`). Run this as root.
+1. First you 'preseed' (using the Quick Start method :doc:`../quickstart`). Run this as root.
::
@@ -161,7 +143,7 @@ Step 3 Setup the sql db
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
SET PASSWORD FOR 'root'@'%' = PASSWORD('nova');
-7. branch and install Nova
+7. Branch and install Nova
::
@@ -186,9 +168,7 @@ Step 4 Setup Nova environment
Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.o.
-On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. We ended up doing this manually, (update query fired directly in the DB). Is there a better way to mark a network as bridged?
-
-Update: This has been resolved w.e.f 27/10. network is marked as bridged automatically based on the type of n/w manager selected.
+On running this command, entries are made in the 'networks' and 'fixed_ips' tables. One of the networks listed in the 'networks' table needs to be marked as a bridge so that the code knows a bridge exists; the network is marked as bridged automatically based on the type of network manager selected.
More networking details to create a network bridge for flat network
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -233,7 +213,6 @@ unzip them in your home directory, and add them to your environment::
echo ". creds/novarc" >> ~/.bashrc
~/.bashrc
-
Step 6 Restart all relevant services
------------------------------------
@@ -249,8 +228,8 @@ Restart relevant nova services::
.. todo:: do we still need the content below?
-Bare-metal Provisioning
------------------------
+Bare-metal Provisioning Notes
+-----------------------------
To install the base operating system you can use PXE booting.
diff --git a/doc/source/adminguide/network.vlan.rst b/doc/source/adminguide/network.vlan.rst
index 5bbc54bed5..c6c4e7f91e 100644
--- a/doc/source/adminguide/network.vlan.rst
+++ b/doc/source/adminguide/network.vlan.rst
@@ -50,7 +50,7 @@ The following diagram illustrates how the communication that occurs between the
Goals
-----
-* each project is in a protected network segment
+For our implementation of Nova, our goal is that each project is in a protected network segment. Here are the specifications we keep in mind for meeting this goal.
* RFC-1918 IP space
* public IP via NAT
@@ -59,19 +59,19 @@ Goals
* limited (project-admin controllable) access to other project segments
* all connectivity to instance and cloud API is via VPN into the project segment
-* common DMZ segment for support services (only visible from project segment)
+We also keep as a goal a common DMZ segment for support services, meaning these items are only visible from the project segment:
* metadata
* dashboard
-
Limitations
-----------
+We kept these limitations in mind:
+
* Projects / cluster limited to available VLANs in switching infrastructure
* Requires VPN for access to project segment
-
Implementation
--------------
Currently Nova segregates project VLANs using 802.1q VLAN tagging in the
diff --git a/doc/source/adminguide/single.node.install.rst b/doc/source/adminguide/single.node.install.rst
index 27597962aa..8572c5a4a5 100644
--- a/doc/source/adminguide/single.node.install.rst
+++ b/doc/source/adminguide/single.node.install.rst
@@ -9,7 +9,7 @@ The fastest way to get a test cloud running is through our :doc:`../quickstart`.
Step 1 and 2: Get the latest Nova code system software
------------------------------------------------------
-Depending on your system, the mehod for accomplishing this varies
+Depending on your system, the method for accomplishing this varies
.. toctree::
:maxdepth: 1
@@ -63,8 +63,20 @@ You see an access key and a secret key export, such as these made-up ones:::
export EC2_ACCESS_KEY=4e6498a2-blah-blah-blah-17d1333t97fd
export EC2_SECRET_KEY=0a520304-blah-blah-blah-340sp34k05bbe9a7
+Step 5: Create the network
+--------------------------
+
+Type or copy/paste in the following line to create a network prior to creating a project.
+
+::
+
+ sudo nova-manage network create 10.0.0.0/8 1 64
+
+For this command, the first argument is your network range in CIDR notation, such as 192.168.1.0/24. The value 1 is the total number of networks you want created, and 64 is the total number of IP addresses in all of the networks.
+
+After running this command, entries are made in the 'networks' and 'fixed_ips' table in the database.
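+
+For example, to use a single class C network instead of the 10.0.0.0/8 range above (a hedged variant; adjust the CIDR and counts to your environment)::
+
+  sudo nova-manage network create 192.168.1.0/24 1 256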
-Step 5: Create a project with the user you created
+Step 6: Create a project with the user you created
--------------------------------------------------
Type or copy/paste in the following line to create a project named IRT (for Ice Road Truckers, of course) with the newly-created user named anne.
@@ -94,7 +106,7 @@ Type or copy/paste in the following line to create a project named IRT (for Ice
Data Base Updated
-Step 6: Unzip the nova.zip
+Step 7: Unzip the nova.zip
--------------------------
You should have a nova.zip file in your current working directory. Unzip it with this command:
@@ -116,7 +128,7 @@ You'll see these files extract.
extracting: cacert.pem
-Step 7: Source the rc file
+Step 8: Source the rc file
--------------------------
Type or copy/paste the following to source the novarc file in your current working directory.
@@ -125,9 +137,9 @@ Type or copy/paste the following to source the novarc file in your current worki
. novarc
-Step 8: Pat yourself on the back :)
+Step 9: Pat yourself on the back :)
-----------------------------------
-Congratulations, your cloud is up and running, you’ve created an admin user, retrieved the user's credentials and put them in your environment.
+Congratulations, your cloud is up and running, you’ve created an admin user, created a network, retrieved the user's credentials and put them in your environment.
Now you need an image.
diff --git a/doc/source/cloud101.rst b/doc/source/cloud101.rst
index 87db5af1eb..7c79d2a70f 100644
--- a/doc/source/cloud101.rst
+++ b/doc/source/cloud101.rst
@@ -54,6 +54,8 @@ Cloud computing offers different service models depending on the capabilities a
The US-based National Institute of Standards and Technology offers definitions for cloud computing
and the service models that are emerging.
+These definitions are summarized from http://csrc.nist.gov/groups/SNS/cloud-computing/.
+
SaaS - Software as a Service
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -72,12 +74,15 @@ IaaS - Infrastructure as a Service
Provides infrastructure such as computer instances, network connections, and storage so that people
can run any software or operating system.
-.. todo:: Use definitions from http://csrc.nist.gov/groups/SNS/cloud-computing/ and attribute NIST
Types of Cloud Deployments
--------------------------
-.. todo:: describe public/private/hybrid/etc
+When you hear terms such as public cloud or private cloud, these refer to the deployment model for the cloud. A private cloud operates for a single organization, but can be managed on-premise or off-premise. A public cloud has an infrastructure that is available to the general public or a large industry group and is likely owned by a cloud services company.
+
+The NIST also defines a community cloud as infrastructure shared by several organizations in support of a specific community with shared concerns.
+
+A hybrid cloud can be a deployment model, as a composition of both public and private clouds, or a hybrid model for cloud computing may involve both virtual and physical servers.
Work in the Clouds
------------------
diff --git a/doc/source/devref/cloudpipe.rst b/doc/source/devref/cloudpipe.rst
index 31bd85e817..fb104c160e 100644
--- a/doc/source/devref/cloudpipe.rst
+++ b/doc/source/devref/cloudpipe.rst
@@ -21,7 +21,7 @@
Cloudpipe -- Per Project Vpns
=============================
-Cloudpipe is a method for connecting end users to their project insnances in vlan mode.
+Cloudpipe is a method for connecting end users to their project instances in vlan mode.
Overview
diff --git a/doc/source/images/novascreens.png b/doc/source/images/novascreens.png
new file mode 100644
index 0000000000..0fe3279cf8
--- /dev/null
+++ b/doc/source/images/novascreens.png
Binary files differ
diff --git a/doc/source/images/novashvirtually.png b/doc/source/images/novashvirtually.png
new file mode 100644
index 0000000000..02c7e767c2
--- /dev/null
+++ b/doc/source/images/novashvirtually.png
Binary files differ
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 9b2c8e1f8d..b9ba6208a4 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -26,7 +26,7 @@ Nova is written with the following design guidelines in mind:
* **Component based architecture**: Quickly add new behaviors
* **Highly available**: Scale to very serious workloads
-* **Fault-Tollerant**: Isloated processes avoid cascading failures
+* **Fault-Tolerant**: Isolated processes avoid cascading failures
* **Recoverable**: Failures should be easy to diagnose, debug, and rectify
* **Open Standards**: Be a reference implementation for a community-driven api
* **API Compatibility**: Nova strives to provide API-compatible with popular systems like Amazon EC2
@@ -62,8 +62,6 @@ Administrator's Documentation
adminguide/single.node.install
adminguide/multi.node.install
-.. todo:: add swiftadmin
-
Developer Docs
==============
diff --git a/doc/source/livecd.rst b/doc/source/livecd.rst
index 82cf4658a0..b355fa1801 100644
--- a/doc/source/livecd.rst
+++ b/doc/source/livecd.rst
@@ -1,2 +1,48 @@
Installing the Live CD
======================
+
+If you'd like to set up a sandbox installation of Nova, you can use one of these Live CD images.
+
+If you don't already have VirtualBox installed, you can download it from http://www.virtualbox.org/wiki/Downloads.
+
+Download the zip or iso file and then follow these steps to try Nova in a virtual environment.
+
+http://c0047913.cdn1.cloudfiles.rackspacecloud.com/OpenStackNova.x86_64-2010.1.2.iso (OpenSUSE image; root password is "linux" for this image)
+
+http://c0028699.cdn1.cloudfiles.rackspacecloud.com/nova-vm.zip (~900 MB) (log in information is nova/nova)
+
+Once a VM is configured and started, here are the basics:
+
+ #. Login to Ubuntu using ID nova and Password nova.
+
+ #. Switch to running as sudo (enter nova when prompted for the password)::
+
+ sudo -s
+
+ #. To run Nova for the first time, enter::
+
+ cd /var/openstack/
+
+ #. Now that you're in the correct directory, enter::
+
+ ./nova.sh run
+
+ .. image:: images/novashvirtually.png
+
+If it's already running, use screen -ls, and when the nova screen is presented, enter screen -d -r nova.
+
+These are the steps to get an instance running (the image is already provided in this environment). Enter these commands in the "test" screen.
+
+::
+
+ euca-add-keypair test > test.pem
+ chmod 600 test.pem
+ euca-run-instances -k test -t m1.tiny ami-tiny
+ euca-describe-instances
+
+ ssh -i test.pem root@10.0.0.3
+
+To see output from the various workers, switch screen windows with Ctrl+A " (quotation mark).
+
+ .. image:: images/novascreens.png
+
diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst
index ddf0f1b829..18368546bd 100644
--- a/doc/source/nova.concepts.rst
+++ b/doc/source/nova.concepts.rst
@@ -23,13 +23,13 @@ Nova Concepts and Introduction
Introduction
------------
-Nova is the software that controls your Infrastructure as as Service (IaaS)
+Nova, also known as OpenStack Compute, is the software that controls your Infrastructure as a Service (IaaS)
cloud computing platform. It is similar in scope to Amazon EC2 and Rackspace
-CloudServers. Nova does not include any virtualization software, rather it
+Cloud Servers. Nova does not include any virtualization software, rather it
defines drivers that interact with underlying virtualization mechanisms that
run on your host operating system, and exposes functionality over a web API.
-This document does not attempt to explain fundamental concepts of cloud
+This site does not attempt to explain fundamental concepts of cloud
computing, IaaS, virtualization, or other related technologies. Instead, it
focuses on describing how Nova's implementation of those concepts is achieved.
@@ -64,6 +64,19 @@ Concept: Instances
An 'instance' is a word for a virtual machine that runs inside the cloud.
+Concept: System Architecture
+----------------------------
+
+Nova consists of seven main components, with the Cloud Controller component representing the global state and interacting with all other components. API Server acts as the Web services front end for the cloud controller. Compute Controller provides compute server resources, and the Object Store component provides storage services. Auth Manager provides authentication and authorization services. Volume Controller provides fast and permanent block-level storage for the compute servers. Network Controller provides virtual networks to enable compute servers to interact with each other and with the public network. Scheduler selects the most suitable compute controller to host an instance.
+
+ .. image:: images/Novadiagram.png
+
+Nova is built on a shared-nothing, messaging-based architecture. All of the major components, that is Compute Controller, Volume Controller, Network Controller, and Object Store can be run on multiple servers. Cloud Controller communicates with Object Store via HTTP (Hyper Text Transfer Protocol), but it communicates with Scheduler, Network Controller, and Volume Controller via AMQP (Advanced Message Queue Protocol). To avoid blocking each component while waiting for a response, Nova uses asynchronous calls, with a call-back that gets triggered when a response is received.
+
+To achieve the shared-nothing property with multiple copies of the same component, Nova keeps all the cloud system state in a distributed data store. Updates to system state are written into this store, using atomic transactions when required. Requests for system state are read out of this store. In limited cases, the read results are cached within controllers for short periods of time (for example, the current list of system users.)
+
+ .. note:: The database schema is available on the `OpenStack Wiki <http://wiki.openstack.org/NovaDatabaseSchema>`_.
+
Concept: Storage
----------------
@@ -104,9 +117,9 @@ Concept: API
Concept: Networking
-------------------
-Nova has a concept of Fixed Ips and Floating ips. Fixed ips are assigned to an instance on creation and stay the same until the instance is explicitly terminated. Floating ips are ip addresses that can be dynamically associated with an instance. This address can be disassociated and associated with another instance at any time.
+Nova has a concept of Fixed IPs and Floating IPs. Fixed IPs are assigned to an instance on creation and stay the same until the instance is explicitly terminated. Floating IPs are IP addresses that can be dynamically associated with an instance; a floating address can be disassociated and associated with another instance at any time.
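+
+As a hedged illustration with euca2ools (the instance ID and addresses here are made up), a floating IP is allocated, attached to a running instance, and later detached or released::
+
+  euca-allocate-address                          # returns an address, e.g. 10.1.1.25
+  euca-associate-address -i i-00000003 10.1.1.25
+  euca-disassociate-address 10.1.1.25
+  euca-release-address 10.1.1.25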
-There are multiple strategies available for implementing fixed ips:
+There are multiple strategies available for implementing fixed IPs:
Flat Mode
~~~~~~~~~
@@ -116,7 +129,7 @@ The simplest networking mode. Each instance receives a fixed ip from the pool.
Flat DHCP Mode
~~~~~~~~~~~~~~
-This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode nova does a bit more configuration, it will attempt to bridge into an ethernet device (eth0 by default). It will also run dnsmasq as a dhcpserver listening on this bridge. Instances receive their fixed ips by doing a dhcpdiscover.
+This is similar to the flat mode, in that all instances are attached to the same bridge. In this mode Nova does a bit more configuration: it will attempt to bridge into an ethernet device (eth0 by default), and it will also run dnsmasq as a DHCP server listening on this bridge. Instances receive their fixed IPs by doing a dhcpdiscover.
VLAN DHCP Mode
~~~~~~~~~~~~~~
@@ -150,7 +163,7 @@ See doc:`nova.manage` in the Administration Guide for more details.
Concept: Flags
--------------
-python-gflags
+Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth.
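+
+A hedged sketch of what a per-service flag file such as /etc/nova/nova-network.conf might contain (the flag names appear elsewhere in this documentation; the values are placeholders)::
+
+  --verbose
+  --network_manager=nova.network.manager.VlanManager
+  --fixed_range=10.0.0.0/8
+  --network_size=64
+
+The same flags can also be passed on the command line when the service is started.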
Concept: Plugins
@@ -187,8 +200,17 @@ Concept: Scheduler
Concept: Security Groups
------------------------
-Security groups
+In Nova, a security group is a named collection of network access rules, like firewall policies. These access rules specify which incoming network traffic should be delivered to all VM instances in the group, all other incoming traffic being discarded. Users can modify rules for a group at any time. The new rules are automatically enforced for all running instances and instances launched from then on.
+
+When launching VM instances, the project manager specifies which security groups the instance should join. The instance becomes a member of the specified security groups when it is launched. If no groups are specified, the instance is assigned to the default group, which by default allows all network traffic from other members of this group and discards traffic from other IP addresses and groups. If this does not meet a user's needs, the user can modify the rule settings of the default group.
+
+A security group can be thought of as a security profile or a security role - it promotes the good practice of managing firewalls by role, not by machine. For example, a user could stipulate that servers with the "webapp" role must be able to connect to servers with the "mysql" role on port 3306. Going further with the security profile analogy, an instance can be launched with membership of multiple security groups - similar to a server with multiple roles. Because all rules in security groups are ACCEPT rules, it's trivial to combine them.
+
+Each rule in a security group must specify the source of packets to be allowed, which can either be a subnet anywhere on the Internet (in CIDR notation, with 0.0.0.0/0 representing the entire Internet) or another security group. In the latter case, the source security group can be any user's group. This makes it easy to grant selective access to one user's instances from instances run by the user's friends, partners, and vendors.
+
+The creation of rules with other security groups specified as sources helps users deal with dynamic IP addressing. Without this feature, the user would have to adjust the security groups each time a new instance is launched. This practice would become cumbersome if an application running in Nova is very dynamic and elastic, for example scaling up or down frequently.
+Security groups for a VM are passed at launch time by the cloud controller to the compute node, and applied at the compute node when a VM is started.
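
A hedged euca2ools walk-through of the rules described above (the group names, project id, key name, and ports are hypothetical, and the source-group option spellings follow the ec2-api-tools convention, so they may differ between euca2ools versions)::

    # Create a group for database servers and one for web servers.
    euca-add-group -d "MySQL servers" mysql
    euca-add-group -d "Web application servers" webapp

    # Allow SSH to the web servers from anywhere on the Internet.
    euca-authorize -P tcp -p 22 -s 0.0.0.0/0 webapp

    # Allow port 3306 on the mysql group only from members of webapp.
    euca-authorize -P tcp -p 3306 -o webapp -u myproject mysql

    # Launch an instance directly into the webapp group.
    euca-run-instances -k mykey -g webapp ami-tiny
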
Concept: Certificate Authority
------------------------------
diff --git a/nova/api/cloud.py b/nova/api/cloud.py
deleted file mode 100644
index b8f15019f4..0000000000
--- a/nova/api/cloud.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Methods for API calls to control instances via AMQP.
-"""
-
-
-from nova import db
-from nova import flags
-from nova import rpc
-
-FLAGS = flags.FLAGS
-
-
-def reboot(instance_id, context=None):
- """Reboot the given instance."""
- instance_ref = db.instance_get_by_internal_id(context, instance_id)
- host = instance_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "reboot_instance",
- "args": {"instance_id": instance_ref['id']}})
-
-
-def rescue(instance_id, context):
- """Rescue the given instance."""
- instance_ref = db.instance_get_by_internal_id(context, instance_id)
- host = instance_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "rescue_instance",
- "args": {"instance_id": instance_ref['id']}})
-
-
-def unrescue(instance_id, context):
- """Unrescue the given instance."""
- instance_ref = db.instance_get_by_internal_id(context, instance_id)
- host = instance_ref['host']
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "unrescue_instance",
- "args": {"instance_id": instance_ref['id']}})
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index a5acd1f6d8..8375c4399b 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -39,9 +39,8 @@ from nova import flags
from nova import quota
from nova import rpc
from nova import utils
-from nova.compute.instance_types import INSTANCE_TYPES
-from nova.api import cloud
-from nova.image.s3 import S3ImageService
+from nova.compute import api as compute_api
+from nova.compute import instance_types
FLAGS = flags.FLAGS
@@ -50,11 +49,6 @@ flags.DECLARE('storage_availability_zone', 'nova.volume.manager')
InvalidInputException = exception.InvalidInputException
-class QuotaError(exception.ApiError):
- """Quota Exceeeded"""
- pass
-
-
def _gen_key(context, user_id, key_name):
"""Generate a key
@@ -99,8 +93,9 @@ class CloudController(object):
"""
def __init__(self):
self.network_manager = utils.import_object(FLAGS.network_manager)
- self.compute_manager = utils.import_object(FLAGS.compute_manager)
- self.image_service = S3ImageService()
+ self.image_service = utils.import_object(FLAGS.image_service)
+ self.compute_api = compute_api.ComputeAPI(self.network_manager,
+ self.image_service)
self.setup()
def __str__(self):
@@ -124,10 +119,10 @@ class CloudController(object):
def _get_mpi_data(self, context, project_id):
result = {}
- for instance in db.instance_get_all_by_project(context, project_id):
+ for instance in self.compute_api.get_instances(context, project_id):
if instance['fixed_ip']:
line = '%s slots=%d' % (instance['fixed_ip']['address'],
- INSTANCE_TYPES[instance['instance_type']]['vcpus'])
+ instance['vcpus'])
key = str(instance['key_name'])
if key in result:
result[key].append(line)
@@ -260,7 +255,7 @@ class CloudController(object):
return True
def describe_security_groups(self, context, group_name=None, **kwargs):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
if context.user.is_admin():
groups = db.security_group_get_all(context)
else:
@@ -358,7 +353,7 @@ class CloudController(object):
return False
def revoke_security_group_ingress(self, context, group_name, **kwargs):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
@@ -383,7 +378,7 @@ class CloudController(object):
# for these operations, so support for newer API versions
# is sketchy.
def authorize_security_group_ingress(self, context, group_name, **kwargs):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
security_group = db.security_group_get_by_name(context,
context.project_id,
group_name)
@@ -419,7 +414,7 @@ class CloudController(object):
return source_project_id
def create_security_group(self, context, group_name, group_description):
- self._ensure_default_security_group(context)
+ self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError('group %s already exists' % group_name)
@@ -443,7 +438,7 @@ class CloudController(object):
# instance_id is passed in as a list of instances
ec2_id = instance_id[0]
internal_id = ec2_id_to_internal_id(ec2_id)
- instance_ref = db.instance_get_by_internal_id(context, internal_id)
+ instance_ref = self.compute_api.get_instance(context, internal_id)
output = rpc.call(context,
'%s.%s' % (FLAGS.compute_topic,
instance_ref['host']),
@@ -455,13 +450,15 @@ class CloudController(object):
"Timestamp": now,
"output": base64.b64encode(output)}
- def describe_volumes(self, context, **kwargs):
+ def describe_volumes(self, context, volume_id=None, **kwargs):
if context.user.is_admin():
volumes = db.volume_get_all(context)
else:
volumes = db.volume_get_all_by_project(context, context.project_id)
- volumes = [self._format_volume(context, v) for v in volumes]
+ # NOTE(vish): volume_id is an optional list of volume ids to filter by.
+ volumes = [self._format_volume(context, v) for v in volumes
+ if volume_id is None or v['ec2_id'] in volume_id]
return {'volumeSet': volumes}
@@ -505,9 +502,8 @@ class CloudController(object):
if quota.allowed_volumes(context, 1, size) < 1:
logging.warn("Quota exceeeded for %s, tried to create %sG volume",
context.project_id, size)
- raise QuotaError("Volume quota exceeded. You cannot "
- "create a volume of size %s" %
- size)
+ raise quota.QuotaError("Volume quota exceeded. You cannot "
+ "create a volume of size %s" % size)
vol = {}
vol['size'] = size
vol['user_id'] = context.user.id
@@ -541,7 +537,7 @@ class CloudController(object):
if volume_ref['attach_status'] == "attached":
raise exception.ApiError("Volume is already attached")
internal_id = ec2_id_to_internal_id(instance_id)
- instance_ref = db.instance_get_by_internal_id(context, internal_id)
+ instance_ref = self.compute_api.get_instance(context, internal_id)
host = instance_ref['host']
rpc.cast(context,
db.queue_get_for(context, FLAGS.compute_topic, host),
@@ -619,11 +615,7 @@ class CloudController(object):
instances = db.instance_get_all_by_reservation(context,
reservation_id)
else:
- if context.user.is_admin():
- instances = db.instance_get_all(context)
- else:
- instances = db.instance_get_all_by_project(context,
- context.project_id)
+ instances = self.compute_api.get_instances(context)
for instance in instances:
if not context.user.is_admin():
if instance['image_id'] == FLAGS.vpn_image_id:
@@ -699,8 +691,8 @@ class CloudController(object):
if quota.allowed_floating_ips(context, 1) < 1:
logging.warn("Quota exceeeded for %s, tried to allocate address",
context.project_id)
- raise QuotaError("Address quota exceeded. You cannot "
- "allocate any more addresses")
+ raise quota.QuotaError("Address quota exceeded. You cannot "
+ "allocate any more addresses")
network_topic = self._get_network_topic(context)
public_ip = rpc.call(context,
network_topic,
@@ -720,7 +712,7 @@ class CloudController(object):
def associate_address(self, context, instance_id, public_ip, **kwargs):
internal_id = ec2_id_to_internal_id(instance_id)
- instance_ref = db.instance_get_by_internal_id(context, internal_id)
+ instance_ref = self.compute_api.get_instance(context, internal_id)
fixed_address = db.instance_get_fixed_address(context,
instance_ref['id'])
floating_ip_ref = db.floating_ip_get_by_address(context, public_ip)
@@ -752,224 +744,49 @@ class CloudController(object):
"args": {"network_id": network_ref['id']}})
return db.queue_get_for(context, FLAGS.network_topic, host)
- def _ensure_default_security_group(self, context):
- try:
- db.security_group_get_by_name(context,
- context.project_id,
- 'default')
- except exception.NotFound:
- values = {'name': 'default',
- 'description': 'default',
- 'user_id': context.user.id,
- 'project_id': context.project_id}
- group = db.security_group_create(context, values)
-
def run_instances(self, context, **kwargs):
- instance_type = kwargs.get('instance_type', 'm1.small')
- if instance_type not in INSTANCE_TYPES:
- raise exception.ApiError("Unknown instance type: %s",
- instance_type)
- # check quota
- max_instances = int(kwargs.get('max_count', 1))
- min_instances = int(kwargs.get('min_count', max_instances))
- num_instances = quota.allowed_instances(context,
- max_instances,
- instance_type)
- if num_instances < min_instances:
- logging.warn("Quota exceeeded for %s, tried to run %s instances",
- context.project_id, min_instances)
- raise QuotaError("Instance quota exceeded. You can only "
- "run %s more instances of this type." %
- num_instances, "InstanceLimitExceeded")
- # make sure user can access the image
- # vpn image is private so it doesn't show up on lists
- vpn = kwargs['image_id'] == FLAGS.vpn_image_id
-
- if not vpn:
- image = self.image_service.show(context, kwargs['image_id'])
-
- # FIXME(ja): if image is vpn, this breaks
- # get defaults from imagestore
- image_id = image['imageId']
- kernel_id = image.get('kernelId', FLAGS.default_kernel)
- ramdisk_id = image.get('ramdiskId', FLAGS.default_ramdisk)
-
- # API parameters overrides of defaults
- kernel_id = kwargs.get('kernel_id', kernel_id)
- ramdisk_id = kwargs.get('ramdisk_id', ramdisk_id)
-
- if kernel_id == str(FLAGS.null_kernel):
- kernel_id = None
- ramdisk_id = None
-
- # make sure we have access to kernel and ramdisk
- if kernel_id:
- self.image_service.show(context, kernel_id)
- if ramdisk_id:
- self.image_service.show(context, ramdisk_id)
-
- logging.debug("Going to run %s instances...", num_instances)
- launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
- key_data = None
- if 'key_name' in kwargs:
- key_pair_ref = db.key_pair_get(context,
- context.user.id,
- kwargs['key_name'])
- key_data = key_pair_ref['public_key']
-
- security_group_arg = kwargs.get('security_group', ["default"])
- if not type(security_group_arg) is list:
- security_group_arg = [security_group_arg]
-
- security_groups = []
- self._ensure_default_security_group(context)
- for security_group_name in security_group_arg:
- group = db.security_group_get_by_name(context,
- context.project_id,
- security_group_name)
- security_groups.append(group['id'])
-
- reservation_id = utils.generate_uid('r')
- base_options = {}
- base_options['state_description'] = 'scheduling'
- base_options['image_id'] = image_id
- base_options['kernel_id'] = kernel_id or ''
- base_options['ramdisk_id'] = ramdisk_id or ''
- base_options['reservation_id'] = reservation_id
- base_options['key_data'] = key_data
- base_options['key_name'] = kwargs.get('key_name', None)
- base_options['user_id'] = context.user.id
- base_options['project_id'] = context.project_id
- base_options['user_data'] = kwargs.get('user_data', '')
-
- base_options['display_name'] = kwargs.get('display_name')
- base_options['display_description'] = kwargs.get('display_description')
-
- type_data = INSTANCE_TYPES[instance_type]
- base_options['instance_type'] = instance_type
- base_options['memory_mb'] = type_data['memory_mb']
- base_options['vcpus'] = type_data['vcpus']
- base_options['local_gb'] = type_data['local_gb']
- elevated = context.elevated()
-
- for num in range(num_instances):
-
- instance_ref = self.compute_manager.create_instance(context,
- security_groups,
- mac_address=utils.generate_mac(),
- launch_index=num,
- **base_options)
- inst_id = instance_ref['id']
-
- internal_id = instance_ref['internal_id']
- ec2_id = internal_id_to_ec2_id(internal_id)
-
- self.compute_manager.update_instance(context,
- inst_id,
- hostname=ec2_id)
-
- # TODO(vish): This probably should be done in the scheduler
- # or in compute as a call. The network should be
- # allocated after the host is assigned and setup
- # can happen at the same time.
- address = self.network_manager.allocate_fixed_ip(context,
- inst_id,
- vpn)
- network_topic = self._get_network_topic(context)
- rpc.cast(elevated,
- network_topic,
- {"method": "setup_fixed_ip",
- "args": {"address": address}})
-
- rpc.cast(context,
- FLAGS.scheduler_topic,
- {"method": "run_instance",
- "args": {"topic": FLAGS.compute_topic,
- "instance_id": inst_id}})
- logging.debug("Casting to scheduler for %s/%s's instance %s" %
- (context.project.name, context.user.name, inst_id))
- return self._format_run_instances(context, reservation_id)
+ max_count = int(kwargs.get('max_count', 1))
+ instances = self.compute_api.create_instances(context,
+ instance_types.get_by_type(kwargs.get('instance_type', None)),
+ kwargs['image_id'],
+ min_count=int(kwargs.get('min_count', max_count)),
+ max_count=max_count,
+ kernel_id=kwargs.get('kernel_id', None),
+ ramdisk_id=kwargs.get('ramdisk_id'),
+ display_name=kwargs.get('display_name'),
+ description=kwargs.get('display_description'),
+ key_name=kwargs.get('key_name'),
+ security_group=kwargs.get('security_group'),
+ generate_hostname=internal_id_to_ec2_id)
+ return self._format_run_instances(context,
+ instances[0]['reservation_id'])
def terminate_instances(self, context, instance_id, **kwargs):
"""Terminate each instance in instance_id, which is a list of ec2 ids.
-
- instance_id is a kwarg so its name cannot be modified.
- """
- ec2_id_list = instance_id
+ instance_id is a kwarg so its name cannot be modified."""
logging.debug("Going to start terminating instances")
- for id_str in ec2_id_list:
- internal_id = ec2_id_to_internal_id(id_str)
- logging.debug("Going to try and terminate %s" % id_str)
- try:
- instance_ref = db.instance_get_by_internal_id(context,
- internal_id)
- except exception.NotFound:
- logging.warning("Instance %s was not found during terminate",
- id_str)
- continue
-
- if (instance_ref['state_description'] == 'terminating'):
- logging.warning("Instance %s is already being terminated",
- id_str)
- continue
- now = datetime.datetime.utcnow()
- self.compute_manager.update_instance(context,
- instance_ref['id'],
- state_description='terminating',
- state=0,
- terminated_at=now)
-
- # FIXME(ja): where should network deallocate occur?
- address = db.instance_get_floating_address(context,
- instance_ref['id'])
- if address:
- logging.debug("Disassociating address %s" % address)
- # NOTE(vish): Right now we don't really care if the ip is
- # disassociated. We may need to worry about
- # checking this later. Perhaps in the scheduler?
- network_topic = self._get_network_topic(context)
- rpc.cast(context,
- network_topic,
- {"method": "disassociate_floating_ip",
- "args": {"floating_address": address}})
-
- address = db.instance_get_fixed_address(context,
- instance_ref['id'])
- if address:
- logging.debug("Deallocating address %s" % address)
- # NOTE(vish): Currently, nothing needs to be done on the
- # network node until release. If this changes,
- # we will need to cast here.
- self.network_manager.deallocate_fixed_ip(context.elevated(),
- address)
-
- host = instance_ref['host']
- if host:
- rpc.cast(context,
- db.queue_get_for(context, FLAGS.compute_topic, host),
- {"method": "terminate_instance",
- "args": {"instance_id": instance_ref['id']}})
- else:
- db.instance_destroy(context, instance_ref['id'])
+ for ec2_id in instance_id:
+ internal_id = ec2_id_to_internal_id(ec2_id)
+ self.compute_api.delete_instance(context, internal_id)
return True
def reboot_instances(self, context, instance_id, **kwargs):
"""instance_id is a list of instance ids"""
for ec2_id in instance_id:
internal_id = ec2_id_to_internal_id(ec2_id)
- cloud.reboot(internal_id, context=context)
+ self.compute_api.reboot(context, internal_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
internal_id = ec2_id_to_internal_id(instance_id)
- cloud.rescue(internal_id, context=context)
+ self.compute_api.rescue(context, internal_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
internal_id = ec2_id_to_internal_id(instance_id)
- cloud.unrescue(internal_id, context=context)
+ self.compute_api.unrescue(context, internal_id)
return True
def update_instance(self, context, ec2_id, **kwargs):
@@ -980,7 +797,7 @@ class CloudController(object):
changes[field] = kwargs[field]
if changes:
internal_id = ec2_id_to_internal_id(ec2_id)
- inst = db.instance_get_by_internal_id(context, internal_id)
+ inst = self.compute_api.get_instance(context, internal_id)
db.instance_update(context, inst['id'], kwargs)
return True
@@ -1000,8 +817,11 @@ class CloudController(object):
return True
def describe_images(self, context, image_id=None, **kwargs):
- imageSet = self.image_service.index(context, image_id)
- return {'imagesSet': imageSet}
+ # Note: image_id is a list!
+ images = self.image_service.index(context)
+ if image_id:
+ images = filter(lambda x: x['imageId'] in image_id, images)
+ return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
self.image_service.deregister(context, image_id)
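
Since run_instances, terminate_instances, reboot_instances, and the describe calls above now delegate to nova.compute.api (and describe_volumes/describe_images accept optional id lists), the usual euca2ools commands still exercise these code paths end to end. A hedged smoke test, with made-up instance, volume, and image ids::

    euca-run-instances -k test -t m1.tiny ami-tiny
    euca-describe-instances
    euca-reboot-instances i-00000003
    euca-describe-volumes vol-00000001      # optional id filter added in this change
    euca-describe-images ami-tiny           # optional id filter added in this change
    euca-terminate-instances i-00000003
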
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 1dd3ba770e..b9ecbd9b8d 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -25,10 +25,12 @@ import time
import logging
import routes
+import traceback
import webob.dec
import webob.exc
import webob
+from nova import context
from nova import flags
from nova import utils
from nova import wsgi
@@ -47,6 +49,10 @@ flags.DEFINE_string('nova_api_auth',
'nova.api.openstack.auth.BasicApiAuthManager',
'The auth mechanism to use for the OpenStack API implemenation')
+flags.DEFINE_bool('allow_admin_api',
+ False,
+ 'When True, this API service will accept admin operations.')
+
class API(wsgi.Middleware):
"""WSGI entry point for all OpenStack API requests."""
@@ -61,6 +67,7 @@ class API(wsgi.Middleware):
return req.get_response(self.application)
except Exception as ex:
logging.warn("Caught error: %s" % str(ex))
+ logging.debug(traceback.format_exc())
exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
return faults.Fault(exc)
@@ -82,9 +89,7 @@ class AuthMiddleware(wsgi.Middleware):
if not user:
return faults.Fault(webob.exc.HTTPUnauthorized())
- if 'nova.context' not in req.environ:
- req.environ['nova.context'] = {}
- req.environ['nova.context']['user'] = user
+ req.environ['nova.context'] = context.RequestContext(user, user)
return self.application
@@ -119,12 +124,12 @@ class RateLimitingMiddleware(wsgi.Middleware):
If the request should be rate limited, return a 413 status with a
Retry-After header giving the time when the request would succeed.
"""
- user_id = req.environ['nova.context']['user']['id']
action_name = self.get_action_name(req)
if not action_name:
# Not rate limited
return self.application
- delay = self.get_delay(action_name, user_id)
+ delay = self.get_delay(action_name,
+ req.environ['nova.context'].user_id)
if delay:
# TODO(gundlach): Get the retry-after format correct.
exc = webob.exc.HTTPRequestEntityTooLarge(
@@ -181,6 +186,10 @@ class APIRouter(wsgi.Router):
mapper.resource("sharedipgroup", "sharedipgroups",
controller=sharedipgroups.Controller())
+ if FLAGS.allow_admin_api:
+ logging.debug("Including admin operations in API.")
+ # TODO: Place routes for admin operations here.
+
super(APIRouter, self).__init__(mapper)
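
For now the new allow_admin_api flag only gates a log message (the TODO above). A purely hypothetical sketch of what registering admin routes on the same mapper could look like once they exist; the admin_accounts module and the resource names are invented for illustration, and this would sit inside APIRouter.__init__ where mapper and FLAGS are in scope:

    if FLAGS.allow_admin_api:
        logging.debug("Including admin operations in API.")
        # Hypothetical example resource; the real routes are still a TODO above.
        mapper.resource("account", "accounts",
                        controller=admin_accounts.Controller())
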
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index ff428ff701..fcda97ab19 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -23,10 +23,7 @@ class Context(object):
class BasicApiAuthManager(object):
""" Implements a somewhat rudimentary version of OpenStack Auth"""
- def __init__(self, host=None, db_driver=None):
- if not host:
- host = FLAGS.host
- self.host = host
+ def __init__(self, db_driver=None):
if not db_driver:
db_driver = FLAGS.db_driver
self.db = utils.import_object(db_driver)
@@ -47,7 +44,7 @@ class BasicApiAuthManager(object):
except KeyError:
return faults.Fault(webob.exc.HTTPUnauthorized())
- token, user = self._authorize_user(username, key)
+ token, user = self._authorize_user(username, key, req)
if user and token:
res = webob.Response()
res.headers['X-Auth-Token'] = token.token_hash
@@ -77,13 +74,16 @@ class BasicApiAuthManager(object):
if delta.days >= 2:
self.db.auth_destroy_token(self.context, token)
else:
- #TODO(gundlach): Why not just return dict(id=token.user_id)?
- user = self.auth.get_user(token.user_id)
- return {'id': user.id}
+ return self.auth.get_user(token.user_id)
return None
- def _authorize_user(self, username, key):
- """ Generates a new token and assigns it to a user """
+ def _authorize_user(self, username, key, req):
+ """Generates a new token and assigns it to a user.
+
+ username - string
+ key - string API key
+ req - webob.Request object
+ """
user = self.auth.get_user_from_access_key(key)
if user and user.name == username:
token_hash = hashlib.sha1('%s%s%f' % (username, key,
@@ -91,12 +91,10 @@ class BasicApiAuthManager(object):
token_dict = {}
token_dict['token_hash'] = token_hash
token_dict['cdn_management_url'] = ''
- token_dict['server_management_url'] = self._get_server_mgmt_url()
+ # Same as auth url, e.g. http://foo.org:8774/baz/v1.0
+ token_dict['server_management_url'] = req.url
token_dict['storage_url'] = ''
token_dict['user_id'] = user.id
token = self.db.auth_create_token(self.context, token_dict)
return token, user
return None, None
-
- def _get_server_mgmt_url(self):
- return 'https://%s/v1.0/' % self.host
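
The net effect of the auth changes above, mirrored from the updated unit tests further down: the token handshake is unchanged, but X-Server-Management-Url is now derived from the URL the request actually arrived on instead of a configured host. The credentials below are the test fixtures, not real ones, and assume the auth backend knows them.

    import webob
    import nova.api

    req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'api.example.com:8774'})
    req.headers['X-Auth-User'] = 'herp'     # test user name
    req.headers['X-Auth-Key'] = 'derp'      # test API access key
    res = req.get_response(nova.api.API('os'))
    # On success: 204 No Content, a 40-character X-Auth-Token header, and
    # X-Server-Management-Url set to http://api.example.com:8774/v1.0/
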
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index cdbdc9bdd8..4a0a8e6f1c 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -17,7 +17,6 @@
from webob import exc
-from nova import context
from nova import flags
from nova import utils
from nova import wsgi
@@ -47,10 +46,8 @@ class Controller(wsgi.Controller):
def detail(self, req):
"""Return all public images in detail."""
- user_id = req.environ['nova.context']['user']['id']
- ctxt = context.RequestContext(user_id, user_id)
try:
- images = self._service.detail(ctxt)
+ images = self._service.detail(req.environ['nova.context'])
images = nova.api.openstack.limited(images, req)
except NotImplementedError:
# Emulate detail() using repeated calls to show()
@@ -61,9 +58,7 @@ class Controller(wsgi.Controller):
def show(self, req, id):
"""Return data about the given image id."""
- user_id = req.environ['nova.context']['user']['id']
- ctxt = context.RequestContext(user_id, user_id)
- return dict(image=self._service.show(ctxt, id))
+ return dict(image=self._service.show(req.environ['nova.context'], id))
def delete(self, req, id):
# Only public images are supported for now.
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 1d8aa2fa4d..7704f48f18 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -15,34 +15,16 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
-import webob
from webob import exc
-from nova import flags
-from nova import rpc
-from nova import utils
+from nova import exception
from nova import wsgi
-from nova import context
-from nova.api import cloud
from nova.api.openstack import faults
+from nova.auth import manager as auth_manager
+from nova.compute import api as compute_api
from nova.compute import instance_types
from nova.compute import power_state
import nova.api.openstack
-import nova.image.service
-
-FLAGS = flags.FLAGS
-
-
-def _filter_params(inst_dict):
- """ Extracts all updatable parameters for a server update request """
- keys = dict(name='name', admin_pass='adminPass')
- new_attrs = {}
- for k, v in keys.items():
- if v in inst_dict:
- new_attrs[k] = inst_dict[v]
- return new_attrs
def _entity_list(entities):
@@ -63,7 +45,7 @@ def _entity_detail(inst):
inst_dict = {}
mapped_keys = dict(status='state', imageId='image_id',
- flavorId='instance_type', name='server_name', id='id')
+ flavorId='instance_type', name='display_name', id='internal_id')
for k, v in mapped_keys.iteritems():
inst_dict[k] = inst[v]
@@ -78,7 +60,7 @@ def _entity_detail(inst):
def _entity_inst(inst):
""" Filters all model attributes save for id and name """
- return dict(server=dict(id=inst['id'], name=inst['server_name']))
+ return dict(server=dict(id=inst['internal_id'], name=inst['display_name']))
class Controller(wsgi.Controller):
@@ -88,14 +70,10 @@ class Controller(wsgi.Controller):
'application/xml': {
"attributes": {
"server": ["id", "imageId", "name", "flavorId", "hostId",
- "status", "progress", "progress"]}}}
+ "status", "progress"]}}}
- def __init__(self, db_driver=None):
- if not db_driver:
- db_driver = FLAGS.db_driver
- self.db_driver = utils.import_object(db_driver)
- self.network_manager = utils.import_object(FLAGS.network_manager)
- self.compute_manager = utils.import_object(FLAGS.compute_manager)
+ def __init__(self):
+ self.compute_api = compute_api.ComputeAPI()
super(Controller, self).__init__()
def index(self, req):
@@ -111,166 +89,80 @@ class Controller(wsgi.Controller):
entity_maker - either _entity_detail or _entity_inst
"""
- user_id = req.environ['nova.context']['user']['id']
- ctxt = context.RequestContext(user_id, user_id)
- instance_list = self.db_driver.instance_get_all_by_user(ctxt, user_id)
+ instance_list = self.compute_api.get_instances(
+ req.environ['nova.context'])
limited_list = nova.api.openstack.limited(instance_list, req)
res = [entity_maker(inst)['server'] for inst in limited_list]
return _entity_list(res)
def show(self, req, id):
""" Returns server details by server id """
- user_id = req.environ['nova.context']['user']['id']
- ctxt = context.RequestContext(user_id, user_id)
- inst = self.db_driver.instance_get_by_internal_id(ctxt, int(id))
- if inst:
- if inst.user_id == user_id:
- return _entity_detail(inst)
- raise faults.Fault(exc.HTTPNotFound())
+ try:
+ instance = self.compute_api.get_instance(
+ req.environ['nova.context'], int(id))
+ return _entity_detail(instance)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
def delete(self, req, id):
""" Destroys a server """
- user_id = req.environ['nova.context']['user']['id']
- ctxt = context.RequestContext(user_id, user_id)
- instance = self.db_driver.instance_get_by_internal_id(ctxt, int(id))
- if instance and instance['user_id'] == user_id:
- self.db_driver.instance_destroy(ctxt, id)
- return faults.Fault(exc.HTTPAccepted())
- return faults.Fault(exc.HTTPNotFound())
+ try:
+ self.compute_api.delete_instance(req.environ['nova.context'],
+ int(id))
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
def create(self, req):
""" Creates a new server for a given user """
-
env = self._deserialize(req.body, req)
if not env:
return faults.Fault(exc.HTTPUnprocessableEntity())
- #try:
- inst = self._build_server_instance(req, env)
- #except Exception, e:
- # return faults.Fault(exc.HTTPUnprocessableEntity())
-
- user_id = req.environ['nova.context']['user']['id']
- rpc.cast(context.RequestContext(user_id, user_id),
- FLAGS.compute_topic,
- {"method": "run_instance",
- "args": {"instance_id": inst['id']}})
- return _entity_inst(inst)
+ key_pair = auth_manager.AuthManager.get_key_pairs(
+ req.environ['nova.context'])[0]
+ instances = self.compute_api.create_instances(
+ req.environ['nova.context'],
+ instance_types.get_by_flavor_id(env['server']['flavorId']),
+ env['server']['imageId'],
+ display_name=env['server']['name'],
+ description=env['server']['name'],
+ key_name=key_pair['name'],
+ key_data=key_pair['public_key'])
+ return _entity_inst(instances[0])
def update(self, req, id):
""" Updates the server name or password """
- user_id = req.environ['nova.context']['user']['id']
- ctxt = context.RequestContext(user_id, user_id)
-
inst_dict = self._deserialize(req.body, req)
-
if not inst_dict:
return faults.Fault(exc.HTTPUnprocessableEntity())
- instance = self.db_driver.instance_get_by_internal_id(ctxt, int(id))
- if not instance or instance.user_id != user_id:
- return faults.Fault(exc.HTTPNotFound())
+ update_dict = {}
+ if 'adminPass' in inst_dict['server']:
+ update_dict['admin_pass'] = inst_dict['server']['adminPass']
+ if 'name' in inst_dict['server']:
+ update_dict['display_name'] = inst_dict['server']['name']
- self.db_driver.instance_update(ctxt,
- int(id),
- _filter_params(inst_dict['server']))
- return faults.Fault(exc.HTTPNoContent())
+ try:
+ self.compute_api.update_instance(req.environ['nova.context'],
+ instance['id'],
+ **update_dict)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPNoContent()
def action(self, req, id):
- """ multi-purpose method used to reboot, rebuild, and
+ """ Multi-purpose method used to reboot, rebuild, and
resize a server """
- user_id = req.environ['nova.context']['user']['id']
- ctxt = context.RequestContext(user_id, user_id)
input_dict = self._deserialize(req.body, req)
try:
reboot_type = input_dict['reboot']['type']
except Exception:
- raise faults.Fault(webob.exc.HTTPNotImplemented())
- inst_ref = self.db.instance_get_by_internal_id(ctxt, int(id))
- if not inst_ref or (inst_ref and not inst_ref.user_id == user_id):
+ raise faults.Fault(exc.HTTPNotImplemented())
+ try:
+ # TODO(gundlach): pass reboot_type, support soft reboot in
+ # virt driver
+ self.compute_api.reboot(req.environ['nova.context'], id)
+ except:
return faults.Fault(exc.HTTPUnprocessableEntity())
- cloud.reboot(id)
-
- def _build_server_instance(self, req, env):
- """Build instance data structure and save it to the data store."""
- ltime = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
- inst = {}
-
- user_id = req.environ['nova.context']['user']['id']
- ctxt = context.RequestContext(user_id, user_id)
-
- flavor_id = env['server']['flavorId']
-
- instance_type, flavor = [(k, v) for k, v in
- instance_types.INSTANCE_TYPES.iteritems()
- if v['flavorid'] == flavor_id][0]
-
- image_id = env['server']['imageId']
- img_service = utils.import_object(FLAGS.image_service)
-
- image = img_service.show(image_id)
-
- if not image:
- raise Exception("Image not found")
-
- inst['server_name'] = env['server']['name']
- inst['image_id'] = image_id
- inst['user_id'] = user_id
- inst['launch_time'] = ltime
- inst['mac_address'] = utils.generate_mac()
- inst['project_id'] = user_id
-
- inst['state_description'] = 'scheduling'
- inst['kernel_id'] = image.get('kernelId', FLAGS.default_kernel)
- inst['ramdisk_id'] = image.get('ramdiskId', FLAGS.default_ramdisk)
- inst['reservation_id'] = utils.generate_uid('r')
-
- inst['display_name'] = env['server']['name']
- inst['display_description'] = env['server']['name']
-
- #TODO(dietz) this may be ill advised
- key_pair_ref = self.db_driver.key_pair_get_all_by_user(
- None, user_id)[0]
-
- inst['key_data'] = key_pair_ref['public_key']
- inst['key_name'] = key_pair_ref['name']
-
- #TODO(dietz) stolen from ec2 api, see TODO there
- inst['security_group'] = 'default'
-
- # Flavor related attributes
- inst['instance_type'] = instance_type
- inst['memory_mb'] = flavor['memory_mb']
- inst['vcpus'] = flavor['vcpus']
- inst['local_gb'] = flavor['local_gb']
- inst['mac_address'] = utils.generate_mac()
- inst['launch_index'] = 0
-
- ref = self.compute_manager.create_instance(ctxt, **inst)
- inst['id'] = ref['internal_id']
-
- inst['hostname'] = str(ref['internal_id'])
- self.compute_manager.update_instance(ctxt, inst['id'], **inst)
-
- address = self.network_manager.allocate_fixed_ip(ctxt,
- inst['id'])
-
- # TODO(vish): This probably should be done in the scheduler
- # network is setup when host is assigned
- network_topic = self._get_network_topic(ctxt)
- rpc.call(ctxt,
- network_topic,
- {"method": "setup_fixed_ip",
- "args": {"address": address}})
- return inst
-
- def _get_network_topic(self, context):
- """Retrieves the network host for a project"""
- network_ref = self.network_manager.get_network(context)
- host = network_ref['host']
- if not host:
- host = rpc.call(context,
- FLAGS.network_topic,
- {"method": "set_network_host",
- "args": {"network_id": network_ref['id']}})
- return self.db_driver.queue_get_for(context, FLAGS.network_topic, host)
+ return exc.HTTPAccepted()
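
Taken together, the rewrite above turns the servers controller into a thin layer over ComputeAPI. A rough usage sketch in that controller style (the flavor id, image id and names are illustrative; req is an incoming webob request with nova.context already set by AuthMiddleware):

    from nova.compute import api as compute_api
    from nova.compute import instance_types

    def create_and_reboot(req):
        """Illustrative controller-style flow using ComputeAPI."""
        compute = compute_api.ComputeAPI()
        ctxt = req.environ['nova.context']          # set by AuthMiddleware
        instances = compute.create_instances(
            ctxt,
            instance_types.get_by_flavor_id(1),     # flavorid 1 -> m1.tiny
            'ami-00001',                            # illustrative image id
            display_name='my-server',
            description='my-server')
        server_id = instances[0]['internal_id']     # id exposed by the OS API
        compute.reboot(ctxt, server_id)
        return server_id
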
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index ceade1d65c..c10939d743 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -40,6 +40,8 @@ flags.DEFINE_string('ldap_user_dn', 'cn=Manager,dc=example,dc=com',
flags.DEFINE_string('ldap_user_unit', 'Users', 'OID for Users')
flags.DEFINE_string('ldap_user_subtree', 'ou=Users,dc=example,dc=com',
'OU for Users')
+flags.DEFINE_boolean('ldap_user_modify_only', False,
+ 'Modify attributes for users instead of creating/deleting')
flags.DEFINE_string('ldap_project_subtree', 'ou=Groups,dc=example,dc=com',
'OU for Projects')
flags.DEFINE_string('role_project_subtree', 'ou=Groups,dc=example,dc=com',
@@ -89,8 +91,7 @@ class LdapDriver(object):
def get_user(self, uid):
"""Retrieve user by id"""
- attr = self.__find_object(self.__uid_to_dn(uid),
- '(objectclass=novaUser)')
+ attr = self.__get_ldap_user(uid)
return self.__to_user(attr)
def get_user_from_access_key(self, access):
@@ -110,7 +111,12 @@ class LdapDriver(object):
"""Retrieve list of users"""
attrs = self.__find_objects(FLAGS.ldap_user_subtree,
'(objectclass=novaUser)')
- return [self.__to_user(attr) for attr in attrs]
+ users = []
+ for attr in attrs:
+ user = self.__to_user(attr)
+ if user is not None:
+ users.append(user)
+ return users
def get_projects(self, uid=None):
"""Retrieve list of projects"""
@@ -125,21 +131,52 @@ class LdapDriver(object):
"""Create a user"""
if self.__user_exists(name):
raise exception.Duplicate("LDAP user %s already exists" % name)
- attr = [
- ('objectclass', ['person',
- 'organizationalPerson',
- 'inetOrgPerson',
- 'novaUser']),
- ('ou', [FLAGS.ldap_user_unit]),
- ('uid', [name]),
- ('sn', [name]),
- ('cn', [name]),
- ('secretKey', [secret_key]),
- ('accessKey', [access_key]),
- ('isAdmin', [str(is_admin).upper()]),
- ]
- self.conn.add_s(self.__uid_to_dn(name), attr)
- return self.__to_user(dict(attr))
+ if FLAGS.ldap_user_modify_only:
+ if self.__ldap_user_exists(name):
+ # Retrieve user by name
+ user = self.__get_ldap_user(name)
+ # Entry could be malformed, test for missing attrs.
+ # Malformed entries are useless, replace attributes found.
+ attr = []
+ if 'secretKey' in user.keys():
+ attr.append((self.ldap.MOD_REPLACE, 'secretKey', \
+ [secret_key]))
+ else:
+ attr.append((self.ldap.MOD_ADD, 'secretKey', \
+ [secret_key]))
+ if 'accessKey' in user.keys():
+ attr.append((self.ldap.MOD_REPLACE, 'accessKey', \
+ [access_key]))
+ else:
+ attr.append((self.ldap.MOD_ADD, 'accessKey', \
+ [access_key]))
+ if 'isAdmin' in user.keys():
+ attr.append((self.ldap.MOD_REPLACE, 'isAdmin', \
+ [str(is_admin).upper()]))
+ else:
+ attr.append((self.ldap.MOD_ADD, 'isAdmin', \
+ [str(is_admin).upper()]))
+ self.conn.modify_s(self.__uid_to_dn(name), attr)
+ return self.get_user(name)
+ else:
+ raise exception.NotFound("LDAP object for %s doesn't exist"
+ % name)
+ else:
+ attr = [
+ ('objectclass', ['person',
+ 'organizationalPerson',
+ 'inetOrgPerson',
+ 'novaUser']),
+ ('ou', [FLAGS.ldap_user_unit]),
+ ('uid', [name]),
+ ('sn', [name]),
+ ('cn', [name]),
+ ('secretKey', [secret_key]),
+ ('accessKey', [access_key]),
+ ('isAdmin', [str(is_admin).upper()]),
+ ]
+ self.conn.add_s(self.__uid_to_dn(name), attr)
+ return self.__to_user(dict(attr))
def create_project(self, name, manager_uid,
description=None, member_uids=None):
@@ -155,7 +192,7 @@ class LdapDriver(object):
if description is None:
description = name
members = []
- if member_uids != None:
+ if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.NotFound("Project can't be created "
@@ -256,7 +293,24 @@ class LdapDriver(object):
if not self.__user_exists(uid):
raise exception.NotFound("User %s doesn't exist" % uid)
self.__remove_from_all(uid)
- self.conn.delete_s(self.__uid_to_dn(uid))
+ if FLAGS.ldap_user_modify_only:
+ # Delete attributes
+ attr = []
+ # Retrieve user by name
+ user = self.__get_ldap_user(uid)
+ if 'secretKey' in user.keys():
+ attr.append((self.ldap.MOD_DELETE, 'secretKey', \
+ user['secretKey']))
+ if 'accessKey' in user.keys():
+ attr.append((self.ldap.MOD_DELETE, 'accessKey', \
+ user['accessKey']))
+ if 'isAdmin' in user.keys():
+ attr.append((self.ldap.MOD_DELETE, 'isAdmin', \
+ user['isAdmin']))
+ self.conn.modify_s(self.__uid_to_dn(uid), attr)
+ else:
+ # Delete entry
+ self.conn.delete_s(self.__uid_to_dn(uid))
def delete_project(self, project_id):
"""Delete a project"""
@@ -265,7 +319,7 @@ class LdapDriver(object):
self.__delete_group(project_dn)
def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
- """Modify an existing project"""
+ """Modify an existing user"""
if not access_key and not secret_key and admin is None:
return
attr = []
@@ -279,11 +333,21 @@ class LdapDriver(object):
def __user_exists(self, uid):
"""Check if user exists"""
- return self.get_user(uid) != None
+ return self.get_user(uid) is not None
+
+ def __ldap_user_exists(self, uid):
+ """Check if the user exists in ldap"""
+ return self.__get_ldap_user(uid) is not None
def __project_exists(self, project_id):
"""Check if project exists"""
- return self.get_project(project_id) != None
+ return self.get_project(project_id) is not None
+
+ def __get_ldap_user(self, uid):
+ """Retrieve LDAP user entry by id"""
+ attr = self.__find_object(self.__uid_to_dn(uid),
+ '(objectclass=novaUser)')
+ return attr
def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
@@ -330,12 +394,12 @@ class LdapDriver(object):
def __group_exists(self, dn):
"""Check if group exists"""
- return self.__find_object(dn, '(objectclass=groupOfNames)') != None
+ return self.__find_object(dn, '(objectclass=groupOfNames)') is not None
@staticmethod
def __role_to_dn(role, project_id=None):
"""Convert role to corresponding dn"""
- if project_id == None:
+ if project_id is None:
return FLAGS.__getitem__("ldap_%s" % role).value
else:
return 'cn=%s,cn=%s,%s' % (role,
@@ -349,7 +413,7 @@ class LdapDriver(object):
raise exception.Duplicate("Group can't be created because "
"group %s already exists" % name)
members = []
- if member_uids != None:
+ if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.NotFound("Group can't be created "
@@ -375,7 +439,7 @@ class LdapDriver(object):
res = self.__find_object(group_dn,
'(member=%s)' % self.__uid_to_dn(uid),
self.ldap.SCOPE_BASE)
- return res != None
+ return res is not None
def __add_to_group(self, uid, group_dn):
"""Add user to group"""
@@ -447,18 +511,22 @@ class LdapDriver(object):
@staticmethod
def __to_user(attr):
"""Convert ldap attributes to User object"""
- if attr == None:
+ if attr is None:
+ return None
+ if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() \
+ and 'isAdmin' in attr.keys()):
+ return {
+ 'id': attr['uid'][0],
+ 'name': attr['cn'][0],
+ 'access': attr['accessKey'][0],
+ 'secret': attr['secretKey'][0],
+ 'admin': (attr['isAdmin'][0] == 'TRUE')}
+ else:
return None
- return {
- 'id': attr['uid'][0],
- 'name': attr['cn'][0],
- 'access': attr['accessKey'][0],
- 'secret': attr['secretKey'][0],
- 'admin': (attr['isAdmin'][0] == 'TRUE')}
def __to_project(self, attr):
"""Convert ldap attributes to Project object"""
- if attr == None:
+ if attr is None:
return None
member_dns = attr.get('member', [])
return {
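
The new ldap_user_modify_only mode never adds or removes whole directory entries; it only maintains the Nova-specific attributes on user entries that already exist. A standalone python-ldap sketch of the same idea (the DN, bind credentials and key values are illustrative):

    import ldap

    conn = ldap.initialize('ldap://localhost')
    conn.simple_bind_s('cn=Manager,dc=example,dc=com', 'changeme')
    dn = 'uid=jdoe,ou=Users,dc=example,dc=com'

    # "Create" a Nova user: set only accessKey/secretKey/isAdmin on the entry.
    conn.modify_s(dn, [(ldap.MOD_REPLACE, 'accessKey', ['access']),
                       (ldap.MOD_REPLACE, 'secretKey', ['secret']),
                       (ldap.MOD_REPLACE, 'isAdmin', ['FALSE'])])

    # "Delete" the Nova user: strip those attributes, leave the entry in place.
    conn.modify_s(dn, [(ldap.MOD_DELETE, 'accessKey', None),
                       (ldap.MOD_DELETE, 'secretKey', None),
                       (ldap.MOD_DELETE, 'isAdmin', None)])
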
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 7b2b681616..11c3bd6dfb 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -624,6 +624,10 @@ class AuthManager(object):
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
+ @staticmethod
+ def get_key_pairs(context):
+ return db.key_pair_get_all_by_user(context.elevated(), context.user_id)
+
def get_credentials(self, user, project=None):
"""Get credential zip for user in project"""
if not isinstance(user, User):
diff --git a/nova/auth/nova_openldap.schema b/nova/auth/nova_openldap.schema
new file mode 100644
index 0000000000..4047361de4
--- /dev/null
+++ b/nova/auth/nova_openldap.schema
@@ -0,0 +1,84 @@
+#
+# Person object for Nova
+# inetorgperson with extra attributes
+# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
+#
+#
+
+# using internet experimental oid arc as per BP64 3.1
+objectidentifier novaSchema 1.3.6.1.3.1.666.666
+objectidentifier novaAttrs novaSchema:3
+objectidentifier novaOCs novaSchema:4
+
+attributetype (
+ novaAttrs:1
+ NAME 'accessKey'
+ DESC 'Key for accessing data'
+ EQUALITY caseIgnoreMatch
+ SUBSTR caseIgnoreSubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+ SINGLE-VALUE
+ )
+
+attributetype (
+ novaAttrs:2
+ NAME 'secretKey'
+ DESC 'Secret key'
+ EQUALITY caseIgnoreMatch
+ SUBSTR caseIgnoreSubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+ SINGLE-VALUE
+ )
+
+attributetype (
+ novaAttrs:3
+ NAME 'keyFingerprint'
+ DESC 'Fingerprint of private key'
+ EQUALITY caseIgnoreMatch
+ SUBSTR caseIgnoreSubstringsMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+ SINGLE-VALUE
+ )
+
+attributetype (
+ novaAttrs:4
+ NAME 'isAdmin'
+ DESC 'Is user an administrator?'
+ EQUALITY booleanMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
+ SINGLE-VALUE
+ )
+
+attributetype (
+ novaAttrs:5
+ NAME 'projectManager'
+ DESC 'Project Managers of a project'
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
+ )
+
+objectClass (
+ novaOCs:1
+ NAME 'novaUser'
+ DESC 'access and secret keys'
+ AUXILIARY
+ MUST ( uid )
+ MAY ( accessKey $ secretKey $ isAdmin )
+ )
+
+objectClass (
+ novaOCs:2
+ NAME 'novaKeyPair'
+ DESC 'Key pair for User'
+ SUP top
+ STRUCTURAL
+ MUST ( cn $ sshPublicKey $ keyFingerprint )
+ )
+
+objectClass (
+ novaOCs:3
+ NAME 'novaProject'
+ DESC 'Container for project'
+ SUP groupOfNames
+ STRUCTURAL
+ MUST ( cn $ projectManager )
+ )
diff --git a/nova/auth/nova_sun.schema b/nova/auth/nova_sun.schema
new file mode 100644
index 0000000000..e925e05e48
--- /dev/null
+++ b/nova/auth/nova_sun.schema
@@ -0,0 +1,16 @@
+#
+# Person object for Nova
+# inetorgperson with extra attributes
+# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
+# Modified for strict RFC 4512 compatibility by: Ryan Lane <ryan@ryandlane.com>
+#
+# using internet experimental oid arc as per BP64 3.1
+dn: cn=schema
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.1 NAME 'accessKey' DESC 'Key for accessing data' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.2 NAME 'secretKey' DESC 'Secret key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.3 NAME 'keyFingerprint' DESC 'Fingerprint of private key' EQUALITY caseIgnoreMatch SUBSTR caseIgnoreSubstringsMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE)
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.4 NAME 'isAdmin' DESC 'Is user an administrator?' EQUALITY booleanMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.7 SINGLE-VALUE )
+attributeTypes: ( 1.3.6.1.3.1.666.666.3.5 NAME 'projectManager' DESC 'Project Managers of a project' SYNTAX 1.3.6.1.4.1.1466.115.121.1.12 )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.1 NAME 'novaUser' DESC 'access and secret keys' SUP top AUXILIARY MUST ( uid ) MAY ( accessKey $ secretKey $ isAdmin ) )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.2 NAME 'novaKeyPair' DESC 'Key pair for User' SUP top STRUCTURAL MUST ( cn $ sshPublicKey $ keyFingerprint ) )
+objectClasses: ( 1.3.6.1.3.1.666.666.4.3 NAME 'novaProject' DESC 'Container for project' SUP groupOfNames STRUCTURAL MUST ( cn $ projectManager ) )
diff --git a/nova/auth/opendj.sh b/nova/auth/opendj.sh
new file mode 100755
index 0000000000..8052c077d0
--- /dev/null
+++ b/nova/auth/opendj.sh
@@ -0,0 +1,119 @@
+#!/usr/bin/env bash
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+# LDAP INSTALL SCRIPT - IS IDEMPOTENT, does not scrub users
+
+apt-get install -y ldap-utils python-ldap openjdk-6-jre
+
+if [ ! -d "/usr/opendj" ]
+then
+ # TODO(rlane): Wikimedia Foundation is the current package maintainer.
+ # After the package is included in Ubuntu's channel, change this.
+ wget http://apt.wikimedia.org/wikimedia/pool/main/o/opendj/opendj_2.4.0-7_amd64.deb
+ dpkg -i opendj_2.4.0-7_amd64.deb
+fi
+
+abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
+schemapath='/var/opendj/instance/config/schema'
+cp $abspath/openssh-lpk_sun.schema $schemapath/97-openssh-lpk_sun.ldif
+cp $abspath/nova_sun.schema $schemapath/98-nova_sun.ldif
+chown opendj:opendj $schemapath/97-openssh-lpk_sun.ldif
+chown opendj:opendj $schemapath/98-nova_sun.ldif
+
+cat >/etc/ldap/ldap.conf <<LDAP_CONF_EOF
+# LDAP Client Settings
+URI ldap://localhost
+BASE dc=example,dc=com
+BINDDN cn=Directory Manager
+SIZELIMIT 0
+TIMELIMIT 0
+LDAP_CONF_EOF
+
+cat >/etc/ldap/base.ldif <<BASE_LDIF_EOF
+# This is the root of the directory tree
+dn: dc=example,dc=com
+description: Example.Com, your trusted non-existent corporation.
+dc: example
+o: Example.Com
+objectClass: top
+objectClass: dcObject
+objectClass: organization
+
+# Subtree for users
+dn: ou=Users,dc=example,dc=com
+ou: Users
+description: Users
+objectClass: organizationalUnit
+
+# Subtree for groups
+dn: ou=Groups,dc=example,dc=com
+ou: Groups
+description: Groups
+objectClass: organizationalUnit
+
+# Subtree for system accounts
+dn: ou=System,dc=example,dc=com
+ou: System
+description: Special accounts used by software applications.
+objectClass: organizationalUnit
+
+# Special Account for Authentication:
+dn: uid=authenticate,ou=System,dc=example,dc=com
+uid: authenticate
+ou: System
+description: Special account for authenticating users
+userPassword: {MD5}TLnIqASP0CKUR3/LGkEZGg==
+objectClass: account
+objectClass: simpleSecurityObject
+
+# create the sysadmin entry
+
+dn: cn=developers,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: developers
+description: IT admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=sysadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: sysadmins
+description: IT admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=netadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: netadmins
+description: Network admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=cloudadmins,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: cloudadmins
+description: Cloud admin group
+member: uid=admin,ou=Users,dc=example,dc=com
+
+dn: cn=itsec,ou=Groups,dc=example,dc=com
+objectclass: groupOfNames
+cn: itsec
+description: IT security users group
+member: uid=admin,ou=Users,dc=example,dc=com
+BASE_LDIF_EOF
+
+/etc/init.d/opendj stop
+su - opendj -c '/usr/opendj/setup -i -b "dc=example,dc=com" -l /etc/ldap/base.ldif -S -w changeme -O -n --noPropertiesFile'
+/etc/init.d/opendj start
diff --git a/nova/auth/openssh-lpk_openldap.schema b/nova/auth/openssh-lpk_openldap.schema
new file mode 100644
index 0000000000..93351da6dd
--- /dev/null
+++ b/nova/auth/openssh-lpk_openldap.schema
@@ -0,0 +1,19 @@
+#
+# LDAP Public Key Patch schema for use with openssh-ldappubkey
+# Author: Eric AUGE <eau@phear.org>
+#
+# Based on the proposal of : Mark Ruijter
+#
+
+
+# octetString SYNTAX
+attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
+ DESC 'MANDATORY: OpenSSH Public key'
+ EQUALITY octetStringMatch
+ SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
+
+# printableString SYNTAX yes|no
+objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
+ DESC 'MANDATORY: OpenSSH LPK objectclass'
+ MAY ( sshPublicKey $ uid )
+ )
diff --git a/nova/auth/openssh-lpk_sun.schema b/nova/auth/openssh-lpk_sun.schema
new file mode 100644
index 0000000000..5f52db3b65
--- /dev/null
+++ b/nova/auth/openssh-lpk_sun.schema
@@ -0,0 +1,10 @@
+#
+# LDAP Public Key Patch schema for use with openssh-ldappubkey
+# Author: Eric AUGE <eau@phear.org>
+#
+# Schema for Sun Directory Server.
+# Based on the original schema, modified by Stefan Fischer.
+#
+dn: cn=schema
+attributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey' DESC 'MANDATORY: OpenSSH Public key' EQUALITY octetStringMatch SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
+objectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY DESC 'MANDATORY: OpenSSH LPK objectclass' MAY ( sshPublicKey $ uid ) )
diff --git a/nova/auth/slap.sh b/nova/auth/slap.sh
index fdc0e39dc1..797675d2e4 100755
--- a/nova/auth/slap.sh
+++ b/nova/auth/slap.sh
@@ -20,115 +20,9 @@
apt-get install -y slapd ldap-utils python-ldap
-cat >/etc/ldap/schema/openssh-lpk_openldap.schema <<LPK_SCHEMA_EOF
-#
-# LDAP Public Key Patch schema for use with openssh-ldappubkey
-# Author: Eric AUGE <eau@phear.org>
-#
-# Based on the proposal of : Mark Ruijter
-#
-
-
-# octetString SYNTAX
-attributetype ( 1.3.6.1.4.1.24552.500.1.1.1.13 NAME 'sshPublicKey'
- DESC 'MANDATORY: OpenSSH Public key'
- EQUALITY octetStringMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 )
-
-# printableString SYNTAX yes|no
-objectclass ( 1.3.6.1.4.1.24552.500.1.1.2.0 NAME 'ldapPublicKey' SUP top AUXILIARY
- DESC 'MANDATORY: OpenSSH LPK objectclass'
- MAY ( sshPublicKey $ uid )
- )
-LPK_SCHEMA_EOF
-
-cat >/etc/ldap/schema/nova.schema <<NOVA_SCHEMA_EOF
-#
-# Person object for Nova
-# inetorgperson with extra attributes
-# Author: Vishvananda Ishaya <vishvananda@yahoo.com>
-#
-#
-
-# using internet experimental oid arc as per BP64 3.1
-objectidentifier novaSchema 1.3.6.1.3.1.666.666
-objectidentifier novaAttrs novaSchema:3
-objectidentifier novaOCs novaSchema:4
-
-attributetype (
- novaAttrs:1
- NAME 'accessKey'
- DESC 'Key for accessing data'
- EQUALITY caseIgnoreMatch
- SUBSTR caseIgnoreSubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
- SINGLE-VALUE
- )
-
-attributetype (
- novaAttrs:2
- NAME 'secretKey'
- DESC 'Secret key'
- EQUALITY caseIgnoreMatch
- SUBSTR caseIgnoreSubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
- SINGLE-VALUE
- )
-
-attributetype (
- novaAttrs:3
- NAME 'keyFingerprint'
- DESC 'Fingerprint of private key'
- EQUALITY caseIgnoreMatch
- SUBSTR caseIgnoreSubstringsMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
- SINGLE-VALUE
- )
-
-attributetype (
- novaAttrs:4
- NAME 'isAdmin'
- DESC 'Is user an administrator?'
- EQUALITY booleanMatch
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
- SINGLE-VALUE
- )
-
-attributetype (
- novaAttrs:5
- NAME 'projectManager'
- DESC 'Project Managers of a project'
- SYNTAX 1.3.6.1.4.1.1466.115.121.1.12
- )
-
-objectClass (
- novaOCs:1
- NAME 'novaUser'
- DESC 'access and secret keys'
- AUXILIARY
- MUST ( uid )
- MAY ( accessKey $ secretKey $ isAdmin )
- )
-
-objectClass (
- novaOCs:2
- NAME 'novaKeyPair'
- DESC 'Key pair for User'
- SUP top
- STRUCTURAL
- MUST ( cn $ sshPublicKey $ keyFingerprint )
- )
-
-objectClass (
- novaOCs:3
- NAME 'novaProject'
- DESC 'Container for project'
- SUP groupOfNames
- STRUCTURAL
- MUST ( cn $ projectManager )
- )
-
-NOVA_SCHEMA_EOF
+abspath=`dirname "$(cd "${0%/*}" 2>/dev/null; echo "$PWD"/"${0##*/}")"`
+cp $abspath/openssh-lpk_openldap.schema /etc/ldap/schema/openssh-lpk_openldap.schema
+cp $abspath/nova_openldap.schema /etc/ldap/schema/nova_openldap.schema
mv /etc/ldap/slapd.conf /etc/ldap/slapd.conf.orig
cat >/etc/ldap/slapd.conf <<SLAPD_CONF_EOF
diff --git a/nova/compute/api.py b/nova/compute/api.py
new file mode 100644
index 0000000000..ae463091dc
--- /dev/null
+++ b/nova/compute/api.py
@@ -0,0 +1,310 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all API requests relating to instances (guest vms).
+"""
+
+import datetime
+import logging
+import time
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import quota
+from nova import rpc
+from nova import utils
+from nova.compute import instance_types
+from nova.db import base
+
+FLAGS = flags.FLAGS
+
+
+def generate_default_hostname(internal_id):
+ """Default function to generate a hostname given an instance reference."""
+ return str(internal_id)
+
+
+class ComputeAPI(base.Base):
+ """API for interacting with the compute manager."""
+
+ def __init__(self, network_manager=None, image_service=None, **kwargs):
+ if not network_manager:
+ network_manager = utils.import_object(FLAGS.network_manager)
+ self.network_manager = network_manager
+ if not image_service:
+ image_service = utils.import_object(FLAGS.image_service)
+ self.image_service = image_service
+ super(ComputeAPI, self).__init__(**kwargs)
+
+ def create_instances(self, context, instance_type, image_id, min_count=1,
+ max_count=1, kernel_id=None, ramdisk_id=None,
+ display_name='', description='', key_name=None,
+ key_data=None, security_group='default',
+ generate_hostname=generate_default_hostname):
+ """Create the number of instances requested if quote and
+ other arguments check out ok."""
+
+ num_instances = quota.allowed_instances(context, max_count,
+ instance_type)
+ if num_instances < min_count:
+ logging.warn("Quota exceeeded for %s, tried to run %s instances",
+ context.project_id, min_count)
+ raise quota.QuotaError("Instance quota exceeded. You can only "
+ "run %s more instances of this type." %
+ num_instances, "InstanceLimitExceeded")
+
+ is_vpn = image_id == FLAGS.vpn_image_id
+ if not is_vpn:
+ image = self.image_service.show(context, image_id)
+
+ # If kernel_id/ramdisk_id isn't explicitly set in API call
+ # we take the defaults from the image's metadata
+ if kernel_id is None:
+ kernel_id = image.get('kernelId', None)
+ if ramdisk_id is None:
+ ramdisk_id = image.get('ramdiskId', None)
+
+ # Make sure we have access to kernel and ramdisk
+ if kernel_id:
+ self.image_service.show(context, kernel_id)
+ if ramdisk_id:
+ self.image_service.show(context, ramdisk_id)
+
+ if security_group is None:
+ security_group = ['default']
+ if not type(security_group) is list:
+ security_group = [security_group]
+
+ security_groups = []
+ self.ensure_default_security_group(context)
+ for security_group_name in security_group:
+ group = db.security_group_get_by_name(context,
+ context.project_id,
+ security_group_name)
+ security_groups.append(group['id'])
+
+ if key_data is None and key_name:
+ key_pair = db.key_pair_get(context, context.user_id, key_name)
+ key_data = key_pair['public_key']
+
+ type_data = instance_types.INSTANCE_TYPES[instance_type]
+ base_options = {
+ 'reservation_id': utils.generate_uid('r'),
+ 'image_id': image_id,
+ 'kernel_id': kernel_id or '',
+ 'ramdisk_id': ramdisk_id or '',
+ 'state_description': 'scheduling',
+ 'user_id': context.user_id,
+ 'project_id': context.project_id,
+ 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+ 'instance_type': instance_type,
+ 'memory_mb': type_data['memory_mb'],
+ 'vcpus': type_data['vcpus'],
+ 'local_gb': type_data['local_gb'],
+ 'display_name': display_name,
+ 'display_description': description,
+ 'key_name': key_name,
+ 'key_data': key_data}
+
+ elevated = context.elevated()
+ instances = []
+ logging.debug("Going to run %s instances...", num_instances)
+ for num in range(num_instances):
+ instance = dict(mac_address=utils.generate_mac(),
+ launch_index=num,
+ **base_options)
+ instance = self.db.instance_create(context, instance)
+ instance_id = instance['id']
+ internal_id = instance['internal_id']
+
+ elevated = context.elevated()
+ if not security_groups:
+ security_groups = []
+ for security_group_id in security_groups:
+ self.db.instance_add_security_group(elevated,
+ instance_id,
+ security_group_id)
+
+ # Set sane defaults if not specified
+ updates = dict(hostname=generate_hostname(internal_id))
+ if 'display_name' not in instance:
+ updates['display_name'] = "Server %s" % internal_id
+
+ instance = self.update_instance(context, instance_id, **updates)
+ instances.append(instance)
+
+ # TODO(vish): This probably should be done in the scheduler
+ # or in compute as a call. The network should be
+ # allocated after the host is assigned and setup
+ # can happen at the same time.
+ address = self.network_manager.allocate_fixed_ip(context,
+ instance_id,
+ is_vpn)
+ rpc.cast(elevated,
+ self._get_network_topic(context),
+ {"method": "setup_fixed_ip",
+ "args": {"address": address}})
+
+ logging.debug("Casting to scheduler for %s/%s's instance %s",
+ context.project_id, context.user_id, instance_id)
+ rpc.cast(context,
+ FLAGS.scheduler_topic,
+ {"method": "run_instance",
+ "args": {"topic": FLAGS.compute_topic,
+ "instance_id": instance_id}})
+
+ return instances
+
+ def ensure_default_security_group(self, context):
+ """ Create security group for the security context if it
+ does not already exist
+
+ :param context: the security context
+
+ """
+ try:
+ db.security_group_get_by_name(context, context.project_id,
+ 'default')
+ except exception.NotFound:
+ values = {'name': 'default',
+ 'description': 'default',
+ 'user_id': context.user_id,
+ 'project_id': context.project_id}
+ db.security_group_create(context, values)
+
+ def update_instance(self, context, instance_id, **kwargs):
+ """Updates the instance in the datastore.
+
+ :param context: The security context
+ :param instance_id: ID of the instance to update
+ :param kwargs: All additional keyword args are treated
+ as data fields of the instance to be
+ updated
+
+ :retval None
+
+ """
+ return self.db.instance_update(context, instance_id, kwargs)
+
+ def delete_instance(self, context, instance_id):
+ logging.debug("Going to try and terminate %d" % instance_id)
+ try:
+ instance = self.db.instance_get_by_internal_id(context,
+ instance_id)
+ except exception.NotFound as e:
+ logging.warning("Instance %d was not found during terminate",
+ instance_id)
+ raise e
+
+ if (instance['state_description'] == 'terminating'):
+ logging.warning("Instance %d is already being terminated",
+ instance_id)
+ return
+
+ self.update_instance(context,
+ instance['id'],
+ state_description='terminating',
+ state=0,
+ terminated_at=datetime.datetime.utcnow())
+
+ # FIXME(ja): where should network deallocate occur?
+ address = self.db.instance_get_floating_address(context,
+ instance['id'])
+ if address:
+ logging.debug("Disassociating address %s" % address)
+ # NOTE(vish): Right now we don't really care if the ip is
+ # disassociated. We may need to worry about
+ # checking this later. Perhaps in the scheduler?
+ rpc.cast(context,
+ self._get_network_topic(context),
+ {"method": "disassociate_floating_ip",
+ "args": {"floating_address": address}})
+
+ address = self.db.instance_get_fixed_address(context, instance['id'])
+ if address:
+ logging.debug("Deallocating address %s" % address)
+ # NOTE(vish): Currently, nothing needs to be done on the
+ # network node until release. If this changes,
+ # we will need to cast here.
+ self.network_manager.deallocate_fixed_ip(context.elevated(),
+ address)
+
+ host = instance['host']
+ if host:
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "terminate_instance",
+ "args": {"instance_id": instance['id']}})
+ else:
+ self.db.instance_destroy(context, instance['id'])
+
+ def get_instances(self, context, project_id=None):
+ """Get all instances, possibly filtered by project ID or
+ user ID. If there is no filter and the context is an admin,
+        it will retrieve all instances in the system."""
+ if project_id or not context.is_admin:
+ if not context.project:
+ return self.db.instance_get_all_by_user(context,
+ context.user_id)
+ if project_id is None:
+ project_id = context.project_id
+ return self.db.instance_get_all_by_project(context, project_id)
+ return self.db.instance_get_all(context)
+
+ def get_instance(self, context, instance_id):
+ return self.db.instance_get_by_internal_id(context, instance_id)
+
+ def reboot(self, context, instance_id):
+ """Reboot the given instance."""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "reboot_instance",
+ "args": {"instance_id": instance['id']}})
+
+ def rescue(self, context, instance_id):
+ """Rescue the given instance."""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "rescue_instance",
+ "args": {"instance_id": instance['id']}})
+
+ def unrescue(self, context, instance_id):
+ """Unrescue the given instance."""
+ instance = self.db.instance_get_by_internal_id(context, instance_id)
+ host = instance['host']
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "unrescue_instance",
+ "args": {"instance_id": instance['id']}})
+
+ def _get_network_topic(self, context):
+ """Retrieves the network host for a project"""
+ network_ref = self.network_manager.get_network(context)
+ host = network_ref['host']
+ if not host:
+ host = rpc.call(context,
+ FLAGS.network_topic,
+ {"method": "set_network_host",
+ "args": {"network_id": network_ref['id']}})
+ return self.db.queue_get_for(context, FLAGS.network_topic, host)
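
reboot(), rescue() and unrescue() above all follow the same shape: resolve the instance's compute host, then cast the operation to that host's topic. A generic version, purely for illustration (the helper is not part of the patch, which keeps the three explicit methods):

    from nova import flags
    from nova import rpc

    FLAGS = flags.FLAGS

    def cast_to_instance_host(db, context, instance_id, method):
        """Resolve an instance's compute host and cast `method` to it."""
        instance = db.instance_get_by_internal_id(context, instance_id)
        topic = db.queue_get_for(context, FLAGS.compute_topic, instance['host'])
        rpc.cast(context, topic,
                 {"method": method,
                  "args": {"instance_id": instance['id']}})
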
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 67ee8f8a8a..6e47170bde 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -21,9 +21,29 @@
The built-in instance properties.
"""
+from nova import flags
+from nova import exception
+
+FLAGS = flags.FLAGS
INSTANCE_TYPES = {
'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+
+
+def get_by_type(instance_type):
+ """Build instance data structure and save it to the data store."""
+ if instance_type is None:
+ return FLAGS.default_instance_type
+ if instance_type not in INSTANCE_TYPES:
+ raise exception.ApiError("Unknown instance type: %s" % instance_type)
+ return instance_type
+
+
+def get_by_flavor_id(flavor_id):
+ for instance_type, details in INSTANCE_TYPES.iteritems():
+ if details['flavorid'] == flavor_id:
+ return instance_type
+ return FLAGS.default_instance_type
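
Behaviour of the two new lookup helpers against the built-in table above; note that an unknown flavor id silently falls back to the default instance type, while an unknown type name raises ApiError:

    from nova.compute import instance_types

    instance_types.get_by_type('m1.small')    # -> 'm1.small'
    instance_types.get_by_type(None)          # -> FLAGS.default_instance_type
    instance_types.get_by_type('bogus')       # raises exception.ApiError
    instance_types.get_by_flavor_id(1)        # -> 'm1.tiny'
    instance_types.get_by_flavor_id(999)      # -> FLAGS.default_instance_type
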
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 50a9d316b4..dd8d41129c 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -22,8 +22,8 @@ Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
handles RPC calls relating to creating instances. It is responsible for
building a disk image, launching it via the underlying virtualization driver,
-responding to calls to check it state, attaching persistent as well as
-termination.
+responding to calls to check its state, attaching persistent storage, and
+terminating it.
**Related Flags**
@@ -45,15 +45,15 @@ from nova import manager
from nova import utils
from nova.compute import power_state
-
FLAGS = flags.FLAGS
flags.DEFINE_string('instances_path', '$state_path/instances',
'where instances are stored on disk')
flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
- 'Driver to use for volume creation')
+ 'Driver to use for controlling virtualization')
class ComputeManager(manager.Manager):
+
"""Manages the running instances from creation to destruction."""
def __init__(self, compute_driver=None, *args, **kwargs):
@@ -84,47 +84,6 @@ class ComputeManager(manager.Manager):
"""This call passes stright through to the virtualization driver."""
yield self.driver.refresh_security_group(security_group_id)
- def create_instance(self, context, security_groups=None, **kwargs):
- """Creates the instance in the datastore and returns the
- new instance as a mapping
-
- :param context: The security context
- :param security_groups: list of security group ids to
- attach to the instance
- :param kwargs: All additional keyword args are treated
- as data fields of the instance to be
- created
-
- :retval Returns a mapping of the instance information
- that has just been created
-
- """
- instance_ref = self.db.instance_create(context, kwargs)
- inst_id = instance_ref['id']
-
- elevated = context.elevated()
- if not security_groups:
- security_groups = []
- for security_group_id in security_groups:
- self.db.instance_add_security_group(elevated,
- inst_id,
- security_group_id)
- return instance_ref
-
- def update_instance(self, context, instance_id, **kwargs):
- """Updates the instance in the datastore.
-
- :param context: The security context
- :param instance_id: ID of the instance to update
- :param kwargs: All additional keyword args are treated
- as data fields of the instance to be
- updated
-
- :retval None
-
- """
- self.db.instance_update(context, instance_id, kwargs)
-
@defer.inlineCallbacks
@exception.wrap_exception
def run_instance(self, context, instance_id, **_kwargs):
@@ -134,7 +93,6 @@ class ComputeManager(manager.Manager):
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error("Instance has already been created")
logging.debug("instance %s: starting...", instance_id)
- project_id = instance_ref['project_id']
self.network_manager.setup_compute_network(context, instance_id)
self.db.instance_update(context,
instance_id,
@@ -176,7 +134,6 @@ class ComputeManager(manager.Manager):
self.db.instance_destroy(context, instance_id)
raise exception.Error('trying to destroy already destroyed'
' instance: %s' % instance_id)
-
yield self.driver.destroy(instance_ref)
# TODO(ja): should we keep it in a terminated state for a bit?
diff --git a/nova/db/base.py b/nova/db/base.py
new file mode 100644
index 0000000000..1d1e80866b
--- /dev/null
+++ b/nova/db/base.py
@@ -0,0 +1,36 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Base class for classes that need modular database access.
+"""
+
+from nova import utils
+from nova import flags
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('db_driver', 'nova.db.api',
+ 'driver to use for database access')
+
+
+class Base(object):
+ """DB driver is injected in the init method"""
+ def __init__(self, db_driver=None):
+ if not db_driver:
+ db_driver = FLAGS.db_driver
+ self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103
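
Any class that needs modular database access can now inherit from this Base instead of wiring up FLAGS.db_driver itself; ComputeAPI above and the reworked Manager below both do. A minimal illustrative subclass:

    from nova.db import base

    class InstanceCounter(base.Base):          # illustrative subclass
        """Example consumer of the injected db driver."""
        def count(self, context):
            return len(self.db.instance_get_all(context))
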
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index afa55fc03a..55036d1d10 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -530,6 +530,12 @@ def fixed_ip_update(context, address, values):
#functions between the two of them as well.
@require_context
def instance_create(context, values):
+ """Create a new Instance record in the database.
+
+ context - request context object
+ values - dict containing column values.
+ 'internal_id' is auto-generated and should not be specified.
+ """
instance_ref = models.Instance()
instance_ref.update(values)
@@ -537,7 +543,7 @@ def instance_create(context, values):
with session.begin():
while instance_ref.internal_id == None:
# Instances have integer internal ids.
- internal_id = random.randint(0, 2 ** 32 - 1)
+ internal_id = random.randint(0, 2 ** 31 - 1)
if not instance_internal_id_exists(context, internal_id,
session=session):
instance_ref.internal_id = internal_id
@@ -726,6 +732,7 @@ def instance_update(context, instance_id, values):
instance_ref = instance_get(context, instance_id, session=session)
instance_ref.update(values)
instance_ref.save(session=session)
+ return instance_ref
def instance_add_security_group(context, instance_id, security_group_id):
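
The id range change above (2**32 - 1 down to 2**31 - 1) keeps randomly generated internal ids within a signed 32-bit integer, presumably so they fit the integer columns and any client-side id handling; the same change is made in the local image service below. The generation loop, pulled out on its own:

    import random

    def generate_internal_id(id_exists):
        """id_exists: callable returning True if the candidate id is taken."""
        internal_id = random.randint(0, 2 ** 31 - 1)
        while id_exists(internal_id):
            internal_id = random.randint(0, 2 ** 31 - 1)
        return internal_id
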
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 01b5cf3507..fe0a9a9216 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -178,8 +178,6 @@ class Instance(BASE, NovaBase):
kernel_id = Column(String(255))
ramdisk_id = Column(String(255))
- server_name = Column(String(255))
-
# image_id = Column(Integer, ForeignKey('images.id'), nullable=True)
# kernel_id = Column(Integer, ForeignKey('images.id'), nullable=True)
# ramdisk_id = Column(Integer, ForeignKey('images.id'), nullable=True)
@@ -212,6 +210,7 @@ class Instance(BASE, NovaBase):
launched_at = Column(DateTime)
terminated_at = Column(DateTime)
+ # User editable field for display in user-facing UIs
display_name = Column(String(255))
display_description = Column(String(255))
diff --git a/nova/flags.py b/nova/flags.py
index be81fd7ed5..45f5d74698 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -223,8 +223,6 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
-DEFINE_string('cc_host', '127.0.0.1', 'ip of api server')
-DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_url', 'http://127.0.0.1:8773/services/Cloud',
'Url to ec2 api server')
@@ -264,7 +262,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
# The service to use for image search and retrieval
-DEFINE_string('image_service', 'nova.image.local.LocalImageService',
+DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(),
diff --git a/nova/image/local.py b/nova/image/local.py
index 9b0cdcc50d..b445932218 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -59,7 +59,7 @@ class LocalImageService(service.BaseImageService):
"""
Store the image data and return the new image id.
"""
- id = random.randint(0, 2 ** 32 - 1)
+ id = random.randint(0, 2 ** 31 - 1)
data['id'] = id
self.update(context, id, data)
return id
diff --git a/nova/manager.py b/nova/manager.py
index a6efb8732c..5b61f7a4cb 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -53,23 +53,19 @@ This module provides Manager, a base class for managers.
from nova import utils
from nova import flags
+from nova.db import base
from twisted.internet import defer
FLAGS = flags.FLAGS
-flags.DEFINE_string('db_driver', 'nova.db.api',
- 'driver to use for volume creation')
-class Manager(object):
- """DB driver is injected in the init method"""
+class Manager(base.Base):
def __init__(self, host=None, db_driver=None):
if not host:
host = FLAGS.host
self.host = host
- if not db_driver:
- db_driver = FLAGS.db_driver
- self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103
+ super(Manager, self).__init__(db_driver)
@defer.inlineCallbacks
def periodic_tasks(self, context=None):
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 7b00e65d42..0fefd94156 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -46,6 +46,8 @@ flags.DEFINE_string('vlan_interface', 'eth0',
'network device for vlans')
flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
'location of nova-dhcpbridge')
+flags.DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server')
+flags.DEFINE_integer('cc_port', 8773, 'cloud controller port')
flags.DEFINE_string('routing_source_ip', '127.0.0.1',
'Public IP of network host')
flags.DEFINE_bool('use_nova_chains', False,
diff --git a/nova/quota.py b/nova/quota.py
index 01dd0ecd45..f6ca9f77c7 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -94,3 +94,8 @@ def allowed_floating_ips(context, num_floating_ips):
quota = get_quota(context, project_id)
allowed_floating_ips = quota['floating_ips'] - used_floating_ips
return min(num_floating_ips, allowed_floating_ips)
+
+
+class QuotaError(exception.ApiError):
+ """Quota Exceeeded"""
+ pass
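
QuotaError subclasses ApiError and carries a client-facing error code (ComputeAPI.create_instances above raises it with "InstanceLimitExceeded"). A caller-side sketch; error_response is an illustrative helper, not part of the patch:

    from nova import quota

    def launch_or_fail(compute_api, ctxt):
        try:
            return compute_api.create_instances(ctxt, 'm1.small', 'ami-00001')
        except quota.QuotaError as e:
            # The second constructor argument ("InstanceLimitExceeded") is the
            # EC2-style error code intended for the client.
            return error_response(str(e))      # illustrative helper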
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 639a2ebe4e..21b8aac1c0 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -24,9 +24,10 @@ import webob
import webob.dec
from nova import auth
-from nova import utils
-from nova import flags
+from nova import context
from nova import exception as exc
+from nova import flags
+from nova import utils
import nova.api.openstack.auth
from nova.image import service
from nova.image import glance
@@ -54,12 +55,11 @@ def fake_auth_init(self):
self.db = FakeAuthDatabase()
self.context = Context()
self.auth = FakeAuthManager()
- self.host = 'foo'
@webob.dec.wsgify
def fake_wsgi(self, req):
- req.environ['nova.context'] = dict(user=dict(id=1))
+ req.environ['nova.context'] = context.RequestContext(1, 1)
if req.body:
req.environ['inst_dict'] = json.loads(req.body)
return self.application
@@ -68,12 +68,11 @@ def fake_wsgi(self, req):
def stub_out_key_pair_funcs(stubs):
def key_pair(context, user_id):
return [dict(name='key', public_key='public_key')]
- stubs.Set(nova.db.api, 'key_pair_get_all_by_user',
- key_pair)
+ stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
def stub_out_image_service(stubs):
- def fake_image_show(meh, id):
+ def fake_image_show(meh, context, id):
return dict(kernelId=1, ramdiskId=1)
stubs.Set(nova.image.local.LocalImageService, 'show', fake_image_show)
@@ -173,6 +172,12 @@ class FakeToken(object):
setattr(self, k, v)
+class FakeRequestContext(object):
+ def __init__(self, user, project):
+ self.user_id = 1
+ self.project_id = 1
+
+
class FakeAuthDatabase(object):
data = {}
diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py
new file mode 100644
index 0000000000..1b2e1654d5
--- /dev/null
+++ b/nova/tests/api/openstack/test_adminapi.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+import stubout
+import webob
+
+import nova.api
+from nova import flags
+from nova.tests.api.openstack import fakes
+
+FLAGS = flags.FLAGS
+
+
+class AdminAPITest(unittest.TestCase):
+ def setUp(self):
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+ self.allow_admin = FLAGS.allow_admin_api
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+
+ def test_admin_enabled(self):
+ FLAGS.allow_admin_api = True
+ # We should still be able to access public operations.
+ req = webob.Request.blank('/v1.0/flavors')
+ res = req.get_response(nova.api.API('os'))
+ self.assertEqual(res.status_int, 200)
+ # TODO: Confirm admin operations are available.
+
+ def test_admin_disabled(self):
+ FLAGS.allow_admin_api = False
+ # We should still be able to access public operations.
+ req = webob.Request.blank('/v1.0/flavors')
+ res = req.get_response(nova.api.API('os'))
+ self.assertEqual(res.status_int, 200)
+ # TODO: Confirm admin operations are unavailable.
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index 29f4b88747..7b427c2dbb 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -26,6 +26,7 @@ import nova.api
import nova.api.openstack.auth
import nova.auth.manager
from nova import auth
+from nova import context
from nova.tests.api.openstack import fakes
@@ -35,6 +36,7 @@ class Test(unittest.TestCase):
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(nova.api.openstack.auth.BasicApiAuthManager,
'__init__', fakes.fake_auth_init)
+ self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_rate_limiting(self.stubs)
@@ -62,14 +64,14 @@ class Test(unittest.TestCase):
f = fakes.FakeAuthManager()
f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
- req = webob.Request.blank('/v1.0/')
+ req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
req.headers['X-Auth-User'] = 'herp'
req.headers['X-Auth-Key'] = 'derp'
result = req.get_response(nova.api.API('os'))
self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
self.assertEqual(result.headers['X-Server-Management-Url'],
- "https://foo/v1.0/")
+ "http://foo/v1.0/")
self.assertEqual(result.headers['X-CDN-Management-Url'],
"")
self.assertEqual(result.headers['X-Storage-Url'], "")
@@ -131,6 +133,7 @@ class TestLimiter(unittest.TestCase):
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(nova.api.openstack.auth.BasicApiAuthManager,
'__init__', fakes.fake_auth_init)
+ self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
fakes.FakeAuthManager.auth_data = {}
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 8cfc6c45a3..8444b6fce6 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -43,9 +43,21 @@ def return_servers(context, user_id=1):
return [stub_instance(i, user_id) for i in xrange(5)]
+def return_security_group(context, instance_id, security_group_id):
+ pass
+
+
+def instance_update(context, instance_id, kwargs):
+ return stub_instance(instance_id)
+
+
+def instance_address(context, instance_id):
+ return None
+
+
def stub_instance(id, user_id=1):
- return Instance(id=id, state=0, image_id=10, server_name='server%s' % id,
- user_id=user_id)
+ return Instance(id=id + 123456, state=0, image_id=10, user_id=user_id,
+ display_name='server%s' % id, internal_id=id)
class ServersTest(unittest.TestCase):
@@ -63,6 +75,13 @@ class ServersTest(unittest.TestCase):
return_server)
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers)
+ self.stubs.Set(nova.db.api, 'instance_add_security_group',
+ return_security_group)
+ self.stubs.Set(nova.db.api, 'instance_update', instance_update)
+ self.stubs.Set(nova.db.api, 'instance_get_fixed_address',
+ instance_address)
+ self.stubs.Set(nova.db.api, 'instance_get_floating_address',
+ instance_address)
def tearDown(self):
self.stubs.UnsetAll()
@@ -87,11 +106,11 @@ class ServersTest(unittest.TestCase):
i += 1
def test_create_instance(self):
- def server_update(context, id, params):
- pass
-
def instance_create(context, inst):
- return {'id': 1, 'internal_id': 1}
+ return {'id': 1, 'internal_id': 1, 'display_name': ''}
+
+ def server_update(context, id, params):
+ return instance_create(context, id)
def fake_method(*args, **kwargs):
pass
diff --git a/nova/tests/cloud_unittest.py b/nova/tests/cloud_unittest.py
index 9886a2449b..770c942198 100644
--- a/nova/tests/cloud_unittest.py
+++ b/nova/tests/cloud_unittest.py
@@ -126,6 +126,19 @@ class CloudTestCase(test.TrialTestCase):
db.instance_destroy(self.context, inst['id'])
db.floating_ip_destroy(self.context, address)
+ def test_describe_volumes(self):
+ """Makes sure describe_volumes works and filters results."""
+ vol1 = db.volume_create(self.context, {})
+ vol2 = db.volume_create(self.context, {})
+ result = self.cloud.describe_volumes(self.context)
+ self.assertEqual(len(result['volumeSet']), 2)
+ result = self.cloud.describe_volumes(self.context,
+ volume_id=[vol2['ec2_id']])
+ self.assertEqual(len(result['volumeSet']), 1)
+ self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id'])
+ db.volume_destroy(self.context, vol1['id'])
+ db.volume_destroy(self.context, vol2['id'])
+
def test_console_output(self):
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
diff --git a/nova/tests/compute_unittest.py b/nova/tests/compute_unittest.py
index 71a1a4457d..6f3ef96cbb 100644
--- a/nova/tests/compute_unittest.py
+++ b/nova/tests/compute_unittest.py
@@ -31,6 +31,7 @@ from nova import flags
from nova import test
from nova import utils
from nova.auth import manager
+from nova.compute import api as compute_api
FLAGS = flags.FLAGS
@@ -43,6 +44,7 @@ class ComputeTestCase(test.TrialTestCase):
self.flags(connection_type='fake',
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
+ self.compute_api = compute_api.ComputeAPI()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
@@ -66,26 +68,31 @@ class ComputeTestCase(test.TrialTestCase):
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id']
+ def test_create_instance_defaults_display_name(self):
+ """Verify that an instance cannot be created without a display_name."""
+ cases = [dict(), dict(display_name=None)]
+ for instance in cases:
+ ref = self.compute_api.create_instances(self.context,
+ FLAGS.default_instance_type, None, **instance)
+ try:
+ self.assertNotEqual(ref[0].display_name, None)
+ finally:
+ db.instance_destroy(self.context, ref[0]['id'])
+
def test_create_instance_associates_security_groups(self):
- """Make sure create_instance associates security groups"""
- inst = {}
- inst['user_id'] = self.user.id
- inst['project_id'] = self.project.id
+ """Make sure create_instances associates security groups"""
values = {'name': 'default',
'description': 'default',
'user_id': self.user.id,
'project_id': self.project.id}
group = db.security_group_create(self.context, values)
- ref = self.compute.create_instance(self.context,
- security_groups=[group['id']],
- **inst)
- # reload to get groups
- instance_ref = db.instance_get(self.context, ref['id'])
+ ref = self.compute_api.create_instances(self.context,
+ FLAGS.default_instance_type, None, security_group=['default'])
try:
- self.assertEqual(len(instance_ref['security_groups']), 1)
+ self.assertEqual(len(ref[0]['security_groups']), 1)
finally:
db.security_group_destroy(self.context, group['id'])
- db.instance_destroy(self.context, instance_ref['id'])
+ db.instance_destroy(self.context, ref[0]['id'])
@defer.inlineCallbacks
def test_run_terminate(self):
diff --git a/nova/tests/misc_unittest.py b/nova/tests/misc_unittest.py
index 856060afa2..667c63ad09 100644
--- a/nova/tests/misc_unittest.py
+++ b/nova/tests/misc_unittest.py
@@ -15,7 +15,6 @@
# under the License.
import os
-import subprocess
from nova import test
from nova.utils import parse_mailmap, str_dict_replace
@@ -24,18 +23,23 @@ from nova.utils import parse_mailmap, str_dict_replace
class ProjectTestCase(test.TrialTestCase):
def test_authors_up_to_date(self):
if os.path.exists('../.bzr'):
- log_cmd = subprocess.Popen(["bzr", "log", "-n0"],
- stdout=subprocess.PIPE)
- changelog = log_cmd.communicate()[0]
+ contributors = set()
+
mailmap = parse_mailmap('../.mailmap')
- contributors = set()
- for l in changelog.split('\n'):
- l = l.strip()
- if (l.startswith('author:') or l.startswith('committer:')
- and not l == 'committer: Tarmac'):
- email = l.split(' ')[-1]
- contributors.add(str_dict_replace(email, mailmap))
+ import bzrlib.workingtree
+ tree = bzrlib.workingtree.WorkingTree.open('..')
+ tree.lock_read()
+ parents = tree.get_parent_ids()
+ g = tree.branch.repository.get_graph()
+ for p in parents[1:]:
+ rev_ids = [r for r, _ in g.iter_ancestry(parents)
+ if r != "null:"]
+ revs = tree.branch.repository.get_revisions(rev_ids)
+ for r in revs:
+ for author in r.get_apparent_authors():
+ email = author.split(' ')[-1]
+ contributors.add(str_dict_replace(email, mailmap))
authors_file = open('../Authors', 'r').read()
diff --git a/nova/tests/quota_unittest.py b/nova/tests/quota_unittest.py
index b7c1d2acc0..1966b51f7b 100644
--- a/nova/tests/quota_unittest.py
+++ b/nova/tests/quota_unittest.py
@@ -94,11 +94,12 @@ class QuotaTestCase(test.TrialTestCase):
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
- self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
+ self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
- instance_type='m1.small')
+ instance_type='m1.small',
+ image_id='fake')
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -106,11 +107,12 @@ class QuotaTestCase(test.TrialTestCase):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
- self.assertRaises(cloud.QuotaError, self.cloud.run_instances,
+ self.assertRaises(quota.QuotaError, self.cloud.run_instances,
self.context,
min_count=1,
max_count=1,
- instance_type='m1.small')
+ instance_type='m1.small',
+ image_id='fake')
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -119,7 +121,7 @@ class QuotaTestCase(test.TrialTestCase):
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
- self.assertRaises(cloud.QuotaError, self.cloud.create_volume,
+ self.assertRaises(quota.QuotaError, self.cloud.create_volume,
self.context,
size=10)
for volume_id in volume_ids:
@@ -129,7 +131,7 @@ class QuotaTestCase(test.TrialTestCase):
volume_ids = []
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
- self.assertRaises(cloud.QuotaError,
+ self.assertRaises(quota.QuotaError,
self.cloud.create_volume,
self.context,
size=10)
@@ -146,6 +148,6 @@ class QuotaTestCase(test.TrialTestCase):
# make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks
# that is breaking.
- self.assertRaises(cloud.QuotaError, self.cloud.allocate_address,
+ self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
self.context)
db.floating_ip_destroy(context.get_admin_context(), address)
diff --git a/nova/tests/virt_unittest.py b/nova/tests/virt_unittest.py
index bcc995a5f8..9e9d84892f 100644
--- a/nova/tests/virt_unittest.py
+++ b/nova/tests/virt_unittest.py
@@ -41,15 +41,14 @@ class LibvirtConnTestCase(test.TrialTestCase):
FLAGS.instances_path = ''
test_ip = '10.11.12.13'
- test_instance = {
- 'memory_kb' : '1024000',
- 'basepath' : '/some/path',
- 'bridge_name' : 'br100',
- 'mac_address' : '02:12:34:46:56:67',
- 'vcpus' : 2,
- 'project_id' : 'fake',
- 'bridge' : 'br101',
- 'instance_type' : 'm1.small'}
+ test_instance = {'memory_kb': '1024000',
+ 'basepath': '/some/path',
+ 'bridge_name': 'br100',
+ 'mac_address': '02:12:34:46:56:67',
+ 'vcpus': 2,
+ 'project_id': 'fake',
+ 'bridge': 'br101',
+ 'instance_type': 'm1.small'}
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
@@ -83,7 +82,6 @@ class LibvirtConnTestCase(test.TrialTestCase):
expect_kernel=True, expect_ramdisk=True,
rescue=True)
-
def do_test_xml_and_uri(self, instance,
expect_ramdisk, expect_kernel,
rescue=False):
@@ -94,14 +92,14 @@ class LibvirtConnTestCase(test.TrialTestCase):
self.network.set_network_host(context.get_admin_context(),
network_ref['id'])
- fixed_ip = { 'address' : self.test_ip,
- 'network_id' : network_ref['id'] }
+ fixed_ip = {'address': self.test_ip,
+ 'network_id': network_ref['id']}
ctxt = context.get_admin_context()
fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
db.fixed_ip_update(ctxt, self.test_ip,
- { 'allocated': True,
- 'instance_id': instance_ref['id'] })
+ {'allocated': True,
+ 'instance_id': instance_ref['id']})
type_uri_map = {'qemu': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'qemu'),
@@ -131,15 +129,15 @@ class LibvirtConnTestCase(test.TrialTestCase):
check_list.append(check)
else:
if expect_kernel:
- check = (lambda t: t.find('./os/kernel').text.split('/')[1],
- 'kernel')
+ check = (lambda t: t.find('./os/kernel').text.split('/'
+ )[1], 'kernel')
else:
check = (lambda t: t.find('./os/kernel'), None)
check_list.append(check)
-
+
if expect_ramdisk:
- check = (lambda t: t.find('./os/initrd').text.split('/')[1],
- 'ramdisk')
+ check = (lambda t: t.find('./os/initrd').text.split('/'
+ )[1], 'ramdisk')
else:
check = (lambda t: t.find('./os/initrd'), None)
check_list.append(check)
@@ -189,7 +187,8 @@ class LibvirtConnTestCase(test.TrialTestCase):
expected_result,
'%s failed common check %d' % (xml, i))
- # This test is supposed to make sure we don't override a specifically set uri
+ # This test is supposed to make sure we don't override a specifically
+ # set uri
#
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index 11f0fa8ced..c40bb4bb4d 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -25,7 +25,7 @@ import sys
from nova import flags
from nova.virt import fake
from nova.virt import libvirt_conn
-from nova.virt import xenapi
+from nova.virt import xenapi_conn
FLAGS = flags.FLAGS
@@ -61,7 +61,7 @@ def get_connection(read_only=False):
elif t == 'libvirt':
conn = libvirt_conn.get_connection(read_only)
elif t == 'xenapi':
- conn = xenapi.get_connection(read_only)
+ conn = xenapi_conn.get_connection(read_only)
else:
raise Exception('Unknown connection type "%s"' % t)
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 2865c18acd..5dcb05b1f9 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -421,14 +421,14 @@ class LibvirtConnection(object):
yield images.fetch(inst.image_id, basepath('disk-raw'), user,
project)
- using_kernel = inst.kernel_id
- if using_kernel:
+ if inst.kernel_id:
if not os.path.exists(basepath('kernel')):
- yield images.fetch(inst.kernel_id, basepath('kernel'), user,
- project)
- if not os.path.exists(basepath('ramdisk')):
- yield images.fetch(inst.ramdisk_id, basepath('ramdisk'), user,
- project)
+ yield images.fetch(inst.kernel_id, basepath('kernel'),
+ user, project)
+ if inst.ramdisk_id:
+ if not os.path.exists(basepath('ramdisk')):
+ yield images.fetch(inst.ramdisk_id, basepath('ramdisk'),
+ user, project)
execute = lambda cmd, process_input = None, check_exit_code = True: \
process.simple_execute(cmd=cmd,
@@ -439,7 +439,7 @@ class LibvirtConnection(object):
# partitioned disk image where the target partition is the first
# partition
target_partition = None
- if not using_kernel:
+ if not inst.kernel_id:
target_partition = "1"
key = str(inst['key_data'])
@@ -472,7 +472,7 @@ class LibvirtConnection(object):
' into image %s (%s)',
inst['name'], inst.image_id, e)
- if using_kernel:
+ if inst.kernel_id:
if os.path.exists(basepath('disk')):
yield process.simple_execute('rm -f %s' % basepath('disk'))
diff --git a/nova/virt/xenapi.py b/nova/virt/xenapi.py
deleted file mode 100644
index 3169562a53..0000000000
--- a/nova/virt/xenapi.py
+++ /dev/null
@@ -1,444 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2010 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-A connection to XenServer or Xen Cloud Platform.
-
-The concurrency model for this class is as follows:
-
-All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator
-deferredToThread). They are remote calls, and so may hang for the usual
-reasons. They should not be allowed to block the reactor thread.
-
-All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async
-(using XenAPI.VM.async_start etc). These return a task, which can then be
-polled for completion. Polling is handled using reactor.callLater.
-
-This combination of techniques means that we don't block the reactor thread at
-all, and at the same time we don't hold lots of threads waiting for
-long-running operations.
-
-FIXME: get_info currently doesn't conform to these rules, and will block the
-reactor thread if the VM.get_by_name_label or VM.get_record calls block.
-
-**Related Flags**
-
-:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
-:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
- Platform (default: root).
-:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
- Platform.
-:xenapi_task_poll_interval: The interval (seconds) used for polling of
- remote tasks (Async.VM.start, etc)
- (default: 0.5).
-
-"""
-
-import logging
-import xmlrpclib
-
-from twisted.internet import defer
-from twisted.internet import reactor
-from twisted.internet import task
-
-from nova import db
-from nova import flags
-from nova import process
-from nova import utils
-from nova.auth.manager import AuthManager
-from nova.compute import instance_types
-from nova.compute import power_state
-from nova.virt import images
-
-XenAPI = None
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('xenapi_connection_url',
- None,
- 'URL for connection to XenServer/Xen Cloud Platform.'
- ' Required if connection_type=xenapi.')
-flags.DEFINE_string('xenapi_connection_username',
- 'root',
- 'Username for connection to XenServer/Xen Cloud Platform.'
- ' Used only if connection_type=xenapi.')
-flags.DEFINE_string('xenapi_connection_password',
- None,
- 'Password for connection to XenServer/Xen Cloud Platform.'
- ' Used only if connection_type=xenapi.')
-flags.DEFINE_float('xenapi_task_poll_interval',
- 0.5,
- 'The interval used for polling of remote tasks '
- '(Async.VM.start, etc). Used only if '
- 'connection_type=xenapi.')
-
-
-XENAPI_POWER_STATE = {
- 'Halted': power_state.SHUTDOWN,
- 'Running': power_state.RUNNING,
- 'Paused': power_state.PAUSED,
- 'Suspended': power_state.SHUTDOWN, # FIXME
- 'Crashed': power_state.CRASHED}
-
-
-def get_connection(_):
- """Note that XenAPI doesn't have a read-only connection mode, so
- the read_only parameter is ignored."""
- # This is loaded late so that there's no need to install this
- # library when not using XenAPI.
- global XenAPI
- if XenAPI is None:
- XenAPI = __import__('XenAPI')
- url = FLAGS.xenapi_connection_url
- username = FLAGS.xenapi_connection_username
- password = FLAGS.xenapi_connection_password
- if not url or password is None:
- raise Exception('Must specify xenapi_connection_url, '
- 'xenapi_connection_username (optionally), and '
- 'xenapi_connection_password to use '
- 'connection_type=xenapi')
- return XenAPIConnection(url, username, password)
-
-
-class XenAPIConnection(object):
- def __init__(self, url, user, pw):
- self._conn = XenAPI.Session(url)
- self._conn.login_with_password(user, pw)
-
- def list_instances(self):
- return [self._conn.xenapi.VM.get_name_label(vm) \
- for vm in self._conn.xenapi.VM.get_all()]
-
- @defer.inlineCallbacks
- def spawn(self, instance):
- vm = yield self._lookup(instance.name)
- if vm is not None:
- raise Exception('Attempted to create non-unique name %s' %
- instance.name)
-
- network = db.project_get_network(None, instance.project_id)
- network_ref = \
- yield self._find_network_with_bridge(network.bridge)
-
- user = AuthManager().get_user(instance.user_id)
- project = AuthManager().get_project(instance.project_id)
- vdi_uuid = yield self._fetch_image(
- instance.image_id, user, project, True)
- kernel = yield self._fetch_image(
- instance.kernel_id, user, project, False)
- ramdisk = yield self._fetch_image(
- instance.ramdisk_id, user, project, False)
- vdi_ref = yield self._call_xenapi('VDI.get_by_uuid', vdi_uuid)
-
- vm_ref = yield self._create_vm(instance, kernel, ramdisk)
- yield self._create_vbd(vm_ref, vdi_ref, 0, True)
- if network_ref:
- yield self._create_vif(vm_ref, network_ref, instance.mac_address)
- logging.debug('Starting VM %s...', vm_ref)
- yield self._call_xenapi('VM.start', vm_ref, False, False)
- logging.info('Spawning VM %s created %s.', instance.name, vm_ref)
-
- @defer.inlineCallbacks
- def _create_vm(self, instance, kernel, ramdisk):
- """Create a VM record. Returns a Deferred that gives the new
- VM reference."""
-
- instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
- mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
- vcpus = str(instance_type['vcpus'])
- rec = {
- 'name_label': instance.name,
- 'name_description': '',
- 'is_a_template': False,
- 'memory_static_min': '0',
- 'memory_static_max': mem,
- 'memory_dynamic_min': mem,
- 'memory_dynamic_max': mem,
- 'VCPUs_at_startup': vcpus,
- 'VCPUs_max': vcpus,
- 'VCPUs_params': {},
- 'actions_after_shutdown': 'destroy',
- 'actions_after_reboot': 'restart',
- 'actions_after_crash': 'destroy',
- 'PV_bootloader': '',
- 'PV_kernel': kernel,
- 'PV_ramdisk': ramdisk,
- 'PV_args': 'root=/dev/xvda1',
- 'PV_bootloader_args': '',
- 'PV_legacy_args': '',
- 'HVM_boot_policy': '',
- 'HVM_boot_params': {},
- 'platform': {},
- 'PCI_bus': '',
- 'recommendations': '',
- 'affinity': '',
- 'user_version': '0',
- 'other_config': {},
- }
- logging.debug('Created VM %s...', instance.name)
- vm_ref = yield self._call_xenapi('VM.create', rec)
- logging.debug('Created VM %s as %s.', instance.name, vm_ref)
- defer.returnValue(vm_ref)
-
- @defer.inlineCallbacks
- def _create_vbd(self, vm_ref, vdi_ref, userdevice, bootable):
- """Create a VBD record. Returns a Deferred that gives the new
- VBD reference."""
-
- vbd_rec = {}
- vbd_rec['VM'] = vm_ref
- vbd_rec['VDI'] = vdi_ref
- vbd_rec['userdevice'] = str(userdevice)
- vbd_rec['bootable'] = bootable
- vbd_rec['mode'] = 'RW'
- vbd_rec['type'] = 'disk'
- vbd_rec['unpluggable'] = True
- vbd_rec['empty'] = False
- vbd_rec['other_config'] = {}
- vbd_rec['qos_algorithm_type'] = ''
- vbd_rec['qos_algorithm_params'] = {}
- vbd_rec['qos_supported_algorithms'] = []
- logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref)
- vbd_ref = yield self._call_xenapi('VBD.create', vbd_rec)
- logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref,
- vdi_ref)
- defer.returnValue(vbd_ref)
-
- @defer.inlineCallbacks
- def _create_vif(self, vm_ref, network_ref, mac_address):
- """Create a VIF record. Returns a Deferred that gives the new
- VIF reference."""
-
- vif_rec = {}
- vif_rec['device'] = '0'
- vif_rec['network'] = network_ref
- vif_rec['VM'] = vm_ref
- vif_rec['MAC'] = mac_address
- vif_rec['MTU'] = '1500'
- vif_rec['other_config'] = {}
- vif_rec['qos_algorithm_type'] = ''
- vif_rec['qos_algorithm_params'] = {}
- logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref,
- network_ref)
- vif_ref = yield self._call_xenapi('VIF.create', vif_rec)
- logging.debug('Created VIF %s for VM %s, network %s.', vif_ref,
- vm_ref, network_ref)
- defer.returnValue(vif_ref)
-
- @defer.inlineCallbacks
- def _find_network_with_bridge(self, bridge):
- expr = 'field "bridge" = "%s"' % bridge
- networks = yield self._call_xenapi('network.get_all_records_where',
- expr)
- if len(networks) == 1:
- defer.returnValue(networks.keys()[0])
- elif len(networks) > 1:
- raise Exception('Found non-unique network for bridge %s' % bridge)
- else:
- raise Exception('Found no network for bridge %s' % bridge)
-
- @defer.inlineCallbacks
- def _fetch_image(self, image, user, project, use_sr):
- """use_sr: True to put the image as a VDI in an SR, False to place
- it on dom0's filesystem. The former is for VM disks, the latter for
- its kernel and ramdisk (if external kernels are being used).
- Returns a Deferred that gives the new VDI UUID."""
-
- url = images.image_url(image)
- access = AuthManager().get_access_key(user, project)
- logging.debug("Asking xapi to fetch %s as %s" % (url, access))
- fn = use_sr and 'get_vdi' or 'get_kernel'
- args = {}
- args['src_url'] = url
- args['username'] = access
- args['password'] = user.secret
- if use_sr:
- args['add_partition'] = 'true'
- task = yield self._async_call_plugin('objectstore', fn, args)
- uuid = yield self._wait_for_task(task)
- defer.returnValue(uuid)
-
- @defer.inlineCallbacks
- def reboot(self, instance):
- vm = yield self._lookup(instance.name)
- if vm is None:
- raise Exception('instance not present %s' % instance.name)
- task = yield self._call_xenapi('Async.VM.clean_reboot', vm)
- yield self._wait_for_task(task)
-
- @defer.inlineCallbacks
- def destroy(self, instance):
- vm = yield self._lookup(instance.name)
- if vm is None:
- # Don't complain, just return. This lets us clean up instances
- # that have already disappeared from the underlying platform.
- defer.returnValue(None)
- # Get the VDIs related to the VM
- vdis = yield self._lookup_vm_vdis(vm)
- try:
- task = yield self._call_xenapi('Async.VM.hard_shutdown', vm)
- yield self._wait_for_task(task)
- except Exception, exc:
- logging.warn(exc)
- # Disk clean-up
- if vdis:
- for vdi in vdis:
- try:
- task = yield self._call_xenapi('Async.VDI.destroy', vdi)
- yield self._wait_for_task(task)
- except Exception, exc:
- logging.warn(exc)
- try:
- task = yield self._call_xenapi('Async.VM.destroy', vm)
- yield self._wait_for_task(task)
- except Exception, exc:
- logging.warn(exc)
-
- def get_info(self, instance_id):
- vm = self._lookup_blocking(instance_id)
- if vm is None:
- raise Exception('instance not present %s' % instance_id)
- rec = self._conn.xenapi.VM.get_record(vm)
- return {'state': XENAPI_POWER_STATE[rec['power_state']],
- 'max_mem': long(rec['memory_static_max']) >> 10,
- 'mem': long(rec['memory_dynamic_max']) >> 10,
- 'num_cpu': rec['VCPUs_max'],
- 'cpu_time': 0}
-
- def get_console_output(self, instance):
- return 'FAKE CONSOLE OUTPUT'
-
- @utils.deferredToThread
- def _lookup(self, i):
- return self._lookup_blocking(i)
-
- def _lookup_blocking(self, i):
- vms = self._conn.xenapi.VM.get_by_name_label(i)
- n = len(vms)
- if n == 0:
- return None
- elif n > 1:
- raise Exception('duplicate name found: %s' % i)
- else:
- return vms[0]
-
- @utils.deferredToThread
- def _lookup_vm_vdis(self, vm):
- return self._lookup_vm_vdis_blocking(vm)
-
- def _lookup_vm_vdis_blocking(self, vm):
- # Firstly we get the VBDs, then the VDIs.
- # TODO: do we leave the read-only devices?
- vbds = self._conn.xenapi.VM.get_VBDs(vm)
- vdis = []
- if vbds:
- for vbd in vbds:
- try:
- vdi = self._conn.xenapi.VBD.get_VDI(vbd)
- # Test valid VDI
- record = self._conn.xenapi.VDI.get_record(vdi)
- except Exception, exc:
- logging.warn(exc)
- else:
- vdis.append(vdi)
- if len(vdis) > 0:
- return vdis
- else:
- return None
-
- def _wait_for_task(self, task):
- """Return a Deferred that will give the result of the given task.
- The task is polled until it completes."""
- d = defer.Deferred()
- reactor.callLater(0, self._poll_task, task, d)
- return d
-
- @utils.deferredToThread
- def _poll_task(self, task, deferred):
- """Poll the given XenAPI task, and fire the given Deferred if we
- get a result."""
- try:
- #logging.debug('Polling task %s...', task)
- status = self._conn.xenapi.task.get_status(task)
- if status == 'pending':
- reactor.callLater(FLAGS.xenapi_task_poll_interval,
- self._poll_task, task, deferred)
- elif status == 'success':
- result = self._conn.xenapi.task.get_result(task)
- logging.info('Task %s status: success. %s', task, result)
- deferred.callback(_parse_xmlrpc_value(result))
- else:
- error_info = self._conn.xenapi.task.get_error_info(task)
- logging.warn('Task %s status: %s. %s', task, status,
- error_info)
- deferred.errback(XenAPI.Failure(error_info))
- #logging.debug('Polling task %s done.', task)
- except Exception, exc:
- logging.warn(exc)
- deferred.errback(exc)
-
- @utils.deferredToThread
- def _call_xenapi(self, method, *args):
- """Call the specified XenAPI method on a background thread. Returns
- a Deferred for the result."""
- f = self._conn.xenapi
- for m in method.split('.'):
- f = f.__getattr__(m)
- return f(*args)
-
- @utils.deferredToThread
- def _async_call_plugin(self, plugin, fn, args):
- """Call Async.host.call_plugin on a background thread. Returns a
- Deferred with the task reference."""
- return _unwrap_plugin_exceptions(
- self._conn.xenapi.Async.host.call_plugin,
- self._get_xenapi_host(), plugin, fn, args)
-
- def _get_xenapi_host(self):
- return self._conn.xenapi.session.get_this_host(self._conn.handle)
-
-
-def _unwrap_plugin_exceptions(func, *args, **kwargs):
- try:
- return func(*args, **kwargs)
- except XenAPI.Failure, exc:
- logging.debug("Got exception: %s", exc)
- if (len(exc.details) == 4 and
- exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
- exc.details[2] == 'Failure'):
- params = None
- try:
- params = eval(exc.details[3])
- except:
- raise exc
- raise XenAPI.Failure(params)
- else:
- raise
- except xmlrpclib.ProtocolError, exc:
- logging.debug("Got exception: %s", exc)
- raise
-
-
-def _parse_xmlrpc_value(val):
- """Parse the given value as if it were an XML-RPC value. This is
- sometimes used as the format for the task.result field."""
- if not val:
- return val
- x = xmlrpclib.loads(
- '<?xml version="1.0"?><methodResponse><params><param>' +
- val +
- '</param></params></methodResponse>')
- return x[0][0]
diff --git a/nova/virt/xenapi/__init__.py b/nova/virt/xenapi/__init__.py
new file mode 100644
index 0000000000..3d598c463c
--- /dev/null
+++ b/nova/virt/xenapi/__init__.py
@@ -0,0 +1,15 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py
new file mode 100644
index 0000000000..8cb4cce3a7
--- /dev/null
+++ b/nova/virt/xenapi/network_utils.py
@@ -0,0 +1,45 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of network
+records and their attributes like bridges, PIFs, QoS, as well as
+their lookup functions.
+"""
+
+from twisted.internet import defer
+
+
+class NetworkHelper():
+ """
+ The class that wraps the helper methods together.
+ """
+ def __init__(self):
+ return
+
+ @classmethod
+ @defer.inlineCallbacks
+ def find_network_with_bridge(cls, session, bridge):
+ """ Return the network on which the bridge is attached, if found """
+ expr = 'field "bridge" = "%s"' % bridge
+ networks = yield session.call_xenapi('network.get_all_records_where',
+ expr)
+ if len(networks) == 1:
+ defer.returnValue(networks.keys()[0])
+ elif len(networks) > 1:
+ raise Exception('Found non-unique network for bridge %s' % bridge)
+ else:
+ raise Exception('Found no network for bridge %s' % bridge)
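
Because find_network_with_bridge is wrapped in inlineCallbacks it returns a Deferred; a short sketch of consuming it from other Twisted code, assuming a session object exposing the call_xenapi method used above (vmops.py below does the same when spawning). The wrapper function name is illustrative:

    from twisted.internet import defer

    from nova.virt.xenapi.network_utils import NetworkHelper

    @defer.inlineCallbacks
    def network_for_bridge(session, bridge):
        # Yields the XenAPI network reference for the given bridge, or
        # raises if zero or more than one network matches.
        network_ref = yield NetworkHelper.find_network_with_bridge(session,
                                                                   bridge)
        defer.returnValue(network_ref)
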
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
new file mode 100644
index 0000000000..77edb576e8
--- /dev/null
+++ b/nova/virt/xenapi/vm_utils.py
@@ -0,0 +1,268 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Helper methods for operations related to the management of VM records and
+their attributes like VDIs, VIFs, as well as their lookup functions.
+"""
+
+import logging
+import urllib
+
+from twisted.internet import defer
+from xml.dom import minidom
+
+from nova import flags
+from nova import utils
+
+from nova.auth.manager import AuthManager
+from nova.compute import instance_types
+from nova.compute import power_state
+from nova.virt import images
+
+FLAGS = flags.FLAGS
+
+XENAPI_POWER_STATE = {
+ 'Halted': power_state.SHUTDOWN,
+ 'Running': power_state.RUNNING,
+ 'Paused': power_state.PAUSED,
+ 'Suspended': power_state.SHUTDOWN, # FIXME
+ 'Crashed': power_state.CRASHED}
+
+XenAPI = None
+
+
+class VMHelper():
+ """
+ The class that wraps the helper methods together.
+ """
+ def __init__(self):
+ return
+
+ @classmethod
+ def late_import(cls):
+ """
+ Load the XenAPI module for the helper class, if required.
+ This avoids having to install the XenAPI library when other
+ hypervisors are in use.
+ """
+ global XenAPI
+ if XenAPI is None:
+ XenAPI = __import__('XenAPI')
+
+ @classmethod
+ @defer.inlineCallbacks
+ def create_vm(cls, session, instance, kernel, ramdisk):
+ """Create a VM record. Returns a Deferred that gives the new
+ VM reference."""
+
+ instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+ mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
+ vcpus = str(instance_type['vcpus'])
+ rec = {
+ 'name_label': instance.name,
+ 'name_description': '',
+ 'is_a_template': False,
+ 'memory_static_min': '0',
+ 'memory_static_max': mem,
+ 'memory_dynamic_min': mem,
+ 'memory_dynamic_max': mem,
+ 'VCPUs_at_startup': vcpus,
+ 'VCPUs_max': vcpus,
+ 'VCPUs_params': {},
+ 'actions_after_shutdown': 'destroy',
+ 'actions_after_reboot': 'restart',
+ 'actions_after_crash': 'destroy',
+ 'PV_bootloader': '',
+ 'PV_kernel': kernel,
+ 'PV_ramdisk': ramdisk,
+ 'PV_args': 'root=/dev/xvda1',
+ 'PV_bootloader_args': '',
+ 'PV_legacy_args': '',
+ 'HVM_boot_policy': '',
+ 'HVM_boot_params': {},
+ 'platform': {},
+ 'PCI_bus': '',
+ 'recommendations': '',
+ 'affinity': '',
+ 'user_version': '0',
+ 'other_config': {},
+ }
+ logging.debug('Created VM %s...', instance.name)
+ vm_ref = yield session.call_xenapi('VM.create', rec)
+ logging.debug('Created VM %s as %s.', instance.name, vm_ref)
+ defer.returnValue(vm_ref)
+
+ @classmethod
+ @defer.inlineCallbacks
+ def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
+ """Create a VBD record. Returns a Deferred that gives the new
+ VBD reference."""
+
+ vbd_rec = {}
+ vbd_rec['VM'] = vm_ref
+ vbd_rec['VDI'] = vdi_ref
+ vbd_rec['userdevice'] = str(userdevice)
+ vbd_rec['bootable'] = bootable
+ vbd_rec['mode'] = 'RW'
+ vbd_rec['type'] = 'disk'
+ vbd_rec['unpluggable'] = True
+ vbd_rec['empty'] = False
+ vbd_rec['other_config'] = {}
+ vbd_rec['qos_algorithm_type'] = ''
+ vbd_rec['qos_algorithm_params'] = {}
+ vbd_rec['qos_supported_algorithms'] = []
+ logging.debug('Creating VBD for VM %s, VDI %s ... ', vm_ref, vdi_ref)
+ vbd_ref = yield session.call_xenapi('VBD.create', vbd_rec)
+ logging.debug('Created VBD %s for VM %s, VDI %s.', vbd_ref, vm_ref,
+ vdi_ref)
+ defer.returnValue(vbd_ref)
+
+ @classmethod
+ @defer.inlineCallbacks
+ def create_vif(cls, session, vm_ref, network_ref, mac_address):
+ """Create a VIF record. Returns a Deferred that gives the new
+ VIF reference."""
+
+ vif_rec = {}
+ vif_rec['device'] = '0'
+ vif_rec['network'] = network_ref
+ vif_rec['VM'] = vm_ref
+ vif_rec['MAC'] = mac_address
+ vif_rec['MTU'] = '1500'
+ vif_rec['other_config'] = {}
+ vif_rec['qos_algorithm_type'] = ''
+ vif_rec['qos_algorithm_params'] = {}
+ logging.debug('Creating VIF for VM %s, network %s ... ', vm_ref,
+ network_ref)
+ vif_ref = yield session.call_xenapi('VIF.create', vif_rec)
+ logging.debug('Created VIF %s for VM %s, network %s.', vif_ref,
+ vm_ref, network_ref)
+ defer.returnValue(vif_ref)
+
+ @classmethod
+ @defer.inlineCallbacks
+ def fetch_image(cls, session, image, user, project, use_sr):
+ """use_sr: True to put the image as a VDI in an SR, False to place
+ it on dom0's filesystem. The former is for VM disks, the latter for
+ its kernel and ramdisk (if external kernels are being used).
+ Returns a Deferred that gives the new VDI UUID."""
+
+ url = images.image_url(image)
+ access = AuthManager().get_access_key(user, project)
+ logging.debug("Asking xapi to fetch %s as %s", url, access)
+ fn = use_sr and 'get_vdi' or 'get_kernel'
+ args = {}
+ args['src_url'] = url
+ args['username'] = access
+ args['password'] = user.secret
+ if use_sr:
+ args['add_partition'] = 'true'
+ task = yield session.async_call_plugin('objectstore', fn, args)
+ uuid = yield session.wait_for_task(task)
+ defer.returnValue(uuid)
+
+ @classmethod
+ @utils.deferredToThread
+ def lookup(cls, session, i):
+ """ Look the instance i up, and returns it if available """
+ return VMHelper.lookup_blocking(session, i)
+
+ @classmethod
+ def lookup_blocking(cls, session, i):
+ """ Synchronous lookup """
+ vms = session.get_xenapi().VM.get_by_name_label(i)
+ n = len(vms)
+ if n == 0:
+ return None
+ elif n > 1:
+ raise Exception('duplicate name found: %s' % i)
+ else:
+ return vms[0]
+
+ @classmethod
+ @utils.deferredToThread
+ def lookup_vm_vdis(cls, session, vm):
+ """ Look for the VDIs that are attached to the VM """
+ return VMHelper.lookup_vm_vdis_blocking(session, vm)
+
+ @classmethod
+ def lookup_vm_vdis_blocking(cls, session, vm):
+ """ Synchronous lookup_vm_vdis """
+ # Firstly we get the VBDs, then the VDIs.
+ # TODO(Armando): do we leave the read-only devices?
+ vbds = session.get_xenapi().VM.get_VBDs(vm)
+ vdis = []
+ if vbds:
+ for vbd in vbds:
+ try:
+ vdi = session.get_xenapi().VBD.get_VDI(vbd)
+ # Test valid VDI
+ record = session.get_xenapi().VDI.get_record(vdi)
+ logging.debug('VDI %s is still available', record['uuid'])
+ except XenAPI.Failure, exc:
+ logging.warn(exc)
+ else:
+ vdis.append(vdi)
+ if len(vdis) > 0:
+ return vdis
+ else:
+ return None
+
+ @classmethod
+ def compile_info(cls, record):
+ return {'state': XENAPI_POWER_STATE[record['power_state']],
+ 'max_mem': long(record['memory_static_max']) >> 10,
+ 'mem': long(record['memory_dynamic_max']) >> 10,
+ 'num_cpu': record['VCPUs_max'],
+ 'cpu_time': 0}
+
+ @classmethod
+ def compile_diagnostics(cls, session, record):
+ """Compile VM diagnostics data"""
+ try:
+ host = session.get_xenapi_host()
+ host_ip = session.get_xenapi().host.get_record(host)["address"]
+ metrics = session.get_xenapi().VM_guest_metrics.get_record(
+ record["guest_metrics"])
+ diags = {
+ "Kernel": metrics["os_version"]["uname"],
+ "Distro": metrics["os_version"]["name"]}
+ xml = get_rrd(host_ip, record["uuid"])
+ if xml:
+ rrd = minidom.parseString(xml)
+ for i, node in enumerate(rrd.firstChild.childNodes):
+ # We don't want all of the extra garbage
+ if i >= 3 and i <= 11:
+ ref = node.childNodes
+ # Name and Value
+ diags[ref[0].firstChild.data] = ref[6].firstChild.data
+ return diags
+ except XenAPI.Failure as e:
+ return {"Unable to retrieve diagnostics": e}
+
+
+def get_rrd(host, uuid):
+ """Return the VM RRD XML as a string"""
+ try:
+ xml = urllib.urlopen("http://%s:%s@%s/vm_rrd?uuid=%s" % (
+ FLAGS.xenapi_connection_username,
+ FLAGS.xenapi_connection_password,
+ host,
+ uuid))
+ return xml.read()
+ except IOError:
+ return None
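
compile_info only needs the raw VM record, so a driver can summarise a VM in two calls; a minimal sketch mirroring what VMOps.get_info does below, assuming the session object is the XenAPISession defined later in this patch (the function name is illustrative):

    from nova.virt.xenapi.vm_utils import VMHelper

    def summarize_vm(session, instance_name):
        # Synchronous variant of VMOps.get_info: look the VM up by name,
        # fetch its record and reduce it to the state/memory/cpu dict.
        vm = VMHelper.lookup_blocking(session, instance_name)
        if vm is None:
            raise Exception('instance not present %s' % instance_name)
        rec = session.get_xenapi().VM.get_record(vm)
        return VMHelper.compile_info(rec)
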
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
new file mode 100644
index 0000000000..9bfd072671
--- /dev/null
+++ b/nova/virt/xenapi/vmops.py
@@ -0,0 +1,146 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for VM-related functions (spawn, reboot, etc).
+"""
+
+import logging
+
+from twisted.internet import defer
+
+from nova import db
+from nova import context
+
+from nova.auth.manager import AuthManager
+from nova.virt.xenapi.network_utils import NetworkHelper
+from nova.virt.xenapi.vm_utils import VMHelper
+
+XenAPI = None
+
+
+class VMOps(object):
+ """
+ Management class for VM-related tasks
+ """
+ def __init__(self, session):
+ global XenAPI
+ if XenAPI is None:
+ XenAPI = __import__('XenAPI')
+ self._session = session
+ # Load XenAPI module in the helper class
+ VMHelper.late_import()
+
+ def list_instances(self):
+ """ List VM instances """
+ return [self._session.get_xenapi().VM.get_name_label(vm) \
+ for vm in self._session.get_xenapi().VM.get_all()]
+
+ @defer.inlineCallbacks
+ def spawn(self, instance):
+ """ Create VM instance """
+ vm = yield VMHelper.lookup(self._session, instance.name)
+ if vm is not None:
+ raise Exception('Attempted to create non-unique name %s' %
+ instance.name)
+
+ bridge = db.project_get_network(context.get_admin_context(),
+ instance.project_id).bridge
+ network_ref = \
+ yield NetworkHelper.find_network_with_bridge(self._session, bridge)
+
+ user = AuthManager().get_user(instance.user_id)
+ project = AuthManager().get_project(instance.project_id)
+ vdi_uuid = yield VMHelper.fetch_image(self._session,
+ instance.image_id, user, project, True)
+ kernel = yield VMHelper.fetch_image(self._session,
+ instance.kernel_id, user, project, False)
+ ramdisk = yield VMHelper.fetch_image(self._session,
+ instance.ramdisk_id, user, project, False)
+ vdi_ref = yield self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+ vm_ref = yield VMHelper.create_vm(self._session,
+ instance, kernel, ramdisk)
+ yield VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
+ if network_ref:
+ yield VMHelper.create_vif(self._session, vm_ref,
+ network_ref, instance.mac_address)
+ logging.debug('Starting VM %s...', vm_ref)
+ yield self._session.call_xenapi('VM.start', vm_ref, False, False)
+ logging.info('Spawning VM %s created %s.', instance.name,
+ vm_ref)
+
+ @defer.inlineCallbacks
+ def reboot(self, instance):
+ """ Reboot VM instance """
+ instance_name = instance.name
+ vm = yield VMHelper.lookup(self._session, instance_name)
+ if vm is None:
+ raise Exception('instance not present %s' % instance_name)
+ task = yield self._session.call_xenapi('Async.VM.clean_reboot', vm)
+ yield self._session.wait_for_task(task)
+
+ @defer.inlineCallbacks
+ def destroy(self, instance):
+ """ Destroy VM instance """
+ vm = yield VMHelper.lookup(self._session, instance.name)
+ if vm is None:
+ # Don't complain, just return. This lets us clean up instances
+ # that have already disappeared from the underlying platform.
+ defer.returnValue(None)
+ # Get the VDIs related to the VM
+ vdis = yield VMHelper.lookup_vm_vdis(self._session, vm)
+ try:
+ task = yield self._session.call_xenapi('Async.VM.hard_shutdown',
+ vm)
+ yield self._session.wait_for_task(task)
+ except XenAPI.Failure, exc:
+ logging.warn(exc)
+ # Disk clean-up
+ if vdis:
+ for vdi in vdis:
+ try:
+ task = yield self._session.call_xenapi('Async.VDI.destroy',
+ vdi)
+ yield self._session.wait_for_task(task)
+ except XenAPI.Failure, exc:
+ logging.warn(exc)
+ try:
+ task = yield self._session.call_xenapi('Async.VM.destroy', vm)
+ yield self._session.wait_for_task(task)
+ except XenAPI.Failure, exc:
+ logging.warn(exc)
+
+ def get_info(self, instance_id):
+ """ Return data about VM instance """
+ vm = VMHelper.lookup_blocking(self._session, instance_id)
+ if vm is None:
+ raise Exception('instance not present %s' % instance_id)
+ rec = self._session.get_xenapi().VM.get_record(vm)
+ return VMHelper.compile_info(rec)
+
+ @defer.inlineCallbacks
+ def get_diagnostics(self, instance_id):
+ """Return data about VM diagnostics"""
+ vm = yield VMHelper.lookup(self._session, instance_id)
+ if vm is None:
+ raise Exception("instance not present %s" % instance_id)
+ rec = yield self._session.get_xenapi().VM.get_record(vm)
+ defer.returnValue(VMHelper.compile_diagnostics(self._session, rec))
+
+ def get_console_output(self, instance):
+ """ Return snapshot of console """
+ # TODO: implement this to fix pylint!
+ return 'FAKE CONSOLE OUTPUT of instance'
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
new file mode 100644
index 0000000000..a4c7a38619
--- /dev/null
+++ b/nova/virt/xenapi/volumeops.py
@@ -0,0 +1,32 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Management class for Storage-related functions (attach, detach, etc).
+"""
+
+
+class VolumeOps(object):
+ def __init__(self, session):
+ self._session = session
+
+ def attach_volume(self, instance_name, device_path, mountpoint):
+ # FIXME: that's going to be sorted when iscsi-xenapi lands in branch
+ return True
+
+ def detach_volume(self, instance_name, mountpoint):
+ # FIXME: that's going to be sorted when iscsi-xenapi lands in branch
+ return True
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
new file mode 100644
index 0000000000..2153810c85
--- /dev/null
+++ b/nova/virt/xenapi_conn.py
@@ -0,0 +1,242 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A connection to XenServer or Xen Cloud Platform.
+
+The concurrency model for this class is as follows:
+
+All XenAPI calls are on a thread (using t.i.t.deferToThread, via the decorator
+deferredToThread). They are remote calls, and so may hang for the usual
+reasons. They should not be allowed to block the reactor thread.
+
+All long-running XenAPI calls (VM.start, VM.reboot, etc) are called async
+(using XenAPI.VM.async_start etc). These return a task, which can then be
+polled for completion. Polling is handled using reactor.callLater.
+
+This combination of techniques means that we don't block the reactor thread at
+all, and at the same time we don't hold lots of threads waiting for
+long-running operations.
+
+FIXME: get_info currently doesn't conform to these rules, and will block the
+reactor thread if the VM.get_by_name_label or VM.get_record calls block.
+
+**Related Flags**
+
+:xenapi_connection_url: URL for connection to XenServer/Xen Cloud Platform.
+:xenapi_connection_username: Username for connection to XenServer/Xen Cloud
+ Platform (default: root).
+:xenapi_connection_password: Password for connection to XenServer/Xen Cloud
+ Platform.
+:xenapi_task_poll_interval: The interval (seconds) used for polling of
+ remote tasks (Async.VM.start, etc)
+ (default: 0.5).
+
+"""
+
+import logging
+import xmlrpclib
+
+from twisted.internet import defer
+from twisted.internet import reactor
+
+from nova import utils
+from nova import flags
+from nova.virt.xenapi.vmops import VMOps
+from nova.virt.xenapi.volumeops import VolumeOps
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('xenapi_connection_url',
+ None,
+ 'URL for connection to XenServer/Xen Cloud Platform.'
+ ' Required if connection_type=xenapi.')
+flags.DEFINE_string('xenapi_connection_username',
+ 'root',
+ 'Username for connection to XenServer/Xen Cloud Platform.'
+ ' Used only if connection_type=xenapi.')
+flags.DEFINE_string('xenapi_connection_password',
+ None,
+ 'Password for connection to XenServer/Xen Cloud Platform.'
+ ' Used only if connection_type=xenapi.')
+flags.DEFINE_float('xenapi_task_poll_interval',
+ 0.5,
+ 'The interval used for polling of remote tasks '
+ '(Async.VM.start, etc). Used only if '
+ 'connection_type=xenapi.')
+
+XenAPI = None
+
+
+def get_connection(_):
+ """Note that XenAPI doesn't have a read-only connection mode, so
+ the read_only parameter is ignored."""
+ # This is loaded late so that there's no need to install this
+ # library when not using XenAPI.
+ global XenAPI
+ if XenAPI is None:
+ XenAPI = __import__('XenAPI')
+ url = FLAGS.xenapi_connection_url
+ username = FLAGS.xenapi_connection_username
+ password = FLAGS.xenapi_connection_password
+ if not url or password is None:
+ raise Exception('Must specify xenapi_connection_url, '
+ 'xenapi_connection_username (optionally), and '
+ 'xenapi_connection_password to use '
+ 'connection_type=xenapi')
+ return XenAPIConnection(url, username, password)
+
+
+class XenAPIConnection(object):
+ """ A connection to XenServer or Xen Cloud Platform """
+ def __init__(self, url, user, pw):
+ session = XenAPISession(url, user, pw)
+ self._vmops = VMOps(session)
+ self._volumeops = VolumeOps(session)
+
+ def list_instances(self):
+ """ List VM instances """
+ return self._vmops.list_instances()
+
+ def spawn(self, instance):
+ """ Create VM instance """
+ self._vmops.spawn(instance)
+
+ def reboot(self, instance):
+ """ Reboot VM instance """
+ self._vmops.reboot(instance)
+
+ def destroy(self, instance):
+ """ Destroy VM instance """
+ self._vmops.destroy(instance)
+
+ def get_info(self, instance_id):
+ """ Return data about VM instance """
+ return self._vmops.get_info(instance_id)
+
+ def get_diagnostics(self, instance_id):
+ """Return data about VM diagnostics"""
+ return self._vmops.get_diagnostics(instance_id)
+
+ def get_console_output(self, instance):
+ """ Return snapshot of console """
+ return self._vmops.get_console_output(instance)
+
+ def attach_volume(self, instance_name, device_path, mountpoint):
+ """ Attach volume storage to VM instance """
+ return self._volumeops.attach_volume(instance_name,
+ device_path,
+ mountpoint)
+
+ def detach_volume(self, instance_name, mountpoint):
+ """ Detach volume storage to VM instance """
+ return self._volumeops.detach_volume(instance_name, mountpoint)
+
+
+class XenAPISession(object):
+ """ The session to invoke XenAPI SDK calls """
+ def __init__(self, url, user, pw):
+ self._session = XenAPI.Session(url)
+ self._session.login_with_password(user, pw)
+
+ def get_xenapi(self):
+ """ Return the xenapi object """
+ return self._session.xenapi
+
+ def get_xenapi_host(self):
+ """ Return the xenapi host """
+ return self._session.xenapi.session.get_this_host(self._session.handle)
+
+ @utils.deferredToThread
+ def call_xenapi(self, method, *args):
+ """Call the specified XenAPI method on a background thread. Returns
+ a Deferred for the result."""
+ f = self._session.xenapi
+ for m in method.split('.'):
+ f = f.__getattr__(m)
+ return f(*args)
+
+ @utils.deferredToThread
+ def async_call_plugin(self, plugin, fn, args):
+ """Call Async.host.call_plugin on a background thread. Returns a
+ Deferred with the task reference."""
+ return _unwrap_plugin_exceptions(
+ self._session.xenapi.Async.host.call_plugin,
+ self.get_xenapi_host(), plugin, fn, args)
+
+ def wait_for_task(self, task):
+ """Return a Deferred that will give the result of the given task.
+ The task is polled until it completes."""
+ d = defer.Deferred()
+ reactor.callLater(0, self._poll_task, task, d)
+ return d
+
+ @utils.deferredToThread
+ def _poll_task(self, task, deferred):
+ """Poll the given XenAPI task, and fire the given Deferred if we
+ get a result."""
+ try:
+ #logging.debug('Polling task %s...', task)
+ status = self._session.xenapi.task.get_status(task)
+ if status == 'pending':
+ reactor.callLater(FLAGS.xenapi_task_poll_interval,
+ self._poll_task, task, deferred)
+ elif status == 'success':
+ result = self._session.xenapi.task.get_result(task)
+ logging.info('Task %s status: success. %s', task, result)
+ deferred.callback(_parse_xmlrpc_value(result))
+ else:
+ error_info = self._session.xenapi.task.get_error_info(task)
+ logging.warn('Task %s status: %s. %s', task, status,
+ error_info)
+ deferred.errback(XenAPI.Failure(error_info))
+ #logging.debug('Polling task %s done.', task)
+ except XenAPI.Failure, exc:
+ logging.warn(exc)
+ deferred.errback(exc)
+
+
+def _unwrap_plugin_exceptions(func, *args, **kwargs):
+ """ Parse exception details """
+ try:
+ return func(*args, **kwargs)
+ except XenAPI.Failure, exc:
+ logging.debug("Got exception: %s", exc)
+ if (len(exc.details) == 4 and
+ exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
+ exc.details[2] == 'Failure'):
+ params = None
+ try:
+ params = eval(exc.details[3])
+ except:
+ raise exc
+ raise XenAPI.Failure(params)
+ else:
+ raise
+ except xmlrpclib.ProtocolError, exc:
+ logging.debug("Got exception: %s", exc)
+ raise
+
+
+def _parse_xmlrpc_value(val):
+ """Parse the given value as if it were an XML-RPC value. This is
+ sometimes used as the format for the task.result field."""
+ if not val:
+ return val
+ x = xmlrpclib.loads(
+ '<?xml version="1.0"?><methodResponse><params><param>' +
+ val +
+ '</param></params></methodResponse>')
+ return x[0][0]
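+
+# NOTE: a rough illustration of what _parse_xmlrpc_value accepts; an empty
+# value is passed through unchanged, otherwise the XML-RPC fragment is
+# decoded, e.g.:
+#
+#     _parse_xmlrpc_value('<value><boolean>1</boolean></value>')  # -> True
+#     _parse_xmlrpc_value('')                                     # -> ''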
diff --git a/setup.py b/setup.py
index ec0014478b..d88bc1e6f8 100644
--- a/setup.py
+++ b/setup.py
@@ -57,6 +57,7 @@ setup(name='nova',
cmdclass={ 'sdist': local_sdist,
'build_sphinx' : local_BuildDoc },
packages=find_packages(exclude=['bin', 'smoketests']),
+ include_package_data=True,
scripts=['bin/nova-api',
'bin/nova-compute',
'bin/nova-dhcpbridge',
diff --git a/smoketests/admin_smoketests.py b/smoketests/admin_smoketests.py
new file mode 100644
index 0000000000..50bb3fa2e1
--- /dev/null
+++ b/smoketests/admin_smoketests.py
@@ -0,0 +1,92 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import random
+import sys
+import time
+import unittest
+import zipfile
+
+from nova import adminclient
+from smoketests import flags
+from smoketests import base
+
+
+SUITE_NAMES = '[user]'
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
+
+# TODO(devcamcar): Use random tempfile
+ZIP_FILENAME = '/tmp/nova-me-x509.zip'
+
+TEST_PREFIX = 'test%s' % int(random.random()*1000000)
+TEST_USERNAME = '%suser' % TEST_PREFIX
+TEST_PROJECTNAME = '%sproject' % TEST_PREFIX
+
+
+class AdminSmokeTestCase(base.SmokeTestCase):
+ def setUp(self):
+ self.admin = adminclient.NovaAdminClient(
+ access_key=os.getenv('EC2_ACCESS_KEY'),
+ secret_key=os.getenv('EC2_SECRET_KEY'),
+ clc_url=os.getenv('EC2_URL'),
+ region=FLAGS.region)
+
+
+class UserTests(AdminSmokeTestCase):
+ """ Test admin credentials and user creation. """
+
+ def test_001_admin_can_connect(self):
+ conn = self.admin.connection_for('admin', 'admin')
+ self.assert_(conn)
+
+ def test_002_admin_can_create_user(self):
+ user = self.admin.create_user(TEST_USERNAME)
+ self.assertEqual(user.username, TEST_USERNAME)
+
+ def test_003_admin_can_create_project(self):
+ project = self.admin.create_project(TEST_PROJECTNAME,
+ TEST_USERNAME)
+ self.assertEqual(project.projectname, TEST_PROJECTNAME)
+
+ def test_004_user_can_download_credentials(self):
+ buf = self.admin.get_zip(TEST_USERNAME, TEST_PROJECTNAME)
+ output = open(ZIP_FILENAME, 'w')
+ output.write(buf)
+ output.close()
+
+ zip = zipfile.ZipFile(ZIP_FILENAME, 'a', zipfile.ZIP_DEFLATED)
+ bad = zip.testzip()
+ zip.close()
+
+ self.failIf(bad)
+
+ def test_999_tearDown(self):
+ self.admin.delete_project(TEST_PROJECTNAME)
+ self.admin.delete_user(TEST_USERNAME)
+ try:
+ os.remove(ZIP_FILENAME)
+ except:
+ pass
+
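+# NOTE: invocation sketch (illustrative; assumes the nova tree is on
+# PYTHONPATH and that a novarc has been sourced for the EC2_* variables):
+#
+#     python smoketests/admin_smoketests.py --suite=user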
+if __name__ == "__main__":
+ suites = {'user': unittest.makeSuite(UserTests)}
+ sys.exit(base.run_tests(suites))
+
diff --git a/smoketests/novatestcase.py b/smoketests/base.py
index 513e0ca912..5a14d3e098 100644
--- a/smoketests/novatestcase.py
+++ b/smoketests/base.py
@@ -16,36 +16,26 @@
# License for the specific language governing permissions and limitations
# under the License.
+import boto
import commands
+import httplib
import os
+import paramiko
import random
import sys
import unittest
+from boto.ec2.regioninfo import RegionInfo
-
-import paramiko
-
-from nova import adminclient
from smoketests import flags
FLAGS = flags.FLAGS
-class NovaTestCase(unittest.TestCase):
- def setUp(self):
- self.nova_admin = adminclient.NovaAdminClient(
- access_key=FLAGS.admin_access_key,
- secret_key=FLAGS.admin_secret_key,
- clc_ip=FLAGS.clc_ip)
-
- def tearDown(self):
- pass
-
+class SmokeTestCase(unittest.TestCase):
def connect_ssh(self, ip, key_name):
# TODO(devcamcar): set a more reasonable connection timeout time
key = paramiko.RSAKey.from_private_key_file('/tmp/%s.pem' % key_name)
client = paramiko.SSHClient()
- client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
client.connect(ip, username='root', pkey=key)
stdin, stdout, stderr = client.exec_command('uptime')
@@ -53,26 +43,50 @@ class NovaTestCase(unittest.TestCase):
return client
def can_ping(self, ip):
- return commands.getstatusoutput('ping -c 1 %s' % ip)[0] == 0
-
- @property
- def admin(self):
- return self.nova_admin.connection_for('admin')
-
- def connection_for(self, username):
- return self.nova_admin.connection_for(username)
-
- def create_user(self, username):
- return self.nova_admin.create_user(username)
-
- def get_user(self, username):
- return self.nova_admin.get_user(username)
-
- def delete_user(self, username):
- return self.nova_admin.delete_user(username)
-
- def get_signed_zip(self, username):
- return self.nova_admin.get_zip(username)
+ """ Attempt to ping the specified IP, and give up after 1 second. """
+
+ # NOTE(devcamcar): ping timeout flag is different in OSX.
+ if sys.platform == 'darwin':
+ timeout_flag = 't'
+ else:
+ timeout_flag = 'w'
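+        # The resulting command is 'ping -c1 -t1 <ip>' on OS X and
+        # 'ping -c1 -w1 <ip>' on Linux; both cap the wait at one second.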
+
+ status, output = commands.getstatusoutput('ping -c1 -%s1 %s' %
+ (timeout_flag, ip))
+ return status == 0
+
+ def connection_for_env(self, **kwargs):
+ """
+ Returns a boto ec2 connection for the current environment.
+ """
+ access_key = os.getenv('EC2_ACCESS_KEY')
+ secret_key = os.getenv('EC2_SECRET_KEY')
+ clc_url = os.getenv('EC2_URL')
+
+ if not access_key or not secret_key or not clc_url:
+ raise Exception('Missing EC2 environment variables. Please source '
+ 'the appropriate novarc file before running this '
+ 'test.')
+
+ parts = self.split_clc_url(clc_url)
+ return boto.connect_ec2(aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key,
+ is_secure=parts['is_secure'],
+ region=RegionInfo(None,
+ 'nova',
+ parts['ip']),
+ port=parts['port'],
+ path='/services/Cloud',
+ **kwargs)
+
+ def split_clc_url(self, clc_url):
+ """
+ Splits a cloud controller endpoint url.
+ """
+ parts = httplib.urlsplit(clc_url)
+ is_secure = parts.scheme == 'https'
+ ip, port = parts.netloc.split(':')
+ return {'ip': ip, 'port': int(port), 'is_secure': is_secure}
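+
+    # NOTE: a rough example of the expected shape, assuming the EC2_URL value
+    # carries an explicit port (as novarc-generated URLs do):
+    #
+    #     split_clc_url('http://192.168.3.1:8773/services/Cloud')
+    #     # -> {'ip': '192.168.3.1', 'port': 8773, 'is_secure': False}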
def create_key_pair(self, conn, key_name):
try:
@@ -116,15 +130,25 @@ class NovaTestCase(unittest.TestCase):
raise Exception(output)
return True
- def register_image(self, bucket_name, manifest):
- conn = nova_admin.connection_for('admin')
- return conn.register_image("%s/%s.manifest.xml" % (bucket_name, manifest))
+def run_tests(suites):
+ argv = FLAGS(sys.argv)
+
+ if not os.getenv('EC2_ACCESS_KEY'):
+ print >> sys.stderr, 'Missing EC2 environment variables. Please ' \
+ 'source the appropriate novarc file before ' \
+ 'running this test.'
+ return 1
+
+ if FLAGS.suite:
+ try:
+ suite = suites[FLAGS.suite]
+ except KeyError:
+ print >> sys.stderr, 'Available test suites:', \
+ ', '.join(suites.keys())
+ return 1
- def setUp_test_image(self, image, kernel=False):
- self.bundle_image(image, kernel=kernel)
- bucket = "auto_test_%s" % int(random.random() * 1000000)
- self.upload_image(bucket, image)
- return self.register_image(bucket, image)
+        unittest.TextTestRunner(verbosity=2).run(suite)
+ else:
+ for suite in suites.itervalues():
+ unittest.TextTestRunner(verbosity=2).run(suite)
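+
+# NOTE: test modules import this and build the suite mapping themselves,
+# e.g. (from admin_smoketests.py):
+#
+#     if __name__ == "__main__":
+#         suites = {'user': unittest.makeSuite(UserTests)}
+#         sys.exit(base.run_tests(suites))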
- def tearDown_test_image(self, conn, image_id):
- conn.deregister_image(image_id)
diff --git a/smoketests/flags.py b/smoketests/flags.py
index 3617fb797a..ae4d095085 100644
--- a/smoketests/flags.py
+++ b/smoketests/flags.py
@@ -1,7 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
+# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -33,13 +33,6 @@ DEFINE_bool = DEFINE_bool
# __GLOBAL FLAGS ONLY__
# Define any app-specific flags in their own files, docs at:
# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39
-DEFINE_string('admin_access_key', 'admin', 'Access key for admin user')
-DEFINE_string('admin_secret_key', 'admin', 'Secret key for admin user')
-DEFINE_string('clc_ip', '127.0.0.1', 'IP of cloud controller API')
-DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz',
- 'Local kernel file to use for bundling tests')
-DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
- 'Local image file to use for bundling tests')
-#DEFINE_string('vpn_image_id', 'ami-CLOUDPIPE',
-# 'AMI for cloudpipe vpn server')
+DEFINE_string('region', 'nova', 'Region to use')
+DEFINE_string('test_image', 'ami-tiny', 'Image to use for launch tests')
diff --git a/smoketests/smoketest.py b/smoketests/smoketest.py
deleted file mode 100644
index ad95114d40..0000000000
--- a/smoketests/smoketest.py
+++ /dev/null
@@ -1,566 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import commands
-import os
-import random
-import re
-import sys
-import time
-import unittest
-import zipfile
-
-
-import paramiko
-
-from smoketests import flags
-from smoketests import novatestcase
-
-SUITE_NAMES = '[user, image, security, public_network, volume]'
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
-
-# TODO(devamcar): Use random tempfile
-ZIP_FILENAME = '/tmp/nova-me-x509.zip'
-
-data = {}
-
-test_prefix = 'test%s' % int(random.random()*1000000)
-test_username = '%suser' % test_prefix
-test_bucket = '%s_bucket' % test_prefix
-test_key = '%s_key' % test_prefix
-
-# Test admin credentials and user creation
-class UserTests(novatestcase.NovaTestCase):
- def test_001_admin_can_connect(self):
- conn = self.connection_for('admin')
- self.assert_(conn)
-
- def test_002_admin_can_create_user(self):
- userinfo = self.create_user(test_username)
- self.assertEqual(userinfo.username, test_username)
-
- def test_003_user_can_download_credentials(self):
- buf = self.get_signed_zip(test_username)
- output = open(ZIP_FILENAME, 'w')
- output.write(buf)
- output.close()
-
- zip = zipfile.ZipFile(ZIP_FILENAME, 'a', zipfile.ZIP_DEFLATED)
- bad = zip.testzip()
- zip.close()
-
- self.failIf(bad)
-
- def test_999_tearDown(self):
- self.delete_user(test_username)
- user = self.get_user(test_username)
- self.assert_(user is None)
- try:
- os.remove(ZIP_FILENAME)
- except:
- pass
-
-# Test image bundling, registration, and launching
-class ImageTests(novatestcase.NovaTestCase):
- def test_000_setUp(self):
- self.create_user(test_username)
-
- def test_001_admin_can_bundle_image(self):
- self.assertTrue(self.bundle_image(FLAGS.bundle_image))
-
- def test_002_admin_can_upload_image(self):
- self.assertTrue(self.upload_image(test_bucket, FLAGS.bundle_image))
-
- def test_003_admin_can_register_image(self):
- image_id = self.register_image(test_bucket, FLAGS.bundle_image)
- self.assert_(image_id is not None)
- data['image_id'] = image_id
-
- def test_004_admin_can_bundle_kernel(self):
- self.assertTrue(self.bundle_image(FLAGS.bundle_kernel, kernel=True))
-
- def test_005_admin_can_upload_kernel(self):
- self.assertTrue(self.upload_image(test_bucket, FLAGS.bundle_kernel))
-
- def test_006_admin_can_register_kernel(self):
- # FIXME(devcamcar): registration should verify that bucket/manifest
- # exists before returning successfully.
- kernel_id = self.register_image(test_bucket, FLAGS.bundle_kernel)
- self.assert_(kernel_id is not None)
- data['kernel_id'] = kernel_id
-
- def test_007_admin_images_are_available_within_10_seconds(self):
- for i in xrange(10):
- image = self.admin.get_image(data['image_id'])
- if image and image.state == 'available':
- break
- time.sleep(1)
- else:
- print image.state
- self.assert_(False) # wasn't available within 10 seconds
- self.assert_(image.type == 'machine')
-
- for i in xrange(10):
- kernel = self.admin.get_image(data['kernel_id'])
- if kernel and kernel.state == 'available':
- break
- time.sleep(1)
- else:
- self.assert_(False) # wasn't available within 10 seconds
- self.assert_(kernel.type == 'kernel')
-
- def test_008_admin_can_describe_image_attribute(self):
- attrs = self.admin.get_image_attribute(data['image_id'],
- 'launchPermission')
- self.assert_(attrs.name, 'launch_permission')
-
- def test_009_me_cannot_see_non_public_images(self):
- conn = self.connection_for(test_username)
- images = conn.get_all_images(image_ids=[data['image_id']])
- self.assertEqual(len(images), 0)
-
- def test_010_admin_can_modify_image_launch_permission(self):
- conn = self.connection_for(test_username)
-
- self.admin.modify_image_attribute(image_id=data['image_id'],
- operation='add',
- attribute='launchPermission',
- groups='all')
-
- image = conn.get_image(data['image_id'])
- self.assertEqual(image.id, data['image_id'])
-
- def test_011_me_can_list_public_images(self):
- conn = self.connection_for(test_username)
- images = conn.get_all_images(image_ids=[data['image_id']])
- self.assertEqual(len(images), 1)
- pass
-
- def test_012_me_can_see_launch_permission(self):
- attrs = self.admin.get_image_attribute(data['image_id'],
- 'launchPermission')
- self.assert_(attrs.name, 'launch_permission')
- self.assert_(attrs.groups[0], 'all')
-
- # FIXME: add tests that user can launch image
-
-# def test_013_user_can_launch_admin_public_image(self):
-# # TODO: Use openwrt kernel instead of default kernel
-# conn = self.connection_for(test_username)
-# reservation = conn.run_instances(data['image_id'])
-# self.assertEqual(len(reservation.instances), 1)
-# data['my_instance_id'] = reservation.instances[0].id
-
-# def test_014_instances_launch_within_30_seconds(self):
-# pass
-
-# def test_015_user_can_terminate(self):
-# conn = self.connection_for(test_username)
-# terminated = conn.terminate_instances(
-# instance_ids=[data['my_instance_id']])
-# self.assertEqual(len(terminated), 1)
-
- def test_016_admin_can_deregister_kernel(self):
- self.assertTrue(self.admin.deregister_image(data['kernel_id']))
-
- def test_017_admin_can_deregister_image(self):
- self.assertTrue(self.admin.deregister_image(data['image_id']))
-
- def test_018_admin_can_delete_bundle(self):
- self.assertTrue(self.delete_bundle_bucket(test_bucket))
-
- def test_999_tearDown(self):
- data = {}
- self.delete_user(test_username)
-
-
-# Test key pairs and security groups
-class SecurityTests(novatestcase.NovaTestCase):
- def test_000_setUp(self):
- self.create_user(test_username + '_me')
- self.create_user(test_username + '_you')
- data['image_id'] = 'ami-tiny'
-
- def test_001_me_can_create_keypair(self):
- conn = self.connection_for(test_username + '_me')
- key = self.create_key_pair(conn, test_key)
- self.assertEqual(key.name, test_key)
-
- def test_002_you_can_create_keypair(self):
- conn = self.connection_for(test_username + '_you')
- key = self.create_key_pair(conn, test_key+ 'yourkey')
- self.assertEqual(key.name, test_key+'yourkey')
-
- def test_003_me_can_create_instance_with_keypair(self):
- conn = self.connection_for(test_username + '_me')
- reservation = conn.run_instances(data['image_id'], key_name=test_key)
- self.assertEqual(len(reservation.instances), 1)
- data['my_instance_id'] = reservation.instances[0].id
-
- def test_004_me_can_obtain_private_ip_within_60_seconds(self):
- conn = self.connection_for(test_username + '_me')
- reservations = conn.get_all_instances([data['my_instance_id']])
- instance = reservations[0].instances[0]
- # allow 60 seconds to exit pending with IP
- for x in xrange(60):
- instance.update()
- if instance.state != u'pending':
- break
- time.sleep(1)
- else:
- self.assert_(False)
- # self.assertEqual(instance.state, u'running')
- ip = reservations[0].instances[0].private_dns_name
- self.failIf(ip == '0.0.0.0')
- data['my_private_ip'] = ip
- print data['my_private_ip'],
-
- def test_005_can_ping_private_ip(self):
- for x in xrange(120):
- # ping waits for 1 second
- status, output = commands.getstatusoutput(
- 'ping -c1 -w1 %s' % data['my_private_ip'])
- if status == 0:
- break
- else:
- self.assert_('could not ping instance')
- #def test_005_me_cannot_ssh_when_unauthorized(self):
- # self.assertRaises(paramiko.SSHException, self.connect_ssh,
- # data['my_private_ip'], 'mykey')
-
- #def test_006_me_can_authorize_ssh(self):
- # conn = self.connection_for(test_username + '_me')
- # self.assertTrue(
- # conn.authorize_security_group(
- # 'default',
- # ip_protocol='tcp',
- # from_port=22,
- # to_port=22,
- # cidr_ip='0.0.0.0/0'
- # )
- # )
-
- def test_007_me_can_ssh_when_authorized(self):
- conn = self.connect_ssh(data['my_private_ip'], test_key)
- conn.close()
-
- #def test_008_me_can_revoke_ssh_authorization(self):
- # conn = self.connection_for('me')
- # self.assertTrue(
- # conn.revoke_security_group(
- # 'default',
- # ip_protocol='tcp',
- # from_port=22,
- # to_port=22,
- # cidr_ip='0.0.0.0/0'
- # )
- # )
-
- #def test_009_you_cannot_ping_my_instance(self):
- # TODO: should ping my_private_ip from with an instance started by you.
- #self.assertFalse(self.can_ping(data['my_private_ip']))
-
- def test_010_you_cannot_ssh_to_my_instance(self):
- try:
- conn = self.connect_ssh(data['my_private_ip'],
- test_key + 'yourkey')
- conn.close()
- except paramiko.SSHException:
- pass
- else:
- self.fail("expected SSHException")
-
- def test_999_tearDown(self):
- conn = self.connection_for(test_username + '_me')
- self.delete_key_pair(conn, test_key)
- if data.has_key('my_instance_id'):
- conn.terminate_instances([data['my_instance_id']])
-
- conn = self.connection_for(test_username + '_you')
- self.delete_key_pair(conn, test_key + 'yourkey')
-
- conn = self.connection_for('admin')
- self.delete_user(test_username + '_me')
- self.delete_user(test_username + '_you')
- #self.tearDown_test_image(conn, data['image_id'])
-
-# TODO: verify wrt image boots
-# build python into wrt image
-# build boto/m2crypto into wrt image
-# build euca2ools into wrt image
-# build a script to download and unpack credentials
-# - return "ok" to stdout for comparison in self.assertEqual()
-# build a script to bundle the instance
-# build a script to upload the bundle
-
-# status, output = commands.getstatusoutput('cmd')
-# if status == 0:
-# print 'ok'
-# else:
-# print output
-
-# Testing rebundling
-class RebundlingTests(novatestcase.NovaTestCase):
- def test_000_setUp(self):
- self.create_user('me')
- self.create_user('you')
- # TODO: create keypair for me
- # upload smoketest img
- # run instance
-
- def test_001_me_can_download_credentials_within_instance(self):
- conn = self.connect_ssh(data['my_private_ip'], 'mykey')
- stdin, stdout = conn.exec_command(
- 'python ~/smoketests/install-credentials.py')
- conn.close()
- self.assertEqual(stdout, 'ok')
-
- def test_002_me_can_rebundle_within_instance(self):
- conn = self.connect_ssh(data['my_private_ip'], 'mykey')
- stdin, stdout = conn.exec_command(
- 'python ~/smoketests/rebundle-instance.py')
- conn.close()
- self.assertEqual(stdout, 'ok')
-
- def test_003_me_can_upload_image_within_instance(self):
- conn = self.connect_ssh(data['my_private_ip'], 'mykey')
- stdin, stdout = conn.exec_command(
- 'python ~/smoketests/upload-bundle.py')
- conn.close()
- self.assertEqual(stdout, 'ok')
-
- def test_004_me_can_register_image_within_instance(self):
- conn = self.connect_ssh(data['my_private_ip'], 'mykey')
- stdin, stdout = conn.exec_command(
- 'python ~/smoketests/register-image.py')
- conn.close()
- if re.matches('ami-{\w+}', stdout):
- data['my_image_id'] = stdout.strip()
- else:
- self.fail('expected ami-nnnnnn, got:\n ' + stdout)
-
- def test_005_you_cannot_see_my_private_image(self):
- conn = self.connection_for('you')
- image = conn.get_image(data['my_image_id'])
- self.assertEqual(image, None)
-
- def test_006_me_can_make_image_public(self):
- conn = self.connection_for(test_username)
- conn.modify_image_attribute(image_id=data['my_image_id'],
- operation='add',
- attribute='launchPermission',
- groups='all')
-
- def test_007_you_can_see_my_public_image(self):
- conn = self.connection_for('you')
- image = conn.get_image(data['my_image_id'])
- self.assertEqual(image.id, data['my_image_id'])
-
- def test_999_tearDown(self):
- self.delete_user('me')
- self.delete_user('you')
-
- #if data.has_key('image_id'):
- # deregister rebundled image
-
- # TODO: tear down instance
- # delete keypairs
- data = {}
-
-# Test elastic IPs
-class ElasticIPTests(novatestcase.NovaTestCase):
- def test_000_setUp(self):
- data['image_id'] = 'ami-tiny'
-
- self.create_user('me')
- conn = self.connection_for('me')
- self.create_key_pair(conn, 'mykey')
-
- conn = self.connection_for('admin')
- #data['image_id'] = self.setUp_test_image(FLAGS.bundle_image)
-
- def test_001_me_can_launch_image_with_keypair(self):
- conn = self.connection_for('me')
- reservation = conn.run_instances(data['image_id'], key_name='mykey')
- self.assertEqual(len(reservation.instances), 1)
- data['my_instance_id'] = reservation.instances[0].id
-
- def test_002_me_can_allocate_elastic_ip(self):
- conn = self.connection_for('me')
- data['my_public_ip'] = conn.allocate_address()
- self.assert_(data['my_public_ip'].public_ip)
-
- def test_003_me_can_associate_ip_with_instance(self):
- self.assertTrue(data['my_public_ip'].associate(data['my_instance_id']))
-
- def test_004_me_can_ssh_with_public_ip(self):
- conn = self.connect_ssh(data['my_public_ip'].public_ip, 'mykey')
- conn.close()
-
- def test_005_me_can_disassociate_ip_from_instance(self):
- self.assertTrue(data['my_public_ip'].disassociate())
-
- def test_006_me_can_deallocate_elastic_ip(self):
- self.assertTrue(data['my_public_ip'].delete())
-
- def test_999_tearDown(self):
- conn = self.connection_for('me')
- self.delete_key_pair(conn, 'mykey')
-
- conn = self.connection_for('admin')
- #self.tearDown_test_image(conn, data['image_id'])
- data = {}
-
-ZONE = 'nova'
-DEVICE = 'vdb'
-# Test iscsi volumes
-class VolumeTests(novatestcase.NovaTestCase):
- def test_000_setUp(self):
- self.create_user(test_username)
- data['image_id'] = 'ami-tiny' # A7370FE3
-
- conn = self.connection_for(test_username)
- self.create_key_pair(conn, test_key)
- reservation = conn.run_instances(data['image_id'],
- instance_type='m1.tiny',
- key_name=test_key)
- data['instance_id'] = reservation.instances[0].id
- data['private_ip'] = reservation.instances[0].private_dns_name
- # wait for instance to show up
- for x in xrange(120):
- # ping waits for 1 second
- status, output = commands.getstatusoutput(
- 'ping -c1 -w1 %s' % data['private_ip'])
- if status == 0:
- break
- else:
- self.fail('unable to ping instance')
-
- def test_001_me_can_create_volume(self):
- conn = self.connection_for(test_username)
- volume = conn.create_volume(1, ZONE)
- self.assertEqual(volume.size, 1)
- data['volume_id'] = volume.id
- # give network time to find volume
- time.sleep(5)
-
- def test_002_me_can_attach_volume(self):
- conn = self.connection_for(test_username)
- conn.attach_volume(
- volume_id = data['volume_id'],
- instance_id = data['instance_id'],
- device = '/dev/%s' % DEVICE
- )
- # give instance time to recognize volume
- time.sleep(5)
-
- def test_003_me_can_mount_volume(self):
- conn = self.connect_ssh(data['private_ip'], test_key)
- # FIXME(devcamcar): the tiny image doesn't create the node properly
- # this will make /dev/vd* if it doesn't exist
- stdin, stdout, stderr = conn.exec_command(
- 'grep %s /proc/partitions |' + \
- '`awk \'{print "mknod /dev/"$4" b "$1" "$2}\'`' % DEVICE)
- commands = []
- commands.append('mkdir -p /mnt/vol')
- commands.append('mkfs.ext2 /dev/%s' % DEVICE)
- commands.append('mount /dev/%s /mnt/vol' % DEVICE)
- commands.append('echo success')
- stdin, stdout, stderr = conn.exec_command(' && '.join(commands))
- out = stdout.read()
- conn.close()
- if not out.strip().endswith('success'):
- self.fail('Unable to mount: %s %s' % (out, stderr.read()))
-
- def test_004_me_can_write_to_volume(self):
- conn = self.connect_ssh(data['private_ip'], test_key)
- # FIXME(devcamcar): This doesn't fail if the volume hasn't been mounted
- stdin, stdout, stderr = conn.exec_command(
- 'echo hello > /mnt/vol/test.txt')
- err = stderr.read()
- conn.close()
- if len(err) > 0:
- self.fail('Unable to write to mount: %s' % (err))
-
- def test_005_volume_is_correct_size(self):
- conn = self.connect_ssh(data['private_ip'], test_key)
- stdin, stdout, stderr = conn.exec_command(
- "df -h | grep %s | awk {'print $2'}" % DEVICE)
- out = stdout.read()
- conn.close()
- if not out.strip() == '1007.9M':
- self.fail('Volume is not the right size: %s %s' % (out, stderr.read()))
-
- def test_006_me_can_umount_volume(self):
- conn = self.connect_ssh(data['private_ip'], test_key)
- stdin, stdout, stderr = conn.exec_command('umount /mnt/vol')
- err = stderr.read()
- conn.close()
- if len(err) > 0:
- self.fail('Unable to unmount: %s' % (err))
-
- def test_007_me_can_detach_volume(self):
- conn = self.connection_for(test_username)
- self.assertTrue(conn.detach_volume(volume_id = data['volume_id']))
-
- def test_008_me_can_delete_volume(self):
- conn = self.connection_for(test_username)
- self.assertTrue(conn.delete_volume(data['volume_id']))
-
- def test_009_volume_size_must_be_int(self):
- conn = self.connection_for(test_username)
- self.assertRaises(Exception, conn.create_volume, 'foo', ZONE)
-
- def test_999_tearDown(self):
- global data
- conn = self.connection_for(test_username)
- self.delete_key_pair(conn, test_key)
- if data.has_key('instance_id'):
- conn.terminate_instances([data['instance_id']])
- self.delete_user(test_username)
- data = {}
-
-def build_suites():
- return {
- 'user': unittest.makeSuite(UserTests),
- 'image': unittest.makeSuite(ImageTests),
- 'security': unittest.makeSuite(SecurityTests),
- 'public_network': unittest.makeSuite(ElasticIPTests),
- 'volume': unittest.makeSuite(VolumeTests),
- }
-
-def main():
- argv = FLAGS(sys.argv)
- suites = build_suites()
-
- if FLAGS.suite:
- try:
- suite = suites[FLAGS.suite]
- except KeyError:
- print >> sys.stderr, 'Available test suites:', SUITE_NAMES
- return 1
-
- unittest.TextTestRunner(verbosity=2).run(suite)
- else:
- for suite in suites.itervalues():
- unittest.TextTestRunner(verbosity=2).run(suite)
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/smoketests/user_smoketests.py b/smoketests/user_smoketests.py
new file mode 100644
index 0000000000..d29e3aea32
--- /dev/null
+++ b/smoketests/user_smoketests.py
@@ -0,0 +1,326 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import commands
+import os
+import random
+import socket
+import sys
+import time
+import unittest
+
+from smoketests import flags
+from smoketests import base
+
+
+SUITE_NAMES = '[image, instance, volume]'
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES)
+flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz',
+ 'Local kernel file to use for bundling tests')
+flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
+ 'Local image file to use for bundling tests')
+
+TEST_PREFIX = 'test%s' % int(random.random()*1000000)
+TEST_BUCKET = '%s_bucket' % TEST_PREFIX
+TEST_KEY = '%s_key' % TEST_PREFIX
+TEST_DATA = {}
+
+
+class UserSmokeTestCase(base.SmokeTestCase):
+ def setUp(self):
+ global TEST_DATA
+ self.conn = self.connection_for_env()
+ self.data = TEST_DATA
+
+
+class ImageTests(UserSmokeTestCase):
+ def test_001_can_bundle_image(self):
+ self.assertTrue(self.bundle_image(FLAGS.bundle_image))
+
+ def test_002_can_upload_image(self):
+ self.assertTrue(self.upload_image(TEST_BUCKET, FLAGS.bundle_image))
+
+ def test_003_can_register_image(self):
+ image_id = self.conn.register_image('%s/%s.manifest.xml' %
+ (TEST_BUCKET, FLAGS.bundle_image))
+ self.assert_(image_id is not None)
+ self.data['image_id'] = image_id
+
+ def test_004_can_bundle_kernel(self):
+ self.assertTrue(self.bundle_image(FLAGS.bundle_kernel, kernel=True))
+
+ def test_005_can_upload_kernel(self):
+ self.assertTrue(self.upload_image(TEST_BUCKET, FLAGS.bundle_kernel))
+
+ def test_006_can_register_kernel(self):
+ kernel_id = self.conn.register_image('%s/%s.manifest.xml' %
+ (TEST_BUCKET, FLAGS.bundle_kernel))
+ self.assert_(kernel_id is not None)
+ self.data['kernel_id'] = kernel_id
+
+ def test_007_images_are_available_within_10_seconds(self):
+ for i in xrange(10):
+ image = self.conn.get_image(self.data['image_id'])
+ if image and image.state == 'available':
+ break
+ time.sleep(1)
+ else:
+ print image.state
+ self.assert_(False) # wasn't available within 10 seconds
+ self.assert_(image.type == 'machine')
+
+ for i in xrange(10):
+ kernel = self.conn.get_image(self.data['kernel_id'])
+ if kernel and kernel.state == 'available':
+ break
+ time.sleep(1)
+ else:
+ self.assert_(False) # wasn't available within 10 seconds
+ self.assert_(kernel.type == 'kernel')
+
+ def test_008_can_describe_image_attribute(self):
+ attrs = self.conn.get_image_attribute(self.data['image_id'],
+ 'launchPermission')
+ self.assert_(attrs.name, 'launch_permission')
+
+ def test_009_can_modify_image_launch_permission(self):
+ self.conn.modify_image_attribute(image_id=self.data['image_id'],
+ operation='add',
+ attribute='launchPermission',
+ groups='all')
+ image = self.conn.get_image(self.data['image_id'])
+ self.assertEqual(image.id, self.data['image_id'])
+
+ def test_010_can_see_launch_permission(self):
+ attrs = self.conn.get_image_attribute(self.data['image_id'],
+ 'launchPermission')
+ self.assert_(attrs.name, 'launch_permission')
+ self.assert_(attrs.attrs['groups'][0], 'all')
+
+ def test_011_user_can_deregister_kernel(self):
+ self.assertTrue(self.conn.deregister_image(self.data['kernel_id']))
+
+ def test_012_can_deregister_image(self):
+ self.assertTrue(self.conn.deregister_image(self.data['image_id']))
+
+ def test_013_can_delete_bundle(self):
+ self.assertTrue(self.delete_bundle_bucket(TEST_BUCKET))
+
+
+class InstanceTests(UserSmokeTestCase):
+ def test_001_can_create_keypair(self):
+ key = self.create_key_pair(self.conn, TEST_KEY)
+ self.assertEqual(key.name, TEST_KEY)
+
+ def test_002_can_create_instance_with_keypair(self):
+ reservation = self.conn.run_instances(FLAGS.test_image,
+ key_name=TEST_KEY,
+ instance_type='m1.tiny')
+ self.assertEqual(len(reservation.instances), 1)
+ self.data['instance_id'] = reservation.instances[0].id
+
+ def test_003_instance_runs_within_60_seconds(self):
+        reservations = self.conn.get_all_instances([self.data['instance_id']])
+ instance = reservations[0].instances[0]
+ # allow 60 seconds to exit pending with IP
+ for x in xrange(60):
+ instance.update()
+ if instance.state == u'running':
+ break
+ time.sleep(1)
+ else:
+ self.fail('instance failed to start')
+ ip = reservations[0].instances[0].private_dns_name
+ self.failIf(ip == '0.0.0.0')
+ self.data['private_ip'] = ip
+ print self.data['private_ip']
+
+ def test_004_can_ping_private_ip(self):
+ for x in xrange(120):
+ # ping waits for 1 second
+ status, output = commands.getstatusoutput(
+ 'ping -c1 %s' % self.data['private_ip'])
+ if status == 0:
+ break
+ else:
+ self.fail('could not ping instance')
+
+ def test_005_can_ssh_to_private_ip(self):
+ for x in xrange(30):
+ try:
+ conn = self.connect_ssh(self.data['private_ip'], TEST_KEY)
+ conn.close()
+ except Exception:
+ time.sleep(1)
+ else:
+ break
+ else:
+ self.fail('could not ssh to instance')
+
+ def test_006_can_allocate_elastic_ip(self):
+ result = self.conn.allocate_address()
+ self.assertTrue(hasattr(result, 'public_ip'))
+ self.data['public_ip'] = result.public_ip
+
+ def test_007_can_associate_ip_with_instance(self):
+ result = self.conn.associate_address(self.data['instance_id'],
+ self.data['public_ip'])
+ self.assertTrue(result)
+
+ def test_008_can_ssh_with_public_ip(self):
+ for x in xrange(30):
+ try:
+ conn = self.connect_ssh(self.data['public_ip'], TEST_KEY)
+ conn.close()
+ except socket.error:
+ time.sleep(1)
+ else:
+ break
+ else:
+ self.fail('could not ssh to instance')
+
+ def test_009_can_disassociate_ip_from_instance(self):
+ result = self.conn.disassociate_address(self.data['public_ip'])
+ self.assertTrue(result)
+
+ def test_010_can_deallocate_elastic_ip(self):
+ result = self.conn.release_address(self.data['public_ip'])
+ self.assertTrue(result)
+
+ def test_999_tearDown(self):
+ self.delete_key_pair(self.conn, TEST_KEY)
+ if self.data.has_key('instance_id'):
+            self.conn.terminate_instances([self.data['instance_id']])
+
+
+class VolumeTests(UserSmokeTestCase):
+ def setUp(self):
+ super(VolumeTests, self).setUp()
+ self.device = '/dev/vdb'
+
+ def test_000_setUp(self):
+ self.create_key_pair(self.conn, TEST_KEY)
+ reservation = self.conn.run_instances(FLAGS.test_image,
+ instance_type='m1.tiny',
+ key_name=TEST_KEY)
+ instance = reservation.instances[0]
+ self.data['instance'] = instance
+        for x in xrange(120):
+            instance.update()
+            if self.can_ping(instance.private_dns_name):
+                break
+ else:
+ self.fail('unable to start instance')
+
+ def test_001_can_create_volume(self):
+ volume = self.conn.create_volume(1, 'nova')
+ self.assertEqual(volume.size, 1)
+ self.data['volume'] = volume
+ # Give network time to find volume.
+ time.sleep(5)
+
+ def test_002_can_attach_volume(self):
+ volume = self.data['volume']
+
+ for x in xrange(10):
+ if volume.status == u'available':
+ break
+ time.sleep(5)
+ volume.update()
+ else:
+ self.fail('cannot attach volume with state %s' % volume.status)
+
+ volume.attach(self.data['instance'].id, self.device)
+
+        # Volumes seem to report "available" too soon.
+ for x in xrange(10):
+ if volume.status == u'in-use':
+ break
+ time.sleep(5)
+ volume.update()
+
+ self.assertEqual(volume.status, u'in-use')
+
+ # Give instance time to recognize volume.
+ time.sleep(5)
+
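+    # NOTE: the status polling above could be factored into a small helper;
+    # a sketch (not used by these tests; names are illustrative):
+    #
+    #     def wait_for_status(self, volume, status, tries=10, delay=5):
+    #         for x in xrange(tries):
+    #             if volume.status == status:
+    #                 return True
+    #             time.sleep(delay)
+    #             volume.update()
+    #         return False
+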
+ def test_003_can_mount_volume(self):
+ ip = self.data['instance'].private_dns_name
+ conn = self.connect_ssh(ip, TEST_KEY)
+ commands = []
+ commands.append('mkdir -p /mnt/vol')
+ commands.append('mkfs.ext2 %s' % self.device)
+ commands.append('mount %s /mnt/vol' % self.device)
+ commands.append('echo success')
+ stdin, stdout, stderr = conn.exec_command(' && '.join(commands))
+ out = stdout.read()
+ conn.close()
+ if not out.strip().endswith('success'):
+ self.fail('Unable to mount: %s %s' % (out, stderr.read()))
+
+ def test_004_can_write_to_volume(self):
+ ip = self.data['instance'].private_dns_name
+ conn = self.connect_ssh(ip, TEST_KEY)
+ # FIXME(devcamcar): This doesn't fail if the volume hasn't been mounted
+ stdin, stdout, stderr = conn.exec_command(
+ 'echo hello > /mnt/vol/test.txt')
+ err = stderr.read()
+ conn.close()
+ if len(err) > 0:
+ self.fail('Unable to write to mount: %s' % (err))
+
+ def test_005_volume_is_correct_size(self):
+ ip = self.data['instance'].private_dns_name
+ conn = self.connect_ssh(ip, TEST_KEY)
+ stdin, stdout, stderr = conn.exec_command(
+ "df -h | grep %s | awk {'print $2'}" % self.device)
+ out = stdout.read()
+ conn.close()
+ if not out.strip() == '1008M':
+ self.fail('Volume is not the right size: %s %s' %
+ (out, stderr.read()))
+
+ def test_006_me_can_umount_volume(self):
+ ip = self.data['instance'].private_dns_name
+ conn = self.connect_ssh(ip, TEST_KEY)
+ stdin, stdout, stderr = conn.exec_command('umount /mnt/vol')
+ err = stderr.read()
+ conn.close()
+ if len(err) > 0:
+ self.fail('Unable to unmount: %s' % (err))
+
+ def test_007_me_can_detach_volume(self):
+ result = self.conn.detach_volume(volume_id=self.data['volume'].id)
+ self.assertTrue(result)
+ time.sleep(5)
+
+ def test_008_me_can_delete_volume(self):
+ result = self.conn.delete_volume(self.data['volume'].id)
+ self.assertTrue(result)
+
+ def test_999_tearDown(self):
+ self.conn.terminate_instances([self.data['instance'].id])
+ self.conn.delete_key_pair(TEST_KEY)
+
+
+if __name__ == "__main__":
+ suites = {'image': unittest.makeSuite(ImageTests),
+ 'instance': unittest.makeSuite(InstanceTests),
+ 'volume': unittest.makeSuite(VolumeTests)}
+ sys.exit(base.run_tests(suites))
diff --git a/tools/pip-requires b/tools/pip-requires
index c9ca8cdf51..6bdadf3ed7 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -21,3 +21,4 @@ mox==0.5.0
-f http://pymox.googlecode.com/files/mox-0.5.0.tar.gz
greenlet==0.3.1
nose
+bzr