summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--HACKING.rst45
-rw-r--r--REVIEWING.rst9
-rw-r--r--doc/source/cleanup.rst5
-rw-r--r--doc/source/conf.py1
-rw-r--r--doc/source/index.rst9
-rw-r--r--etc/tempest.conf.sample32
-rw-r--r--requirements.txt25
-rw-r--r--setup.cfg1
-rw-r--r--tempest/api/compute/admin/test_fixed_ips_negative.py5
-rw-r--r--tempest/api/compute/base.py51
-rw-r--r--tempest/api/compute/images/test_images_oneserver.py6
-rw-r--r--tempest/api/compute/security_groups/test_security_group_rules.py133
-rw-r--r--tempest/api/compute/servers/test_create_server.py35
-rw-r--r--tempest/api/compute/servers/test_delete_server.py12
-rw-r--r--tempest/api/compute/servers/test_list_server_filters.py36
-rw-r--r--tempest/api/compute/servers/test_server_actions.py30
-rw-r--r--tempest/api/compute/servers/test_server_rescue.py3
-rw-r--r--tempest/api/compute/servers/test_server_rescue_negative.py3
-rw-r--r--tempest/api/compute/servers/test_servers_negative.py5
-rw-r--r--tempest/api/compute/test_authorization.py8
-rw-r--r--tempest/api/compute/v3/servers/test_list_server_filters.py11
-rw-r--r--tempest/api/compute/v3/servers/test_servers_negative.py5
-rw-r--r--tempest/api/identity/base.py2
-rw-r--r--tempest/api/image/base.py17
-rw-r--r--tempest/api/messaging/base.py4
-rw-r--r--tempest/api/messaging/test_claims.py8
-rw-r--r--tempest/api/messaging/test_messages.py8
-rw-r--r--tempest/api/messaging/test_queues.py33
-rw-r--r--tempest/api/network/base.py17
-rw-r--r--tempest/api/network/common.py157
-rw-r--r--tempest/api/network/test_fwaas_extensions.py51
-rw-r--r--tempest/api/network/test_networks.py205
-rw-r--r--tempest/api/network/test_security_groups.py136
-rw-r--r--tempest/api/network/test_security_groups_negative.py116
-rw-r--r--tempest/api/object_storage/base.py18
-rw-r--r--tempest/api/orchestration/base.py8
-rw-r--r--tempest/api/orchestration/stacks/test_neutron_resources.py5
-rw-r--r--tempest/api/orchestration/stacks/test_non_empty_stack.py4
-rw-r--r--tempest/api/orchestration/stacks/test_nova_keypair_resources.py4
-rw-r--r--tempest/api/orchestration/stacks/test_stacks.py4
-rw-r--r--tempest/api/orchestration/stacks/test_swift_resources.py5
-rw-r--r--tempest/api/orchestration/stacks/test_templates.py5
-rw-r--r--tempest/api/orchestration/stacks/test_templates_negative.py4
-rw-r--r--tempest/api/orchestration/stacks/test_update.py82
-rw-r--r--tempest/api/orchestration/stacks/test_volumes.py4
-rw-r--r--tempest/api/telemetry/base.py8
-rw-r--r--tempest/api/telemetry/test_telemetry_alarming_api.py4
-rw-r--r--tempest/api/telemetry/test_telemetry_notification_api.py5
-rw-r--r--tempest/api/volume/admin/test_multi_backend.py9
-rw-r--r--tempest/api/volume/admin/test_snapshots_actions.py9
-rw-r--r--tempest/api/volume/admin/test_volume_quotas.py7
-rw-r--r--tempest/api/volume/admin/test_volume_quotas_negative.py5
-rw-r--r--tempest/api/volume/admin/test_volume_services.py4
-rw-r--r--tempest/api/volume/admin/test_volume_types_extra_specs.py8
-rw-r--r--tempest/api/volume/admin/test_volume_types_extra_specs_negative.py8
-rw-r--r--tempest/api/volume/admin/test_volumes_actions.py9
-rw-r--r--tempest/api/volume/admin/test_volumes_backup.py5
-rw-r--r--tempest/api/volume/base.py36
-rw-r--r--tempest/api/volume/test_availability_zone.py4
-rw-r--r--tempest/api/volume/test_qos.py5
-rw-r--r--tempest/api/volume/test_snapshot_metadata.py20
-rw-r--r--tempest/api/volume/test_volume_metadata.py5
-rw-r--r--tempest/api/volume/test_volume_transfers.py23
-rw-r--r--tempest/api/volume/test_volumes_actions.py9
-rw-r--r--tempest/api/volume/test_volumes_extend.py5
-rw-r--r--tempest/api/volume/test_volumes_get.py4
-rw-r--r--tempest/api/volume/test_volumes_list.py9
-rw-r--r--tempest/api/volume/test_volumes_negative.py5
-rw-r--r--tempest/api/volume/test_volumes_snapshots.py61
-rw-r--r--tempest/api/volume/test_volumes_snapshots_negative.py17
-rw-r--r--tempest/api/volume/v2/test_volumes_list.py9
-rw-r--r--tempest/api_schema/request/compute/flavors.py21
-rw-r--r--tempest/api_schema/response/messaging/v1/queues.py4
-rw-r--r--tempest/auth.py80
-rw-r--r--tempest/cli/__init__.py4
-rw-r--r--tempest/cli/simple_read_only/compute/test_nova.py4
-rw-r--r--tempest/cli/simple_read_only/compute/test_nova_manage.py11
-rw-r--r--tempest/cli/simple_read_only/data_processing/test_sahara.py4
-rw-r--r--tempest/cli/simple_read_only/image/test_glance.py4
-rw-r--r--tempest/cli/simple_read_only/network/test_neutron.py4
-rw-r--r--tempest/cli/simple_read_only/object_storage/test_swift.py4
-rw-r--r--tempest/cli/simple_read_only/orchestration/test_heat.py4
-rw-r--r--tempest/cli/simple_read_only/telemetry/test_ceilometer.py4
-rw-r--r--tempest/cli/simple_read_only/volume/test_cinder.py4
-rw-r--r--tempest/clients.py298
-rw-r--r--tempest/cmd/cleanup.py300
-rw-r--r--tempest/cmd/cleanup_service.py1062
-rwxr-xr-xtempest/cmd/javelin.py54
-rw-r--r--tempest/cmd/resources.yaml7
-rwxr-xr-xtempest/cmd/verify_tempest_config.py23
-rw-r--r--tempest/common/accounts.py22
-rw-r--r--tempest/common/cred_provider.py11
-rw-r--r--tempest/common/credentials.py39
-rw-r--r--tempest/common/generator/base_generator.py96
-rw-r--r--tempest/common/generator/negative_generator.py65
-rw-r--r--tempest/common/generator/valid_generator.py23
-rw-r--r--tempest/common/isolated_creds.py182
-rw-r--r--tempest/config.py41
-rw-r--r--tempest/hacking/checks.py27
-rw-r--r--tempest/manager.py1
-rw-r--r--tempest/scenario/manager.py1199
-rw-r--r--tempest/scenario/orchestration/test_server_cfn_init.py3
-rw-r--r--tempest/scenario/test_aggregates_basic_ops.py4
-rw-r--r--tempest/scenario/test_dashboard_basic_ops.py4
-rw-r--r--tempest/scenario/test_large_ops.py4
-rw-r--r--tempest/scenario/test_load_balancer_basic.py47
-rw-r--r--tempest/scenario/test_network_advanced_server_ops.py9
-rw-r--r--tempest/scenario/test_network_basic_ops.py23
-rw-r--r--tempest/scenario/test_security_groups_basic_ops.py12
-rw-r--r--tempest/scenario/test_server_advanced_ops.py4
-rw-r--r--tempest/scenario/test_server_basic_ops.py3
-rw-r--r--tempest/scenario/test_stamp_pattern.py4
-rw-r--r--tempest/scenario/test_swift_basic_ops.py24
-rw-r--r--tempest/scenario/test_volume_boot_pattern.py4
-rw-r--r--tempest/scenario/utils.py47
-rw-r--r--tempest/services/compute/json/images_client.py2
-rw-r--r--tempest/services/compute/xml/images_client.py2
-rw-r--r--tempest/services/compute/xml/servers_client.py7
-rw-r--r--tempest/services/identity/v3/json/identity_client.py7
-rw-r--r--tempest/services/identity/v3/xml/identity_client.py7
-rw-r--r--tempest/services/messaging/json/messaging_client.py17
-rw-r--r--tempest/services/network/json/network_client.py27
-rw-r--r--tempest/services/network/resources.py19
-rw-r--r--tempest/services/network/xml/network_client.py24
-rw-r--r--tempest/services/volume/json/snapshots_client.py13
-rw-r--r--tempest/services/volume/v2/json/snapshots_client.py23
-rw-r--r--tempest/services/volume/v2/xml/snapshots_client.py23
-rw-r--r--tempest/services/volume/xml/snapshots_client.py13
-rw-r--r--tempest/stress/actions/server_create_destroy.py6
-rw-r--r--tempest/stress/actions/ssh_floating.py10
-rw-r--r--tempest/stress/actions/volume_attach_delete.py19
-rw-r--r--tempest/stress/actions/volume_attach_verify.py34
-rw-r--r--tempest/stress/actions/volume_create_delete.py8
-rw-r--r--tempest/test.py112
-rw-r--r--tempest/tests/cmd/test_verify_tempest_config.py18
-rw-r--r--tempest/tests/common/utils/test_misc.py2
-rw-r--r--tempest/tests/negative/test_negative_auto_test.py9
-rw-r--r--tempest/tests/negative/test_negative_generators.py50
-rw-r--r--tempest/tests/test_decorators.py22
-rw-r--r--tempest/tests/test_hacking.py30
-rw-r--r--tempest/tests/test_tenant_isolation.py21
-rw-r--r--tempest/thirdparty/boto/test.py13
-rw-r--r--tempest/thirdparty/boto/test_ec2_instance_run.py27
-rw-r--r--tempest/thirdparty/boto/test_ec2_keys.py4
-rw-r--r--tempest/thirdparty/boto/test_ec2_network.py4
-rw-r--r--tempest/thirdparty/boto/test_ec2_security_groups.py4
-rw-r--r--tempest/thirdparty/boto/test_ec2_volumes.py4
-rw-r--r--tempest/thirdparty/boto/test_s3_buckets.py4
-rw-r--r--tempest/thirdparty/boto/test_s3_ec2_images.py4
-rw-r--r--tempest/thirdparty/boto/test_s3_objects.py4
-rw-r--r--test-requirements.txt9
-rw-r--r--tox.ini7
152 files changed, 3218 insertions, 2845 deletions
diff --git a/HACKING.rst b/HACKING.rst
index 025bf7460..29d5bf4dc 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -227,3 +227,48 @@ itself, and thus have a different set of guidelines around them:
2. The unit tests cannot use setUpClass, instead fixtures and testresources
should be used for shared state between tests.
+
+
+.. _TestDocumentation:
+
+Test Documentation
+------------------
+For tests being added we need to require inline documentation in the form of
+docstrings to explain what is being tested. In API tests for a new API a class
+level docstring should be added to an API reference doc. If one doesn't exist
+a TODO comment should be put indicating that the reference needs to be added.
+For individual API test cases a method level docstring should be used to
+explain the functionality being tested if the test name isn't descriptive
+enough. For example::
+
+ def test_get_role_by_id(self):
+ """Get a role by its id."""
+
+the docstring there is superfluous and shouldn't be added, but for a method
+like::
+
+ def test_volume_backup_create_get_detailed_list_restore_delete(self):
+ pass
+
+a docstring would be useful because while the test title is fairly descriptive
+the operations being performed are complex enough that a bit more explanation
+will help people figure out the intent of the test.
+
+For scenario tests a class level docstring describing the steps in the scenario
+is required. If there is more than one test case in the class individual
+docstrings for the workflow in each test method can be used instead. A good
+example of this would be::
+
+ class TestVolumeBootPattern(manager.OfficialClientTest):
+ """
+ This test case attempts to reproduce the following steps:
+
+ * Create in Cinder some bootable volume importing a Glance image
+ * Boot an instance from the bootable volume
+ * Write content to the volume
+ * Delete an instance and Boot a new instance from the volume
+ * Check written content in the instance
+ * Create a volume snapshot while the instance is running
+ * Boot an additional instance from the new snapshot based volume
+ * Check written content in the instance booted from snapshot
+ """
diff --git a/REVIEWING.rst b/REVIEWING.rst
index d6dc83ee5..74bd2adb6 100644
--- a/REVIEWING.rst
+++ b/REVIEWING.rst
@@ -51,6 +51,15 @@ skipped or not. Do not approve changes that depend on an API call to determine
whether to skip or not.
+Test Documentation
+------------------
+When a new test is being added refer to the :ref:`TestDocumentation` section in
+hacking to see if the requirements are being met. With the exception of a class
+level docstring linking to the API ref doc in the API tests and a docstring for
+scenario tests this is up to the reviewer's discretion whether a docstring is
+required or not.
+
+
When to approve
---------------
* Every patch needs two +2s before being approved.
diff --git a/doc/source/cleanup.rst b/doc/source/cleanup.rst
new file mode 100644
index 000000000..acd016c3f
--- /dev/null
+++ b/doc/source/cleanup.rst
@@ -0,0 +1,5 @@
+--------------------------------
+Post Tempest Run Cleanup Utility
+--------------------------------
+
+.. automodule:: tempest.cmd.cleanup \ No newline at end of file
diff --git a/doc/source/conf.py b/doc/source/conf.py
index bd4e55330..daa293c21 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -27,7 +27,6 @@ import os
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
- 'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'oslosphinx'
diff --git a/doc/source/index.rst b/doc/source/index.rst
index d3118ac3f..bc4fc46fc 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -29,6 +29,15 @@ where your test contributions should go.
field_guide/thirdparty
field_guide/unit_tests
+---------------------
+Command Documentation
+---------------------
+
+.. toctree::
+ :maxdepth: 1
+
+ cleanup
+
==================
Indices and tables
==================
diff --git a/etc/tempest.conf.sample b/etc/tempest.conf.sample
index dfcbaba6b..003a7f7c3 100644
--- a/etc/tempest.conf.sample
+++ b/etc/tempest.conf.sample
@@ -115,6 +115,24 @@
# to use for running tests (string value)
#test_accounts_file=etc/accounts.yaml
+# Allows test cases to create/destroy tenants and users. This
+# option requires that OpenStack Identity API admin
+# credentials are known. If false, isolated test cases and
+# parallel execution, can still be achieved configuring a list
+# of test accounts (boolean value)
+# Deprecated group/name - [compute]/allow_tenant_isolation
+# Deprecated group/name - [orchestration]/allow_tenant_isolation
+#allow_tenant_isolation=false
+
+# If set to True it enables the Accounts provider, which locks
+# credentials to allow for parallel execution with pre-
+# provisioned accounts. It can only be used to run tests that
+# ensure credentials cleanup happens. It requires at least `2
+# * CONC` distinct accounts configured in
+# `test_accounts_file`, with CONC == the number of concurrent
+# test processes. (boolean value)
+#locking_credentials_provider=false
+
[baremetal]
@@ -229,12 +247,6 @@
# Options defined in tempest.config
#
-# Allows test cases to create/destroy tenants and users. This
-# option enables isolated test cases and better parallel
-# execution, but also requires that OpenStack Identity API
-# admin credentials are known. (boolean value)
-#allow_tenant_isolation=false
-
# Valid primary image reference to be used in tests. This is a
# required option (string value)
#image_ref=<None>
@@ -356,12 +368,6 @@
# value)
#floating_ip_range=10.0.0.0/29
-# Allows test cases to create/destroy tenants and users. This
-# option enables isolated test cases and better parallel
-# execution, but also requires that OpenStack Identity API
-# admin credentials are known. (boolean value)
-#allow_tenant_isolation=false
-
# Time in seconds between build status checks. (integer value)
#build_interval=1
@@ -1100,7 +1106,7 @@
# value)
#build_interval=1
-# Timeout in seconds to wait for a volume to becomeavailable.
+# Timeout in seconds to wait for a volume to become available.
# (integer value)
#build_timeout=300
diff --git a/requirements.txt b/requirements.txt
index 9a3b74d41..708ede3b8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,25 +1,28 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
pbr>=0.6,!=0.7,<1.0
anyjson>=0.3.3
httplib2>=0.7.5
jsonschema>=2.0.0,<3.0.0
testtools>=0.9.34
lxml>=2.3
-boto>=2.12.0,!=2.13.0
+boto>=2.32.1
paramiko>=1.13.0
-netaddr>=0.7.6
+netaddr>=0.7.12
python-ceilometerclient>=1.0.6
-python-glanceclient>=0.13.1
-python-keystoneclient>=0.9.0
-python-novaclient>=2.17.0
-python-neutronclient>=2.3.5,<3
-python-cinderclient>=1.0.7
+python-glanceclient>=0.14.0
+python-keystoneclient>=0.10.0
+python-novaclient>=2.18.0
+python-neutronclient>=2.3.6,<3
+python-cinderclient>=1.1.0
python-heatclient>=0.2.9
-python-ironicclient
-python-saharaclient>=0.6.0
-python-swiftclient>=2.0.2
+python-ironicclient>=0.2.1
+python-saharaclient>=0.7.3
+python-swiftclient>=2.2.0
testresources>=0.2.4
testrepository>=0.0.18
-oslo.config>=1.2.1
+oslo.config>=1.4.0 # Apache-2.0
six>=1.7.0
iso8601>=0.1.9
fixtures>=0.3.14
diff --git a/setup.cfg b/setup.cfg
index 5c62710d1..2e25aceb4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,6 +22,7 @@ console_scripts =
verify-tempest-config = tempest.cmd.verify_tempest_config:main
javelin2 = tempest.cmd.javelin:main
run-tempest-stress = tempest.cmd.run_stress:main
+ tempest-cleanup = tempest.cmd.cleanup:main
[build_sphinx]
all_files = 1
diff --git a/tempest/api/compute/admin/test_fixed_ips_negative.py b/tempest/api/compute/admin/test_fixed_ips_negative.py
index 90be8201b..8d6a7fcfb 100644
--- a/tempest/api/compute/admin/test_fixed_ips_negative.py
+++ b/tempest/api/compute/admin/test_fixed_ips_negative.py
@@ -68,7 +68,10 @@ class FixedIPsNegativeTestJson(base.BaseV2ComputeAdminTest):
# NOTE(maurosr): since this exercises the same code snippet, we do it
# only for reserve action
body = {"reserve": "None"}
- self.assertRaises(exceptions.NotFound,
+ # NOTE(eliqiao): in Juno, the exception is NotFound, but in master, we
+ # change the error code to BadRequest, both exceptions should be
+ # accepted by tempest
+ self.assertRaises((exceptions.NotFound, exceptions.BadRequest),
self.client.reserve_fixed_ip,
"my.invalid.ip", body)
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 6c93d3392..6496176d2 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -44,9 +44,9 @@ class BaseComputeTest(tempest.test.BaseTestCase):
# TODO(andreaf) WE should care also for the alt_manager here
# but only once client lazy load in the manager is done
- os = cls.get_client_manager()
+ cls.os = cls.get_client_manager()
+ cls.multi_user = cls.check_multi_user()
- cls.os = os
cls.build_interval = CONF.compute.build_interval
cls.build_timeout = CONF.compute.build_timeout
cls.ssh_user = CONF.compute.ssh_user
@@ -58,7 +58,6 @@ class BaseComputeTest(tempest.test.BaseTestCase):
cls.image_ssh_password = CONF.compute.image_ssh_password
cls.servers = []
cls.images = []
- cls.multi_user = cls.get_multi_user()
cls.security_groups = []
cls.server_groups = []
@@ -118,27 +117,12 @@ class BaseComputeTest(tempest.test.BaseTestCase):
raise exceptions.InvalidConfiguration(message=msg)
@classmethod
- def get_multi_user(cls):
- multi_user = True
- # Determine if there are two regular users that can be
- # used in testing. If the test cases are allowed to create
- # users (config.compute.allow_tenant_isolation is true,
- # then we allow multi-user.
- if not CONF.compute.allow_tenant_isolation:
- user1 = CONF.identity.username
- user2 = CONF.identity.alt_username
- if not user2 or user1 == user2:
- multi_user = False
- else:
- user2_password = CONF.identity.alt_password
- user2_tenant_name = CONF.identity.alt_tenant_name
- if not user2_password or not user2_tenant_name:
- msg = ("Alternate user specified but not alternate "
- "tenant or password: alt_tenant_name=%s "
- "alt_password=%s"
- % (user2_tenant_name, user2_password))
- raise exceptions.InvalidConfiguration(msg)
- return multi_user
+ def check_multi_user(cls):
+ # We have a list of accounts now, so just checking if the list is gt 2
+ if not cls.isolated_creds.is_multi_user():
+ msg = "Not enough users available for multi-user testing"
+ raise exceptions.InvalidConfiguration(msg)
+ return True
@classmethod
def clear_servers(cls):
@@ -390,19 +374,14 @@ class BaseComputeAdminTest(BaseComputeTest):
@classmethod
def resource_setup(cls):
super(BaseComputeAdminTest, cls).resource_setup()
- if (CONF.compute.allow_tenant_isolation or
- cls.force_tenant_isolation is True):
+ try:
creds = cls.isolated_creds.get_admin_creds()
- cls.os_adm = clients.Manager(credentials=creds,
- interface=cls._interface)
- else:
- try:
- cls.os_adm = clients.ComputeAdminManager(
- interface=cls._interface)
- except exceptions.InvalidCredentials:
- msg = ("Missing Compute Admin API credentials "
- "in configuration.")
- raise cls.skipException(msg)
+ cls.os_adm = clients.Manager(
+ credentials=creds, interface=cls._interface)
+ except NotImplementedError:
+ msg = ("Missing Compute Admin API credentials in configuration.")
+ raise cls.skipException(msg)
+
if cls._api_version == 2:
cls.availability_zone_admin_client = (
cls.os_adm.availability_zone_client)
diff --git a/tempest/api/compute/images/test_images_oneserver.py b/tempest/api/compute/images/test_images_oneserver.py
index c0b67305f..459d78b3f 100644
--- a/tempest/api/compute/images/test_images_oneserver.py
+++ b/tempest/api/compute/images/test_images_oneserver.py
@@ -105,7 +105,11 @@ class ImagesOneServerTestJSON(base.BaseV2ComputeTest):
raise self.skipException("Not testable in XML")
# prefix character is:
# http://www.fileformat.info/info/unicode/char/1F4A9/index.htm
- utf8_name = data_utils.rand_name(u'\xF0\x9F\x92\xA9')
+
+ # We use a string with 3 byte utf-8 character due to bug
+ # #1370954 in glance which will 500 if mysql is used as the
+ # backend and it attempts to store a 4 byte utf-8 character
+ utf8_name = data_utils.rand_name('\xe2\x82\xa1')
resp, body = self.client.create_image(self.server_id, utf8_name)
image_id = data_utils.parse_image_id(resp['location'])
self.addCleanup(self.client.delete_image, image_id)
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index b28124c12..4fd5c0291 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import six
+
from tempest.api.compute.security_groups import base
from tempest import config
from tempest import test
@@ -27,6 +29,40 @@ class SecurityGroupRulesTestJSON(base.BaseSecurityGroupsTest):
super(SecurityGroupRulesTestJSON, cls).resource_setup()
cls.client = cls.security_groups_client
cls.neutron_available = CONF.service_available.neutron
+ cls.ip_protocol = 'tcp'
+ cls.from_port = 22
+ cls.to_port = 22
+
+ def setUp(cls):
+ super(SecurityGroupRulesTestJSON, cls).setUp()
+
+ from_port = cls.from_port
+ to_port = cls.to_port
+ group = {}
+ ip_range = {}
+ if cls._interface == 'xml':
+ # NOTE: An XML response is different from the one of JSON
+ # like the following.
+ from_port = six.text_type(from_port)
+ to_port = six.text_type(to_port)
+ group = {'tenant_id': 'None', 'name': 'None'}
+ ip_range = {'cidr': 'None'}
+ cls.expected = {
+ 'id': None,
+ 'parent_group_id': None,
+ 'ip_protocol': cls.ip_protocol,
+ 'from_port': from_port,
+ 'to_port': to_port,
+ 'ip_range': ip_range,
+ 'group': group
+ }
+
+ def _check_expected_response(self, actual_rule):
+ for key in self.expected:
+ if key == 'id':
+ continue
+ self.assertEqual(self.expected[key], actual_rule[key],
+ "Miss-matched key is %s" % key)
@test.attr(type='smoke')
@test.services('network')
@@ -34,50 +70,68 @@ class SecurityGroupRulesTestJSON(base.BaseSecurityGroupsTest):
# Positive test: Creation of Security Group rule
# should be successful
# Creating a Security Group to add rules to it
- resp, security_group = self.create_security_group()
+ _, security_group = self.create_security_group()
securitygroup_id = security_group['id']
# Adding rules to the created Security Group
- ip_protocol = 'tcp'
- from_port = 22
- to_port = 22
- resp, rule = \
+ _, rule = \
self.client.create_security_group_rule(securitygroup_id,
- ip_protocol,
- from_port,
- to_port)
- self.addCleanup(self.client.delete_security_group_rule, rule['id'])
- self.assertEqual(200, resp.status)
+ self.ip_protocol,
+ self.from_port,
+ self.to_port)
+ self.expected['parent_group_id'] = securitygroup_id
+ self.expected['ip_range'] = {'cidr': '0.0.0.0/0'}
+ self._check_expected_response(rule)
@test.attr(type='smoke')
@test.services('network')
- def test_security_group_rules_create_with_optional_arguments(self):
+ def test_security_group_rules_create_with_optional_cidr(self):
# Positive test: Creation of Security Group rule
- # with optional arguments
+ # with optional argument cidr
# should be successful
- secgroup1 = None
- secgroup2 = None
# Creating a Security Group to add rules to it
- resp, security_group = self.create_security_group()
- secgroup1 = security_group['id']
- # Creating a Security Group so as to assign group_id to the rule
- resp, security_group = self.create_security_group()
- secgroup2 = security_group['id']
- # Adding rules to the created Security Group with optional arguments
- parent_group_id = secgroup1
- ip_protocol = 'tcp'
- from_port = 22
- to_port = 22
+ _, security_group = self.create_security_group()
+ parent_group_id = security_group['id']
+
+ # Adding rules to the created Security Group with optional cidr
cidr = '10.2.3.124/24'
- group_id = secgroup2
- resp, rule = \
+ _, rule = \
self.client.create_security_group_rule(parent_group_id,
- ip_protocol,
- from_port,
- to_port,
- cidr=cidr,
+ self.ip_protocol,
+ self.from_port,
+ self.to_port,
+ cidr=cidr)
+ self.expected['parent_group_id'] = parent_group_id
+ self.expected['ip_range'] = {'cidr': cidr}
+ self._check_expected_response(rule)
+
+ @test.attr(type='smoke')
+ @test.services('network')
+ def test_security_group_rules_create_with_optional_group_id(self):
+ # Positive test: Creation of Security Group rule
+ # with optional argument group_id
+ # should be successful
+
+ # Creating a Security Group to add rules to it
+ _, security_group = self.create_security_group()
+ parent_group_id = security_group['id']
+
+ # Creating a Security Group so as to assign group_id to the rule
+ _, security_group = self.create_security_group()
+ group_id = security_group['id']
+ group_name = security_group['name']
+
+ # Adding rules to the created Security Group with optional group_id
+ _, rule = \
+ self.client.create_security_group_rule(parent_group_id,
+ self.ip_protocol,
+ self.from_port,
+ self.to_port,
group_id=group_id)
- self.assertEqual(200, resp.status)
+ self.expected['parent_group_id'] = parent_group_id
+ self.expected['group'] = {'tenant_id': self.client.tenant_id,
+ 'name': group_name}
+ self._check_expected_response(rule)
@test.attr(type='smoke')
@test.services('network')
@@ -89,13 +143,11 @@ class SecurityGroupRulesTestJSON(base.BaseSecurityGroupsTest):
securitygroup_id = security_group['id']
# Add a first rule to the created Security Group
- ip_protocol1 = 'tcp'
- from_port1 = 22
- to_port1 = 22
resp, rule = \
self.client.create_security_group_rule(securitygroup_id,
- ip_protocol1,
- from_port1, to_port1)
+ self.ip_protocol,
+ self.from_port,
+ self.to_port)
rule1_id = rule['id']
# Add a second rule to the created Security Group
@@ -127,14 +179,11 @@ class SecurityGroupRulesTestJSON(base.BaseSecurityGroupsTest):
resp, security_group = self.create_security_group()
sg2_id = security_group['id']
# Adding rules to the Group1
- ip_protocol = 'tcp'
- from_port = 22
- to_port = 22
resp, rule = \
self.client.create_security_group_rule(sg1_id,
- ip_protocol,
- from_port,
- to_port,
+ self.ip_protocol,
+ self.from_port,
+ self.to_port,
group_id=sg2_id)
self.assertEqual(200, resp.status)
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 5df8d8263..25dc87d00 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -42,6 +42,7 @@ class ServersTestJSON(base.BaseV2ComputeTest):
personality = [{'path': '/test.txt',
'contents': base64.b64encode(file_contents)}]
cls.client = cls.servers_client
+ cls.network_client = cls.os.network_client
cli_resp = cls.create_test_server(name=cls.name,
meta=cls.meta,
accessIPv4=cls.accessIPv4,
@@ -124,6 +125,40 @@ class ServersTestJSON(base.BaseV2ComputeTest):
self.assertEqual(200, resp.status)
self.assertIn(server['id'], server_group['members'])
+ @testtools.skipUnless(CONF.service_available.neutron,
+ 'Neutron service must be available.')
+ def test_verify_multiple_nics_order(self):
+ # Verify that the networks order given at the server creation is
+ # preserved within the server.
+ name_net1 = data_utils.rand_name(self.__class__.__name__)
+ _, net1 = self.network_client.create_network(name=name_net1)
+ name_net2 = data_utils.rand_name(self.__class__.__name__)
+ _, net2 = self.network_client.create_network(name=name_net2)
+
+ _, subnet1 = self.network_client.create_subnet(
+ network_id=net1['network']['id'],
+ cidr='19.80.0.0/24',
+ ip_version=4)
+ _, subnet2 = self.network_client.create_subnet(
+ network_id=net2['network']['id'],
+ cidr='19.86.0.0/24',
+ ip_version=4)
+
+ networks = [{'uuid': net1['network']['id']},
+ {'uuid': net2['network']['id']}]
+
+ _, server_multi_nics = self.create_test_server(
+ networks=networks, wait_until='ACTIVE')
+
+ _, addresses = self.client.list_addresses(server_multi_nics['id'])
+
+ expected_addr = ['19.80.0.2', '19.86.0.2']
+
+ addr = [addresses[name_net1][0]['addr'],
+ addresses[name_net2][0]['addr']]
+
+ self.assertEqual(expected_addr, addr)
+
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
disk_config = 'AUTO'
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index 634bc01df..6a5da5850 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -70,6 +70,18 @@ class DeleteServersTestJSON(base.BaseV2ComputeTest):
self.assertEqual('204', resp['status'])
self.client.wait_for_server_termination(server['id'])
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
+ @test.attr(type='gate')
+ def test_delete_server_while_in_suspended_state(self):
+ # Delete a server while it's VM state is Suspended
+ _, server = self.create_test_server(wait_until='ACTIVE')
+ self.client.suspend_server(server['id'])
+ self.client.wait_for_server_status(server['id'], 'SUSPENDED')
+ resp, _ = self.client.delete_server(server['id'])
+ self.assertEqual('204', resp['status'])
+ self.client.wait_for_server_termination(server['id'])
+
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
@test.attr(type='gate')
diff --git a/tempest/api/compute/servers/test_list_server_filters.py b/tempest/api/compute/servers/test_list_server_filters.py
index 60329767c..e660f0030 100644
--- a/tempest/api/compute/servers/test_list_server_filters.py
+++ b/tempest/api/compute/servers/test_list_server_filters.py
@@ -69,12 +69,12 @@ class ListServerFiltersTestJSON(base.BaseV2ComputeTest):
resp, cls.s3 = cls.create_test_server(name=cls.s3_name,
flavor=cls.flavor_ref_alt,
wait_until='ACTIVE')
- if (CONF.service_available.neutron and
- CONF.compute.allow_tenant_isolation):
- network = cls.isolated_creds.get_primary_network()
- cls.fixed_network_name = network['name']
- else:
- cls.fixed_network_name = CONF.compute.fixed_network_name
+
+ cls.fixed_network_name = CONF.compute.fixed_network_name
+ if CONF.service_available.neutron:
+ if hasattr(cls.isolated_creds, 'get_primary_network'):
+ network = cls.isolated_creds.get_primary_network()
+ cls.fixed_network_name = network['name']
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@test.attr(type='gate')
@@ -233,6 +233,30 @@ class ListServerFiltersTestJSON(base.BaseV2ComputeTest):
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.attr(type='gate')
+ def test_list_servers_filtered_by_name_regex(self):
+ # list of regex that should match s1, s2 and s3
+ regexes = ['^.*\-instance\-[0-9]+$', '^.*\-instance\-.*$']
+ for regex in regexes:
+ params = {'name': regex}
+ resp, body = self.client.list_servers(params)
+ servers = body['servers']
+
+ self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
+ self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
+ self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
+
+ # Let's take random part of name and try to search it
+ part_name = self.s1_name[-10:]
+
+ params = {'name': part_name}
+ resp, body = self.client.list_servers(params)
+ servers = body['servers']
+
+ self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
+ self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
+ self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
+
+ @test.attr(type='gate')
def test_list_servers_filtered_by_ip(self):
# Filter servers by ip
# Here should be listed 1 server
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 256ce0c91..b51b46e4f 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -75,9 +75,7 @@ class ServerActionsTestJSON(base.BaseV2ComputeTest):
new_password)
linux_client.validate_authentication()
- @test.attr(type='smoke')
- def test_reboot_server_hard(self):
- # The server should be power cycled
+ def _test_reboot_server(self, reboot_type):
if self.run_ssh:
# Get the time the server was last rebooted,
resp, server = self.client.get_server(self.server_id)
@@ -85,7 +83,7 @@ class ServerActionsTestJSON(base.BaseV2ComputeTest):
self.password)
boot_time = linux_client.get_boot_time()
- resp, body = self.client.reboot(self.server_id, 'HARD')
+ resp, body = self.client.reboot(self.server_id, reboot_type)
self.assertEqual(202, resp.status)
self.client.wait_for_server_status(self.server_id, 'ACTIVE')
@@ -97,28 +95,16 @@ class ServerActionsTestJSON(base.BaseV2ComputeTest):
self.assertTrue(new_boot_time > boot_time,
'%s > %s' % (new_boot_time, boot_time))
+ @test.attr(type='smoke')
+ def test_reboot_server_hard(self):
+ # The server should be power cycled
+ self._test_reboot_server('HARD')
+
@test.skip_because(bug="1014647")
@test.attr(type='smoke')
def test_reboot_server_soft(self):
# The server should be signaled to reboot gracefully
- if self.run_ssh:
- # Get the time the server was last rebooted,
- resp, server = self.client.get_server(self.server_id)
- linux_client = remote_client.RemoteClient(server, self.ssh_user,
- self.password)
- boot_time = linux_client.get_boot_time()
-
- resp, body = self.client.reboot(self.server_id, 'SOFT')
- self.assertEqual(202, resp.status)
- self.client.wait_for_server_status(self.server_id, 'ACTIVE')
-
- if self.run_ssh:
- # Log in and verify the boot time has changed
- linux_client = remote_client.RemoteClient(server, self.ssh_user,
- self.password)
- new_boot_time = linux_client.get_boot_time()
- self.assertTrue(new_boot_time > boot_time,
- '%s > %s' % (new_boot_time, boot_time))
+ self._test_reboot_server('SOFT')
@test.attr(type='smoke')
def test_rebuild_server(self):
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 25f24b95c..a984ade25 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -64,7 +64,8 @@ class ServerRescueTestJSON(base.BaseV2ComputeTest):
def resource_cleanup(cls):
# Deleting the floating IP which is created in this method
cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
- cls.delete_volume(cls.volume['id'])
+ if getattr(cls, 'volume', None):
+ cls.delete_volume(cls.volume['id'])
resp, cls.sg = cls.security_groups_client.delete_security_group(
cls.sg_id)
super(ServerRescueTestJSON, cls).resource_cleanup()
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index aa406f766..0d29968f4 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -56,7 +56,8 @@ class ServerRescueNegativeTestJSON(base.BaseV2ComputeTest):
@classmethod
def resource_cleanup(cls):
- cls.delete_volume(cls.volume['id'])
+ if getattr(cls, 'volume', None):
+ cls.delete_volume(cls.volume['id'])
super(ServerRescueNegativeTestJSON, cls).resource_cleanup()
def _detach(self, server_id, volume_id):
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index b86ee0657..034926012 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -45,10 +45,7 @@ class ServersNegativeTestJSON(base.BaseV2ComputeTest):
def resource_setup(cls):
super(ServersNegativeTestJSON, cls).resource_setup()
cls.client = cls.servers_client
- if CONF.compute.allow_tenant_isolation:
- cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
- else:
- cls.alt_os = clients.AltManager()
+ cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
cls.alt_client = cls.alt_os.servers_client
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
diff --git a/tempest/api/compute/test_authorization.py b/tempest/api/compute/test_authorization.py
index 015d9f580..175f008b2 100644
--- a/tempest/api/compute/test_authorization.py
+++ b/tempest/api/compute/test_authorization.py
@@ -45,12 +45,8 @@ class AuthorizationTestJSON(base.BaseV2ComputeTest):
cls.keypairs_client = cls.os.keypairs_client
cls.security_client = cls.os.security_groups_client
- if CONF.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_alt_creds()
- cls.alt_manager = clients.Manager(credentials=creds)
- else:
- # Use the alt_XXX credentials in the config file
- cls.alt_manager = clients.AltManager()
+ creds = cls.isolated_creds.get_alt_creds()
+ cls.alt_manager = clients.Manager(credentials=creds)
cls.alt_client = cls.alt_manager.servers_client
cls.alt_images_client = cls.alt_manager.images_client
diff --git a/tempest/api/compute/v3/servers/test_list_server_filters.py b/tempest/api/compute/v3/servers/test_list_server_filters.py
index 209d29361..73844cf68 100644
--- a/tempest/api/compute/v3/servers/test_list_server_filters.py
+++ b/tempest/api/compute/v3/servers/test_list_server_filters.py
@@ -70,12 +70,11 @@ class ListServerFiltersV3Test(base.BaseV3ComputeTest):
flavor=cls.flavor_ref_alt,
wait_until='ACTIVE')
- if (CONF.service_available.neutron and
- CONF.compute.allow_tenant_isolation):
- network = cls.isolated_creds.get_primary_network()
- cls.fixed_network_name = network['name']
- else:
- cls.fixed_network_name = CONF.compute.fixed_network_name
+ cls.fixed_network_name = CONF.compute.fixed_network_name
+ if CONF.service_available.neutron:
+ if hasattr(cls.isolated_creds, 'get_primary_network'):
+ network = cls.isolated_creds.get_primary_network()
+ cls.fixed_network_name = network['name']
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@test.attr(type='gate')
diff --git a/tempest/api/compute/v3/servers/test_servers_negative.py b/tempest/api/compute/v3/servers/test_servers_negative.py
index 30ac0ac7c..4b1fe04d5 100644
--- a/tempest/api/compute/v3/servers/test_servers_negative.py
+++ b/tempest/api/compute/v3/servers/test_servers_negative.py
@@ -45,10 +45,7 @@ class ServersNegativeV3Test(base.BaseV3ComputeTest):
def resource_setup(cls):
super(ServersNegativeV3Test, cls).resource_setup()
cls.client = cls.servers_client
- if CONF.compute.allow_tenant_isolation:
- cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
- else:
- cls.alt_os = clients.AltManager()
+ cls.alt_os = clients.Manager(cls.isolated_creds.get_alt_creds())
cls.alt_client = cls.alt_os.servers_v3_client
resp, server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
diff --git a/tempest/api/identity/base.py b/tempest/api/identity/base.py
index a225f1201..1e4973b44 100644
--- a/tempest/api/identity/base.py
+++ b/tempest/api/identity/base.py
@@ -202,7 +202,7 @@ class DataGenerator(object):
def _try_wrapper(func, item, **kwargs):
try:
if kwargs:
- func(item['id'], kwargs)
+ func(item['id'], **kwargs)
else:
func(item['id'])
except exceptions.NotFound:
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 08767e388..74baba6b3 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -41,10 +41,7 @@ class BaseImageTest(tempest.test.BaseTestCase):
if not CONF.service_available.glance:
skip_msg = ("%s skipped as glance is not available" % cls.__name__)
raise cls.skipException(skip_msg)
- if CONF.compute.allow_tenant_isolation:
- cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
- else:
- cls.os = clients.Manager()
+ cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
@classmethod
def resource_cleanup(cls):
@@ -91,10 +88,7 @@ class BaseV1ImageMembersTest(BaseV1ImageTest):
@classmethod
def resource_setup(cls):
super(BaseV1ImageMembersTest, cls).resource_setup()
- if CONF.compute.allow_tenant_isolation:
- cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
- else:
- cls.os_alt = clients.AltManager()
+ cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
cls.alt_img_cli = cls.os_alt.image_client
cls.alt_tenant_id = cls.alt_img_cli.tenant_id
@@ -126,11 +120,8 @@ class BaseV2MemberImageTest(BaseV2ImageTest):
@classmethod
def resource_setup(cls):
super(BaseV2MemberImageTest, cls).resource_setup()
- if CONF.compute.allow_tenant_isolation:
- creds = cls.isolated_creds.get_alt_creds()
- cls.os_alt = clients.Manager(creds)
- else:
- cls.os_alt = clients.AltManager()
+ creds = cls.isolated_creds.get_alt_creds()
+ cls.os_alt = clients.Manager(creds)
cls.os_img_client = cls.os.image_client_v2
cls.alt_img_client = cls.os_alt.image_client_v2
cls.alt_tenant_id = cls.alt_img_client.tenant_id
diff --git a/tempest/api/messaging/base.py b/tempest/api/messaging/base.py
index 0e062c531..58511a942 100644
--- a/tempest/api/messaging/base.py
+++ b/tempest/api/messaging/base.py
@@ -35,8 +35,8 @@ class BaseMessagingTest(test.BaseTestCase):
"""
@classmethod
- def setUpClass(cls):
- super(BaseMessagingTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(BaseMessagingTest, cls).resource_setup()
if not CONF.service_available.zaqar:
raise cls.skipException("Zaqar support is required")
os = cls.get_client_manager()
diff --git a/tempest/api/messaging/test_claims.py b/tempest/api/messaging/test_claims.py
index 885f00e10..1b004ddc2 100644
--- a/tempest/api/messaging/test_claims.py
+++ b/tempest/api/messaging/test_claims.py
@@ -30,8 +30,8 @@ class TestClaims(base.BaseMessagingTest):
_interface = 'json'
@classmethod
- def setUpClass(cls):
- super(TestClaims, cls).setUpClass()
+ def resource_setup(cls):
+ super(TestClaims, cls).resource_setup()
cls.queue_name = data_utils.rand_name('Queues-Test')
# Create Queue
cls.create_queue(cls.queue_name)
@@ -118,6 +118,6 @@ class TestClaims(base.BaseMessagingTest):
self.client.delete_messages(message_uri)
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
cls.delete_queue(cls.queue_name)
- super(TestClaims, cls).tearDownClass()
+ super(TestClaims, cls).resource_cleanup()
diff --git a/tempest/api/messaging/test_messages.py b/tempest/api/messaging/test_messages.py
index 3217361b6..3c27ac221 100644
--- a/tempest/api/messaging/test_messages.py
+++ b/tempest/api/messaging/test_messages.py
@@ -29,8 +29,8 @@ class TestMessages(base.BaseMessagingTest):
_interface = 'json'
@classmethod
- def setUpClass(cls):
- super(TestMessages, cls).setUpClass()
+ def resource_setup(cls):
+ super(TestMessages, cls).resource_setup()
cls.queue_name = data_utils.rand_name('Queues-Test')
# Create Queue
cls.client.create_queue(cls.queue_name)
@@ -117,6 +117,6 @@ class TestMessages(base.BaseMessagingTest):
self.assertEqual('204', resp['status'])
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
cls.delete_queue(cls.queue_name)
- super(TestMessages, cls).tearDownClass()
+ super(TestMessages, cls).resource_cleanup()
diff --git a/tempest/api/messaging/test_queues.py b/tempest/api/messaging/test_queues.py
index edfe10e88..8f9ac208b 100644
--- a/tempest/api/messaging/test_queues.py
+++ b/tempest/api/messaging/test_queues.py
@@ -32,11 +32,10 @@ class TestQueues(base.BaseMessagingTest):
def test_create_queue(self):
# Create Queue
queue_name = data_utils.rand_name('test-')
- resp, body = self.create_queue(queue_name)
+ _, body = self.create_queue(queue_name)
self.addCleanup(self.client.delete_queue, queue_name)
- self.assertEqual('201', resp['status'])
self.assertEqual('', body)
@@ -44,8 +43,8 @@ class TestManageQueue(base.BaseMessagingTest):
_interface = 'json'
@classmethod
- def setUpClass(cls):
- super(TestManageQueue, cls).setUpClass()
+ def resource_setup(cls):
+ super(TestManageQueue, cls).resource_setup()
cls.queues = list()
for _ in moves.xrange(5):
queue_name = data_utils.rand_name('Queues-Test')
@@ -57,30 +56,27 @@ class TestManageQueue(base.BaseMessagingTest):
def test_delete_queue(self):
# Delete Queue
queue_name = self.queues.pop()
- resp, body = self.delete_queue(queue_name)
- self.assertEqual('204', resp['status'])
+ _, body = self.delete_queue(queue_name)
self.assertEqual('', body)
@test.attr(type='smoke')
def test_check_queue_existence(self):
# Checking Queue Existence
for queue_name in self.queues:
- resp, body = self.check_queue_exists(queue_name)
- self.assertEqual('204', resp['status'])
+ _, body = self.check_queue_exists(queue_name)
self.assertEqual('', body)
@test.attr(type='smoke')
def test_check_queue_head(self):
# Checking Queue Existence by calling HEAD
for queue_name in self.queues:
- resp, body = self.check_queue_exists_head(queue_name)
- self.assertEqual('204', resp['status'])
+ _, body = self.check_queue_exists_head(queue_name)
self.assertEqual('', body)
@test.attr(type='smoke')
def test_list_queues(self):
# Listing queues
- resp, body = self.list_queues()
+ _, body = self.list_queues()
self.assertEqual(len(body['queues']), len(self.queues))
for item in body['queues']:
self.assertIn(item['name'], self.queues)
@@ -91,7 +87,7 @@ class TestManageQueue(base.BaseMessagingTest):
queue_name = self.queues[data_utils.rand_int_id(0,
len(self.queues) - 1)]
# Get Queue Stats for a newly created Queue
- resp, body = self.get_queue_stats(queue_name)
+ _, body = self.get_queue_stats(queue_name)
msgs = body['messages']
for element in ('free', 'claimed', 'total'):
self.assertEqual(0, msgs[element])
@@ -104,8 +100,7 @@ class TestManageQueue(base.BaseMessagingTest):
queue_name = self.queues[data_utils.rand_int_id(0,
len(self.queues) - 1)]
# Check the Queue has no metadata
- resp, body = self.get_queue_metadata(queue_name)
- self.assertEqual('200', resp['status'])
+ _, body = self.get_queue_metadata(queue_name)
self.assertThat(body, matchers.HasLength(0))
# Create metadata
key3 = [0, 1, 2, 3, 4]
@@ -116,16 +111,14 @@ class TestManageQueue(base.BaseMessagingTest):
req_body = dict()
req_body[data_utils.rand_name('key1')] = req_body1
# Set Queue Metadata
- resp, body = self.set_queue_metadata(queue_name, req_body)
- self.assertEqual('204', resp['status'])
+ _, body = self.set_queue_metadata(queue_name, req_body)
self.assertEqual('', body)
# Get Queue Metadata
- resp, body = self.get_queue_metadata(queue_name)
- self.assertEqual('200', resp['status'])
+ _, body = self.get_queue_metadata(queue_name)
self.assertThat(body, matchers.Equals(req_body))
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
for queue_name in cls.queues:
cls.client.delete_queue(queue_name)
- super(TestManageQueue, cls).tearDownClass()
+ super(TestManageQueue, cls).resource_cleanup()
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 834c01034..91e3e142a 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -83,6 +83,7 @@ class BaseNetworkTest(tempest.test.BaseTestCase):
cls.fw_rules = []
cls.fw_policies = []
cls.ipsecpolicies = []
+ cls.ethertype = "IPv" + str(cls._ip_version)
@classmethod
def resource_cleanup(cls):
@@ -365,19 +366,15 @@ class BaseAdminNetworkTest(BaseNetworkTest):
@classmethod
def resource_setup(cls):
super(BaseAdminNetworkTest, cls).resource_setup()
- admin_username = CONF.compute_admin.username
- admin_password = CONF.compute_admin.password
- admin_tenant = CONF.compute_admin.tenant_name
- if not (admin_username and admin_password and admin_tenant):
+
+ try:
+ creds = cls.isolated_creds.get_admin_creds()
+ cls.os_adm = clients.Manager(
+ credentials=creds, interface=cls._interface)
+ except NotImplementedError:
msg = ("Missing Administrative Network API credentials "
"in configuration.")
raise cls.skipException(msg)
- if (CONF.compute.allow_tenant_isolation or
- cls.force_tenant_isolation is True):
- cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
- interface=cls._interface)
- else:
- cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
cls.admin_client = cls.os_adm.network_client
@classmethod
diff --git a/tempest/api/network/common.py b/tempest/api/network/common.py
deleted file mode 100644
index 5ac8b5ab9..000000000
--- a/tempest/api/network/common.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Copyright 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-import six
-
-
-class AttributeDict(dict):
-
- """
- Provide attribute access (dict.key) to dictionary values.
- """
-
- def __getattr__(self, name):
- """Allow attribute access for all keys in the dict."""
- if name in self:
- return self[name]
- return super(AttributeDict, self).__getattribute__(name)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class DeletableResource(AttributeDict):
-
- """
- Support deletion of neutron resources (networks, subnets) via a
- delete() method, as is supported by keystone and nova resources.
- """
-
- def __init__(self, *args, **kwargs):
- self.client = kwargs.pop('client', None)
- super(DeletableResource, self).__init__(*args, **kwargs)
-
- def __str__(self):
- return '<%s id="%s" name="%s">' % (self.__class__.__name__,
- self.id, self.name)
-
- @abc.abstractmethod
- def delete(self):
- return
-
- def __hash__(self):
- return id(self)
-
-
-class DeletableNetwork(DeletableResource):
-
- def delete(self):
- self.client.delete_network(self.id)
-
-
-class DeletableSubnet(DeletableResource):
-
- def __init__(self, *args, **kwargs):
- super(DeletableSubnet, self).__init__(*args, **kwargs)
- self._router_ids = set()
-
- def update(self, *args, **kwargs):
- body = dict(subnet=dict(*args, **kwargs))
- result = self.client.update_subnet(subnet=self.id, body=body)
- super(DeletableSubnet, self).update(**result['subnet'])
-
- def add_to_router(self, router_id):
- self._router_ids.add(router_id)
- body = dict(subnet_id=self.id)
- self.client.add_interface_router(router_id, body=body)
-
- def delete(self):
- for router_id in self._router_ids.copy():
- body = dict(subnet_id=self.id)
- self.client.remove_interface_router(router_id, body=body)
- self._router_ids.remove(router_id)
- self.client.delete_subnet(self.id)
-
-
-class DeletableRouter(DeletableResource):
-
- def add_gateway(self, network_id):
- body = dict(network_id=network_id)
- self.client.add_gateway_router(self.id, body=body)
-
- def delete(self):
- self.client.remove_gateway_router(self.id)
- self.client.delete_router(self.id)
-
-
-class DeletableFloatingIp(DeletableResource):
-
- def update(self, *args, **kwargs):
- result = self.client.update_floatingip(floatingip=self.id,
- body=dict(
- floatingip=dict(*args,
- **kwargs)
- ))
- super(DeletableFloatingIp, self).update(**result['floatingip'])
-
- def __repr__(self):
- return '<%s addr="%s">' % (self.__class__.__name__,
- self.floating_ip_address)
-
- def __str__(self):
- return '<"FloatingIP" addr="%s" id="%s">' % (self.floating_ip_address,
- self.id)
-
- def delete(self):
- self.client.delete_floatingip(self.id)
-
-
-class DeletablePort(DeletableResource):
-
- def delete(self):
- self.client.delete_port(self.id)
-
-
-class DeletableSecurityGroup(DeletableResource):
-
- def delete(self):
- self.client.delete_security_group(self.id)
-
-
-class DeletableSecurityGroupRule(DeletableResource):
-
- def __repr__(self):
- return '<%s id="%s">' % (self.__class__.__name__, self.id)
-
- def delete(self):
- self.client.delete_security_group_rule(self.id)
-
-
-class DeletablePool(DeletableResource):
-
- def delete(self):
- self.client.delete_pool(self.id)
-
-
-class DeletableMember(DeletableResource):
-
- def delete(self):
- self.client.delete_member(self.id)
-
-
-class DeletableVip(DeletableResource):
-
- def delete(self):
- self.client.delete_vip(self.id)
diff --git a/tempest/api/network/test_fwaas_extensions.py b/tempest/api/network/test_fwaas_extensions.py
index 193bf76fb..8e2b7f581 100644
--- a/tempest/api/network/test_fwaas_extensions.py
+++ b/tempest/api/network/test_fwaas_extensions.py
@@ -36,6 +36,8 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
List firewall policies
Create firewall policy
Update firewall policy
+ Insert firewall rule to policy
+ Remove firewall rule from policy
Delete firewall policy
Show firewall policy
List firewall
@@ -62,6 +64,14 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
except exceptions.NotFound:
pass
+ def _try_delete_rule(self, rule_id):
+ # delete rule, if it exists
+ try:
+ self.client.delete_firewall_rule(rule_id)
+ # if rule is not found, this means it was deleted in the test
+ except exceptions.NotFound:
+ pass
+
def _try_delete_firewall(self, fw_id):
# delete firewall, if it exists
try:
@@ -86,7 +96,6 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
(fw_id, target_states))
raise exceptions.TimeoutException(m)
- @test.attr(type='smoke')
def test_list_firewall_rules(self):
# List firewall rules
_, fw_rules = self.client.list_firewall_rules()
@@ -104,7 +113,6 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
m['ip_version'],
m['enabled']) for m in fw_rules])
- @test.attr(type='smoke')
def test_create_update_delete_firewall_rule(self):
# Create firewall rule
_, body = self.client.create_firewall_rule(
@@ -125,14 +133,12 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
self.assertNotIn(fw_rule_id,
[m['id'] for m in fw_rules['firewall_rules']])
- @test.attr(type='smoke')
def test_show_firewall_rule(self):
# show a created firewall rule
_, fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
for key, value in fw_rule['firewall_rule'].iteritems():
self.assertEqual(self.fw_rule[key], value)
- @test.attr(type='smoke')
def test_list_firewall_policies(self):
_, fw_policies = self.client.list_firewall_policies()
fw_policies = fw_policies['firewall_policies']
@@ -143,7 +149,6 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
m['name'],
m['firewall_rules']) for m in fw_policies])
- @test.attr(type='smoke')
def test_create_update_delete_firewall_policy(self):
# Create firewall policy
_, body = self.client.create_firewall_policy(
@@ -166,7 +171,6 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
fw_policies = fw_policies['firewall_policies']
self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])
- @test.attr(type='smoke')
def test_show_firewall_policy(self):
# show a created firewall policy
_, fw_policy = self.client.show_firewall_policy(self.fw_policy['id'])
@@ -174,7 +178,6 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
for key, value in fw_policy.iteritems():
self.assertEqual(self.fw_policy[key], value)
- @test.attr(type='smoke')
def test_create_show_delete_firewall(self):
# Create tenant network resources required for an ACTIVE firewall
network = self.create_network()
@@ -218,6 +221,40 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest):
# Delete firewall
self.client.delete_firewall(firewall_id)
+ @test.attr(type='smoke')
+ def test_insert_remove_firewall_rule_from_policy(self):
+ # Create firewall rule
+ resp, body = self.client.create_firewall_rule(
+ name=data_utils.rand_name("fw-rule"),
+ action="allow",
+ protocol="tcp")
+ fw_rule_id = body['firewall_rule']['id']
+ self.addCleanup(self._try_delete_rule, fw_rule_id)
+ # Create firewall policy
+ _, body = self.client.create_firewall_policy(
+ name=data_utils.rand_name("fw-policy"))
+ fw_policy_id = body['firewall_policy']['id']
+ self.addCleanup(self._try_delete_policy, fw_policy_id)
+
+ # Insert rule to firewall policy
+ self.client.insert_firewall_rule_in_policy(
+ fw_policy_id, fw_rule_id, '', '')
+
+ # Verify insertion of rule in policy
+ self.assertIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+ # Remove rule from the firewall policy
+ self.client.remove_firewall_rule_from_policy(
+ fw_policy_id, fw_rule_id)
+
+ # Verify removal of rule from firewall policy
+ self.assertNotIn(fw_rule_id, self._get_list_fw_rule_ids(fw_policy_id))
+
+ def _get_list_fw_rule_ids(self, fw_policy_id):
+ _, fw_policy = self.client.show_firewall_policy(
+ fw_policy_id)
+ return [ruleid for ruleid in fw_policy['firewall_policy']
+ ['firewall_rules']]
+
class FWaaSExtensionTestXML(FWaaSExtensionTestJSON):
_interface = 'xml'
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index e1eb48d41..986a2c8fa 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -17,6 +17,7 @@ import netaddr
import testtools
from tempest.api.network import base
+from tempest.common import custom_matchers
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
@@ -65,13 +66,94 @@ class NetworksTestJSON(base.BaseNetworkTest):
cls.name = cls.network['name']
cls.subnet = cls.create_subnet(cls.network)
cls.cidr = cls.subnet['cidr']
+ cls._subnet_data = {6: {'gateway':
+ str(cls._get_gateway_from_tempest_conf(6)),
+ 'allocation_pools':
+ cls._get_allocation_pools_from_gateway(6),
+ 'dns_nameservers': ['2001:4860:4860::8844',
+ '2001:4860:4860::8888'],
+ 'host_routes': [{'destination': '2001::/64',
+ 'nexthop': '2003::1'}],
+ 'new_host_routes': [{'destination':
+ '2001::/64',
+ 'nexthop': '2005::1'}],
+ 'new_dns_nameservers':
+ ['2001:4860:4860::7744',
+ '2001:4860:4860::7888']},
+ 4: {'gateway':
+ str(cls._get_gateway_from_tempest_conf(4)),
+ 'allocation_pools':
+ cls._get_allocation_pools_from_gateway(4),
+ 'dns_nameservers': ['8.8.4.4', '8.8.8.8'],
+ 'host_routes': [{'destination': '10.20.0.0/32',
+ 'nexthop': '10.100.1.1'}],
+ 'new_host_routes': [{'destination':
+ '10.20.0.0/32',
+ 'nexthop':
+ '10.100.1.2'}],
+ 'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
+
+ @classmethod
+ def _get_gateway_from_tempest_conf(cls, ip_version):
+ """Return the first subnet gateway for the configured CIDR."""
+ if ip_version == 4:
+ cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
+ mask_bits = CONF.network.tenant_network_mask_bits
+ elif ip_version == 6:
+ cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
+ mask_bits = CONF.network.tenant_network_v6_mask_bits
+
+ if mask_bits >= cidr.prefixlen:
+ return netaddr.IPAddress(cidr) + 1
+ else:
+ for subnet in cidr.subnet(mask_bits):
+ return netaddr.IPAddress(subnet) + 1
+
+ @classmethod
+ def _get_allocation_pools_from_gateway(cls, ip_version):
+ """Return the allocation pool range for the subnet of the given gateway."""
+ gateway = cls._get_gateway_from_tempest_conf(ip_version)
+ return [{'start': str(gateway + 2), 'end': str(gateway + 3)}]
+
+ def subnet_dict(self, include_keys):
+ """Return a subnet dict which has include_keys and their corresponding
+ value from self._subnet_data
+ """
+ return dict((key, self._subnet_data[self._ip_version][key])
+ for key in include_keys)
+
+ def _compare_resource_attrs(self, actual, expected):
+ exclude_keys = set(actual).symmetric_difference(expected)
+ self.assertThat(actual, custom_matchers.MatchesDictExceptForKeys(
+ expected, exclude_keys))
+
+ def _create_verify_delete_subnet(self, cidr=None, mask_bits=None,
+ **kwargs):
+ network = self.create_network()
+ net_id = network['id']
+ gateway = kwargs.pop('gateway', None)
+ subnet = self.create_subnet(network, gateway, cidr, mask_bits,
+ **kwargs)
+ compare_args_full = dict(gateway_ip=gateway, cidr=cidr,
+ mask_bits=mask_bits, **kwargs)
+ compare_args = dict((k, v) for k, v in compare_args_full.iteritems()
+ if v is not None)
+
+ if 'dns_nameservers' in set(subnet).intersection(compare_args):
+ self.assertEqual(sorted(compare_args['dns_nameservers']),
+ sorted(subnet['dns_nameservers']))
+ del subnet['dns_nameservers'], compare_args['dns_nameservers']
+
+ self._compare_resource_attrs(subnet, compare_args)
+ self.client.delete_network(net_id)
+ self.networks.pop()
+ self.subnets.pop()
@test.attr(type='smoke')
def test_create_update_delete_network_subnet(self):
# Create a network
name = data_utils.rand_name('network-')
- _, body = self.client.create_network(name=name)
- network = body['network']
+ network = self.create_network(network_name=name)
net_id = network['id']
self.assertEqual('ACTIVE', network['status'])
# Verify network update
@@ -87,11 +169,6 @@ class NetworksTestJSON(base.BaseNetworkTest):
_, body = self.client.update_subnet(subnet_id, name=new_name)
updated_subnet = body['subnet']
self.assertEqual(updated_subnet['name'], new_name)
- # Delete subnet and network
- _, body = self.client.delete_subnet(subnet_id)
- # Remove subnet from cleanup list
- self.subnets.pop()
- _, body = self.client.delete_network(net_id)
@test.attr(type='smoke')
def test_show_network(self):
@@ -204,32 +281,65 @@ class NetworksTestJSON(base.BaseNetworkTest):
@test.attr(type='smoke')
def test_create_delete_subnet_with_gw(self):
- gateway = '10.100.0.13'
- name = data_utils.rand_name('network-')
- _, body = self.client.create_network(name=name)
- network = body['network']
- net_id = network['id']
- subnet = self.create_subnet(network, gateway)
- # Verifies Subnet GW in IPv4
- self.assertEqual(subnet['gateway_ip'], gateway)
- # Delete network and subnet
- self.client.delete_network(net_id)
- self.subnets.pop()
+ self._create_verify_delete_subnet(
+ **self.subnet_dict(['gateway']))
@test.attr(type='smoke')
- def test_create_delete_subnet_without_gw(self):
- net = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
- gateway_ip = str(netaddr.IPAddress(net.first + 1))
- name = data_utils.rand_name('network-')
- _, body = self.client.create_network(name=name)
- network = body['network']
- net_id = network['id']
- subnet = self.create_subnet(network)
- # Verifies Subnet GW in IPv4
- self.assertEqual(subnet['gateway_ip'], gateway_ip)
- # Delete network and subnet
- self.client.delete_network(net_id)
- self.subnets.pop()
+ def test_create_delete_subnet_with_allocation_pools(self):
+ self._create_verify_delete_subnet(
+ **self.subnet_dict(['allocation_pools']))
+
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_with_gw_and_allocation_pools(self):
+ self._create_verify_delete_subnet(**self.subnet_dict(
+ ['gateway', 'allocation_pools']))
+
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_with_host_routes_and_dns_nameservers(self):
+ self._create_verify_delete_subnet(
+ **self.subnet_dict(['host_routes', 'dns_nameservers']))
+
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_with_dhcp_enabled(self):
+ self._create_verify_delete_subnet(enable_dhcp=True)
+
+ @test.attr(type='smoke')
+ def test_update_subnet_gw_dns_host_routes_dhcp(self):
+ network = self.create_network()
+
+ subnet = self.create_subnet(
+ network, **self.subnet_dict(['gateway', 'host_routes',
+ 'dns_nameservers',
+ 'allocation_pools']))
+ subnet_id = subnet['id']
+ new_gateway = str(netaddr.IPAddress(
+ self._subnet_data[self._ip_version]['gateway']) + 1)
+ # Verify subnet update
+ new_host_routes = self._subnet_data[self._ip_version][
+ 'new_host_routes']
+
+ new_dns_nameservers = self._subnet_data[self._ip_version][
+ 'new_dns_nameservers']
+ kwargs = {'host_routes': new_host_routes,
+ 'dns_nameservers': new_dns_nameservers,
+ 'gateway_ip': new_gateway, 'enable_dhcp': True}
+
+ new_name = "New_subnet"
+ _, body = self.client.update_subnet(subnet_id, name=new_name,
+ **kwargs)
+ updated_subnet = body['subnet']
+ kwargs['name'] = new_name
+ self.assertEqual(sorted(updated_subnet['dns_nameservers']),
+ sorted(kwargs['dns_nameservers']))
+ del subnet['dns_nameservers'], kwargs['dns_nameservers']
+
+ self._compare_resource_attrs(updated_subnet, kwargs)
+
+ @test.attr(type='smoke')
+ def test_create_delete_subnet_all_attributes(self):
+ self._create_verify_delete_subnet(
+ enable_dhcp=True,
+ **self.subnet_dict(['gateway', 'host_routes', 'dns_nameservers']))
class NetworksTestXML(NetworksTestJSON):
@@ -376,51 +486,30 @@ class NetworksIpV6TestJSON(NetworksTestJSON):
net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
gateway = str(netaddr.IPAddress(net.first + 2))
name = data_utils.rand_name('network-')
- _, body = self.client.create_network(name=name)
- network = body['network']
- net_id = network['id']
+ network = self.create_network(network_name=name)
subnet = self.create_subnet(network, gateway)
# Verifies Subnet GW in IPv6
self.assertEqual(subnet['gateway_ip'], gateway)
- # Delete network and subnet
- self.client.delete_network(net_id)
- self.subnets.pop()
@test.attr(type='smoke')
def test_create_delete_subnet_without_gw(self):
net = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
gateway_ip = str(netaddr.IPAddress(net.first + 1))
name = data_utils.rand_name('network-')
- _, body = self.client.create_network(name=name)
- network = body['network']
- net_id = network['id']
+ network = self.create_network(network_name=name)
subnet = self.create_subnet(network)
# Verifies Subnet GW in IPv6
self.assertEqual(subnet['gateway_ip'], gateway_ip)
- # Delete network and subnet
- _, body = self.client.delete_network(net_id)
- self.subnets.pop()
@testtools.skipUnless(CONF.network_feature_enabled.ipv6_subnet_attributes,
"IPv6 extended attributes for subnets not "
"available")
@test.attr(type='smoke')
def test_create_delete_subnet_with_v6_attributes(self):
- name = data_utils.rand_name('network-')
- _, body = self.client.create_network(name=name)
- network = body['network']
- net_id = network['id']
- subnet = self.create_subnet(network,
- gateway='fe80::1',
- ipv6_ra_mode='slaac',
- ipv6_address_mode='slaac')
- # Verifies Subnet GW in IPv6
- self.assertEqual(subnet['gateway_ip'], 'fe80::1')
- self.assertEqual(subnet['ipv6_ra_mode'], 'slaac')
- self.assertEqual(subnet['ipv6_address_mode'], 'slaac')
- # Delete network and subnet
- self.client.delete_network(net_id)
- self.subnets.pop()
+ self._create_verify_delete_subnet(
+ gateway=self._subnet_data[self._ip_version]['gateway'],
+ ipv6_ra_mode='slaac',
+ ipv6_address_mode='slaac')
class NetworksIpV6TestXML(NetworksIpV6TestJSON):
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index 9764b4da7..e20b58e8c 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -17,11 +17,15 @@ import six
from tempest.api.network import base_security_groups as base
from tempest.common.utils import data_utils
+from tempest import config
from tempest import test
+CONF = config.CONF
+
class SecGroupTest(base.BaseSecGroupTest):
_interface = 'json'
+ _tenant_network_cidr = CONF.network.tenant_network_cidr
@classmethod
def resource_setup(cls):
@@ -30,6 +34,40 @@ class SecGroupTest(base.BaseSecGroupTest):
msg = "security-group extension not enabled."
raise cls.skipException(msg)
+ def _create_verify_security_group_rule(self, sg_id, direction,
+ ethertype, protocol,
+ port_range_min,
+ port_range_max,
+ remote_group_id=None,
+ remote_ip_prefix=None):
+ # Create Security Group rule with the input params and validate
+ # that SG rule is created with the same parameters.
+ resp, rule_create_body = self.client.create_security_group_rule(
+ security_group_id=sg_id,
+ direction=direction,
+ ethertype=ethertype,
+ protocol=protocol,
+ port_range_min=port_range_min,
+ port_range_max=port_range_max,
+ remote_group_id=remote_group_id,
+ remote_ip_prefix=remote_ip_prefix
+ )
+
+ sec_group_rule = rule_create_body['security_group_rule']
+ self.addCleanup(self._delete_security_group_rule,
+ sec_group_rule['id'])
+
+ expected = {'direction': direction, 'protocol': protocol,
+ 'ethertype': ethertype, 'port_range_min': port_range_min,
+ 'port_range_max': port_range_max,
+ 'remote_group_id': remote_group_id,
+ 'remote_ip_prefix': remote_ip_prefix}
+ for key, value in six.iteritems(expected):
+ self.assertEqual(value, sec_group_rule[key],
+ "Field %s of the created security group "
+ "rule does not match with %s." %
+ (key, value))
+
@test.attr(type='smoke')
def test_list_security_groups(self):
# Verify the that security group belonging to tenant exist in list
@@ -80,7 +118,8 @@ class SecGroupTest(base.BaseSecGroupTest):
_, rule_create_body = self.client.create_security_group_rule(
security_group_id=group_create_body['security_group']['id'],
protocol=protocol,
- direction='ingress'
+ direction='ingress',
+ ethertype=self.ethertype
)
# Show details of the created security rule
@@ -102,30 +141,93 @@ class SecGroupTest(base.BaseSecGroupTest):
@test.attr(type='smoke')
def test_create_security_group_rule_with_additional_args(self):
- # Verify creating security group rule with the following
- # arguments works: "protocol": "tcp", "port_range_max": 77,
- # "port_range_min": 77, "direction":"ingress".
- group_create_body, _ = self._create_security_group()
+ """Verify security group rule with additional arguments works.
+ direction:ingress, ethertype:[IPv4/IPv6],
+ protocol:tcp, port_range_min:77, port_range_max:77
+ """
+ group_create_body, _ = self._create_security_group()
+ sg_id = group_create_body['security_group']['id']
direction = 'ingress'
protocol = 'tcp'
port_range_min = 77
port_range_max = 77
- _, rule_create_body = self.client.create_security_group_rule(
- security_group_id=group_create_body['security_group']['id'],
- direction=direction,
- protocol=protocol,
- port_range_min=port_range_min,
- port_range_max=port_range_max
- )
+ self._create_verify_security_group_rule(sg_id, direction,
+ self.ethertype, protocol,
+ port_range_min,
+ port_range_max)
- sec_group_rule = rule_create_body['security_group_rule']
+ @test.attr(type='smoke')
+ def test_create_security_group_rule_with_icmp_type_code(self):
+ """Verify security group rule for icmp protocol works.
+
+ Specify icmp type (port_range_min) and icmp code
+ (port_range_max) with different values. A separate test case
+ is added for icmp protocol as icmp validation would be
+ different from tcp/udp.
+ """
+ group_create_body, _ = self._create_security_group()
+
+ sg_id = group_create_body['security_group']['id']
+ direction = 'ingress'
+ protocol = 'icmp'
+ icmp_type_codes = [(3, 2), (2, 3), (3, 0), (2, None)]
+ for icmp_type, icmp_code in icmp_type_codes:
+ self._create_verify_security_group_rule(sg_id, direction,
+ self.ethertype, protocol,
+ icmp_type, icmp_code)
+
+ @test.attr(type='smoke')
+ def test_create_security_group_rule_with_remote_group_id(self):
+ # Verify creating security group rule with remote_group_id works
+ sg1_body, _ = self._create_security_group()
+ sg2_body, _ = self._create_security_group()
+
+ sg_id = sg1_body['security_group']['id']
+ direction = 'ingress'
+ protocol = 'udp'
+ port_range_min = 50
+ port_range_max = 55
+ remote_id = sg2_body['security_group']['id']
+ self._create_verify_security_group_rule(sg_id, direction,
+ self.ethertype, protocol,
+ port_range_min,
+ port_range_max,
+ remote_group_id=remote_id)
- self.assertEqual(sec_group_rule['direction'], direction)
- self.assertEqual(sec_group_rule['protocol'], protocol)
- self.assertEqual(int(sec_group_rule['port_range_min']), port_range_min)
- self.assertEqual(int(sec_group_rule['port_range_max']), port_range_max)
+ @test.attr(type='smoke')
+ def test_create_security_group_rule_with_remote_ip_prefix(self):
+ # Verify creating security group rule with remote_ip_prefix works
+ sg1_body, _ = self._create_security_group()
+
+ sg_id = sg1_body['security_group']['id']
+ direction = 'ingress'
+ protocol = 'tcp'
+ port_range_min = 76
+ port_range_max = 77
+ ip_prefix = self._tenant_network_cidr
+ self._create_verify_security_group_rule(sg_id, direction,
+ self.ethertype, protocol,
+ port_range_min,
+ port_range_max,
+ remote_ip_prefix=ip_prefix)
class SecGroupTestXML(SecGroupTest):
_interface = 'xml'
+
+
+class SecGroupIPv6Test(SecGroupTest):
+ _ip_version = 6
+ _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
+
+ @classmethod
+ def resource_setup(cls):
+ if not CONF.network_feature_enabled.ipv6:
+ skip_msg = "IPv6 Tests are disabled."
+ raise cls.skipException(skip_msg)
+ super(SecGroupIPv6Test, cls).resource_setup()
+
+
+class SecGroupIPv6TestXML(SecGroupIPv6Test):
+ _interface = 'xml'
diff --git a/tempest/api/network/test_security_groups_negative.py b/tempest/api/network/test_security_groups_negative.py
index 9c6c267f2..97e4cb729 100644
--- a/tempest/api/network/test_security_groups_negative.py
+++ b/tempest/api/network/test_security_groups_negative.py
@@ -16,12 +16,16 @@
import uuid
from tempest.api.network import base_security_groups as base
+from tempest import config
from tempest import exceptions
from tempest import test
+CONF = config.CONF
+
class NegativeSecGroupTest(base.BaseSecGroupTest):
_interface = 'json'
+ _tenant_network_cidr = CONF.network.tenant_network_cidr
@classmethod
def resource_setup(cls):
@@ -60,23 +64,87 @@ class NegativeSecGroupTest(base.BaseSecGroupTest):
self.assertRaises(
exceptions.BadRequest, self.client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
- protocol=pname, direction='ingress')
+ protocol=pname, direction='ingress', ethertype=self.ethertype)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_security_group_rule_with_bad_remote_ip_prefix(self):
+ group_create_body, _ = self._create_security_group()
+
+ # Create rule with bad remote_ip_prefix
+ prefix = ['192.168.1./24', '192.168.1.1/33', 'bad_prefix', '256']
+ for remote_ip_prefix in prefix:
+ self.assertRaises(
+ exceptions.BadRequest, self.client.create_security_group_rule,
+ security_group_id=group_create_body['security_group']['id'],
+ protocol='tcp', direction='ingress', ethertype=self.ethertype,
+ remote_ip_prefix=remote_ip_prefix)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_security_group_rule_with_non_existent_remote_groupid(self):
+ group_create_body, _ = self._create_security_group()
+ non_exist_id = str(uuid.uuid4())
+
+ # Create rule with non existent remote_group_id
+ group_ids = ['bad_group_id', non_exist_id]
+ for remote_group_id in group_ids:
+ self.assertRaises(
+ exceptions.NotFound, self.client.create_security_group_rule,
+ security_group_id=group_create_body['security_group']['id'],
+ protocol='tcp', direction='ingress', ethertype=self.ethertype,
+ remote_group_id=remote_group_id)
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_security_group_rule_with_remote_ip_and_group(self):
+ sg1_body, _ = self._create_security_group()
+ sg2_body, _ = self._create_security_group()
+
+ # Create rule specifying both remote_ip_prefix and remote_group_id
+ prefix = self._tenant_network_cidr
+ self.assertRaises(
+ exceptions.BadRequest, self.client.create_security_group_rule,
+ security_group_id=sg1_body['security_group']['id'],
+ protocol='tcp', direction='ingress',
+ ethertype=self.ethertype, remote_ip_prefix=prefix,
+ remote_group_id=sg2_body['security_group']['id'])
+
+ @test.attr(type=['negative', 'gate'])
+ def test_create_security_group_rule_with_bad_ethertype(self):
+ group_create_body, _ = self._create_security_group()
+
+ # Create rule with bad ethertype
+ ethertype = 'bad_ethertype'
+ self.assertRaises(
+ exceptions.BadRequest, self.client.create_security_group_rule,
+ security_group_id=group_create_body['security_group']['id'],
+ protocol='udp', direction='ingress', ethertype=ethertype)
@test.attr(type=['negative', 'gate'])
def test_create_security_group_rule_with_invalid_ports(self):
group_create_body, _ = self._create_security_group()
- # Create rule with invalid ports
+ # Create rule for tcp protocol with invalid ports
states = [(-16, 80, 'Invalid value for port -16'),
(80, 79, 'port_range_min must be <= port_range_max'),
(80, 65536, 'Invalid value for port 65536'),
+ (None, 6, 'port_range_min must be <= port_range_max'),
(-16, 65536, 'Invalid value for port')]
for pmin, pmax, msg in states:
ex = self.assertRaises(
exceptions.BadRequest, self.client.create_security_group_rule,
security_group_id=group_create_body['security_group']['id'],
protocol='tcp', port_range_min=pmin, port_range_max=pmax,
- direction='ingress')
+ direction='ingress', ethertype=self.ethertype)
+ self.assertIn(msg, str(ex))
+
+ # Create rule for icmp protocol with invalid ports
+ states = [(1, 256, 'Invalid value for ICMP code'),
+ (300, 1, 'Invalid value for ICMP type')]
+ for pmin, pmax, msg in states:
+ ex = self.assertRaises(
+ exceptions.BadRequest, self.client.create_security_group_rule,
+ security_group_id=group_create_body['security_group']['id'],
+ protocol='icmp', port_range_min=pmin, port_range_max=pmax,
+ direction='ingress', ethertype=self.ethertype)
self.assertIn(msg, str(ex))
@test.attr(type=['negative', 'smoke'])
@@ -88,14 +156,54 @@ class NegativeSecGroupTest(base.BaseSecGroupTest):
name=name)
@test.attr(type=['negative', 'smoke'])
+ def test_create_duplicate_security_group_rule_fails(self):
+ # Create duplicate security group rule, it should fail.
+ body, _ = self._create_security_group()
+
+ min_port = 66
+ max_port = 67
+ # Create a rule with valid params
+ resp, _ = self.client.create_security_group_rule(
+ security_group_id=body['security_group']['id'],
+ direction='ingress',
+ ethertype=self.ethertype,
+ protocol='tcp',
+ port_range_min=min_port,
+ port_range_max=max_port
+ )
+
+ # Try creating the same security group rule, it should fail
+ self.assertRaises(
+ exceptions.Conflict, self.client.create_security_group_rule,
+ security_group_id=body['security_group']['id'],
+ protocol='tcp', direction='ingress', ethertype=self.ethertype,
+ port_range_min=min_port, port_range_max=max_port)
+
+ @test.attr(type=['negative', 'smoke'])
def test_create_security_group_rule_with_non_existent_security_group(self):
# Create security group rules with not existing security group.
non_existent_sg = str(uuid.uuid4())
self.assertRaises(exceptions.NotFound,
self.client.create_security_group_rule,
security_group_id=non_existent_sg,
- direction='ingress')
+ direction='ingress', ethertype=self.ethertype)
class NegativeSecGroupTestXML(NegativeSecGroupTest):
_interface = 'xml'
+
+
+class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
+ _ip_version = 6
+ _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
+
+ @classmethod
+ def resource_setup(cls):
+ if not CONF.network_feature_enabled.ipv6:
+ skip_msg = "IPv6 Tests are disabled."
+ raise cls.skipException(skip_msg)
+ super(NegativeSecGroupIPv6Test, cls).resource_setup()
+
+
+class NegativeSecGroupIPv6TestXML(NegativeSecGroupIPv6Test):
+ _interface = 'xml'
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 6a5fd3d05..2e39cf945 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -36,18 +36,12 @@ class BaseObjectTest(tempest.test.BaseTestCase):
raise cls.skipException(skip_msg)
cls.isolated_creds = isolated_creds.IsolatedCreds(
cls.__name__, network_resources=cls.network_resources)
- if CONF.compute.allow_tenant_isolation:
- # Get isolated creds for normal user
- cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
- # Get isolated creds for admin user
- cls.os_admin = clients.Manager(
- cls.isolated_creds.get_admin_creds())
- # Get isolated creds for alt user
- cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
- else:
- cls.os = clients.Manager()
- cls.os_admin = clients.AdminManager()
- cls.os_alt = clients.AltManager()
+ # Get isolated creds for normal user
+ cls.os = clients.Manager(cls.isolated_creds.get_primary_creds())
+ # Get isolated creds for admin user
+ cls.os_admin = clients.Manager(cls.isolated_creds.get_admin_creds())
+ # Get isolated creds for alt user
+ cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds())
cls.object_client = cls.os.object_client
cls.container_client = cls.os.container_client
diff --git a/tempest/api/orchestration/base.py b/tempest/api/orchestration/base.py
index 0b22de596..5a586fc14 100644
--- a/tempest/api/orchestration/base.py
+++ b/tempest/api/orchestration/base.py
@@ -30,8 +30,8 @@ class BaseOrchestrationTest(tempest.test.BaseTestCase):
"""Base test case class for all Orchestration API tests."""
@classmethod
- def setUpClass(cls):
- super(BaseOrchestrationTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(BaseOrchestrationTest, cls).resource_setup()
cls.os = clients.Manager()
if not CONF.service_available.heat:
raise cls.skipException("Heat support is required")
@@ -146,11 +146,11 @@ class BaseOrchestrationTest(tempest.test.BaseTestCase):
return yaml.safe_load(f)
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
cls._clear_stacks()
cls._clear_keypairs()
cls._clear_images()
- super(BaseOrchestrationTest, cls).tearDownClass()
+ super(BaseOrchestrationTest, cls).resource_cleanup()
@staticmethod
def stack_output(stack, output_key):
diff --git a/tempest/api/orchestration/stacks/test_neutron_resources.py b/tempest/api/orchestration/stacks/test_neutron_resources.py
index ffadb1698..f1a4f85ba 100644
--- a/tempest/api/orchestration/stacks/test_neutron_resources.py
+++ b/tempest/api/orchestration/stacks/test_neutron_resources.py
@@ -30,9 +30,8 @@ LOG = logging.getLogger(__name__)
class NeutronResourcesTestJSON(base.BaseOrchestrationTest):
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(NeutronResourcesTestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(NeutronResourcesTestJSON, cls).resource_setup()
if not CONF.orchestration.image_ref:
raise cls.skipException("No image available to test")
os = clients.Manager()
diff --git a/tempest/api/orchestration/stacks/test_non_empty_stack.py b/tempest/api/orchestration/stacks/test_non_empty_stack.py
index 72ad5f5bc..759cbbec6 100644
--- a/tempest/api/orchestration/stacks/test_non_empty_stack.py
+++ b/tempest/api/orchestration/stacks/test_non_empty_stack.py
@@ -25,8 +25,8 @@ LOG = logging.getLogger(__name__)
class StacksTestJSON(base.BaseOrchestrationTest):
@classmethod
- def setUpClass(cls):
- super(StacksTestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(StacksTestJSON, cls).resource_setup()
cls.stack_name = data_utils.rand_name('heat')
template = cls.read_template('non_empty_stack')
image_id = (CONF.orchestration.image_ref or
diff --git a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
index 2f5861178..1da340ca8 100644
--- a/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
+++ b/tempest/api/orchestration/stacks/test_nova_keypair_resources.py
@@ -27,8 +27,8 @@ class NovaKeyPairResourcesYAMLTest(base.BaseOrchestrationTest):
_type = 'type'
@classmethod
- def setUpClass(cls):
- super(NovaKeyPairResourcesYAMLTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(NovaKeyPairResourcesYAMLTest, cls).resource_setup()
cls.stack_name = data_utils.rand_name('heat')
template = cls.read_template('nova_keypair', ext=cls._tpl_type)
diff --git a/tempest/api/orchestration/stacks/test_stacks.py b/tempest/api/orchestration/stacks/test_stacks.py
index 8023f2c6b..d7fbd6539 100644
--- a/tempest/api/orchestration/stacks/test_stacks.py
+++ b/tempest/api/orchestration/stacks/test_stacks.py
@@ -23,8 +23,8 @@ class StacksTestJSON(base.BaseOrchestrationTest):
empty_template = "HeatTemplateFormatVersion: '2012-12-12'\n"
@classmethod
- def setUpClass(cls):
- super(StacksTestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(StacksTestJSON, cls).resource_setup()
@test.attr(type='smoke')
def test_stack_list_responds(self):
diff --git a/tempest/api/orchestration/stacks/test_swift_resources.py b/tempest/api/orchestration/stacks/test_swift_resources.py
index d7c2a0daa..307468e75 100644
--- a/tempest/api/orchestration/stacks/test_swift_resources.py
+++ b/tempest/api/orchestration/stacks/test_swift_resources.py
@@ -26,9 +26,8 @@ CONF = config.CONF
class SwiftResourcesTestJSON(base.BaseOrchestrationTest):
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(SwiftResourcesTestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(SwiftResourcesTestJSON, cls).resource_setup()
cls.stack_name = data_utils.rand_name('heat')
template = cls.read_template('swift_basic')
os = clients.Manager()
diff --git a/tempest/api/orchestration/stacks/test_templates.py b/tempest/api/orchestration/stacks/test_templates.py
index 0d6060d2c..262c57645 100644
--- a/tempest/api/orchestration/stacks/test_templates.py
+++ b/tempest/api/orchestration/stacks/test_templates.py
@@ -26,9 +26,8 @@ Resources:
"""
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(TemplateYAMLTestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(TemplateYAMLTestJSON, cls).resource_setup()
cls.stack_name = data_utils.rand_name('heat')
cls.stack_identifier = cls.create_stack(cls.stack_name, cls.template)
cls.client.wait_for_stack_status(cls.stack_identifier,
diff --git a/tempest/api/orchestration/stacks/test_templates_negative.py b/tempest/api/orchestration/stacks/test_templates_negative.py
index b325104ad..908210732 100644
--- a/tempest/api/orchestration/stacks/test_templates_negative.py
+++ b/tempest/api/orchestration/stacks/test_templates_negative.py
@@ -30,8 +30,8 @@ Resources:
invalid_template_url = 'http://www.example.com/template.yaml'
@classmethod
- def setUpClass(cls):
- super(TemplateYAMLNegativeTestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(TemplateYAMLNegativeTestJSON, cls).resource_setup()
cls.parameters = {}
@test.attr(type=['gate', 'negative'])
diff --git a/tempest/api/orchestration/stacks/test_update.py b/tempest/api/orchestration/stacks/test_update.py
deleted file mode 100644
index 98761ac78..000000000
--- a/tempest/api/orchestration/stacks/test_update.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-from tempest.api.orchestration import base
-from tempest.common.utils import data_utils
-from tempest import test
-
-
-LOG = logging.getLogger(__name__)
-
-
-class UpdateStackTestJSON(base.BaseOrchestrationTest):
- _interface = 'json'
-
- template = '''
-heat_template_version: 2013-05-23
-resources:
- random1:
- type: OS::Heat::RandomString
-'''
- update_template = '''
-heat_template_version: 2013-05-23
-resources:
- random1:
- type: OS::Heat::RandomString
- random2:
- type: OS::Heat::RandomString
-'''
-
- def update_stack(self, stack_identifier, template):
- stack_name = stack_identifier.split('/')[0]
- self.client.update_stack(
- stack_identifier=stack_identifier,
- name=stack_name,
- template=template)
- self.client.wait_for_stack_status(stack_identifier, 'UPDATE_COMPLETE')
-
- @test.attr(type='gate')
- def test_stack_update_nochange(self):
- stack_name = data_utils.rand_name('heat')
- stack_identifier = self.create_stack(stack_name, self.template)
- self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
- expected_resources = {'random1': 'OS::Heat::RandomString'}
- self.assertEqual(expected_resources,
- self.list_resources(stack_identifier))
-
- # Update with no changes, resources should be unchanged
- self.update_stack(stack_identifier, self.template)
- self.assertEqual(expected_resources,
- self.list_resources(stack_identifier))
-
- @test.attr(type='gate')
- def test_stack_update_add_remove(self):
- stack_name = data_utils.rand_name('heat')
- stack_identifier = self.create_stack(stack_name, self.template)
- self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
- initial_resources = {'random1': 'OS::Heat::RandomString'}
- self.assertEqual(initial_resources,
- self.list_resources(stack_identifier))
-
- # Add one resource via a stack update
- self.update_stack(stack_identifier, self.update_template)
- updated_resources = {'random1': 'OS::Heat::RandomString',
- 'random2': 'OS::Heat::RandomString'}
- self.assertEqual(updated_resources,
- self.list_resources(stack_identifier))
-
- # Then remove it by updating with the original template
- self.update_stack(stack_identifier, self.template)
- self.assertEqual(initial_resources,
- self.list_resources(stack_identifier))
diff --git a/tempest/api/orchestration/stacks/test_volumes.py b/tempest/api/orchestration/stacks/test_volumes.py
index f371370a3..f47078c7c 100644
--- a/tempest/api/orchestration/stacks/test_volumes.py
+++ b/tempest/api/orchestration/stacks/test_volumes.py
@@ -26,8 +26,8 @@ LOG = logging.getLogger(__name__)
class CinderResourcesTest(base.BaseOrchestrationTest):
@classmethod
- def setUpClass(cls):
- super(CinderResourcesTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(CinderResourcesTest, cls).resource_setup()
if not CONF.service_available.cinder:
raise cls.skipException('Cinder support is required')
diff --git a/tempest/api/telemetry/base.py b/tempest/api/telemetry/base.py
index 8c2f37b4b..769c20110 100644
--- a/tempest/api/telemetry/base.py
+++ b/tempest/api/telemetry/base.py
@@ -26,11 +26,11 @@ class BaseTelemetryTest(tempest.test.BaseTestCase):
"""Base test case class for all Telemetry API tests."""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if not CONF.service_available.ceilometer:
raise cls.skipException("Ceilometer support is required")
cls.set_network_resources()
- super(BaseTelemetryTest, cls).setUpClass()
+ super(BaseTelemetryTest, cls).resource_setup()
os = cls.get_client_manager()
cls.telemetry_client = os.telemetry_client
cls.servers_client = os.servers_client
@@ -84,12 +84,12 @@ class BaseTelemetryTest(tempest.test.BaseTestCase):
pass
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
cls.cleanup_resources(cls.telemetry_client.delete_alarm, cls.alarm_ids)
cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids)
cls.cleanup_resources(cls.image_client.delete_image, cls.image_ids)
cls.clear_isolated_creds()
- super(BaseTelemetryTest, cls).tearDownClass()
+ super(BaseTelemetryTest, cls).resource_cleanup()
def await_samples(self, metric, query):
"""
diff --git a/tempest/api/telemetry/test_telemetry_alarming_api.py b/tempest/api/telemetry/test_telemetry_alarming_api.py
index 95758e8e7..b45d545f3 100644
--- a/tempest/api/telemetry/test_telemetry_alarming_api.py
+++ b/tempest/api/telemetry/test_telemetry_alarming_api.py
@@ -20,8 +20,8 @@ class TelemetryAlarmingAPITestJSON(base.BaseTelemetryTest):
_interface = 'json'
@classmethod
- def setUpClass(cls):
- super(TelemetryAlarmingAPITestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(TelemetryAlarmingAPITestJSON, cls).resource_setup()
cls.rule = {'meter_name': 'cpu_util',
'comparison_operator': 'gt',
'threshold': 80.0,
diff --git a/tempest/api/telemetry/test_telemetry_notification_api.py b/tempest/api/telemetry/test_telemetry_notification_api.py
index 9b15c517c..42e2a2daa 100644
--- a/tempest/api/telemetry/test_telemetry_notification_api.py
+++ b/tempest/api/telemetry/test_telemetry_notification_api.py
@@ -23,16 +23,15 @@ class TelemetryNotificationAPITestJSON(base.BaseTelemetryTest):
_interface = 'json'
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if CONF.telemetry.too_slow_to_test:
raise cls.skipException("Ceilometer feature for fast work mysql "
"is disabled")
- super(TelemetryNotificationAPITestJSON, cls).setUpClass()
+ super(TelemetryNotificationAPITestJSON, cls).resource_setup()
@test.attr(type="gate")
@testtools.skipIf(not CONF.service_available.nova,
"Nova is not available.")
- @test.skip_because(bug="1336755")
def test_check_nova_notification(self):
resp, body = self.create_server()
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 769f5e0b0..db2aab53c 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -25,9 +25,8 @@ class VolumeMultiBackendTest(base.BaseVolumeV1AdminTest):
_interface = "json"
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(VolumeMultiBackendTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumeMultiBackendTest, cls).resource_setup()
if not CONF.volume_feature_enabled.multi_backend:
raise cls.skipException("Cinder multi-backend feature disabled")
@@ -76,7 +75,7 @@ class VolumeMultiBackendTest(base.BaseVolumeV1AdminTest):
self.volume['id'], 'available')
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
# volumes deletion
vid_prefix = getattr(cls, 'volume_id_list_with_prefix', [])
for volume_id in vid_prefix:
@@ -93,7 +92,7 @@ class VolumeMultiBackendTest(base.BaseVolumeV1AdminTest):
for volume_type_id in volume_type_id_list:
cls.client.delete_volume_type(volume_type_id)
- super(VolumeMultiBackendTest, cls).tearDownClass()
+ super(VolumeMultiBackendTest, cls).resource_cleanup()
@test.attr(type='smoke')
def test_backend_name_reporting(self):
diff --git a/tempest/api/volume/admin/test_snapshots_actions.py b/tempest/api/volume/admin/test_snapshots_actions.py
index abbe1e999..720734b49 100644
--- a/tempest/api/volume/admin/test_snapshots_actions.py
+++ b/tempest/api/volume/admin/test_snapshots_actions.py
@@ -22,9 +22,8 @@ class SnapshotsActionsTest(base.BaseVolumeV1AdminTest):
_interface = "json"
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(SnapshotsActionsTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(SnapshotsActionsTest, cls).resource_setup()
cls.client = cls.snapshots_client
# Create admin volume client
@@ -46,7 +45,7 @@ class SnapshotsActionsTest(base.BaseVolumeV1AdminTest):
'available')
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
# Delete the test snapshot
cls.client.delete_snapshot(cls.snapshot['id'])
cls.client.wait_for_resource_deletion(cls.snapshot['id'])
@@ -55,7 +54,7 @@ class SnapshotsActionsTest(base.BaseVolumeV1AdminTest):
cls.volumes_client.delete_volume(cls.volume['id'])
cls.volumes_client.wait_for_resource_deletion(cls.volume['id'])
- super(SnapshotsActionsTest, cls).tearDownClass()
+ super(SnapshotsActionsTest, cls).resource_cleanup()
def tearDown(self):
# Set snapshot's status to available after test
diff --git a/tempest/api/volume/admin/test_volume_quotas.py b/tempest/api/volume/admin/test_volume_quotas.py
index fa3b667aa..ece4299c0 100644
--- a/tempest/api/volume/admin/test_volume_quotas.py
+++ b/tempest/api/volume/admin/test_volume_quotas.py
@@ -27,8 +27,8 @@ class VolumeQuotasAdminTestJSON(base.BaseVolumeV1AdminTest):
force_tenant_isolation = True
@classmethod
- def setUpClass(cls):
- super(VolumeQuotasAdminTestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumeQuotasAdminTestJSON, cls).resource_setup()
cls.admin_volume_client = cls.os_adm.volumes_client
cls.demo_tenant_id = cls.isolated_creds.get_primary_creds().tenant_id
@@ -71,7 +71,8 @@ class VolumeQuotasAdminTestJSON(base.BaseVolumeV1AdminTest):
@test.attr(type='gate')
def test_show_quota_usage(self):
- _, quota_usage = self.quotas_client.get_quota_usage(self.adm_tenant)
+ _, quota_usage = self.quotas_client.get_quota_usage(
+ self.os_adm.credentials.tenant_name)
for key in QUOTA_KEYS:
self.assertIn(key, quota_usage)
for usage_key in QUOTA_USAGE_KEYS:
diff --git a/tempest/api/volume/admin/test_volume_quotas_negative.py b/tempest/api/volume/admin/test_volume_quotas_negative.py
index 515024f99..60a0adbf8 100644
--- a/tempest/api/volume/admin/test_volume_quotas_negative.py
+++ b/tempest/api/volume/admin/test_volume_quotas_negative.py
@@ -23,9 +23,8 @@ class VolumeQuotasNegativeTestJSON(base.BaseVolumeV1AdminTest):
force_tenant_isolation = True
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(VolumeQuotasNegativeTestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumeQuotasNegativeTestJSON, cls).resource_setup()
demo_user = cls.isolated_creds.get_primary_creds()
cls.demo_tenant_id = demo_user.tenant_id
cls.shared_quota_set = {'gigabytes': 3, 'volumes': 1, 'snapshots': 1}
diff --git a/tempest/api/volume/admin/test_volume_services.py b/tempest/api/volume/admin/test_volume_services.py
index 4a68e05c6..782014828 100644
--- a/tempest/api/volume/admin/test_volume_services.py
+++ b/tempest/api/volume/admin/test_volume_services.py
@@ -25,8 +25,8 @@ class VolumesServicesTestJSON(base.BaseVolumeV1AdminTest):
_interface = "json"
@classmethod
- def setUpClass(cls):
- super(VolumesServicesTestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesServicesTestJSON, cls).resource_setup()
cls.client = cls.os_adm.volume_services_client
_, cls.services = cls.client.list_services()
cls.host_name = cls.services[0]['host']
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs.py b/tempest/api/volume/admin/test_volume_types_extra_specs.py
index c682866a0..2d72dd259 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs.py
@@ -22,15 +22,15 @@ class VolumeTypesExtraSpecsTest(base.BaseVolumeV1AdminTest):
_interface = "json"
@classmethod
- def setUpClass(cls):
- super(VolumeTypesExtraSpecsTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumeTypesExtraSpecsTest, cls).resource_setup()
vol_type_name = data_utils.rand_name('Volume-type-')
_, cls.volume_type = cls.client.create_volume_type(vol_type_name)
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
cls.client.delete_volume_type(cls.volume_type['id'])
- super(VolumeTypesExtraSpecsTest, cls).tearDownClass()
+ super(VolumeTypesExtraSpecsTest, cls).resource_cleanup()
@test.attr(type='smoke')
def test_volume_type_extra_specs_list(self):
diff --git a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
index ff4f11347..f3eee0061 100644
--- a/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
+++ b/tempest/api/volume/admin/test_volume_types_extra_specs_negative.py
@@ -25,8 +25,8 @@ class ExtraSpecsNegativeTest(base.BaseVolumeV1AdminTest):
_interface = 'json'
@classmethod
- def setUpClass(cls):
- super(ExtraSpecsNegativeTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(ExtraSpecsNegativeTest, cls).resource_setup()
vol_type_name = data_utils.rand_name('Volume-type-')
cls.extra_specs = {"spec1": "val1"}
_, cls.volume_type = cls.client.create_volume_type(
@@ -34,9 +34,9 @@ class ExtraSpecsNegativeTest(base.BaseVolumeV1AdminTest):
extra_specs=cls.extra_specs)
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
cls.client.delete_volume_type(cls.volume_type['id'])
- super(ExtraSpecsNegativeTest, cls).tearDownClass()
+ super(ExtraSpecsNegativeTest, cls).resource_cleanup()
@test.attr(type='gate')
def test_update_no_body(self):
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index d6db1df60..f85718bce 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -22,9 +22,8 @@ class VolumesActionsTest(base.BaseVolumeV1AdminTest):
_interface = "json"
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(VolumesActionsTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesActionsTest, cls).resource_setup()
cls.client = cls.volumes_client
# Create admin volume client
@@ -38,12 +37,12 @@ class VolumesActionsTest(base.BaseVolumeV1AdminTest):
cls.client.wait_for_volume_status(cls.volume['id'], 'available')
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
# Delete the test volume
cls.client.delete_volume(cls.volume['id'])
cls.client.wait_for_resource_deletion(cls.volume['id'])
- super(VolumesActionsTest, cls).tearDownClass()
+ super(VolumesActionsTest, cls).resource_cleanup()
def _reset_volume_status(self, volume_id, status):
# Reset the volume status
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 3699e9c1f..8b90b0712 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -27,9 +27,8 @@ class VolumesBackupsTest(base.BaseVolumeV1AdminTest):
_interface = "json"
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(VolumesBackupsTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesBackupsTest, cls).resource_setup()
if not CONF.volume_feature_enabled.backup:
raise cls.skipException("Cinder backup feature disabled")
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 43f48ff9f..78fd61d4d 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -32,9 +32,9 @@ class BaseVolumeTest(tempest.test.BaseTestCase):
_interface = 'json'
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
cls.set_network_resources()
- super(BaseVolumeTest, cls).setUpClass()
+ super(BaseVolumeTest, cls).resource_setup()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
@@ -69,6 +69,7 @@ class BaseVolumeTest(tempest.test.BaseTestCase):
if not CONF.volume_feature_enabled.api_v2:
msg = "Volume API v2 is disabled"
raise cls.skipException(msg)
+ cls.snapshots_client = cls.os.snapshots_v2_client
cls.volumes_client = cls.os.volumes_v2_client
cls.volumes_extension_client = cls.os.volumes_v2_extension_client
cls.availability_zone_client = (
@@ -82,11 +83,11 @@ class BaseVolumeTest(tempest.test.BaseTestCase):
raise exceptions.InvalidConfiguration(message=msg)
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
cls.clear_snapshots()
cls.clear_volumes()
cls.clear_isolated_creds()
- super(BaseVolumeTest, cls).tearDownClass()
+ super(BaseVolumeTest, cls).resource_cleanup()
@classmethod
def create_volume(cls, size=1, **kwargs):
@@ -151,22 +152,17 @@ class BaseVolumeV1Test(BaseVolumeTest):
class BaseVolumeAdminTest(BaseVolumeTest):
"""Base test case class for all Volume Admin API tests."""
@classmethod
- def setUpClass(cls):
- super(BaseVolumeAdminTest, cls).setUpClass()
- cls.adm_user = CONF.identity.admin_username
- cls.adm_pass = CONF.identity.admin_password
- cls.adm_tenant = CONF.identity.admin_tenant_name
- if not all((cls.adm_user, cls.adm_pass, cls.adm_tenant)):
- msg = ("Missing Volume Admin API credentials "
- "in configuration.")
+ def resource_setup(cls):
+ super(BaseVolumeAdminTest, cls).resource_setup()
+
+ try:
+ cls.adm_creds = cls.isolated_creds.get_admin_creds()
+ cls.os_adm = clients.Manager(
+ credentials=cls.adm_creds, interface=cls._interface)
+ except NotImplementedError:
+ msg = "Missing Volume Admin API credentials in configuration."
raise cls.skipException(msg)
- if CONF.compute.allow_tenant_isolation:
- cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
- interface=cls._interface)
- else:
- cls.os_adm = clients.AdminManager(interface=cls._interface)
-
cls.qos_specs = []
cls.client = cls.os_adm.volume_types_client
@@ -186,9 +182,9 @@ class BaseVolumeAdminTest(BaseVolumeTest):
cls.volume_qos_client = cls.os_adm.volume_qos_v2_client
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
cls.clear_qos_specs()
- super(BaseVolumeAdminTest, cls).tearDownClass()
+ super(BaseVolumeAdminTest, cls).resource_cleanup()
@classmethod
def create_test_qos_specs(cls, name=None, consumer=None, **kwargs):
diff --git a/tempest/api/volume/test_availability_zone.py b/tempest/api/volume/test_availability_zone.py
index c026f7120..648bd8b53 100644
--- a/tempest/api/volume/test_availability_zone.py
+++ b/tempest/api/volume/test_availability_zone.py
@@ -24,8 +24,8 @@ class AvailabilityZoneV2TestJSON(base.BaseVolumeTest):
"""
@classmethod
- def setUpClass(cls):
- super(AvailabilityZoneV2TestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(AvailabilityZoneV2TestJSON, cls).resource_setup()
cls.client = cls.availability_zone_client
@test.attr(type='gate')
diff --git a/tempest/api/volume/test_qos.py b/tempest/api/volume/test_qos.py
index 8b6ba49d3..a719b7964 100644
--- a/tempest/api/volume/test_qos.py
+++ b/tempest/api/volume/test_qos.py
@@ -25,9 +25,8 @@ class QosSpecsV2TestJSON(base.BaseVolumeAdminTest):
"""
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(QosSpecsV2TestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(QosSpecsV2TestJSON, cls).resource_setup()
# Create admin qos client
# Create a test shared qos-specs for tests
cls.qos_name = utils.rand_name(cls.__name__ + '-QoS')
diff --git a/tempest/api/volume/test_snapshot_metadata.py b/tempest/api/volume/test_snapshot_metadata.py
index 94ba09519..777d3de80 100644
--- a/tempest/api/volume/test_snapshot_metadata.py
+++ b/tempest/api/volume/test_snapshot_metadata.py
@@ -17,13 +17,11 @@ from tempest.api.volume import base
from tempest import test
-class SnapshotMetadataTest(base.BaseVolumeV1Test):
- _interface = "json"
+class SnapshotV2MetadataTestJSON(base.BaseVolumeTest):
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(SnapshotMetadataTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(SnapshotV2MetadataTestJSON, cls).resource_setup()
cls.client = cls.snapshots_client
# Create a volume
cls.volume = cls.create_volume()
@@ -34,7 +32,7 @@ class SnapshotMetadataTest(base.BaseVolumeV1Test):
def tearDown(self):
# Update the metadata to {}
self.client.update_snapshot_metadata(self.snapshot_id, {})
- super(SnapshotMetadataTest, self).tearDown()
+ super(SnapshotV2MetadataTestJSON, self).tearDown()
@test.attr(type='gate')
def test_create_get_delete_snapshot_metadata(self):
@@ -100,5 +98,13 @@ class SnapshotMetadataTest(base.BaseVolumeV1Test):
self.assertEqual(expect, body)
-class SnapshotMetadataTestXML(SnapshotMetadataTest):
+class SnapshotV2MetadataTestXML(SnapshotV2MetadataTestJSON):
+ _interface = "xml"
+
+
+class SnapshotV1MetadataTestJSON(SnapshotV2MetadataTestJSON):
+ _api_version = 1
+
+
+class SnapshotV1MetadataTestXML(SnapshotV1MetadataTestJSON):
_interface = "xml"
diff --git a/tempest/api/volume/test_volume_metadata.py b/tempest/api/volume/test_volume_metadata.py
index ac760aa94..2ec866750 100644
--- a/tempest/api/volume/test_volume_metadata.py
+++ b/tempest/api/volume/test_volume_metadata.py
@@ -22,9 +22,8 @@ from tempest import test
class VolumesV2MetadataTest(base.BaseVolumeTest):
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(VolumesV2MetadataTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesV2MetadataTest, cls).resource_setup()
# Create a volume
cls.volume = cls.create_volume()
cls.volume_id = cls.volume['id']
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 4a6ba03ad..fe217c19e 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -26,19 +26,20 @@ CONF = config.CONF
class VolumesV2TransfersTest(base.BaseVolumeTest):
@classmethod
- def setUpClass(cls):
- super(VolumesV2TransfersTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesV2TransfersTest, cls).resource_setup()
# Add another tenant to test volume-transfer
- if CONF.compute.allow_tenant_isolation:
- cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds(),
- interface=cls._interface)
- # Add admin tenant to cleanup resources
- cls.os_adm = clients.Manager(cls.isolated_creds.get_admin_creds(),
- interface=cls._interface)
- else:
- cls.os_alt = clients.AltManager()
- cls.os_adm = clients.ComputeAdminManager(interface=cls._interface)
+ cls.os_alt = clients.Manager(cls.isolated_creds.get_alt_creds(),
+ interface=cls._interface)
+ # Add admin tenant to cleanup resources
+ try:
+ creds = cls.isolated_creds.get_admin_creds()
+ cls.os_adm = clients.Manager(
+ credentials=creds, interface=cls._interface)
+ except NotImplementedError:
+ msg = "Missing Volume Admin API credentials in configuration."
+ raise cls.skipException(msg)
cls.client = cls.volumes_client
cls.alt_client = cls.os_alt.volumes_client
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index c87878d87..a9bc70afb 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -24,9 +24,8 @@ CONF = config.CONF
class VolumesV2ActionsTest(base.BaseVolumeTest):
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(VolumesV2ActionsTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesV2ActionsTest, cls).resource_setup()
cls.client = cls.volumes_client
cls.image_client = cls.os.image_client
@@ -45,12 +44,12 @@ class VolumesV2ActionsTest(base.BaseVolumeTest):
self.image_client.wait_for_resource_deletion(image_id)
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
# Delete the test instance
cls.servers_client.delete_server(cls.server['id'])
cls.servers_client.wait_for_server_termination(cls.server['id'])
- super(VolumesV2ActionsTest, cls).tearDownClass()
+ super(VolumesV2ActionsTest, cls).resource_cleanup()
@test.stresstest(class_setup_per='process')
@test.attr(type='smoke')
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index c9e80aac4..edd497cdd 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -23,9 +23,8 @@ CONF = config.CONF
class VolumesV2ExtendTest(base.BaseVolumeTest):
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(VolumesV2ExtendTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesV2ExtendTest, cls).resource_setup()
cls.client = cls.volumes_client
@test.attr(type='gate')
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index a346a1727..033beb4a1 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -26,8 +26,8 @@ CONF = config.CONF
class VolumesV2GetTest(base.BaseVolumeTest):
@classmethod
- def setUpClass(cls):
- super(VolumesV2GetTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesV2GetTest, cls).resource_setup()
cls.client = cls.volumes_client
cls.name_field = cls.special_fields['name_field']
diff --git a/tempest/api/volume/test_volumes_list.py b/tempest/api/volume/test_volumes_list.py
index 272a41af4..016e9abf3 100644
--- a/tempest/api/volume/test_volumes_list.py
+++ b/tempest/api/volume/test_volumes_list.py
@@ -55,9 +55,8 @@ class VolumesV2ListTestJSON(base.BaseVolumeTest):
[str_vol(v) for v in fetched_list]))
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(VolumesV2ListTestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesV2ListTestJSON, cls).resource_setup()
cls.client = cls.volumes_client
cls.name = cls.VOLUME_FIELDS[1]
@@ -72,12 +71,12 @@ class VolumesV2ListTestJSON(base.BaseVolumeTest):
cls.volume_id_list.append(volume['id'])
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
# Delete the created volumes
for volid in cls.volume_id_list:
cls.client.delete_volume(volid)
cls.client.wait_for_resource_deletion(volid)
- super(VolumesV2ListTestJSON, cls).tearDownClass()
+ super(VolumesV2ListTestJSON, cls).resource_cleanup()
def _list_by_param_value_and_assert(self, params, with_detail=False):
"""
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index 5f0cffa01..2b43c63e9 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -24,9 +24,8 @@ from tempest import test
class VolumesV2NegativeTest(base.BaseVolumeTest):
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(VolumesV2NegativeTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesV2NegativeTest, cls).resource_setup()
cls.client = cls.volumes_client
cls.name_field = cls.special_fields['name_field']
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 7db1ef160..78df1dfd9 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -20,21 +20,18 @@ LOG = logging.getLogger(__name__)
CONF = config.CONF
-class VolumesSnapshotTest(base.BaseVolumeV1Test):
- _interface = "json"
+class VolumesV2SnapshotTestJSON(base.BaseVolumeTest):
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(VolumesSnapshotTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesV2SnapshotTestJSON, cls).resource_setup()
cls.volume_origin = cls.create_volume()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
- @classmethod
- def tearDownClass(cls):
- super(VolumesSnapshotTest, cls).tearDownClass()
+ cls.name_field = cls.special_fields['name_field']
+ cls.descrip_field = cls.special_fields['descrip_field']
def _detach(self, volume_id):
"""Detach volume."""
@@ -90,8 +87,8 @@ class VolumesSnapshotTest(base.BaseVolumeV1Test):
def test_snapshot_create_get_list_update_delete(self):
# Create a snapshot
s_name = data_utils.rand_name('snap')
- snapshot = self.create_snapshot(self.volume_origin['id'],
- display_name=s_name)
+ params = {self.name_field: s_name}
+ snapshot = self.create_snapshot(self.volume_origin['id'], **params)
# Get the snap and check for some of its details
_, snap_get = self.snapshots_client.get_snapshot(snapshot['id'])
@@ -100,26 +97,26 @@ class VolumesSnapshotTest(base.BaseVolumeV1Test):
"Referred volume origin mismatch")
# Compare also with the output from the list action
- tracking_data = (snapshot['id'], snapshot['display_name'])
+ tracking_data = (snapshot['id'], snapshot[self.name_field])
_, snaps_list = self.snapshots_client.list_snapshots()
- snaps_data = [(f['id'], f['display_name']) for f in snaps_list]
+ snaps_data = [(f['id'], f[self.name_field]) for f in snaps_list]
self.assertIn(tracking_data, snaps_data)
# Updates snapshot with new values
new_s_name = data_utils.rand_name('new-snap')
new_desc = 'This is the new description of snapshot.'
+ params = {self.name_field: new_s_name,
+ self.descrip_field: new_desc}
_, update_snapshot = \
- self.snapshots_client.update_snapshot(snapshot['id'],
- display_name=new_s_name,
- display_description=new_desc)
+ self.snapshots_client.update_snapshot(snapshot['id'], **params)
# Assert response body for update_snapshot method
- self.assertEqual(new_s_name, update_snapshot['display_name'])
- self.assertEqual(new_desc, update_snapshot['display_description'])
+ self.assertEqual(new_s_name, update_snapshot[self.name_field])
+ self.assertEqual(new_desc, update_snapshot[self.descrip_field])
# Assert response body for get_snapshot method
_, updated_snapshot = \
self.snapshots_client.get_snapshot(snapshot['id'])
- self.assertEqual(new_s_name, updated_snapshot['display_name'])
- self.assertEqual(new_desc, updated_snapshot['display_description'])
+ self.assertEqual(new_s_name, updated_snapshot[self.name_field])
+ self.assertEqual(new_desc, updated_snapshot[self.descrip_field])
# Delete the snapshot
self.snapshots_client.delete_snapshot(snapshot['id'])
@@ -131,11 +128,11 @@ class VolumesSnapshotTest(base.BaseVolumeV1Test):
"""list snapshots with params."""
# Create a snapshot
display_name = data_utils.rand_name('snap')
- snapshot = self.create_snapshot(self.volume_origin['id'],
- display_name=display_name)
+ params = {self.name_field: display_name}
+ snapshot = self.create_snapshot(self.volume_origin['id'], **params)
# Verify list snapshots by display_name filter
- params = {'display_name': snapshot['display_name']}
+ params = {self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params)
# Verify list snapshots by status filter
@@ -144,7 +141,7 @@ class VolumesSnapshotTest(base.BaseVolumeV1Test):
# Verify list snapshots by status and display name filter
params = {'status': 'available',
- 'display_name': snapshot['display_name']}
+ self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params)
@test.attr(type='gate')
@@ -152,18 +149,18 @@ class VolumesSnapshotTest(base.BaseVolumeV1Test):
"""list snapshot details with params."""
# Create a snapshot
display_name = data_utils.rand_name('snap')
- snapshot = self.create_snapshot(self.volume_origin['id'],
- display_name=display_name)
+ params = {self.name_field: display_name}
+ snapshot = self.create_snapshot(self.volume_origin['id'], **params)
# Verify list snapshot details by display_name filter
- params = {'display_name': snapshot['display_name']}
+ params = {self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params, with_detail=True)
# Verify list snapshot details by status filter
params = {'status': 'available'}
self._list_by_param_values_and_assert(params, with_detail=True)
# Verify list snapshot details by status and display name filter
params = {'status': 'available',
- 'display_name': snapshot['display_name']}
+ self.name_field: snapshot[self.name_field]}
self._list_by_param_values_and_assert(params, with_detail=True)
@test.attr(type='gate')
@@ -181,5 +178,13 @@ class VolumesSnapshotTest(base.BaseVolumeV1Test):
self.clear_snapshots()
-class VolumesSnapshotTestXML(VolumesSnapshotTest):
+class VolumesV2SnapshotTestXML(VolumesV2SnapshotTestJSON):
+ _interface = "xml"
+
+
+class VolumesV1SnapshotTestJSON(VolumesV2SnapshotTestJSON):
+ _api_version = 1
+
+
+class VolumesV1SnapshotTestXML(VolumesV1SnapshotTestJSON):
_interface = "xml"
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index 61aa307c4..75a62a8c5 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -21,12 +21,11 @@ from tempest import test
CONF = config.CONF
-class VolumesSnapshotNegativeTest(base.BaseVolumeV1Test):
- _interface = "json"
+class VolumesV2SnapshotNegativeTestJSON(base.BaseVolumeTest):
@classmethod
- def setUpClass(cls):
- super(VolumesSnapshotNegativeTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesV2SnapshotNegativeTestJSON, cls).resource_setup()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
@@ -48,5 +47,13 @@ class VolumesSnapshotNegativeTest(base.BaseVolumeV1Test):
None, display_name=s_name)
-class VolumesSnapshotNegativeTestXML(VolumesSnapshotNegativeTest):
+class VolumesV2SnapshotNegativeTestXML(VolumesV2SnapshotNegativeTestJSON):
+ _interface = "xml"
+
+
+class VolumesV1SnapshotNegativeTestJSON(VolumesV2SnapshotNegativeTestJSON):
+ _api_version = 1
+
+
+class VolumesV1SnapshotNegativeTestXML(VolumesV1SnapshotNegativeTestJSON):
_interface = "xml"
diff --git a/tempest/api/volume/v2/test_volumes_list.py b/tempest/api/volume/v2/test_volumes_list.py
index 3ae227d06..cc5687377 100644
--- a/tempest/api/volume/v2/test_volumes_list.py
+++ b/tempest/api/volume/v2/test_volumes_list.py
@@ -31,9 +31,8 @@ class VolumesV2ListTestJSON(base.BaseVolumeTest):
"""
@classmethod
- @test.safe_setup
- def setUpClass(cls):
- super(VolumesV2ListTestJSON, cls).setUpClass()
+ def resource_setup(cls):
+ super(VolumesV2ListTestJSON, cls).resource_setup()
cls.client = cls.volumes_client
# Create 3 test volumes
@@ -47,12 +46,12 @@ class VolumesV2ListTestJSON(base.BaseVolumeTest):
cls.volume_id_list.append(volume['id'])
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
# Delete the created volumes
for volid in cls.volume_id_list:
cls.client.delete_volume(volid)
cls.client.wait_for_resource_deletion(volid)
- super(VolumesV2ListTestJSON, cls).tearDownClass()
+ super(VolumesV2ListTestJSON, cls).resource_cleanup()
@test.attr(type='gate')
def test_volume_list_details_with_multiple_params(self):
diff --git a/tempest/api_schema/request/compute/flavors.py b/tempest/api_schema/request/compute/flavors.py
index 8fe9e3a67..adaaf270c 100644
--- a/tempest/api_schema/request/compute/flavors.py
+++ b/tempest/api_schema/request/compute/flavors.py
@@ -40,14 +40,19 @@ common_admin_flavor_create = {
"json-schema": {
"type": "object",
"properties": {
- "name": {"type": "string"},
- "ram": {"type": "integer", "minimum": 1},
- "vcpus": {"type": "integer", "minimum": 1},
- "disk": {"type": "integer"},
- "id": {"type": "integer"},
- "swap": {"type": "integer"},
- "rxtx_factor": {"type": "integer"},
- "OS-FLV-EXT-DATA:ephemeral": {"type": "integer"}
+ "flavor": {
+ "type": "object",
+ "properties": {
+ "name": {"type": "string",
+ "exclude_tests": ["gen_str_min_length"]},
+ "ram": {"type": "integer", "minimum": 1},
+ "vcpus": {"type": "integer", "minimum": 1},
+ "disk": {"type": "integer"},
+ "id": {"type": "integer",
+ "exclude_tests": ["gen_none", "gen_string"]
+ },
+ }
+ }
}
}
}
diff --git a/tempest/api_schema/response/messaging/v1/queues.py b/tempest/api_schema/response/messaging/v1/queues.py
index f0b26915f..09e014772 100644
--- a/tempest/api_schema/response/messaging/v1/queues.py
+++ b/tempest/api_schema/response/messaging/v1/queues.py
@@ -105,7 +105,9 @@ queue_stats = {
resource_schema = {
'type': 'array',
- 'items': 'string',
+ 'items': {
+ 'type': 'string'
+ },
'minItems': 1
}
diff --git a/tempest/auth.py b/tempest/auth.py
index c84ad6bb5..b1ead29ad 100644
--- a/tempest/auth.py
+++ b/tempest/auth.py
@@ -40,11 +40,9 @@ class AuthProvider(object):
Provide authentication
"""
- def __init__(self, credentials, client_type='tempest',
- interface=None):
+ def __init__(self, credentials, interface=None):
"""
:param credentials: credentials for authentication
- :param client_type: 'tempest' or 'official'
:param interface: 'json' or 'xml'. Applicable for tempest client only
"""
credentials = self._convert_credentials(credentials)
@@ -52,9 +50,8 @@ class AuthProvider(object):
self.credentials = credentials
else:
raise TypeError("Invalid credentials")
- self.client_type = client_type
self.interface = interface
- if self.client_type == 'tempest' and self.interface is None:
+ if self.interface is None:
self.interface = 'json'
self.cache = None
self.alt_auth_data = None
@@ -68,11 +65,10 @@ class AuthProvider(object):
return credentials
def __str__(self):
- return "Creds :{creds}, client type: {client_type}, interface: " \
- "{interface}, cached auth data: {cache}".format(
- creds=self.credentials, client_type=self.client_type,
- interface=self.interface, cache=self.cache
- )
+ return "Creds :{creds}, interface: {interface}, " \
+ "cached auth data: {cache}".format(
+ creds=self.credentials, interface=self.interface,
+ cache=self.cache)
@abc.abstractmethod
def _decorate_request(self, filters, method, url, headers=None, body=None,
@@ -208,9 +204,8 @@ class KeystoneAuthProvider(AuthProvider):
token_expiry_threshold = datetime.timedelta(seconds=60)
- def __init__(self, credentials, client_type='tempest', interface=None):
- super(KeystoneAuthProvider, self).__init__(credentials, client_type,
- interface)
+ def __init__(self, credentials, interface=None):
+ super(KeystoneAuthProvider, self).__init__(credentials, interface)
self.auth_client = self._auth_client()
def _decorate_request(self, filters, method, url, headers=None, body=None,
@@ -244,15 +239,12 @@ class KeystoneAuthProvider(AuthProvider):
def _get_auth(self):
# Bypasses the cache
- if self.client_type == 'tempest':
- auth_func = getattr(self.auth_client, 'get_token')
- auth_params = self._auth_params()
+ auth_func = getattr(self.auth_client, 'get_token')
+ auth_params = self._auth_params()
- # returns token, auth_data
- token, auth_data = auth_func(**auth_params)
- return token, auth_data
- else:
- raise NotImplementedError
+ # returns token, auth_data
+ token, auth_data = auth_func(**auth_params)
+ return token, auth_data
def get_token(self):
return self.auth_data[0]
@@ -263,23 +255,17 @@ class KeystoneV2AuthProvider(KeystoneAuthProvider):
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def _auth_client(self):
- if self.client_type == 'tempest':
- if self.interface == 'json':
- return json_id.TokenClientJSON()
- else:
- return xml_id.TokenClientXML()
+ if self.interface == 'json':
+ return json_id.TokenClientJSON()
else:
- raise NotImplementedError
+ return xml_id.TokenClientXML()
def _auth_params(self):
- if self.client_type == 'tempest':
- return dict(
- user=self.credentials.username,
- password=self.credentials.password,
- tenant=self.credentials.tenant_name,
- auth_data=True)
- else:
- raise NotImplementedError
+ return dict(
+ user=self.credentials.username,
+ password=self.credentials.password,
+ tenant=self.credentials.tenant_name,
+ auth_data=True)
def _fill_credentials(self, auth_data_body):
tenant = auth_data_body['token']['tenant']
@@ -350,24 +336,18 @@ class KeystoneV3AuthProvider(KeystoneAuthProvider):
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
def _auth_client(self):
- if self.client_type == 'tempest':
- if self.interface == 'json':
- return json_v3id.V3TokenClientJSON()
- else:
- return xml_v3id.V3TokenClientXML()
+ if self.interface == 'json':
+ return json_v3id.V3TokenClientJSON()
else:
- raise NotImplementedError
+ return xml_v3id.V3TokenClientXML()
def _auth_params(self):
- if self.client_type == 'tempest':
- return dict(
- user=self.credentials.username,
- password=self.credentials.password,
- tenant=self.credentials.tenant_name,
- domain=self.credentials.user_domain_name,
- auth_data=True)
- else:
- raise NotImplementedError
+ return dict(
+ user=self.credentials.username,
+ password=self.credentials.password,
+ tenant=self.credentials.tenant_name,
+ domain=self.credentials.user_domain_name,
+ auth_data=True)
def _fill_credentials(self, auth_data_body):
# project or domain, depending on the scope
diff --git a/tempest/cli/__init__.py b/tempest/cli/__init__.py
index c33589a0c..ca6d7fe6f 100644
--- a/tempest/cli/__init__.py
+++ b/tempest/cli/__init__.py
@@ -94,11 +94,11 @@ def min_client_version(*args, **kwargs):
class ClientTestBase(tempest.test.BaseTestCase):
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if not CONF.cli.enabled:
msg = "cli testing disabled"
raise cls.skipException(msg)
- super(ClientTestBase, cls).setUpClass()
+ super(ClientTestBase, cls).resource_setup()
def __init__(self, *args, **kwargs):
self.parser = tempest.cli.output_parser
diff --git a/tempest/cli/simple_read_only/compute/test_nova.py b/tempest/cli/simple_read_only/compute/test_nova.py
index 9bac7a666..6e5e07732 100644
--- a/tempest/cli/simple_read_only/compute/test_nova.py
+++ b/tempest/cli/simple_read_only/compute/test_nova.py
@@ -41,11 +41,11 @@ class SimpleReadOnlyNovaClientTest(cli.ClientTestBase):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if not CONF.service_available.nova:
msg = ("%s skipped as Nova is not available" % cls.__name__)
raise cls.skipException(msg)
- super(SimpleReadOnlyNovaClientTest, cls).setUpClass()
+ super(SimpleReadOnlyNovaClientTest, cls).resource_setup()
def test_admin_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
diff --git a/tempest/cli/simple_read_only/compute/test_nova_manage.py b/tempest/cli/simple_read_only/compute/test_nova_manage.py
index c27b12e30..cff543f51 100644
--- a/tempest/cli/simple_read_only/compute/test_nova_manage.py
+++ b/tempest/cli/simple_read_only/compute/test_nova_manage.py
@@ -36,7 +36,7 @@ class SimpleReadOnlyNovaManageTest(cli.ClientTestBase):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if not CONF.service_available.nova:
msg = ("%s skipped as Nova is not available" % cls.__name__)
raise cls.skipException(msg)
@@ -44,7 +44,7 @@ class SimpleReadOnlyNovaManageTest(cli.ClientTestBase):
msg = ("%s skipped as *-manage commands not available"
% cls.__name__)
raise cls.skipException(msg)
- super(SimpleReadOnlyNovaManageTest, cls).setUpClass()
+ super(SimpleReadOnlyNovaManageTest, cls).resource_setup()
def test_admin_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
@@ -65,20 +65,17 @@ class SimpleReadOnlyNovaManageTest(cli.ClientTestBase):
self.nova_manage('', '--version', merge_stderr=True))
def test_debug_flag(self):
- self.assertNotEqual("", self.nova_manage('flavor list',
+ self.assertNotEqual("", self.nova_manage('service list',
'--debug'))
def test_verbose_flag(self):
- self.assertNotEqual("", self.nova_manage('flavor list',
+ self.assertNotEqual("", self.nova_manage('service list',
'--verbose'))
# test actions
def test_version(self):
self.assertNotEqual("", self.nova_manage('version'))
- def test_flavor_list(self):
- self.assertNotEqual("", self.nova_manage('flavor list'))
-
def test_db_sync(self):
# make sure command doesn't error out
self.nova_manage('db sync')
diff --git a/tempest/cli/simple_read_only/data_processing/test_sahara.py b/tempest/cli/simple_read_only/data_processing/test_sahara.py
index 2c6e0e21b..751a4ad13 100644
--- a/tempest/cli/simple_read_only/data_processing/test_sahara.py
+++ b/tempest/cli/simple_read_only/data_processing/test_sahara.py
@@ -34,11 +34,11 @@ class SimpleReadOnlySaharaClientTest(cli.ClientTestBase):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if not CONF.service_available.sahara:
msg = "Skipping all Sahara cli tests because it is not available"
raise cls.skipException(msg)
- super(SimpleReadOnlySaharaClientTest, cls).setUpClass()
+ super(SimpleReadOnlySaharaClientTest, cls).resource_setup()
@test.attr(type='negative')
def test_sahara_fake_action(self):
diff --git a/tempest/cli/simple_read_only/image/test_glance.py b/tempest/cli/simple_read_only/image/test_glance.py
index 2fd821201..a9cbadb8a 100644
--- a/tempest/cli/simple_read_only/image/test_glance.py
+++ b/tempest/cli/simple_read_only/image/test_glance.py
@@ -34,11 +34,11 @@ class SimpleReadOnlyGlanceClientTest(cli.ClientTestBase):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if not CONF.service_available.glance:
msg = ("%s skipped as Glance is not available" % cls.__name__)
raise cls.skipException(msg)
- super(SimpleReadOnlyGlanceClientTest, cls).setUpClass()
+ super(SimpleReadOnlyGlanceClientTest, cls).resource_setup()
def test_glance_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
diff --git a/tempest/cli/simple_read_only/network/test_neutron.py b/tempest/cli/simple_read_only/network/test_neutron.py
index 87f6b67f9..f9f890632 100644
--- a/tempest/cli/simple_read_only/network/test_neutron.py
+++ b/tempest/cli/simple_read_only/network/test_neutron.py
@@ -35,11 +35,11 @@ class SimpleReadOnlyNeutronClientTest(cli.ClientTestBase):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if (not CONF.service_available.neutron):
msg = "Skipping all Neutron cli tests because it is not available"
raise cls.skipException(msg)
- super(SimpleReadOnlyNeutronClientTest, cls).setUpClass()
+ super(SimpleReadOnlyNeutronClientTest, cls).resource_setup()
@test.attr(type='smoke')
def test_neutron_fake_action(self):
diff --git a/tempest/cli/simple_read_only/object_storage/test_swift.py b/tempest/cli/simple_read_only/object_storage/test_swift.py
index 069a38413..a1626607b 100644
--- a/tempest/cli/simple_read_only/object_storage/test_swift.py
+++ b/tempest/cli/simple_read_only/object_storage/test_swift.py
@@ -31,11 +31,11 @@ class SimpleReadOnlySwiftClientTest(cli.ClientTestBase):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if not CONF.service_available.swift:
msg = ("%s skipped as Swift is not available" % cls.__name__)
raise cls.skipException(msg)
- super(SimpleReadOnlySwiftClientTest, cls).setUpClass()
+ super(SimpleReadOnlySwiftClientTest, cls).resource_setup()
def test_swift_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
diff --git a/tempest/cli/simple_read_only/orchestration/test_heat.py b/tempest/cli/simple_read_only/orchestration/test_heat.py
index 430cdf1ec..7d7f8c989 100644
--- a/tempest/cli/simple_read_only/orchestration/test_heat.py
+++ b/tempest/cli/simple_read_only/orchestration/test_heat.py
@@ -32,12 +32,12 @@ class SimpleReadOnlyHeatClientTest(tempest.cli.ClientTestBase):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if (not CONF.service_available.heat):
msg = ("Skipping all Heat cli tests because it is "
"not available")
raise cls.skipException(msg)
- super(SimpleReadOnlyHeatClientTest, cls).setUpClass()
+ super(SimpleReadOnlyHeatClientTest, cls).resource_setup()
cls.heat_template_path = os.path.join(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))),
'heat_templates/heat_minimal.yaml')
diff --git a/tempest/cli/simple_read_only/telemetry/test_ceilometer.py b/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
index 1d2822db9..45b793b58 100644
--- a/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
+++ b/tempest/cli/simple_read_only/telemetry/test_ceilometer.py
@@ -32,12 +32,12 @@ class SimpleReadOnlyCeilometerClientTest(cli.ClientTestBase):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if (not CONF.service_available.ceilometer):
msg = ("Skipping all Ceilometer cli tests because it is "
"not available")
raise cls.skipException(msg)
- super(SimpleReadOnlyCeilometerClientTest, cls).setUpClass()
+ super(SimpleReadOnlyCeilometerClientTest, cls).resource_setup()
def test_ceilometer_meter_list(self):
self.ceilometer('meter-list')
diff --git a/tempest/cli/simple_read_only/volume/test_cinder.py b/tempest/cli/simple_read_only/volume/test_cinder.py
index e44a57714..45f6c41a0 100644
--- a/tempest/cli/simple_read_only/volume/test_cinder.py
+++ b/tempest/cli/simple_read_only/volume/test_cinder.py
@@ -35,11 +35,11 @@ class SimpleReadOnlyCinderClientTest(cli.ClientTestBase):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if not CONF.service_available.cinder:
msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(msg)
- super(SimpleReadOnlyCinderClientTest, cls).setUpClass()
+ super(SimpleReadOnlyCinderClientTest, cls).resource_setup()
def test_cinder_fake_action(self):
self.assertRaises(exceptions.CommandFailed,
diff --git a/tempest/clients.py b/tempest/clients.py
index 8164bf3f5..2d07852a2 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -13,9 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import keystoneclient.exceptions
-import keystoneclient.v2_0.client
-
from tempest import auth
from tempest.common import rest_client
from tempest import config
@@ -189,11 +186,15 @@ from tempest.services.volume.v2.json.availability_zone_client import \
from tempest.services.volume.v2.json.extensions_client import \
ExtensionsV2ClientJSON as VolumeV2ExtensionClientJSON
from tempest.services.volume.v2.json.qos_client import QosSpecsV2ClientJSON
+from tempest.services.volume.v2.json.snapshots_client import \
+ SnapshotsV2ClientJSON
from tempest.services.volume.v2.json.volumes_client import VolumesV2ClientJSON
from tempest.services.volume.v2.xml.availability_zone_client import \
VolumeV2AvailabilityZoneClientXML
from tempest.services.volume.v2.xml.extensions_client import \
ExtensionsV2ClientXML as VolumeV2ExtensionClientXML
+from tempest.services.volume.v2.xml.snapshots_client import \
+ SnapshotsV2ClientXML
from tempest.services.volume.v2.xml.volumes_client import VolumesV2ClientXML
from tempest.services.volume.xml.admin.volume_hosts_client import \
VolumeHostsClientXML
@@ -224,7 +225,6 @@ class Manager(manager.Manager):
def __init__(self, credentials=None, interface='json', service=None):
# Set interface and client type first
self.interface = interface
- self.client_type = 'tempest'
# super cares for credentials validation
super(Manager, self).__init__(credentials=credentials)
@@ -246,6 +246,7 @@ class Manager(manager.Manager):
self.auth_provider)
self.backups_client = BackupsClientXML(self.auth_provider)
self.snapshots_client = SnapshotsClientXML(self.auth_provider)
+ self.snapshots_v2_client = SnapshotsV2ClientXML(self.auth_provider)
self.volumes_client = VolumesClientXML(self.auth_provider)
self.volumes_v2_client = VolumesV2ClientXML(self.auth_provider)
self.volume_types_client = VolumeTypesClientXML(
@@ -325,6 +326,8 @@ class Manager(manager.Manager):
self.auth_provider)
self.backups_client = BackupsClientJSON(self.auth_provider)
self.snapshots_client = SnapshotsClientJSON(self.auth_provider)
+ self.snapshots_v2_client = SnapshotsV2ClientJSON(
+ self.auth_provider)
self.volumes_client = VolumesClientJSON(self.auth_provider)
self.volumes_v2_client = VolumesV2ClientJSON(self.auth_provider)
self.volume_types_client = VolumeTypesClientJSON(
@@ -481,290 +484,3 @@ class ComputeAdminManager(Manager):
credentials=auth.get_default_credentials('compute_admin'),
interface=interface,
service=service)
-
-
-class OfficialClientManager(manager.Manager):
- """
- Manager that provides access to the official python clients for
- calling various OpenStack APIs.
- """
-
- NOVACLIENT_VERSION = '2'
- CINDERCLIENT_VERSION = '1'
- HEATCLIENT_VERSION = '1'
- IRONICCLIENT_VERSION = '1'
- SAHARACLIENT_VERSION = '1.1'
- CEILOMETERCLIENT_VERSION = '2'
-
- def __init__(self, credentials):
- # FIXME(andreaf) Auth provider for client_type 'official' is
- # not implemented yet, setting to 'tempest' for now.
- self.client_type = 'tempest'
- self.interface = None
- # super cares for credentials validation
- super(OfficialClientManager, self).__init__(credentials=credentials)
- self.baremetal_client = self._get_baremetal_client()
- self.compute_client = self._get_compute_client(credentials)
- self.identity_client = self._get_identity_client(credentials)
- self.image_client = self._get_image_client()
- self.network_client = self._get_network_client()
- self.volume_client = self._get_volume_client(credentials)
- self.object_storage_client = self._get_object_storage_client(
- credentials)
- self.orchestration_client = self._get_orchestration_client(
- credentials)
- self.data_processing_client = self._get_data_processing_client(
- credentials)
- self.ceilometer_client = self._get_ceilometer_client(
- credentials)
-
- def _get_roles(self):
- admin_credentials = auth.get_default_credentials('identity_admin')
- keystone_admin = self._get_identity_client(admin_credentials)
-
- username = self.credentials.username
- tenant_name = self.credentials.tenant_name
- user_id = keystone_admin.users.find(name=username).id
- tenant_id = keystone_admin.tenants.find(name=tenant_name).id
-
- roles = keystone_admin.roles.roles_for_user(
- user=user_id, tenant=tenant_id)
-
- return [r.name for r in roles]
-
- def _get_compute_client(self, credentials):
- # Novaclient will not execute operations for anyone but the
- # identified user, so a new client needs to be created for
- # each user that operations need to be performed for.
- if not CONF.service_available.nova:
- return None
- import novaclient.client
-
- auth_url = CONF.identity.uri
- dscv = CONF.identity.disable_ssl_certificate_validation
- region = CONF.identity.region
-
- client_args = (credentials.username, credentials.password,
- credentials.tenant_name, auth_url)
-
- # Create our default Nova client to use in testing
- service_type = CONF.compute.catalog_type
- endpoint_type = CONF.compute.endpoint_type
- return novaclient.client.Client(self.NOVACLIENT_VERSION,
- *client_args,
- service_type=service_type,
- endpoint_type=endpoint_type,
- region_name=region,
- no_cache=True,
- insecure=dscv,
- http_log_debug=True)
-
- def _get_image_client(self):
- if not CONF.service_available.glance:
- return None
- import glanceclient
- token = self.identity_client.auth_token
- region = CONF.identity.region
- endpoint_type = CONF.image.endpoint_type
- endpoint = self.identity_client.service_catalog.url_for(
- attr='region', filter_value=region,
- service_type=CONF.image.catalog_type, endpoint_type=endpoint_type)
- dscv = CONF.identity.disable_ssl_certificate_validation
- return glanceclient.Client('1', endpoint=endpoint, token=token,
- insecure=dscv)
-
- def _get_volume_client(self, credentials):
- if not CONF.service_available.cinder:
- return None
- import cinderclient.client
- auth_url = CONF.identity.uri
- region = CONF.identity.region
- endpoint_type = CONF.volume.endpoint_type
- dscv = CONF.identity.disable_ssl_certificate_validation
- return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
- credentials.username,
- credentials.password,
- credentials.tenant_name,
- auth_url,
- region_name=region,
- endpoint_type=endpoint_type,
- insecure=dscv,
- http_log_debug=True)
-
- def _get_object_storage_client(self, credentials):
- if not CONF.service_available.swift:
- return None
- import swiftclient
- auth_url = CONF.identity.uri
- # add current tenant to swift operator role group.
- admin_credentials = auth.get_default_credentials('identity_admin')
- keystone_admin = self._get_identity_client(admin_credentials)
-
- # enable test user to operate swift by adding operator role to him.
- roles = keystone_admin.roles.list()
- operator_role = CONF.object_storage.operator_role
- member_role = [role for role in roles if role.name == operator_role][0]
- # NOTE(maurosr): This is surrounded in the try-except block cause
- # neutron tests doesn't have tenant isolation.
- try:
- keystone_admin.roles.add_user_role(self.identity_client.user_id,
- member_role.id,
- self.identity_client.tenant_id)
- except keystoneclient.exceptions.Conflict:
- pass
-
- endpoint_type = CONF.object_storage.endpoint_type
- os_options = {'endpoint_type': endpoint_type}
- return swiftclient.Connection(auth_url, credentials.username,
- credentials.password,
- tenant_name=credentials.tenant_name,
- auth_version='2',
- os_options=os_options)
-
- def _get_orchestration_client(self, credentials):
- if not CONF.service_available.heat:
- return None
- import heatclient.client
-
- keystone = self._get_identity_client(credentials)
- region = CONF.identity.region
- endpoint_type = CONF.orchestration.endpoint_type
- token = keystone.auth_token
- service_type = CONF.orchestration.catalog_type
- try:
- endpoint = keystone.service_catalog.url_for(
- attr='region',
- filter_value=region,
- service_type=service_type,
- endpoint_type=endpoint_type)
- except keystoneclient.exceptions.EndpointNotFound:
- return None
- else:
- return heatclient.client.Client(self.HEATCLIENT_VERSION,
- endpoint,
- token=token,
- username=credentials.username,
- password=credentials.password)
-
- def _get_identity_client(self, credentials):
- # This identity client is not intended to check the security
- # of the identity service, so use admin credentials by default.
-
- auth_url = CONF.identity.uri
- dscv = CONF.identity.disable_ssl_certificate_validation
-
- return keystoneclient.v2_0.client.Client(
- username=credentials.username,
- password=credentials.password,
- tenant_name=credentials.tenant_name,
- auth_url=auth_url,
- insecure=dscv)
-
- def _get_baremetal_client(self):
- # ironic client is currently intended to by used by admin users
- if not CONF.service_available.ironic:
- return None
- import ironicclient.client
- roles = self._get_roles()
- if CONF.identity.admin_role not in roles:
- return None
-
- auth_url = CONF.identity.uri
- api_version = self.IRONICCLIENT_VERSION
- insecure = CONF.identity.disable_ssl_certificate_validation
- service_type = CONF.baremetal.catalog_type
- endpoint_type = CONF.baremetal.endpoint_type
- creds = {
- 'os_username': self.credentials.username,
- 'os_password': self.credentials.password,
- 'os_tenant_name': self.credentials.tenant_name
- }
-
- try:
- return ironicclient.client.get_client(
- api_version=api_version,
- os_auth_url=auth_url,
- insecure=insecure,
- os_service_type=service_type,
- os_endpoint_type=endpoint_type,
- **creds)
- except keystoneclient.exceptions.EndpointNotFound:
- return None
-
- def _get_network_client(self):
- # The intended configuration is for the network client to have
- # admin privileges and indicate for whom resources are being
- # created via a 'tenant_id' parameter. This will often be
- # preferable to authenticating as a specific user because
- # working with certain resources (public routers and networks)
- # often requires admin privileges anyway.
- if not CONF.service_available.neutron:
- return None
- import neutronclient.v2_0.client
-
- credentials = auth.get_default_credentials('identity_admin')
-
- auth_url = CONF.identity.uri
- dscv = CONF.identity.disable_ssl_certificate_validation
- endpoint_type = CONF.network.endpoint_type
-
- return neutronclient.v2_0.client.Client(
- username=credentials.username,
- password=credentials.password,
- tenant_name=credentials.tenant_name,
- endpoint_type=endpoint_type,
- auth_url=auth_url,
- insecure=dscv)
-
- def _get_data_processing_client(self, credentials):
- if not CONF.service_available.sahara:
- # Sahara isn't available
- return None
-
- import saharaclient.client
-
- endpoint_type = CONF.data_processing.endpoint_type
- catalog_type = CONF.data_processing.catalog_type
- auth_url = CONF.identity.uri
-
- client = saharaclient.client.Client(
- self.SAHARACLIENT_VERSION,
- credentials.username,
- credentials.password,
- project_name=credentials.tenant_name,
- endpoint_type=endpoint_type,
- service_type=catalog_type,
- auth_url=auth_url)
-
- return client
-
- def _get_ceilometer_client(self, credentials):
- if not CONF.service_available.ceilometer:
- return None
-
- import ceilometerclient.client
-
- keystone = self._get_identity_client(credentials)
- region = CONF.identity.region
-
- endpoint_type = CONF.telemetry.endpoint_type
- service_type = CONF.telemetry.catalog_type
- auth_url = CONF.identity.uri
-
- try:
- keystone.service_catalog.url_for(
- attr='region',
- filter_value=region,
- service_type=service_type,
- endpoint_type=endpoint_type)
- except keystoneclient.exceptions.EndpointNotFound:
- return None
- else:
- return ceilometerclient.client.get_client(
- self.CEILOMETERCLIENT_VERSION,
- os_username=credentials.username,
- os_password=credentials.password,
- os_tenant_name=credentials.tenant_name,
- os_auth_url=auth_url,
- os_service_type=service_type,
- os_endpoint_type=endpoint_type)
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
new file mode 100644
index 000000000..a305e4251
--- /dev/null
+++ b/tempest/cmd/cleanup.py
@@ -0,0 +1,300 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Dell Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility for cleaning up environment after Tempest run
+
+Runtime Arguments
+-----------------
+
+--init-saved-state: Before you can execute cleanup you must initialize
+the saved state by running it with the --init-saved-state flag
+(creating ./saved_state.json), which protects your deployment from
+cleanup deleting objects you want to keep. Typically you would run
+cleanup with --init-saved-state prior to a tempest run. If this is not
+the case saved_state.json must be edited, removing objects you want
+cleanup to delete.
+
+--dry-run: Creates a report (dry_run.json) of the tenants that will be
+cleaned up (in the "_tenants_to_clean" array), and the global objects
+that will be removed (tenants, users, flavors and images). Once
+cleanup is executed in normal mode, running it again with --dry-run
+should yield an empty report.
+
+**NOTE**: The _tenants_to_clean array in dry_run.json lists the
+tenants that cleanup will loop through and delete child objects, not
+delete the tenant itself. This may differ from the tenants array as you
+can clean the tempest and alternate tempest tenants but not delete the
+tenants themselves. This is actually the default behavior.
+
+**Normal mode**: running with no arguments, will query your deployment and
+build a list of objects to delete after filtering out the objects
+found in saved_state.json and based on the
+--preserve-tempest-conf-objects and
+--delete-tempest-conf-objects flags.
+
+By default the tempest and alternate tempest users and tenants are not
+deleted and the admin user specified in tempest.conf is never deleted.
+
+Please run with --help to see full list of options.
+"""
+import argparse
+import json
+import sys
+
+from tempest import auth
+from tempest import clients
+from tempest.cmd import cleanup_service
+from tempest import config
+from tempest.openstack.common import log as logging
+
+SAVED_STATE_JSON = "saved_state.json"
+DRY_RUN_JSON = "dry_run.json"
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class Cleanup(object):
+
+ def __init__(self):
+ self.admin_mgr = clients.AdminManager()
+ self.dry_run_data = {}
+ self.json_data = {}
+ self._init_options()
+
+ self.admin_id = ""
+ self.admin_role_id = ""
+ self.admin_tenant_id = ""
+ self._init_admin_ids()
+
+ self.admin_role_added = []
+
+ # available services
+ self.tenant_services = cleanup_service.get_tenant_cleanup_services()
+ self.global_services = cleanup_service.get_global_cleanup_services()
+ cleanup_service.init_conf()
+
+ def run(self):
+ opts = self.options
+ if opts.init_saved_state:
+ self._init_state()
+ return
+
+ self._load_json()
+ self._cleanup()
+
+ def _cleanup(self):
+ LOG.debug("Begin cleanup")
+ is_dry_run = self.options.dry_run
+ is_preserve = self.options.preserve_tempest_conf_objects
+ is_save_state = False
+
+ if is_dry_run:
+ self.dry_run_data["_tenants_to_clean"] = {}
+ f = open(DRY_RUN_JSON, 'w+')
+
+ admin_mgr = self.admin_mgr
+ # Always cleanup tempest and alt tempest tenants unless
+ # they are in saved state json. Therefore is_preserve is False
+ kwargs = {'data': self.dry_run_data,
+ 'is_dry_run': is_dry_run,
+ 'saved_state_json': self.json_data,
+ 'is_preserve': False,
+ 'is_save_state': is_save_state}
+ tenant_service = cleanup_service.TenantService(admin_mgr, **kwargs)
+ tenants = tenant_service.list()
+ LOG.debug("Process %s tenants" % len(tenants))
+
+ # Loop through list of tenants and clean them up.
+ for tenant in tenants:
+ self._add_admin(tenant['id'])
+ self._clean_tenant(tenant)
+
+ kwargs = {'data': self.dry_run_data,
+ 'is_dry_run': is_dry_run,
+ 'saved_state_json': self.json_data,
+ 'is_preserve': is_preserve,
+ 'is_save_state': is_save_state}
+ for service in self.global_services:
+ svc = service(admin_mgr, **kwargs)
+ svc.run()
+
+ if is_dry_run:
+ f.write(json.dumps(self.dry_run_data, sort_keys=True,
+ indent=2, separators=(',', ': ')))
+ f.close()
+
+ self._remove_admin_user_roles()
+
+ def _remove_admin_user_roles(self):
+ tenant_ids = self.admin_role_added
+ LOG.debug("Removing admin user roles where needed for tenants: %s"
+ % tenant_ids)
+ for tenant_id in tenant_ids:
+ self._remove_admin_role(tenant_id)
+
+ def _clean_tenant(self, tenant):
+ LOG.debug("Cleaning tenant: %s " % tenant['name'])
+ is_dry_run = self.options.dry_run
+ dry_run_data = self.dry_run_data
+ is_preserve = self.options.preserve_tempest_conf_objects
+ tenant_id = tenant['id']
+ tenant_name = tenant['name']
+ tenant_data = None
+ if is_dry_run:
+ tenant_data = dry_run_data["_tenants_to_clean"][tenant_id] = {}
+ tenant_data['name'] = tenant_name
+
+ kwargs = {"username": CONF.identity.admin_username,
+ "password": CONF.identity.admin_password,
+ "tenant_name": tenant['name']}
+ mgr = clients.Manager(credentials=auth.get_credentials(**kwargs))
+ kwargs = {'data': tenant_data,
+ 'is_dry_run': is_dry_run,
+ 'saved_state_json': None,
+ 'is_preserve': is_preserve,
+ 'is_save_state': False,
+ 'tenant_id': tenant_id}
+ for service in self.tenant_services:
+ svc = service(mgr, **kwargs)
+ svc.run()
+
+ def _init_admin_ids(self):
+ id_cl = self.admin_mgr.identity_client
+
+ tenant = id_cl.get_tenant_by_name(CONF.identity.admin_tenant_name)
+ self.admin_tenant_id = tenant['id']
+
+ user = id_cl.get_user_by_username(self.admin_tenant_id,
+ CONF.identity.admin_username)
+ self.admin_id = user['id']
+
+ _, roles = id_cl.list_roles()
+ for role in roles:
+ if role['name'] == CONF.identity.admin_role:
+ self.admin_role_id = role['id']
+ break
+
+ def _init_options(self):
+ parser = argparse.ArgumentParser(
+ description='Cleanup after tempest run')
+ parser.add_argument('--init-saved-state', action="store_true",
+ dest='init_saved_state', default=False,
+ help="Creates JSON file: " + SAVED_STATE_JSON +
+ ", representing the current state of your "
+ "deployment, specifically objects types "
+ "Tempest creates and destroys during a run. "
+ "You must run with this flag prior to "
+ "executing cleanup.")
+ parser.add_argument('--preserve-tempest-conf-objects',
+ action="store_true",
+ dest='preserve_tempest_conf_objects',
+ default=True, help="Do not delete the "
+ "tempest and alternate tempest users and "
+ "tenants, so they may be used for future "
+                            "tempest runs. By default this argument "
+ "is true.")
+ parser.add_argument('--delete-tempest-conf-objects',
+ action="store_false",
+ dest='preserve_tempest_conf_objects',
+ default=False,
+ help="Delete the tempest and "
+ "alternate tempest users and tenants.")
+ parser.add_argument('--dry-run', action="store_true",
+ dest='dry_run', default=False,
+                            help="Generate JSON file: " + DRY_RUN_JSON +
+ ", that reports the objects that would have "
+ "been deleted had a full cleanup been run.")
+
+ self.options = parser.parse_args()
+
+ def _add_admin(self, tenant_id):
+ id_cl = self.admin_mgr.identity_client
+ needs_role = True
+ _, roles = id_cl.list_user_roles(tenant_id, self.admin_id)
+ for role in roles:
+ if role['id'] == self.admin_role_id:
+ needs_role = False
+ LOG.debug("User already had admin privilege for this tenant")
+ if needs_role:
+            LOG.debug("Adding admin privilege for: %s" % tenant_id)
+ id_cl.assign_user_role(tenant_id, self.admin_id,
+ self.admin_role_id)
+ self.admin_role_added.append(tenant_id)
+
+ def _remove_admin_role(self, tenant_id):
+ LOG.debug("Remove admin user role for tenant: %s" % tenant_id)
+ # Must initialize AdminManager for each user role
+ # Otherwise authentication exception is thrown, weird
+ id_cl = clients.AdminManager().identity_client
+ if (self._tenant_exists(tenant_id)):
+ try:
+ id_cl.remove_user_role(tenant_id, self.admin_id,
+ self.admin_role_id)
+ except Exception as ex:
+                LOG.exception("Failed removing role from tenant which still "
+ "exists, exception: %s" % ex)
+
+ def _tenant_exists(self, tenant_id):
+ id_cl = self.admin_mgr.identity_client
+ try:
+ t = id_cl.get_tenant(tenant_id)
+ LOG.debug("Tenant is: %s" % str(t))
+ return True
+ except Exception as ex:
+ LOG.debug("Tenant no longer exists? %s" % ex)
+ return False
+
+ def _init_state(self):
+ LOG.debug("Initializing saved state.")
+ data = {}
+ admin_mgr = self.admin_mgr
+ kwargs = {'data': data,
+ 'is_dry_run': False,
+ 'saved_state_json': data,
+ 'is_preserve': False,
+ 'is_save_state': True}
+ for service in self.global_services:
+ svc = service(admin_mgr, **kwargs)
+ svc.run()
+
+ f = open(SAVED_STATE_JSON, 'w+')
+ f.write(json.dumps(data,
+ sort_keys=True, indent=2, separators=(',', ': ')))
+ f.close()
+
+ def _load_json(self):
+ try:
+ json_file = open(SAVED_STATE_JSON)
+ self.json_data = json.load(json_file)
+ json_file.close()
+ except IOError as ex:
+ LOG.exception("Failed loading saved state, please be sure you"
+ " have first run cleanup with --init-saved-state "
+ "flag prior to running tempest. Exception: %s" % ex)
+ sys.exit(ex)
+ except Exception as ex:
+ LOG.exception("Exception parsing saved state json : %s" % ex)
+ sys.exit(ex)
+
+
+def main():
+ cleanup = Cleanup()
+ cleanup.run()
+ LOG.info('Cleanup finished!')
+ return 0
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
new file mode 100644
index 000000000..0d3c6c6e3
--- /dev/null
+++ b/tempest/cmd/cleanup_service.py
@@ -0,0 +1,1062 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Dell Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.openstack.common import log as logging
+from tempest import test
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+CONF_USERS = None
+CONF_TENANTS = None
+CONF_PUB_NETWORK = None
+CONF_PRIV_NETWORK_NAME = None
+CONF_PUB_ROUTER = None
+CONF_FLAVORS = None
+CONF_IMAGES = None
+
+IS_CEILOMETER = None
+IS_CINDER = None
+IS_GLANCE = None
+IS_HEAT = None
+IS_NEUTRON = None
+IS_NOVA = None
+
+
+def init_conf():
+ global CONF_USERS
+ global CONF_TENANTS
+ global CONF_PUB_NETWORK
+ global CONF_PRIV_NETWORK_NAME
+ global CONF_PUB_ROUTER
+ global CONF_FLAVORS
+ global CONF_IMAGES
+
+ global IS_CEILOMETER
+ global IS_CINDER
+ global IS_GLANCE
+ global IS_HEAT
+ global IS_NEUTRON
+ global IS_NOVA
+
+ CONF_USERS = [CONF.identity.admin_username, CONF.identity.username,
+ CONF.identity.alt_username]
+ CONF_TENANTS = [CONF.identity.admin_tenant_name,
+ CONF.identity.tenant_name,
+ CONF.identity.alt_tenant_name]
+ CONF_PUB_NETWORK = CONF.network.public_network_id
+ CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
+ CONF_PUB_ROUTER = CONF.network.public_router_id
+ CONF_FLAVORS = [CONF.compute.flavor_ref, CONF.compute.flavor_ref_alt]
+ CONF_IMAGES = [CONF.compute.image_ref, CONF.compute.image_ref_alt]
+
+ IS_CEILOMETER = CONF.service_available.ceilometer
+ IS_CINDER = CONF.service_available.cinder
+ IS_GLANCE = CONF.service_available.glance
+ IS_HEAT = CONF.service_available.heat
+ IS_NEUTRON = CONF.service_available.neutron
+ IS_NOVA = CONF.service_available.nova
+
+
+class BaseService(object):
+ def __init__(self, kwargs):
+ self.client = None
+ for key, value in kwargs.items():
+ setattr(self, key, value)
+
+ def _filter_by_tenant_id(self, item_list):
+ if (item_list is None
+ or len(item_list) == 0
+ or not hasattr(self, 'tenant_id')
+ or self.tenant_id is None
+ or 'tenant_id' not in item_list[0]):
+ return item_list
+
+ _filtered_list = []
+ for item in item_list:
+ if item['tenant_id'] == self.tenant_id:
+ _filtered_list.append(item)
+ return _filtered_list
+
+ def list(self):
+ pass
+
+ def delete(self):
+ pass
+
+ def dry_run(self):
+ pass
+
+ def save_state(self):
+ pass
+
+ def run(self):
+ if self.is_dry_run:
+ self.dry_run()
+ elif self.is_save_state:
+ self.save_state()
+ else:
+ self.delete()
+
+
+class SnapshotService(BaseService):
+
+ def __init__(self, manager, **kwargs):
+ super(SnapshotService, self).__init__(kwargs)
+ self.client = manager.snapshots_client
+
+ def list(self):
+ client = self.client
+ __, snaps = client.list_snapshots()
+ LOG.debug("List count, %s Snapshots" % len(snaps))
+ return snaps
+
+ def delete(self):
+ snaps = self.list()
+ client = self.client
+ for snap in snaps:
+ try:
+ client.delete_snapshot(snap['id'])
+ except Exception as e:
+ LOG.exception("Delete Snapshot exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ snaps = self.list()
+ self.data['snapshots'] = snaps
+
+
+class ServerService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(ServerService, self).__init__(kwargs)
+ self.client = manager.servers_client
+
+ def list(self):
+ client = self.client
+ _, servers_body = client.list_servers()
+ servers = servers_body['servers']
+ LOG.debug("List count, %s Servers" % len(servers))
+ return servers
+
+ def delete(self):
+ client = self.client
+ servers = self.list()
+ for server in servers:
+ try:
+ client.delete_server(server['id'])
+ except Exception as e:
+ LOG.exception("Delete Server exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ servers = self.list()
+ self.data['servers'] = servers
+
+
+class ServerGroupService(ServerService):
+
+ def list(self):
+ client = self.client
+ _, sgs = client.list_server_groups()
+ LOG.debug("List count, %s Server Groups" % len(sgs))
+ return sgs
+
+ def delete(self):
+ client = self.client
+ sgs = self.list()
+ for sg in sgs:
+ try:
+ client.delete_server_group(sg['id'])
+ except Exception as e:
+ LOG.exception("Delete Server Group exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ sgs = self.list()
+ self.data['server_groups'] = sgs
+
+
+class StackService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(StackService, self).__init__(kwargs)
+ self.client = manager.orchestration_client
+
+ def list(self):
+ client = self.client
+ _, stacks = client.list_stacks()
+ LOG.debug("List count, %s Stacks" % len(stacks))
+ return stacks
+
+ def delete(self):
+ client = self.client
+ stacks = self.list()
+ for stack in stacks:
+ try:
+ client.delete_stack(stack['id'])
+ except Exception as e:
+ LOG.exception("Delete Stack exception: %s " % e)
+ pass
+
+ def dry_run(self):
+ stacks = self.list()
+ self.data['stacks'] = stacks
+
+
+class KeyPairService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(KeyPairService, self).__init__(kwargs)
+ self.client = manager.keypairs_client
+
+ def list(self):
+ client = self.client
+ _, keypairs = client.list_keypairs()
+ LOG.debug("List count, %s Keypairs" % len(keypairs))
+ return keypairs
+
+ def delete(self):
+ client = self.client
+ keypairs = self.list()
+ for k in keypairs:
+ try:
+ name = k['keypair']['name']
+ client.delete_keypair(name)
+ except Exception as e:
+ LOG.exception("Delete Keypairs exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ keypairs = self.list()
+ self.data['keypairs'] = keypairs
+
+
+class SecurityGroupService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(SecurityGroupService, self).__init__(kwargs)
+ self.client = manager.security_groups_client
+
+ def list(self):
+ client = self.client
+ _, secgrps = client.list_security_groups()
+ secgrp_del = [grp for grp in secgrps if grp['name'] != 'default']
+ LOG.debug("List count, %s Security Groups" % len(secgrp_del))
+ return secgrp_del
+
+ def delete(self):
+ client = self.client
+ secgrp_del = self.list()
+ for g in secgrp_del:
+ try:
+ client.delete_security_group(g['id'])
+ except Exception as e:
+ LOG.exception("Delete Security Groups exception: %s" % e)
+
+ def dry_run(self):
+ secgrp_del = self.list()
+ self.data['security_groups'] = secgrp_del
+
+
+class FloatingIpService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(FloatingIpService, self).__init__(kwargs)
+ self.client = manager.floating_ips_client
+
+ def list(self):
+ client = self.client
+ _, floating_ips = client.list_floating_ips()
+ LOG.debug("List count, %s Floating IPs" % len(floating_ips))
+ return floating_ips
+
+ def delete(self):
+ client = self.client
+ floating_ips = self.list()
+ for f in floating_ips:
+ try:
+ client.delete_floating_ip(f['id'])
+ except Exception as e:
+ LOG.exception("Delete Floating IPs exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ floating_ips = self.list()
+ self.data['floating_ips'] = floating_ips
+
+
+class VolumeService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(VolumeService, self).__init__(kwargs)
+ self.client = manager.volumes_client
+
+ def list(self):
+ client = self.client
+ _, vols = client.list_volumes()
+ LOG.debug("List count, %s Volumes" % len(vols))
+ return vols
+
+ def delete(self):
+ client = self.client
+ vols = self.list()
+ for v in vols:
+ try:
+ client.delete_volume(v['id'])
+ except Exception as e:
+ LOG.exception("Delete Volume exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ vols = self.list()
+ self.data['volumes'] = vols
+
+
+# Begin network service classes
+class NetworkService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(NetworkService, self).__init__(kwargs)
+ self.client = manager.network_client
+
+ def list(self):
+ client = self.client
+ _, networks = client.list_networks()
+ networks = self._filter_by_tenant_id(networks['networks'])
+ # filter out networks declared in tempest.conf
+ if self.is_preserve:
+ networks = [network for network in networks
+ if (network['name'] != CONF_PRIV_NETWORK_NAME
+ and network['id'] != CONF_PUB_NETWORK)]
+        LOG.debug("List count, %s Networks" % len(networks))
+ return networks
+
+ def delete(self):
+ client = self.client
+ networks = self.list()
+ for n in networks:
+ try:
+ client.delete_network(n['id'])
+ except Exception as e:
+ LOG.exception("Delete Network exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ networks = self.list()
+ self.data['networks'] = networks
+
+
+class NetworkIpSecPolicyService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, ipsecpols = client.list_ipsecpolicies()
+ ipsecpols = ipsecpols['ipsecpolicies']
+ ipsecpols = self._filter_by_tenant_id(ipsecpols)
+ LOG.debug("List count, %s IP Security Policies" % len(ipsecpols))
+ return ipsecpols
+
+ def delete(self):
+ client = self.client
+ ipsecpols = self.list()
+ for ipsecpol in ipsecpols:
+ try:
+ client.delete_ipsecpolicy(ipsecpol['id'])
+ except Exception as e:
+                LOG.exception("Delete IP Security Policy exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ ipsecpols = self.list()
+ self.data['ip_security_policies'] = ipsecpols
+
+
+class NetworkFwPolicyService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, fwpols = client.list_firewall_policies()
+ fwpols = fwpols['firewall_policies']
+ fwpols = self._filter_by_tenant_id(fwpols)
+ LOG.debug("List count, %s Firewall Policies" % len(fwpols))
+ return fwpols
+
+ def delete(self):
+ client = self.client
+ fwpols = self.list()
+ for fwpol in fwpols:
+ try:
+ client.delete_firewall_policy(fwpol['id'])
+ except Exception as e:
+ LOG.exception("Delete Firewall Policy exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ fwpols = self.list()
+ self.data['firewall_policies'] = fwpols
+
+
+class NetworkFwRulesService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, fwrules = client.list_firewall_rules()
+ fwrules = fwrules['firewall_rules']
+ fwrules = self._filter_by_tenant_id(fwrules)
+ LOG.debug("List count, %s Firewall Rules" % len(fwrules))
+ return fwrules
+
+ def delete(self):
+ client = self.client
+ fwrules = self.list()
+ for fwrule in fwrules:
+ try:
+ client.delete_firewall_rule(fwrule['id'])
+ except Exception as e:
+ LOG.exception("Delete Firewall Rule exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ fwrules = self.list()
+ self.data['firewall_rules'] = fwrules
+
+
+class NetworkIkePolicyService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, ikepols = client.list_ikepolicies()
+ ikepols = ikepols['ikepolicies']
+ ikepols = self._filter_by_tenant_id(ikepols)
+ LOG.debug("List count, %s IKE Policies" % len(ikepols))
+ return ikepols
+
+ def delete(self):
+ client = self.client
+ ikepols = self.list()
+ for ikepol in ikepols:
+ try:
+                client.delete_ikepolicy(ikepol['id'])
+ except Exception as e:
+ LOG.exception("Delete IKE Policy exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ ikepols = self.list()
+ self.data['ike_policies'] = ikepols
+
+
+class NetworkVpnServiceService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, vpnsrvs = client.list_vpnservices()
+ vpnsrvs = vpnsrvs['vpnservices']
+ vpnsrvs = self._filter_by_tenant_id(vpnsrvs)
+ LOG.debug("List count, %s VPN Services" % len(vpnsrvs))
+ return vpnsrvs
+
+ def delete(self):
+ client = self.client
+ vpnsrvs = self.list()
+ for vpnsrv in vpnsrvs:
+ try:
+ client.delete_vpnservice(vpnsrv['id'])
+ except Exception as e:
+ LOG.exception("Delete VPN Service exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ vpnsrvs = self.list()
+ self.data['vpn_services'] = vpnsrvs
+
+
+class NetworkFloatingIpService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, flips = client.list_floatingips()
+ flips = flips['floatingips']
+ flips = self._filter_by_tenant_id(flips)
+ LOG.debug("List count, %s Network Floating IPs" % len(flips))
+ return flips
+
+ def delete(self):
+ client = self.client
+ flips = self.list()
+ for flip in flips:
+ try:
+ client.delete_floatingip(flip['id'])
+ except Exception as e:
+ LOG.exception("Delete Network Floating IP exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ flips = self.list()
+ self.data['floating_ips'] = flips
+
+
+class NetworkRouterService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, routers = client.list_routers()
+ routers = routers['routers']
+ routers = self._filter_by_tenant_id(routers)
+ if self.is_preserve:
+ routers = [router for router in routers
+ if router['id'] != CONF_PUB_ROUTER]
+
+ LOG.debug("List count, %s Routers" % len(routers))
+ return routers
+
+ def delete(self):
+ client = self.client
+ routers = self.list()
+ for router in routers:
+ try:
+ rid = router['id']
+ _, ports = client.list_router_interfaces(rid)
+ ports = ports['ports']
+ for port in ports:
+ subid = port['fixed_ips'][0]['subnet_id']
+ client.remove_router_interface_with_subnet_id(rid, subid)
+ client.delete_router(rid)
+ except Exception as e:
+ LOG.exception("Delete Router exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ routers = self.list()
+ self.data['routers'] = routers
+
+
+class NetworkHealthMonitorService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, hms = client.list_health_monitors()
+ hms = hms['health_monitors']
+ hms = self._filter_by_tenant_id(hms)
+ LOG.debug("List count, %s Health Monitors" % len(hms))
+ return hms
+
+ def delete(self):
+ client = self.client
+ hms = self.list()
+ for hm in hms:
+ try:
+ client.delete_health_monitor(hm['id'])
+ except Exception as e:
+ LOG.exception("Delete Health Monitor exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ hms = self.list()
+ self.data['health_monitors'] = hms
+
+
+class NetworkMemberService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, members = client.list_members()
+ members = members['members']
+ members = self._filter_by_tenant_id(members)
+ LOG.debug("List count, %s Members" % len(members))
+ return members
+
+ def delete(self):
+ client = self.client
+ members = self.list()
+ for member in members:
+ try:
+ client.delete_member(member['id'])
+ except Exception as e:
+ LOG.exception("Delete Member exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ members = self.list()
+ self.data['members'] = members
+
+
+class NetworkVipService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, vips = client.list_vips()
+ vips = vips['vips']
+ vips = self._filter_by_tenant_id(vips)
+ LOG.debug("List count, %s VIPs" % len(vips))
+ return vips
+
+ def delete(self):
+ client = self.client
+ vips = self.list()
+ for vip in vips:
+ try:
+ client.delete_vip(vip['id'])
+ except Exception as e:
+ LOG.exception("Delete VIP exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ vips = self.list()
+ self.data['vips'] = vips
+
+
+class NetworkPoolService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, pools = client.list_pools()
+ pools = pools['pools']
+ pools = self._filter_by_tenant_id(pools)
+ LOG.debug("List count, %s Pools" % len(pools))
+ return pools
+
+ def delete(self):
+ client = self.client
+ pools = self.list()
+ for pool in pools:
+ try:
+ client.delete_pool(pool['id'])
+ except Exception as e:
+ LOG.exception("Delete Pool exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ pools = self.list()
+ self.data['pools'] = pools
+
+
+class NetworMeteringLabelRuleService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, rules = client.list_metering_label_rules()
+ rules = rules['metering_label_rules']
+ rules = self._filter_by_tenant_id(rules)
+ LOG.debug("List count, %s Metering Label Rules" % len(rules))
+ return rules
+
+ def delete(self):
+ client = self.client
+ rules = self.list()
+ for rule in rules:
+ try:
+ client.delete_metering_label_rule(rule['id'])
+ except Exception as e:
+ LOG.exception("Delete Metering Label Rule exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ rules = self.list()
+ self.data['rules'] = rules
+
+
+class NetworMeteringLabelService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, labels = client.list_metering_labels()
+ labels = labels['metering_labels']
+ labels = self._filter_by_tenant_id(labels)
+ LOG.debug("List count, %s Metering Labels" % len(labels))
+ return labels
+
+ def delete(self):
+ client = self.client
+ labels = self.list()
+ for label in labels:
+ try:
+ client.delete_metering_label(label['id'])
+ except Exception as e:
+ LOG.exception("Delete Metering Label exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ labels = self.list()
+ self.data['labels'] = labels
+
+
+class NetworkPortService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, ports = client.list_ports()
+ ports = ports['ports']
+ ports = self._filter_by_tenant_id(ports)
+ LOG.debug("List count, %s Ports" % len(ports))
+ return ports
+
+ def delete(self):
+ client = self.client
+ ports = self.list()
+ for port in ports:
+ try:
+ client.delete_port(port['id'])
+ except Exception as e:
+ LOG.exception("Delete Port exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ ports = self.list()
+ self.data['ports'] = ports
+
+
+class NetworkSubnetService(NetworkService):
+
+ def list(self):
+ client = self.client
+ _, subnets = client.list_subnets()
+ subnets = subnets['subnets']
+ subnets = self._filter_by_tenant_id(subnets)
+ LOG.debug("List count, %s Subnets" % len(subnets))
+ return subnets
+
+ def delete(self):
+ client = self.client
+ subnets = self.list()
+ for subnet in subnets:
+ try:
+ client.delete_subnet(subnet['id'])
+ except Exception as e:
+ LOG.exception("Delete Subnet exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ subnets = self.list()
+ self.data['subnets'] = subnets
+
+
+# Telemetry services
+class TelemetryAlarmService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(TelemetryAlarmService, self).__init__(kwargs)
+ self.client = manager.telemetry_client
+
+ def list(self):
+ client = self.client
+ _, alarms = client.list_alarms()
+ LOG.debug("List count, %s Alarms" % len(alarms))
+ return alarms
+
+ def delete(self):
+ client = self.client
+ alarms = self.list()
+ for alarm in alarms:
+ try:
+ client.delete_alarm(alarm['id'])
+ except Exception as e:
+ LOG.exception("Delete Alarms exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ alarms = self.list()
+ self.data['alarms'] = alarms
+
+
+# begin global services
+class FlavorService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(FlavorService, self).__init__(kwargs)
+ self.client = manager.flavors_client
+
+ def list(self):
+ client = self.client
+ _, flavors = client.list_flavors({"is_public": None})
+ if not self.is_save_state:
+ # recreate list removing saved flavors
+ flavors = [flavor for flavor in flavors if flavor['id']
+ not in self.saved_state_json['flavors'].keys()]
+
+ if self.is_preserve:
+ flavors = [flavor for flavor in flavors
+ if flavor['id'] not in CONF_FLAVORS]
+ LOG.debug("List count, %s Flavors after reconcile" % len(flavors))
+ return flavors
+
+ def delete(self):
+ client = self.client
+ flavors = self.list()
+ for flavor in flavors:
+ try:
+ client.delete_flavor(flavor['id'])
+ except Exception as e:
+ LOG.exception("Delete Flavor exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ flavors = self.list()
+ self.data['flavors'] = flavors
+
+ def save_state(self):
+ flavors = self.list()
+ flavor_data = self.data['flavors'] = {}
+ for flavor in flavors:
+ flavor_data[flavor['id']] = flavor['name']
+
+
+class ImageService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(ImageService, self).__init__(kwargs)
+ self.client = manager.images_client
+
+ def list(self):
+ client = self.client
+ _, images = client.list_images({"all_tenants": True})
+ if not self.is_save_state:
+ images = [image for image in images if image['id']
+ not in self.saved_state_json['images'].keys()]
+ if self.is_preserve:
+ images = [image for image in images
+ if image['id'] not in CONF_IMAGES]
+ LOG.debug("List count, %s Images after reconcile" % len(images))
+ return images
+
+ def delete(self):
+ client = self.client
+ images = self.list()
+ for image in images:
+ try:
+ client.delete_image(image['id'])
+ except Exception as e:
+ LOG.exception("Delete Image exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ images = self.list()
+ self.data['images'] = images
+
+ def save_state(self):
+ images = self.list()
+ image_data = self.data['images'] = {}
+ for image in images:
+ image_data[image['id']] = image['name']
+
+
+class IdentityService(BaseService):
+ def __init__(self, manager, **kwargs):
+ super(IdentityService, self).__init__(kwargs)
+ self.client = manager.identity_client
+
+
+class UserService(IdentityService):
+
+ def list(self):
+ client = self.client
+ _, users = client.get_users()
+
+ if not self.is_save_state:
+ users = [user for user in users if user['id']
+ not in self.saved_state_json['users'].keys()]
+
+ if self.is_preserve:
+ users = [user for user in users if user['name']
+ not in CONF_USERS]
+
+ elif not self.is_save_state: # Never delete admin user
+ users = [user for user in users if user['name'] !=
+ CONF.identity.admin_username]
+
+ LOG.debug("List count, %s Users after reconcile" % len(users))
+ return users
+
+ def delete(self):
+ client = self.client
+ users = self.list()
+ for user in users:
+ try:
+ client.delete_user(user['id'])
+ except Exception as e:
+ LOG.exception("Delete User exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ users = self.list()
+ self.data['users'] = users
+
+ def save_state(self):
+ users = self.list()
+ user_data = self.data['users'] = {}
+ for user in users:
+ user_data[user['id']] = user['name']
+
+
+class RoleService(IdentityService):
+
+ def list(self):
+ client = self.client
+ try:
+ _, roles = client.list_roles()
+ # reconcile roles with saved state and never list admin role
+ if not self.is_save_state:
+ roles = [role for role in roles if
+ (role['id'] not in
+ self.saved_state_json['roles'].keys()
+ and role['name'] != CONF.identity.admin_role)]
+ LOG.debug("List count, %s Roles after reconcile" % len(roles))
+ return roles
+ except Exception as ex:
+ LOG.exception("Cannot retrieve Roles, exception: %s" % ex)
+ return []
+
+ def delete(self):
+ client = self.client
+ roles = self.list()
+ for role in roles:
+ try:
+ client.delete_role(role['id'])
+ except Exception as e:
+ LOG.exception("Delete Role exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ roles = self.list()
+ self.data['roles'] = roles
+
+ def save_state(self):
+ roles = self.list()
+ role_data = self.data['roles'] = {}
+ for role in roles:
+ role_data[role['id']] = role['name']
+
+
+class TenantService(IdentityService):
+
+ def list(self):
+ client = self.client
+ _, tenants = client.list_tenants()
+ if not self.is_save_state:
+ tenants = [tenant for tenant in tenants if (tenant['id']
+ not in self.saved_state_json['tenants'].keys()
+ and tenant['name'] != CONF.identity.admin_tenant_name)]
+
+ if self.is_preserve:
+ tenants = [tenant for tenant in tenants if tenant['name']
+ not in CONF_TENANTS]
+
+ LOG.debug("List count, %s Tenants after reconcile" % len(tenants))
+ return tenants
+
+ def delete(self):
+ client = self.client
+ tenants = self.list()
+ for tenant in tenants:
+ try:
+ client.delete_tenant(tenant['id'])
+ except Exception as e:
+ LOG.exception("Delete Tenant exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ tenants = self.list()
+ self.data['tenants'] = tenants
+
+ def save_state(self):
+ tenants = self.list()
+ tenant_data = self.data['tenants'] = {}
+ for tenant in tenants:
+ tenant_data[tenant['id']] = tenant['name']
+
+
+class DomainService(BaseService):
+
+ def __init__(self, manager, **kwargs):
+ super(DomainService, self).__init__(kwargs)
+ self.client = manager.identity_v3_client
+
+ def list(self):
+ client = self.client
+ _, domains = client.list_domains()
+ if not self.is_save_state:
+ domains = [domain for domain in domains if domain['id']
+ not in self.saved_state_json['domains'].keys()]
+
+ LOG.debug("List count, %s Domains after reconcile" % len(domains))
+ return domains
+
+ def delete(self):
+ client = self.client
+ domains = self.list()
+ for domain in domains:
+ try:
+ client.update_domain(domain['id'], enabled=False)
+ client.delete_domain(domain['id'])
+ except Exception as e:
+ LOG.exception("Delete Domain exception: %s" % e)
+ pass
+
+ def dry_run(self):
+ domains = self.list()
+ self.data['domains'] = domains
+
+ def save_state(self):
+ domains = self.list()
+ domain_data = self.data['domains'] = {}
+ for domain in domains:
+ domain_data[domain['id']] = domain['name']
+
+
+def get_tenant_cleanup_services():
+ tenant_services = []
+
+ if IS_CEILOMETER:
+ tenant_services.append(TelemetryAlarmService)
+ if IS_NOVA:
+ tenant_services.append(ServerService)
+ tenant_services.append(KeyPairService)
+ tenant_services.append(SecurityGroupService)
+ tenant_services.append(ServerGroupService)
+ if not IS_NEUTRON:
+ tenant_services.append(FloatingIpService)
+ if IS_HEAT:
+ tenant_services.append(StackService)
+ if IS_NEUTRON:
+ if test.is_extension_enabled('vpnaas', 'network'):
+ tenant_services.append(NetworkIpSecPolicyService)
+ tenant_services.append(NetworkIkePolicyService)
+ tenant_services.append(NetworkVpnServiceService)
+ if test.is_extension_enabled('fwaas', 'network'):
+ tenant_services.append(NetworkFwPolicyService)
+ tenant_services.append(NetworkFwRulesService)
+ if test.is_extension_enabled('lbaas', 'network'):
+ tenant_services.append(NetworkHealthMonitorService)
+ tenant_services.append(NetworkMemberService)
+ tenant_services.append(NetworkVipService)
+ tenant_services.append(NetworkPoolService)
+ if test.is_extension_enabled('metering', 'network'):
+ tenant_services.append(NetworMeteringLabelRuleService)
+ tenant_services.append(NetworMeteringLabelService)
+ tenant_services.append(NetworkRouterService)
+ tenant_services.append(NetworkFloatingIpService)
+ tenant_services.append(NetworkPortService)
+ tenant_services.append(NetworkSubnetService)
+ tenant_services.append(NetworkService)
+ if IS_CINDER:
+ tenant_services.append(SnapshotService)
+ tenant_services.append(VolumeService)
+ return tenant_services
+
+
+def get_global_cleanup_services():
+ global_services = []
+ if IS_NOVA:
+ global_services.append(FlavorService)
+ if IS_GLANCE:
+ global_services.append(ImageService)
+ global_services.append(UserService)
+ global_services.append(TenantService)
+ global_services.append(DomainService)
+ global_services.append(RoleService)
+ return global_services
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 3f8db3df5..0adc7e0ee 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -20,6 +20,7 @@ resources in a declarative way.
"""
import argparse
+import collections
import datetime
import os
import sys
@@ -43,7 +44,7 @@ from tempest.services.volume.json import volumes_client
OPTS = {}
USERS = {}
-RES = {}
+RES = collections.defaultdict(list)
LOG = None
@@ -184,7 +185,9 @@ def create_users(users):
def destroy_users(users):
admin = keystone_admin()
for user in users:
- user_id = admin.identity.get_user_by_name(user['name'])['id']
+ tenant_id = admin.identity.get_tenant_by_name(user['tenant'])['id']
+ user_id = admin.identity.get_user_by_username(tenant_id,
+ user['name'])['id']
r, body = admin.identity.delete_user(user_id)
@@ -213,9 +216,7 @@ class JavelinCheck(unittest.TestCase):
self.check_users()
self.check_objects()
self.check_servers()
- # TODO(sdague): Volumes not yet working, bring it back once the
- # code is self testing.
- # self.check_volumes()
+ self.check_volumes()
self.check_telemetry()
def check_users(self):
@@ -282,6 +283,8 @@ class JavelinCheck(unittest.TestCase):
If in check mode confirm that the oldest sample available is from
before the upgrade.
"""
+ if not self.res.get('telemetry'):
+ return
LOG.info("checking telemetry")
for server in self.res['servers']:
client = client_for_user(server['owner'])
@@ -300,15 +303,15 @@ class JavelinCheck(unittest.TestCase):
LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
- found = _get_volume_by_name(client, volume['name'])
+ vol_body = _get_volume_by_name(client, volume['name'])
self.assertIsNotNone(
- found,
+ vol_body,
"Couldn't find expected volume %s" % volume['name'])
# Verify that a volume's attachment retrieved
server_id = _get_server_by_name(client, volume['server'])['id']
- attachment = self.client.get_attachment_from_volume(volume)
- self.assertEqual(volume['id'], attachment['volume_id'])
+ attachment = client.volumes.get_attachment_from_volume(vol_body)
+ self.assertEqual(vol_body['id'], attachment['volume_id'])
self.assertEqual(server_id, attachment['server_id'])
def _confirm_telemetry_sample(self, server, sample):
@@ -501,37 +504,46 @@ def destroy_servers(servers):
def _get_volume_by_name(client, name):
r, body = client.volumes.list_volumes()
- for volume in body['volumes']:
- if name == volume['name']:
+ for volume in body:
+ if name == volume['display_name']:
return volume
return None
def create_volumes(volumes):
+ if not volumes:
+ return
+ LOG.info("Creating volumes")
for volume in volumes:
client = client_for_user(volume['owner'])
# only create a volume if the name isn't here
- r, body = client.volumes.list_volumes()
- if any(item['name'] == volume['name'] for item in body):
+ if _get_volume_by_name(client, volume['name']):
+ LOG.info("volume '%s' already exists" % volume['name'])
continue
- client.volumes.create_volume(volume['name'], volume['size'])
+ size = volume['gb']
+ v_name = volume['name']
+ resp, body = client.volumes.create_volume(size=size,
+ display_name=v_name)
+ client.volumes.wait_for_volume_status(body['id'], 'available')
def destroy_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
volume_id = _get_volume_by_name(client, volume['name'])['id']
- r, body = client.volumes.delete_volume(volume_id)
+ client.volumes.detach_volume(volume_id)
+ client.volumes.delete_volume(volume_id)
def attach_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
-
server_id = _get_server_by_name(client, volume['server'])['id']
- client.volumes.attach_volume(volume['name'], server_id)
+ volume_id = _get_volume_by_name(client, volume['name'])['id']
+ device = volume['device']
+ client.volumes.attach_volume(volume_id, server_id, device)
#######################
@@ -552,10 +564,8 @@ def create_resources():
create_objects(RES['objects'])
create_images(RES['images'])
create_servers(RES['servers'])
- # TODO(sdague): volumes definition doesn't work yet, bring it
- # back once we're actually executing the code
- # create_volumes(RES['volumes'])
- # attach_volumes(RES['volumes'])
+ create_volumes(RES['volumes'])
+ attach_volumes(RES['volumes'])
def destroy_resources():
@@ -626,7 +636,7 @@ def main():
global RES
get_options()
setup_logging()
- RES = load_resources(OPTS.resources)
+ RES.update(load_resources(OPTS.resources))
if OPTS.mode == 'create':
create_resources()
diff --git a/tempest/cmd/resources.yaml b/tempest/cmd/resources.yaml
index 3450e1fe3..2d5e68687 100644
--- a/tempest/cmd/resources.yaml
+++ b/tempest/cmd/resources.yaml
@@ -36,11 +36,13 @@ volumes:
- name: assegai
server: peltast
owner: javelin
- size: 1
+ gb: 1
+ device: /dev/vdb
- name: pifpouf
server: hoplite
owner: javelin
- size: 2
+ gb: 2
+ device: /dev/vdb
servers:
- name: peltast
owner: javelin
@@ -55,3 +57,4 @@ objects:
name: javelin1
owner: javelin
file: /etc/hosts
+telemetry: true
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 5046bff46..f426e4d35 100755
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -122,6 +122,18 @@ def verify_cinder_api_versions(os, update):
not CONF.volume_feature_enabled.api_v2, update)
+def verify_api_versions(os, service, update):
+ verify = {
+ 'cinder': verify_cinder_api_versions,
+ 'glance': verify_glance_api_versions,
+ 'keystone': verify_keystone_api_versions,
+ 'nova': verify_nova_api_versions,
+ }
+ if service not in verify:
+ return
+ verify[service](os, update)
+
+
def get_extension_client(os, service):
extensions_client = {
'nova': os.extensions_client,
@@ -337,10 +349,13 @@ def main():
elif service not in services:
continue
results = verify_extensions(os, service, results)
- verify_keystone_api_versions(os, update)
- verify_glance_api_versions(os, update)
- verify_nova_api_versions(os, update)
- verify_cinder_api_versions(os, update)
+
+ # Verify API versions of all services in the keystone catalog and keystone
+ # itself.
+ services.append('keystone')
+ for service in services:
+ verify_api_versions(os, service, update)
+
display_results(results, update, replace)
if update:
conf_file.close()
diff --git a/tempest/common/accounts.py b/tempest/common/accounts.py
index 7423c17e7..88e8ced08 100644
--- a/tempest/common/accounts.py
+++ b/tempest/common/accounts.py
@@ -58,7 +58,12 @@ class Accounts(cred_provider.CredentialProvider):
return hash_dict
def is_multi_user(self):
- return len(self.hash_dict) > 1
+ # Default credentials are not a valid option with the locking Accounts
+ if self.use_default_creds:
+ raise exceptions.InvalidConfiguration(
+ "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
+ else:
+ return len(self.hash_dict) > 1
def _create_hash_file(self, hash_string):
path = os.path.join(os.path.join(self.accounts_dir, hash_string))
@@ -144,6 +149,21 @@ class NotLockingAccounts(Accounts):
to preserve the current behaviour of the serial tempest run.
"""
+ def is_multi_user(self):
+ if self.use_default_creds:
+ # Verify that the configured users are valid and distinct
+ try:
+ user = self.get_primary_creds()
+ alt_user = self.get_alt_creds()
+ return user.username != alt_user.username
+ except exceptions.InvalidCredentials as ic:
+ msg = "At least one of the configured credentials is " \
+ "not valid: %s" % ic.message
+ raise exceptions.InvalidConfiguration(msg)
+ else:
+ # TODO(andreaf) Add a uniqueness check here
+ return len(self.hash_dict) > 1
+
def get_creds(self, id):
try:
# No need to sort the dict as within the same python process
diff --git a/tempest/common/cred_provider.py b/tempest/common/cred_provider.py
index 9808ed1f8..b09c9641b 100644
--- a/tempest/common/cred_provider.py
+++ b/tempest/common/cred_provider.py
@@ -1,4 +1,5 @@
-# (c) 2014 Deutsche Telekom AG
+# Copyright (c) 2014 Deutsche Telekom AG
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -24,8 +25,8 @@ LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class CredentialProvider(object):
- def __init__(self, name, tempest_client=True, interface='json',
- password='pass', network_resources=None):
+ def __init__(self, name, interface='json', password='pass',
+ network_resources=None):
self.name = name
@abc.abstractmethod
@@ -43,3 +44,7 @@ class CredentialProvider(object):
@abc.abstractmethod
def clear_isolated_creds(self):
return
+
+ @abc.abstractmethod
+ def is_multi_user(self):
+ return
diff --git a/tempest/common/credentials.py b/tempest/common/credentials.py
new file mode 100644
index 000000000..08b592f92
--- /dev/null
+++ b/tempest/common/credentials.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tempest.common import accounts
+from tempest.common import isolated_creds
+from tempest import config
+
+CONF = config.CONF
+
+
+# Return the right implementation of CredentialProvider based on config
+# Dropping interface and password, as they are never used anyways
+# TODO(andreaf) Drop them from the CredentialsProvider interface completely
+def get_isolated_credentials(name, network_resources=None,
+ force_tenant_isolation=False):
+ # If a test requires a new account to work, it can have it via forcing
+ # tenant isolation. A new account will be produced only for that test.
+ # In case admin credentials are not available for the account creation,
+ # the test should be skipped else it would fail.
+ if CONF.auth.allow_tenant_isolation or force_tenant_isolation:
+ return isolated_creds.IsolatedCreds(
+ name=name,
+ network_resources=network_resources)
+ else:
+ if CONF.auth.locking_credentials_provider:
+ # Most params are not relevant for pre-created accounts
+ return accounts.Accounts(name=name)
+ else:
+ return accounts.NotLockingAccounts(name=name)
diff --git a/tempest/common/generator/base_generator.py b/tempest/common/generator/base_generator.py
index 0398af1cd..3f405b171 100644
--- a/tempest/common/generator/base_generator.py
+++ b/tempest/common/generator/base_generator.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import functools
import jsonschema
@@ -30,9 +31,11 @@ def _check_for_expected_result(name, schema):
return expected_result
-def generator_type(*args):
+def generator_type(*args, **kwargs):
def wrapper(func):
func.types = args
+ for key in kwargs:
+ setattr(func, key, kwargs[key])
return func
return wrapper
@@ -106,37 +109,74 @@ class BasicGeneratorSet(object):
jsonschema.Draft4Validator.check_schema(schema['json-schema'])
jsonschema.validate(schema, self.schema)
- def generate(self, schema):
+ def generate_scenarios(self, schema, path=None):
"""
- Generate an json dictionary based on a schema.
- Only one value is mis-generated for each dictionary created.
-
- Any generator must return a list of tuples or a single tuple.
- The values of this tuple are:
- result[0]: Name of the test
- result[1]: json schema for the test
- result[2]: expected result of the test (can be None)
+ Generates the scenario (all possible test cases) out of the given
+ schema.
+
+ :param schema: a dict style schema (see ``BasicGeneratorSet.schema``)
+ :param path: the schema path if the given schema is a subschema
"""
- LOG.debug("generate_invalid: %s" % schema)
- schema_type = schema["type"]
- if isinstance(schema_type, list):
+ schema_type = schema['type']
+ scenarios = []
+
+ if schema_type == 'object':
+ properties = schema["properties"]
+ for attribute, definition in properties.iteritems():
+ current_path = copy.copy(path)
+ if path is not None:
+ current_path.append(attribute)
+ else:
+ current_path = [attribute]
+ scenarios.extend(
+ self.generate_scenarios(definition, current_path))
+ elif isinstance(schema_type, list):
if "integer" in schema_type:
schema_type = "integer"
else:
raise Exception("non-integer list types not supported")
- result = []
- if schema_type not in self.types_dict:
- raise TypeError("generator (%s) doesn't support type: %s"
- % (self.__class__.__name__, schema_type))
for generator in self.types_dict[schema_type]:
- ret = generator(schema)
- if ret is not None:
- if isinstance(ret, list):
- result.extend(ret)
- elif isinstance(ret, tuple):
- result.append(ret)
- else:
- raise Exception("generator (%s) returns invalid result: %s"
- % (generator, ret))
- LOG.debug("result: %s" % result)
- return result
+ if hasattr(generator, "needed_property"):
+ prop = generator.needed_property
+ if (prop not in schema or
+ schema[prop] is None or
+ schema[prop] is False):
+ continue
+
+ name = generator.__name__
+ if ("exclude_tests" in schema and
+ name in schema["exclude_tests"]):
+ continue
+ if path is not None:
+ name = "%s_%s" % ("_".join(path), name)
+ scenarios.append({
+ "_negtest_name": name,
+ "_negtest_generator": generator,
+ "_negtest_schema": schema,
+ "_negtest_path": path})
+ return scenarios
+
+ def generate_payload(self, test, schema):
+ """
+ Generates one jsonschema out of the given test. generate_scenarios must
+ be called first to register all needed variables on the test.
+
+ :param test: A test object (scenario) with all _negtest variables on it
+ :param schema: schema for the test
+ """
+ generator = test._negtest_generator
+ ret = generator(test._negtest_schema)
+ path = copy.copy(test._negtest_path)
+ expected_result = None
+
+ if ret is not None:
+ generator_result = generator(test._negtest_schema)
+ invalid_snippet = generator_result[1]
+ expected_result = generator_result[2]
+ element = path.pop()
+ if len(path) > 0:
+ schema_snip = reduce(dict.get, path, schema)
+ schema_snip[element] = invalid_snippet
+ else:
+ schema[element] = invalid_snippet
+ return expected_result
diff --git a/tempest/common/generator/negative_generator.py b/tempest/common/generator/negative_generator.py
index 4f3d2cd59..1d5ed4392 100644
--- a/tempest/common/generator/negative_generator.py
+++ b/tempest/common/generator/negative_generator.py
@@ -47,65 +47,32 @@ class NegativeTestGenerator(base.BasicGeneratorSet):
if min_length > 0:
return "x" * (min_length - 1)
- @base.generator_type("string")
+ @base.generator_type("string", needed_property="maxLength")
@base.simple_generator
def gen_str_max_length(self, schema):
max_length = schema.get("maxLength", -1)
- if max_length > -1:
- return "x" * (max_length + 1)
+ return "x" * (max_length + 1)
- @base.generator_type("integer")
+ @base.generator_type("integer", needed_property="minimum")
@base.simple_generator
def gen_int_min(self, schema):
- if "minimum" in schema:
- minimum = schema["minimum"]
- if "exclusiveMinimum" not in schema:
- minimum -= 1
- return minimum
+ minimum = schema["minimum"]
+ if "exclusiveMinimum" not in schema:
+ minimum -= 1
+ return minimum
- @base.generator_type("integer")
+ @base.generator_type("integer", needed_property="maximum")
@base.simple_generator
def gen_int_max(self, schema):
- if "maximum" in schema:
- maximum = schema["maximum"]
- if "exclusiveMaximum" not in schema:
- maximum += 1
- return maximum
-
- @base.generator_type("object")
- def gen_obj_remove_attr(self, schema):
- invalids = []
- valid_schema = valid.ValidTestGenerator().generate_valid(schema)
- required = schema.get("required", [])
- for r in required:
- new_valid = copy.deepcopy(valid_schema)
- del new_valid[r]
- invalids.append(("gen_obj_remove_attr", new_valid, None))
- return invalids
+ maximum = schema["maximum"]
+ if "exclusiveMaximum" not in schema:
+ maximum += 1
+ return maximum
- @base.generator_type("object")
+ @base.generator_type("object", needed_property="additionalProperties")
@base.simple_generator
def gen_obj_add_attr(self, schema):
valid_schema = valid.ValidTestGenerator().generate_valid(schema)
- if not schema.get("additionalProperties", True):
- new_valid = copy.deepcopy(valid_schema)
- new_valid["$$$$$$$$$$"] = "xxx"
- return new_valid
-
- @base.generator_type("object")
- def gen_inv_prop_obj(self, schema):
- LOG.debug("generate_invalid_object: %s" % schema)
- valid_schema = valid.ValidTestGenerator().generate_valid(schema)
- invalids = []
- properties = schema["properties"]
-
- for k, v in properties.iteritems():
- for invalid in self.generate(v):
- LOG.debug(v)
- new_valid = copy.deepcopy(valid_schema)
- new_valid[k] = invalid[1]
- name = "prop_%s_%s" % (k, invalid[0])
- invalids.append((name, new_valid, invalid[2]))
-
- LOG.debug("generate_invalid_object return: %s" % invalids)
- return invalids
+ new_valid = copy.deepcopy(valid_schema)
+ new_valid["$$$$$$$$$$"] = "xxx"
+ return new_valid
diff --git a/tempest/common/generator/valid_generator.py b/tempest/common/generator/valid_generator.py
index 0d7b398ed..7b80afc6c 100644
--- a/tempest/common/generator/valid_generator.py
+++ b/tempest/common/generator/valid_generator.py
@@ -54,5 +54,28 @@ class ValidTestGenerator(base.BasicGeneratorSet):
obj[k] = self.generate_valid(v)
return obj
+ def generate(self, schema):
+ schema_type = schema["type"]
+ if isinstance(schema_type, list):
+ if "integer" in schema_type:
+ schema_type = "integer"
+ else:
+ raise Exception("non-integer list types not supported")
+ result = []
+ if schema_type not in self.types_dict:
+ raise TypeError("generator (%s) doesn't support type: %s"
+ % (self.__class__.__name__, schema_type))
+ for generator in self.types_dict[schema_type]:
+ ret = generator(schema)
+ if ret is not None:
+ if isinstance(ret, list):
+ result.extend(ret)
+ elif isinstance(ret, tuple):
+ result.append(ret)
+ else:
+ raise Exception("generator (%s) returns invalid result: %s"
+ % (generator, ret))
+ return result
+
def generate_valid(self, schema):
return self.generate(schema)[0][1]
diff --git a/tempest/common/isolated_creds.py b/tempest/common/isolated_creds.py
index 02c50e44c..2d16107b8 100644
--- a/tempest/common/isolated_creds.py
+++ b/tempest/common/isolated_creds.py
@@ -13,7 +13,6 @@
# under the License.
import netaddr
-from neutronclient.common import exceptions as n_exc
from tempest import auth
from tempest import clients
@@ -29,15 +28,14 @@ LOG = logging.getLogger(__name__)
class IsolatedCreds(cred_provider.CredentialProvider):
- def __init__(self, name, tempest_client=True, interface='json',
- password='pass', network_resources=None):
- super(IsolatedCreds, self).__init__(name, tempest_client, interface,
- password, network_resources)
+ def __init__(self, name, interface='json', password='pass',
+ network_resources=None):
+ super(IsolatedCreds, self).__init__(name, interface, password,
+ network_resources)
self.network_resources = network_resources
self.isolated_creds = {}
self.isolated_net_resources = {}
self.ports = []
- self.tempest_client = tempest_client
self.interface = interface
self.password = password
self.identity_admin_client, self.network_admin_client = (
@@ -50,96 +48,50 @@ class IsolatedCreds(cred_provider.CredentialProvider):
identity
network
"""
- if self.tempest_client:
- os = clients.AdminManager(interface=self.interface)
- else:
- os = clients.OfficialClientManager(
- auth.get_default_credentials('identity_admin')
- )
+ os = clients.AdminManager(interface=self.interface)
return os.identity_client, os.network_client
def _create_tenant(self, name, description):
- if self.tempest_client:
- _, tenant = self.identity_admin_client.create_tenant(
- name=name, description=description)
- else:
- tenant = self.identity_admin_client.tenants.create(
- name,
- description=description)
+ _, tenant = self.identity_admin_client.create_tenant(
+ name=name, description=description)
return tenant
def _get_tenant_by_name(self, name):
- if self.tempest_client:
- _, tenant = self.identity_admin_client.get_tenant_by_name(name)
- else:
- tenants = self.identity_admin_client.tenants.list()
- for ten in tenants:
- if ten['name'] == name:
- tenant = ten
- break
- else:
- raise exceptions.NotFound('No such tenant')
+ _, tenant = self.identity_admin_client.get_tenant_by_name(name)
return tenant
def _create_user(self, username, password, tenant, email):
- if self.tempest_client:
- _, user = self.identity_admin_client.create_user(username,
- password,
- tenant['id'],
- email)
- else:
- user = self.identity_admin_client.users.create(username, password,
- email,
- tenant_id=tenant.id)
+ _, user = self.identity_admin_client.create_user(
+ username, password, tenant['id'], email)
return user
def _get_user(self, tenant, username):
- if self.tempest_client:
- _, user = self.identity_admin_client.get_user_by_username(
- tenant['id'],
- username)
- else:
- user = self.identity_admin_client.users.get(username)
+ _, user = self.identity_admin_client.get_user_by_username(
+ tenant['id'], username)
return user
def _list_roles(self):
- if self.tempest_client:
- _, roles = self.identity_admin_client.list_roles()
- else:
- roles = self.identity_admin_client.roles.list()
+ _, roles = self.identity_admin_client.list_roles()
return roles
def _assign_user_role(self, tenant, user, role_name):
role = None
try:
roles = self._list_roles()
- if self.tempest_client:
- role = next(r for r in roles if r['name'] == role_name)
- else:
- role = next(r for r in roles if r.name == role_name)
+ role = next(r for r in roles if r['name'] == role_name)
except StopIteration:
msg = 'No "%s" role found' % role_name
raise exceptions.NotFound(msg)
- if self.tempest_client:
- self.identity_admin_client.assign_user_role(tenant['id'],
- user['id'], role['id'])
- else:
- self.identity_admin_client.roles.add_user_role(user.id, role.id,
- tenant.id)
+ self.identity_admin_client.assign_user_role(tenant['id'], user['id'],
+ role['id'])
def _delete_user(self, user):
- if self.tempest_client:
- self.identity_admin_client.delete_user(user)
- else:
- self.identity_admin_client.users.delete(user)
+ self.identity_admin_client.delete_user(user)
def _delete_tenant(self, tenant):
if CONF.service_available.neutron:
self._cleanup_default_secgroup(tenant)
- if self.tempest_client:
- self.identity_admin_client.delete_tenant(tenant)
- else:
- self.identity_admin_client.tenants.delete(tenant)
+ self.identity_admin_client.delete_tenant(tenant)
def _create_creds(self, suffix="", admin=False):
"""Create random credentials under the following schema.
@@ -175,15 +127,9 @@ class IsolatedCreds(cred_provider.CredentialProvider):
return self._get_credentials(user, tenant)
def _get_credentials(self, user, tenant):
- if self.tempest_client:
- user_get = user.get
- tenant_get = tenant.get
- else:
- user_get = user.__dict__.get
- tenant_get = tenant.__dict__.get
return auth.get_credentials(
- username=user_get('name'), user_id=user_get('id'),
- tenant_name=tenant_get('name'), tenant_id=tenant_get('id'),
+ username=user['name'], user_id=user['id'],
+ tenant_name=tenant['name'], tenant_id=tenant['id'],
password=self.password)
def _create_network_resources(self, tenant_id):
@@ -228,45 +174,32 @@ class IsolatedCreds(cred_provider.CredentialProvider):
return network, subnet, router
def _create_network(self, name, tenant_id):
- if self.tempest_client:
- resp, resp_body = self.network_admin_client.create_network(
- name=name, tenant_id=tenant_id)
- else:
- body = {'network': {'tenant_id': tenant_id, 'name': name}}
- resp_body = self.network_admin_client.create_network(body)
+ _, resp_body = self.network_admin_client.create_network(
+ name=name, tenant_id=tenant_id)
return resp_body['network']
def _create_subnet(self, subnet_name, tenant_id, network_id):
- if not self.tempest_client:
- body = {'subnet': {'name': subnet_name, 'tenant_id': tenant_id,
- 'network_id': network_id, 'ip_version': 4}}
- if self.network_resources:
- body['enable_dhcp'] = self.network_resources['dhcp']
base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
for subnet_cidr in base_cidr.subnet(mask_bits):
try:
- if self.tempest_client:
- if self.network_resources:
- resp, resp_body = self.network_admin_client.\
- create_subnet(
- network_id=network_id, cidr=str(subnet_cidr),
- name=subnet_name,
- tenant_id=tenant_id,
- enable_dhcp=self.network_resources['dhcp'],
- ip_version=4)
- else:
- resp, resp_body = self.network_admin_client.\
- create_subnet(network_id=network_id,
- cidr=str(subnet_cidr),
- name=subnet_name,
- tenant_id=tenant_id,
- ip_version=4)
+ if self.network_resources:
+ _, resp_body = self.network_admin_client.\
+ create_subnet(
+ network_id=network_id, cidr=str(subnet_cidr),
+ name=subnet_name,
+ tenant_id=tenant_id,
+ enable_dhcp=self.network_resources['dhcp'],
+ ip_version=4)
else:
- body['subnet']['cidr'] = str(subnet_cidr)
- resp_body = self.network_admin_client.create_subnet(body)
+ _, resp_body = self.network_admin_client.\
+ create_subnet(network_id=network_id,
+ cidr=str(subnet_cidr),
+ name=subnet_name,
+ tenant_id=tenant_id,
+ ip_version=4)
break
- except (n_exc.BadRequest, exceptions.BadRequest) as e:
+ except exceptions.BadRequest as e:
if 'overlaps with another subnet' not in str(e):
raise
else:
@@ -278,25 +211,15 @@ class IsolatedCreds(cred_provider.CredentialProvider):
def _create_router(self, router_name, tenant_id):
external_net_id = dict(
network_id=CONF.network.public_network_id)
- if self.tempest_client:
- resp, resp_body = self.network_admin_client.create_router(
- router_name,
- external_gateway_info=external_net_id,
- tenant_id=tenant_id)
- else:
- body = {'router': {'name': router_name, 'tenant_id': tenant_id,
- 'external_gateway_info': external_net_id,
- 'admin_state_up': True}}
- resp_body = self.network_admin_client.create_router(body)
+ _, resp_body = self.network_admin_client.create_router(
+ router_name,
+ external_gateway_info=external_net_id,
+ tenant_id=tenant_id)
return resp_body['router']
def _add_router_interface(self, router_id, subnet_id):
- if self.tempest_client:
- self.network_admin_client.add_router_interface_with_subnet_id(
- router_id, subnet_id)
- else:
- body = {'subnet_id': subnet_id}
- self.network_admin_client.add_interface_router(router_id, body)
+ self.network_admin_client.add_router_interface_with_subnet_id(
+ router_id, subnet_id)
def get_primary_network(self):
return self.isolated_net_resources.get('primary')[0]
@@ -380,12 +303,8 @@ class IsolatedCreds(cred_provider.CredentialProvider):
def _cleanup_default_secgroup(self, tenant):
net_client = self.network_admin_client
- if self.tempest_client:
- resp, resp_body = net_client.list_security_groups(tenant_id=tenant,
- name="default")
- else:
- resp_body = net_client.list_security_groups(tenant_id=tenant,
- name="default")
+ _, resp_body = net_client.list_security_groups(tenant_id=tenant,
+ name="default")
secgroups_to_delete = resp_body['security_groups']
for secgroup in secgroups_to_delete:
try:
@@ -404,12 +323,8 @@ class IsolatedCreds(cred_provider.CredentialProvider):
if (not self.network_resources or
self.network_resources.get('router')):
try:
- if self.tempest_client:
- net_client.remove_router_interface_with_subnet_id(
- router['id'], subnet['id'])
- else:
- body = {'subnet_id': subnet['id']}
- net_client.remove_interface_router(router['id'], body)
+ net_client.remove_router_interface_with_subnet_id(
+ router['id'], subnet['id'])
except exceptions.NotFound:
LOG.warn('router with name: %s not found for delete' %
router['name'])
@@ -436,3 +351,6 @@ class IsolatedCreds(cred_provider.CredentialProvider):
except exceptions.NotFound:
LOG.warn("tenant with name: %s not found for delete" %
creds.tenant_name)
+
+ def is_multi_user(self):
+ return True
diff --git a/tempest/config.py b/tempest/config.py
index cea9dece4..6a41f247a 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -38,9 +38,28 @@ AuthGroup = [
default='etc/accounts.yaml',
help="Path to the yaml file that contains the list of "
"credentials to use for running tests"),
+ cfg.BoolOpt('allow_tenant_isolation',
+ default=False,
+ help="Allows test cases to create/destroy tenants and "
+ "users. This option requires that OpenStack Identity "
+ "API admin credentials are known. If false, isolated "
+ "test cases and parallel execution, can still be "
+ "achieved configuring a list of test accounts",
+ deprecated_opts=[cfg.DeprecatedOpt('allow_tenant_isolation',
+ group='compute'),
+ cfg.DeprecatedOpt('allow_tenant_isolation',
+ group='orchestration')]),
+ cfg.BoolOpt('locking_credentials_provider',
+ default=False,
+ help="If set to True it enables the Accounts provider, "
+ "which locks credentials to allow for parallel execution "
+ "with pre-provisioned accounts. It can only be used to "
+ "run tests that ensure credentials cleanup happens. "
+ "It requires at least `2 * CONC` distinct accounts "
+ "configured in `test_accounts_file`, with CONC == the "
+ "number of concurrent test processes."),
]
-
identity_group = cfg.OptGroup(name='identity',
title="Keystone Configuration Options")
@@ -129,12 +148,6 @@ compute_group = cfg.OptGroup(name='compute',
title='Compute Service Options')
ComputeGroup = [
- cfg.BoolOpt('allow_tenant_isolation',
- default=False,
- help="Allows test cases to create/destroy tenants and "
- "users. This option enables isolated test cases and "
- "better parallel execution, but also requires that "
- "OpenStack Identity API admin credentials are known."),
cfg.StrOpt('image_ref',
help="Valid primary image reference to be used in tests. "
"This is a required option"),
@@ -514,7 +527,7 @@ VolumeGroup = [
help='Time in seconds between volume availability checks.'),
cfg.IntOpt('build_timeout',
default=300,
- help='Timeout in seconds to wait for a volume to become'
+ help='Timeout in seconds to wait for a volume to become '
'available.'),
cfg.StrOpt('catalog_type',
default='volume',
@@ -666,12 +679,6 @@ OrchestrationGroup = [
choices=['public', 'admin', 'internal',
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the orchestration service."),
- cfg.BoolOpt('allow_tenant_isolation',
- default=False,
- help="Allows test cases to create/destroy tenants and "
- "users. This option enables isolated test cases and "
- "better parallel execution, but also requires that "
- "OpenStack Identity API admin credentials are known."),
cfg.IntOpt('build_interval',
default=1,
help="Time in seconds between build status checks."),
@@ -1141,8 +1148,10 @@ class TempestConfigPrivate(object):
# to remove an issue with the config file up to date checker.
if parse_conf:
config_files.append(path)
-
- cfg.CONF([], project='tempest', default_config_files=config_files)
+ if os.path.isfile(path):
+ cfg.CONF([], project='tempest', default_config_files=config_files)
+ else:
+ cfg.CONF([], project='tempest')
logging.setup('tempest')
LOG = logging.getLogger('tempest')
LOG.info("Using tempest config file %s" % path)
diff --git a/tempest/hacking/checks.py b/tempest/hacking/checks.py
index abc60cb38..6014cff7c 100644
--- a/tempest/hacking/checks.py
+++ b/tempest/hacking/checks.py
@@ -24,7 +24,7 @@ PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
-SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
+SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
@@ -58,15 +58,15 @@ def scenario_tests_need_service_tags(physical_line, filename,
"T104: Scenario tests require a service decorator")
-def no_setupclass_for_unit_tests(physical_line, filename):
+def no_setup_teardown_class_for_tests(physical_line, filename):
if pep8.noqa(physical_line):
return
- if 'tempest/tests' in filename:
- if SETUPCLASS_DEFINITION.match(physical_line):
+ if 'tempest/test.py' not in filename:
+ if SETUP_TEARDOWN_CLASS_DEFINITION.match(physical_line):
return (physical_line.find('def'),
- "T105: setUpClass can not be used with unit tests")
+ "T105: (setUp|tearDown)Class can not be used in tests")
def no_vi_headers(physical_line, line_number, lines):
@@ -106,20 +106,6 @@ def service_tags_not_in_module_path(physical_line, filename):
"T107: service tag should not be in path")
-def no_official_client_manager_in_api_tests(physical_line, filename):
- """Check that the OfficialClientManager isn't used in the api tests
-
- The api tests should not use the official clients.
-
- T108: Can not use OfficialClientManager in the API tests
- """
- if 'tempest/api' in filename:
- if 'OfficialClientManager' in physical_line:
- return (physical_line.find('OfficialClientManager'),
- 'T108: OfficialClientManager can not be used in the api '
- 'tests')
-
-
def no_mutable_default_args(logical_line):
"""Check that mutable object isn't used as default argument
@@ -133,8 +119,7 @@ def no_mutable_default_args(logical_line):
def factory(register):
register(import_no_clients_in_api)
register(scenario_tests_need_service_tags)
- register(no_setupclass_for_unit_tests)
+ register(no_setup_teardown_class_for_tests)
register(no_vi_headers)
register(service_tags_not_in_module_path)
- register(no_official_client_manager_in_api_tests)
register(no_mutable_default_args)
diff --git a/tempest/manager.py b/tempest/manager.py
index 75aee961e..538b619fc 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -63,6 +63,5 @@ class Manager(object):
'Credentials must be specified')
auth_provider_class = self.get_auth_provider_class(credentials)
return auth_provider_class(
- client_type=getattr(self, 'client_type', None),
interface=getattr(self, 'interface', None),
credentials=credentials)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index eb5bdbcd1..383a28da4 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -18,18 +18,13 @@ import logging
import os
import subprocess
-from cinderclient import exceptions as cinder_exceptions
-import glanceclient
import netaddr
-from neutronclient.common import exceptions as exc
-from novaclient import exceptions as nova_exceptions
import six
-from tempest.api.network import common as net_common
from tempest import auth
from tempest import clients
+from tempest.common import credentials
from tempest.common import debug
-from tempest.common import isolated_creds
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
@@ -51,23 +46,15 @@ LOG_cinder_client.addHandler(log.NullHandler())
class ScenarioTest(tempest.test.BaseTestCase):
- """Replaces the OfficialClientTest base class.
-
- Uses tempest own clients as opposed to OfficialClients.
-
- Common differences:
- - replace resource.attribute with resource['attribute']
- - replace resouce.delete with delete_callable(resource['id'])
- - replace local waiters with common / rest_client waiters
- """
+ """Base class for scenario tests. Uses tempest own clients. """
@classmethod
- def setUpClass(cls):
- super(ScenarioTest, cls).setUpClass()
- # Using tempest client for isolated credentials as well
- cls.isolated_creds = isolated_creds.IsolatedCreds(
- cls.__name__, tempest_client=True,
- network_resources=cls.network_resources)
+ def resource_setup(cls):
+ super(ScenarioTest, cls).resource_setup()
+ # TODO(andreaf) Some of the code from this resource_setup could be
+ # moved into `BaseTestCase`
+ cls.isolated_creds = credentials.get_isolated_credentials(
+ cls.__name__, network_resources=cls.network_resources)
cls.manager = clients.Manager(
credentials=cls.credentials()
)
@@ -93,27 +80,19 @@ class ScenarioTest(tempest.test.BaseTestCase):
cls.orchestration_client = cls.manager.orchestration_client
@classmethod
- def _get_credentials(cls, get_creds, ctype):
- if CONF.compute.allow_tenant_isolation:
- creds = get_creds()
- else:
- creds = auth.get_default_credentials(ctype)
- return creds
-
- @classmethod
def credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_primary_creds,
- 'user')
+ return cls.isolated_creds.get_primary_creds()
@classmethod
def alt_credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_alt_creds,
- 'alt_user')
+ return cls.isolated_creds.get_alt_creds()
@classmethod
def admin_credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_admin_creds,
- 'identity_admin')
+ try:
+ return cls.isolated_creds.get_admin_creds()
+ except NotImplementedError:
+ raise cls.skipException('Admin Credentials are not available')
# ## Methods to handle sync and async deletes
@@ -400,6 +379,12 @@ class ScenarioTest(tempest.test.BaseTestCase):
LOG.debug(self.servers_client.get_console_output(server['id'],
length=None))
+ def _log_net_info(self, exc):
+ # network debug is called as part of ssh init
+ if not isinstance(exc, exceptions.SSHTimeout):
+ LOG.debug('Network information on a devstack host')
+ debug.log_net_debug()
+
def create_server_snapshot(self, server, name=None):
# Glance client
_image_client = self.image_client
@@ -457,7 +442,9 @@ class ScenarioTest(tempest.test.BaseTestCase):
if wait:
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
- def ping_ip_address(self, ip_address, should_succeed=True):
+ def ping_ip_address(self, ip_address, should_succeed=True,
+ ping_timeout=None):
+ timeout = ping_timeout or CONF.compute.ping_timeout
cmd = ['ping', '-c1', '-w1', ip_address]
def ping():
@@ -467,13 +454,10 @@ class ScenarioTest(tempest.test.BaseTestCase):
proc.communicate()
return (proc.returncode == 0) == should_succeed
- return tempest.test.call_until_true(
- ping, CONF.compute.ping_timeout, 1)
+ return tempest.test.call_until_true(ping, timeout, 1)
-# TODO(yfried): change this class name to NetworkScenarioTest once client
-# migration is complete
-class NeutronScenarioTest(ScenarioTest):
+class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
This class provide helpers for network scenario tests, using the neutron
API. Helpers from ancestor which use the nova network API are overridden
@@ -490,8 +474,8 @@ class NeutronScenarioTest(ScenarioTest):
raise cls.skipException('Neutron not available')
@classmethod
- def setUpClass(cls):
- super(NeutronScenarioTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(NetworkScenarioTest, cls).resource_setup()
cls.tenant_id = cls.manager.identity_client.tenant_id
cls.check_preconditions()
@@ -605,7 +589,7 @@ class NeutronScenarioTest(ScenarioTest):
def _get_network_by_name(self, network_name):
net = self._list_networks(name=network_name)
- return net_common.AttributeDict(net[0])
+ return net_resources.AttributeDict(net[0])
def _create_floating_ip(self, thing, external_network_id, port_id=None,
client=None):
@@ -638,6 +622,23 @@ class NeutronScenarioTest(ScenarioTest):
self.assertIsNone(floating_ip.port_id)
return floating_ip
+ def check_floating_ip_status(self, floating_ip, status):
+ """Verifies that the floating IP has reached the given status, without waiting.
+
+ :param floating_ip: net_resources.DeletableFloatingIp floating IP
+ whose status is to be checked
+ :param status: target status
+ :raises: AssertionError if status doesn't match
+ """
+ floating_ip.refresh()
+ self.assertEqual(status, floating_ip.status,
+ message="FloatingIP: {fp} is at status: {cst}. "
+ "failed to reach status: {st}"
+ .format(fp=floating_ip, cst=floating_ip.status,
+ st=status))
+ LOG.info("FloatingIP: {fp} is at status: {st}"
+ .format(fp=floating_ip, st=status))
+
def _check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
@@ -682,9 +683,7 @@ class NeutronScenarioTest(ScenarioTest):
ex_msg += ": " + msg
LOG.exception(ex_msg)
self._log_console_output(servers)
- # network debug is called as part of ssh init
- if not isinstance(e, exceptions.SSHTimeout):
- debug.log_net_debug()
+ self._log_net_info(e)
raise
def _check_tenant_network_connectivity(self, server,
@@ -708,9 +707,7 @@ class NeutronScenarioTest(ScenarioTest):
except Exception as e:
LOG.exception('Tenant network connectivity check failed')
self._log_console_output(servers_for_debug)
- # network debug is called as part of ssh init
- if not isinstance(e, exceptions.SSHTimeout):
- debug.log_net_debug()
+ self._log_net_info(e)
raise
def _check_remote_connectivity(self, source, dest, should_succeed=True):
@@ -940,8 +937,8 @@ class NeutronScenarioTest(ScenarioTest):
router_id = CONF.network.public_router_id
network_id = CONF.network.public_network_id
if router_id:
- result = client.show_router(router_id)
- return net_resources.AttributeDict(**result['router'])
+ resp, body = client.show_router(router_id)
+ return net_resources.AttributeDict(**body['router'])
elif network_id:
router = self._create_router(client, tenant_id)
router.set_gateway(network_id)
@@ -992,511 +989,6 @@ class NeutronScenarioTest(ScenarioTest):
return network, subnet, router
-class OfficialClientTest(tempest.test.BaseTestCase):
- """
- Official Client test base class for scenario testing.
-
- Official Client tests are tests that have the following characteristics:
-
- * Test basic operations of an API, typically in an order that
- a regular user would perform those operations
- * Test only the correct inputs and action paths -- no fuzz or
- random input data is sent, only valid inputs.
- * Use only the default client tool for calling an API
- """
-
- @classmethod
- def setUpClass(cls):
- super(OfficialClientTest, cls).setUpClass()
- cls.isolated_creds = isolated_creds.IsolatedCreds(
- cls.__name__, tempest_client=False,
- network_resources=cls.network_resources)
-
- cls.manager = clients.OfficialClientManager(
- credentials=cls.credentials())
- cls.compute_client = cls.manager.compute_client
- cls.image_client = cls.manager.image_client
- cls.baremetal_client = cls.manager.baremetal_client
- cls.identity_client = cls.manager.identity_client
- cls.network_client = cls.manager.network_client
- cls.volume_client = cls.manager.volume_client
- cls.object_storage_client = cls.manager.object_storage_client
- cls.orchestration_client = cls.manager.orchestration_client
- cls.data_processing_client = cls.manager.data_processing_client
- cls.ceilometer_client = cls.manager.ceilometer_client
-
- @classmethod
- def _get_credentials(cls, get_creds, ctype):
- if CONF.compute.allow_tenant_isolation:
- creds = get_creds()
- else:
- creds = auth.get_default_credentials(ctype)
- return creds
-
- @classmethod
- def credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_primary_creds,
- 'user')
-
- @classmethod
- def alt_credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_alt_creds,
- 'alt_user')
-
- @classmethod
- def admin_credentials(cls):
- return cls._get_credentials(cls.isolated_creds.get_admin_creds,
- 'identity_admin')
-
- def setUp(self):
- super(OfficialClientTest, self).setUp()
- self.cleanup_waits = []
- # NOTE(mtreinish) This is safe to do in setUp instead of setUp class
- # because scenario tests in the same test class should not share
- # resources. If resources were shared between test cases then it
- # should be a single scenario test instead of multiples.
-
- # NOTE(yfried): this list is cleaned at the end of test_methods and
- # not at the end of the class
- self.addCleanup(self._wait_for_cleanups)
-
- @staticmethod
- def not_found_exception(exception):
- """
- @return: True if exception is of NotFound type
- """
- NOT_FOUND_LIST = ['NotFound', 'HTTPNotFound']
- return (exception.__class__.__name__ in NOT_FOUND_LIST
- or
- hasattr(exception, 'status_code') and
- exception.status_code == 404)
-
- def delete_wrapper(self, thing):
- """Ignores NotFound exceptions for delete operations.
-
- @param thing: object with delete() method.
- OpenStack resources are assumed to have a delete() method which
- destroys the resource
- """
-
- try:
- thing.delete()
- except Exception as e:
- # If the resource is already missing, mission accomplished.
- if not self.not_found_exception(e):
- raise
-
- def _wait_for_cleanups(self):
- """To handle async delete actions, a list of waits is added
- which will be iterated over as the last step of clearing the
- cleanup queue. That way all the delete calls are made up front
- and the tests won't succeed unless the deletes are eventually
- successful. This is the same basic approach used in the api tests to
- limit cleanup execution time except here it is multi-resource,
- because of the nature of the scenario tests.
- """
- for wait in self.cleanup_waits:
- self.delete_timeout(**wait)
-
- def addCleanup_with_wait(self, things, thing_id,
- error_status='ERROR',
- exc_type=nova_exceptions.NotFound,
- cleanup_callable=None, cleanup_args=None,
- cleanup_kwargs=None):
- """Adds wait for ansyc resource deletion at the end of cleanups
-
- @param things: type of the resource to delete
- @param thing_id:
- @param error_status: see manager.delete_timeout()
- @param exc_type: see manager.delete_timeout()
- @param cleanup_callable: method to load pass to self.addCleanup with
- the following *cleanup_args, **cleanup_kwargs.
- usually a delete method. if not used, will try to use:
- things.delete(thing_id)
- """
- if cleanup_args is None:
- cleanup_args = []
- if cleanup_kwargs is None:
- cleanup_kwargs = {}
- if cleanup_callable is None:
- LOG.debug("no delete method passed. using {rclass}.delete({id}) as"
- " default".format(rclass=things, id=thing_id))
- self.addCleanup(things.delete, thing_id)
- else:
- self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
- wait_dict = {
- 'things': things,
- 'thing_id': thing_id,
- 'error_status': error_status,
- 'not_found_exception': exc_type,
- }
- self.cleanup_waits.append(wait_dict)
-
- def status_timeout(self, things, thing_id, expected_status,
- error_status='ERROR',
- not_found_exception=nova_exceptions.NotFound):
- """
- Given a thing and an expected status, do a loop, sleeping
- for a configurable amount of time, checking for the
- expected status to show. At any time, if the returned
- status of the thing is ERROR, fail out.
- """
- self._status_timeout(things, thing_id,
- expected_status=expected_status,
- error_status=error_status,
- not_found_exception=not_found_exception)
-
- def delete_timeout(self, things, thing_id,
- error_status='ERROR',
- not_found_exception=nova_exceptions.NotFound):
- """
- Given a thing, do a loop, sleeping
- for a configurable amount of time, checking for the
- deleted status to show. At any time, if the returned
- status of the thing is ERROR, fail out.
- """
- self._status_timeout(things,
- thing_id,
- allow_notfound=True,
- error_status=error_status,
- not_found_exception=not_found_exception)
-
- def _status_timeout(self,
- things,
- thing_id,
- expected_status=None,
- allow_notfound=False,
- error_status='ERROR',
- not_found_exception=nova_exceptions.NotFound):
-
- log_status = expected_status if expected_status else ''
- if allow_notfound:
- log_status += ' or NotFound' if log_status != '' else 'NotFound'
-
- def check_status():
- # python-novaclient has resources available to its client
- # that all implement a get() method taking an identifier
- # for the singular resource to retrieve.
- try:
- thing = things.get(thing_id)
- except not_found_exception:
- if allow_notfound:
- return True
- raise
- except Exception as e:
- if allow_notfound and self.not_found_exception(e):
- return True
- raise
-
- new_status = thing.status
-
- # Some components are reporting error status in lower case
- # so case sensitive comparisons can really mess things
- # up.
- if new_status.lower() == error_status.lower():
- message = ("%s failed to get to expected status (%s). "
- "In %s state.") % (thing, expected_status,
- new_status)
- raise exceptions.BuildErrorException(message,
- server_id=thing_id)
- elif new_status == expected_status and expected_status is not None:
- return True # All good.
- LOG.debug("Waiting for %s to get to %s status. "
- "Currently in %s status",
- thing, log_status, new_status)
- if not tempest.test.call_until_true(
- check_status,
- CONF.compute.build_timeout,
- CONF.compute.build_interval):
- message = ("Timed out waiting for thing %s "
- "to become %s") % (thing_id, log_status)
- raise exceptions.TimeoutException(message)
-
- def _create_loginable_secgroup_rule_nova(self, client=None,
- secgroup_id=None):
- if client is None:
- client = self.compute_client
- if secgroup_id is None:
- sgs = client.security_groups.list()
- for sg in sgs:
- if sg.name == 'default':
- secgroup_id = sg.id
-
- # These rules are intended to permit inbound ssh and icmp
- # traffic from all sources, so no group_id is provided.
- # Setting a group_id would only permit traffic from ports
- # belonging to the same security group.
- rulesets = [
- {
- # ssh
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '0.0.0.0/0',
- },
- {
- # ssh -6
- 'ip_protocol': 'tcp',
- 'from_port': 22,
- 'to_port': 22,
- 'cidr': '::/0',
- },
- {
- # ping
- 'ip_protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '0.0.0.0/0',
- },
- {
- # ping6
- 'ip_protocol': 'icmp',
- 'from_port': -1,
- 'to_port': -1,
- 'cidr': '::/0',
- }
- ]
- rules = list()
- for ruleset in rulesets:
- sg_rule = client.security_group_rules.create(secgroup_id,
- **ruleset)
- self.addCleanup(self.delete_wrapper, sg_rule)
- rules.append(sg_rule)
- return rules
-
- def _create_security_group_nova(self, client=None,
- namestart='secgroup-smoke-'):
- if client is None:
- client = self.compute_client
- # Create security group
- sg_name = data_utils.rand_name(namestart)
- sg_desc = sg_name + " description"
- secgroup = client.security_groups.create(sg_name, sg_desc)
- self.assertEqual(secgroup.name, sg_name)
- self.assertEqual(secgroup.description, sg_desc)
- self.addCleanup(self.delete_wrapper, secgroup)
-
- # Add rules to the security group
- self._create_loginable_secgroup_rule_nova(client, secgroup.id)
-
- return secgroup
-
- def rebuild_server(self, server, client=None, image=None,
- preserve_ephemeral=False, wait=True,
- rebuild_kwargs=None):
- if client is None:
- client = self.compute_client
- if image is None:
- image = CONF.compute.image_ref
- rebuild_kwargs = rebuild_kwargs or {}
-
- LOG.debug("Rebuilding server (name: %s, image: %s, preserve eph: %s)",
- server.name, image, preserve_ephemeral)
- server.rebuild(image, preserve_ephemeral=preserve_ephemeral,
- **rebuild_kwargs)
- if wait:
- self.status_timeout(client.servers, server.id, 'ACTIVE')
-
- def create_server(self, client=None, name=None, image=None, flavor=None,
- wait_on_boot=True, wait_on_delete=True,
- create_kwargs=None):
- """Creates VM instance.
-
- @param client: compute client to create the instance
- @param image: image from which to create the instance
- @param wait_on_boot: wait for status ACTIVE before continue
- @param wait_on_delete: force synchronous delete on cleanup
- @param create_kwargs: additional details for instance creation
- @return: client.server object
- """
- if client is None:
- client = self.compute_client
- if name is None:
- name = data_utils.rand_name('scenario-server-')
- if image is None:
- image = CONF.compute.image_ref
- if flavor is None:
- flavor = CONF.compute.flavor_ref
- if create_kwargs is None:
- create_kwargs = {}
-
- fixed_network_name = CONF.compute.fixed_network_name
- if 'nics' not in create_kwargs and fixed_network_name:
- networks = client.networks.list()
- # If several networks found, set the NetID on which to connect the
- # server to avoid the following error "Multiple possible networks
- # found, use a Network ID to be more specific."
- # See Tempest #1250866
- if len(networks) > 1:
- for network in networks:
- if network.label == fixed_network_name:
- create_kwargs['nics'] = [{'net-id': network.id}]
- break
- # If we didn't find the network we were looking for :
- else:
- msg = ("The network on which the NIC of the server must "
- "be connected can not be found : "
- "fixed_network_name=%s. Starting instance without "
- "specifying a network.") % fixed_network_name
- LOG.info(msg)
-
- LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
- name, image, flavor)
- server = client.servers.create(name, image, flavor, **create_kwargs)
- self.assertEqual(server.name, name)
- if wait_on_delete:
- self.addCleanup(self.delete_timeout,
- self.compute_client.servers,
- server.id)
- self.addCleanup_with_wait(self.compute_client.servers, server.id,
- cleanup_callable=self.delete_wrapper,
- cleanup_args=[server])
- if wait_on_boot:
- self.status_timeout(client.servers, server.id, 'ACTIVE')
- # The instance retrieved on creation is missing network
- # details, necessitating retrieval after it becomes active to
- # ensure correct details.
- server = client.servers.get(server.id)
- LOG.debug("Created server: %s", server)
- return server
-
- def create_volume(self, client=None, size=1, name=None,
- snapshot_id=None, imageRef=None, volume_type=None,
- wait_on_delete=True):
- if client is None:
- client = self.volume_client
- if name is None:
- name = data_utils.rand_name('scenario-volume-')
- LOG.debug("Creating a volume (size: %s, name: %s)", size, name)
- volume = client.volumes.create(size=size, display_name=name,
- snapshot_id=snapshot_id,
- imageRef=imageRef,
- volume_type=volume_type)
- if wait_on_delete:
- self.addCleanup(self.delete_timeout,
- self.volume_client.volumes,
- volume.id)
- self.addCleanup_with_wait(self.volume_client.volumes, volume.id,
- exc_type=cinder_exceptions.NotFound)
- self.assertEqual(name, volume.display_name)
- self.status_timeout(client.volumes, volume.id, 'available')
- LOG.debug("Created volume: %s", volume)
- return volume
-
- def create_server_snapshot(self, server, compute_client=None,
- image_client=None, name=None):
- if compute_client is None:
- compute_client = self.compute_client
- if image_client is None:
- image_client = self.image_client
- if name is None:
- name = data_utils.rand_name('scenario-snapshot-')
- LOG.debug("Creating a snapshot image for server: %s", server.name)
- image_id = compute_client.servers.create_image(server, name)
- self.addCleanup_with_wait(self.image_client.images, image_id,
- exc_type=glanceclient.exc.HTTPNotFound)
- self.status_timeout(image_client.images, image_id, 'active')
- snapshot_image = image_client.images.get(image_id)
- self.assertEqual(name, snapshot_image.name)
- LOG.debug("Created snapshot image %s for server %s",
- snapshot_image.name, server.name)
- return snapshot_image
-
- def create_keypair(self, client=None, name=None):
- if client is None:
- client = self.compute_client
- if name is None:
- name = data_utils.rand_name('scenario-keypair-')
- keypair = client.keypairs.create(name)
- self.assertEqual(keypair.name, name)
- self.addCleanup(self.delete_wrapper, keypair)
- return keypair
-
- def get_remote_client(self, server_or_ip, username=None, private_key=None):
- if isinstance(server_or_ip, six.string_types):
- ip = server_or_ip
- else:
- network_name_for_ssh = CONF.compute.network_for_ssh
- ip = server_or_ip.networks[network_name_for_ssh][0]
- if username is None:
- username = CONF.scenario.ssh_user
- if private_key is None:
- private_key = self.keypair.private_key
- linux_client = remote_client.RemoteClient(ip, username,
- pkey=private_key)
- try:
- linux_client.validate_authentication()
- except exceptions.SSHTimeout:
- LOG.exception('ssh connection to %s failed' % ip)
- debug.log_net_debug()
- raise
-
- return linux_client
-
- def _log_console_output(self, servers=None):
- if not CONF.compute_feature_enabled.console_output:
- LOG.debug('Console output not supported, cannot log')
- return
- if not servers:
- servers = self.compute_client.servers.list()
- for server in servers:
- LOG.debug('Console output for %s', server.id)
- LOG.debug(server.get_console_output())
-
- def wait_for_volume_status(self, status):
- volume_id = self.volume.id
- self.status_timeout(
- self.volume_client.volumes, volume_id, status)
-
- def _image_create(self, name, fmt, path, properties=None):
- if properties is None:
- properties = {}
- name = data_utils.rand_name('%s-' % name)
- image_file = open(path, 'rb')
- self.addCleanup(image_file.close)
- params = {
- 'name': name,
- 'container_format': fmt,
- 'disk_format': fmt,
- 'is_public': 'False',
- }
- params.update(properties)
- image = self.image_client.images.create(**params)
- self.addCleanup(self.image_client.images.delete, image)
- self.assertEqual("queued", image.status)
- image.update(data=image_file)
- return image.id
-
- def glance_image_create(self):
- img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
- aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
- ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
- ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
- img_container_format = CONF.scenario.img_container_format
- img_disk_format = CONF.scenario.img_disk_format
- LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, "
- "ami: %s, ari: %s, aki: %s" %
- (img_path, img_container_format, img_disk_format,
- ami_img_path, ari_img_path, aki_img_path))
- try:
- self.image = self._image_create('scenario-img',
- img_container_format,
- img_path,
- properties={'disk_format':
- img_disk_format})
- except IOError:
- LOG.debug("A qcow2 image was not found. Try to get a uec image.")
- kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
- ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
- properties = {
- 'properties': {'kernel_id': kernel, 'ramdisk_id': ramdisk}
- }
- self.image = self._image_create('scenario-ami', 'ami',
- path=ami_img_path,
- properties=properties)
- LOG.debug("image:%s" % self.image)
-
-
# power/provision states as of icehouse
class BaremetalPowerStates(object):
"""Possible power states of an Ironic node."""
@@ -1523,8 +1015,8 @@ class BaremetalProvisionStates(object):
class BaremetalScenarioTest(ScenarioTest):
@classmethod
- def setUpClass(cls):
- super(BaremetalScenarioTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(BaremetalScenarioTest, cls).resource_setup()
if (not CONF.service_available.ironic or
not CONF.baremetal.driver_enabled):
@@ -1655,8 +1147,8 @@ class EncryptionScenarioTest(ScenarioTest):
"""
@classmethod
- def setUpClass(cls):
- super(EncryptionScenarioTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(EncryptionScenarioTest, cls).resource_setup()
cls.admin_volume_types_client = cls.admin_manager.volume_types_client
def _wait_for_volume_status(self, status):
@@ -1696,565 +1188,14 @@ class EncryptionScenarioTest(ScenarioTest):
control_location=control_location)
-class NetworkScenarioTest(OfficialClientTest):
- """
- Base class for network scenario tests
- """
-
- @classmethod
- def check_preconditions(cls):
- if (CONF.service_available.neutron):
- cls.enabled = True
- # verify that neutron_available is telling the truth
- try:
- cls.network_client.list_networks()
- except exc.EndpointNotFound:
- cls.enabled = False
- raise
- else:
- cls.enabled = False
- msg = 'Neutron not available'
- raise cls.skipException(msg)
-
- @classmethod
- def setUpClass(cls):
- super(NetworkScenarioTest, cls).setUpClass()
- cls.tenant_id = cls.manager.identity_client.tenant_id
-
- def _create_network(self, tenant_id, namestart='network-smoke-'):
- name = data_utils.rand_name(namestart)
- body = dict(
- network=dict(
- name=name,
- tenant_id=tenant_id,
- ),
- )
- result = self.network_client.create_network(body=body)
- network = net_common.DeletableNetwork(client=self.network_client,
- **result['network'])
- self.assertEqual(network.name, name)
- self.addCleanup(self.delete_wrapper, network)
- return network
-
- def _list_networks(self, **kwargs):
- nets = self.network_client.list_networks(**kwargs)
- return nets['networks']
-
- def _list_subnets(self, **kwargs):
- subnets = self.network_client.list_subnets(**kwargs)
- return subnets['subnets']
-
- def _list_routers(self, **kwargs):
- routers = self.network_client.list_routers(**kwargs)
- return routers['routers']
-
- def _list_ports(self, **kwargs):
- ports = self.network_client.list_ports(**kwargs)
- return ports['ports']
-
- def _get_tenant_own_network_num(self, tenant_id):
- nets = self._list_networks(tenant_id=tenant_id)
- return len(nets)
-
- def _get_tenant_own_subnet_num(self, tenant_id):
- subnets = self._list_subnets(tenant_id=tenant_id)
- return len(subnets)
-
- def _get_tenant_own_port_num(self, tenant_id):
- ports = self._list_ports(tenant_id=tenant_id)
- return len(ports)
-
- def _create_subnet(self, network, namestart='subnet-smoke-', **kwargs):
- """
- Create a subnet for the given network within the cidr block
- configured for tenant networks.
- """
-
- def cidr_in_use(cidr, tenant_id):
- """
- :return True if subnet with cidr already exist in tenant
- False else
- """
- cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
- return len(cidr_in_use) != 0
-
- tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
- result = None
- # Repeatedly attempt subnet creation with sequential cidr
- # blocks until an unallocated block is found.
- for subnet_cidr in tenant_cidr.subnet(
- CONF.network.tenant_network_mask_bits):
- str_cidr = str(subnet_cidr)
- if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
- continue
-
- body = dict(
- subnet=dict(
- name=data_utils.rand_name(namestart),
- ip_version=4,
- network_id=network.id,
- tenant_id=network.tenant_id,
- cidr=str_cidr,
- ),
- )
- body['subnet'].update(kwargs)
- try:
- result = self.network_client.create_subnet(body=body)
- break
- except exc.NeutronClientException as e:
- is_overlapping_cidr = 'overlaps with another subnet' in str(e)
- if not is_overlapping_cidr:
- raise
- self.assertIsNotNone(result, 'Unable to allocate tenant network')
- subnet = net_common.DeletableSubnet(client=self.network_client,
- **result['subnet'])
- self.assertEqual(subnet.cidr, str_cidr)
- self.addCleanup(self.delete_wrapper, subnet)
- return subnet
-
- def _create_port(self, network, namestart='port-quotatest-'):
- name = data_utils.rand_name(namestart)
- body = dict(
- port=dict(name=name,
- network_id=network.id,
- tenant_id=network.tenant_id))
- result = self.network_client.create_port(body=body)
- self.assertIsNotNone(result, 'Unable to allocate port')
- port = net_common.DeletablePort(client=self.network_client,
- **result['port'])
- self.addCleanup(self.delete_wrapper, port)
- return port
-
- def _get_server_port_id(self, server, ip_addr=None):
- ports = self._list_ports(device_id=server.id, fixed_ip=ip_addr)
- self.assertEqual(len(ports), 1,
- "Unable to determine which port to target.")
- return ports[0]['id']
-
- def _get_network_by_name(self, network_name):
- net = self._list_networks(name=network_name)
- return net_common.AttributeDict(net[0])
-
- def _create_floating_ip(self, thing, external_network_id, port_id=None):
- if not port_id:
- port_id = self._get_server_port_id(thing)
- body = dict(
- floatingip=dict(
- floating_network_id=external_network_id,
- port_id=port_id,
- tenant_id=thing.tenant_id,
- )
- )
- result = self.network_client.create_floatingip(body=body)
- floating_ip = net_common.DeletableFloatingIp(
- client=self.network_client,
- **result['floatingip'])
- self.addCleanup(self.delete_wrapper, floating_ip)
- return floating_ip
-
- def _associate_floating_ip(self, floating_ip, server):
- port_id = self._get_server_port_id(server)
- floating_ip.update(port_id=port_id)
- self.assertEqual(port_id, floating_ip.port_id)
- return floating_ip
-
- def _disassociate_floating_ip(self, floating_ip):
- """
- :param floating_ip: type DeletableFloatingIp
- """
- floating_ip.update(port_id=None)
- self.assertIsNone(floating_ip.port_id)
- return floating_ip
-
- def _create_pool(self, lb_method, protocol, subnet_id):
- """Wrapper utility that returns a test pool."""
- name = data_utils.rand_name('pool-')
- body = {
- "pool": {
- "protocol": protocol,
- "name": name,
- "subnet_id": subnet_id,
- "lb_method": lb_method
- }
- }
- resp = self.network_client.create_pool(body=body)
- pool = net_common.DeletablePool(client=self.network_client,
- **resp['pool'])
- self.assertEqual(pool['name'], name)
- self.addCleanup(self.delete_wrapper, pool)
- return pool
-
- def _create_member(self, address, protocol_port, pool_id):
- """Wrapper utility that returns a test member."""
- body = {
- "member": {
- "protocol_port": protocol_port,
- "pool_id": pool_id,
- "address": address
- }
- }
- resp = self.network_client.create_member(body)
- member = net_common.DeletableMember(client=self.network_client,
- **resp['member'])
- self.addCleanup(self.delete_wrapper, member)
- return member
-
- def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
- """Wrapper utility that returns a test vip."""
- name = data_utils.rand_name('vip-')
- body = {
- "vip": {
- "protocol": protocol,
- "name": name,
- "subnet_id": subnet_id,
- "pool_id": pool_id,
- "protocol_port": protocol_port
- }
- }
- resp = self.network_client.create_vip(body)
- vip = net_common.DeletableVip(client=self.network_client,
- **resp['vip'])
- self.assertEqual(vip['name'], name)
- self.addCleanup(self.delete_wrapper, vip)
- return vip
-
- def _check_vm_connectivity(self, ip_address,
- username=None,
- private_key=None,
- should_connect=True):
- """
- :param ip_address: server to test against
- :param username: server's ssh username
- :param private_key: server's ssh private key to be used
- :param should_connect: True/False indicates positive/negative test
- positive - attempt ping and ssh
- negative - attempt ping and fail if succeed
-
- :raises: AssertError if the result of the connectivity check does
- not match the value of the should_connect param
- """
- if should_connect:
- msg = "Timed out waiting for %s to become reachable" % ip_address
- else:
- msg = "ip address %s is reachable" % ip_address
- self.assertTrue(self.ping_ip_address(ip_address,
- should_succeed=should_connect),
- msg=msg)
- if should_connect:
- # no need to check ssh for negative connectivity
- self.get_remote_client(ip_address, username, private_key)
-
- def _check_public_network_connectivity(self, ip_address, username,
- private_key, should_connect=True,
- msg=None, servers=None):
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- LOG.debug('checking network connections to IP %s with user: %s' %
- (ip_address, username))
- try:
- self._check_vm_connectivity(ip_address,
- username,
- private_key,
- should_connect=should_connect)
- except Exception as e:
- ex_msg = 'Public network connectivity check failed'
- if msg:
- ex_msg += ": " + msg
- LOG.exception(ex_msg)
- self._log_console_output(servers)
- # network debug is called as part of ssh init
- if not isinstance(e, exceptions.SSHTimeout):
- debug.log_net_debug()
- raise
-
- def _check_tenant_network_connectivity(self, server,
- username,
- private_key,
- should_connect=True,
- servers_for_debug=None):
- if not CONF.network.tenant_networks_reachable:
- msg = 'Tenant networks not configured to be reachable.'
- LOG.info(msg)
- return
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- try:
- for net_name, ip_addresses in server.networks.iteritems():
- for ip_address in ip_addresses:
- self._check_vm_connectivity(ip_address,
- username,
- private_key,
- should_connect=should_connect)
- except Exception as e:
- LOG.exception('Tenant network connectivity check failed')
- self._log_console_output(servers_for_debug)
- # network debug is called as part of ssh init
- if not isinstance(e, exceptions.SSHTimeout):
- debug.log_net_debug()
- raise
-
- def _check_remote_connectivity(self, source, dest, should_succeed=True):
- """
- check ping server via source ssh connection
-
- :param source: RemoteClient: an ssh connection from which to ping
- :param dest: and IP to ping against
- :param should_succeed: boolean should ping succeed or not
- :returns: boolean -- should_succeed == ping
- :returns: ping is false if ping failed
- """
- def ping_remote():
- try:
- source.ping_host(dest)
- except exceptions.SSHExecCommandFailed:
- LOG.warn('Failed to ping IP: %s via a ssh connection from: %s.'
- % (dest, source.ssh_client.host))
- return not should_succeed
- return should_succeed
-
- return tempest.test.call_until_true(ping_remote,
- CONF.compute.ping_timeout,
- 1)
-
- def _create_security_group_neutron(self, tenant_id, client=None,
- namestart='secgroup-smoke-'):
- if client is None:
- client = self.network_client
- secgroup = self._create_empty_security_group(namestart=namestart,
- client=client,
- tenant_id=tenant_id)
-
- # Add rules to the security group
- rules = self._create_loginable_secgroup_rule_neutron(secgroup=secgroup)
- for rule in rules:
- self.assertEqual(tenant_id, rule.tenant_id)
- self.assertEqual(secgroup.id, rule.security_group_id)
- return secgroup
-
- def _create_empty_security_group(self, tenant_id, client=None,
- namestart='secgroup-smoke-'):
- """Create a security group without rules.
-
- Default rules will be created:
- - IPv4 egress to any
- - IPv6 egress to any
-
- :param tenant_id: secgroup will be created in this tenant
- :returns: DeletableSecurityGroup -- containing the secgroup created
- """
- if client is None:
- client = self.network_client
- sg_name = data_utils.rand_name(namestart)
- sg_desc = sg_name + " description"
- sg_dict = dict(name=sg_name,
- description=sg_desc)
- sg_dict['tenant_id'] = tenant_id
- body = dict(security_group=sg_dict)
- result = client.create_security_group(body=body)
- secgroup = net_common.DeletableSecurityGroup(
- client=client,
- **result['security_group']
- )
- self.assertEqual(secgroup.name, sg_name)
- self.assertEqual(tenant_id, secgroup.tenant_id)
- self.assertEqual(secgroup.description, sg_desc)
- self.addCleanup(self.delete_wrapper, secgroup)
- return secgroup
-
- def _default_security_group(self, tenant_id, client=None):
- """Get default secgroup for given tenant_id.
-
- :returns: DeletableSecurityGroup -- default secgroup for given tenant
- """
- if client is None:
- client = self.network_client
- sgs = [
- sg for sg in client.list_security_groups().values()[0]
- if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
- ]
- msg = "No default security group for tenant %s." % (tenant_id)
- self.assertTrue(len(sgs) > 0, msg)
- if len(sgs) > 1:
- msg = "Found %d default security groups" % len(sgs)
- raise exc.NeutronClientNoUniqueMatch(msg=msg)
- return net_common.DeletableSecurityGroup(client=client,
- **sgs[0])
-
- def _create_security_group_rule(self, client=None, secgroup=None,
- tenant_id=None, **kwargs):
- """Create a rule from a dictionary of rule parameters.
-
- Create a rule in a secgroup. if secgroup not defined will search for
- default secgroup in tenant_id.
-
- :param secgroup: type DeletableSecurityGroup.
- :param secgroup_id: search for secgroup by id
- default -- choose default secgroup for given tenant_id
- :param tenant_id: if secgroup not passed -- the tenant in which to
- search for default secgroup
- :param kwargs: a dictionary containing rule parameters:
- for example, to allow incoming ssh:
- rule = {
- direction: 'ingress'
- protocol:'tcp',
- port_range_min: 22,
- port_range_max: 22
- }
- """
- if client is None:
- client = self.network_client
- if secgroup is None:
- secgroup = self._default_security_group(tenant_id)
-
- ruleset = dict(security_group_id=secgroup.id,
- tenant_id=secgroup.tenant_id,
- )
- ruleset.update(kwargs)
-
- body = dict(security_group_rule=dict(ruleset))
- sg_rule = client.create_security_group_rule(body=body)
- sg_rule = net_common.DeletableSecurityGroupRule(
- client=client,
- **sg_rule['security_group_rule']
- )
- self.addCleanup(self.delete_wrapper, sg_rule)
- self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
- self.assertEqual(secgroup.id, sg_rule.security_group_id)
-
- return sg_rule
-
- def _create_loginable_secgroup_rule_neutron(self, client=None,
- secgroup=None):
- """These rules are intended to permit inbound ssh and icmp
- traffic from all sources, so no group_id is provided.
- Setting a group_id would only permit traffic from ports
- belonging to the same security group.
- """
-
- if client is None:
- client = self.network_client
- rules = []
- rulesets = [
- dict(
- # ssh
- protocol='tcp',
- port_range_min=22,
- port_range_max=22,
- ),
- dict(
- # ping
- protocol='icmp',
- )
- ]
- for ruleset in rulesets:
- for r_direction in ['ingress', 'egress']:
- ruleset['direction'] = r_direction
- try:
- sg_rule = self._create_security_group_rule(
- client=client, secgroup=secgroup, **ruleset)
- except exc.NeutronClientException as ex:
- # if rule already exist - skip rule and continue
- if not (ex.status_code is 409 and 'Security group rule'
- ' already exists' in ex.message):
- raise ex
- else:
- self.assertEqual(r_direction, sg_rule.direction)
- rules.append(sg_rule)
-
- return rules
-
- def _ssh_to_server(self, server, private_key):
- ssh_login = CONF.compute.image_ssh_user
- return self.get_remote_client(server,
- username=ssh_login,
- private_key=private_key)
-
- def _show_quota_network(self, tenant_id):
- quota = self.network_client.show_quota(tenant_id)
- return quota['quota']['network']
-
- def _show_quota_subnet(self, tenant_id):
- quota = self.network_client.show_quota(tenant_id)
- return quota['quota']['subnet']
-
- def _show_quota_port(self, tenant_id):
- quota = self.network_client.show_quota(tenant_id)
- return quota['quota']['port']
-
- def _get_router(self, tenant_id):
- """Retrieve a router for the given tenant id.
-
- If a public router has been configured, it will be returned.
-
- If a public router has not been configured, but a public
- network has, a tenant router will be created and returned that
- routes traffic to the public network.
- """
- router_id = CONF.network.public_router_id
- network_id = CONF.network.public_network_id
- if router_id:
- result = self.network_client.show_router(router_id)
- return net_common.AttributeDict(**result['router'])
- elif network_id:
- router = self._create_router(tenant_id)
- router.add_gateway(network_id)
- return router
- else:
- raise Exception("Neither of 'public_router_id' or "
- "'public_network_id' has been defined.")
-
- def _create_router(self, tenant_id, namestart='router-smoke-'):
- name = data_utils.rand_name(namestart)
- body = dict(
- router=dict(
- name=name,
- admin_state_up=True,
- tenant_id=tenant_id,
- ),
- )
- result = self.network_client.create_router(body=body)
- router = net_common.DeletableRouter(client=self.network_client,
- **result['router'])
- self.assertEqual(router.name, name)
- self.addCleanup(self.delete_wrapper, router)
- return router
-
- def create_networks(self, tenant_id=None):
- """Create a network with a subnet connected to a router.
-
- The baremetal driver is a special case since all nodes are
- on the same shared network.
-
- :returns: network, subnet, router
- """
- if CONF.baremetal.driver_enabled:
- # NOTE(Shrews): This exception is for environments where tenant
- # credential isolation is available, but network separation is
- # not (the current baremetal case). Likely can be removed when
- # test account mgmt is reworked:
- # https://blueprints.launchpad.net/tempest/+spec/test-accounts
- network = self._get_network_by_name(
- CONF.compute.fixed_network_name)
- router = None
- subnet = None
- else:
- if tenant_id is None:
- tenant_id = self.tenant_id
- network = self._create_network(tenant_id)
- router = self._get_router(tenant_id)
- subnet = self._create_subnet(network)
- subnet.add_to_router(router.id)
- return network, subnet, router
-
-
class OrchestrationScenarioTest(ScenarioTest):
"""
Base class for orchestration scenario tests
"""
@classmethod
- def setUpClass(cls):
- super(OrchestrationScenarioTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(OrchestrationScenarioTest, cls).resource_setup()
if not CONF.service_available.heat:
raise cls.skipException("Heat support is required")
@@ -2298,9 +1239,9 @@ class SwiftScenarioTest(ScenarioTest):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
cls.set_network_resources()
- super(SwiftScenarioTest, cls).setUpClass()
+ super(SwiftScenarioTest, cls).resource_setup()
if not CONF.service_available.swift:
skip_msg = ("%s skipped as swift is not available" %
cls.__name__)
@@ -2310,38 +1251,38 @@ class SwiftScenarioTest(ScenarioTest):
cls.container_client = cls.manager.container_client
cls.object_client = cls.manager.object_client
- def _get_swift_stat(self):
+ def get_swift_stat(self):
"""get swift status for our user account."""
self.account_client.list_account_containers()
LOG.debug('Swift status information obtained successfully')
- def _create_container(self, container_name=None):
+ def create_container(self, container_name=None):
name = container_name or data_utils.rand_name(
'swift-scenario-container')
self.container_client.create_container(name)
# look for the container to assure it is created
- self._list_and_check_container_objects(name)
+ self.list_and_check_container_objects(name)
LOG.debug('Container %s created' % (name))
return name
- def _delete_container(self, container_name):
+ def delete_container(self, container_name):
self.container_client.delete_container(container_name)
LOG.debug('Container %s deleted' % (container_name))
- def _upload_object_to_container(self, container_name, obj_name=None):
+ def upload_object_to_container(self, container_name, obj_name=None):
obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
obj_data = data_utils.arbitrary_string()
self.object_client.create_object(container_name, obj_name, obj_data)
return obj_name, obj_data
- def _delete_object(self, container_name, filename):
+ def delete_object(self, container_name, filename):
self.object_client.delete_object(container_name, filename)
- self._list_and_check_container_objects(container_name,
- not_present_obj=[filename])
+ self.list_and_check_container_objects(container_name,
+ not_present_obj=[filename])
- def _list_and_check_container_objects(self, container_name,
- present_obj=None,
- not_present_obj=None):
+ def list_and_check_container_objects(self, container_name,
+ present_obj=None,
+ not_present_obj=None):
"""
List objects for a given container and assert which are present and
which are not.
@@ -2359,7 +1300,7 @@ class SwiftScenarioTest(ScenarioTest):
for obj in not_present_obj:
self.assertNotIn(obj, object_list)
- def _change_container_acl(self, container_name, acl):
+ def change_container_acl(self, container_name, acl):
metadata_param = {'metadata_prefix': 'x-container-',
'metadata': {'read': acl}}
self.container_client.update_container_metadata(container_name,
@@ -2367,6 +1308,6 @@ class SwiftScenarioTest(ScenarioTest):
resp, _ = self.container_client.list_container_metadata(container_name)
self.assertEqual(resp['x-container-read'], acl)
- def _download_and_verify(self, container_name, obj_name, expected_data):
+ def download_and_verify(self, container_name, obj_name, expected_data):
_, obj = self.object_client.get_object(container_name, obj_name)
self.assertEqual(obj, expected_data)
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index 0ab4311d4..791c564c2 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -83,7 +83,8 @@ class CfnInitScenarioTest(manager.OrchestrationScenarioTest):
server_ip =\
server['addresses'][CONF.compute.network_for_ssh][0]['addr']
- if not self.ping_ip_address(server_ip):
+ if not self.ping_ip_address(
+ server_ip, ping_timeout=CONF.orchestration.build_timeout):
self._log_console_output(servers=[server])
self.fail(
"(CfnInitScenarioTest:test_server_cfn_init) Timed out waiting "
diff --git a/tempest/scenario/test_aggregates_basic_ops.py b/tempest/scenario/test_aggregates_basic_ops.py
index 3ad5c6982..75769ce9b 100644
--- a/tempest/scenario/test_aggregates_basic_ops.py
+++ b/tempest/scenario/test_aggregates_basic_ops.py
@@ -33,8 +33,8 @@ class TestAggregatesBasicOps(manager.ScenarioTest):
Deletes aggregate
"""
@classmethod
- def setUpClass(cls):
- super(TestAggregatesBasicOps, cls).setUpClass()
+ def resource_setup(cls):
+ super(TestAggregatesBasicOps, cls).resource_setup()
cls.aggregates_client = cls.manager.aggregates_client
cls.hosts_client = cls.manager.hosts_client
diff --git a/tempest/scenario/test_dashboard_basic_ops.py b/tempest/scenario/test_dashboard_basic_ops.py
index 4fcc70a8f..f218fb234 100644
--- a/tempest/scenario/test_dashboard_basic_ops.py
+++ b/tempest/scenario/test_dashboard_basic_ops.py
@@ -34,9 +34,9 @@ class TestDashboardBasicOps(manager.ScenarioTest):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
cls.set_network_resources()
- super(TestDashboardBasicOps, cls).setUpClass()
+ super(TestDashboardBasicOps, cls).resource_setup()
if not CONF.service_available.horizon:
raise cls.skipException("Horizon support is required")
diff --git a/tempest/scenario/test_large_ops.py b/tempest/scenario/test_large_ops.py
index 71b8a7f1a..b11193943 100644
--- a/tempest/scenario/test_large_ops.py
+++ b/tempest/scenario/test_large_ops.py
@@ -38,12 +38,12 @@ class TestLargeOpsScenario(manager.ScenarioTest):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
if CONF.scenario.large_ops_number < 1:
raise cls.skipException("large_ops_number not set to multiple "
"instances")
cls.set_network_resources()
- super(TestLargeOpsScenario, cls).setUpClass()
+ super(TestLargeOpsScenario, cls).resource_setup()
def _wait_for_server_status(self, status):
for server in self.servers:
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 5e83ff9c9..9e404c86e 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -18,17 +18,17 @@ import tempfile
import time
import urllib2
-from tempest.api.network import common as net_common
from tempest.common import commands
from tempest import config
from tempest import exceptions
from tempest.scenario import manager
+from tempest.services.network import resources as net_resources
from tempest import test
config = config.CONF
-class TestLoadBalancerBasic(manager.NeutronScenarioTest):
+class TestLoadBalancerBasic(manager.NetworkScenarioTest):
"""
This test checks basic load balancing.
@@ -38,9 +38,8 @@ class TestLoadBalancerBasic(manager.NeutronScenarioTest):
2. SSH to the instance and start two servers
3. Create a load balancer with two members and with ROUND_ROBIN algorithm
associate the VIP with a floating ip
- 4. Send 10 requests to the floating ip and check that they are shared
- between the two servers and that both of them get equal portions
- of the requests
+ 4. Send NUM requests to the floating ip and check that they are shared
+ between the two servers.
"""
@classmethod
@@ -58,8 +57,8 @@ class TestLoadBalancerBasic(manager.NeutronScenarioTest):
raise cls.skipException(msg)
@classmethod
- def setUpClass(cls):
- super(TestLoadBalancerBasic, cls).setUpClass()
+ def resource_setup(cls):
+ super(TestLoadBalancerBasic, cls).resource_setup()
cls.check_preconditions()
cls.servers_keypairs = {}
cls.members = []
@@ -67,6 +66,7 @@ class TestLoadBalancerBasic(manager.NeutronScenarioTest):
cls.server_ips = {}
cls.port1 = 80
cls.port2 = 88
+ cls.num = 50
def setUp(self):
super(TestLoadBalancerBasic, self).setUp()
@@ -89,7 +89,7 @@ class TestLoadBalancerBasic(manager.NeutronScenarioTest):
if tenant_net:
tenant_subnet = self._list_subnets(tenant_id=self.tenant_id)[0]
- self.subnet = net_common.DeletableSubnet(
+ self.subnet = net_resources.DeletableSubnet(
client=self.network_client,
**tenant_subnet)
self.network = tenant_net
@@ -101,7 +101,7 @@ class TestLoadBalancerBasic(manager.NeutronScenarioTest):
# should instead pull a subnet id from config, which is set by
# devstack/admin/etc.
subnet = self._list_subnets(network_id=self.network['id'])[0]
- self.subnet = net_common.AttributeDict(subnet)
+ self.subnet = net_resources.AttributeDict(subnet)
def _create_security_group_for_test(self):
self.security_group = self._create_security_group(
@@ -287,26 +287,21 @@ class TestLoadBalancerBasic(manager.NeutronScenarioTest):
def _check_load_balancing(self):
"""
- 1. Send 10 requests on the floating ip associated with the VIP
- 2. Check that the requests are shared between
- the two servers and that both of them get equal portions
- of the requests
+ 1. Send NUM requests on the floating ip associated with the VIP
+ 2. Check that the requests are shared between the two servers
"""
self._check_connection(self.vip_ip)
- self._send_requests(self.vip_ip, set(["server1", "server2"]))
-
- def _send_requests(self, vip_ip, expected, num_req=10):
- count = 0
- while count < num_req:
- resp = []
- for i in range(len(self.members)):
- resp.append(
- urllib2.urlopen(
- "http://{0}/".format(vip_ip)).read())
- count += 1
- self.assertEqual(expected,
- set(resp))
+ self._send_requests(self.vip_ip, ["server1", "server2"])
+
+ def _send_requests(self, vip_ip, servers):
+ counters = dict.fromkeys(servers, 0)
+ for i in range(self.num):
+ server = urllib2.urlopen("http://{0}/".format(vip_ip)).read()
+ counters[server] += 1
+ # Assert that each member of the pool gets balanced at least once
+ for member, counter in counters.iteritems():
+ self.assertGreater(counter, 0, 'Member %s never balanced' % member)
@test.services('compute', 'network')
def test_load_balancer_basic(self):
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 84e104810..0c48334aa 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -25,7 +25,7 @@ CONF = config.CONF
LOG = logging.getLogger(__name__)
-class TestNetworkAdvancedServerOps(manager.NeutronScenarioTest):
+class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
"""
This test case checks VM connectivity after some advanced
@@ -50,10 +50,10 @@ class TestNetworkAdvancedServerOps(manager.NeutronScenarioTest):
raise cls.skipException(msg)
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
# Create no network resources for these tests.
cls.set_network_resources()
- super(TestNetworkAdvancedServerOps, cls).setUpClass()
+ super(TestNetworkAdvancedServerOps, cls).resource_setup()
def _setup_network_and_servers(self):
self.keypair = self.create_keypair()
@@ -87,11 +87,13 @@ class TestNetworkAdvancedServerOps(manager.NeutronScenarioTest):
self._check_public_network_connectivity(floating_ip, username,
private_key, should_connect,
servers=[self.server])
+ self.check_floating_ip_status(self.floating_ip, 'ACTIVE')
def _wait_server_status_and_check_network_connectivity(self):
self.servers_client.wait_for_server_status(self.server['id'], 'ACTIVE')
self._check_network_connectivity()
+ @test.skip_because(bug="1323658")
@test.services('compute', 'network')
def test_server_connectivity_stop_start(self):
self._setup_network_and_servers()
@@ -139,6 +141,7 @@ class TestNetworkAdvancedServerOps(manager.NeutronScenarioTest):
self.servers_client.resume_server(self.server['id'])
self._wait_server_status_and_check_network_connectivity()
+ @test.skip_because(bug="1323658")
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize is not available.')
@test.services('compute', 'network')
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 10dfb66fc..5d75b64a6 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -34,7 +34,7 @@ Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple',
['floating_ip', 'server'])
-class TestNetworkBasicOps(manager.NeutronScenarioTest):
+class TestNetworkBasicOps(manager.NetworkScenarioTest):
"""
This smoke test suite assumes that Nova has been configured to
@@ -88,10 +88,10 @@ class TestNetworkBasicOps(manager.NeutronScenarioTest):
raise cls.skipException(msg)
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
# Create no network resources for these tests.
cls.set_network_resources()
- super(TestNetworkBasicOps, cls).setUpClass()
+ super(TestNetworkBasicOps, cls).resource_setup()
for ext in ['router', 'security-group']:
if not test.is_extension_enabled(ext, 'network'):
msg = "%s extension not enabled." % ext
@@ -176,16 +176,31 @@ class TestNetworkBasicOps(manager.NeutronScenarioTest):
def _check_public_network_connectivity(self, should_connect=True,
msg=None):
+        """Verifies connectivity to a VM via public network and floating IP,
+        and verifies that the floating IP resource status is correct.
+
+ Floating IP status is verified after connectivity test in order to
+ not add extra waiting and mask racing conditions.
+
+ :param should_connect: bool. determines if connectivity check is
+ negative or positive.
+ :param msg: Failure message to add to Error message. Should describe
+ the place in the test scenario where the method was called,
+ to indicate the context of the failure
+ """
ssh_login = CONF.compute.image_ssh_user
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip.floating_ip_address
private_key = None
+ floatingip_status = 'DOWN'
if should_connect:
private_key = self._get_server_key(server)
+ floatingip_status = 'ACTIVE'
# call the common method in the parent class
super(TestNetworkBasicOps, self)._check_public_network_connectivity(
ip_address, ssh_login, private_key, should_connect, msg,
self.servers)
+ self.check_floating_ip_status(floating_ip, floatingip_status)
def _disassociate_floating_ips(self):
floating_ip, server = self.floating_ip_tuple
@@ -350,6 +365,8 @@ class TestNetworkBasicOps(manager.NeutronScenarioTest):
VMs are created with unique keypair so connectivity also asserts that
floating IP is associated with the new VM instead of the old one
+ Verifies that floating IP status is updated correctly after each change
+
"""
self._setup_network_and_servers()
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 20505eb06..6c360341f 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -26,7 +26,7 @@ CONF = config.CONF
LOG = logging.getLogger(__name__)
-class TestSecurityGroupsBasicOps(manager.NeutronScenarioTest):
+class TestSecurityGroupsBasicOps(manager.NetworkScenarioTest):
"""
This test suite assumes that Nova has been configured to
@@ -138,10 +138,10 @@ class TestSecurityGroupsBasicOps(manager.NeutronScenarioTest):
raise cls.skipException(msg)
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
# Create no network resources for these tests.
cls.set_network_resources()
- super(TestSecurityGroupsBasicOps, cls).setUpClass()
+ super(TestSecurityGroupsBasicOps, cls).resource_setup()
# TODO(mnewby) Consider looking up entities as needed instead
# of storing them as collections on the class.
cls.floating_ips = {}
@@ -241,7 +241,11 @@ class TestSecurityGroupsBasicOps(manager.NeutronScenarioTest):
'security_groups': security_groups,
'tenant_id': tenant.creds.tenant_id
}
- return self.create_server(name=name, create_kwargs=create_kwargs)
+ server = self.create_server(name=name, create_kwargs=create_kwargs)
+ self.assertEqual(
+ sorted([s['name'] for s in security_groups]),
+ sorted([s['name'] for s in server['security_groups']]))
+ return server
def _create_tenant_servers(self, tenant, num=1):
for i in range(num):
diff --git a/tempest/scenario/test_server_advanced_ops.py b/tempest/scenario/test_server_advanced_ops.py
index 463f5aa99..c53e22bd1 100644
--- a/tempest/scenario/test_server_advanced_ops.py
+++ b/tempest/scenario/test_server_advanced_ops.py
@@ -35,9 +35,9 @@ class TestServerAdvancedOps(manager.ScenarioTest):
"""
@classmethod
- def setUpClass(cls):
+ def resource_setup(cls):
cls.set_network_resources()
- super(TestServerAdvancedOps, cls).setUpClass()
+ super(TestServerAdvancedOps, cls).resource_setup()
if CONF.compute.flavor_ref_alt == CONF.compute.flavor_ref:
msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index b38b1a391..eb636f79c 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -35,8 +35,7 @@ class TestServerBasicOps(manager.ScenarioTest):
* Create a security group to control network access in instance
* Add simple permissive rules to the security group
* Launch an instance
- * Pause/unpause the instance
- * Suspend/resume the instance
+ * Perform ssh to instance
* Terminate the instance
"""
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index f2c3dcd3d..8ea281435 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -51,8 +51,8 @@ class TestStampPattern(manager.ScenarioTest):
"""
@classmethod
- def setUpClass(cls):
- super(TestStampPattern, cls).setUpClass()
+ def resource_setup(cls):
+ super(TestStampPattern, cls).resource_setup()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
diff --git a/tempest/scenario/test_swift_basic_ops.py b/tempest/scenario/test_swift_basic_ops.py
index ad74ec4be..9e0fee0c7 100644
--- a/tempest/scenario/test_swift_basic_ops.py
+++ b/tempest/scenario/test_swift_basic_ops.py
@@ -41,13 +41,13 @@ class TestSwiftBasicOps(manager.SwiftScenarioTest):
@test.services('object_storage')
def test_swift_basic_ops(self):
- self._get_swift_stat()
- container_name = self._create_container()
- obj_name, obj_data = self._upload_object_to_container(container_name)
- self._list_and_check_container_objects(container_name, [obj_name])
- self._download_and_verify(container_name, obj_name, obj_data)
- self._delete_object(container_name, obj_name)
- self._delete_container(container_name)
+ self.get_swift_stat()
+ container_name = self.create_container()
+ obj_name, obj_data = self.upload_object_to_container(container_name)
+ self.list_and_check_container_objects(container_name, [obj_name])
+ self.download_and_verify(container_name, obj_name, obj_data)
+ self.delete_object(container_name, obj_name)
+ self.delete_container(container_name)
@test.services('object_storage')
def test_swift_acl_anonymous_download(self):
@@ -58,15 +58,15 @@ class TestSwiftBasicOps(manager.SwiftScenarioTest):
4. Check if the object can be download by anonymous user
5. Delete the object and container
"""
- container_name = self._create_container()
- obj_name, _ = self._upload_object_to_container(container_name)
+ container_name = self.create_container()
+ obj_name, _ = self.upload_object_to_container(container_name)
obj_url = '%s/%s/%s' % (self.object_client.base_url,
container_name, obj_name)
http_client = http.ClosingHttp()
resp, _ = http_client.request(obj_url, 'GET')
self.assertEqual(resp.status, 401)
- self._change_container_acl(container_name, '.r:*')
+ self.change_container_acl(container_name, '.r:*')
resp, _ = http_client.request(obj_url, 'GET')
self.assertEqual(resp.status, 200)
- self._delete_object(container_name, obj_name)
- self._delete_container(container_name)
+ self.delete_object(container_name, obj_name)
+ self.delete_container(container_name)
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index fdda423bf..a20db5c07 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -36,8 +36,8 @@ class TestVolumeBootPattern(manager.ScenarioTest):
* Check written content in the instance booted from snapshot
"""
@classmethod
- def setUpClass(cls):
- super(TestVolumeBootPattern, cls).setUpClass()
+ def resource_setup(cls):
+ super(TestVolumeBootPattern, cls).resource_setup()
if not CONF.volume_feature_enabled.snapshot:
raise cls.skipException("Cinder volume snapshots are disabled")
diff --git a/tempest/scenario/utils.py b/tempest/scenario/utils.py
index e2adb34e3..c20f20ca5 100644
--- a/tempest/scenario/utils.py
+++ b/tempest/scenario/utils.py
@@ -40,33 +40,33 @@ class ImageUtils(object):
self.non_ssh_image_pattern = \
CONF.input_scenario.non_ssh_image_regex
# Setup clients
- ocm = clients.OfficialClientManager(
- auth.get_default_credentials('user'))
- self.client = ocm.compute_client
+ os = clients.Manager()
+ self.images_client = os.images_client
+ self.flavors_client = os.flavors_client
def ssh_user(self, image_id):
- _image = self.client.images.get(image_id)
+ _, _image = self.images_client.get_image(image_id)
for regex, user in self.ssh_users:
# First match wins
- if re.match(regex, _image.name) is not None:
+ if re.match(regex, _image['name']) is not None:
return user
else:
return self.default_ssh_user
def _is_sshable_image(self, image):
return not re.search(pattern=self.non_ssh_image_pattern,
- string=str(image.name))
+ string=str(image['name']))
def is_sshable_image(self, image_id):
- _image = self.client.images.get(image_id)
+ _, _image = self.images_client.get_image(image_id)
return self._is_sshable_image(_image)
def _is_flavor_enough(self, flavor, image):
- return image.minDisk <= flavor.disk
+ return image['minDisk'] <= flavor['disk']
def is_flavor_enough(self, flavor_id, image_id):
- _image = self.client.images.get(image_id)
- _flavor = self.client.flavors.get(flavor_id)
+ _, _image = self.images_client.get_image(image_id)
+ _, _flavor = self.flavors_client.get_flavor_details(flavor_id)
return self._is_flavor_enough(_flavor, _image)
@@ -81,7 +81,7 @@ class InputScenarioUtils(object):
load_tests = testscenarios.load_tests_apply_scenarios
- class TestInputScenario(manager.OfficialClientTest):
+ class TestInputScenario(manager.ScenarioTest):
scenario_utils = utils.InputScenarioUtils()
scenario_flavor = scenario_utils.scenario_flavors
@@ -91,17 +91,18 @@ class InputScenarioUtils(object):
def test_create_server_metadata(self):
name = rand_name('instance')
- _ = self.compute_client.servers.create(name=name,
- flavor=self.flavor_ref,
- image=self.image_ref)
+ self.servers_client.create_server(name=name,
+ flavor_ref=self.flavor_ref,
+ image_ref=self.image_ref)
"""
validchars = "-_.{ascii}{digit}".format(ascii=string.ascii_letters,
digit=string.digits)
def __init__(self):
- ocm = clients.OfficialClientManager(
+ os = clients.Manager(
auth.get_default_credentials('user', fill_in=False))
- self.client = ocm.compute_client
+ self.images_client = os.images_client
+ self.flavors_client = os.flavors_client
self.image_pattern = CONF.input_scenario.image_regex
self.flavor_pattern = CONF.input_scenario.flavor_regex
@@ -118,10 +119,11 @@ class InputScenarioUtils(object):
if not CONF.service_available.glance:
return []
if not hasattr(self, '_scenario_images'):
- images = self.client.images.list(detailed=False)
+ _, images = self.images_client.list_images()
self._scenario_images = [
- (self._normalize_name(i.name), dict(image_ref=i.id))
- for i in images if re.search(self.image_pattern, str(i.name))
+ (self._normalize_name(i['name']), dict(image_ref=i['id']))
+ for i in images if re.search(self.image_pattern,
+ str(i['name']))
]
return self._scenario_images
@@ -131,10 +133,11 @@ class InputScenarioUtils(object):
:return: a scenario with name and uuid of flavors
"""
if not hasattr(self, '_scenario_flavors'):
- flavors = self.client.flavors.list(detailed=False)
+ _, flavors = self.flavors_client.list_flavors()
self._scenario_flavors = [
- (self._normalize_name(f.name), dict(flavor_ref=f.id))
- for f in flavors if re.search(self.flavor_pattern, str(f.name))
+ (self._normalize_name(f['name']), dict(flavor_ref=f['id']))
+ for f in flavors if re.search(self.flavor_pattern,
+ str(f['name']))
]
return self._scenario_flavors
diff --git a/tempest/services/compute/json/images_client.py b/tempest/services/compute/json/images_client.py
index 9877391c4..4af8331f0 100644
--- a/tempest/services/compute/json/images_client.py
+++ b/tempest/services/compute/json/images_client.py
@@ -76,7 +76,7 @@ class ImagesClientJSON(rest_client.RestClient):
def get_image(self, image_id):
"""Returns the details of a single image."""
resp, body = self.get("images/%s" % str(image_id))
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
body = json.loads(body)
self.validate_response(schema.get_image, resp, body)
return resp, body['image']
diff --git a/tempest/services/compute/xml/images_client.py b/tempest/services/compute/xml/images_client.py
index 6b15404e0..94acf3660 100644
--- a/tempest/services/compute/xml/images_client.py
+++ b/tempest/services/compute/xml/images_client.py
@@ -127,7 +127,7 @@ class ImagesClientXML(rest_client.RestClient):
def get_image(self, image_id):
"""Returns the details of a single image."""
resp, body = self.get("images/%s" % str(image_id))
- self.expected_success(200, resp)
+ self.expected_success(200, resp.status)
body = self._parse_image(etree.fromstring(body))
return resp, body
diff --git a/tempest/services/compute/xml/servers_client.py b/tempest/services/compute/xml/servers_client.py
index 156d889e0..06f1b8301 100644
--- a/tempest/services/compute/xml/servers_client.py
+++ b/tempest/services/compute/xml/servers_client.py
@@ -349,8 +349,11 @@ class ServersClientXML(rest_client.RestClient):
networks = xml_utils.Element("networks")
server.append(networks)
for network in kwargs['networks']:
- s = xml_utils.Element("network", uuid=network['uuid'],
- fixed_ip=network['fixed_ip'])
+ if 'fixed_ip' in network:
+ s = xml_utils.Element("network", uuid=network['uuid'],
+ fixed_ip=network['fixed_ip'])
+ else:
+ s = xml_utils.Element("network", uuid=network['uuid'])
networks.append(s)
if 'meta' in kwargs:
diff --git a/tempest/services/identity/v3/json/identity_client.py b/tempest/services/identity/v3/json/identity_client.py
index df424ca54..5ad416cc0 100644
--- a/tempest/services/identity/v3/json/identity_client.py
+++ b/tempest/services/identity/v3/json/identity_client.py
@@ -31,14 +31,11 @@ class IdentityV3ClientJSON(rest_client.RestClient):
self.endpoint_url = 'adminURL'
self.api_version = "v3"
- def create_user(self, user_name, **kwargs):
+ def create_user(self, user_name, password=None, project_id=None,
+ email=None, domain_id='default', **kwargs):
"""Creates a user."""
- password = kwargs.get('password', None)
- email = kwargs.get('email', None)
en = kwargs.get('enabled', True)
- project_id = kwargs.get('project_id', None)
description = kwargs.get('description', None)
- domain_id = kwargs.get('domain_id', 'default')
post_body = {
'project_id': project_id,
'description': description,
diff --git a/tempest/services/identity/v3/xml/identity_client.py b/tempest/services/identity/v3/xml/identity_client.py
index 5c436929f..fdc0a0aef 100644
--- a/tempest/services/identity/v3/xml/identity_client.py
+++ b/tempest/services/identity/v3/xml/identity_client.py
@@ -95,14 +95,11 @@ class IdentityV3ClientXML(rest_client.RestClient):
_json = common.xml_to_json(body)
return _json
- def create_user(self, user_name, **kwargs):
+ def create_user(self, user_name, password=None, project_id=None,
+ email=None, domain_id='default', **kwargs):
"""Creates a user."""
- password = kwargs.get('password', None)
- email = kwargs.get('email', None)
en = kwargs.get('enabled', 'true')
- project_id = kwargs.get('project_id', None)
description = kwargs.get('description', None)
- domain_id = kwargs.get('domain_id', 'default')
post_body = common.Element("user",
xmlns=XMLNS,
name=user_name,
diff --git a/tempest/services/messaging/json/messaging_client.py b/tempest/services/messaging/json/messaging_client.py
index 3e8239908..2794ea9d8 100644
--- a/tempest/services/messaging/json/messaging_client.py
+++ b/tempest/services/messaging/json/messaging_client.py
@@ -48,22 +48,26 @@ class MessagingClientJSON(rest_client.RestClient):
def create_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.put(uri, body=None)
+ self.expected_success(201, resp.status)
return resp, body
def get_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri)
+ self.expected_success(204, resp.status)
return resp, body
def head_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
resp, body = self.head(uri)
+ self.expected_success(204, resp.status)
return resp, body
def delete_queue(self, queue_name):
uri = '{0}/queues/{1}'.format(self.uri_prefix, queue_name)
- resp = self.delete(uri)
- return resp
+ resp, body = self.delete(uri)
+ self.expected_success(204, resp.status)
+ return resp, body
def get_queue_stats(self, queue_name):
uri = '{0}/queues/{1}/stats'.format(self.uri_prefix, queue_name)
@@ -75,12 +79,14 @@ class MessagingClientJSON(rest_client.RestClient):
def get_queue_metadata(self, queue_name):
uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
resp, body = self.get(uri)
+ self.expected_success(200, resp.status)
body = json.loads(body)
return resp, body
def set_queue_metadata(self, queue_name, rbody):
uri = '{0}/queues/{1}/metadata'.format(self.uri_prefix, queue_name)
resp, body = self.put(uri, body=json.dumps(rbody))
+ self.expected_success(204, resp.status)
return resp, body
def post_messages(self, queue_name, rbody):
@@ -90,6 +96,7 @@ class MessagingClientJSON(rest_client.RestClient):
headers=self.headers)
body = json.loads(body)
+ self.validate_response(queues_schema.post_messages, resp, body)
return resp, body
def list_messages(self, queue_name):
@@ -126,7 +133,7 @@ class MessagingClientJSON(rest_client.RestClient):
def delete_messages(self, message_uri):
resp, body = self.delete(message_uri)
- assert(resp['status'] == '204')
+ self.expected_success(204, resp.status)
return resp, body
def post_claims(self, queue_name, rbody, url_params=False):
@@ -152,10 +159,10 @@ class MessagingClientJSON(rest_client.RestClient):
def update_claim(self, claim_uri, rbody):
resp, body = self.patch(claim_uri, body=json.dumps(rbody))
- assert(resp['status'] == '204')
+ self.expected_success(204, resp.status)
return resp, body
def release_claim(self, claim_uri):
resp, body = self.delete(claim_uri)
- assert(resp['status'] == '204')
+ self.expected_success(204, resp.status)
return resp, body
diff --git a/tempest/services/network/json/network_client.py b/tempest/services/network/json/network_client.py
index 16a4f5cb0..78ed56ffc 100644
--- a/tempest/services/network/json/network_client.py
+++ b/tempest/services/network/json/network_client.py
@@ -320,3 +320,30 @@ class NetworkClientJSON(network_client_base.NetworkClientBase):
self.rest_client.expected_success(201, resp.status)
body = json.loads(body)
return resp, body
+
+ def insert_firewall_rule_in_policy(self, firewall_policy_id,
+ firewall_rule_id, insert_after="",
+ insert_before=""):
+ uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix,
+ firewall_policy_id)
+ body = {
+ "firewall_rule_id": firewall_rule_id,
+ "insert_after": insert_after,
+ "insert_before": insert_before
+ }
+ body = json.dumps(body)
+ resp, body = self.put(uri, body)
+ self.rest_client.expected_success(200, resp.status)
+ body = json.loads(body)
+ return resp, body
+
+ def remove_firewall_rule_from_policy(self, firewall_policy_id,
+ firewall_rule_id):
+ uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix,
+ firewall_policy_id)
+ update_body = {"firewall_rule_id": firewall_rule_id}
+ update_body = json.dumps(update_body)
+ resp, body = self.put(uri, update_body)
+ self.rest_client.expected_success(200, resp.status)
+ body = json.loads(body)
+ return resp, body
diff --git a/tempest/services/network/resources.py b/tempest/services/network/resources.py
index 2b182d03e..a84b4d5cd 100644
--- a/tempest/services/network/resources.py
+++ b/tempest/services/network/resources.py
@@ -52,7 +52,7 @@ class DeletableResource(AttributeDict):
return
@abc.abstractmethod
- def show(self):
+ def refresh(self):
return
def __hash__(self):
@@ -62,7 +62,11 @@ class DeletableResource(AttributeDict):
if not hasattr(self, 'status'):
return
- return self.client.wait_for_resource_status(self.show, status)
+ def helper_get():
+ self.refresh()
+ return self
+
+ return self.client.wait_for_resource_status(helper_get, status)
class DeletableNetwork(DeletableResource):
@@ -116,6 +120,12 @@ class DeletableRouter(DeletableResource):
class DeletableFloatingIp(DeletableResource):
+ def refresh(self, *args, **kwargs):
+ _, result = self.client.show_floatingip(self.id,
+ *args,
+ **kwargs)
+ super(DeletableFloatingIp, self).update(**result['floatingip'])
+
def update(self, *args, **kwargs):
_, result = self.client.update_floatingip(self.id,
*args,
@@ -172,7 +182,6 @@ class DeletableVip(DeletableResource):
def delete(self):
self.client.delete_vip(self.id)
- def show(self):
+ def refresh(self):
_, result = self.client.show_vip(self.id)
- super(DeletableVip, self).update(**result['vip'])
- return self
+ super(DeletableVip, self).update(**result['vip']) \ No newline at end of file
diff --git a/tempest/services/network/xml/network_client.py b/tempest/services/network/xml/network_client.py
index 17b1f8ee5..c65390e61 100644
--- a/tempest/services/network/xml/network_client.py
+++ b/tempest/services/network/xml/network_client.py
@@ -25,7 +25,8 @@ class NetworkClientXML(client_base.NetworkClientBase):
# list of plurals used for xml serialization
PLURALS = ['dns_nameservers', 'host_routes', 'allocation_pools',
'fixed_ips', 'extensions', 'extra_dhcp_opts', 'pools',
- 'health_monitors', 'vips', 'members', 'allowed_address_pairs']
+ 'health_monitors', 'vips', 'members', 'allowed_address_pairs',
+ 'firewall_rules']
def get_rest_client(self, auth_provider):
rc = rest_client.RestClient(auth_provider)
@@ -281,6 +282,27 @@ class NetworkClientXML(client_base.NetworkClientBase):
body = _root_tag_fetcher_and_xml_to_json_parse(body)
return resp, body
+ def insert_firewall_rule_in_policy(self, firewall_policy_id,
+ firewall_rule_id, insert_after="",
+ insert_before=""):
+ uri = '%s/fw/firewall_policies/%s/insert_rule' % (self.uri_prefix,
+ firewall_policy_id)
+ rule = common.Element("firewall_rule_id", firewall_rule_id)
+ resp, body = self.put(uri, str(common.Document(rule)))
+ self.rest_client.expected_success(200, resp.status)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
+ def remove_firewall_rule_from_policy(self, firewall_policy_id,
+ firewall_rule_id):
+ uri = '%s/fw/firewall_policies/%s/remove_rule' % (self.uri_prefix,
+ firewall_policy_id)
+ rule = common.Element("firewall_rule_id", firewall_rule_id)
+ resp, body = self.put(uri, str(common.Document(rule)))
+ self.rest_client.expected_success(200, resp.status)
+ body = _root_tag_fetcher_and_xml_to_json_parse(body)
+ return resp, body
+
def _root_tag_fetcher_and_xml_to_json_parse(xml_returned_body):
body = ET.fromstring(xml_returned_body)
diff --git a/tempest/services/volume/json/snapshots_client.py b/tempest/services/volume/json/snapshots_client.py
index f50ba2f39..1f8065baa 100644
--- a/tempest/services/volume/json/snapshots_client.py
+++ b/tempest/services/volume/json/snapshots_client.py
@@ -24,15 +24,16 @@ CONF = config.CONF
LOG = logging.getLogger(__name__)
-class SnapshotsClientJSON(rest_client.RestClient):
- """Client class to send CRUD Volume API requests."""
+class BaseSnapshotsClientJSON(rest_client.RestClient):
+ """Base Client class to send CRUD Volume API requests."""
def __init__(self, auth_provider):
- super(SnapshotsClientJSON, self).__init__(auth_provider)
+ super(BaseSnapshotsClientJSON, self).__init__(auth_provider)
self.service = CONF.volume.catalog_type
self.build_interval = CONF.volume.build_interval
self.build_timeout = CONF.volume.build_timeout
+ self.create_resp = 200
def list_snapshots(self, params=None):
"""List all the snapshot."""
@@ -77,7 +78,7 @@ class SnapshotsClientJSON(rest_client.RestClient):
post_body = json.dumps({'snapshot': post_body})
resp, body = self.post('snapshots', post_body)
body = json.loads(body)
- self.expected_success(200, resp.status)
+ self.expected_success(self.create_resp, resp.status)
return resp, body['snapshot']
def update_snapshot(self, snapshot_id, **kwargs):
@@ -203,3 +204,7 @@ class SnapshotsClientJSON(rest_client.RestClient):
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
self.expected_success(202, resp.status)
return resp, body
+
+
+class SnapshotsClientJSON(BaseSnapshotsClientJSON):
+ """Client class to send CRUD Volume V1 API requests."""
diff --git a/tempest/services/volume/v2/json/snapshots_client.py b/tempest/services/volume/v2/json/snapshots_client.py
new file mode 100644
index 000000000..553176ba4
--- /dev/null
+++ b/tempest/services/volume/v2/json/snapshots_client.py
@@ -0,0 +1,23 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.volume.json import snapshots_client
+
+
+class SnapshotsV2ClientJSON(snapshots_client.BaseSnapshotsClientJSON):
+ """Client class to send CRUD Volume V2 API requests."""
+
+ def __init__(self, auth_provider):
+ super(SnapshotsV2ClientJSON, self).__init__(auth_provider)
+
+ self.api_version = "v2"
+ self.create_resp = 202
diff --git a/tempest/services/volume/v2/xml/snapshots_client.py b/tempest/services/volume/v2/xml/snapshots_client.py
new file mode 100644
index 000000000..b29d86c81
--- /dev/null
+++ b/tempest/services/volume/v2/xml/snapshots_client.py
@@ -0,0 +1,23 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.services.volume.xml import snapshots_client
+
+
+class SnapshotsV2ClientXML(snapshots_client.BaseSnapshotsClientXML):
+ """Client class to send CRUD Volume V2 API requests."""
+
+ def __init__(self, auth_provider):
+ super(SnapshotsV2ClientXML, self).__init__(auth_provider)
+
+ self.api_version = "v2"
+ self.create_resp = 202
diff --git a/tempest/services/volume/xml/snapshots_client.py b/tempest/services/volume/xml/snapshots_client.py
index 763670763..ce98eea1d 100644
--- a/tempest/services/volume/xml/snapshots_client.py
+++ b/tempest/services/volume/xml/snapshots_client.py
@@ -26,16 +26,17 @@ CONF = config.CONF
LOG = logging.getLogger(__name__)
-class SnapshotsClientXML(rest_client.RestClient):
- """Client class to send CRUD Volume API requests."""
+class BaseSnapshotsClientXML(rest_client.RestClient):
+ """Base Client class to send CRUD Volume API requests."""
TYPE = "xml"
def __init__(self, auth_provider):
- super(SnapshotsClientXML, self).__init__(auth_provider)
+ super(BaseSnapshotsClientXML, self).__init__(auth_provider)
self.service = CONF.volume.catalog_type
self.build_interval = CONF.volume.build_interval
self.build_timeout = CONF.volume.build_timeout
+ self.create_resp = 200
def list_snapshots(self, params=None):
"""List all snapshot."""
@@ -90,7 +91,7 @@ class SnapshotsClientXML(rest_client.RestClient):
resp, body = self.post('snapshots',
str(common.Document(snapshot)))
body = common.xml_to_json(etree.fromstring(body))
- self.expected_success(200, resp.status)
+ self.expected_success(self.create_resp, resp.status)
return resp, body
def update_snapshot(self, snapshot_id, **kwargs):
@@ -243,3 +244,7 @@ class SnapshotsClientXML(rest_client.RestClient):
body = common.xml_to_json(etree.fromstring(body))
self.expected_success(202, resp.status)
return resp, body
+
+
+class SnapshotsClientXML(BaseSnapshotsClientXML):
+ """Client class to send CRUD Volume V1 API requests."""
diff --git a/tempest/stress/actions/server_create_destroy.py b/tempest/stress/actions/server_create_destroy.py
index 4a9f0d551..34e299def 100644
--- a/tempest/stress/actions/server_create_destroy.py
+++ b/tempest/stress/actions/server_create_destroy.py
@@ -28,15 +28,13 @@ class ServerCreateDestroyTest(stressaction.StressAction):
def run(self):
name = data_utils.rand_name("instance")
self.logger.info("creating %s" % name)
- resp, server = self.manager.servers_client.create_server(
+ _, server = self.manager.servers_client.create_server(
name, self.image, self.flavor)
server_id = server['id']
- assert(resp.status == 202)
self.manager.servers_client.wait_for_server_status(server_id,
'ACTIVE')
self.logger.info("created %s" % server_id)
self.logger.info("deleting %s" % name)
- resp, _ = self.manager.servers_client.delete_server(server_id)
- assert(resp.status == 204)
+ self.manager.servers_client.delete_server(server_id)
self.manager.servers_client.wait_for_server_termination(server_id)
self.logger.info("deleted %s" % server_id)
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
index d78112c1f..5bc8cacf8 100644
--- a/tempest/stress/actions/ssh_floating.py
+++ b/tempest/stress/actions/ssh_floating.py
@@ -74,19 +74,17 @@ class FloatingStress(stressaction.StressAction):
self.logger.info("creating %s" % name)
vm_args = self.vm_extra_args.copy()
vm_args['security_groups'] = [self.sec_grp]
- resp, server = servers_client.create_server(name, self.image,
- self.flavor,
- **vm_args)
+ _, server = servers_client.create_server(name, self.image,
+ self.flavor,
+ **vm_args)
self.server_id = server['id']
- assert(resp.status == 202)
if self.wait_after_vm_create:
self.manager.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
def _destroy_vm(self):
self.logger.info("deleting %s" % self.server_id)
- resp, _ = self.manager.servers_client.delete_server(self.server_id)
- assert(resp.status == 204) # It cannot be 204 if I had to wait..
+ self.manager.servers_client.delete_server(self.server_id)
self.manager.servers_client.wait_for_server_termination(self.server_id)
self.logger.info("deleted %s" % self.server_id)
diff --git a/tempest/stress/actions/volume_attach_delete.py b/tempest/stress/actions/volume_attach_delete.py
index e0238d3d3..9c4070f1c 100644
--- a/tempest/stress/actions/volume_attach_delete.py
+++ b/tempest/stress/actions/volume_attach_delete.py
@@ -28,10 +28,9 @@ class VolumeAttachDeleteTest(stressaction.StressAction):
# Step 1: create volume
name = data_utils.rand_name("volume")
self.logger.info("creating volume: %s" % name)
- resp, volume = self.manager.volumes_client.create_volume(
+ _, volume = self.manager.volumes_client.create_volume(
size=1,
display_name=name)
- assert(resp.status == 200)
self.manager.volumes_client.wait_for_volume_status(volume['id'],
'available')
self.logger.info("created volume: %s" % volume['id'])
@@ -39,20 +38,18 @@ class VolumeAttachDeleteTest(stressaction.StressAction):
# Step 2: create vm instance
vm_name = data_utils.rand_name("instance")
self.logger.info("creating vm: %s" % vm_name)
- resp, server = self.manager.servers_client.create_server(
+ _, server = self.manager.servers_client.create_server(
vm_name, self.image, self.flavor)
server_id = server['id']
- assert(resp.status == 202)
self.manager.servers_client.wait_for_server_status(server_id, 'ACTIVE')
self.logger.info("created vm %s" % server_id)
# Step 3: attach volume to vm
self.logger.info("attach volume (%s) to vm %s" %
(volume['id'], server_id))
- resp, body = self.manager.servers_client.attach_volume(server_id,
- volume['id'],
- '/dev/vdc')
- assert(resp.status == 200)
+ self.manager.servers_client.attach_volume(server_id,
+ volume['id'],
+ '/dev/vdc')
self.manager.volumes_client.wait_for_volume_status(volume['id'],
'in-use')
self.logger.info("volume (%s) attached to vm %s" %
@@ -60,14 +57,12 @@ class VolumeAttachDeleteTest(stressaction.StressAction):
# Step 4: delete vm
self.logger.info("deleting vm: %s" % vm_name)
- resp, _ = self.manager.servers_client.delete_server(server_id)
- assert(resp.status == 204)
+ self.manager.servers_client.delete_server(server_id)
self.manager.servers_client.wait_for_server_termination(server_id)
self.logger.info("deleted vm: %s" % server_id)
# Step 5: delete volume
self.logger.info("deleting volume: %s" % volume['id'])
- resp, _ = self.manager.volumes_client.delete_volume(volume['id'])
- assert(resp.status == 202)
+ self.manager.volumes_client.delete_volume(volume['id'])
self.manager.volumes_client.wait_for_resource_deletion(volume['id'])
self.logger.info("deleted volume: %s" % volume['id'])
diff --git a/tempest/stress/actions/volume_attach_verify.py b/tempest/stress/actions/volume_attach_verify.py
index 0d3cb2347..a13d89059 100644
--- a/tempest/stress/actions/volume_attach_verify.py
+++ b/tempest/stress/actions/volume_attach_verify.py
@@ -24,12 +24,10 @@ class VolumeVerifyStress(stressaction.StressAction):
def _create_keypair(self):
keyname = data_utils.rand_name("key")
- resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
- assert(resp.status == 200)
+ _, self.key = self.manager.keypairs_client.create_keypair(keyname)
def _delete_keypair(self):
- resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
- assert(resp.status == 202)
+ self.manager.keypairs_client.delete_keypair(self.key['name'])
def _create_vm(self):
self.name = name = data_utils.rand_name("instance")
@@ -38,18 +36,16 @@ class VolumeVerifyStress(stressaction.StressAction):
vm_args = self.vm_extra_args.copy()
vm_args['security_groups'] = [self.sec_grp]
vm_args['key_name'] = self.key['name']
- resp, server = servers_client.create_server(name, self.image,
- self.flavor,
- **vm_args)
+ _, server = servers_client.create_server(name, self.image,
+ self.flavor,
+ **vm_args)
self.server_id = server['id']
- assert(resp.status == 202)
self.manager.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
def _destroy_vm(self):
self.logger.info("deleting server: %s" % self.server_id)
- resp, _ = self.manager.servers_client.delete_server(self.server_id)
- assert(resp.status == 204) # It cannot be 204 if I had to wait..
+ self.manager.servers_client.delete_server(self.server_id)
self.manager.servers_client.wait_for_server_termination(self.server_id)
self.logger.info("deleted server: %s" % self.server_id)
@@ -81,10 +77,9 @@ class VolumeVerifyStress(stressaction.StressAction):
name = data_utils.rand_name("volume")
self.logger.info("creating volume: %s" % name)
volumes_client = self.manager.volumes_client
- resp, self.volume = volumes_client.create_volume(
+ _, self.volume = volumes_client.create_volume(
size=1,
display_name=name)
- assert(resp.status == 200)
volumes_client.wait_for_volume_status(self.volume['id'],
'available')
self.logger.info("created volume: %s" % self.volume['id'])
@@ -92,8 +87,7 @@ class VolumeVerifyStress(stressaction.StressAction):
def _delete_volume(self):
self.logger.info("deleting volume: %s" % self.volume['id'])
volumes_client = self.manager.volumes_client
- resp, _ = volumes_client.delete_volume(self.volume['id'])
- assert(resp.status == 202)
+ volumes_client.delete_volume(self.volume['id'])
volumes_client.wait_for_resource_deletion(self.volume['id'])
self.logger.info("deleted volume: %s" % self.volume['id'])
@@ -193,10 +187,9 @@ class VolumeVerifyStress(stressaction.StressAction):
servers_client = self.manager.servers_client
self.logger.info("attach volume (%s) to vm %s" %
(self.volume['id'], self.server_id))
- resp, body = servers_client.attach_volume(self.server_id,
- self.volume['id'],
- self.part_name)
- assert(resp.status == 200)
+ servers_client.attach_volume(self.server_id,
+ self.volume['id'],
+ self.part_name)
self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
'in-use')
if self.enable_ssh_verify:
@@ -204,9 +197,8 @@ class VolumeVerifyStress(stressaction.StressAction):
% self.server_id)
self.part_wait(self.attach_match_count)
- resp, body = servers_client.detach_volume(self.server_id,
- self.volume['id'])
- assert(resp.status == 202)
+ servers_client.detach_volume(self.server_id,
+ self.volume['id'])
self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
'available')
if self.enable_ssh_verify:
diff --git a/tempest/stress/actions/volume_create_delete.py b/tempest/stress/actions/volume_create_delete.py
index 4e75be07f..b1c5bb709 100644
--- a/tempest/stress/actions/volume_create_delete.py
+++ b/tempest/stress/actions/volume_create_delete.py
@@ -20,14 +20,12 @@ class VolumeCreateDeleteTest(stressaction.StressAction):
name = data_utils.rand_name("volume")
self.logger.info("creating %s" % name)
volumes_client = self.manager.volumes_client
- resp, volume = volumes_client.create_volume(size=1,
- display_name=name)
- assert(resp.status == 200)
+ _, volume = volumes_client.create_volume(size=1,
+ display_name=name)
vol_id = volume['id']
volumes_client.wait_for_volume_status(vol_id, 'available')
self.logger.info("created %s" % volume['id'])
self.logger.info("deleting %s" % name)
- resp, _ = volumes_client.delete_volume(vol_id)
- assert(resp.status == 202)
+ volumes_client.delete_volume(vol_id)
volumes_client.wait_for_resource_deletion(vol_id)
self.logger.info("deleted %s" % vol_id)
diff --git a/tempest/test.py b/tempest/test.py
index 4a22b1b08..1c6265dff 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -29,8 +29,8 @@ import testscenarios
import testtools
from tempest import clients
+from tempest.common import credentials
import tempest.common.generator.valid_generator as valid
-from tempest.common import isolated_creds
from tempest import config
from tempest import exceptions
from tempest.openstack.common import importutils
@@ -66,35 +66,6 @@ def attr(*args, **kwargs):
return decorator
-def safe_setup(f):
- """A decorator used to wrap the setUpClass for cleaning up resources
- when setUpClass failed.
-
- Deprecated, see:
- http://specs.openstack.org/openstack/qa-specs/specs/resource-cleanup.html
- """
- @functools.wraps(f)
- def decorator(cls):
- try:
- f(cls)
- except Exception as se:
- etype, value, trace = sys.exc_info()
- if etype is cls.skipException:
- LOG.info("setUpClass skipped: %s:" % se)
- else:
- LOG.exception("setUpClass failed: %s" % se)
- try:
- cls.tearDownClass()
- except Exception as te:
- LOG.exception("tearDownClass failed: %s" % te)
- try:
- raise etype(value), None, trace
- finally:
- del trace # for avoiding circular refs
-
- return decorator
-
-
def get_service_list():
service_list = {
'compute': CONF.service_available.nova,
@@ -123,7 +94,7 @@ def services(*args, **kwargs):
def decorator(f):
services = ['compute', 'image', 'baremetal', 'volume', 'orchestration',
'network', 'identity', 'object_storage', 'dashboard',
- 'ceilometer', 'data_processing']
+ 'telemetry', 'data_processing']
for service in args:
if service not in services:
raise exceptions.InvalidServiceTag('%s is not a valid '
@@ -299,7 +270,14 @@ class BaseTestCase(BaseDeps):
try:
cls.tearDownClass()
except Exception as te:
- LOG.exception("tearDownClass failed: %s" % te)
+ tetype, _, _ = sys.exc_info()
+ # TODO(gmann): Till we split-up resource_setup &
+ # resource_cleanup in more structural way, log
+ # AttributeError as info instead of exception.
+ if tetype is AttributeError:
+ LOG.info("tearDownClass failed: %s" % te)
+ else:
+ LOG.exception("tearDownClass failed: %s" % te)
try:
raise etype(value), None, trace
finally:
@@ -362,31 +340,20 @@ class BaseTestCase(BaseDeps):
"""
Returns an OpenStack client manager
"""
- cls.isolated_creds = isolated_creds.IsolatedCreds(
- cls.__name__, network_resources=cls.network_resources)
-
force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
- if CONF.compute.allow_tenant_isolation or force_tenant_isolation:
- creds = cls.isolated_creds.get_primary_creds()
- if getattr(cls, '_interface', None):
- os = clients.Manager(credentials=creds,
- interface=cls._interface,
- service=cls._service)
- elif interface:
- os = clients.Manager(credentials=creds,
- interface=interface,
- service=cls._service)
- else:
- os = clients.Manager(credentials=creds,
- service=cls._service)
- else:
- if getattr(cls, '_interface', None):
- os = clients.Manager(interface=cls._interface,
- service=cls._service)
- elif interface:
- os = clients.Manager(interface=interface, service=cls._service)
- else:
- os = clients.Manager(service=cls._service)
+
+ cls.isolated_creds = credentials.get_isolated_credentials(
+ name=cls.__name__, network_resources=cls.network_resources,
+ force_tenant_isolation=force_tenant_isolation,
+ )
+
+ creds = cls.isolated_creds.get_primary_creds()
+ params = dict(credentials=creds, service=cls._service)
+ if getattr(cls, '_interface', None):
+ interface = cls._interface
+ if interface:
+ params['interface'] = interface
+ os = clients.Manager(**params)
return os
@classmethod
@@ -510,13 +477,9 @@ class NegativeAutoTest(BaseTestCase):
"expected_result": expected_result
}))
if schema is not None:
- for name, schema, expected_result in generator.generate(schema):
- if (expected_result is None and
- "default_result_code" in description):
- expected_result = description["default_result_code"]
- scenario_list.append((name,
- {"schema": schema,
- "expected_result": expected_result}))
+ for scenario in generator.generate_scenarios(schema):
+ scenario_list.append((scenario['_negtest_name'],
+ scenario))
LOG.debug(scenario_list)
return scenario_list
@@ -546,8 +509,14 @@ class NegativeAutoTest(BaseTestCase):
"""
LOG.info("Executing %s" % description["name"])
LOG.debug(description)
+ generator = importutils.import_class(
+ CONF.negative.test_generator)()
+ schema = description.get("json-schema", None)
method = description["http-method"]
url = description["url"]
+ expected_result = None
+ if "default_result_code" in description:
+ expected_result = description["default_result_code"]
resources = [self.get_resource(r) for
r in description.get("resources", [])]
@@ -557,13 +526,19 @@ class NegativeAutoTest(BaseTestCase):
# entry (see get_resource).
# We just send a valid json-schema with it
valid_schema = None
- schema = description.get("json-schema", None)
if schema:
valid_schema = \
valid.ValidTestGenerator().generate_valid(schema)
new_url, body = self._http_arguments(valid_schema, url, method)
- elif hasattr(self, "schema"):
- new_url, body = self._http_arguments(self.schema, url, method)
+ elif hasattr(self, "_negtest_name"):
+ schema_under_test = \
+ valid.ValidTestGenerator().generate_valid(schema)
+ local_expected_result = \
+ generator.generate_payload(self, schema_under_test)
+ if local_expected_result is not None:
+ expected_result = local_expected_result
+ new_url, body = \
+ self._http_arguments(schema_under_test, url, method)
else:
raise Exception("testscenarios are not active. Please make sure "
"that your test runner supports the load_tests "
@@ -575,7 +550,7 @@ class NegativeAutoTest(BaseTestCase):
client = self.client
resp, resp_body = client.send_request(method, new_url,
resources, body=body)
- self._check_negative_response(resp.status, resp_body)
+ self._check_negative_response(expected_result, resp.status, resp_body)
def _http_arguments(self, json_dict, url, method):
LOG.debug("dict: %s url: %s method: %s" % (json_dict, url, method))
@@ -586,8 +561,7 @@ class NegativeAutoTest(BaseTestCase):
else:
return url, json.dumps(json_dict)
- def _check_negative_response(self, result, body):
- expected_result = getattr(self, "expected_result", None)
+ def _check_negative_response(self, expected_result, result, body):
self.assertTrue(result >= 400 and result < 500 and result != 413,
"Expected client error, got %s:%s" %
(result, body))
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index a28684e3c..6679c7916 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -86,6 +86,24 @@ class TestDiscovery(base.TestCase):
self.assertIn('v2.0', versions)
self.assertIn('v3.0', versions)
+ def test_verify_api_versions(self):
+ api_services = ['cinder', 'glance', 'keystone', 'nova']
+ fake_os = mock.MagicMock()
+ for svc in api_services:
+ m = 'verify_%s_api_versions' % svc
+ with mock.patch.object(verify_tempest_config, m) as verify_mock:
+ verify_tempest_config.verify_api_versions(fake_os, svc, True)
+ verify_mock.assert_called_once_with(fake_os, True)
+
+ def test_verify_api_versions_not_implemented(self):
+ api_services = ['cinder', 'glance', 'keystone', 'nova']
+ fake_os = mock.MagicMock()
+ for svc in api_services:
+ m = 'verify_%s_api_versions' % svc
+ with mock.patch.object(verify_tempest_config, m) as verify_mock:
+ verify_tempest_config.verify_api_versions(fake_os, 'foo', True)
+ self.assertFalse(verify_mock.called)
+
def test_verify_keystone_api_versions_no_v3(self):
self.useFixture(mockpatch.PatchObject(
verify_tempest_config, '_get_unversioned_endpoint',
diff --git a/tempest/tests/common/utils/test_misc.py b/tempest/tests/common/utils/test_misc.py
index aee9805a5..554027f3c 100644
--- a/tempest/tests/common/utils/test_misc.py
+++ b/tempest/tests/common/utils/test_misc.py
@@ -82,7 +82,7 @@ class TestMisc(base.TestCase):
self.assertEqual(':tearDown', tearDown())
def test_find_test_caller_teardown_class(self):
- def tearDownClass(cls):
+ def tearDownClass(cls): # noqa
return misc.find_test_caller()
self.assertEqual('TestMisc:tearDownClass',
tearDownClass(self.__class__))
diff --git a/tempest/tests/negative/test_negative_auto_test.py b/tempest/tests/negative/test_negative_auto_test.py
index dddd083d6..fb1da43af 100644
--- a/tempest/tests/negative/test_negative_auto_test.py
+++ b/tempest/tests/negative/test_negative_auto_test.py
@@ -43,9 +43,9 @@ class TestNegativeAutoTest(base.TestCase):
def _check_prop_entries(self, result, entry):
entries = [a for a in result if entry in a[0]]
self.assertIsNotNone(entries)
- self.assertIs(len(entries), 2)
+ self.assertGreater(len(entries), 1)
for entry in entries:
- self.assertIsNotNone(entry[1]['schema'])
+ self.assertIsNotNone(entry[1]['_negtest_name'])
def _check_resource_entries(self, result, entry):
entries = [a for a in result if entry in a[0]]
@@ -57,12 +57,11 @@ class TestNegativeAutoTest(base.TestCase):
def test_generate_scenario(self):
scenarios = test.NegativeAutoTest.\
generate_scenario(self.fake_input_desc)
-
self.assertIsInstance(scenarios, list)
for scenario in scenarios:
self.assertIsInstance(scenario, tuple)
self.assertIsInstance(scenario[0], str)
self.assertIsInstance(scenario[1], dict)
- self._check_prop_entries(scenarios, "prop_minRam")
- self._check_prop_entries(scenarios, "prop_minDisk")
+ self._check_prop_entries(scenarios, "minRam")
+ self._check_prop_entries(scenarios, "minDisk")
self._check_resource_entries(scenarios, "inv_res")
diff --git a/tempest/tests/negative/test_negative_generators.py b/tempest/tests/negative/test_negative_generators.py
index a7af61919..2fa69334d 100644
--- a/tempest/tests/negative/test_negative_generators.py
+++ b/tempest/tests/negative/test_negative_generators.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
+
import jsonschema
import mock
@@ -86,15 +88,6 @@ class TestNegativeBasicGenerator(base.TestCase):
class BaseNegativeGenerator(object):
types = ['string', 'integer', 'object']
- fake_input_str = {"type": "string",
- "minLength": 2,
- "maxLength": 8,
- 'results': {'gen_int': 404}}
-
- fake_input_int = {"type": "integer",
- "maximum": 255,
- "minimum": 1}
-
fake_input_obj = {"type": "object",
"properties": {"minRam": {"type": "integer"},
"diskName": {"type": "string"},
@@ -106,31 +99,21 @@ class BaseNegativeGenerator(object):
"type": "not_defined"
}
- def _validate_result(self, data):
- self.assertTrue(isinstance(data, list))
- for t in data:
- self.assertIsInstance(t, tuple)
- self.assertEqual(3, len(t))
- self.assertIsInstance(t[0], str)
-
- def test_generate_string(self):
- result = self.generator.generate(self.fake_input_str)
- self._validate_result(result)
+ class fake_test_class(object):
+ def __init__(self, scenario):
+ for k, v in scenario.iteritems():
+ setattr(self, k, v)
- def test_generate_integer(self):
- result = self.generator.generate(self.fake_input_int)
- self._validate_result(result)
-
- def test_generate_obj(self):
- result = self.generator.generate(self.fake_input_obj)
- self._validate_result(result)
+ def _validate_result(self, valid_schema, invalid_schema):
+ for k, v in valid_schema.iteritems():
+ self.assertTrue(k in invalid_schema)
def test_generator_mandatory_functions(self):
for data_type in self.types:
self.assertIn(data_type, self.generator.types_dict)
def test_generate_with_unknown_type(self):
- self.assertRaises(TypeError, self.generator.generate,
+ self.assertRaises(TypeError, self.generator.generate_payload,
self.unknown_type_schema)
@@ -151,3 +134,16 @@ class TestNegativeNegativeGenerator(base.TestCase, BaseNegativeGenerator):
def setUp(self):
super(TestNegativeNegativeGenerator, self).setUp()
self.generator = negative_generator.NegativeTestGenerator()
+
+ def test_generate_obj(self):
+ schema = self.fake_input_obj
+ scenarios = self.generator.generate_scenarios(schema)
+ for scenario in scenarios:
+ test = self.fake_test_class(scenario)
+ valid_schema = \
+ valid_generator.ValidTestGenerator().generate_valid(schema)
+ schema_under_test = copy.copy(valid_schema)
+ expected_result = \
+ self.generator.generate_payload(test, schema_under_test)
+ self.assertEqual(expected_result, None)
+ self._validate_result(valid_schema, schema_under_test)
diff --git a/tempest/tests/test_decorators.py b/tempest/tests/test_decorators.py
index 12104ecfc..32cefd0c2 100644
--- a/tempest/tests/test_decorators.py
+++ b/tempest/tests/test_decorators.py
@@ -97,6 +97,28 @@ class TestServicesDecorator(BaseDecoratorsTest):
self._test_services_helper, 'compute',
'volume')
+ def test_services_list(self):
+ service_list = test.get_service_list()
+ for service in service_list:
+ try:
+ self._test_services_helper(service)
+ except exceptions.InvalidServiceTag:
+ self.fail('%s is not listed in the valid service tag list'
+ % service)
+ except KeyError:
+            # NOTE(mtreinish): This condition is to test for an entry in
+ # the outer decorator list but not in the service_list dict.
+ # However, because we're looping over the service_list dict
+ # it's unlikely we'll trigger this. So manual review is still
+            # needed for the list in the outer decorator.
+ self.fail('%s is in the list of valid service tags but there '
+ 'is no corresponding entry in the dict returned from'
+ ' get_service_list()' % service)
+ except testtools.TestCase.skipException:
+ # Test didn't raise an exception because of an incorrect list
+ # entry so move onto the next entry
+ continue
+
class TestStressDecorator(BaseDecoratorsTest):
def _test_stresstest_helper(self, expected_frequency='process',
diff --git a/tempest/tests/test_hacking.py b/tempest/tests/test_hacking.py
index 9c13013ed..6857461c8 100644
--- a/tempest/tests/test_hacking.py
+++ b/tempest/tests/test_hacking.py
@@ -47,13 +47,27 @@ class HackingTestCase(base.TestCase):
just assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
- def test_no_setupclass_for_unit_tests(self):
- self.assertTrue(checks.no_setupclass_for_unit_tests(
+ def test_no_setup_teardown_class_for_tests(self):
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls):", './tempest/tests/fake_test.py'))
- self.assertIsNone(checks.no_setupclass_for_unit_tests(
+ self.assertIsNone(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls): # noqa", './tempest/tests/fake_test.py'))
- self.assertFalse(checks.no_setupclass_for_unit_tests(
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls):", './tempest/api/fake_test.py'))
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
+ " def setUpClass(cls):", './tempest/scenario/fake_test.py'))
+ self.assertFalse(checks.no_setup_teardown_class_for_tests(
+ " def setUpClass(cls):", './tempest/test.py'))
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
+ " def tearDownClass(cls):", './tempest/tests/fake_test.py'))
+ self.assertIsNone(checks.no_setup_teardown_class_for_tests(
+ " def tearDownClass(cls): # noqa", './tempest/tests/fake_test.py'))
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
+ " def tearDownClass(cls):", './tempest/api/fake_test.py'))
+ self.assertTrue(checks.no_setup_teardown_class_for_tests(
+ " def tearDownClass(cls):", './tempest/scenario/fake_test.py'))
+ self.assertFalse(checks.no_setup_teardown_class_for_tests(
+ " def tearDownClass(cls):", './tempest/test.py'))
def test_import_no_clients_in_api(self):
for client in checks.PYTHON_CLIENTS:
@@ -100,14 +114,6 @@ class HackingTestCase(base.TestCase):
self.assertFalse(checks.service_tags_not_in_module_path(
"@test.services('compute')", './tempest/api/image/fake_test.py'))
- def test_no_official_client_manager_in_api_tests(self):
- self.assertTrue(checks.no_official_client_manager_in_api_tests(
- "cls.official_client = clients.OfficialClientManager(credentials)",
- "tempest/api/compute/base.py"))
- self.assertFalse(checks.no_official_client_manager_in_api_tests(
- "cls.official_client = clients.OfficialClientManager(credentials)",
- "tempest/scenario/fake_test.py"))
-
def test_no_mutable_default_args(self):
self.assertEqual(1, len(list(checks.no_mutable_default_args(
" def function1(para={}):"))))
diff --git a/tempest/tests/test_tenant_isolation.py b/tempest/tests/test_tenant_isolation.py
index 48c523ec7..27c45c23d 100644
--- a/tempest/tests/test_tenant_isolation.py
+++ b/tempest/tests/test_tenant_isolation.py
@@ -12,12 +12,9 @@
# License for the specific language governing permissions and limitations
# under the License.
-import keystoneclient.v2_0.client as keystoneclient
import mock
-import neutronclient.v2_0.client as neutronclient
from oslo.config import cfg
-from tempest import clients
from tempest.common import http
from tempest.common import isolated_creds
from tempest import config
@@ -52,24 +49,6 @@ class TestTenantIsolation(base.TestCase):
self.assertTrue(isinstance(iso_creds.network_admin_client,
json_network_client.NetworkClientJSON))
- def test_official_client(self):
- self.useFixture(mockpatch.PatchObject(keystoneclient.Client,
- 'authenticate'))
- self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
- '_get_image_client'))
- self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
- '_get_object_storage_client'))
- self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
- '_get_orchestration_client'))
- self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
- '_get_ceilometer_client'))
- iso_creds = isolated_creds.IsolatedCreds('test class',
- tempest_client=False)
- self.assertTrue(isinstance(iso_creds.identity_admin_client,
- keystoneclient.Client))
- self.assertTrue(isinstance(iso_creds.network_admin_client,
- neutronclient.Client))
-
def test_tempest_client_xml(self):
iso_creds = isolated_creds.IsolatedCreds('test class', interface='xml')
self.assertEqual(iso_creds.interface, 'xml')
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index f94d880eb..62073bdc6 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -195,8 +195,8 @@ class BotoTestCase(tempest.test.BaseTestCase):
"""Recommended to use as base class for boto related test."""
@classmethod
- def setUpClass(cls):
- super(BotoTestCase, cls).setUpClass()
+ def resource_setup(cls):
+ super(BotoTestCase, cls).resource_setup()
cls.conclusion = decision_maker()
cls.os = cls.get_client_manager()
# The trash contains cleanup functions and paramaters in tuples
@@ -245,7 +245,7 @@ class BotoTestCase(tempest.test.BaseTestCase):
raise self.failureException, "BotoServerError not raised"
@classmethod
- def tearDownClass(cls):
+ def resource_cleanup(cls):
"""Calls the callables added by addResourceCleanUp,
when you overwrite this function don't forget to call this too.
"""
@@ -264,7 +264,7 @@ class BotoTestCase(tempest.test.BaseTestCase):
finally:
del cls._resource_trash_bin[key]
cls.clear_isolated_creds()
- super(BotoTestCase, cls).tearDownClass()
+ super(BotoTestCase, cls).resource_cleanup()
# NOTE(afazekas): let the super called even on exceptions
# The real exceptions already logged, if the super throws another,
# does not causes hidden issues
@@ -498,7 +498,10 @@ class BotoTestCase(tempest.test.BaseTestCase):
def _volume_state():
volume.update(validate=True)
try:
- if volume.status != "available":
+ # NOTE(gmann): Make sure volume is attached.
+ # Checking status as 'not "available"' is not enough to make
+ # sure volume is attached as it can be in "error" state
+ if volume.status == "in-use":
volume.detach(force=True)
except BaseException:
LOG.exception("Failed to detach volume %s" % volume)
diff --git a/tempest/thirdparty/boto/test_ec2_instance_run.py b/tempest/thirdparty/boto/test_ec2_instance_run.py
index ee904c726..f3f11fde0 100644
--- a/tempest/thirdparty/boto/test_ec2_instance_run.py
+++ b/tempest/thirdparty/boto/test_ec2_instance_run.py
@@ -30,8 +30,8 @@ LOG = logging.getLogger(__name__)
class InstanceRunTest(boto_test.BotoTestCase):
@classmethod
- def setUpClass(cls):
- super(InstanceRunTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(InstanceRunTest, cls).resource_setup()
if not cls.conclusion['A_I_IMAGES_READY']:
raise cls.skipException("".join(("EC2 ", cls.__name__,
": requires ami/aki/ari manifest")))
@@ -200,29 +200,6 @@ class InstanceRunTest(boto_test.BotoTestCase):
instance.terminate()
self.assertInstanceStateWait(instance, '_GONE')
- def test_run_reboot_terminate_instance(self):
- # EC2 run, await till it reaches to running state, then reboot,
- # and wait untill its state is running, and then terminate
- image_ami = self.ec2_client.get_image(self.images["ami"]
- ["image_id"])
- reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
- ramdisk_id=self.images["ari"]["image_id"],
- instance_type=self.instance_type)
-
- self.assertEqual(1, len(reservation.instances))
-
- instance = reservation.instances[0]
- if instance.state != "running":
- self.assertInstanceStateWait(instance, "running")
-
- instance.reboot()
- if instance.state != "running":
- self.assertInstanceStateWait(instance, "running")
- LOG.debug("Instance rebooted - state: %s", instance.state)
-
- instance.terminate()
- self.assertInstanceStateWait(instance, '_GONE')
-
def test_compute_with_volumes(self):
# EC2 1. integration test (not strict)
image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
diff --git a/tempest/thirdparty/boto/test_ec2_keys.py b/tempest/thirdparty/boto/test_ec2_keys.py
index 698e3e149..c3e1e2a2d 100644
--- a/tempest/thirdparty/boto/test_ec2_keys.py
+++ b/tempest/thirdparty/boto/test_ec2_keys.py
@@ -26,8 +26,8 @@ def compare_key_pairs(a, b):
class EC2KeysTest(boto_test.BotoTestCase):
@classmethod
- def setUpClass(cls):
- super(EC2KeysTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(EC2KeysTest, cls).resource_setup()
cls.client = cls.os.ec2api_client
cls.ec = cls.ec2_error_code
diff --git a/tempest/thirdparty/boto/test_ec2_network.py b/tempest/thirdparty/boto/test_ec2_network.py
index 792dde37a..a75fb7b26 100644
--- a/tempest/thirdparty/boto/test_ec2_network.py
+++ b/tempest/thirdparty/boto/test_ec2_network.py
@@ -20,8 +20,8 @@ from tempest.thirdparty.boto import test as boto_test
class EC2NetworkTest(boto_test.BotoTestCase):
@classmethod
- def setUpClass(cls):
- super(EC2NetworkTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(EC2NetworkTest, cls).resource_setup()
cls.client = cls.os.ec2api_client
# Note(afazekas): these tests for things duable without an instance
diff --git a/tempest/thirdparty/boto/test_ec2_security_groups.py b/tempest/thirdparty/boto/test_ec2_security_groups.py
index 7d9bdab57..fb3d32b1d 100644
--- a/tempest/thirdparty/boto/test_ec2_security_groups.py
+++ b/tempest/thirdparty/boto/test_ec2_security_groups.py
@@ -20,8 +20,8 @@ from tempest.thirdparty.boto import test as boto_test
class EC2SecurityGroupTest(boto_test.BotoTestCase):
@classmethod
- def setUpClass(cls):
- super(EC2SecurityGroupTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(EC2SecurityGroupTest, cls).resource_setup()
cls.client = cls.os.ec2api_client
def test_create_authorize_security_group(self):
diff --git a/tempest/thirdparty/boto/test_ec2_volumes.py b/tempest/thirdparty/boto/test_ec2_volumes.py
index b50c6b097..9cee8a47f 100644
--- a/tempest/thirdparty/boto/test_ec2_volumes.py
+++ b/tempest/thirdparty/boto/test_ec2_volumes.py
@@ -29,8 +29,8 @@ def compare_volumes(a, b):
class EC2VolumesTest(boto_test.BotoTestCase):
@classmethod
- def setUpClass(cls):
- super(EC2VolumesTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(EC2VolumesTest, cls).resource_setup()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
diff --git a/tempest/thirdparty/boto/test_s3_buckets.py b/tempest/thirdparty/boto/test_s3_buckets.py
index 1576492fb..342fc0ed5 100644
--- a/tempest/thirdparty/boto/test_s3_buckets.py
+++ b/tempest/thirdparty/boto/test_s3_buckets.py
@@ -20,8 +20,8 @@ from tempest.thirdparty.boto import test as boto_test
class S3BucketsTest(boto_test.BotoTestCase):
@classmethod
- def setUpClass(cls):
- super(S3BucketsTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(S3BucketsTest, cls).resource_setup()
cls.client = cls.os.s3_client
def test_create_and_get_delete_bucket(self):
diff --git a/tempest/thirdparty/boto/test_s3_ec2_images.py b/tempest/thirdparty/boto/test_s3_ec2_images.py
index 389e25cd2..f5dec955d 100644
--- a/tempest/thirdparty/boto/test_s3_ec2_images.py
+++ b/tempest/thirdparty/boto/test_s3_ec2_images.py
@@ -26,8 +26,8 @@ CONF = config.CONF
class S3ImagesTest(boto_test.BotoTestCase):
@classmethod
- def setUpClass(cls):
- super(S3ImagesTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(S3ImagesTest, cls).resource_setup()
if not cls.conclusion['A_I_IMAGES_READY']:
raise cls.skipException("".join(("EC2 ", cls.__name__,
": requires ami/aki/ari manifest")))
diff --git a/tempest/thirdparty/boto/test_s3_objects.py b/tempest/thirdparty/boto/test_s3_objects.py
index db3c1cf01..43774c219 100644
--- a/tempest/thirdparty/boto/test_s3_objects.py
+++ b/tempest/thirdparty/boto/test_s3_objects.py
@@ -24,8 +24,8 @@ from tempest.thirdparty.boto import test as boto_test
class S3BucketsTest(boto_test.BotoTestCase):
@classmethod
- def setUpClass(cls):
- super(S3BucketsTest, cls).setUpClass()
+ def resource_setup(cls):
+ super(S3BucketsTest, cls).resource_setup()
cls.client = cls.os.s3_client
def test_create_get_delete_object(self):
diff --git a/test-requirements.txt b/test-requirements.txt
index cd8154b05..ba70259b2 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,10 +1,13 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
hacking>=0.9.2,<0.10
# needed for doc build
sphinx>=1.1.2,!=1.2.0,<1.3
python-subunit>=0.0.18
-oslosphinx
+oslosphinx>=2.2.0 # Apache-2.0
mox>=0.5.3
mock>=1.0
coverage>=3.6
-oslotest
-stevedore>=0.14
+oslotest>=1.1.0 # Apache-2.0
+stevedore>=1.0.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index 492c4f6be..cab59a83a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,6 +8,8 @@ setenv = VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/test_discover
usedevelop = True
install_command = pip install -U {opts} {packages}
+whitelist_externals = bash
+
[testenv:py26]
setenv = OS_TEST_PATH=./tempest/tests
@@ -17,6 +19,11 @@ commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
setenv = OS_TEST_PATH=./tempest/tests
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
+[testenv:py34]
+setenv = OS_TEST_PATH=./tempest/tests
+ PYTHONHASHSEED=0
+commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
+
[testenv:py27]
setenv = OS_TEST_PATH=./tempest/tests
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'