Diffstat (limited to 'contrib')
-rw-r--r--  contrib/.testr.conf                                             4
-rw-r--r--  contrib/bin/run_tests.py                                       39
-rw-r--r--  contrib/rackspace/heat/engine/plugins/clients.py               12
-rw-r--r--  contrib/rackspace/heat/engine/plugins/cloud_dns.py              9
-rw-r--r--  contrib/rackspace/heat/engine/plugins/cloud_loadbalancer.py    6
-rw-r--r--  contrib/rackspace/heat/engine/plugins/cloud_server.py          77
-rw-r--r--  contrib/rackspace/heat/tests/test_rackspace_cloud_server.py   16
7 files changed, 116 insertions, 47 deletions
diff --git a/contrib/.testr.conf b/contrib/.testr.conf
new file mode 100644
index 000000000..8396da395
--- /dev/null
+++ b/contrib/.testr.conf
@@ -0,0 +1,4 @@
+[DEFAULT]
+test_command=python -m subunit.run discover -s contrib $LISTOPT $IDOPTION
+test_id_option=--load-list $IDFILE
+test_list_option=--list
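
The three keys above are the standard testr hooks: test_command is the template testr expands when running or listing tests, test_list_option is substituted for $LISTOPT when enumerating tests, and test_id_option is substituted for $IDOPTION when feeding a saved list of test IDs back in. A minimal sketch of that expansion, using plain string substitution rather than testr itself (the /tmp path is illustrative):

    from string import Template

    # Mirrors test_command in the .testr.conf above.
    test_command = Template(
        "python -m subunit.run discover -s contrib $LISTOPT $IDOPTION")

    # Listing tests: testr fills in test_list_option for $LISTOPT.
    print(test_command.safe_substitute(LISTOPT="--list", IDOPTION=""))

    # Running a selected set: testr fills in test_id_option for $IDOPTION.
    print(test_command.safe_substitute(LISTOPT="",
                                       IDOPTION="--load-list /tmp/test-ids"))
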
diff --git a/contrib/bin/run_tests.py b/contrib/bin/run_tests.py
new file mode 100644
index 000000000..4b5290e5f
--- /dev/null
+++ b/contrib/bin/run_tests.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import sys
+
+from testrepository import commands
+
+CONTRIB_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__),
+ os.pardir))
+TESTR_PATH = os.path.join(CONTRIB_PATH, ".testrepository")
+
+
+def _run_testr(*args):
+ return commands.run_argv([sys.argv[0]] + list(args),
+ sys.stdin, sys.stdout, sys.stderr)
+
+# initialize the contrib test repository if needed
+if not os.path.isdir(TESTR_PATH):
+ _run_testr('init', '-d', CONTRIB_PATH)
+if not _run_testr('run', '-d', CONTRIB_PATH, '--parallel'):
+ cur_dir = os.getcwd()
+ os.chdir(CONTRIB_PATH)
+ print("Slowest Contrib Tests")
+ _run_testr("slowest")
+ os.chdir(cur_dir)
+else:
+ sys.exit(1)
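
commands.run_argv returns the subcommand's exit status, so the `if not _run_testr('run', ...)` branch is the success path (exit code 0) and failures fall through to sys.exit(1). A condensed sketch of the same flow, assuming testrepository is installed and the script is run from the directory that holds .testrepository:

    import os
    import sys

    from testrepository import commands

    def _testr(*args):
        # run_argv(argv, stdin, stdout, stderr) returns the exit status.
        return commands.run_argv(['testr'] + list(args),
                                 sys.stdin, sys.stdout, sys.stderr)

    if not os.path.isdir('.testrepository'):
        _testr('init')
    sys.exit(_testr('run', '--parallel'))
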
diff --git a/contrib/rackspace/heat/engine/plugins/clients.py b/contrib/rackspace/heat/engine/plugins/clients.py
index 08c03181b..45ca01d4a 100644
--- a/contrib/rackspace/heat/engine/plugins/clients.py
+++ b/contrib/rackspace/heat/engine/plugins/clients.py
@@ -26,18 +26,18 @@ from heat.openstack.common.gettextutils import _
try:
import pyrax
except ImportError:
- logger.info('pyrax not available')
+ logger.info(_('pyrax not available'))
try:
from swiftclient import client as swiftclient
except ImportError:
swiftclient = None
- logger.info('swiftclient not available')
+ logger.info(_('swiftclient not available'))
try:
from ceilometerclient.v2 import client as ceilometerclient
except ImportError:
ceilometerclient = None
- logger.info('ceilometerclient not available')
+ logger.info(_('ceilometerclient not available'))
cloud_opts = [
cfg.StrOpt('region_name',
@@ -82,7 +82,7 @@ class Clients(clients.OpenStackClients):
actually a valid option to change within pyrax.
'''
if service_type is not "compute":
- raise ValueError("service_type should be compute.")
+ raise ValueError(_("service_type should be compute."))
return self._get_client(service_type)
def neutron(self):
@@ -96,7 +96,7 @@ class Clients(clients.OpenStackClients):
def __authenticate(self):
pyrax.set_setting("identity_type", "keystone")
pyrax.set_setting("auth_endpoint", self.context.auth_url)
- logger.info("Authenticating with username:%s" %
+ logger.info(_("Authenticating username:%s") %
self.context.username)
self.pyrax = pyrax.auth_with_token(self.context.auth_token,
tenant_id=self.context.tenant_id,
@@ -105,5 +105,5 @@ class Clients(clients.OpenStackClients):
or None))
if not self.pyrax:
raise exception.AuthorizationFailure("No services available.")
- logger.info("User %s authenticated successfully."
+ logger.info(_("User %s authenticated successfully.")
% self.context.username)
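
The pattern here, wrapping the message literal in _() while keeping the substitution outside the marker, is what the rest of this changeset applies to the DNS, load balancer and cloud server plugins as well. A minimal standalone sketch, using gettext.gettext in place of heat.openstack.common.gettextutils._ and illustrative values:

    import logging
    from gettext import gettext as _

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    username = "demo"  # illustrative value
    logger.info(_("Authenticating username:%s") % username)

    # With more than one substitution, named placeholders let translators
    # reorder the values (see the cloud_server.py change below).
    logger.info(_("Could not determine the %(ip)s IP of %(image)s.")
                % {'ip': 'public', 'image': 'fedora'})
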
diff --git a/contrib/rackspace/heat/engine/plugins/cloud_dns.py b/contrib/rackspace/heat/engine/plugins/cloud_dns.py
index e690f22e5..6f4b14c6d 100644
--- a/contrib/rackspace/heat/engine/plugins/cloud_dns.py
+++ b/contrib/rackspace/heat/engine/plugins/cloud_dns.py
@@ -29,6 +29,7 @@ from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.openstack.common import log as logging
+from heat.openstack.common.gettextutils import _
logger = logging.getLogger(__name__)
@@ -155,7 +156,7 @@ class CloudDns(resource.Resource):
"""
# There is no check_create_complete as the pyrax create for DNS is
# synchronous.
- logger.debug("CloudDns handle_create called.")
+ logger.debug(_("CloudDns handle_create called."))
args = dict((k, v) for k, v in self.properties.items())
for rec in args[self.RECORDS] or {}:
# only pop the priority for the correct types
@@ -169,9 +170,9 @@ class CloudDns(resource.Resource):
"""
Update a Rackspace CloudDns Instance.
"""
- logger.debug("CloudDns handle_update called.")
+ logger.debug(_("CloudDns handle_update called."))
if not self.resource_id:
- raise exception.Error('Update called on a non-existent domain')
+ raise exception.Error(_('Update called on a non-existent domain'))
if prop_diff:
dom = self.cloud_dns().get(self.resource_id)
@@ -194,7 +195,7 @@ class CloudDns(resource.Resource):
"""
Delete a Rackspace CloudDns Instance.
"""
- logger.debug("CloudDns handle_delete called.")
+ logger.debug(_("CloudDns handle_delete called."))
if self.resource_id:
try:
dom = self.cloud_dns().get(self.resource_id)
diff --git a/contrib/rackspace/heat/engine/plugins/cloud_loadbalancer.py b/contrib/rackspace/heat/engine/plugins/cloud_loadbalancer.py
index b5d48cb17..da3e75523 100644
--- a/contrib/rackspace/heat/engine/plugins/cloud_loadbalancer.py
+++ b/contrib/rackspace/heat/engine/plugins/cloud_loadbalancer.py
@@ -473,7 +473,7 @@ class CloudLoadBalancer(resource.Resource):
lb_name = (self.properties.get(self.NAME) or
self.physical_resource_name())
- logger.debug('Creating loadbalancer: %s' % {lb_name: lb_body})
+ logger.debug(_("Creating loadbalancer: %s") % {lb_name: lb_body})
loadbalancer = self.clb.create(lb_name, **lb_body)
self.resource_id_set(str(loadbalancer.id))
@@ -516,8 +516,8 @@ class CloudLoadBalancer(resource.Resource):
updated = new_set.intersection(old_set)
if len(current_nodes) + len(added) - len(deleted) < 1:
- raise ValueError("The loadbalancer:%s requires at least one "
- "node." % self.name)
+ raise ValueError(_("The loadbalancer:%s requires at least one "
+ "node.") % self.name)
"""
Add loadbalancers in the new map that are not in the old map.
Add before delete to avoid deleting the last node and getting in
diff --git a/contrib/rackspace/heat/engine/plugins/cloud_server.py b/contrib/rackspace/heat/engine/plugins/cloud_server.py
index b73fd9ab5..854bba399 100644
--- a/contrib/rackspace/heat/engine/plugins/cloud_server.py
+++ b/contrib/rackspace/heat/engine/plugins/cloud_server.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import socket
import tempfile
import json
@@ -19,6 +20,7 @@ import novaclient.exceptions as novaexception
from heat.common import exception
from heat.openstack.common import log as logging
+from heat.openstack.common.gettextutils import _
from heat.engine import properties
from heat.engine import scheduler
from heat.engine.resources import instance
@@ -186,9 +188,9 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
'rhel': rhel_script,
'ubuntu': ubuntu_script}
- script_error_msg = ("The %(path)s script exited with a non-zero exit "
+ script_error_msg = (_("The %(path)s script exited with a non-zero exit "
"status. To see the error message, log into the "
- "server and view %(log)s")
+ "server and view %(log)s"))
# Template keys supported for handle_update. Properties not
# listed here trigger an UpdateReplace
@@ -196,6 +198,7 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
def __init__(self, name, json_snippet, stack):
super(CloudServer, self).__init__(name, json_snippet, stack)
+ self.stack = stack
self._private_key = None
self._server = None
self._distro = None
@@ -208,7 +211,7 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
def server(self):
"""Get the Cloud Server object."""
if not self._server:
- logger.debug("Calling nova().servers.get()")
+ logger.debug(_("Calling nova().servers.get()"))
self._server = self.nova().servers.get(self.resource_id)
return self._server
@@ -216,7 +219,7 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
def distro(self):
"""Get the Linux distribution for this server."""
if not self._distro:
- logger.debug("Calling nova().images.get()")
+ logger.debug(_("Calling nova().images.get()"))
image_data = self.nova().images.get(self.image)
self._distro = image_data.metadata['os_distro']
return self._distro
@@ -267,8 +270,10 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
if ip['version'] == 4:
return ip['addr']
- raise exception.Error("Could not determine the %s IP of %s." %
- (ip_type, self.properties[self.IMAGE]))
+ raise exception.Error(_("Could not determine the %(ip)s IP of "
+ "%(image)s.") %
+ {'ip': ip_type,
+ 'image': self.properties[self.IMAGE]})
@property
def public_ip(self):
@@ -313,8 +318,19 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
username="root",
key_filename=private_key_file.name)
chan = ssh.get_transport().open_session()
+ chan.settimeout(self.stack.timeout_mins * 60.0)
chan.exec_command(command)
- return chan.recv_exit_status()
+ try:
+ # The channel timeout only works for read/write operations
+ chan.recv(1024)
+ except socket.timeout:
+ raise exception.Error("SSH command timed out after %s minutes"
+ % self.stack.timeout_mins)
+ else:
+ return chan.recv_exit_status()
+ finally:
+ ssh.close()
+ chan.close()
def _sftp_files(self, files):
"""Transfer files to the Cloud Server via SFTP."""
@@ -325,10 +341,16 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
transport = paramiko.Transport((self.public_ip, 22))
transport.connect(hostkey=None, username="root", pkey=pkey)
sftp = paramiko.SFTPClient.from_transport(transport)
- for remote_file in files:
- sftp_file = sftp.open(remote_file['path'], 'w')
- sftp_file.write(remote_file['data'])
- sftp_file.close()
+ try:
+ for remote_file in files:
+ sftp_file = sftp.open(remote_file['path'], 'w')
+ sftp_file.write(remote_file['data'])
+ sftp_file.close()
+ except:
+ raise
+ finally:
+ sftp.close()
+ transport.close()
def handle_create(self):
"""Create a Rackspace Cloud Servers container.
@@ -353,7 +375,7 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
# Create server
client = self.nova().servers
- logger.debug("Calling nova().servers.create()")
+ logger.debug(_("Calling nova().servers.create()"))
server = client.create(self.physical_resource_name(),
self.image,
self.flavor,
@@ -370,7 +392,7 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
return scheduler.PollingTaskGroup(tasks)
def _attach_volume(self, volume_id, device):
- logger.debug("Calling nova().volumes.create_server_volume()")
+ logger.debug(_("Calling nova().volumes.create_server_volume()"))
self.nova().volumes.create_server_volume(self.server.id,
volume_id,
device or None)
@@ -408,12 +430,13 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
server.get()
if 'rack_connect' in self.context.roles: # Account has RackConnect
if 'rackconnect_automation_status' not in server.metadata:
- logger.debug("RackConnect server does not have the "
- "rackconnect_automation_status metadata tag yet")
+ logger.debug(_("RackConnect server does not have the "
+ "rackconnect_automation_status metadata tag "
+ "yet"))
return False
rc_status = server.metadata['rackconnect_automation_status']
- logger.debug("RackConnect automation status: " + rc_status)
+ logger.debug(_("RackConnect automation status: ") + rc_status)
if rc_status == 'DEPLOYING':
return False
@@ -422,13 +445,13 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
self._public_ip = None # The public IP changed, forget old one
elif rc_status == 'FAILED':
- raise exception.Error("RackConnect automation FAILED")
+ raise exception.Error(_("RackConnect automation FAILED"))
elif rc_status == 'UNPROCESSABLE':
reason = server.metadata.get(
"rackconnect_unprocessable_reason", None)
if reason is not None:
- logger.warning("RackConnect unprocessable reason: "
+ logger.warning(_("RackConnect unprocessable reason: ")
+ reason)
# UNPROCESSABLE means the RackConnect automation was
# not attempted (eg. Cloud Server in a different DC
@@ -436,17 +459,17 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
# It is okay if we do not raise an exception.
else:
- raise exception.Error("Unknown RackConnect automation status: "
- + rc_status)
+ raise exception.Error(_("Unknown RackConnect automation "
+ "status: ") + rc_status)
if 'rax_managed' in self.context.roles: # Managed Cloud account
if 'rax_service_level_automation' not in server.metadata:
- logger.debug("Managed Cloud server does not have the "
- "rax_service_level_automation metadata tag yet")
+ logger.debug(_("Managed Cloud server does not have the "
+ "rax_service_level_automation metadata tag yet"))
return False
mc_status = server.metadata['rax_service_level_automation']
- logger.debug("Managed Cloud automation status: " + mc_status)
+ logger.debug(_("Managed Cloud automation status: ") + mc_status)
if mc_status == 'In Progress':
return False
@@ -455,11 +478,11 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
pass
elif mc_status == 'Build Error':
- raise exception.Error("Managed Cloud automation failed")
+ raise exception.Error(_("Managed Cloud automation failed"))
else:
- raise exception.Error("Unknown Managed Cloud automation "
- "status: " + mc_status)
+ raise exception.Error(_("Unknown Managed Cloud automation "
+ "status: ") + mc_status)
if self.has_userdata:
# Create heat-script and userdata files on server
@@ -495,7 +518,7 @@ zypper --non-interactive in cloud-init python-boto python-pip gcc python-devel
if server.status == "DELETED":
break
elif server.status == "ERROR":
- raise exception.Error("Deletion of server %s failed." %
+ raise exception.Error(_("Deletion of server %s failed.") %
server.name)
except novaexception.NotFound:
break
diff --git a/contrib/rackspace/heat/tests/test_rackspace_cloud_server.py b/contrib/rackspace/heat/tests/test_rackspace_cloud_server.py
index 96bfb5a50..85792517d 100644
--- a/contrib/rackspace/heat/tests/test_rackspace_cloud_server.py
+++ b/contrib/rackspace/heat/tests/test_rackspace_cloud_server.py
@@ -110,8 +110,12 @@ class RackspaceCloudServerTest(HeatTestCase):
chan = ssh.get_transport().AndReturn(fake_chan)
fake_chan_session = self.m.CreateMockAnything()
chan_session = chan.open_session().AndReturn(fake_chan_session)
+ fake_chan_session.settimeout(3600.0)
chan_session.exec_command(mox.IgnoreArg())
+ fake_chan_session.recv(1024)
chan_session.recv_exit_status().AndReturn(exit_code)
+ fake_chan_session.close()
+ ssh.close()
# SFTP
self.m.StubOutWithMock(paramiko, "Transport")
@@ -122,13 +126,11 @@ class RackspaceCloudServerTest(HeatTestCase):
self.m.StubOutWithMock(paramiko, "SFTPClient")
paramiko.SFTPClient.from_transport(transport).AndReturn(sftp)
sftp_file = self.m.CreateMockAnything()
- sftp.open(mox.IgnoreArg(), 'w').AndReturn(sftp_file)
- sftp_file.write(mox.IgnoreArg())
- sftp_file.close()
- sftp_file = self.m.CreateMockAnything()
- sftp.open(mox.IgnoreArg(), 'w').AndReturn(sftp_file)
- sftp_file.write(mox.IgnoreArg())
- sftp_file.close()
+ sftp.open(mox.IgnoreArg(), 'w').MultipleTimes().AndReturn(sftp_file)
+ sftp_file.write(mox.IgnoreArg()).MultipleTimes()
+ sftp_file.close().MultipleTimes()
+ sftp.close()
+ transport.close()
def _setup_test_cs(self, return_server, name, exit_code=0):
stack_name = '%s_stack' % name
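
MultipleTimes() is what lets the rewritten expectations above replace the two copy-pasted open/write/close blocks: one recorded call matches any number of identical calls, and VerifyAll() still requires it to have happened at least once. A small self-contained sketch of the idiom (names and paths are illustrative):

    import mox

    m = mox.Mox()
    sftp = m.CreateMockAnything()
    sftp_file = m.CreateMockAnything()

    # Record phase: one expectation covers every open/write/close call.
    sftp.open(mox.IgnoreArg(), 'w').MultipleTimes().AndReturn(sftp_file)
    sftp_file.write(mox.IgnoreArg()).MultipleTimes()
    sftp_file.close().MultipleTimes()
    sftp.close()

    # Replay phase: two files exercise the repeated expectations.
    m.ReplayAll()
    for data in ("userdata", "heat-script"):
        handle = sftp.open("/tmp/example", 'w')
        handle.write(data)
        handle.close()
    sftp.close()
    m.VerifyAll()
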