Diffstat (limited to 'saharaclient')
 saharaclient/api/job_binaries.py                             |  10
 saharaclient/api/job_executions.py                           |  21
 saharaclient/api/jobs.py                                     |  14
 saharaclient/osc/v1/cluster_templates.py                     | 490
 saharaclient/osc/v1/clusters.py                              | 518
 saharaclient/osc/v1/job_templates.py                         | 323
 saharaclient/osc/v1/job_types.py                             | 133
 saharaclient/osc/v1/node_group_templates.py                  | 677
 saharaclient/osc/v1/utils.py                                 |  29
 saharaclient/tests/unit/osc/v1/fakes.py                      |   1
 saharaclient/tests/unit/osc/v1/test_cluster_templates.py     | 304
 saharaclient/tests/unit/osc/v1/test_clusters.py              | 441
 saharaclient/tests/unit/osc/v1/test_job_templates.py         | 270
 saharaclient/tests/unit/osc/v1/test_job_types.py             | 145
 saharaclient/tests/unit/osc/v1/test_node_group_templates.py  | 371
 saharaclient/tests/unit/osc/v1/test_utils.py                 |   5
 saharaclient/tests/unit/test_job_executions.py               |   1
 17 files changed, 3721 insertions(+), 32 deletions(-)
diff --git a/saharaclient/api/job_binaries.py b/saharaclient/api/job_binaries.py
index 371f03a..5a54534 100644
--- a/saharaclient/api/job_binaries.py
+++ b/saharaclient/api/job_binaries.py
@@ -23,17 +23,15 @@ class JobBinaries(base.Resource):
class JobBinariesManager(base.ResourceManager):
resource_class = JobBinaries
- def create(self, name, url, description, extra, is_public=None,
+ def create(self, name, url, description=None, extra=None, is_public=None,
is_protected=None):
data = {
"name": name,
- "url": url,
- "description": description,
- "extra": extra
+ "url": url
}
- self._copy_if_defined(data, is_public=is_public,
- is_protected=is_protected)
+ self._copy_if_defined(data, description=description, extra=extra,
+ is_public=is_public, is_protected=is_protected)
return self._create('/job-binaries', data, 'job_binary')
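
With this change, description and extra become optional and are only included in the request body when actually supplied (via _copy_if_defined), instead of always being sent, possibly as None. A minimal usage sketch of the resulting call pattern; the client object and the values here are illustrative, not part of this diff:

    # Hypothetical usage; `client` is an authenticated saharaclient
    # Client instance.
    jb = client.job_binaries.create(
        name='wordcount.jar',
        url='swift://container/wordcount.jar')
    # Optional fields are added to the POST body only when passed:
    jb = client.job_binaries.create(
        name='wordcount.jar',
        url='swift://container/wordcount.jar',
        description='WordCount main JAR',
        extra={'user': 'demo', 'password': 'secret'})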
diff --git a/saharaclient/api/job_executions.py b/saharaclient/api/job_executions.py
index 23fd686..4d76b24 100644
--- a/saharaclient/api/job_executions.py
+++ b/saharaclient/api/job_executions.py
@@ -33,29 +33,18 @@ class JobExecutionsManager(base.ResourceManager):
def delete(self, obj_id):
self._delete('/job-executions/%s' % obj_id)
- def create(self, job_id, cluster_id, input_id,
- output_id, configs, interface=None, is_public=None,
+ def create(self, job_id, cluster_id, input_id=None,
+ output_id=None, configs=None, interface=None, is_public=None,
is_protected=None):
url = "/jobs/%s/execute" % job_id
data = {
"cluster_id": cluster_id,
- "job_configs": configs,
}
- if interface:
- data['interface'] = interface
-
- # Leave these out if they are null. For Java job types they
- # are not part of the schema
- io_ids = (("input_id", input_id),
- ("output_id", output_id))
- for key, value in io_ids:
- if value is not None:
- data.update({key: value})
-
- self._copy_if_defined(data, is_public=is_public,
- is_protected=is_protected)
+ self._copy_if_defined(data, input_id=input_id, output_id=output_id,
+ job_configs=configs, interface=interface,
+ is_public=is_public, is_protected=is_protected)
return self._create(url, data, 'job_execution')
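
Job execution create() now requires only job_id and cluster_id; input_id, output_id, configs and interface are copied into the body only when defined, replacing the hand-rolled io_ids loop (Java-type jobs, for example, have no data sources in their schema). An illustrative sketch, with placeholder IDs:

    # Hypothetical usage; IDs are placeholders.
    je = client.job_executions.create(
        job_id='<job-id>',
        cluster_id='<cluster-id>')
    # Jobs that do take data sources can still pass them:
    je = client.job_executions.create(
        job_id='<job-id>',
        cluster_id='<cluster-id>',
        input_id='<input-data-source-id>',
        output_id='<output-data-source-id>',
        configs={'configs': {'mapred.reduce.tasks': '1'}})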
diff --git a/saharaclient/api/jobs.py b/saharaclient/api/jobs.py
index 0e5bbec..b352772 100644
--- a/saharaclient/api/jobs.py
+++ b/saharaclient/api/jobs.py
@@ -23,18 +23,16 @@ class Job(base.Resource):
class JobsManager(base.ResourceManager):
resource_class = Job
- def create(self, name, type, mains, libs, description, interface=None,
- is_public=None, is_protected=None):
+ def create(self, name, type, mains=None, libs=None, description=None,
+ interface=None, is_public=None, is_protected=None):
data = {
'name': name,
- 'type': type,
- 'description': description,
- 'mains': mains,
- 'libs': libs,
+ 'type': type
}
- self._copy_if_defined(data, interface=interface, is_public=is_public,
- is_protected=is_protected)
+ self._copy_if_defined(data, description=description, mains=mains,
+ libs=libs, interface=interface,
+ is_public=is_public, is_protected=is_protected)
return self._create('/jobs', data, 'job')
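
Similarly, jobs.create() now treats mains, libs and description as optional and omits them from the request unless given. A sketch of the call pattern; the binary IDs are placeholders:

    # Hypothetical usage; only name and type are required now.
    job = client.jobs.create(name='my-shell-job', type='Shell')
    job = client.jobs.create(
        name='my-pig-job',
        type='Pig',
        mains=['<main-binary-id>'],
        libs=['<lib-binary-id>'],
        description='Pig job with one main and one lib binary')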
diff --git a/saharaclient/osc/v1/cluster_templates.py b/saharaclient/osc/v1/cluster_templates.py
new file mode 100644
index 0000000..90652d2
--- /dev/null
+++ b/saharaclient/osc/v1/cluster_templates.py
@@ -0,0 +1,490 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+from cliff import command
+from cliff import lister
+from cliff import show
+from openstackclient.common import exceptions
+from openstackclient.common import utils as osc_utils
+from oslo_log import log as logging
+
+from saharaclient.osc.v1 import utils
+
+CT_FIELDS = ['id', 'name', 'plugin_name', 'version', 'description',
+ 'node_groups', 'anti_affinity', 'use_autoconfig', 'is_default',
+ 'is_protected', 'is_public']
+
+
+def _format_node_groups_list(node_groups):
+ return ', '.join(
+ ['%s:%s' % (ng['name'], ng['count']) for ng in node_groups])
+
+
+def _format_ct_output(data):
+ data['version'] = data.pop('hadoop_version')
+ data['node_groups'] = _format_node_groups_list(data['node_groups'])
+ data['anti_affinity'] = osc_utils.format_list(data['anti_affinity'])
+
+
+def _configure_node_groups(node_groups, client):
+ node_groups_list = dict(
+ map(lambda x: x.split(':', 1), node_groups))
+
+ node_groups = []
+ plugins_versions = set()
+
+ for name, count in node_groups_list.items():
+ ng = utils.get_resource(client.node_group_templates, name)
+ node_groups.append({'name': ng.name,
+ 'count': int(count),
+ 'node_group_template_id': ng.id})
+ plugins_versions.add((ng.plugin_name, ng.hadoop_version))
+
+ if len(plugins_versions) != 1:
+ raise exceptions.CommandError('Node groups with the same plugin '
+ 'and version must be specified')
+
+ plugin, version = plugins_versions.pop()
+ return plugin, version, node_groups
+
+
+class CreateClusterTemplate(show.ShowOne):
+ """Creates cluster template"""
+
+ log = logging.getLogger(__name__ + ".CreateClusterTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(CreateClusterTemplate, self).get_parser(prog_name)
+
+ parser.add_argument(
+ '--name',
+ metavar="<name>",
+ help="Name of the cluster template [REQUIRED if JSON is not "
+ "provided]",
+ )
+ parser.add_argument(
+ '--node-groups',
+ metavar="<node-group:instances_count>",
+ nargs="+",
+ help="List of the node groups(names or IDs) and numbers of "
+ "instances for each one of them [REQUIRED if JSON is not "
+ "provided]"
+ )
+ parser.add_argument(
+ '--anti-affinity',
+ metavar="<anti-affinity>",
+ nargs="+",
+ help="List of processes that should be added to an anti-affinity "
+ "group"
+ )
+ parser.add_argument(
+ '--description',
+ metavar="<description>",
+ help='Description of the cluster template'
+ )
+ parser.add_argument(
+ '--autoconfig',
+ action='store_true',
+ default=False,
+ help='If enabled, instances of the cluster will be '
+ 'automatically configured',
+ )
+ parser.add_argument(
+ '--public',
+ action='store_true',
+ default=False,
+ help='Make the cluster template public (Visible from other '
+ 'tenants)',
+ )
+ parser.add_argument(
+ '--protected',
+ action='store_true',
+ default=False,
+ help='Make the cluster template protected',
+ )
+ parser.add_argument(
+ '--json',
+ metavar='<filename>',
+ help='JSON representation of the cluster template. Other '
+ 'arguments will not be taken into account if this one is '
+ 'provided'
+ )
+ parser.add_argument(
+ '--shares',
+ metavar='<filename>',
+ help='JSON representation of the manila shares'
+ )
+ parser.add_argument(
+ '--configs',
+ metavar='<filename>',
+ help='JSON representation of the cluster template configs'
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ if parsed_args.json:
+ blob = osc_utils.read_blob_file_contents(parsed_args.json)
+ try:
+ template = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'template from file %s: %s' % (parsed_args.json, e))
+
+ if 'neutron_management_network' in template:
+ template['net_id'] = template.pop('neutron_management_network')
+
+ data = client.cluster_templates.create(**template).to_dict()
+ else:
+ if not parsed_args.name or not parsed_args.node_groups:
+ raise exceptions.CommandError(
+ 'At least --name and --node-groups arguments should be '
+ 'specified or a JSON template should be provided with '
+ 'the --json argument')
+
+ configs = None
+ if parsed_args.configs:
+ blob = osc_utils.read_blob_file_contents(parsed_args.configs)
+ try:
+ configs = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'configs from file %s: %s' % (parsed_args.configs, e))
+
+ shares = None
+ if parsed_args.shares:
+ blob = osc_utils.read_blob_file_contents(parsed_args.shares)
+ try:
+ shares = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'shares from file %s: %s' % (parsed_args.shares, e))
+
+ plugin, version, node_groups = _configure_node_groups(
+ parsed_args.node_groups, client)
+
+ data = client.cluster_templates.create(
+ name=parsed_args.name,
+ plugin_name=plugin,
+ hadoop_version=version,
+ description=parsed_args.description,
+ node_groups=node_groups,
+ use_autoconfig=parsed_args.autoconfig,
+ cluster_configs=configs,
+ shares=shares,
+ is_public=parsed_args.public,
+ is_protected=parsed_args.protected
+ ).to_dict()
+
+ _format_ct_output(data)
+ data = utils.prepare_data(data, CT_FIELDS)
+
+ return self.dict2columns(data)
+
+
+class ListClusterTemplates(lister.Lister):
+ """Lists cluster templates"""
+
+ log = logging.getLogger(__name__ + ".ListClusterTemplates")
+
+ def get_parser(self, prog_name):
+ parser = super(ListClusterTemplates, self).get_parser(prog_name)
+ parser.add_argument(
+ '--long',
+ action='store_true',
+ default=False,
+ help='List additional fields in output',
+ )
+ parser.add_argument(
+ '--plugin',
+ metavar="<plugin>",
+ help="List cluster templates for specific plugin"
+ )
+
+ parser.add_argument(
+ '--version',
+ metavar="<version>",
+ help="List cluster templates with specific version of the "
+ "plugin"
+ )
+
+ parser.add_argument(
+ '--name',
+ metavar="<name-substring>",
+ help="List cluster templates with specific substring in the "
+ "name"
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+ search_opts = {}
+ if parsed_args.plugin:
+ search_opts['plugin_name'] = parsed_args.plugin
+ if parsed_args.version:
+ search_opts['hadoop_version'] = parsed_args.version
+
+ data = client.cluster_templates.list(search_opts=search_opts)
+
+ if parsed_args.name:
+ data = utils.get_by_name_substring(data, parsed_args.name)
+
+ if parsed_args.long:
+ columns = ('name', 'id', 'plugin_name', 'hadoop_version',
+ 'node_groups', 'description')
+ column_headers = utils.prepare_column_headers(
+ columns, {'hadoop_version': 'version'})
+
+ else:
+ columns = ('name', 'id', 'plugin_name', 'hadoop_version')
+ column_headers = utils.prepare_column_headers(
+ columns, {'hadoop_version': 'version'})
+
+ return (
+ column_headers,
+ (osc_utils.get_item_properties(
+ s,
+ columns,
+ formatters={
+ 'node_groups': _format_node_groups_list
+ }
+ ) for s in data)
+ )
+
+
+class ShowClusterTemplate(show.ShowOne):
+ """Display cluster template details"""
+
+ log = logging.getLogger(__name__ + ".ShowClusterTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(ShowClusterTemplate, self).get_parser(prog_name)
+ parser.add_argument(
+ "cluster_template",
+ metavar="<cluster-template>",
+ help="Name or id of the cluster template to display",
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ data = utils.get_resource(
+ client.cluster_templates, parsed_args.cluster_template).to_dict()
+
+ _format_ct_output(data)
+ data = utils.prepare_data(data, CT_FIELDS)
+
+ return self.dict2columns(data)
+
+
+class DeleteClusterTemplate(command.Command):
+ """Deletes cluster template"""
+
+ log = logging.getLogger(__name__ + ".DeleteClusterTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(DeleteClusterTemplate, self).get_parser(prog_name)
+ parser.add_argument(
+ "cluster_template",
+ metavar="<cluster-template>",
+ nargs="+",
+ help="Name(s) or id(s) of the cluster template(s) to delete",
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+ for ct in parsed_args.cluster_template:
+ ct_id = utils.get_resource(
+ client.cluster_templates, ct).id
+ client.cluster_templates.delete(ct_id)
+
+
+class UpdateClusterTemplate(show.ShowOne):
+ """Updates cluster template"""
+
+ log = logging.getLogger(__name__ + ".UpdateClusterTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(UpdateClusterTemplate, self).get_parser(prog_name)
+
+ parser.add_argument(
+ 'cluster_template',
+ metavar="<cluster-template>",
+ help="Name or ID of the cluster template [REQUIRED]",
+ )
+ parser.add_argument(
+ '--name',
+ metavar="<name>",
+ help="New name of the cluster template",
+ )
+ parser.add_argument(
+ '--node-groups',
+ metavar="<node-group:instances_count>",
+ nargs="+",
+ help="List of the node groups(names or IDs) and numbers of"
+ "instances for each one of them"
+ )
+ parser.add_argument(
+ '--anti-affinity',
+ metavar="<anti-affinity>",
+ nargs="+",
+ help="List of processes that should be added to an anti-affinity "
+ "group"
+ )
+ parser.add_argument(
+ '--description',
+ metavar="<description>",
+ help='Description of the cluster template'
+ )
+ autoconfig = parser.add_mutually_exclusive_group()
+ autoconfig.add_argument(
+ '--autoconfig-enable',
+ action='store_true',
+ help='Instances of the cluster will be '
+ 'automatically configured',
+ dest='use_autoconfig'
+ )
+ autoconfig.add_argument(
+ '--autoconfig-disable',
+ action='store_false',
+ help='Instances of the cluster will not be '
+ 'automatically configured',
+ dest='use_autoconfig'
+ )
+ public = parser.add_mutually_exclusive_group()
+ public.add_argument(
+ '--public',
+ action='store_true',
+ help='Make the cluster template public '
+ '(Visible from other tenants)',
+ dest='is_public'
+ )
+ public.add_argument(
+ '--private',
+ action='store_false',
+ help='Make the cluster template private '
+ '(Visible only from this tenant)',
+ dest='is_public'
+ )
+ protected = parser.add_mutually_exclusive_group()
+ protected.add_argument(
+ '--protected',
+ action='store_true',
+ help='Make the cluster template protected',
+ dest='is_protected'
+ )
+ protected.add_argument(
+ '--unprotected',
+ action='store_false',
+ help='Make the cluster template unprotected',
+ dest='is_protected'
+ )
+ parser.add_argument(
+ '--json',
+ metavar='<filename>',
+ help='JSON representation of the cluster template. Other '
+ 'arguments will not be taken into account if this one is '
+ 'provided'
+ )
+ parser.add_argument(
+ '--shares',
+ metavar='<filename>',
+ help='JSON representation of the manila shares'
+ )
+ parser.add_argument(
+ '--configs',
+ metavar='<filename>',
+ help='JSON representation of the cluster template configs'
+ )
+ parser.set_defaults(is_public=None, is_protected=None,
+ use_autoconfig=None)
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ ct_id = utils.get_resource(
+ client.cluster_templates, parsed_args.cluster_template).id
+
+ if parsed_args.json:
+ blob = osc_utils.read_blob_file_contents(parsed_args.json)
+ try:
+ template = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'template from file %s: %s' % (parsed_args.json, e))
+ data = client.cluster_templates.update(
+ ct_id, **template).to_dict()
+ else:
+ plugin, version, node_groups = None, None, None
+ if parsed_args.node_groups:
+ plugin, version, node_groups = _configure_node_groups(
+ parsed_args.node_groups, client)
+
+ configs = None
+ if parsed_args.configs:
+ blob = osc_utils.read_blob_file_contents(parsed_args.configs)
+ try:
+ configs = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'configs from file %s: %s' % (parsed_args.configs, e))
+
+ shares = None
+ if parsed_args.shares:
+ blob = osc_utils.read_blob_file_contents(parsed_args.shares)
+ try:
+ shares = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'shares from file %s: %s' % (parsed_args.shares, e))
+
+ data = client.cluster_templates.update(
+ ct_id,
+ name=parsed_args.name,
+ plugin_name=plugin,
+ hadoop_version=version,
+ description=parsed_args.description,
+ node_groups=node_groups,
+ use_autoconfig=parsed_args.use_autoconfig,
+ cluster_configs=configs,
+ shares=shares,
+ is_public=parsed_args.is_public,
+ is_protected=parsed_args.is_protected
+ ).to_dict()
+
+ _format_ct_output(data)
+ data = utils.prepare_data(data, CT_FIELDS)
+
+ return self.dict2columns(data)
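
For reference, --node-groups takes a list of "name:count" pairs; _configure_node_groups above splits each pair, resolves the template through the API, and verifies that all referenced templates share a single plugin and version. A simplified, client-free sketch of just the parsing step (the order of the resulting list may vary, since it comes from a dict):

    # Simplified sketch of the "name:count" parsing done by
    # _configure_node_groups; the real code also resolves each name
    # to a node group template and collects plugin/version pairs.
    def parse_node_groups(specs):
        pairs = dict(s.split(':', 1) for s in specs)
        return [{'name': name, 'count': int(count)}
                for name, count in pairs.items()]

    parse_node_groups(['master:1', 'worker:3'])
    # -> [{'name': 'master', 'count': 1}, {'name': 'worker', 'count': 3}]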
diff --git a/saharaclient/osc/v1/clusters.py b/saharaclient/osc/v1/clusters.py
new file mode 100644
index 0000000..c08a2b6
--- /dev/null
+++ b/saharaclient/osc/v1/clusters.py
@@ -0,0 +1,518 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+from cliff import command
+from cliff import lister
+from cliff import show
+from openstackclient.common import exceptions
+from openstackclient.common import utils as osc_utils
+from oslo_log import log as logging
+
+from saharaclient.osc.v1 import utils
+
+CLUSTER_FIELDS = ["cluster_template_id", "use_autoconfig", "user_keypair_id",
+ "status", "image", "node_groups", "id",
+ "anti_affinity", "version", "name", "is_transient",
+ "is_protected", "description", "is_public",
+ "neutron_management_network", "plugin_name"]
+
+
+def _format_node_groups_list(node_groups):
+ return ', '.join(
+ ['%s:%s' % (ng['name'], ng['count']) for ng in node_groups])
+
+
+def _format_cluster_output(data):
+ data['version'] = data.pop('hadoop_version')
+ data['image'] = data.pop('default_image_id')
+ data['node_groups'] = _format_node_groups_list(data['node_groups'])
+ data['anti_affinity'] = osc_utils.format_list(data['anti_affinity'])
+
+
+def _get_plugin_version(cluster_template, client):
+ ct = utils.get_resource(client.cluster_templates, cluster_template)
+ return ct.plugin_name, ct.hadoop_version, ct.id
+
+
+class CreateCluster(show.ShowOne):
+ """Creates cluster"""
+
+ log = logging.getLogger(__name__ + ".CreateCluster")
+
+ def get_parser(self, prog_name):
+ parser = super(CreateCluster, self).get_parser(prog_name)
+
+ parser.add_argument(
+ '--name',
+ metavar="<name>",
+ help="Name of the cluster [REQUIRED if JSON is not provided]",
+ )
+ parser.add_argument(
+ '--cluster-template',
+ metavar="<cluster-template>",
+ help="Cluster template name or ID [REQUIRED if JSON is not "
+ "provided]"
+ )
+ parser.add_argument(
+ '--image',
+ metavar="<image>",
+ help='Image that will be used for cluster deployment (Name or ID) '
+ '[REQUIRED if JSON is not provided]'
+ )
+ parser.add_argument(
+ '--description',
+ metavar="<description>",
+ help='Description of the cluster'
+ )
+ parser.add_argument(
+ '--user-keypair',
+ metavar="<keypair>",
+ help='User keypair to get access to VMs after cluster creation'
+ )
+ parser.add_argument(
+ '--neutron-network',
+ metavar="<network>",
+ help='Instances of the cluster will get fixed IP addresses in '
+ 'this network. (Name or ID should be provided)'
+ )
+ parser.add_argument(
+ '--count',
+ metavar="<count>",
+ type=int,
+ help='Number of clusters to be created'
+ )
+ parser.add_argument(
+ '--public',
+ action='store_true',
+ default=False,
+ help='Make the cluster public (Visible from other tenants)',
+ )
+ parser.add_argument(
+ '--protected',
+ action='store_true',
+ default=False,
+ help='Make the cluster protected',
+ )
+ parser.add_argument(
+ '--transient',
+ action='store_true',
+ default=False,
+ help='Create transient cluster',
+ )
+ parser.add_argument(
+ '--json',
+ metavar='<filename>',
+ help='JSON representation of the cluster. Other '
+ 'arguments (except for --wait) will not be taken into '
+ 'account if this one is provided'
+ )
+ parser.add_argument(
+ '--wait',
+ action='store_true',
+ default=False,
+ help='Wait for the cluster creation to complete',
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+ network_client = self.app.client_manager.network
+
+ if parsed_args.json:
+ blob = osc_utils.read_blob_file_contents(parsed_args.json)
+ try:
+ template = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'template from file %s: %s' % (parsed_args.json, e))
+
+ if 'neutron_management_network' in template:
+ template['net_id'] = template.pop('neutron_management_network')
+
+ if 'count' in template:
+ parsed_args.count = template['count']
+
+ data = client.clusters.create(**template).to_dict()
+ else:
+ if not parsed_args.name or not parsed_args.cluster_template \
+ or not parsed_args.image:
+ raise exceptions.CommandError(
+ 'At least --name, --cluster-template and --image arguments '
+ 'should be specified or a JSON template should be provided '
+ 'with the --json argument')
+
+ plugin, version, template_id = _get_plugin_version(
+ parsed_args.cluster_template, client)
+
+ image_id = utils.get_resource(client.images, parsed_args.image).id
+
+ net_id = (network_client.api.find_attr(
+ 'networks', parsed_args.neutron_network)['id'] if
+ parsed_args.neutron_network else None)
+
+ data = client.clusters.create(
+ name=parsed_args.name,
+ plugin_name=plugin,
+ hadoop_version=version,
+ cluster_template_id=template_id,
+ default_image_id=image_id,
+ description=parsed_args.description,
+ is_transient=parsed_args.transient,
+ user_keypair_id=parsed_args.user_keypair,
+ net_id=net_id,
+ count=parsed_args.count,
+ is_public=parsed_args.public,
+ is_protected=parsed_args.protected
+ ).to_dict()
+ if parsed_args.count and parsed_args.count > 1:
+ clusters = [
+ utils.get_resource(client.clusters, id)
+ for id in data['clusters']]
+
+ if parsed_args.wait:
+ for cluster in clusters:
+ if not osc_utils.wait_for_status(
+ client.clusters.get, cluster.id):
+ self.log.error(
+ 'Error occurred during cluster creation: %s',
+ cluster.id)
+
+ data = {}
+ for cluster in clusters:
+ data[cluster.name] = cluster.id
+
+ else:
+ if parsed_args.wait:
+ if not osc_utils.wait_for_status(
+ client.clusters.get, data['id']):
+ self.log.error(
+ 'Error occurred during cluster creation: %s',
+ data['id'])
+ data = client.clusters.get(data['id']).to_dict()
+ _format_cluster_output(data)
+ data = utils.prepare_data(data, CLUSTER_FIELDS)
+
+ return self.dict2columns(data)
+
+
+class ListClusters(lister.Lister):
+ """Lists clusters"""
+
+ log = logging.getLogger(__name__ + ".ListClusters")
+
+ def get_parser(self, prog_name):
+ parser = super(ListClusters, self).get_parser(prog_name)
+ parser.add_argument(
+ '--long',
+ action='store_true',
+ default=False,
+ help='List additional fields in output',
+ )
+ parser.add_argument(
+ '--plugin',
+ metavar="<plugin>",
+ help="List clusters with specific plugin"
+ )
+
+ parser.add_argument(
+ '--version',
+ metavar="<version>",
+ help="List clusters with specific version of the "
+ "plugin"
+ )
+
+ parser.add_argument(
+ '--name',
+ metavar="<name-substring>",
+ help="List clusters with specific substring in the name"
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+ search_opts = {}
+ if parsed_args.plugin:
+ search_opts['plugin_name'] = parsed_args.plugin
+ if parsed_args.version:
+ search_opts['hadoop_version'] = parsed_args.version
+
+ data = client.clusters.list(search_opts=search_opts)
+
+ if parsed_args.name:
+ data = utils.get_by_name_substring(data, parsed_args.name)
+
+ if parsed_args.long:
+ columns = ('name', 'id', 'plugin_name', 'hadoop_version',
+ 'status', 'description', 'default_image_id')
+ column_headers = utils.prepare_column_headers(
+ columns, {'hadoop_version': 'version',
+ 'default_image_id': 'image'})
+
+ else:
+ columns = ('name', 'id', 'plugin_name', 'hadoop_version', 'status')
+ column_headers = utils.prepare_column_headers(
+ columns, {'hadoop_version': 'version',
+ 'default_image_id': 'image'})
+ return (
+ column_headers,
+ (osc_utils.get_item_properties(
+ s,
+ columns
+ ) for s in data)
+ )
+
+
+class ShowCluster(show.ShowOne):
+ """Display cluster details"""
+
+ log = logging.getLogger(__name__ + ".ShowCluster")
+
+ def get_parser(self, prog_name):
+ parser = super(ShowCluster, self).get_parser(prog_name)
+ parser.add_argument(
+ "cluster",
+ metavar="<cluster>",
+ help="Name or id of the cluster to display",
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ data = utils.get_resource(
+ client.clusters, parsed_args.cluster).to_dict()
+
+ _format_cluster_output(data)
+ data = utils.prepare_data(data, CLUSTER_FIELDS)
+
+ return self.dict2columns(data)
+
+
+class DeleteCluster(command.Command):
+ """Deletes cluster"""
+
+ log = logging.getLogger(__name__ + ".DeleteCluster")
+
+ def get_parser(self, prog_name):
+ parser = super(DeleteCluster, self).get_parser(prog_name)
+ parser.add_argument(
+ "cluster",
+ metavar="<cluster>",
+ nargs="+",
+ help="Name(s) or id(s) of the cluster(s) to delete",
+ )
+ parser.add_argument(
+ '--wait',
+ action='store_true',
+ default=False,
+ help='Wait for the cluster(s) delete to complete',
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+ clusters = []
+ for cluster in parsed_args.cluster:
+ cluster_id = utils.get_resource(
+ client.clusters, cluster).id
+ client.clusters.delete(cluster_id)
+ clusters.append(cluster_id)
+ if parsed_args.wait:
+ for cluster_id in clusters:
+ if not utils.wait_for_delete(client.clusters, cluster_id):
+ self.log.error(
+ 'Error occurred during cluster deletion: %s',
+ cluster_id)
+
+
+class UpdateCluster(show.ShowOne):
+ """Updates cluster"""
+
+ log = logging.getLogger(__name__ + ".UpdateCluster")
+
+ def get_parser(self, prog_name):
+ parser = super(UpdateCluster, self).get_parser(prog_name)
+
+ parser.add_argument(
+ 'cluster',
+ metavar="<cluster>",
+ help="Name or ID of the cluster",
+ )
+ parser.add_argument(
+ '--name',
+ metavar="<name>",
+ help="New name of the cluster",
+ )
+ parser.add_argument(
+ '--description',
+ metavar="<description>",
+ help='Description of the cluster'
+ )
+ public = parser.add_mutually_exclusive_group()
+ public.add_argument(
+ '--public',
+ action='store_true',
+ help='Make the cluster public '
+ '(Visible from other tenants)',
+ dest='is_public'
+ )
+ public.add_argument(
+ '--private',
+ action='store_false',
+ help='Make the cluster private '
+ '(Visible only from this tenant)',
+ dest='is_public'
+ )
+ protected = parser.add_mutually_exclusive_group()
+ protected.add_argument(
+ '--protected',
+ action='store_true',
+ help='Make the cluster protected',
+ dest='is_protected'
+ )
+ protected.add_argument(
+ '--unprotected',
+ action='store_false',
+ help='Make the cluster unprotected',
+ dest='is_protected'
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ cluster_id = utils.get_resource(
+ client.clusters, parsed_args.cluster).id
+
+ data = client.clusters.update(
+ cluster_id,
+ name=parsed_args.name,
+ description=parsed_args.description,
+ is_public=parsed_args.is_public,
+ is_protected=parsed_args.is_protected
+ ).cluster
+
+ _format_cluster_output(data)
+ data = utils.prepare_data(data, CLUSTER_FIELDS)
+
+ return self.dict2columns(data)
+
+
+class ScaleCluster(show.ShowOne):
+ """Scales cluster"""
+
+ log = logging.getLogger(__name__ + ".ScaleCluster")
+
+ def get_parser(self, prog_name):
+ parser = super(ScaleCluster, self).get_parser(prog_name)
+
+ parser.add_argument(
+ 'cluster',
+ metavar="<cluster>",
+ help="Name or ID of the cluster",
+ )
+ parser.add_argument(
+ '--node-groups',
+ nargs='+',
+ metavar='<node-group:instances_count>',
+ help='Node groups and number of their instances to be scaled to '
+ '[REQUIRED if JSON is not provided]'
+ )
+ parser.add_argument(
+ '--json',
+ metavar='<filename>',
+ help='JSON representation of the cluster scale object. Other '
+ 'arguments (except for --wait) will not be taken into '
+ 'account if this one is provided'
+ )
+ parser.add_argument(
+ '--wait',
+ action='store_true',
+ default=False,
+ help='Wait for the cluster scale to complete',
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ cluster = utils.get_resource(
+ client.clusters, parsed_args.cluster)
+
+ if parsed_args.json:
+ blob = osc_utils.read_blob_file_contents(parsed_args.json)
+ try:
+ template = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'template from file %s: %s' % (parsed_args.json, e))
+
+ data = client.clusters.scale(cluster.id, template).to_dict()
+ else:
+ scale_object = {
+ "add_node_groups": [],
+ "resize_node_groups": []
+ }
+ scale_node_groups = dict(
+ map(lambda x: x.split(':', 1), parsed_args.node_groups))
+ cluster_node_groups = [ng['name'] for ng in cluster.node_groups]
+ for name, count in scale_node_groups.items():
+ ng = utils.get_resource(client.node_group_templates, name)
+ if ng.name in cluster_node_groups:
+ scale_object["resize_node_groups"].append({
+ "name": ng.name,
+ "count": int(count)
+ })
+ else:
+ scale_object["add_node_groups"].append({
+ "node_group_template_id": ng.id,
+ "name": ng.name,
+ "count": int(count)
+ })
+ if not scale_object['add_node_groups']:
+ del scale_object['add_node_groups']
+ if not scale_object['resize_node_groups']:
+ del scale_object['resize_node_groups']
+
+ data = client.clusters.scale(cluster.id, scale_object).cluster
+
+ if parsed_args.wait:
+ if not osc_utils.wait_for_status(
+ client.clusters.get, data['id']):
+ self.log.error(
+ 'Error occurred during cluster scaling: %s',
+ cluster.id)
+ data = client.clusters.get(cluster.id).to_dict()
+
+ _format_cluster_output(data)
+ data = utils.prepare_data(data, CLUSTER_FIELDS)
+
+ return self.dict2columns(data)
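
ScaleCluster builds its request by sorting each "name:count" pair into resize_node_groups (the group already exists in the cluster) or add_node_groups (a new group, referenced by template ID), then drops whichever list ends up empty. A sketch of the payload this produces; IDs and names are illustrative:

    # Hypothetical payload for `--node-groups worker:5 extra-workers:2`,
    # assuming 'worker' already exists in the cluster and
    # 'extra-workers' does not:
    scale_object = {
        'resize_node_groups': [
            {'name': 'worker', 'count': 5},
        ],
        'add_node_groups': [
            {'node_group_template_id': '<ngt-id>',
             'name': 'extra-workers',
             'count': 2},
        ],
    }
    # client.clusters.scale(cluster.id, scale_object)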
diff --git a/saharaclient/osc/v1/job_templates.py b/saharaclient/osc/v1/job_templates.py
new file mode 100644
index 0000000..359bbb5
--- /dev/null
+++ b/saharaclient/osc/v1/job_templates.py
@@ -0,0 +1,323 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from cliff import command
+from cliff import lister
+from cliff import show
+from openstackclient.common import exceptions
+from openstackclient.common import utils as osc_utils
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+
+from saharaclient.osc.v1 import utils
+
+JOB_TEMPLATE_FIELDS = ['name', 'id', 'type', 'mains', 'libs', 'description',
+ 'is_public', 'is_protected']
+
+JOB_TYPES_CHOICES = ['Hive', 'Java', 'MapReduce', 'Storm', 'Pig', 'Shell',
+ 'MapReduce.Streaming', 'Spark']
+
+
+def _format_job_template_output(data):
+ data['mains'] = osc_utils.format_list(
+ ['%s:%s' % (m['name'], m['id']) for m in data['mains']])
+ data['libs'] = osc_utils.format_list(
+ ['%s:%s' % (l['name'], l['id']) for l in data['libs']])
+
+
+class CreateJobTemplate(show.ShowOne):
+ """Creates job template"""
+
+ log = logging.getLogger(__name__ + ".CreateJobTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(CreateJobTemplate, self).get_parser(prog_name)
+
+ parser.add_argument(
+ '--name',
+ metavar="<name>",
+ help="Name of the job template [REQUIRED if JSON is not provided]",
+ )
+ parser.add_argument(
+ '--type',
+ metavar="<type>",
+ choices=JOB_TYPES_CHOICES,
+ help="Type of the job (%s) "
+ "[REQUIRED if JSON is not provided]" % ', '.join(
+ JOB_TYPES_CHOICES)
+ )
+ parser.add_argument(
+ '--mains',
+ metavar="<main>",
+ nargs='+',
+ help="Name(s) or ID(s) for job's main job binary(s)",
+ )
+ parser.add_argument(
+ '--libs',
+ metavar="<lib>",
+ nargs='+',
+ help="Name(s) or ID(s) for job's lib job binary(s)",
+ )
+ parser.add_argument(
+ '--description',
+ metavar="<description>",
+ help="Description of the job template"
+ )
+ parser.add_argument(
+ '--public',
+ action='store_true',
+ default=False,
+ help='Make the job template public',
+ )
+ parser.add_argument(
+ '--protected',
+ action='store_true',
+ default=False,
+ help='Make the job template protected',
+ )
+ parser.add_argument(
+ '--interface',
+ metavar='<filename>',
+ help='JSON representation of the interface'
+ )
+ parser.add_argument(
+ '--json',
+ metavar='<filename>',
+ help='JSON representation of the job template'
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ if parsed_args.json:
+ blob = osc_utils.read_blob_file_contents(parsed_args.json)
+ try:
+ template = jsonutils.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'template from file %s: %s' % (parsed_args.json, e))
+ data = client.jobs.create(**template).to_dict()
+ else:
+ if parsed_args.interface:
+ blob = osc_utils.read_blob_file_contents(parsed_args.interface)
+ try:
+ parsed_args.interface = jsonutils.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading interface '
+ 'from file %s: %s' % (parsed_args.interface, e))
+
+ mains_ids = [utils.get_resource(client.job_binaries, m).id for m
+ in parsed_args.mains] if parsed_args.mains else None
+ libs_ids = [utils.get_resource(client.job_binaries, m).id for m
+ in parsed_args.libs] if parsed_args.libs else None
+
+ data = client.jobs.create(
+ name=parsed_args.name, type=parsed_args.type, mains=mains_ids,
+ libs=libs_ids, description=parsed_args.description,
+ interface=parsed_args.interface, is_public=parsed_args.public,
+ is_protected=parsed_args.protected).to_dict()
+
+ _format_job_template_output(data)
+ data = utils.prepare_data(data, JOB_TEMPLATE_FIELDS)
+
+ return self.dict2columns(data)
+
+
+class ListJobTemplates(lister.Lister):
+ """Lists job templates"""
+
+ log = logging.getLogger(__name__ + ".ListJobTemplates")
+
+ def get_parser(self, prog_name):
+ parser = super(ListJobTemplates, self).get_parser(prog_name)
+ parser.add_argument(
+ '--long',
+ action='store_true',
+ default=False,
+ help='List additional fields in output',
+ )
+ parser.add_argument(
+ '--type',
+ metavar="<type>",
+ choices=JOB_TYPES_CHOICES,
+ help="List job templates of specific type"
+ )
+ parser.add_argument(
+ '--name',
+ metavar="<name-substring>",
+ help="List job templates with specific substring in the "
+ "name"
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+ search_opts = {'type': parsed_args.type} if parsed_args.type else {}
+
+ data = client.jobs.list(search_opts=search_opts)
+
+ if parsed_args.name:
+ data = utils.get_by_name_substring(data, parsed_args.name)
+
+ if parsed_args.long:
+ columns = ('name', 'id', 'type', 'description', 'is_public',
+ 'is_protected')
+ column_headers = utils.prepare_column_headers(columns)
+
+ else:
+ columns = ('name', 'id', 'type')
+ column_headers = utils.prepare_column_headers(columns)
+
+ return (
+ column_headers,
+ (osc_utils.get_item_properties(
+ s,
+ columns
+ ) for s in data)
+ )
+
+
+class ShowJobTemplate(show.ShowOne):
+ """Display job template details"""
+
+ log = logging.getLogger(__name__ + ".ShowJobTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(ShowJobTemplate, self).get_parser(prog_name)
+ parser.add_argument(
+ "job_template",
+ metavar="<job-template>",
+ help="Name or ID of the job template to display",
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ data = utils.get_resource(
+ client.jobs, parsed_args.job_template).to_dict()
+
+ _format_job_template_output(data)
+ data = utils.prepare_data(data, JOB_TEMPLATE_FIELDS)
+
+ return self.dict2columns(data)
+
+
+class DeleteJobTemplate(command.Command):
+ """Deletes job template"""
+
+ log = logging.getLogger(__name__ + ".DeleteJobTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(DeleteJobTemplate, self).get_parser(prog_name)
+ parser.add_argument(
+ "job_template",
+ metavar="<job-template>",
+ nargs="+",
+ help="Name(s) or id(s) of the job template(s) to delete",
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+ for jt in parsed_args.job_template:
+ jt_id = utils.get_resource(
+ client.jobs, jt).id
+ client.jobs.delete(jt_id)
+
+
+class UpdateJobTemplate(show.ShowOne):
+ """Updates job template"""
+
+ log = logging.getLogger(__name__ + ".UpdateJobTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(UpdateJobTemplate, self).get_parser(prog_name)
+
+ parser.add_argument(
+ 'job_template',
+ metavar="<job-template>",
+ help="Name or ID of the job template",
+ )
+ parser.add_argument(
+ '--name',
+ metavar="<name>",
+ help="New name of the job template",
+ )
+ parser.add_argument(
+ '--description',
+ metavar="<description>",
+ help='Description of the job template'
+ )
+ public = parser.add_mutually_exclusive_group()
+ public.add_argument(
+ '--public',
+ action='store_true',
+ help='Make the job template public '
+ '(Visible from other tenants)',
+ dest='is_public'
+ )
+ public.add_argument(
+ '--private',
+ action='store_false',
+ help='Make the job template private '
+ '(Visible only from this tenant)',
+ dest='is_public'
+ )
+ protected = parser.add_mutually_exclusive_group()
+ protected.add_argument(
+ '--protected',
+ action='store_true',
+ help='Make the job template protected',
+ dest='is_protected'
+ )
+ protected.add_argument(
+ '--unprotected',
+ action='store_false',
+ help='Make the job template unprotected',
+ dest='is_protected'
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ jt_id = utils.get_resource(
+ client.jobs, parsed_args.job_template).id
+
+ data = client.jobs.update(
+ jt_id,
+ name=parsed_args.name,
+ description=parsed_args.description,
+ is_public=parsed_args.is_public,
+ is_protected=parsed_args.is_protected
+ ).job
+
+ _format_job_template_output(data)
+ data = utils.prepare_data(data, JOB_TEMPLATE_FIELDS)
+
+ return self.dict2columns(data)
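
_format_job_template_output flattens the mains and libs lists of binary dicts into "name:id" strings for display (osc_utils.format_list joins them with ", "). A quick sketch of the transformation with made-up data:

    # Illustrative input/output of _format_job_template_output:
    data = {'mains': [{'name': 'main.jar', 'id': 'aaa'}],
            'libs': [{'name': 'lib.jar', 'id': 'bbb'}]}
    # after _format_job_template_output(data):
    #   data['mains'] == 'main.jar:aaa'
    #   data['libs'] == 'lib.jar:bbb'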
diff --git a/saharaclient/osc/v1/job_types.py b/saharaclient/osc/v1/job_types.py
new file mode 100644
index 0000000..ca483b8
--- /dev/null
+++ b/saharaclient/osc/v1/job_types.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from os import path
+
+from cliff import command
+from cliff import lister
+from openstackclient.common import exceptions
+from openstackclient.common import utils as osc_utils
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+
+from saharaclient.osc.v1.job_templates import JOB_TYPES_CHOICES
+from saharaclient.osc.v1 import utils
+
+
+class ListJobTypes(lister.Lister):
+ """Lists job types supported by plugins"""
+
+ log = logging.getLogger(__name__ + ".ListJobTypes")
+
+ def get_parser(self, prog_name):
+ parser = super(ListJobTypes, self).get_parser(prog_name)
+ parser.add_argument(
+ '--type',
+ metavar="<type>",
+ choices=JOB_TYPES_CHOICES,
+ help="Get information about specific job type"
+ )
+ parser.add_argument(
+ '--plugin',
+ metavar="<plugin>",
+ help="Get only job types supported by this plugin"
+ )
+ parser.add_argument(
+ '--version',
+ metavar="<version>",
+ help="Get only job types supported by specific version of the "
+ "plugin. This parameter will be taken into account only if "
+ "plugin is provided"
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ search_opts = {}
+ if parsed_args.type:
+ search_opts['type'] = parsed_args.type
+ if parsed_args.plugin:
+ search_opts['plugin'] = parsed_args.plugin
+ if parsed_args.version:
+ search_opts['version'] = parsed_args.version
+ elif parsed_args.version:
+ raise exceptions.CommandError(
+ '--version argument should be specified with --plugin '
+ 'argument')
+
+ data = client.job_types.list(search_opts=search_opts)
+ for job in data:
+ plugins = []
+ for plugin in job.plugins:
+ versions = ", ".join(sorted(plugin["versions"].keys()))
+ if versions:
+ versions = "(" + versions + ")"
+ plugins.append(plugin["name"] + versions)
+ job.plugins = ', '.join(plugins)
+
+ columns = ('name', 'plugins')
+ column_headers = utils.prepare_column_headers(columns)
+
+ return (
+ column_headers,
+ (osc_utils.get_item_properties(
+ s,
+ columns
+ ) for s in data)
+ )
+
+
+class GetJobTypeConfigs(command.Command):
+ """Get job type configs"""
+
+ log = logging.getLogger(__name__ + ".GetJobTypeConfigs")
+
+ def get_parser(self, prog_name):
+ parser = super(GetJobTypeConfigs, self).get_parser(prog_name)
+ parser.add_argument(
+ "job_type",
+ metavar="<job-type>",
+ choices=JOB_TYPES_CHOICES,
+ help="Type of the job to provide config information about",
+ )
+ parser.add_argument(
+ '--file',
+ metavar="<file>",
+ help='Destination file (defaults to job type)',
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ if not parsed_args.file:
+ parsed_args.file = parsed_args.job_type
+
+ data = client.jobs.get_configs(parsed_args.job_type).to_dict()
+
+ if path.exists(parsed_args.file):
+ self.log.error('File "%s" already exists. Choose another one with '
+ '--file argument.' % parsed_args.file)
+ else:
+ with open(parsed_args.file, 'w') as f:
+ jsonutils.dump(data, f, indent=4)
+ self.log.info(
+ '"%(type)s" job configs were saved in "%(file)s"'
+ 'file' % {'type': parsed_args.job_type,
+ 'file': parsed_args.file})
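
ListJobTypes collapses each job type's plugins attribute into "name(version, ...)" strings before display. A standalone sketch of that loop, with made-up plugin data:

    # Illustrative data; plugin names and versions are examples only.
    plugins = [{'name': 'vanilla', 'versions': {'2.7.1': {}}},
               {'name': 'spark', 'versions': {'1.3.1': {}, '1.6.0': {}}}]
    formatted = []
    for plugin in plugins:
        versions = ', '.join(sorted(plugin['versions'].keys()))
        if versions:
            versions = '(' + versions + ')'
        formatted.append(plugin['name'] + versions)
    ', '.join(formatted)
    # -> 'vanilla(2.7.1), spark(1.3.1, 1.6.0)'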
diff --git a/saharaclient/osc/v1/node_group_templates.py b/saharaclient/osc/v1/node_group_templates.py
new file mode 100644
index 0000000..0fceac8
--- /dev/null
+++ b/saharaclient/osc/v1/node_group_templates.py
@@ -0,0 +1,677 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+from cliff import command
+from cliff import lister
+from cliff import show
+from openstackclient.common import exceptions
+from openstackclient.common import utils as osc_utils
+from oslo_log import log as logging
+
+from saharaclient.osc.v1 import utils
+
+NGT_FIELDS = ['id', 'name', 'plugin_name', 'version', 'node_processes',
+ 'description', 'auto_security_group', 'security_groups',
+ 'availability_zone', 'flavor_id', 'floating_ip_pool',
+ 'volumes_per_node', 'volumes_size',
+ 'volume_type', 'volume_local_to_instance', 'volume_mount_prefix',
+ 'volumes_availability_zone', 'use_autoconfig',
+ 'is_proxy_gateway', 'is_default', 'is_protected', 'is_public']
+
+
+def _format_ngt_output(data):
+ data['node_processes'] = osc_utils.format_list(data['node_processes'])
+ data['version'] = data.pop('hadoop_version')
+ if data['volumes_per_node'] == 0:
+ del data['volume_local_to_instance']
+ del data['volume_mount_prefix']
+ del data['volume_type']
+ del data['volumes_availability_zone']
+ del data['volumes_size']
+
+
+class CreateNodeGroupTemplate(show.ShowOne):
+ """Creates node group template"""
+
+ log = logging.getLogger(__name__ + ".CreateNodeGroupTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(CreateNodeGroupTemplate, self).get_parser(prog_name)
+
+ parser.add_argument(
+ '--name',
+ metavar="<name>",
+ help="Name of the node group template [REQUIRED if JSON is not "
+ "provided]",
+ )
+ parser.add_argument(
+ '--plugin',
+ metavar="<plugin>",
+ help="Name of the plugin [REQUIRED if JSON is not provided]"
+ )
+ parser.add_argument(
+ '--version',
+ metavar="<version>",
+ help="Version of the plugin [REQUIRED if JSON is not provided]"
+ )
+ parser.add_argument(
+ '--processes',
+ metavar="<processes>",
+ nargs="+",
+ help="List of the processes that will be launched on each "
+ "instance [REQUIRED if JSON is not provided]"
+ )
+ parser.add_argument(
+ '--security-groups',
+ metavar="<security-groups>",
+ nargs="+",
+ help="List of the security groups for the instances in this node "
+ "group"
+ )
+ parser.add_argument(
+ '--auto-security-group',
+ action='store_true',
+ default=False,
+ help='Indicates if an additional security group should be created '
+ 'for the node group',
+ )
+ parser.add_argument(
+ '--availability-zone',
+ metavar="<availability-zone>",
+ help="Name of the availability zone where instances "
+ "will be created"
+ )
+ parser.add_argument(
+ '--flavor',
+ metavar="<flavor-id>",
+ help="ID of the flavor"
+ )
+ parser.add_argument(
+ '--floating-ip-pool',
+ metavar="<floating-ip-pool>",
+ help="ID of the floating IP pool"
+ )
+ parser.add_argument(
+ '--volumes-per-node',
+ type=int,
+ metavar="<volumes-per-node>",
+ help="Number of volumes attached to every node"
+ )
+ parser.add_argument(
+ '--volumes-size',
+ type=int,
+ metavar="<volumes-size>",
+ help='Size of volumes attached to node (GB). '
+ 'This parameter will be taken into account only '
+ 'if volumes-per-node is set and non-zero'
+ )
+ parser.add_argument(
+ '--volumes-type',
+ metavar="<volumes-type>",
+ help='Type of the volumes. '
+ 'This parameter will be taken into account only '
+ 'if volumes-per-node is set and non-zero'
+ )
+ parser.add_argument(
+ '--volumes-availability-zone',
+ metavar="<volumes-availability-zone>",
+ help='Name of the availability zone where volumes will be created.'
+ ' This parameter will be taken into account only '
+ 'if volumes-per-node is set and non-zero'
+ )
+ parser.add_argument(
+ '--volumes-mount-prefix',
+ metavar="<volumes-mount-prefix>",
+ help='Prefix for mount point directory. '
+ 'This parameter will be taken into account only '
+ 'if volumes-per-node is set and non-zero'
+ )
+ parser.add_argument(
+ '--volumes-locality',
+ action='store_true',
+ default=False,
+ help='If enabled, instance and attached volumes will be created on'
+ ' the same physical host. This parameter will be taken into '
+ 'account only if volumes-per-node is set and non-zero',
+ )
+ parser.add_argument(
+ '--description',
+ metavar="<description>",
+ help='Description of the node group template'
+ )
+ parser.add_argument(
+ '--autoconfig',
+ action='store_true',
+ default=False,
+ help='If enabled, instances of the node group will be '
+ 'automatically configured',
+ )
+ parser.add_argument(
+ '--proxy-gateway',
+ action='store_true',
+ default=False,
+ help='If enabled, instances of the node group will be used to '
+ 'access other instances in the cluster',
+ )
+ parser.add_argument(
+ '--public',
+ action='store_true',
+ default=False,
+ help='Make the node group template public (Visible from other '
+ 'tenants)',
+ )
+ parser.add_argument(
+ '--protected',
+ action='store_true',
+ default=False,
+ help='Make the node group template protected',
+ )
+ parser.add_argument(
+ '--json',
+ metavar='<filename>',
+ help='JSON representation of the node group template. Other '
+ 'arguments will not be taken into account if this one is '
+ 'provided'
+ )
+ parser.add_argument(
+ '--shares',
+ metavar='<filename>',
+ help='JSON representation of the manila shares'
+ )
+ parser.add_argument(
+ '--configs',
+ metavar='<filename>',
+ help='JSON representation of the node group template configs'
+ )
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ if parsed_args.json:
+ blob = osc_utils.read_blob_file_contents(parsed_args.json)
+ try:
+ template = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'template from file %s: %s' % (parsed_args.json, e))
+ data = client.node_group_templates.create(**template).to_dict()
+ else:
+ if (not parsed_args.name or not parsed_args.plugin or
+ not parsed_args.version or not parsed_args.processes):
+ raise exceptions.CommandError(
+ 'At least --name, --plugin, --version and --processes '
+ 'arguments should be specified or a JSON template should '
+ 'be provided with the --json argument')
+
+ configs = None
+ if parsed_args.configs:
+ blob = osc_utils.read_blob_file_contents(parsed_args.configs)
+ try:
+ configs = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'configs from file %s: %s' % (parsed_args.configs, e))
+
+ shares = None
+ if parsed_args.shares:
+ blob = osc_utils.read_blob_file_contents(parsed_args.shares)
+ try:
+ shares = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'shares from file %s: %s' % (parsed_args.shares, e))
+
+ data = client.node_group_templates.create(
+ name=parsed_args.name,
+ plugin_name=parsed_args.plugin,
+ hadoop_version=parsed_args.version,
+ flavor_id=parsed_args.flavor,
+ description=parsed_args.description,
+ volumes_per_node=parsed_args.volumes_per_node,
+ volumes_size=parsed_args.volumes_size,
+ node_processes=parsed_args.processes,
+ floating_ip_pool=parsed_args.floating_ip_pool,
+ security_groups=parsed_args.security_groups,
+ auto_security_group=parsed_args.auto_security_group,
+ availability_zone=parsed_args.availability_zone,
+ volume_type=parsed_args.volumes_type,
+ is_proxy_gateway=parsed_args.proxy_gateway,
+ volume_local_to_instance=parsed_args.volumes_locality,
+ use_autoconfig=parsed_args.autoconfig,
+ is_public=parsed_args.public,
+ is_protected=parsed_args.protected,
+ node_configs=configs,
+ shares=shares,
+ volumes_availability_zone=parsed_args.volumes_availability_zone
+ ).to_dict()
+
+ _format_ngt_output(data)
+ data = utils.prepare_data(data, NGT_FIELDS)
+
+ return self.dict2columns(data)
+
+
+class ListNodeGroupTemplates(lister.Lister):
+ """Lists node group templates"""
+
+ log = logging.getLogger(__name__ + ".ListNodeGroupTemplates")
+
+ def get_parser(self, prog_name):
+ parser = super(ListNodeGroupTemplates, self).get_parser(prog_name)
+ parser.add_argument(
+ '--long',
+ action='store_true',
+ default=False,
+ help='List additional fields in output',
+ )
+ parser.add_argument(
+ '--plugin',
+ metavar="<plugin>",
+ help="List node group templates for specific plugin"
+ )
+
+ parser.add_argument(
+ '--version',
+ metavar="<version>",
+ help="List node group templates with specific version of the "
+ "plugin"
+ )
+
+ parser.add_argument(
+ '--name',
+ metavar="<name-substring>",
+ help="List node group templates with specific substring in the "
+ "name"
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+ search_opts = {}
+ if parsed_args.plugin:
+ search_opts['plugin_name'] = parsed_args.plugin
+ if parsed_args.version:
+ search_opts['hadoop_version'] = parsed_args.version
+
+ data = client.node_group_templates.list(search_opts=search_opts)
+
+ if parsed_args.name:
+ data = utils.get_by_name_substring(data, parsed_args.name)
+
+ if parsed_args.long:
+ columns = ('name', 'id', 'plugin_name', 'hadoop_version',
+ 'node_processes', 'description')
+ column_headers = utils.prepare_column_headers(
+ columns, {'hadoop_version': 'version'})
+
+ else:
+ columns = ('name', 'id', 'plugin_name', 'hadoop_version')
+ column_headers = utils.prepare_column_headers(
+ columns, {'hadoop_version': 'version'})
+
+ return (
+ column_headers,
+ (osc_utils.get_item_properties(
+ s,
+ columns,
+ formatters={
+ 'node_processes': osc_utils.format_list
+ }
+ ) for s in data)
+ )
+
+
+class ShowNodeGroupTemplate(show.ShowOne):
+ """Display node group template details"""
+
+ log = logging.getLogger(__name__ + ".ShowNodeGroupTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(ShowNodeGroupTemplate, self).get_parser(prog_name)
+ parser.add_argument(
+ "node_group_template",
+ metavar="<node-group-template>",
+ help="Name or id of the node group template to display",
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ data = utils.get_resource(
+ client.node_group_templates,
+ parsed_args.node_group_template).to_dict()
+
+ _format_ngt_output(data)
+
+ data = utils.prepare_data(data, NGT_FIELDS)
+
+ return self.dict2columns(data)
+
+
+class DeleteNodeGroupTemplate(command.Command):
+ """Deletes node group template"""
+
+ log = logging.getLogger(__name__ + ".DeleteNodeGroupTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(DeleteNodeGroupTemplate, self).get_parser(prog_name)
+ parser.add_argument(
+ "node_group_template",
+ metavar="<node-group-template>",
+ nargs="+",
+ help="Name(s) or id(s) of the node group template(s) to delete",
+ )
+
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+ for ngt in parsed_args.node_group_template:
+ ngt_id = utils.get_resource(
+ client.node_group_templates, ngt).id
+ client.node_group_templates.delete(ngt_id)
+
+
+class UpdateNodeGroupTemplate(show.ShowOne):
+ """Updates node group template"""
+
+ log = logging.getLogger(__name__ + ".UpdateNodeGroupTemplate")
+
+ def get_parser(self, prog_name):
+ parser = super(UpdateNodeGroupTemplate, self).get_parser(prog_name)
+
+ parser.add_argument(
+ 'node_group_template',
+ metavar="<node-group-template>",
+ help="Name or ID of the node group template",
+ )
+ parser.add_argument(
+ '--name',
+ metavar="<name>",
+ help="New name of the node group template",
+ )
+ parser.add_argument(
+ '--plugin',
+ metavar="<plugin>",
+ help="Name of the plugin"
+ )
+ parser.add_argument(
+ '--version',
+ metavar="<version>",
+ help="Version of the plugin"
+ )
+ parser.add_argument(
+ '--processes',
+ metavar="<processes>",
+ nargs="+",
+ help="List of the processes that will be launched on each "
+ "instance"
+ )
+ parser.add_argument(
+ '--security-groups',
+ metavar="<security-groups>",
+ nargs="+",
+ help="List of the security groups for the instances in this node "
+ "group"
+ )
+ autosecurity = parser.add_mutually_exclusive_group()
+ autosecurity.add_argument(
+ '--auto-security-group-enable',
+ action='store_true',
+ help='Additional security group should be created '
+ 'for the node group',
+ dest='use_auto_security_group'
+ )
+ autosecurity.add_argument(
+ '--auto-security-group-disable',
+ action='store_false',
+ help='Additional security group should not be created '
+ 'for the node group',
+ dest='use_auto_security_group'
+ )
+ parser.add_argument(
+ '--availability-zone',
+ metavar="<availability-zone>",
+ help="Name of the availability zone where instances "
+ "will be created"
+ )
+ parser.add_argument(
+ '--flavor',
+ metavar="<flavor-id>",
+ help="ID of the flavor"
+ )
+ parser.add_argument(
+ '--floating-ip-pool',
+ metavar="<floating-ip-pool>",
+ help="ID of the floating IP pool"
+ )
+ parser.add_argument(
+ '--volumes-per-node',
+ type=int,
+ metavar="<volumes-per-node>",
+ help="Number of volumes attached to every node"
+ )
+ parser.add_argument(
+ '--volumes-size',
+ type=int,
+ metavar="<volumes-size>",
+ help='Size of volumes attached to every node (GB). '
+ 'This parameter will be taken into account only '
+ 'if volumes-per-node is set and non-zero'
+ )
+ parser.add_argument(
+ '--volumes-type',
+ metavar="<volumes-type>",
+ help='Type of the volumes. '
+ 'This parameter will be taken into account only '
+ 'if volumes-per-node is set and non-zero'
+ )
+ parser.add_argument(
+ '--volumes-availability-zone',
+ metavar="<volumes-availability-zone>",
+ help='Name of the availability zone where volumes will be created.'
+ ' This parameter will be taken into account only '
+ 'if volumes-per-node is set and non-zero'
+ )
+ parser.add_argument(
+ '--volumes-mount-prefix',
+ metavar="<volumes-mount-prefix>",
+ help='Prefix for mount point directory. '
+ 'This parameter will be taken into account only '
+ 'if volumes-per-node is set and non-zero'
+ )
+ volumelocality = parser.add_mutually_exclusive_group()
+ volumelocality.add_argument(
+ '--volumes-locality-enable',
+ action='store_true',
+ help='Instance and attached volumes will be created on '
+ 'the same physical host. This parameter will be taken into '
+ 'account only if volumes-per-node is set and non-zero',
+ dest='volume_locality'
+ )
+ volumelocality.add_argument(
+ '--volumes-locality-disable',
+ action='store_false',
+ help='Instance and attached volumes creation on the same physical '
+ 'host will not be regulated. This parameter will be taken '
+ 'into account only if volumes-per-node is set and non-zero',
+ dest='volume_locality'
+ )
+ parser.add_argument(
+ '--description',
+ metavar="<description>",
+ help='Description of the node group template'
+ )
+ autoconfig = parser.add_mutually_exclusive_group()
+ autoconfig.add_argument(
+ '--autoconfig-enable',
+ action='store_true',
+ help='Instances of the node group will be '
+ 'automatically configured',
+ dest='use_autoconfig'
+ )
+ autoconfig.add_argument(
+ '--autoconfig-disable',
+ action='store_false',
+ help='Instances of the node group will not be '
+ 'automatically configured',
+ dest='use_autoconfig'
+ )
+ proxy = parser.add_mutually_exclusive_group()
+ proxy.add_argument(
+ '--proxy-gateway-enable',
+ action='store_true',
+ help='Instances of the node group will be used to '
+ 'access other instances in the cluster',
+ dest='is_proxy_gateway'
+ )
+ proxy.add_argument(
+ '--proxy-gateway-disable',
+ action='store_false',
+ help='Instances of the node group will not be used to '
+ 'access other instances in the cluster',
+ dest='is_proxy_gateway'
+ )
+ public = parser.add_mutually_exclusive_group()
+ public.add_argument(
+ '--public',
+ action='store_true',
+ help='Make the node group template public '
+ '(Visible from other tenants)',
+ dest='is_public'
+ )
+ public.add_argument(
+ '--private',
+ action='store_false',
+ help='Make the node group template private '
+ '(Visible only from this tenant)',
+ dest='is_public'
+ )
+ protected = parser.add_mutually_exclusive_group()
+ protected.add_argument(
+ '--protected',
+ action='store_true',
+ help='Make the node group template protected',
+ dest='is_protected'
+ )
+ protected.add_argument(
+ '--unprotected',
+ action='store_false',
+ help='Make the node group template unprotected',
+ dest='is_protected'
+ )
+ parser.add_argument(
+ '--json',
+ metavar='<filename>',
+ help='JSON representation of the node group template update '
+ 'fields. Other arguments will not be taken into account if '
+ 'this one is provided'
+ )
+ parser.add_argument(
+ '--shares',
+ metavar='<filename>',
+ help='JSON representation of the manila shares'
+ )
+ parser.add_argument(
+ '--configs',
+ metavar='<filename>',
+ help='JSON representation of the node group template configs'
+ )
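+ # The paired enable/disable flags above share one dest; defaulting
+ # them (and the public/protected pairs) to None keeps "flag not
+ # given" distinguishable from an explicit choice.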
+ parser.set_defaults(is_public=None, is_protected=None,
+ is_proxy_gateway=None, volume_locality=None,
+ use_auto_security_group=None, use_autoconfig=None)
+ return parser
+
+ def take_action(self, parsed_args):
+ self.log.debug("take_action(%s)" % parsed_args)
+ client = self.app.client_manager.data_processing
+
+ ngt_id = utils.get_resource(
+ client.node_group_templates, parsed_args.node_group_template).id
+
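+ # A --json file, when given, supplies the whole update body; all
+ # other CLI arguments are ignored (see the --json help above).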
+ if parsed_args.json:
+ blob = osc_utils.read_blob_file_contents(parsed_args.json)
+ try:
+ template = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'template from file %s: %s' % (parsed_args.json, e))
+ data = client.node_group_templates.update(
+ ngt_id, **template).to_dict()
+ else:
+ configs = None
+ if parsed_args.configs:
+ blob = osc_utils.read_blob_file_contents(parsed_args.configs)
+ try:
+ configs = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'configs from file %s: %s' % (parsed_args.configs, e))
+
+ shares = None
+ if parsed_args.shares:
+ blob = osc_utils.read_blob_file_contents(parsed_args.shares)
+ try:
+ shares = json.loads(blob)
+ except ValueError as e:
+ raise exceptions.CommandError(
+ 'An error occurred when reading '
+ 'shares from file %s: %s' % (parsed_args.shares, e))
+
+ data = client.node_group_templates.update(
+ ngt_id,
+ name=parsed_args.name,
+ plugin_name=parsed_args.plugin,
+ hadoop_version=parsed_args.version,
+ flavor_id=parsed_args.flavor,
+ description=parsed_args.description,
+ volumes_per_node=parsed_args.volumes_per_node,
+ volumes_size=parsed_args.volumes_size,
+ node_processes=parsed_args.processes,
+ floating_ip_pool=parsed_args.floating_ip_pool,
+ security_groups=parsed_args.security_groups,
+ auto_security_group=parsed_args.use_auto_security_group,
+ availability_zone=parsed_args.availability_zone,
+ volume_type=parsed_args.volumes_type,
+ is_proxy_gateway=parsed_args.is_proxy_gateway,
+ volume_local_to_instance=parsed_args.volume_locality,
+ use_autoconfig=parsed_args.use_autoconfig,
+ is_public=parsed_args.is_public,
+ is_protected=parsed_args.is_protected,
+ node_configs=configs,
+ shares=shares,
+ volumes_availability_zone=parsed_args.volumes_availability_zone
+ ).to_dict()
+
+ _format_ngt_output(data)
+ data = utils.prepare_data(data, NGT_FIELDS)
+
+ return self.dict2columns(data)
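+
+# Hypothetical invocation of the update command defined above (the exact
+# command name depends on how these classes are registered in setup.cfg):
+#     openstack dataprocessing node group template update my-template \
+#         --volumes-per-node 2 --volumes-size 10 --volumes-locality-enable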
diff --git a/saharaclient/osc/v1/utils.py b/saharaclient/osc/v1/utils.py
index d21408c..8afb6c9 100644
--- a/saharaclient/osc/v1/utils.py
+++ b/saharaclient/osc/v1/utils.py
@@ -14,9 +14,13 @@
# limitations under the License.
import six
+import time
+from oslo_utils import timeutils
from oslo_utils import uuidutils
+from saharaclient.api import base
+
def get_resource(manager, name_or_id):
if uuidutils.is_uuid_like(name_or_id):
@@ -38,9 +42,30 @@ def prepare_data(data, fields):
return new_data
-def prepare_column_headers(columns):
- return [c.replace('_', ' ').capitalize() for c in columns]
+def prepare_column_headers(columns, remap=None):
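+ # Each remap entry rewrites a raw field name before display,
+ # e.g. 'hadoop_version' -> 'version'.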
+ remap = remap if remap else {}
+ new_columns = []
+ for c in columns:
+ for old, new in remap.items():
+ c = c.replace(old, new)
+ new_columns.append(c.replace('_', ' ').capitalize())
+
+ return new_columns
def get_by_name_substring(data, name):
return [obj for obj in data if name in obj.name]
+
+
+def wait_for_delete(manager, obj_id, sleep_time=5, timeout=3000):
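+ # Poll the manager until GET raises a 404 (the object is gone) or the
+ # timeout expires. Returns True on deletion, False on timeout; any
+ # non-404 API error is re-raised.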
+ s_time = timeutils.utcnow()
+ while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
+ try:
+ manager.get(obj_id)
+ except base.APIException as ex:
+ if ex.error_code == 404:
+ return True
+ raise
+ time.sleep(sleep_time)
+
+ return False
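+
+# Hypothetical usage sketch (names are illustrative, not from this patch):
+#     if not wait_for_delete(client.clusters, cluster_id):
+#         raise RuntimeError('cluster %s was not deleted in time' % cluster_id)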
diff --git a/saharaclient/tests/unit/osc/v1/fakes.py b/saharaclient/tests/unit/osc/v1/fakes.py
index d8062a3..829a4d7 100644
--- a/saharaclient/tests/unit/osc/v1/fakes.py
+++ b/saharaclient/tests/unit/osc/v1/fakes.py
@@ -24,3 +24,4 @@ class TestDataProcessing(utils.TestCommand):
super(TestDataProcessing, self).setUp()
self.app.client_manager.data_processing = mock.Mock()
+ self.app.client_manager.network = mock.Mock()
diff --git a/saharaclient/tests/unit/osc/v1/test_cluster_templates.py b/saharaclient/tests/unit/osc/v1/test_cluster_templates.py
new file mode 100644
index 0000000..c5c2632
--- /dev/null
+++ b/saharaclient/tests/unit/osc/v1/test_cluster_templates.py
@@ -0,0 +1,304 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from openstackclient.tests import utils as osc_utils
+
+from saharaclient.api import cluster_templates as api_ct
+from saharaclient.api import node_group_templates as api_ngt
+from saharaclient.osc.v1 import cluster_templates as osc_ct
+from saharaclient.tests.unit.osc.v1 import fakes
+
+CT_INFO = {
+ "description": "Cluster template for tests",
+ "use_autoconfig": True,
+ "is_default": False,
+ "node_groups": [
+ {
+ "count": 2,
+ "id": "d29631fc-0fad-434b-80aa-7a3e9526f57c",
+ "name": "fakeng",
+ "plugin_name": 'fake',
+ "hadoop_version": '0.1'
+ }
+ ],
+ "hadoop_version": "0.1",
+ "is_public": False,
+ "plugin_name": "fake",
+ "id": "0647061f-ab98-4c89-84e0-30738ea55750",
+ "anti_affinity": [],
+ "name": "template",
+ "is_protected": False
+}
+
+
+class TestClusterTemplates(fakes.TestDataProcessing):
+ def setUp(self):
+ super(TestClusterTemplates, self).setUp()
+ self.ct_mock = (
+ self.app.client_manager.data_processing.cluster_templates)
+ self.ngt_mock = (
+ self.app.client_manager.data_processing.node_group_templates)
+ self.ct_mock.reset_mock()
+ self.ngt_mock.reset_mock()
+
+
+class TestCreateClusterTemplate(TestClusterTemplates):
+ # TODO(apavlov): check for creation with --json
+ def setUp(self):
+ super(TestCreateClusterTemplate, self).setUp()
+ self.ct_mock.create.return_value = api_ct.ClusterTemplate(
+ None, CT_INFO)
+ self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
+ None, CT_INFO['node_groups'][0])
+
+ # Command to test
+ self.cmd = osc_ct.CreateClusterTemplate(self.app, None)
+
+ def test_ct_create_minimum_options(self):
+ arglist = ['--name', 'template', '--node-groups', 'fakeng:2']
+ verifylist = [('name', 'template'),
+ ('node_groups', ['fakeng:2'])]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.ct_mock.create.assert_called_once_with(
+ description=None, hadoop_version='0.1', is_protected=False,
+ is_public=False, name='template', node_groups=[
+ {'count': 2, 'name': 'fakeng',
+ 'node_group_template_id':
+ 'd29631fc-0fad-434b-80aa-7a3e9526f57c'}],
+ plugin_name='fake', use_autoconfig=False, shares=None,
+ cluster_configs=None)
+
+ def test_ct_create_all_options(self):
+ arglist = ['--name', 'template', '--node-groups', 'fakeng:2',
+ '--anti-affinity', 'datanode',
+ '--description', 'descr',
+ '--autoconfig', '--public', '--protected']
+
+ verifylist = [('name', 'template'),
+ ('node_groups', ['fakeng:2']),
+ ('description', 'descr'), ('autoconfig', True),
+ ('public', True), ('protected', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.ct_mock.create.assert_called_once_with(
+ description='descr', hadoop_version='0.1', is_protected=True,
+ is_public=True, name='template', node_groups=[
+ {'count': 2, 'name': 'fakeng',
+ 'node_group_template_id':
+ 'd29631fc-0fad-434b-80aa-7a3e9526f57c'}],
+ plugin_name='fake', use_autoconfig=True, shares=None,
+ cluster_configs=None)
+
+ # Check that columns are correct
+ expected_columns = ('Anti affinity', 'Description', 'Id', 'Is default',
+ 'Is protected', 'Is public', 'Name', 'Node groups',
+ 'Plugin name', 'Use autoconfig', 'Version')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = ('', 'Cluster template for tests',
+ '0647061f-ab98-4c89-84e0-30738ea55750', False, False,
+ False, 'template', 'fakeng:2', 'fake', True, '0.1')
+ self.assertEqual(expected_data, data)
+
+
+class TestListClusterTemplates(TestClusterTemplates):
+ def setUp(self):
+ super(TestListClusterTemplates, self).setUp()
+ self.ct_mock.list.return_value = [api_ct.ClusterTemplate(
+ None, CT_INFO)]
+
+ # Command to test
+ self.cmd = osc_ct.ListClusterTemplates(self.app, None)
+
+ def test_ct_list_no_options(self):
+ arglist = []
+ verifylist = []
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Plugin name', 'Version']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('template', '0647061f-ab98-4c89-84e0-30738ea55750',
+ 'fake', '0.1')]
+ self.assertEqual(expected_data, list(data))
+
+ def test_ct_list_long(self):
+ arglist = ['--long']
+ verifylist = [('long', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Plugin name', 'Version',
+ 'Node groups', 'Description']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('template', '0647061f-ab98-4c89-84e0-30738ea55750',
+ 'fake', '0.1', 'fakeng:2',
+ 'Cluster template for tests')]
+ self.assertEqual(expected_data, list(data))
+
+ def test_ct_list_extra_search_opts(self):
+ arglist = ['--plugin', 'fake', '--version', '0.1', '--name', 'templ']
+ verifylist = [('plugin', 'fake'), ('version', '0.1'),
+ ('name', 'templ')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Plugin name', 'Version']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('template', '0647061f-ab98-4c89-84e0-30738ea55750',
+ 'fake', '0.1')]
+ self.assertEqual(expected_data, list(data))
+
+
+class TestShowClusterTemplate(TestClusterTemplates):
+ def setUp(self):
+ super(TestShowClusterTemplate, self).setUp()
+ self.ct_mock.find_unique.return_value = api_ct.ClusterTemplate(
+ None, CT_INFO)
+
+ # Command to test
+ self.cmd = osc_ct.ShowClusterTemplate(self.app, None)
+
+ def test_ct_show(self):
+ arglist = ['template']
+ verifylist = [('cluster_template', 'template')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.ct_mock.find_unique.assert_called_once_with(name='template')
+
+ # Check that columns are correct
+ expected_columns = ('Anti affinity', 'Description', 'Id', 'Is default',
+ 'Is protected', 'Is public', 'Name', 'Node groups',
+ 'Plugin name', 'Use autoconfig', 'Version')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = (
+ '', 'Cluster template for tests',
+ '0647061f-ab98-4c89-84e0-30738ea55750', False, False, False,
+ 'template', 'fakeng:2', 'fake', True, '0.1')
+ self.assertEqual(expected_data, data)
+
+
+class TestDeleteClusterTemplate(TestClusterTemplates):
+ def setUp(self):
+ super(TestDeleteClusterTemplate, self).setUp()
+ self.ct_mock.find_unique.return_value = api_ct.ClusterTemplate(
+ None, CT_INFO)
+
+ # Command to test
+ self.cmd = osc_ct.DeleteClusterTemplate(self.app, None)
+
+ def test_ct_delete(self):
+ arglist = ['template']
+ verifylist = [('cluster_template', ['template'])]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.ct_mock.delete.assert_called_once_with(
+ '0647061f-ab98-4c89-84e0-30738ea55750')
+
+
+class TestUpdateClusterTemplate(TestClusterTemplates):
+ # TODO(apavlov): check for update with --json
+ def setUp(self):
+ super(TestUpdateClusterTemplate, self).setUp()
+ self.ct_mock.update.return_value = api_ct.ClusterTemplate(
+ None, CT_INFO)
+ self.ct_mock.find_unique.return_value = api_ct.ClusterTemplate(
+ None, CT_INFO)
+ self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
+ None, CT_INFO['node_groups'][0])
+
+ # Command to test
+ self.cmd = osc_ct.UpdateClusterTemplate(self.app, None)
+
+ def test_ct_update_no_options(self):
+ arglist = []
+ verifylist = []
+
+ self.assertRaises(osc_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_ct_update_all_options(self):
+ arglist = ['template', '--name', 'template', '--node-groups',
+ 'fakeng:2', '--anti-affinity', 'datanode',
+ '--description', 'descr', '--autoconfig-enable',
+ '--public', '--protected']
+
+ verifylist = [('cluster_template', 'template'), ('name', 'template'),
+ ('node_groups', ['fakeng:2']),
+ ('description', 'descr'), ('use_autoconfig', True),
+ ('is_public', True), ('is_protected', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.ct_mock.update.assert_called_once_with(
+ '0647061f-ab98-4c89-84e0-30738ea55750', description='descr',
+ hadoop_version='0.1', is_protected=True, is_public=True,
+ name='template',
+ node_groups=[
+ {'count': 2, 'name': 'fakeng',
+ 'node_group_template_id':
+ 'd29631fc-0fad-434b-80aa-7a3e9526f57c'}],
+ plugin_name='fake', use_autoconfig=True, shares=None,
+ cluster_configs=None)
+
+ # Check that columns are correct
+ expected_columns = ('Anti affinity', 'Description', 'Id', 'Is default',
+ 'Is protected', 'Is public', 'Name', 'Node groups',
+ 'Plugin name', 'Use autoconfig', 'Version')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = ('', 'Cluster template for tests',
+ '0647061f-ab98-4c89-84e0-30738ea55750', False, False,
+ False, 'template', 'fakeng:2', 'fake', True, '0.1')
+ self.assertEqual(expected_data, data)
diff --git a/saharaclient/tests/unit/osc/v1/test_clusters.py b/saharaclient/tests/unit/osc/v1/test_clusters.py
new file mode 100644
index 0000000..24b931e
--- /dev/null
+++ b/saharaclient/tests/unit/osc/v1/test_clusters.py
@@ -0,0 +1,441 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+from openstackclient.tests import utils as osc_utils
+
+from saharaclient.api import cluster_templates as api_ct
+from saharaclient.api import clusters as api_cl
+from saharaclient.api import images as api_img
+from saharaclient.api import node_group_templates as api_ngt
+from saharaclient.osc.v1 import clusters as osc_cl
+from saharaclient.tests.unit.osc.v1 import fakes
+
+CLUSTER_INFO = {
+ "description": "Cluster template for tests",
+ "use_autoconfig": True,
+ "is_default": False,
+ "node_groups": [
+ {
+ "count": 2,
+ "id": "ng_id",
+ "name": "fakeng",
+ "plugin_name": 'fake',
+ "hadoop_version": '0.1'
+ }
+ ],
+ "hadoop_version": "0.1",
+ "is_public": False,
+ "plugin_name": "fake",
+ "id": "cluster_id",
+ "anti_affinity": [],
+ "name": "fake",
+ "is_protected": False,
+ "cluster_template_id": "ct_id",
+ "neutron_management_network": "net_id",
+ "user_keypair_id": "test",
+ "status": 'Active',
+ "default_image_id": "img_id"
+}
+
+CT_INFO = {
+ "plugin_name": "fake",
+ "hadoop_version": "0.1",
+ "name": '"template',
+ "id": "ct_id"
+}
+
+
+class TestClusters(fakes.TestDataProcessing):
+ def setUp(self):
+ super(TestClusters, self).setUp()
+ self.cl_mock = (
+ self.app.client_manager.data_processing.clusters)
+ self.ngt_mock = (
+ self.app.client_manager.data_processing.node_group_templates)
+ self.ct_mock = (
+ self.app.client_manager.data_processing.cluster_templates)
+ self.img_mock = (
+ self.app.client_manager.data_processing.images)
+ self.cl_mock.reset_mock()
+ self.ngt_mock.reset_mock()
+ self.ct_mock.reset_mock()
+ self.img_mock.reset_mock()
+
+
+class TestCreateCluster(TestClusters):
+ # TODO(apavlov): check for creation with --json
+ def setUp(self):
+ super(TestCreateCluster, self).setUp()
+ self.cl_mock.create.return_value = api_cl.Cluster(
+ None, CLUSTER_INFO)
+ self.cl_mock.find_unique.return_value = api_cl.Cluster(
+ None, CLUSTER_INFO)
+ self.ct_mock.find_unique.return_value = api_ct.ClusterTemplate(
+ None, CT_INFO)
+ self.img_mock.find_unique.return_value = api_img.Image(
+ None, {'id': 'img_id'})
+ self.net_mock = self.app.client_manager.network.api
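+ # The --neutron-network name is resolved to an id through the
+ # network client's find_attr.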
+ self.net_mock.find_attr.return_value = {'id': 'net_id'}
+ self.net_mock.reset_mock()
+
+ # Command to test
+ self.cmd = osc_cl.CreateCluster(self.app, None)
+
+ def test_cluster_create_minimum_options(self):
+ arglist = ['--name', 'fake', '--cluster-template', 'template',
+ '--image', 'ubuntu']
+ verifylist = [('name', 'fake'), ('cluster_template', 'template'),
+ ('image', 'ubuntu')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.cl_mock.create.assert_called_once_with(
+ cluster_template_id='ct_id', count=None, default_image_id='img_id',
+ description=None, hadoop_version='0.1', is_protected=False,
+ is_public=False, is_transient=False, name='fake', net_id=None,
+ plugin_name='fake', user_keypair_id=None)
+
+ def test_cluster_create_all_options(self):
+ arglist = ['--name', 'fake', '--cluster-template', 'template',
+ '--image', 'ubuntu', '--user-keypair', 'test',
+ '--neutron-network', 'net', '--description', 'descr',
+ '--transient', '--public', '--protected']
+
+ verifylist = [('name', 'fake'), ('cluster_template', 'template'),
+ ('image', 'ubuntu'), ('user_keypair', 'test'),
+ ('neutron_network', 'net'), ('description', 'descr'),
+ ('transient', True), ('public', True),
+ ('protected', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.cl_mock.create.assert_called_once_with(
+ cluster_template_id='ct_id', count=None, default_image_id='img_id',
+ description='descr', hadoop_version='0.1', is_protected=True,
+ is_public=True, is_transient=True, name='fake', net_id='net_id',
+ plugin_name='fake', user_keypair_id='test')
+
+ # Check that columns are correct
+ expected_columns = ('Anti affinity', 'Cluster template id',
+ 'Description', 'Id', 'Image',
+ 'Is protected', 'Is public', 'Name',
+ 'Neutron management network', 'Node groups',
+ 'Plugin name', 'Status', 'Use autoconfig',
+ 'User keypair id', 'Version')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = ('', 'ct_id', 'Cluster template for tests',
+ 'cluster_id', 'img_id', False, False, 'fake',
+ 'net_id', 'fakeng:2', 'fake', 'Active', True, 'test',
+ '0.1')
+ self.assertEqual(expected_data, data)
+
+ def test_cluster_create_with_count(self):
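+ # When --count is given, the API returns the ids of all created
+ # clusters under a 'clusters' key instead of a single cluster body.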
+ clusters_mock = mock.Mock()
+ clusters_mock.to_dict.return_value = {
+ 'clusters': ['cluster1_id', 'cluster2_id']
+ }
+ self.cl_mock.create.return_value = clusters_mock
+
+ arglist = ['--name', 'fake', '--cluster-template', 'template',
+ '--image', 'ubuntu', '--count', '2']
+ verifylist = [('name', 'fake'), ('cluster_template', 'template'),
+ ('image', 'ubuntu'), ('count', 2)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.cl_mock.create.assert_called_once_with(
+ cluster_template_id='ct_id', count=2, default_image_id='img_id',
+ description=None, hadoop_version='0.1', is_protected=False,
+ is_public=False, is_transient=False, name='fake', net_id=None,
+ plugin_name='fake', user_keypair_id=None)
+
+ # Check that columns are correct
+ expected_columns = ('fake',)
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = ('cluster_id',)
+ self.assertEqual(expected_data, data)
+
+
+class TestListClusters(TestClusters):
+ def setUp(self):
+ super(TestListClusters, self).setUp()
+ self.cl_mock.list.return_value = [api_cl.Cluster(
+ None, CLUSTER_INFO)]
+
+ # Command to test
+ self.cmd = osc_cl.ListClusters(self.app, None)
+
+ def test_clusters_list_no_options(self):
+ arglist = []
+ verifylist = []
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Plugin name', 'Version', 'Status']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('fake', 'cluster_id', 'fake', '0.1', 'Active')]
+ self.assertEqual(expected_data, list(data))
+
+ def test_clusters_list_long(self):
+ arglist = ['--long']
+ verifylist = [('long', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Plugin name', 'Version', 'Status',
+ 'Description', 'Image']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('fake', 'cluster_id', 'fake', '0.1', 'Active',
+ 'Cluster template for tests', 'img_id')]
+ self.assertEqual(expected_data, list(data))
+
+ def test_clusters_list_extra_search_opts(self):
+ arglist = ['--plugin', 'fake', '--version', '0.1', '--name', 'fake']
+ verifylist = [('plugin', 'fake'), ('version', '0.1'),
+ ('name', 'fake')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Plugin name', 'Version', 'Status']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('fake', 'cluster_id', 'fake', '0.1', 'Active')]
+ self.assertEqual(expected_data, list(data))
+
+
+class TestShowCluster(TestClusters):
+ def setUp(self):
+ super(TestShowCluster, self).setUp()
+ self.cl_mock.find_unique.return_value = api_cl.Cluster(
+ None, CLUSTER_INFO)
+
+ # Command to test
+ self.cmd = osc_cl.ShowCluster(self.app, None)
+
+ def test_cluster_show(self):
+ arglist = ['fake']
+ verifylist = [('cluster', 'fake')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.cl_mock.find_unique.assert_called_once_with(name='fake')
+
+ # Check that columns are correct
+ expected_columns = ('Anti affinity', 'Cluster template id',
+ 'Description', 'Id', 'Image',
+ 'Is protected', 'Is public', 'Name',
+ 'Neutron management network', 'Node groups',
+ 'Plugin name', 'Status', 'Use autoconfig',
+ 'User keypair id', 'Version')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = ('', 'ct_id', 'Cluster template for tests',
+ 'cluster_id', 'img_id', False, False, 'fake',
+ 'net_id', 'fakeng:2', 'fake', 'Active', True, 'test',
+ '0.1')
+ self.assertEqual(expected_data, data)
+
+
+class TestDeleteCluster(TestClusters):
+ def setUp(self):
+ super(TestDeleteCluster, self).setUp()
+ self.cl_mock.find_unique.return_value = api_cl.Cluster(
+ None, CLUSTER_INFO)
+
+ # Command to test
+ self.cmd = osc_cl.DeleteCluster(self.app, None)
+
+ def test_cluster_delete(self):
+ arglist = ['fake']
+ verifylist = [('cluster', ['fake'])]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.cl_mock.delete.assert_called_once_with('cluster_id')
+
+
+class TestUpdateCluster(TestClusters):
+ def setUp(self):
+ super(TestUpdateCluster, self).setUp()
+ self.cl_mock.update.return_value = mock.Mock(
+ cluster=CLUSTER_INFO.copy())
+ self.cl_mock.find_unique.return_value = api_cl.Cluster(
+ None, CLUSTER_INFO)
+
+ # Command to test
+ self.cmd = osc_cl.UpdateCluster(self.app, None)
+
+ def test_cluster_update_no_options(self):
+ arglist = []
+ verifylist = []
+
+ self.assertRaises(osc_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_cluster_update_all_options(self):
+ arglist = ['fake', '--name', 'fake', '--description', 'descr',
+ '--public', '--protected']
+
+ verifylist = [('cluster', 'fake'), ('name', 'fake'),
+ ('description', 'descr'), ('is_public', True),
+ ('is_protected', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.cl_mock.update.assert_called_once_with(
+ 'cluster_id', description='descr', is_protected=True,
+ is_public=True, name='fake')
+
+ # Check that columns are correct
+ expected_columns = ('Anti affinity', 'Cluster template id',
+ 'Description', 'Id', 'Image',
+ 'Is protected', 'Is public', 'Name',
+ 'Neutron management network', 'Node groups',
+ 'Plugin name', 'Status', 'Use autoconfig',
+ 'User keypair id', 'Version')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = ('', 'ct_id', 'Cluster template for tests',
+ 'cluster_id', 'img_id', False, False, 'fake',
+ 'net_id', 'fakeng:2', 'fake', 'Active', True, 'test',
+ '0.1')
+ self.assertEqual(expected_data, data)
+
+ def test_cluster_update_private_unprotected(self):
+ arglist = ['fake', '--private', '--unprotected']
+
+ verifylist = [('cluster', 'fake'), ('is_public', False),
+ ('is_protected', False)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.cl_mock.update.assert_called_once_with(
+ 'cluster_id', description=None, is_protected=False,
+ is_public=False, name=None)
+
+
+class TestScaleCluster(TestClusters):
+ def setUp(self):
+ super(TestScaleCluster, self).setUp()
+ self.cl_mock.scale.return_value = mock.Mock(
+ cluster=CLUSTER_INFO.copy())
+ self.cl_mock.find_unique.return_value = api_cl.Cluster(
+ None, CLUSTER_INFO)
+
+ # Command to test
+ self.cmd = osc_cl.ScaleCluster(self.app, None)
+
+ def test_cluster_scale_no_options(self):
+ arglist = []
+ verifylist = []
+
+ self.assertRaises(osc_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_cluster_scale_resize(self):
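+ # Scaling with a node group already present in the cluster resizes
+ # it via resize_node_groups.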
+ self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
+ None, CLUSTER_INFO['node_groups'][0])
+ arglist = ['fake', '--node-groups', 'fakeng:1']
+
+ verifylist = [('cluster', 'fake'), ('node_groups', ['fakeng:1'])]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.cl_mock.scale.assert_called_once_with(
+ 'cluster_id',
+ {'resize_node_groups': [{'count': 1, 'name': 'fakeng'}]})
+
+ # Check that columns are correct
+ expected_columns = ('Anti affinity', 'Cluster template id',
+ 'Description', 'Id', 'Image',
+ 'Is protected', 'Is public', 'Name',
+ 'Neutron management network', 'Node groups',
+ 'Plugin name', 'Status', 'Use autoconfig',
+ 'User keypair id', 'Version')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = ('', 'ct_id', 'Cluster template for tests',
+ 'cluster_id', 'img_id', False, False, 'fake',
+ 'net_id', 'fakeng:2', 'fake', 'Active', True, 'test',
+ '0.1')
+ self.assertEqual(expected_data, data)
+
+ def test_cluster_scale_add_ng(self):
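+ # Scaling with a template not yet part of the cluster adds it via
+ # add_node_groups.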
+ new_ng = {'name': 'new', 'id': 'new_id'}
+ self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
+ None, new_ng)
+ arglist = ['fake', '--node-groups', 'fakeng:1']
+
+ verifylist = [('cluster', 'fake'), ('node_groups', ['fakeng:1'])]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.cl_mock.scale.assert_called_once_with(
+ 'cluster_id',
+ {'add_node_groups': [
+ {'count': 1, 'node_group_template_id': 'new_id',
+ 'name': 'new'}
+ ]})
diff --git a/saharaclient/tests/unit/osc/v1/test_job_templates.py b/saharaclient/tests/unit/osc/v1/test_job_templates.py
new file mode 100644
index 0000000..5e716d0
--- /dev/null
+++ b/saharaclient/tests/unit/osc/v1/test_job_templates.py
@@ -0,0 +1,270 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+from saharaclient.api import jobs as api_j
+from saharaclient.osc.v1 import job_templates as osc_j
+from saharaclient.tests.unit.osc.v1 import fakes
+
+JOB_INFO = {
+ "is_public": False,
+ "id": "job_id",
+ "name": "pig-job",
+ "description": "Job for test",
+ "interface": [],
+ "libs": [
+ {
+ "id": "lib_id",
+ "name": "lib"
+ }
+ ],
+ "type": "Pig",
+ "is_protected": False,
+ "mains": [
+ {
+ "id": "main_id",
+ "name": "main"
+ }
+ ]
+}
+
+
+class TestJobTemplates(fakes.TestDataProcessing):
+ def setUp(self):
+ super(TestJobTemplates, self).setUp()
+ self.job_mock = self.app.client_manager.data_processing.jobs
+ self.job_mock.reset_mock()
+
+
+class TestCreateJobTemplate(TestJobTemplates):
+ # TODO(apavlov): check for creation with --interface
+ def setUp(self):
+ super(TestCreateJobTemplate, self).setUp()
+ self.job_mock.create.return_value = api_j.Job(
+ None, JOB_INFO)
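+ # Job binaries named by --mains/--libs are resolved to their ids
+ # through find_unique on the job binaries manager.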
+ self.jb_mock = self.app.client_manager.data_processing.job_binaries
+ self.jb_mock.find_unique.return_value = mock.Mock(id='jb_id')
+ self.jb_mock.reset_mock()
+
+ # Command to test
+ self.cmd = osc_j.CreateJobTemplate(self.app, None)
+
+ def test_job_template_create_minimum_options(self):
+ arglist = ['--name', 'pig-job', '--type', 'Pig']
+ verifylist = [('name', 'pig-job'), ('type', 'Pig')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.job_mock.create.assert_called_once_with(
+ description=None, interface=None, is_protected=False,
+ is_public=False, libs=None, mains=None, name='pig-job', type='Pig')
+
+ def test_job_template_create_all_options(self):
+ arglist = ['--name', 'pig-job', '--type', 'Pig', '--mains', 'main',
+ '--libs', 'lib', '--description', 'descr', '--public',
+ '--protected']
+
+ verifylist = [('name', 'pig-job'), ('type', 'Pig'),
+ ('mains', ['main']), ('libs', ['lib']),
+ ('description', 'descr'), ('public', True),
+ ('protected', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.job_mock.create.assert_called_once_with(
+ description='descr', interface=None, is_protected=True,
+ is_public=True, libs=['jb_id'], mains=['jb_id'], name='pig-job',
+ type='Pig')
+
+ # Check that columns are correct
+ expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
+ 'Libs', 'Mains', 'Name', 'Type')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = ('Job for test', 'job_id', False, False, 'lib:lib_id',
+ 'main:main_id', 'pig-job', 'Pig')
+ self.assertEqual(expected_data, data)
+
+
+class TestListJobTemplates(TestJobTemplates):
+ def setUp(self):
+ super(TestListJobTemplates, self).setUp()
+ self.job_mock.list.return_value = [api_j.Job(
+ None, JOB_INFO)]
+
+ # Command to test
+ self.cmd = osc_j.ListJobTemplates(self.app, None)
+
+ def test_job_templates_list_no_options(self):
+ arglist = []
+ verifylist = []
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Type']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('pig-job', 'job_id', 'Pig')]
+ self.assertEqual(expected_data, list(data))
+
+ def test_job_template_list_long(self):
+ arglist = ['--long']
+ verifylist = [('long', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Type', 'Description', 'Is public',
+ 'Is protected']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('pig-job', 'job_id', 'Pig', 'Job for test',
+ False, False)]
+ self.assertEqual(expected_data, list(data))
+
+ def test_job_template_list_extra_search_opts(self):
+ arglist = ['--type', 'Pig', '--name', 'pig']
+ verifylist = [('type', 'Pig'), ('name', 'pig')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Type']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('pig-job', 'job_id', 'Pig')]
+ self.assertEqual(expected_data, list(data))
+
+
+class TestShowJobTemplate(TestJobTemplates):
+ def setUp(self):
+ super(TestShowJobTemplate, self).setUp()
+ self.job_mock.find_unique.return_value = api_j.Job(
+ None, JOB_INFO)
+
+ # Command to test
+ self.cmd = osc_j.ShowJobTemplate(self.app, None)
+
+ def test_job_template_show(self):
+ arglist = ['pig-job']
+ verifylist = [('job_template', 'pig-job')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.job_mock.find_unique.assert_called_once_with(name='pig-job')
+
+ # Check that columns are correct
+ expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
+ 'Libs', 'Mains', 'Name', 'Type')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = ('Job for test', 'job_id', False, False, 'lib:lib_id',
+ 'main:main_id', 'pig-job', 'Pig')
+ self.assertEqual(expected_data, data)
+
+
+class TestDeleteJobTemplate(TestJobTemplates):
+ def setUp(self):
+ super(TestDeleteJobTemplate, self).setUp()
+ self.job_mock.find_unique.return_value = api_j.Job(
+ None, JOB_INFO)
+
+ # Command to test
+ self.cmd = osc_j.DeleteJobTemplate(self.app, None)
+
+ def test_job_template_delete(self):
+ arglist = ['pig-job']
+ verifylist = [('job_template', ['pig-job'])]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.job_mock.delete.assert_called_once_with('job_id')
+
+
+class TestUpdateJobTemplate(TestJobTemplates):
+ def setUp(self):
+ super(TestUpdateJobTemplate, self).setUp()
+ self.job_mock.find_unique.return_value = api_j.Job(None, JOB_INFO)
+ self.job_mock.update.return_value = mock.Mock(job=JOB_INFO.copy())
+
+ # Command to test
+ self.cmd = osc_j.UpdateJobTemplate(self.app, None)
+
+ def test_job_template_update_all_options(self):
+ arglist = ['pig-job', '--name', 'pig-job', '--description', 'descr',
+ '--public', '--protected']
+
+ verifylist = [('job_template', 'pig-job'), ('name', 'pig-job'),
+ ('description', 'descr'), ('is_public', True),
+ ('is_protected', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.job_mock.update.assert_called_once_with(
+ 'job_id', description='descr', is_protected=True, is_public=True,
+ name='pig-job')
+
+ # Check that columns are correct
+ expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
+ 'Libs', 'Mains', 'Name', 'Type')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = ('Job for test', 'job_id', False, False, 'lib:lib_id',
+ 'main:main_id', 'pig-job', 'Pig')
+ self.assertEqual(expected_data, data)
+
+ def test_job_template_update_private_unprotected(self):
+ arglist = ['pig-job', '--private', '--unprotected']
+
+ verifylist = [('job_template', 'pig-job'), ('is_public', False),
+ ('is_protected', False)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.job_mock.update.assert_called_once_with(
+ 'job_id', description=None, is_protected=False, is_public=False,
+ name=None)
diff --git a/saharaclient/tests/unit/osc/v1/test_job_types.py b/saharaclient/tests/unit/osc/v1/test_job_types.py
new file mode 100644
index 0000000..ed2c1bb
--- /dev/null
+++ b/saharaclient/tests/unit/osc/v1/test_job_types.py
@@ -0,0 +1,145 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import mock
+
+from saharaclient.api import job_types as api_jt
+from saharaclient.api import jobs as api_j
+from saharaclient.osc.v1 import job_types as osc_jt
+
+from saharaclient.tests.unit.osc.v1 import fakes
+
+JOB_TYPE_INFO = {
+ "name": 'Pig',
+ "plugins": [
+ {
+ 'versions': {
+ '0.1': {},
+ '0.2': {}
+ },
+ 'name': 'fake'
+ },
+ {
+ 'versions': {
+ '6.2.2': {}
+ },
+ 'name': 'wod'
+ }
+ ]
+}
+
+
+class TestJobTypes(fakes.TestDataProcessing):
+ def setUp(self):
+ super(TestJobTypes, self).setUp()
+ self.job_mock = self.app.client_manager.data_processing.jobs
+ self.jt_mock = self.app.client_manager.data_processing.job_types
+ self.jt_mock.reset_mock()
+ self.job_mock.reset_mock()
+
+
+class TestListJobTypes(TestJobTypes):
+ def setUp(self):
+ super(TestListJobTypes, self).setUp()
+ self.jt_mock.list.return_value = [api_jt.JobType(None, JOB_TYPE_INFO)]
+
+ # Command to test
+ self.cmd = osc_jt.ListJobTypes(self.app, None)
+
+ def test_job_types_list_no_options(self):
+ arglist = []
+ verifylist = []
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Plugins']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('Pig', 'fake(0.1, 0.2), wod(6.2.2)')]
+ self.assertEqual(expected_data, list(data))
+
+ def test_job_types_list_extra_search_opts(self):
+ arglist = ['--type', 'Pig', '--plugin', 'fake', '--version', '0.1']
+ verifylist = [('type', 'Pig'), ('plugin', 'fake'), ('version', '0.1')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Plugins']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('Pig', 'fake(0.1, 0.2), wod(6.2.2)')]
+ self.assertEqual(expected_data, list(data))
+
+
+class TestGetJobTypeConfigs(TestJobTypes):
+ def setUp(self):
+ super(TestGetJobTypeConfigs, self).setUp()
+ self.job_mock.get_configs.return_value = (
+ api_j.Job(None, JOB_TYPE_INFO))
+
+ # Command to test
+ self.cmd = osc_jt.GetJobTypeConfigs(self.app, None)
+
+ @mock.patch('oslo_serialization.jsonutils.dump')
+ def test_get_job_type_configs_default_file(self, p_dump):
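+ # Patch jsonutils.dump and open() so the test can inspect what would
+ # be written without touching the filesystem.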
+ m_open = mock.mock_open()
+ with mock.patch('six.moves.builtins.open', m_open, create=True):
+ arglist = ['Pig']
+ verifylist = [('job_type', 'Pig')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.job_mock.get_configs.assert_called_once_with(
+ 'Pig')
+
+ args_to_dump = p_dump.call_args[0]
+
+ # Check that the right data will be saved
+ self.assertEqual(JOB_TYPE_INFO, args_to_dump[0])
+ # Check that data will be saved to the right file
+ self.assertEqual('Pig', m_open.call_args[0][0])
+
+ @mock.patch('oslo_serialization.jsonutils.dump')
+ def test_get_job_type_configs_specified_file(self, p_dump):
+ m_open = mock.mock_open()
+ with mock.patch('six.moves.builtins.open', m_open):
+ arglist = ['Pig', '--file', 'testfile']
+ verifylist = [('job_type', 'Pig'), ('file', 'testfile')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.job_mock.get_configs.assert_called_once_with(
+ 'Pig')
+
+ args_to_dump = p_dump.call_args[0]
+
+ # Check that the right data will be saved
+ self.assertEqual(JOB_TYPE_INFO, args_to_dump[0])
+ # Check that data will be saved to the right file
+ self.assertEqual('testfile', m_open.call_args[0][0])
diff --git a/saharaclient/tests/unit/osc/v1/test_node_group_templates.py b/saharaclient/tests/unit/osc/v1/test_node_group_templates.py
new file mode 100644
index 0000000..92548f3
--- /dev/null
+++ b/saharaclient/tests/unit/osc/v1/test_node_group_templates.py
@@ -0,0 +1,371 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from openstackclient.tests import utils as osc_utils
+
+from saharaclient.api import node_group_templates as api_ngt
+from saharaclient.osc.v1 import node_group_templates as osc_ngt
+from saharaclient.tests.unit.osc.v1 import fakes
+
+
+NGT_INFO = {
+ "node_processes": [
+ "namenode",
+ "tasktracker"
+ ],
+ "name": "template",
+ "tenant_id": "tenant_id",
+ "availability_zone": 'av_zone',
+ "use_autoconfig": True,
+ "hadoop_version": "0.1",
+ "shares": None,
+ "is_default": False,
+ "description": 'description',
+ "node_configs": {},
+ "is_proxy_gateway": False,
+ "auto_security_group": True,
+ "volume_type": None,
+ "volumes_size": 2,
+ "volume_mount_prefix": "/volumes/disk",
+ "plugin_name": "fake",
+ "is_protected": False,
+ "security_groups": None,
+ "floating_ip_pool": "floating_pool",
+ "is_public": True,
+ "id": "ea3c8624-a1f0-49cf-83c4-f5a6634699ca",
+ "flavor_id": "2",
+ "volumes_availability_zone": None,
+ "volumes_per_node": 2,
+ "volume_local_to_instance": False
+}
+
+
+class TestNodeGroupTemplates(fakes.TestDataProcessing):
+ def setUp(self):
+ super(TestNodeGroupTemplates, self).setUp()
+ self.ngt_mock = (
+ self.app.client_manager.data_processing.node_group_templates)
+ self.ngt_mock.reset_mock()
+
+
+class TestCreateNodeGroupTemplate(TestNodeGroupTemplates):
+ # TODO(apavlov): check for creation with --json
+ def setUp(self):
+ super(TestCreateNodeGroupTemplate, self).setUp()
+ self.ngt_mock.create.return_value = api_ngt.NodeGroupTemplate(
+ None, NGT_INFO)
+
+ # Command to test
+ self.cmd = osc_ngt.CreateNodeGroupTemplate(self.app, None)
+
+ def test_ngt_create_minimum_options(self):
+ arglist = ['--name', 'template', '--plugin', 'fake', '--version',
+ '0.1', '--processes', 'namenode', 'tasktracker']
+ verifylist = [('name', 'template'), ('plugin', 'fake'),
+ ('version', '0.1'),
+ ('processes', ['namenode', 'tasktracker'])]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.ngt_mock.create.assert_called_once_with(
+ auto_security_group=False, availability_zone=None,
+ description=None, flavor_id=None, floating_ip_pool=None,
+ hadoop_version='0.1', is_protected=False, is_proxy_gateway=False,
+ is_public=False, name='template',
+ node_processes=['namenode', 'tasktracker'], plugin_name='fake',
+ security_groups=None, use_autoconfig=False,
+ volume_local_to_instance=False,
+ volume_type=None, volumes_availability_zone=None,
+ volumes_per_node=None, volumes_size=None, shares=None,
+ node_configs=None)
+
+ def test_ngt_create_all_options(self):
+ arglist = ['--name', 'template', '--plugin', 'fake', '--version',
+ '0.1', '--processes', 'namenode', 'tasktracker',
+ '--security-groups', 'secgr', '--auto-security-group',
+ '--availability-zone', 'av_zone', '--flavor', '2',
+ '--floating-ip-pool', 'floating_pool', '--volumes-per-node',
+ '2', '--volumes-size', '2', '--volumes-type', 'type',
+ '--volumes-availability-zone', 'vavzone',
+ '--volumes-mount-prefix', '/volume/asd',
+ '--volumes-locality', '--description', 'descr',
+ '--autoconfig', '--proxy-gateway', '--public',
+ '--protected']
+
+ verifylist = [('name', 'template'), ('plugin', 'fake'),
+ ('version', '0.1'),
+ ('processes', ['namenode', 'tasktracker']),
+ ('security_groups', ['secgr']),
+ ('auto_security_group', True),
+ ('availability_zone', 'av_zone'), ('flavor', '2'),
+ ('floating_ip_pool', 'floating_pool'),
+ ('volumes_per_node', 2), ('volumes_size', 2),
+ ('volumes_type', 'type'),
+ ('volumes_availability_zone', 'vavzone'),
+ ('volumes_mount_prefix', '/volume/asd'),
+ ('volumes_locality', True), ('description', 'descr'),
+ ('autoconfig', True), ('proxy_gateway', True),
+ ('public', True), ('protected', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.ngt_mock.create.assert_called_once_with(
+ auto_security_group=True, availability_zone='av_zone',
+ description='descr', flavor_id='2',
+ floating_ip_pool='floating_pool', hadoop_version='0.1',
+ is_protected=True, is_proxy_gateway=True, is_public=True,
+ name='template', node_processes=['namenode', 'tasktracker'],
+ plugin_name='fake', security_groups=['secgr'], use_autoconfig=True,
+ volume_local_to_instance=True, volume_type='type',
+ volumes_availability_zone='vavzone', volumes_per_node=2,
+ volumes_size=2, shares=None, node_configs=None)
+
+ # Check that columns are correct
+ expected_columns = (
+ 'Auto security group', 'Availability zone', 'Description',
+ 'Flavor id', 'Floating ip pool', 'Id', 'Is default',
+ 'Is protected', 'Is proxy gateway', 'Is public', 'Name',
+ 'Node processes', 'Plugin name', 'Security groups',
+ 'Use autoconfig', 'Version', 'Volume local to instance',
+ 'Volume mount prefix', 'Volume type', 'Volumes availability zone',
+ 'Volumes per node', 'Volumes size')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = (
+ True, 'av_zone', 'description', '2', 'floating_pool',
+ 'ea3c8624-a1f0-49cf-83c4-f5a6634699ca', False, False, False,
+ True, 'template', 'namenode, tasktracker', 'fake', None, True,
+ '0.1', False, '/volumes/disk', None, None, 2, 2)
+ self.assertEqual(expected_data, data)
+
+
+class TestListNodeGroupTemplates(TestNodeGroupTemplates):
+ def setUp(self):
+ super(TestListNodeGroupTemplates, self).setUp()
+ self.ngt_mock.list.return_value = [api_ngt.NodeGroupTemplate(
+ None, NGT_INFO)]
+
+ # Command to test
+ self.cmd = osc_ngt.ListNodeGroupTemplates(self.app, None)
+
+ def test_ngt_list_no_options(self):
+ arglist = []
+ verifylist = []
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Plugin name', 'Version']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('template', 'ea3c8624-a1f0-49cf-83c4-f5a6634699ca',
+ 'fake', '0.1')]
+ self.assertEqual(expected_data, list(data))
+
+ def test_ngt_list_long(self):
+ arglist = ['--long']
+ verifylist = [('long', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Plugin name', 'Version',
+ 'Node processes', 'Description']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('template', 'ea3c8624-a1f0-49cf-83c4-f5a6634699ca',
+ 'fake', '0.1', 'namenode, tasktracker',
+ 'description')]
+ self.assertEqual(expected_data, list(data))
+
+ def test_ngt_list_extra_search_opts(self):
+ arglist = ['--plugin', 'fake', '--version', '0.1', '--name', 'templ']
+ verifylist = [('plugin', 'fake'), ('version', '0.1'),
+ ('name', 'templ')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that columns are correct
+ expected_columns = ['Name', 'Id', 'Plugin name', 'Version']
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = [('template', 'ea3c8624-a1f0-49cf-83c4-f5a6634699ca',
+ 'fake', '0.1')]
+ self.assertEqual(expected_data, list(data))
+
+
+class TestShowNodeGroupTemplate(TestNodeGroupTemplates):
+ def setUp(self):
+ super(TestShowNodeGroupTemplate, self).setUp()
+ self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
+ None, NGT_INFO)
+
+ # Command to test
+ self.cmd = osc_ngt.ShowNodeGroupTemplate(self.app, None)
+
+ def test_ngt_show(self):
+ arglist = ['template']
+ verifylist = [('node_group_template', 'template')]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.ngt_mock.find_unique.assert_called_once_with(name='template')
+
+ # Check that columns are correct
+ expected_columns = (
+ 'Auto security group', 'Availability zone', 'Description',
+ 'Flavor id', 'Floating ip pool', 'Id', 'Is default',
+ 'Is protected', 'Is proxy gateway', 'Is public', 'Name',
+ 'Node processes', 'Plugin name', 'Security groups',
+ 'Use autoconfig', 'Version', 'Volume local to instance',
+ 'Volume mount prefix', 'Volume type', 'Volumes availability zone',
+ 'Volumes per node', 'Volumes size')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = (
+ True, 'av_zone', 'description', '2', 'floating_pool',
+ 'ea3c8624-a1f0-49cf-83c4-f5a6634699ca', False, False, False,
+ True, 'template', 'namenode, tasktracker', 'fake', None, True,
+ '0.1', False, '/volumes/disk', None, None, 2, 2)
+ self.assertEqual(expected_data, data)
+
+
+class TestDeleteNodeGroupTemplate(TestNodeGroupTemplates):
+ def setUp(self):
+ super(TestDeleteNodeGroupTemplate, self).setUp()
+ self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
+ None, NGT_INFO)
+
+ # Command to test
+ self.cmd = osc_ngt.DeleteNodeGroupTemplate(self.app, None)
+
+ def test_ngt_delete(self):
+ arglist = ['template']
+ verifylist = [('node_group_template', ['template'])]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ self.cmd.take_action(parsed_args)
+
+        # Check that the template name was resolved to its id and that
+        # delete was called with that id
+ self.ngt_mock.delete.assert_called_once_with(
+ 'ea3c8624-a1f0-49cf-83c4-f5a6634699ca')
+
+
+class TestUpdateNodeGroupTemplate(TestNodeGroupTemplates):
+ # TODO(apavlov): check for update with --json
+ def setUp(self):
+ super(TestUpdateNodeGroupTemplate, self).setUp()
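+        # Both the lookup by name and the update call itself return the
+        # fake template, so the take_action output can be checked below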
+ self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate(
+ None, NGT_INFO)
+ self.ngt_mock.update.return_value = api_ngt.NodeGroupTemplate(
+ None, NGT_INFO)
+
+ # Command to test
+ self.cmd = osc_ngt.UpdateNodeGroupTemplate(self.app, None)
+
+ def test_ngt_update_no_options(self):
+ arglist = []
+ verifylist = []
+
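+        # The positional template argument is required, so parsing
+        # must fail when no arguments are given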
+ self.assertRaises(osc_utils.ParserException, self.check_parser,
+ self.cmd, arglist, verifylist)
+
+ def test_ngt_update_all_options(self):
+ arglist = ['template', '--name', 'template', '--plugin', 'fake',
+ '--version', '0.1', '--processes', 'namenode',
+ 'tasktracker', '--security-groups', 'secgr',
+ '--auto-security-group-enable',
+ '--availability-zone', 'av_zone', '--flavor', '2',
+ '--floating-ip-pool', 'floating_pool', '--volumes-per-node',
+ '2', '--volumes-size', '2', '--volumes-type', 'type',
+ '--volumes-availability-zone', 'vavzone',
+ '--volumes-mount-prefix', '/volume/asd',
+ '--volumes-locality-enable', '--description', 'descr',
+ '--autoconfig-enable', '--proxy-gateway-enable', '--public',
+ '--protected']
+
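+        # Each CLI flag above should parse into the matching argparse
+        # destination below (e.g. --auto-security-group-enable maps to
+        # use_auto_security_group)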
+ verifylist = [('node_group_template', 'template'),
+ ('name', 'template'), ('plugin', 'fake'),
+ ('version', '0.1'),
+ ('processes', ['namenode', 'tasktracker']),
+ ('security_groups', ['secgr']),
+ ('use_auto_security_group', True),
+ ('availability_zone', 'av_zone'), ('flavor', '2'),
+ ('floating_ip_pool', 'floating_pool'),
+ ('volumes_per_node', 2), ('volumes_size', 2),
+ ('volumes_type', 'type'),
+ ('volumes_availability_zone', 'vavzone'),
+ ('volumes_mount_prefix', '/volume/asd'),
+ ('volume_locality', True),
+ ('description', 'descr'), ('use_autoconfig', True),
+ ('is_proxy_gateway', True),
+ ('is_public', True), ('is_protected', True)]
+
+ parsed_args = self.check_parser(self.cmd, arglist, verifylist)
+
+ columns, data = self.cmd.take_action(parsed_args)
+
+ # Check that correct arguments were passed
+ self.ngt_mock.update.assert_called_once_with(
+ 'ea3c8624-a1f0-49cf-83c4-f5a6634699ca',
+ auto_security_group=True, availability_zone='av_zone',
+ description='descr', flavor_id='2',
+ floating_ip_pool='floating_pool', hadoop_version='0.1',
+ is_protected=True, is_proxy_gateway=True, is_public=True,
+ name='template', node_processes=['namenode', 'tasktracker'],
+ plugin_name='fake', security_groups=['secgr'], use_autoconfig=True,
+ volume_local_to_instance=True, volume_type='type',
+ volumes_availability_zone='vavzone', volumes_per_node=2,
+ volumes_size=2, shares=None, node_configs=None)
+
+ # Check that columns are correct
+ expected_columns = (
+ 'Auto security group', 'Availability zone', 'Description',
+ 'Flavor id', 'Floating ip pool', 'Id', 'Is default',
+ 'Is protected', 'Is proxy gateway', 'Is public', 'Name',
+ 'Node processes', 'Plugin name', 'Security groups',
+ 'Use autoconfig', 'Version', 'Volume local to instance',
+ 'Volume mount prefix', 'Volume type', 'Volumes availability zone',
+ 'Volumes per node', 'Volumes size')
+ self.assertEqual(expected_columns, columns)
+
+ # Check that data is correct
+ expected_data = (
+ True, 'av_zone', 'description', '2', 'floating_pool',
+ 'ea3c8624-a1f0-49cf-83c4-f5a6634699ca', False, False, False,
+ True, 'template', 'namenode, tasktracker', 'fake', None, True,
+ '0.1', False, '/volumes/disk', None, None, 2, 2)
+ self.assertEqual(expected_data, data)
diff --git a/saharaclient/tests/unit/osc/v1/test_utils.py b/saharaclient/tests/unit/osc/v1/test_utils.py
index b9aeef4..021d747 100644
--- a/saharaclient/tests/unit/osc/v1/test_utils.py
+++ b/saharaclient/tests/unit/osc/v1/test_utils.py
@@ -66,3 +66,10 @@ class TestUtils(base.BaseTestCase):
columns2 = ['First', 'Second column']
self.assertEqual(
['First', 'Second column'], utils.prepare_column_headers(columns2))
+
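+        # remap renames the raw field before headers are prettified, so
+        # 'second_column' is rendered as 'Second', not 'Second column'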
+ columns3 = ['first', 'second_column']
+ self.assertEqual(
+ ['First', 'Second'], utils.prepare_column_headers(
+ columns3, remap={'second_column': 'second'}))
diff --git a/saharaclient/tests/unit/test_job_executions.py b/saharaclient/tests/unit/test_job_executions.py
index 855b149..e6867cb 100644
--- a/saharaclient/tests/unit/test_job_executions.py
+++ b/saharaclient/tests/unit/test_job_executions.py
@@ -29,6 +29,9 @@ class JobExecutionTest(base.BaseTestCase):
}
response = {
'cluster_id': 'cluster_id',
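+            # an explicitly-passed empty interface is now kept in the
+            # request body instead of being dropped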
+ 'interface': {},
'job_configs': {}
}