author    | Telles Nobrega <tenobreg@redhat.com>     | 2018-07-12 21:02:52 -0300
committer | Telles Nobrega <tellesnobrega@gmail.com> | 2019-01-10 20:15:23 -0300
commit    | 3f6f2d1128314e541f5a9036b1560a0f8e79ac32 (patch)
tree      | e9e1544224db9820281dd5faf330e2b278bfd33b /saharaclient/osc
parent    | 2a66f9a7150cd92e45f29bb028b60863a27fa443 (diff)
download  | python-saharaclient-3f6f2d1128314e541f5a9036b1560a0f8e79ac32.tar.gz
Preparing OSC for APIv2
On the way to making APIv2 stable, we need to make it available in the
OSC plugin for Sahara.
Change-Id: I84f4bc56c641caad7c04190c7a344a6773440eef
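Editor's note: the heart of the change is a pair of helpers added to saharaclient/osc/utils.py that every version-sensitive code path funnels through, plus the field rename they exist to hide (APIv2 calls the version field plugin_version where v1 used hadoop_version). A condensed, runnable sketch follows; get_api_version and is_api_v2 are taken from the diff below, while version_kwargs and _App are illustrative stand-ins that are not part of the commit:

def get_api_version(app):
    return app.api_version['data_processing']


def is_api_v2(app):
    return get_api_version(app) == '2'


def version_kwargs(app, plugin_version):
    # illustrative helper, not in the commit: pick the payload key by version
    if is_api_v2(app):
        return {'plugin_version': plugin_version}
    return {'hadoop_version': plugin_version}


class _App(object):
    # minimal stand-in for the OSC app object
    api_version = {'data_processing': '2'}


assert version_kwargs(_App(), '2.7.1') == {'plugin_version': '2.7.1'}

Keeping the branch in one place means the v1 and v2 command modules can share almost all of their request-building code.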
Diffstat (limited to 'saharaclient/osc')
-rw-r--r-- | saharaclient/osc/utils.py                | 235
-rw-r--r-- | saharaclient/osc/v1/cluster_templates.py | 116
-rw-r--r-- | saharaclient/osc/v1/clusters.py          |  95
-rw-r--r-- | saharaclient/osc/v1/job_templates.py     |  23
-rw-r--r-- | saharaclient/osc/v1/jobs.py              |  57
-rw-r--r-- | saharaclient/osc/v2/cluster_templates.py | 149
-rw-r--r-- | saharaclient/osc/v2/clusters.py          | 172
-rw-r--r-- | saharaclient/osc/v2/data_sources.py      |  48
-rw-r--r-- | saharaclient/osc/v2/images.py            |  62
-rw-r--r-- | saharaclient/osc/v2/job_binaries.py      | 212
-rw-r--r-- | saharaclient/osc/v2/job_templates.py     |  48
-rw-r--r-- | saharaclient/osc/v2/job_types.py         |  54
-rw-r--r-- | saharaclient/osc/v2/jobs.py              | 142
-rw-r--r-- | saharaclient/osc/v2/plugins.py           |  40
14 files changed, 1315 insertions(+), 138 deletions(-)
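Editor's note: nearly all of the v1 churn below follows one shape: each command's take_action() is split so the API interaction lives in _take_action(), which the new v2 classes inherit unchanged while swapping only the output formatting. Reduced to a self-contained toy (class names are illustrative, not the real osc_lib command classes):

class ShowThingV1(object):
    def _take_action(self, client):
        # shared step: fetch raw data from whatever endpoint `client` wraps
        return dict(client)

    def _format(self, data):
        # v1-only step: rename the legacy field for display
        data['plugin_version'] = data.pop('hadoop_version')
        return data

    def take_action(self, client):
        return self._format(self._take_action(client))


class ShowThingV2(ShowThingV1):
    def _format(self, data):
        # v2 responses already carry plugin_version; nothing to rename
        return data


v1 = ShowThingV1().take_action({'hadoop_version': '2.7.1'})
v2 = ShowThingV2().take_action({'plugin_version': '2.7.1'})
assert v1 == v2 == {'plugin_version': '2.7.1'}

This is why most files under saharaclient/osc/v2/ are thin subclasses: only formatting and search options differ per API version.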
diff --git a/saharaclient/osc/utils.py b/saharaclient/osc/utils.py
index aebb628..9928ecc 100644
--- a/saharaclient/osc/utils.py
+++ b/saharaclient/osc/utils.py
@@ -105,6 +105,241 @@ def wait_for_delete(manager, obj_id, sleep_time=5, timeout=3000):
     return False
 
 
+def get_api_version(app):
+    return app.api_version['data_processing']
+
+
+def is_api_v2(app):
+    if get_api_version(app) == '2':
+        return True
+    return False
+
+
+def _cluster_templates_configure_ng(app, node_groups, client):
+    node_groups_list = dict(
+        map(lambda x: x.split(':', 1), node_groups))
+
+    node_groups = []
+    plugins_versions = set()
+
+    for name, count in node_groups_list.items():
+        ng = get_resource(client.node_group_templates, name)
+        node_groups.append({'name': ng.name,
+                            'count': int(count),
+                            'node_group_template_id': ng.id})
+        if is_api_v2(app):
+            plugins_versions.add((ng.plugin_name, ng.plugin_version))
+        else:
+            plugins_versions.add((ng.plugin_name, ng.hadoop_version))
+
+    if len(plugins_versions) != 1:
+        raise exceptions.CommandError('Node groups with the same plugins '
+                                      'and versions must be specified')
+
+    plugin, plugin_version = plugins_versions.pop()
+    return plugin, plugin_version, node_groups
+
+
+def _get_plugin_version(app, cluster_template, client):
+    ct = get_resource(client.cluster_templates, cluster_template)
+    if is_api_v2(app):
+        return ct.plugin_name, ct.plugin_version, ct.id
+    else:
+        return ct.plugin_name, ct.hadoop_version, ct.id
+
+
+def create_job_templates(app, client, mains_ids, libs_ids, parsed_args):
+    args_dict = dict(name=parsed_args.name,
+                     type=parsed_args.type,
+                     mains=mains_ids,
+                     libs=libs_ids,
+                     description=parsed_args.description,
+                     interface=parsed_args.interface,
+                     is_public=parsed_args.public,
+                     is_protected=parsed_args.protected)
+
+    if is_api_v2(app):
+        data = client.job_templates.create(**args_dict).to_dict()
+    else:
+        data = client.jobs.create(**args_dict).to_dict()
+
+    return data
+
+
+def create_job_template_json(app, client, **template):
+    if is_api_v2(app):
+        data = client.job_templates.create(**template).to_dict()
+    else:
+        data = client.jobs.create(**template).to_dict()
+
+    return data
+
+
+def list_job_templates(app, client, search_opts):
+    if is_api_v2(app):
+        data = client.job_templates.list(search_opts=search_opts)
+    else:
+        data = client.jobs.list(search_opts=search_opts)
+
+    return data
+
+
+def get_job_templates_resources(app, client, parsed_args):
+    if is_api_v2(app):
+        data = get_resource(
+            client.job_templates, parsed_args.job_template).to_dict()
+    else:
+        data = get_resource(
+            client.jobs, parsed_args.job_template).to_dict()
+
+    return data
+
+
+def delete_job_templates(app, client, jt):
+    if is_api_v2(app):
+        jt_id = get_resource_id(client.job_templates, jt)
+        client.job_templates.delete(jt_id)
+    else:
+        jt_id = get_resource_id(client.jobs, jt)
+        client.jobs.delete(jt_id)
+
+
+def get_job_template_id(app, client, parsed_args):
+    if is_api_v2(app):
+        jt_id = get_resource_id(
+            client.job_templates, parsed_args.job_template)
+    else:
+        jt_id = get_resource_id(
+            client.jobs, parsed_args.job_template)
+
+    return jt_id
+
+
+def update_job_templates(app, client, jt_id, update_data):
+    if is_api_v2(app):
+        data = client.job_templates.update(jt_id, **update_data).job_template
+    else:
+        data = client.jobs.update(jt_id, **update_data).job
+
+    return data
+
+
+def create_cluster_template(app, client, plugin, plugin_version,
+                            parsed_args, configs, shares, node_groups):
+
+    args_dict = dict(
+        name=parsed_args.name,
+        plugin_name=plugin,
+        description=parsed_args.description,
+        node_groups=node_groups,
+        use_autoconfig=parsed_args.autoconfig,
+        cluster_configs=configs,
+        shares=shares,
+        is_public=parsed_args.public,
+        is_protected=parsed_args.protected,
+        domain_name=parsed_args.domain_name)
+
+    if is_api_v2(app):
+        args_dict['plugin_version'] = plugin_version
+    else:
+        args_dict['hadoop_version'] = plugin_version
+
+    data = client.cluster_templates.create(**args_dict).to_dict()
+    return data
+
+
+def update_cluster_template(app, client, plugin, plugin_version,
+                            parsed_args, configs, shares, node_groups, ct_id):
+
+    args_dict = dict(
+        name=parsed_args.name,
+        plugin_name=plugin,
+        description=parsed_args.description,
+        node_groups=node_groups,
+        use_autoconfig=parsed_args.use_autoconfig,
+        cluster_configs=configs,
+        shares=shares,
+        is_public=parsed_args.is_public,
+        is_protected=parsed_args.is_protected,
+        domain_name=parsed_args.domain_name
+    )
+
+    if is_api_v2(app):
+        args_dict['plugin_version'] = plugin_version
+    else:
+        args_dict['hadoop_version'] = plugin_version
+
+    update_dict = create_dict_from_kwargs(**args_dict)
+    data = client.cluster_templates.update(
+        ct_id, **update_dict).to_dict()
+
+    return data
+
+
+def create_cluster(client, app, parsed_args, plugin, plugin_version,
+                   template_id, image_id, net_id):
+
+    args = dict(
+        name=parsed_args.name,
+        plugin_name=plugin,
+        cluster_template_id=template_id,
+        default_image_id=image_id,
+        description=parsed_args.description,
+        is_transient=parsed_args.transient,
+        user_keypair_id=parsed_args.user_keypair,
+        net_id=net_id,
+        count=parsed_args.count,
+        is_public=parsed_args.public,
+        is_protected=parsed_args.protected)
+
+    if is_api_v2(app):
+        args['plugin_version'] = plugin_version
+    else:
+        args['hadoop_version'] = plugin_version
+
+    data = client.clusters.create(**args).to_dict()
+    return data
+
+
+def create_job(client, app, jt_id, cluster_id, input_id, output_id,
+               job_configs, parsed_args):
+    args_dict = dict(cluster_id=cluster_id,
+                     input_id=input_id,
+                     output_id=output_id,
+                     interface=parsed_args.interface,
+                     configs=job_configs,
+                     is_public=parsed_args.public,
+                     is_protected=parsed_args.protected)
+
+    if is_api_v2(app):
+        args_dict['job_template_id'] = jt_id
+        data = client.jobs.create(**args_dict).to_dict()
+    else:
+        args_dict['job_id'] = jt_id
+        data = client.job_executions.create(**args_dict).to_dict()
+
+    return data
+
+
+def create_job_json(client, app, **template):
+    if is_api_v2(app):
+        data = client.jobs.create(**template).to_dict()
+    else:
+        data = client.job_executions.create(**template).to_dict()
+
+    return data
+
+
+def update_job(client, app, parsed_args, update_dict):
+    if is_api_v2(app):
+        data = client.jobs.update(
+            parsed_args.job, **update_dict).job
+    else:
+        data = client.job_executions.update(
+            parsed_args.job, **update_dict).job_execution
+    return data
+
+
 def create_node_group_templates(client, app, parsed_args, flavor_id,
                                 configs, shares):
     if app.api_version['data_processing'] == '2':
diff --git a/saharaclient/osc/v1/cluster_templates.py b/saharaclient/osc/v1/cluster_templates.py
index 1442415..161dea7 100644
--- a/saharaclient/osc/v1/cluster_templates.py
+++ b/saharaclient/osc/v1/cluster_templates.py
@@ -33,13 +33,13 @@ def _format_node_groups_list(node_groups):
         ['%s:%s' % (ng['name'], ng['count']) for ng in node_groups])
 
 
-def _format_ct_output(data):
+def _format_ct_output(app, data):
     data['plugin_version'] = data.pop('hadoop_version')
     data['node_groups'] = _format_node_groups_list(data['node_groups'])
     data['anti_affinity'] = osc_utils.format_list(data['anti_affinity'])
 
 
-def _configure_node_groups(node_groups, client):
+def _configure_node_groups(app, node_groups, client):
     node_groups_list = dict(
         map(lambda x: x.split(':', 1), node_groups))
 
@@ -140,10 +140,7 @@ class CreateClusterTemplate(command.ShowOne):
         )
         return parser
 
-    def take_action(self, parsed_args):
-        self.log.debug("take_action(%s)", parsed_args)
-        client = self.app.client_manager.data_processing
-
+    def _take_action(self, client, parsed_args):
         if parsed_args.json:
             blob = osc_utils.read_blob_file_contents(parsed_args.json)
             try:
@@ -184,24 +181,24 @@ class CreateClusterTemplate(command.ShowOne):
                     'An error occurred when reading '
                     'shares from file %s: %s' % (parsed_args.shares, e))
 
-        plugin, plugin_version, node_groups = _configure_node_groups(
-            parsed_args.node_groups, client)
-
-        data = client.cluster_templates.create(
-            name=parsed_args.name,
-            plugin_name=plugin,
-            hadoop_version=plugin_version,
-            description=parsed_args.description,
-            node_groups=node_groups,
-            use_autoconfig=parsed_args.autoconfig,
-            cluster_configs=configs,
-            shares=shares,
-            is_public=parsed_args.public,
-            is_protected=parsed_args.protected,
-            domain_name=parsed_args.domain_name
-        ).to_dict()
-
-        _format_ct_output(data)
+        plugin, plugin_version, node_groups = (
+            utils._cluster_templates_configure_ng(self.app,
+                                                  parsed_args.node_groups,
+                                                  client))
+        data = utils.create_cluster_template(self.app, client, plugin,
+                                             plugin_version,
+                                             parsed_args, configs, shares,
+                                             node_groups)
+
+        return data
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
+
+        _format_ct_output(self.app, data)
         data = utils.prepare_data(data, CT_FIELDS)
 
         return self.dict2columns(data)
@@ -249,7 +246,10 @@ class ListClusterTemplates(command.Lister):
         if parsed_args.plugin:
             search_opts['plugin_name'] = parsed_args.plugin
         if parsed_args.plugin_version:
-            search_opts['hadoop_version'] = parsed_args.plugin_version
+            if utils.is_api_v2(self.app):
+                search_opts['plugin_version'] = parsed_args.plugin_version
+            else:
+                search_opts['hadoop_version'] = parsed_args.plugin_version
 
         data = client.cluster_templates.list(search_opts=search_opts)
 
@@ -301,7 +301,7 @@ class ShowClusterTemplate(command.ShowOne):
         data = utils.get_resource(
             client.cluster_templates, parsed_args.cluster_template).to_dict()
 
-        _format_ct_output(data)
+        _format_ct_output(self.app, data)
         data = utils.prepare_data(data, CT_FIELDS)
 
         return self.dict2columns(data)
@@ -442,13 +442,7 @@ class UpdateClusterTemplate(command.ShowOne):
             use_autoconfig=None)
         return parser
 
-    def take_action(self, parsed_args):
-        self.log.debug("take_action(%s)", parsed_args)
-        client = self.app.client_manager.data_processing
-
-        ct_id = utils.get_resource_id(
-            client.cluster_templates, parsed_args.cluster_template)
-
+    def _take_action(self, client, parsed_args, ct_id):
         if parsed_args.json:
             blob = osc_utils.read_blob_file_contents(parsed_args.json)
             try:
@@ -462,8 +456,9 @@ class UpdateClusterTemplate(command.ShowOne):
         else:
             plugin, plugin_version, node_groups = None, None, None
             if parsed_args.node_groups:
-                plugin, plugin_version, node_groups = _configure_node_groups(
-                    parsed_args.node_groups, client)
+                plugin, plugin_version, node_groups = (
+                    utils._cluster_templates_configure_ng(
+                        self.app, parsed_args.node_groups, client))
 
             configs = None
             if parsed_args.configs:
@@ -485,24 +480,23 @@ class UpdateClusterTemplate(command.ShowOne):
                     'An error occurred when reading '
                     'shares from file %s: %s' % (parsed_args.shares, e))
 
-        update_dict = utils.create_dict_from_kwargs(
-            name=parsed_args.name,
-            plugin_name=plugin,
-            hadoop_version=plugin_version,
-            description=parsed_args.description,
-            node_groups=node_groups,
-            use_autoconfig=parsed_args.use_autoconfig,
-            cluster_configs=configs,
-            shares=shares,
-            is_public=parsed_args.is_public,
-            is_protected=parsed_args.is_protected,
-            domain_name=parsed_args.domain_name
-        )
+        data = utils.update_cluster_template(self.app, client, plugin,
+                                             plugin_version, parsed_args,
+                                             configs, shares, node_groups,
+                                             ct_id)
 
-        data = client.cluster_templates.update(
-            ct_id, **update_dict).to_dict()
+        return data
 
-        _format_ct_output(data)
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        ct_id = utils.get_resource_id(
+            client.cluster_templates, parsed_args.cluster_template)
+
+        data = self._take_action(client, parsed_args, ct_id)
+
+        _format_ct_output(self.app, data)
         data = utils.prepare_data(data, CT_FIELDS)
 
         return self.dict2columns(data)
@@ -541,10 +535,7 @@ class ImportClusterTemplate(command.ShowOne):
         )
         return parser
 
-    def take_action(self, parsed_args):
-        self.log.debug("take_action(%s)", parsed_args)
-        client = self.app.client_manager.data_processing
-
+    def _take_action(self, client, parsed_args):
         if (not parsed_args.node_groups):
             raise exceptions.CommandError('--node_groups should be specified')
 
@@ -569,8 +560,9 @@ class ImportClusterTemplate(command.ShowOne):
             template['cluster_template']['net_id'] = (
                 template['cluster_template'].pop('neutron_management_network'))
 
-        plugin, plugin_version, node_groups = _configure_node_groups(
-            parsed_args.node_groups, client)
+        plugin, plugin_version, node_groups = (
+            utils._cluster_templates_configure_ng(
+                self.app, parsed_args.node_groups, client))
 
         if (('plugin_version' in template['cluster_template'] and
              template['cluster_template']['plugin_version'] !=
             plugin_version) or
@@ -584,7 +576,15 @@ class ImportClusterTemplate(command.ShowOne):
         data = client.cluster_templates.create(
             **template['cluster_template']).to_dict()
 
-        _format_ct_output(data)
+        return data
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
+
+        _format_ct_output(self.app, data)
         data = utils.prepare_data(data, CT_FIELDS)
 
         return self.dict2columns(data)
diff --git a/saharaclient/osc/v1/clusters.py b/saharaclient/osc/v1/clusters.py
index 6930bf3..edcf3ac 100644
--- a/saharaclient/osc/v1/clusters.py
+++ b/saharaclient/osc/v1/clusters.py
@@ -35,7 +35,7 @@ def _format_node_groups_list(node_groups):
         ['%s:%s' % (ng['name'], ng['count']) for ng in node_groups])
 
 
-def _format_cluster_output(data):
+def _format_cluster_output(app, data):
     data['plugin_version'] = data.pop('hadoop_version')
     data['image'] = data.pop('default_image_id')
     data['node_groups'] = _format_node_groups_list(data['node_groups'])
@@ -54,11 +54,6 @@ def _prepare_health_checks(data):
     return additional_data, additional_fields
 
 
-def _get_plugin_version(cluster_template, client):
-    ct = utils.get_resource(client.cluster_templates, cluster_template)
-    return ct.plugin_name, ct.hadoop_version, ct.id
-
-
 class CreateCluster(command.ShowOne):
     """Creates cluster"""
 
@@ -140,9 +135,7 @@ class CreateCluster(command.ShowOne):
 
         return parser
 
-    def take_action(self, parsed_args):
self.log.debug("take_action(%s)", parsed_args) - client = self.app.client_manager.data_processing + def _take_action(self, client, parsed_args): network_client = self.app.client_manager.network if parsed_args.json: @@ -169,8 +162,8 @@ class CreateCluster(command.ShowOne): 'should be specified or json template should be provided ' 'with --json argument') - plugin, plugin_version, template_id = _get_plugin_version( - parsed_args.cluster_template, client) + plugin, plugin_version, template_id = utils._get_plugin_version( + self.app, parsed_args.cluster_template, client) image_id = utils.get_resource_id(client.images, parsed_args.image) @@ -178,20 +171,17 @@ class CreateCluster(command.ShowOne): parsed_args.neutron_network, ignore_missing=False).id if parsed_args.neutron_network else None) - data = client.clusters.create( - name=parsed_args.name, - plugin_name=plugin, - hadoop_version=plugin_version, - cluster_template_id=template_id, - default_image_id=image_id, - description=parsed_args.description, - is_transient=parsed_args.transient, - user_keypair_id=parsed_args.user_keypair, - net_id=net_id, - count=parsed_args.count, - is_public=parsed_args.public, - is_protected=parsed_args.protected - ).to_dict() + data = utils.create_cluster(client, self.app, parsed_args, plugin, + plugin_version, template_id, image_id, + net_id) + return data + + def take_action(self, parsed_args): + self.log.debug("take_action(%s)", parsed_args) + client = self.app.client_manager.data_processing + + data = self._take_action(client, parsed_args) + if parsed_args.count and parsed_args.count > 1: clusters = [ utils.get_resource(client.clusters, id) @@ -217,7 +207,7 @@ class CreateCluster(command.ShowOne): 'Error occurred during cluster creation: %s', data['id']) data = client.clusters.get(data['id']).to_dict() - _format_cluster_output(data) + _format_cluster_output(self.app, data) data = utils.prepare_data(data, CLUSTER_FIELDS) return self.dict2columns(data) @@ -277,12 +267,13 @@ class ListClusters(command.Lister): column_headers = utils.prepare_column_headers( columns, {'hadoop_version': 'plugin_version', 'default_image_id': 'image'}) - else: - columns = ('name', 'id', 'plugin_name', 'hadoop_version', 'status') + columns = ('name', 'id', 'plugin_name', 'hadoop_version', + 'status') column_headers = utils.prepare_column_headers( columns, {'hadoop_version': 'plugin_version', 'default_image_id': 'image'}) + return ( column_headers, (osc_utils.get_item_properties( @@ -326,10 +317,7 @@ class ShowCluster(command.ShowOne): ) return parser - def take_action(self, parsed_args): - self.log.debug("take_action(%s)", parsed_args) - client = self.app.client_manager.data_processing - + def _take_action(self, client, parsed_args): kwargs = {} if parsed_args.show_progress or parsed_args.full_dump_events: kwargs['show_progress'] = True @@ -344,8 +332,9 @@ class ShowCluster(command.ShowOne): with open(file_name, 'w') as file: jsonutils.dump(provision_steps, file, indent=4) sys.stdout.write('Event log dump saved to file: %s\n' % file_name) + return data, provision_steps - _format_cluster_output(data) + def _show_cluster_info(self, data, provision_steps, parsed_args): fields = [] if parsed_args.verification: ver_data, fields = _prepare_health_checks(data) @@ -370,6 +359,17 @@ class ShowCluster(command.ShowOne): return data + def take_action(self, parsed_args): + self.log.debug("take_action(%s)", parsed_args) + client = self.app.client_manager.data_processing + + data, provision_steps = self._take_action(client, parsed_args) + + 
+        _format_cluster_output(self.app, data)
+
+        data = self._show_cluster_info(data, provision_steps, parsed_args)
+        return data
+
 
 class DeleteCluster(command.Command):
     """Deletes cluster"""
@@ -477,10 +477,7 @@ class UpdateCluster(command.ShowOne):
 
         return parser
 
-    def take_action(self, parsed_args):
-        self.log.debug("take_action(%s)", parsed_args)
-        client = self.app.client_manager.data_processing
-
+    def _take_action(self, client, parsed_args):
         cluster_id = utils.get_resource_id(
             client.clusters, parsed_args.cluster)
 
@@ -502,8 +499,15 @@ class UpdateCluster(command.ShowOne):
             shares=shares
         )
 
         data = client.clusters.update(cluster_id, **update_dict).cluster
+        return data
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
 
-        _format_cluster_output(data)
+        data = self._take_action(client, parsed_args)
+
+        _format_cluster_output(self.app, data)
         data = utils.prepare_data(data, CLUSTER_FIELDS)
 
         return self.dict2columns(data)
@@ -545,10 +549,7 @@ class ScaleCluster(command.ShowOne):
 
         return parser
 
-    def take_action(self, parsed_args):
-        self.log.debug("take_action(%s)", parsed_args)
-        client = self.app.client_manager.data_processing
-
+    def _take_action(self, client, parsed_args):
         cluster = utils.get_resource(
             client.clusters, parsed_args.cluster)
 
@@ -603,7 +604,15 @@ class ScaleCluster(command.ShowOne):
                     cluster.id)
 
             data = client.clusters.get(cluster.id).cluster
-        _format_cluster_output(data)
+        return data
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
+
+        _format_cluster_output(self.app, data)
         data = utils.prepare_data(data, CLUSTER_FIELDS)
 
         return self.dict2columns(data)
diff --git a/saharaclient/osc/v1/job_templates.py b/saharaclient/osc/v1/job_templates.py
index 9013563..d391683 100644
--- a/saharaclient/osc/v1/job_templates.py
+++ b/saharaclient/osc/v1/job_templates.py
@@ -111,7 +111,8 @@ class CreateJobTemplate(command.ShowOne):
                 raise exceptions.CommandError(
                     'An error occurred when reading '
                     'template from file %s: %s' % (parsed_args.json, e))
-            data = client.jobs.create(**template).to_dict()
+            data = utils.create_job_template_json(self.app,
+                                                  client, **template)
         else:
             if parsed_args.interface:
                 blob = osc_utils.read_blob_file_contents(parsed_args.json)
@@ -127,11 +128,8 @@ class CreateJobTemplate(command.ShowOne):
             libs_ids = [utils.get_resource_id(client.job_binaries, m) for m
                         in parsed_args.libs] if parsed_args.libs else None
 
-            data = client.jobs.create(
-                name=parsed_args.name, type=parsed_args.type, mains=mains_ids,
-                libs=libs_ids, description=parsed_args.description,
-                interface=parsed_args.interface, is_public=parsed_args.public,
-                is_protected=parsed_args.protected).to_dict()
+            data = utils.create_job_templates(self.app, client, mains_ids,
+                                              libs_ids, parsed_args)
 
         _format_job_template_output(data)
         data = utils.prepare_data(data, JOB_TEMPLATE_FIELDS)
@@ -172,7 +170,7 @@ class ListJobTemplates(command.Lister):
         client = self.app.client_manager.data_processing
 
         search_opts = {'type': parsed_args.type} if parsed_args.type else {}
-        data = client.jobs.list(search_opts=search_opts)
+        data = utils.list_job_templates(self.app, client, search_opts)
 
         if parsed_args.name:
             data = utils.get_by_name_substring(data, parsed_args.name)
@@ -214,8 +212,7 @@ class ShowJobTemplate(command.ShowOne):
         self.log.debug("take_action(%s)", parsed_args)
         client = self.app.client_manager.data_processing
 
-        data = utils.get_resource(
-            client.jobs, parsed_args.job_template).to_dict()
+        data = utils.get_job_templates_resources(self.app, client, parsed_args)
 
         _format_job_template_output(data)
         data = utils.prepare_data(data, JOB_TEMPLATE_FIELDS)
@@ -243,8 +240,7 @@ class DeleteJobTemplate(command.Command):
         self.log.debug("take_action(%s)", parsed_args)
         client = self.app.client_manager.data_processing
         for jt in parsed_args.job_template:
-            jt_id = utils.get_resource_id(client.jobs, jt)
-            client.jobs.delete(jt_id)
+            utils.delete_job_templates(self.app, client, jt)
             sys.stdout.write(
                 'Job template "{jt}" has been removed '
                 'successfully.\n'.format(jt=jt))
@@ -309,8 +305,7 @@ class UpdateJobTemplate(command.ShowOne):
         self.log.debug("take_action(%s)", parsed_args)
         client = self.app.client_manager.data_processing
 
-        jt_id = utils.get_resource_id(
-            client.jobs, parsed_args.job_template)
+        jt_id = utils.get_job_template_id(self.app, client, parsed_args)
 
         update_data = utils.create_dict_from_kwargs(
             name=parsed_args.name,
@@ -319,7 +314,7 @@ class UpdateJobTemplate(command.ShowOne):
             is_protected=parsed_args.is_protected
         )
 
-        data = client.jobs.update(jt_id, **update_data).job
+        data = utils.update_job_templates(self.app, client, jt_id, update_data)
 
         _format_job_template_output(data)
         data = utils.prepare_data(data, JOB_TEMPLATE_FIELDS)
diff --git a/saharaclient/osc/v1/jobs.py b/saharaclient/osc/v1/jobs.py
index 27f1f88..abe6f1d 100644
--- a/saharaclient/osc/v1/jobs.py
+++ b/saharaclient/osc/v1/jobs.py
@@ -31,7 +31,7 @@ JOB_STATUS_CHOICES = ['done-with-error', 'failed', 'killed', 'pending',
                       'running', 'succeeded', 'to-be-killed']
 
 
-def _format_job_output(data):
+def _format_job_output(app, data):
     data['status'] = data['info']['status']
     del data['info']
     data['job_template_id'] = data.pop('job_id')
@@ -116,9 +116,7 @@ class ExecuteJob(command.ShowOne):
         )
         return parser
 
-    def take_action(self, parsed_args):
-        self.log.debug("take_action(%s)", parsed_args)
-        client = self.app.client_manager.data_processing
+    def _take_action(self, client, parsed_args):
 
         if parsed_args.json:
             blob = osc_utils.read_blob_file_contents(parsed_args.json)
@@ -132,7 +130,7 @@ class ExecuteJob(command.ShowOne):
             if 'job_configs' in template:
                 template['configs'] = template.pop('job_configs')
 
-            data = client.job_executions.create(**template).to_dict()
+            data = utils.create_job_json(client, self.app, **template)
         else:
             if not parsed_args.cluster or not parsed_args.job_template:
                 raise exceptions.CommandError(
@@ -170,8 +168,7 @@ class ExecuteJob(command.ShowOne):
                 job_configs['params'] = dict(
                     map(lambda x: x.split(':', 1), parsed_args.params))
 
-            jt_id = utils.get_resource_id(
-                client.jobs, parsed_args.job_template)
+            jt_id = utils.get_job_template_id(self.app, client, parsed_args)
             cluster_id = utils.get_resource_id(
                 client.clusters, parsed_args.cluster)
             if parsed_args.input not in [None, "", "None"]:
@@ -185,17 +182,22 @@ class ExecuteJob(command.ShowOne):
             else:
                 output_id = None
 
-            data = client.job_executions.create(
-                job_id=jt_id, cluster_id=cluster_id, input_id=input_id,
-                output_id=output_id, interface=parsed_args.interface,
-                configs=job_configs, is_public=parsed_args.public,
-                is_protected=parsed_args.protected).to_dict()
-
+            data = utils.create_job(client, self.app, jt_id, cluster_id,
+                                    input_id, output_id, job_configs,
+                                    parsed_args)
         sys.stdout.write(
             'Job "{job}" has been started successfully.\n'.format(
                 job=data['id']))
 
-        _format_job_output(data)
+        return data
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
+
+        _format_job_output(self.app, data)
         data = utils.prepare_data(data, JOB_FIELDS)
 
         return self.dict2columns(data)
@@ -228,6 +230,7 @@ class ListJobs(command.Lister):
         client = self.app.client_manager.data_processing
 
         data = client.job_executions.list()
+
         for job in data:
             job.status = job.info['status']
 
@@ -275,7 +278,7 @@ class ShowJob(command.ShowOne):
 
         data = client.job_executions.get(parsed_args.job).to_dict()
 
-        _format_job_output(data)
+        _format_job_output(self.app, data)
         data = utils.prepare_data(data, JOB_FIELDS)
 
         return self.dict2columns(data)
@@ -308,12 +311,16 @@ class DeleteJob(command.Command):
         client = self.app.client_manager.data_processing
         for job_id in parsed_args.job:
             client.job_executions.delete(job_id)
+
             sys.stdout.write(
                 'Job "{job}" deletion has been started.\n'.format(job=job_id))
 
         if parsed_args.wait:
             for job_id in parsed_args.job:
-                if not utils.wait_for_delete(client.job_executions, job_id):
+                wait_for_delete = utils.wait_for_delete(
+                    client.job_executions, job_id)
+
+                if not wait_for_delete:
                     self.log.error(
                         'Error occurred during job deleting: %s' % job_id)
 
@@ -367,18 +374,22 @@ class UpdateJob(command.ShowOne):
 
         return parser
 
-    def take_action(self, parsed_args):
-        self.log.debug("take_action(%s)", parsed_args)
-        client = self.app.client_manager.data_processing
-
+    def _take_action(self, client, parsed_args):
         update_dict = utils.create_dict_from_kwargs(
             is_public=parsed_args.is_public,
            is_protected=parsed_args.is_protected)
 
-        data = client.job_executions.update(
-            parsed_args.job, **update_dict).job_execution
+        data = utils.update_job(client, self.app, parsed_args, update_dict)
+
+        return data
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
 
-        _format_job_output(data)
+        _format_job_output(self.app, data)
         data = utils.prepare_data(data, JOB_FIELDS)
 
         return self.dict2columns(data)
diff --git a/saharaclient/osc/v2/cluster_templates.py b/saharaclient/osc/v2/cluster_templates.py
new file mode 100644
index 0000000..28ad648
--- /dev/null
+++ b/saharaclient/osc/v2/cluster_templates.py
@@ -0,0 +1,149 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from osc_lib import utils as osc_utils
+from oslo_log import log as logging
+
+from saharaclient.osc import utils
+from saharaclient.osc.v1 import cluster_templates as ct_v1
+
+
+def _format_ct_output(app, data):
+    data['node_groups'] = ct_v1._format_node_groups_list(data['node_groups'])
+    data['anti_affinity'] = osc_utils.format_list(data['anti_affinity'])
+
+
+class CreateClusterTemplate(ct_v1.CreateClusterTemplate):
+    """Creates cluster template"""
+
+    log = logging.getLogger(__name__ + ".CreateClusterTemplate")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
+
+        _format_ct_output(self.app, data)
+        data = utils.prepare_data(data, ct_v1.CT_FIELDS)
+
+        return self.dict2columns(data)
+
+
+class ListClusterTemplates(ct_v1.ListClusterTemplates):
+    """Lists cluster templates"""
+
+    log = logging.getLogger(__name__ + ".ListClusterTemplates")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+        search_opts = {}
+        if parsed_args.plugin:
+            search_opts['plugin_name'] = parsed_args.plugin
+        if parsed_args.plugin_version:
+            search_opts['plugin_version'] = parsed_args.plugin_version
+
+        data = client.cluster_templates.list(search_opts=search_opts)
+
+        if parsed_args.name:
+            data = utils.get_by_name_substring(data, parsed_args.name)
+
+        if parsed_args.long:
+            columns = ('name', 'id', 'plugin_name', 'plugin_version',
+                       'node_groups', 'description')
+            column_headers = utils.prepare_column_headers(columns)
+
+        else:
+            columns = ('name', 'id', 'plugin_name', 'plugin_version')
+            column_headers = utils.prepare_column_headers(columns)
+
+        return (
+            column_headers,
+            (osc_utils.get_item_properties(
+                s,
+                columns,
+                formatters={
+                    'node_groups': ct_v1._format_node_groups_list
+                }
+            ) for s in data)
+        )
+
+
+class ShowClusterTemplate(ct_v1.ShowClusterTemplate):
+    """Display cluster template details"""
+
+    log = logging.getLogger(__name__ + ".ShowClusterTemplate")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = utils.get_resource(
+            client.cluster_templates, parsed_args.cluster_template).to_dict()
+
+        _format_ct_output(self.app, data)
+        data = utils.prepare_data(data, ct_v1.CT_FIELDS)
+
+        return self.dict2columns(data)
+
+
+class DeleteClusterTemplate(ct_v1.DeleteClusterTemplate):
+    """Deletes cluster template"""
+
+    log = logging.getLogger(__name__ + ".DeleteClusterTemplate")
+
+
+class UpdateClusterTemplate(ct_v1.UpdateClusterTemplate):
+    """Updates cluster template"""
+
+    log = logging.getLogger(__name__ + ".UpdateClusterTemplate")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        ct_id = utils.get_resource_id(
+            client.cluster_templates, parsed_args.cluster_template)
+
+        data = self._take_action(client, parsed_args, ct_id)
+
+        _format_ct_output(self.app, data)
+        data = utils.prepare_data(data, ct_v1.CT_FIELDS)
+
+        return self.dict2columns(data)
+
+
+class ImportClusterTemplate(ct_v1.ImportClusterTemplate):
+    """Imports cluster template"""
+
+    log = logging.getLogger(__name__ + ".ImportClusterTemplate")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
+
+        _format_ct_output(self.app, data)
+        data = utils.prepare_data(data, ct_v1.CT_FIELDS)
+
+        return self.dict2columns(data)
+
+
+class ExportClusterTemplate(ct_v1.ExportClusterTemplate):
+    """Export cluster template to JSON"""
+
+    log = logging.getLogger(__name__ + ".ExportClusterTemplate")
diff --git a/saharaclient/osc/v2/clusters.py b/saharaclient/osc/v2/clusters.py
new file mode 100644
index 0000000..40cb2cf
--- /dev/null
+++ b/saharaclient/osc/v2/clusters.py
@@ -0,0 +1,172 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from osc_lib import utils as osc_utils
+from oslo_log import log as logging
+
+from saharaclient.osc import utils
+from saharaclient.osc.v1 import clusters as c_v1
+
+
+def _format_cluster_output(app, data):
+    data['image'] = data.pop('default_image_id')
+    data['node_groups'] = c_v1._format_node_groups_list(data['node_groups'])
+    data['anti_affinity'] = osc_utils.format_list(data['anti_affinity'])
+
+
+class CreateCluster(c_v1.CreateCluster):
+    """Creates cluster"""
+
+    log = logging.getLogger(__name__ + ".CreateCluster")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
+
+        if parsed_args.count and parsed_args.count > 1:
+            clusters = []
+            for cluster in data['clusters']:
+                clusters.append(
+                    utils.get_resource(client.clusters,
+                                       cluster['cluster']['id']))
+
+            if parsed_args.wait:
+                for cluster in clusters:
+                    if not osc_utils.wait_for_status(
+                            client.clusters.get, cluster.id):
+                        self.log.error(
+                            'Error occurred during cluster creation: %s',
+                            data['id'])
+
+            data = {}
+            for cluster in clusters:
+                data[cluster.name] = cluster.id
+
+        else:
+            if parsed_args.wait:
+                if not osc_utils.wait_for_status(
+                        client.clusters.get, data['id']):
+                    self.log.error(
+                        'Error occurred during cluster creation: %s',
+                        data['id'])
+                data = client.clusters.get(data['id']).to_dict()
+            _format_cluster_output(self.app, data)
+            data = utils.prepare_data(data, c_v1.CLUSTER_FIELDS)
+
+        return self.dict2columns(data)
+
+
+class ListClusters(c_v1.ListClusters):
+    """Lists clusters"""
+
+    log = logging.getLogger(__name__ + ".ListClusters")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+        search_opts = {}
+        if parsed_args.plugin:
+            search_opts['plugin_name'] = parsed_args.plugin
+        if parsed_args.plugin_version:
+            search_opts['plugin_version'] = parsed_args.plugin_version
+
+        data = client.clusters.list(search_opts=search_opts)
+
+        if parsed_args.name:
+            data = utils.get_by_name_substring(data, parsed_args.name)
+
+        if parsed_args.long:
+            columns = ('name', 'id', 'plugin_name', 'plugin_version',
+                       'status', 'description', 'default_image_id')
+            column_headers = utils.prepare_column_headers(
+                columns, {'default_image_id': 'image'})
+        else:
+            columns = ('name', 'id', 'plugin_name', 'plugin_version',
+                       'status')
+            column_headers = utils.prepare_column_headers(
+                columns, {'default_image_id': 'image'})
+
+        return (
+            column_headers,
+            (osc_utils.get_item_properties(
+                s,
+                columns
+            ) for s in data)
+        )
+
+
+class ShowCluster(c_v1.ShowCluster):
+    """Display cluster details"""
+
+    log = logging.getLogger(__name__ + ".ShowCluster")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data, provision_steps = self._take_action(client, parsed_args)
+
+        _format_cluster_output(self.app, data)
+
+        data = self._show_cluster_info(data, provision_steps, parsed_args)
+        return data
+
+
+class DeleteCluster(c_v1.DeleteCluster):
+    """Deletes cluster"""
+
+    log = logging.getLogger(__name__ + ".DeleteCluster")
+
+
+class UpdateCluster(c_v1.UpdateCluster):
+    """Updates cluster"""
+
+    log = logging.getLogger(__name__ + ".UpdateCluster")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
+
+        _format_cluster_output(self.app, data)
+        data = utils.prepare_data(data, c_v1.CLUSTER_FIELDS)
+
+        return self.dict2columns(data)
+
+
+class ScaleCluster(c_v1.ScaleCluster):
+    """Scales cluster"""
+
+    log = logging.getLogger(__name__ + ".ScaleCluster")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
+
+        _format_cluster_output(self.app, data)
+        data = utils.prepare_data(data, c_v1.CLUSTER_FIELDS)
+
+        return self.dict2columns(data)
+
+
+class VerificationUpdateCluster(c_v1.VerificationUpdateCluster):
+    """Updates cluster verifications"""
+
+    log = logging.getLogger(__name__ + ".VerificationUpdateCluster")
diff --git a/saharaclient/osc/v2/data_sources.py b/saharaclient/osc/v2/data_sources.py
new file mode 100644
index 0000000..c05793b
--- /dev/null
+++ b/saharaclient/osc/v2/data_sources.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_log import log as logging
+
+from saharaclient.osc.v1 import data_sources as ds_v1
+
+
+class CreateDataSource(ds_v1.CreateDataSource):
+    """Creates data source"""
+
+    log = logging.getLogger(__name__ + ".CreateDataSource")
+
+
+class ListDataSources(ds_v1.ListDataSources):
+    """Lists data sources"""
+
+    log = logging.getLogger(__name__ + ".ListDataSources")
+
+
+class ShowDataSource(ds_v1.ShowDataSource):
+    """Display data source details"""
+
+    log = logging.getLogger(__name__ + ".ShowDataSource")
+
+
+class DeleteDataSource(ds_v1.DeleteDataSource):
+    """Delete data source"""
+
+    log = logging.getLogger(__name__ + ".DeleteDataSource")
+
+
+class UpdateDataSource(ds_v1.UpdateDataSource):
+    """Update data source"""
+
+    log = logging.getLogger(__name__ + ".UpdateDataSource")
diff --git a/saharaclient/osc/v2/images.py b/saharaclient/osc/v2/images.py
new file mode 100644
index 0000000..ed4fe4e
--- /dev/null
+++ b/saharaclient/osc/v2/images.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_log import log as logging
+
+from saharaclient.osc.v1 import images as images_v1
+
+IMAGE_FIELDS = ['name', 'id', 'username', 'tags', 'status', 'description']
+
+
+class ListImages(images_v1.ListImages):
+    """Lists registered images"""
+
+    log = logging.getLogger(__name__ + ".ListImages")
+
+
+class ShowImage(images_v1.ShowImage):
+    """Display image details"""
+
+    log = logging.getLogger(__name__ + ".ShowImage")
+
+
+class RegisterImage(images_v1.RegisterImage):
+    """Register an image"""
+
+    log = logging.getLogger(__name__ + ".RegisterImage")
+
+
+class UnregisterImage(images_v1.UnregisterImage):
+    """Unregister image(s)"""
+
+    log = logging.getLogger(__name__ + ".UnregisterImage")
+
+
+class SetImageTags(images_v1.SetImageTags):
+    """Set image tags (Replace current image tags with provided ones)"""
+
+    log = logging.getLogger(__name__ + ".SetImageTags")
+
+
+class AddImageTags(images_v1.AddImageTags):
+    """Add image tags"""
+
+    log = logging.getLogger(__name__ + ".AddImageTags")
+
+
+class RemoveImageTags(images_v1.RemoveImageTags):
+    """Remove image tags"""
+
+    log = logging.getLogger(__name__ + ".RemoveImageTags")
diff --git a/saharaclient/osc/v2/job_binaries.py b/saharaclient/osc/v2/job_binaries.py
new file mode 100644
index 0000000..e1aa30b
--- /dev/null
+++ b/saharaclient/osc/v2/job_binaries.py
@@ -0,0 +1,212 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from osc_lib.command import command
+from osc_lib import exceptions
+from osc_lib import utils as osc_utils
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+
+from saharaclient.osc import utils
+from saharaclient.osc.v1 import job_binaries as jb_v1
+
+
+class CreateJobBinary(command.ShowOne):
+    """Creates job binary"""
+
+    log = logging.getLogger(__name__ + ".CreateJobBinary")
+
+    def get_parser(self, prog_name):
+        parser = super(CreateJobBinary, self).get_parser(prog_name)
+
+        parser.add_argument(
+            '--name',
+            metavar="<name>",
+            help="Name of the job binary [REQUIRED if JSON is not provided]",
+        )
+        creation_type = parser.add_mutually_exclusive_group()
+        creation_type.add_argument(
+            '--url',
+            metavar='<url>',
+            help='URL for the job binary [REQUIRED if JSON and file are '
+                 'not provided]'
+        )
+        parser.add_argument(
+            '--description',
+            metavar="<description>",
+            help="Description of the job binary"
+        )
+        username = parser.add_mutually_exclusive_group()
+        username.add_argument(
+            '--username',
+            metavar='<username>',
+            help='Username for accessing the job binary URL',
+        )
+        username.add_argument(
+            '--access-key',
+            metavar='<accesskey>',
+            help='S3 access key for accessing the job binary URL',
+        )
+        password = parser.add_mutually_exclusive_group()
+        password.add_argument(
+            '--password',
+            metavar='<password>',
+            help='Password for accessing the job binary URL',
+        )
+        password.add_argument(
+            '--secret-key',
+            metavar='<secretkey>',
+            help='S3 secret key for accessing the job binary URL',
+        )
+        password.add_argument(
+            '--password-prompt',
+            dest="password_prompt",
+            action="store_true",
+            help='Prompt interactively for password',
+        )
+        password.add_argument(
+            '--secret-key-prompt',
+            dest="secret_key_prompt",
+            action="store_true",
+            help='Prompt interactively for S3 secret key',
+        )
+        parser.add_argument(
+            '--s3-endpoint',
+            metavar='<endpoint>',
+            help='S3 endpoint for accessing the job binary URL (ignored if '
                 'binary not in S3)',
+        )
+        parser.add_argument(
+            '--public',
+            action='store_true',
+            default=False,
+            help='Make the job binary public',
+        )
+        parser.add_argument(
+            '--protected',
+            action='store_true',
+            default=False,
+            help='Make the job binary protected',
+        )
+        parser.add_argument(
+            '--json',
+            metavar='<filename>',
+            help='JSON representation of the job binary. Other '
+                 'arguments will not be taken into account if this one is '
+                 'provided'
+        )
+        return parser
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        if parsed_args.json:
+            blob = osc_utils.read_blob_file_contents(parsed_args.json)
+            try:
+                template = jsonutils.loads(blob)
+            except ValueError as e:
+                raise exceptions.CommandError(
+                    'An error occurred when reading '
+                    'template from file %s: %s' % (parsed_args.json, e))
+            data = client.job_binaries.create(**template).to_dict()
+        else:
+            if parsed_args.password_prompt:
+                parsed_args.password = osc_utils.get_password(
+                    self.app.stdin, confirm=False)
+
+            if parsed_args.secret_key_prompt:
+                parsed_args.secret_key = osc_utils.get_password(
+                    self.app.stdin, confirm=False)
+
+            if not parsed_args.password:
+                parsed_args.password = parsed_args.secret_key
+
+            if not parsed_args.username:
+                parsed_args.username = parsed_args.access_key
+
+            if parsed_args.password and not parsed_args.username:
+                raise exceptions.CommandError(
+                    'Username via --username, or S3 access key via '
+                    '--access-key should be provided with password')
+
+            if parsed_args.username and not parsed_args.password:
+                raise exceptions.CommandError(
+                    'Password should be provided via --password or '
+                    '--secret-key, or entered interactively with '
+                    '--password-prompt or --secret-key-prompt')
+
+            if parsed_args.password and parsed_args.username:
+                if not parsed_args.url:
+                    raise exceptions.CommandError(
+                        'URL must be provided via --url')
+                if parsed_args.url.startswith('s3'):
+                    if not parsed_args.s3_endpoint:
+                        raise exceptions.CommandError(
+                            'S3 job binaries need an endpoint provided via '
+                            '--s3-endpoint')
+                    extra = {
+                        'accesskey': parsed_args.username,
+                        'secretkey': parsed_args.password,
+                        'endpoint': parsed_args.s3_endpoint,
+                    }
+
+                else:
+                    extra = {
+                        'user': parsed_args.username,
+                        'password': parsed_args.password
+                    }
+            else:
+                extra = None
+
+            data = client.job_binaries.create(
+                name=parsed_args.name, url=parsed_args.url,
+                description=parsed_args.description, extra=extra,
+                is_public=parsed_args.public,
+                is_protected=parsed_args.protected).to_dict()
+
+        data = utils.prepare_data(data, jb_v1.JOB_BINARY_FIELDS)
+
+        return self.dict2columns(data)
+
+
+class ListJobBinaries(jb_v1.ListJobBinaries):
+    """Lists job binaries"""
+
+    log = logging.getLogger(__name__ + ".ListJobBinaries")
+
+
+class ShowJobBinary(jb_v1.ShowJobBinary):
+    """Display job binary details"""
+
+    log = logging.getLogger(__name__ + ".ShowJobBinary")
+
+
+class DeleteJobBinary(jb_v1.DeleteJobBinary):
+    """Deletes job binary"""
+
+    log = logging.getLogger(__name__ + ".DeleteJobBinary")
+
+
+class UpdateJobBinary(jb_v1.UpdateJobBinary):
+    """Updates job binary"""
+
+    log = logging.getLogger(__name__ + ".UpdateJobBinary")
+
+
+class DownloadJobBinary(jb_v1.DownloadJobBinary):
+    """Downloads job binary"""
+
+    log = logging.getLogger(__name__ + ".DownloadJobBinary")
diff --git a/saharaclient/osc/v2/job_templates.py b/saharaclient/osc/v2/job_templates.py
new file mode 100644
index 0000000..ad85899
--- /dev/null
+++ b/saharaclient/osc/v2/job_templates.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_log import log as logging
+
+from saharaclient.osc.v1 import job_templates as jt_v1
+
+
+class CreateJobTemplate(jt_v1.CreateJobTemplate):
+    """Creates job template"""
+
+    log = logging.getLogger(__name__ + ".CreateJobTemplate")
+
+
+class ListJobTemplates(jt_v1.ListJobTemplates):
+    """Lists job templates"""
+
+    log = logging.getLogger(__name__ + ".ListJobTemplates")
+
+
+class ShowJobTemplate(jt_v1.ShowJobTemplate):
+    """Display job template details"""
+
+    log = logging.getLogger(__name__ + ".ShowJobTemplate")
+
+
+class DeleteJobTemplate(jt_v1.DeleteJobTemplate):
+    """Deletes job template"""
+
+    log = logging.getLogger(__name__ + ".DeleteJobTemplate")
+
+
+class UpdateJobTemplate(jt_v1.UpdateJobTemplate):
+    """Updates job template"""
+
+    log = logging.getLogger(__name__ + ".UpdateJobTemplate")
diff --git a/saharaclient/osc/v2/job_types.py b/saharaclient/osc/v2/job_types.py
new file mode 100644
index 0000000..1a22f8a
--- /dev/null
+++ b/saharaclient/osc/v2/job_types.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from os import path
+import sys
+
+from oslo_log import log as logging
+from oslo_serialization import jsonutils
+
+from saharaclient.osc.v1 import job_types as jt_v1
+
+
+class ListJobTypes(jt_v1.ListJobTypes):
+    """Lists job types supported by plugins"""
+
+    log = logging.getLogger(__name__ + ".ListJobTypes")
+
+
+class GetJobTypeConfigs(jt_v1.GetJobTypeConfigs):
+    """Get job type configs"""
+
+    log = logging.getLogger(__name__ + ".GetJobTypeConfigs")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        if not parsed_args.file:
+            parsed_args.file = parsed_args.job_type
+
+        data = client.job_templates.get_configs(parsed_args.job_type).to_dict()
+
+        if path.exists(parsed_args.file):
+            self.log.error('File "%s" already exists. Choose another one with '
+                           '--file argument.' % parsed_args.file)
+        else:
+            with open(parsed_args.file, 'w') as f:
+                jsonutils.dump(data, f, indent=4)
+            sys.stdout.write(
+                '"%(type)s" job configs were saved in "%(file)s" '
+                'file' % {'type': parsed_args.job_type,
+                          'file': parsed_args.file})
diff --git a/saharaclient/osc/v2/jobs.py b/saharaclient/osc/v2/jobs.py
new file mode 100644
index 0000000..1ffe66f
--- /dev/null
+++ b/saharaclient/osc/v2/jobs.py
@@ -0,0 +1,142 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+
+from osc_lib import utils as osc_utils
+from oslo_log import log as logging
+
+from saharaclient.osc import utils
+from saharaclient.osc.v1 import jobs as jobs_v1
+
+
+def _format_job_output(app, data):
+    data['status'] = data['info']['status']
+    del data['info']
+
+
+class ExecuteJob(jobs_v1.ExecuteJob):
+    """Executes job"""
+
+    log = logging.getLogger(__name__ + ".ExecuteJob")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
+
+        _format_job_output(self.app, data)
+        data = utils.prepare_data(data, jobs_v1.JOB_FIELDS)
+
+        return self.dict2columns(data)
+
+
+class ListJobs(jobs_v1.ListJobs):
+    """Lists jobs"""
+
+    log = logging.getLogger(__name__ + ".ListJobs")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = client.jobs.list()
+
+        for job in data:
+            job.status = job.info['status']
+
+        if parsed_args.status:
+            data = [job for job in data
+                    if job.info['status'] == parsed_args.status.replace(
+                        '-', '').upper()]
+
+        if parsed_args.long:
+            columns = ('id', 'cluster id', 'job template id', 'status',
+                       'start time', 'end time')
+            column_headers = utils.prepare_column_headers(columns)
+
+        else:
+            columns = ('id', 'cluster id', 'job template id', 'status')
+            column_headers = utils.prepare_column_headers(columns)
+
+        return (
+            column_headers,
+            (osc_utils.get_item_properties(
+                s,
+                columns
+            ) for s in data)
+        )
+
+
+class ShowJob(jobs_v1.ShowJob):
+    """Display job details"""
+
+    log = logging.getLogger(__name__ + ".ShowJob")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = client.jobs.get(parsed_args.job).to_dict()
+
+        _format_job_output(self.app, data)
+        data = utils.prepare_data(data, jobs_v1.JOB_FIELDS)
+
+        return self.dict2columns(data)
+
+
+class DeleteJob(jobs_v1.DeleteJob):
+    """Deletes job"""
+
+    log = logging.getLogger(__name__ + ".DeleteJob")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+        for job_id in parsed_args.job:
+            client.jobs.delete(job_id)
+            sys.stdout.write(
+                'Job "{job}" deletion has been started.\n'.format(job=job_id))
+
+        if parsed_args.wait:
+            for job_id in parsed_args.job:
+                wait_for_delete = utils.wait_for_delete(client.jobs, job_id)
+
+                if not wait_for_delete:
+                    self.log.error(
+                        'Error occurred during job deleting: %s' %
+                        job_id)
+                else:
+                    sys.stdout.write(
+                        'Job "{job}" has been removed successfully.\n'.format(
+                            job=job_id))
+
+
+class UpdateJob(jobs_v1.UpdateJob):
+    """Updates job"""
+
+    log = logging.getLogger(__name__ + ".UpdateJob")
+
+    def take_action(self, parsed_args):
+        self.log.debug("take_action(%s)", parsed_args)
+        client = self.app.client_manager.data_processing
+
+        data = self._take_action(client, parsed_args)
+
+        _format_job_output(self.app, data)
+        data = utils.prepare_data(data, jobs_v1.JOB_FIELDS)
+
+        return self.dict2columns(data)
diff --git a/saharaclient/osc/v2/plugins.py b/saharaclient/osc/v2/plugins.py
new file mode 100644
index 0000000..cf05512
--- /dev/null
+++ b/saharaclient/osc/v2/plugins.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2015 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from oslo_log import log as logging
+
+from saharaclient.osc.v1 import plugins as p_v1
+
+
+class ListPlugins(p_v1.ListPlugins):
+    """Lists plugins"""
+
+    log = logging.getLogger(__name__ + ".ListPlugins")
+
+
+class ShowPlugin(p_v1.ShowPlugin):
+    """Display plugin details"""
+
+    log = logging.getLogger(__name__ + ".ShowPlugin")
+
+
+class GetPluginConfigs(p_v1.GetPluginConfigs):
+    """Get plugin configs"""
+
+    log = logging.getLogger(__name__ + ".GetPluginConfigs")
+
+
+class UpdatePlugin(p_v1.UpdatePlugin):
+    log = logging.getLogger(__name__ + ".UpdatePlugin")
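Editor's note: a notable v1/v2 difference visible above is which client manager a job operation lands on: v2 uses client.jobs (and client.job_templates for definitions), while v1 uses client.job_executions (and client.jobs). The runtime effect of the helper layer can be shown with a small, runnable toy; update_job below mirrors the branch in utils.update_job from the diff, while FakeManager and the SimpleNamespace app objects are hypothetical stand-ins for the saharaclient managers and the OSC app:

from types import SimpleNamespace


def update_job(client, app, job_id, update_dict):
    # same branch as utils.update_job() in the diff, minus parsed_args
    if app.api_version['data_processing'] == '2':
        return client.jobs.update(job_id, **update_dict).job
    return client.job_executions.update(job_id, **update_dict).job_execution


class FakeManager(object):
    # hypothetical stand-in for a saharaclient resource manager
    def __init__(self, attr):
        self._attr = attr

    def update(self, job_id, **kwargs):
        return SimpleNamespace(**{self._attr: dict(kwargs, id=job_id)})


client = SimpleNamespace(jobs=FakeManager('job'),
                         job_executions=FakeManager('job_execution'))
v2_app = SimpleNamespace(api_version={'data_processing': '2'})
v1_app = SimpleNamespace(api_version={'data_processing': '1.1'})

assert update_job(client, v2_app, 'j1', {'is_public': True})['is_public']
assert update_job(client, v1_app, 'j1', {'is_public': True})['is_public']

In a real deployment the version that populates app.api_version is chosen by the user, typically through the API-version option or environment variable that saharaclient's OSC plugin registers for the data_processing service.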