| author | Gorka Eguileor <geguileo@redhat.com> | 2016-08-26 13:56:18 +0200 |
|---|---|---|
| committer | Gorka Eguileor <geguileo@redhat.com> | 2017-10-03 11:03:28 +0200 |
| commit | 45eb51eb995240120f437bb5ac61ccee22f9dc6b (patch) | |
| tree | 0f2f3737482506c56186f38b610319996a8632c0 /cinderclient/v3/shell.py | |
| parent | 1a4176ad87da88a39e4bd04e2c55e8109215d591 (diff) | |
| download | python-cinderclient-45eb51eb995240120f437bb5ac61ccee22f9dc6b.tar.gz | |
Add service cleanup command
Cinder volume services will perform cleanup on start, but when we have
multiple volume services grouped in a cluster, we may want to trigger
cleanup of services that are down.
This patch adds the `work-cleanup` command to trigger service cleanups. It
prints the service nodes that will be cleaned, as well as those that have no
alternative service in the cluster to perform the cleanup.
This command will only work on servers supporting API version 3.24 or
higher.
New command:
cinder work-cleanup [--cluster <cluster-name>] [--host <hostname>]
[--binary <binary>]
[--is-up <True|true|False|false>]
[--disabled <True|true|False|false>]
[--resource-id <resource-id>]
[--resource-type <Volume|Snapshot>]
Specs: https://specs.openstack.org/openstack/cinder-specs/specs/newton/ha-aa-cleanup.html
Change-Id: I1c33ffbffcb14f34ee2bda9042e706937b1147d7
Depends-On: If336b6569b171846954ed6eb73f5a4314c6c7e2e
Implements: blueprint cinder-volume-active-active-support
Diffstat (limited to 'cinderclient/v3/shell.py')
| -rw-r--r-- | cinderclient/v3/shell.py | 46 |
1 file changed, 46 insertions, 0 deletions
diff --git a/cinderclient/v3/shell.py b/cinderclient/v3/shell.py
index c453826..9700ae2 100644
--- a/cinderclient/v3/shell.py
+++ b/cinderclient/v3/shell.py
@@ -1011,6 +1011,52 @@ def do_cluster_disable(cs, args):
     utils.print_dict(cluster.to_dict())
 
 
+@api_versions.wraps('3.24')
+@utils.arg('--cluster', metavar='<cluster-name>', default=None,
+           help='Cluster name. Default=None.')
+@utils.arg('--host', metavar='<hostname>', default=None,
+           help='Service host name. Default=None.')
+@utils.arg('--binary', metavar='<binary>', default=None,
+           help='Service binary. Default=None.')
+@utils.arg('--is-up', metavar='<True|true|False|false>', dest='is_up',
+           default=None, choices=('True', 'true', 'False', 'false'),
+           help='Filter by up/down status, if set to true services need to be'
+                ' up, if set to false services need to be down. Default is '
+                'None, which means up/down status is ignored.')
+@utils.arg('--disabled', metavar='<True|true|False|false>', default=None,
+           choices=('True', 'true', 'False', 'false'),
+           help='Filter by disabled status. Default=None.')
+@utils.arg('--resource-id', metavar='<resource-id>', default=None,
+           help='UUID of a resource to cleanup. Default=None.')
+@utils.arg('--resource-type', metavar='<Volume|Snapshot>', default=None,
+           choices=('Volume', 'Snapshot'),
+           help='Type of resource to cleanup.')
+def do_work_cleanup(cs, args):
+    """Request cleanup of services with optional filtering."""
+    filters = dict(cluster_name=args.cluster, host=args.host,
+                   binary=args.binary, is_up=args.is_up,
+                   disabled=args.disabled, resource_id=args.resource_id,
+                   resource_type=args.resource_type)
+
+    filters = {k: v for k, v in filters.items() if v is not None}
+
+    cleaning, unavailable = cs.workers.clean(**filters)
+
+    columns = ('ID', 'Cluster Name', 'Host', 'Binary')
+
+    if cleaning:
+        print('Following services will be cleaned:')
+        utils.print_list(cleaning, columns)
+
+    if unavailable:
+        print('There are no alternative nodes to do cleanup for the following '
+              'services:')
+        utils.print_list(unavailable, columns)
+
+    if not (cleaning or unavailable):
+        print('No cleanable services matched cleanup criteria.')
+
+
 @utils.arg('host', metavar='<host>',
            help='Cinder host on which the existing volume resides; '
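For callers that use python-cinderclient as a library rather than the shell, the same cleanup can be requested through the workers manager that `do_work_cleanup` wraps. The sketch below is illustrative only: the `workers.clean()` call and its filter names come from the patch above, while the keystoneauth1 session setup, credentials, endpoint, and cluster name are assumptions.

```python
# Illustrative sketch: request a work cleanup via python-cinderclient.
# Only workers.clean() and its filter names come from the patch above;
# the auth details, endpoint, and cluster name are placeholder assumptions.
from keystoneauth1.identity import v3
from keystoneauth1 import session
from cinderclient import client

auth = v3.Password(auth_url='http://controller:5000/v3',
                   username='admin', password='secret',
                   project_name='admin',
                   user_domain_id='default',
                   project_domain_id='default')
sess = session.Session(auth=auth)

# The workers cleanup API requires microversion 3.24 or higher.
cinder = client.Client('3.24', session=sess)

# Same filters the shell command builds; pass only the ones you want,
# just as the shell drops any filter left at None before calling clean().
cleaning, unavailable = cinder.workers.clean(
    cluster_name='mycluster@lvmdriver-1', is_up='false')

for service in cleaning:
    print('Cleaning service %s on host %s' % (service.id, service.host))
for service in unavailable:
    print('No alternative node to clean service %s' % service.id)
```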
