author     Zuul <zuul@review.openstack.org>            2018-01-16 12:26:57 +0000
committer  Gerrit Code Review <review@openstack.org>   2018-01-16 12:26:57 +0000
commit     d2cf6e6ddc50fccbfa36700989d63d2b5627bc38 (patch)
tree       c7c60b867b5583eae34c4005345ed7e09a8bff8b
parent     24e46d116a14648652a1b67d9c2e98df953a8f38 (diff)
parent     45eb51eb995240120f437bb5ac61ccee22f9dc6b (diff)
download   python-cinderclient-d2cf6e6ddc50fccbfa36700989d63d2b5627bc38.tar.gz
Merge "Add service cleanup command"
-rw-r--r--  cinderclient/tests/unit/v3/fakes.py                             11
-rw-r--r--  cinderclient/tests/unit/v3/test_shell.py                        20
-rw-r--r--  cinderclient/v3/client.py                                        2
-rw-r--r--  cinderclient/v3/shell.py                                        46
-rw-r--r--  cinderclient/v3/workers.py                                      44
-rw-r--r--  releasenotes/notes/service_cleanup_cmd-cac85b697bc22af1.yaml    6
6 files changed, 129 insertions, 0 deletions
diff --git a/cinderclient/tests/unit/v3/fakes.py b/cinderclient/tests/unit/v3/fakes.py
index 910633e..5a57b21 100644
--- a/cinderclient/tests/unit/v3/fakes.py
+++ b/cinderclient/tests/unit/v3/fakes.py
@@ -606,6 +606,17 @@ class FakeHTTPClient(fake_v2.FakeHTTPClient):
             }
         }
 
+    def post_workers_cleanup(self, **kw):
+        response = {
+            'cleaning': [{'id': '1', 'cluster_name': 'cluster1',
+                          'host': 'host1', 'binary': 'binary'},
+                         {'id': '3', 'cluster_name': 'cluster1',
+                          'host': 'host3', 'binary': 'binary'}],
+            'unavailable': [{'id': '2', 'cluster_name': 'cluster2',
+                             'host': 'host2', 'binary': 'binary'}],
+        }
+        return 200, {}, response
+
     #
     # resource filters
     #
diff --git a/cinderclient/tests/unit/v3/test_shell.py b/cinderclient/tests/unit/v3/test_shell.py
index 39e3e18..ae15587 100644
--- a/cinderclient/tests/unit/v3/test_shell.py
+++ b/cinderclient/tests/unit/v3/test_shell.py
@@ -1241,3 +1241,23 @@ class ShellTest(utils.TestCase):
                          '--name foo --description bar --bootable '
                          '--volume-type baz --availability-zone az '
                          '--metadata k1=v1 k2=v2')
+
+    def test_worker_cleanup_before_3_24(self):
+        self.assertRaises(SystemExit,
+                          self.run_command,
+                          'work-cleanup fakehost')
+
+    def test_worker_cleanup(self):
+        self.run_command('--os-volume-api-version 3.24 '
+                         'work-cleanup --cluster clustername --host hostname '
+                         '--binary binaryname --is-up false --disabled true '
+                         '--resource-id uuid --resource-type Volume')
+        expected = {'cluster_name': 'clustername',
+                    'host': 'hostname',
+                    'binary': 'binaryname',
+                    'is_up': 'false',
+                    'disabled': 'true',
+                    'resource_id': 'uuid',
+                    'resource_type': 'Volume'}
+
+        self.assert_called('POST', '/workers/cleanup', body=expected)
diff --git a/cinderclient/v3/client.py b/cinderclient/v3/client.py
index 0f3a6bf..5eb5268 100644
--- a/cinderclient/v3/client.py
+++ b/cinderclient/v3/client.py
@@ -42,6 +42,7 @@ from cinderclient.v3 import volume_transfers
 from cinderclient.v3 import volume_type_access
 from cinderclient.v3 import volume_types
 from cinderclient.v3 import volumes
+from cinderclient.v3 import workers
 
 
 class Client(object):
@@ -91,6 +92,7 @@ class Client(object):
         self.transfers = volume_transfers.VolumeTransferManager(self)
         self.services = services.ServiceManager(self)
         self.clusters = clusters.ClusterManager(self)
+        self.workers = workers.WorkerManager(self)
         self.consistencygroups = consistencygroups.\
             ConsistencygroupManager(self)
         self.groups = groups.GroupManager(self)
diff --git a/cinderclient/v3/shell.py b/cinderclient/v3/shell.py
index d27ce20..c512207 100644
--- a/cinderclient/v3/shell.py
+++ b/cinderclient/v3/shell.py
@@ -1060,6 +1060,52 @@ def do_cluster_disable(cs, args):
     utils.print_dict(cluster.to_dict())
 
 
+@api_versions.wraps('3.24')
+@utils.arg('--cluster', metavar='<cluster-name>', default=None,
+           help='Cluster name. Default=None.')
+@utils.arg('--host', metavar='<hostname>', default=None,
+           help='Service host name. Default=None.')
+@utils.arg('--binary', metavar='<binary>', default=None,
+           help='Service binary. Default=None.')
+@utils.arg('--is-up', metavar='<True|true|False|false>', dest='is_up',
+           default=None, choices=('True', 'true', 'False', 'false'),
+           help='Filter by up/down status, if set to true services need to be'
+                ' up, if set to false services need to be down. Default is '
+                'None, which means up/down status is ignored.')
+@utils.arg('--disabled', metavar='<True|true|False|false>', default=None,
+           choices=('True', 'true', 'False', 'false'),
+           help='Filter by disabled status. Default=None.')
+@utils.arg('--resource-id', metavar='<resource-id>', default=None,
+           help='UUID of a resource to cleanup. Default=None.')
+@utils.arg('--resource-type', metavar='<Volume|Snapshot>', default=None,
+           choices=('Volume', 'Snapshot'),
+           help='Type of resource to cleanup.')
+def do_work_cleanup(cs, args):
+    """Request cleanup of services with optional filtering."""
+    filters = dict(cluster_name=args.cluster, host=args.host,
+                   binary=args.binary, is_up=args.is_up,
+                   disabled=args.disabled, resource_id=args.resource_id,
+                   resource_type=args.resource_type)
+
+    filters = {k: v for k, v in filters.items() if v is not None}
+
+    cleaning, unavailable = cs.workers.clean(**filters)
+
+    columns = ('ID', 'Cluster Name', 'Host', 'Binary')
+
+    if cleaning:
+        print('Following services will be cleaned:')
+        utils.print_list(cleaning, columns)
+
+    if unavailable:
+        print('There are no alternative nodes to do cleanup for the following '
+              'services:')
+        utils.print_list(unavailable, columns)
+
+    if not (cleaning or unavailable):
+        print('No cleanable services matched cleanup criteria.')
+
+
 @utils.arg('host', metavar='<host>',
            help='Cinder host on which the existing volume resides; '
diff --git a/cinderclient/v3/workers.py b/cinderclient/v3/workers.py
new file mode 100644
index 0000000..86f895d
--- /dev/null
+++ b/cinderclient/v3/workers.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2016 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Interface to workers API
+"""
+from cinderclient.apiclient import base as common_base
+from cinderclient import base
+
+
+class Service(base.Resource):
+    def __repr__(self):
+        return "<Service (%s): %s in cluster %s>" % (self.id, self.host,
+                                                     self.cluster_name or '-')
+
+    @classmethod
+    def list_factory(cls, mngr, elements):
+        return [cls(mngr, element, loaded=True) for element in elements]
+
+
+class WorkerManager(base.Manager):
+    base_url = '/workers'
+
+    def clean(self, **filters):
+        url = self.base_url + '/cleanup'
+        resp, body = self.api.client.post(url, body=filters)
+
+        cleaning = Service.list_factory(self, body['cleaning'])
+        unavailable = Service.list_factory(self, body['unavailable'])
+
+        result = common_base.TupleWithMeta((cleaning, unavailable), resp)
+        return result
diff --git a/releasenotes/notes/service_cleanup_cmd-cac85b697bc22af1.yaml b/releasenotes/notes/service_cleanup_cmd-cac85b697bc22af1.yaml
new file mode 100644
index 0000000..af5f930
--- /dev/null
+++ b/releasenotes/notes/service_cleanup_cmd-cac85b697bc22af1.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    New ``work-cleanup`` command to trigger server cleanups by other nodes
+    within a cluster on Active-Active deployments on microversion 3.24 and
+    higher.
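
For readers who want to exercise the merged feature from Python rather than the shell, here is a minimal sketch. Only `workers.clean()`, its filter keys, and its `(cleaning, unavailable)` return shape come from the patch above; the credentials, project, and auth URL are illustrative placeholders, and the `client.Client` call assumes a standard Keystone-authenticated environment.

```python
# Illustrative sketch only: drive the new /workers/cleanup API from Python.
# 'admin', 'secret', 'demo', and the auth URL are placeholder values for
# this example, not anything defined by the patch.
from cinderclient import client

# The command is gated on microversion 3.24, matching the
# test_worker_cleanup_before_3_24 test above.
cinder = client.Client('3.24', 'admin', 'secret', 'demo',
                       'http://controller:5000/v3')

# Keyword arguments become the JSON body POSTed to /workers/cleanup.
# clean() returns a TupleWithMeta of (cleaning, unavailable) lists of
# Service objects, as implemented in workers.py above.
cleaning, unavailable = cinder.workers.clean(cluster_name='cluster1',
                                             is_up='false')

for svc in cleaning:
    print('cleaning: id=%s host=%s' % (svc.id, svc.host))
for svc in unavailable:
    print('no alternative node for: id=%s host=%s' % (svc.id, svc.host))
```

The equivalent shell invocation, taken verbatim from the new test case, is `cinder --os-volume-api-version 3.24 work-cleanup --cluster clustername --host hostname --binary binaryname --is-up false --disabled true --resource-id uuid --resource-type Volume`.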